#@+leo-ver=5-thin
#@+node:ekr.20051110111150: * @file leoScripts.txt
#@+all
#@+node:ekr.20150416064259.1: ** Compare
#@+node:EKR.20040424065452: *3* script: Compare files
# Compare two files: report line counts, byte counts, and carriage-return counts.
p1 = r"c:\prog\leoMenu(1).py"
p2 = r"c:\prog\leoMenu(2).py"

# Binary mode preserves line endings; 'with' guarantees the files are closed.
with open(p1, "rb") as f1, open(p2, "rb") as f2:
    lines1 = f1.readlines()
    lines2 = f2.readlines()
    f1.seek(0)
    f2.seek(0)
    text1 = f1.read()
    text2 = f2.read()

# The data is bytes, so count a bytes literal (counting '\r' would raise TypeError).
cr1 = text1.count(b'\r')
cr2 = text2.count(b'\r')

print('-' * 20)
print("lines ", len(lines1), len(lines2))
print("chars ", len(text1), len(text2))
print("non-cr", len(text1) - cr1, len(text2) - cr2)
print("cr    ", cr1, cr2)
#@+node:EKR.20040424091411: *3* script: Compare ignoring newlines
# Compare two files for equality, both byte-for-byte and in text mode.
# Text mode normalizes line endings, so the two answers can differ.
p1 = r"c:\prog\leoMenu(1).py"
p2 = r"c:\prog\leoMenu(2).py"

# 'with' guarantees the files are closed even if a read raises.
with open(p1, "rb") as f1, open(p2, "rb") as f2:
    g.es("equal (raw mode)", f1.read() == f2.read())

with open(p1) as f1, open(p2) as f2:
    g.es("equal (text mode)", f1.read() == f2.read())
#@+node:ekr.20111017085134.16202: *3* script: Compare old and new nodes
# Find the first 'old...' and 'new...' children of p and diff their bodies.
old = new = None
for kid in p.children():
    if kid.h.startswith('old'):
        old = kid.copy()
    if kid.h.startswith('new'):
        new = kid.copy()
if old and new:
    print(old.b == new.b)
    if old.b != new.b:
        print('*' * 20)
        print(len(old.b), len(new.b))
        old_lines = g.splitLines(old.b)
        new_lines = g.splitLines(new.b)
        # Walk both line lists in lockstep, printing only the differences.
        for i in range(max(len(old_lines), len(new_lines))):
            in_old = i < len(old_lines)
            in_new = i < len(new_lines)
            if in_old and in_new and old_lines[i] == new_lines[i]:
                continue
            if in_old:
                print('old %2d %s' % (i, repr(old_lines[i])))
            if in_new:
                print('new %2d %s' % (i, repr(new_lines[i])))
#@+node:ekr.20150416063838.1: ** Data
#@+node:ekr.20050310082013: *3* script: AutoIt script from e
@ http://sourceforge.net/forum/message.php?msg_id=3039793

Here's a short script to open a Leo instance, then open a new Leo window from it, pause, and then exit.

I still have to work out how to collect error output, especially if Leo doesn't start at all —
we can't then depend on Leo's own error reporting!

make a node
@url ./leoopen1.au3

make another
@nosent leoopen1.au3
@c

@language elisp

; AutoIt Version: 3.0 a BASIC like language
; http://www.hiddensoft.com

; Opens Leo with no filename, then opens a new, closes it
; Preliminary, will eventually programmatically create
; leoPlugins.txt and various leoSettings.leo
; and run commands in all permutations looking for failures.

; Paths are hardwired but later scripts will be created on the fly
; maybe Leo can have a -trace mode to output to file a log of activities?

; exit when CTRL+ALT+x is pressed
HotKeySet("^!x", "MyExit")

Func MyExit()
    Exit 
EndFunc 

Opt("SendKeyDelay", 1)
Opt("WinWaitDelay", 80)

;fix path to leo.py
Run("python c:\c\leo\V43leos\leo\src\leo.py")
Sleep(2700) 

WinWaitActive("untitled")   

Sleep(700) 
Send("!Fn")  ; how to tell if there are errors?
Sleep(2700) 

WinWaitActive("untitled1")
Send("!Fx")
Sleep(2700) 

;careful you don't close the leo you are working from
Send("!Fx")
#@+node:ekr.20061018084920: *3* script: Munge database records
'''A script to munge database records so they conform to an existing db's format.'''
# Almost infinitely easier to do this in Python rather than FileMaker Pro's
# laughable scripting language.
path = r'c:\rebecca\REBECCA9_97.txt'
path2 = r'c:\rebecca\REBECCA9_97-converted.txt'
with open(path) as f:
    s = f.read()
print('-' * 40, len(s))
n = 0
with open(path2, 'w') as out:
    for line in s.split('\n'):
        n += 1
        if not line.strip():
            continue
        # bug: fails for fields containing commas.  Should use the csv module instead.
        fields = line.split(',')
        # 'zip_code' avoids shadowing the zip() builtin.
        info1, last, full, addr1, addr2, zip_code, info2, info3 = fields
        assert full.startswith('"')
        # First name is the first word inside the quoted full-name field.
        data = full[1:-1].strip().split(' ')
        first = '"%s"' % data[0].strip()
        # City and state come from the quoted second address field.
        data = addr2[1:-1].strip().split(' ')
        city = '"%s"' % ''.join(data[:-2])
        state = '"%s"' % data[-2].upper()
        result = []
        for field in (first, last, addr1, city, state, zip_code, info1, info2, info3):
            if field.startswith('"'):
                # Capitalize each word inside the quotes.
                inner = field[1:-1].strip()
                words = inner.split(' ')
                result.append('"%s"' % ' '.join(w.capitalize() for w in words))
            else:
                result.append(field)
        record = ','.join(result)
        print(record)
        out.write(record + '\n')
print('%d records' % n)
#@+node:ekr.20190509062342.1: *3* script: sqlite3 demo
# From https://docs.python.org/3/library/sqlite3.html

# I'll only mention changes to the python demo here.

import datetime
import sqlite3
conn = sqlite3.connect('example.db')

# Change 1:  Name the cursor 'cursor' to preserve Leo's c.
cursor = conn.cursor()

# Change 2: Catch any exception.
# This allows this demo to continue if the table already exists.
try:
    cursor.execute('''CREATE TABLE stocks
    (date text, trans text, symbol text, qty real, price real)''')
except Exception:
    pass

cursor.execute("INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)")
conn.commit()

# Change 3: don't bother closing and reopening the connection.

t = ('RHAT',)
cursor.execute('SELECT * FROM stocks WHERE symbol=?', t)
purchases = [
    ('2006-03-28', 'BUY',  'IBM',  1000, 45.00),
    ('2006-04-05', 'BUY',  'MSFT', 1000, 72.00),
    ('2006-04-06', 'SELL', 'IBM',  500,  53.00),
]
cursor.executemany('INSERT INTO stocks VALUES (?,?,?,?,?)', purchases)

# Change 4: Get the result as an iterator.

# Don't exhaust the iterator by printing results!
query = 'SELECT * FROM stocks ORDER BY price'
result = cursor.execute(query)

# Change 5: Put the results in a new @query node, with a timestamp.
last = c.lastTopLevel()
p = last.insertAfter()
timestamp = datetime.datetime.now().strftime("%Y%m%d:%H%M%S")
p.h = '@query %s: %s' % (timestamp, query)
p.b = '\n'.join([str(z) for z in result])
#
# Make the new node visible.
last.expand()
c.redraw()
#@+node:ekr.20211009080619.1: ** Files
#@+node:ekr.20061220094557: *3* script: Backup Outlook Express files
# http://support.microsoft.com/default.aspx/kb/270670
# See Notes child for the manual instructions taken from the URL above.

import glob
import os
import shutil

# Back up every file in the Outlook Express store directory to c:\OutlookBackup.
theDir = (r'C:\Documents and Settings\Ed\Local Settings\Application Data\Identities' +
    r'\{B14A7A05-7759-4249-A9C2-FDC8B4FDC50C}\Microsoft\Outlook Express')

files = sorted(glob.glob(theDir + r'\*.*'))

dest = r'c:\OutlookBackup'
if not g.os_path_exists(dest):
    g.es_print('***** Creating %s' % dest)
    os.mkdir(dest)

for inputName in files:
    base = g.os_path_basename(inputName)
    outputName = g.os_path_join(dest, base)
    # Copy byte-for-byte: the mail stores are binary files, so a text-mode
    # read/write (as the old code did) could corrupt them.
    shutil.copyfile(inputName, outputName)
    g.es_print('***** Wrote %s' % outputName)
#@+node:ekr.20061220094557.1: *4* Notes
@killcolor
@

To backup Outlook Express data:
• Copy mail files to a backup folder 
• Export the Address Book to a file 
• Export the mail account to a file 
• Export the news account to a file

To restore or import Outlook Express data:
• Import messages from the backup folder 
• Import the Address Book file 
• Import the mail account file 
• Import the news account file 

Step 1: Copy Mail Files to a Backup Folder (Make a backup copy of your Outlook Express e-mail message files):

(EKR: get the file name)

1. On the Tools menu, click Options. 
2. On the Maintenance tab, click Store Folder. 
3. Select the folder location, and then press CTRL+C to copy the location.
4. Click Cancel, and then click Cancel again to close the dialog box. 

r'C:\Documents and Settings\Ed\Local Settings\Application Data\Identities\{B14A7A05-7759-4249-A9C2-FDC8B4FDC50C}\Microsoft\Outlook Express'

(EKR: open the directory)

5. Click Start, and then click Run. 
6. In the Open box, press CTRL+V, and then click OK.

(EKR: Copy all files to a folder)

7. On the Edit menu, click Select All. 
8. On the Edit menu, click Copy, and then close the window.
9. Right-click any empty space on your desktop, click New, and then click Folder. 
10. Type mail backup for the folder name, and then press ENTER. 
11. Double-click the Mail Backup folder to open it. 
12. On the Edit menu, click Paste. 
13. Close the Mail Backup window. 

Step 2: Export the Address Book to a File

A .WAB (Windows Address Book) file is used by Outlook Express 5.x and 6.0 versions, even if multiple Identities are used. The individual data for each Identity is stored in a folder, by user name, within the .WAB file in use.

Exporting this data, while logged in to a specific Identity, is the only means of segregating the Address Book data. If the .WAB file becomes dissociated from the user Identities, the data can only be exported in total - not folder by folder.

Another reason to export the .WAB file to a .csv file is that if the .WAB file is shared with Microsoft Outlook, the addresses are stored in the *.pst file in Outlook. When you export the file from the Outlook Express File menu to a *.csv file it exports the correct contacts. If the Address Book is shared with Microsoft Outlook, you are not able to export from within the Address Book on the File menu. This option is dimmed or not available.

To export your Outlook Express address book: 1. On the File menu, click Export, and then click Address Book. 
2. Click Text File (Comma Separated Values), and then click Export. 
3. Click Browse. 
4. Locate the Mail Backup folder that you created. 
5. In the File Name box, type address book backup, and then click Save. 
6. Click Next. 
7. Click to select the check boxes for the fields that you want to export, and then click Finish. 
8. Click OK and then click Close. 

Step 3: Make a backup copy of your Outlook Express mail account:

1. On the Tools menu, click Accounts. 
2. On the Mail tab, click the mail account that you want to export, and then click Export. 
3. In the Save In box, locate the Mail Backup folder on your desktop, and then click Save. 
4. Repeat these steps for each mail account that you want to export. 
5. Click Close. 

Step 4: Make a backup copy of your Outlook Express news accounts:

1. On the Tools menu, click Accounts. 
2. On the News tab, click the news account that you want to export, and then click Export. 
3. In the Save In box, locate the Mail Backup folder on your desktop, and then click Save. 
4. Repeat these steps for each news account that you want to export. 
5. Click Close. 

Importing Outlook Express data

To restore data, you may need to re-create the Identities for each user, prior to using the following steps.
***Repeat each step, as needed, for each Identity.

Step 1: Import Messages from the Backup Folder (Import your Outlook Express e-mail messages from the Backup folder)

1. On the File menu, point to Import, and then click Messages. 
2. In the Select an e-mail program to import from box, click Microsoft Outlook Express 5 or Microsoft Outlook Express 6, and then click Next. 
3. Click Import mail from an OE5 store directory or Import mail from an OE6 store directory, and then click OK. 
4. Click Browse, and then click on the Mail Backup folder on your desktop. 
5. Click OK, and then click Next. 
6. Click All folders, click Next, and then click Finish. 

Step 2: Import the Address Book File

1. On the File menu, click Import, and then click Other Address Book. 
2. Click Text File (Comma Separated Values), and then click Import. 
3. Click Browse. 
4. Locate the Mail Backup folder on your desktop, click the address book Backup.csv file, and then click Open. 
5. Click Next, and then click Finish. 
6. Click OK, and then click Close. 

Step 3: Import the Mail Account File

1. On the Tools menu, click Accounts. 
2. On the Mail tab, click Import. 
3. In the Look In box, locate the Mail Backup folder on your desktop. 
4. Click the mail account that you want to import, and then click Open. 
5. Repeat these steps for each mail account that you want to import. 
6. Click Close. 

Step 4: Import the Newsgroup Account File

1. On the Tools menu, click Accounts. 
2. On the News tab, click Import. 
3. In the Look In box, locate the Mail Backup folder on your desktop. 
4. Click the news account that you want to import, and then click Open. 
5. Repeat these steps for each news account that you want to import. 
6. Click Close.

@color
#@+node:ekr.20050219054039: *3* script: Convert ChoiceMail .ini file to .csv file
@ These scripts work on the child nodes as shown.

The address children contained the actual data (removed for privacy)
#@+node:ekr.20050218174326.1: *4* Scipt to remove rejected entries
# Remove every .ini entry line group killed by a 'Rejected before registration' line.
p = p.firstChild().firstChild()
lines = g.splitLines(p.b)
reject = 'reason=Rejected before registration'
result = []
entries = []
for line in lines:
    if line.startswith('['):
        # A new entry starts: keep everything accumulated so far.
        result.extend(entries)
        entries = [line]
    elif line.startswith(reject):
        # Kill all previously accumulated lines of this entry.
        entries = []
    else:
        entries.append(line)
# NOTE(review): lines accumulated after the final '[' header are never
# flushed to result (matching the original behavior) — confirm intended.

c.setBodyString(p, ''.join(result))
print('done!')
#@+node:ekr.20050218170806.1: *5* @killcolor
@killcolor
#@+node:ekr.20050218170806.2: *6* address
#@+node:ekr.20050218170806: *4* Script to clean address
# Strip uninteresting key=value lines from the address data, and shorten
# 'name=' lines to just the name itself.
p = p.firstChild().firstChild()
lines = g.splitLines(p.b)
prefixes = (
    'access=',
    'bccsender=',
    'filtered=',
    'registered=',
    'messagetocount=',
    'messagecount=',
    'lastmessagedate=',
    'lastsource=',
    'replyToAddresses=',
    'creationdate=',
    'reason=Approved before registration',
    'reason=Address Book Contact',
    'registrationDate=',
    'registrationRequestSent=',
    'reason=Pre-approved sender',
    'preaccepted=1',
)
result = []
for line in lines:
    # str.startswith accepts a tuple of prefixes: one call replaces the
    # old for/break/else loop.
    if line.startswith(prefixes):
        continue  # Drop this line entirely.
    if line.startswith('name='):
        result.append(line[5:])
    else:
        result.append(line)

c.setBodyString(p, ''.join(result))
print('done!')
#@+node:ekr.20050219054351: *5* @killcolor
@killcolor
#@+node:ekr.20050219054351.1: *6* address
#@+node:ekr.20050218184044.10: *4* Script to create comma delimited lists
# Build a sorted list of comma-delimited summaries (header, first data line)
# for each entry found in the children of the address node.
address_p = p.firstChild()
result_p = p.firstChild().next()

result = []
entries = []
for child in address_p.children():
    for line in g.splitLines(child.b):
        if line.startswith('['):
            # A new entry starts: emit the previous one (header + first line).
            if entries:
                if len(entries) > 1:
                    result.append(','.join(entries[:2]))
                else:
                    result.append(entries[0])
            # Strip the surrounding brackets from the header line.
            # (The unused 'entries2' temp from the old code is gone.)
            entries = [line.rstrip()[1:-1]]
        elif line.strip():
            entries.append(line.rstrip())
# NOTE(review): the final entry is never flushed to result (matching the
# original behavior) — confirm intended.

result.sort()
c.setBodyString(result_p, '\n'.join(result))
print('done!')
#@+node:ekr.20050219054039.1: *5* address
#@+node:ekr.20050218184044.11: *5* result
#@+node:ekr.20040715105834: ** Fun
#@+node:ekr.20080222103719: *3* script: square problem
limit = 1000*1000

def computeSquares(limit):
    '''Return a dict mapping n*n -> n for every n with 2 <= n < limit.

    The keys are the (big) perfect squares; the values are their roots.
    '''
    return {i * i: i for i in range(2, limit)}

def inc(n):
    '''Return the number formed by adding 1 to each decimal digit of n.

    Return None if n <= 0, or if any digit of n is 9 (adding 1 would carry).
    For example inc(25) == 36 and inc(100) == 211.
    '''
    if n <= 0:
        return None
    s = str(n)
    if '9' in s:
        return None
    return int(''.join(str(int(ch) + 1) for ch in s))

# Search for squares i*i whose digit-wise increment is also a perfect square.
squares = computeSquares(limit)

i = 5
while i < 1000:
    j = i * i
    k = inc(j)
    k2 = squares.get(k)
    if k2:
        assert k2 * k2 == k
        print('**found**', i, '*', i, '=', j, k, '= %d*%d' % (k2, k2))
    # else: print(i, j, k)
    i += 1
print('tested all numbers <', i)
#@+node:ekr.20050803075926: *3* script: sudoku puzzle
@tabwidth -4

# Solves the sudoku puzzle.
# For another Leo program that does this, see: http://members.dslextreme.com/users/kayvan/sudoku/

import copy

digits = '123456789' # valid digits.

if 0:
    << 1-star puzzles >>
    << 2-star puzzles >>
    << 3-star puzzles >>
    << 4-star puzzles >>
    << 5-star puzzles >>

<< define data >>

@others

print('-' * 40)

# Build the solver from the selected data and run it unless the data is bad.
solver = sudokuClass(data=data)
if not solver.errors:
    solver.solve()
#@+node:ekr.20050804073824: *4* << define data >>
data = (
    '',
) 

data = (     # 9/2107 5 stars
    '7xx1xx6xx',
    'xxxxx6x14',
    '1xxx5xxx2',
    'x3x4xx82x',
    'xxx7x2xxx',
    'x71xx9x3x',
    '8xxx2xxx3',
    '31x8xxxxx',
    'xx7xx5xx8',
)
#@+node:ekr.20050911123109: *4* << 5-star puzzles >>
# Neither of these are solvable without guessing.

data = ( 
    'xx24xxxxx',
    'x41x3xxxx',
    '8xxx6xx4x',
    'x6xxx3xx9',
    'x7x9x8x3x',
    '2xx6xxx7x',
    'x2xx4xxx1',
    'xxxx1x72x',
    'xxxxx64xx') # 4/27/07 5 stars (very hard)

data = ( 
    '1x5xxx37x',
    'xxxxxx2xx',
    'x973xxx1x',
    'xxxx531x2',
    '3xx8x1xx4',
    '2x147xxxx',
    'x7xxx864x',
    'xx8xxxxxx',
    'x12xxx8x7') # 8/4 5 stars

data = ( 
    '2xxxx1834',
    'xxxx9xxxx',
    'x1x3xxx5x',
    'xx75xxxxx',
    '16xxxxx72',
    'xxxxx93xx',
    'x7xxx4x1x',
    'xxxx8xxxx',
    '8549xxxx3') # 8/9 5 stars (may be invalid)
#@+node:ekr.20050911145104: *4* << 4-star puzzles >>
data = (
    'x13xxxxx2',
    '6x2xx4xx8',
    '4xx3xxx6x',
    '2xx8xxxx7',
    'xxx715xxx',
    '9xxxx3xx4',
    'x2xxx1xx3',
    '1xx5xx4x9',
    '8xxxxx62x') # 9/10/05 4 stars
#@+node:ekr.20050811075608: *4* << 3-star puzzles >>
data = (
    '8xxx9x21x',
    'x9x4xxxxx',
    'xx58x7xx9',
    '7xx1xx9xx',
    'xxxx5xxxx',
    'xx6xx3x28',
    '6xx5x93xx',
    'xxxxx6x7x',
    'x48x1xxx6',
)

data = ( # 8/3: solvable.
    'x5xx9xxxx',
    'xx48xxxx9',
    'xxx1x728x',
    '56xxxx137',
    'xxxxxxxxx',
    '173xxxx42',
    'x215x8xxx',
    '6xxxx38xx',
    'xxxx1xx6x')

data = ( # 1 stars
    'x4xxxx179',
    'xx2xx8x54',
    'xx6xx5xx8',
    'x8xx7x91x',
    'x5xx9xx3x',
    'x1xx6xx4x',
    '3xx4xx7xx',
    '57x1xx2xx',
    '928xxxx6x')

data = (
    '6xx75x1xx',
    '8xxxx34xx',
    'x3x96xx25',
    'xxx4xx3x2',
    '7xxxxxxx6',
    '2x1xx5xxx',
    '31xx89x4x',
    'xx65xxxx1',
    'xx5x42xx3') # solvable.

data = (
    'xxxxx6xx5',
    'xx41xx8xx',
    'x5xx78x42',
    '58xxxx9xx',
    '3xxxxxxx7',
    'xx6xxxx18',
    '24x39xx7x',
    'xx7xx52xx',
    '9xx7xxxxx') # solvable

data = (
    'xxxxx6xx5',
    'xx41xx8xx',
    'x5xx78x42',
    '58xxxx9xx',
    '3xxxxxxx7',
    'xx6xxxx18',
    '24x39xx7x',
    'xx7xx52xx',
    '9xx7xxxxx') # 9/8/05 3 stars

data = (
    'xxxx64x15',
    'x549xx6x2',
    'xxxxxxx7x',
    'xxxx8x2xx',
    '1x8xxx5x7',
    'xx7x4xxxx',
    'x3xxxxxxx',
    '8x2xx319x',
    '94x87xxxx') # 9/9/05 3 stars

data = (
    '8xxxxxxxx',
    'x915x36xx',
    'x62xxxx8x',
    'xx9xx8xxx',
    'x752x984x',
    'xxx4xx9xx',
    'x1xxxx42x',
    'xx49x276x',
    'xxx7xxxx5',
)
#@+node:ekr.20050929065040: *4* << 2-star puzzles >>
data = (
    '4xxxxxxxx',
    '96xxx85xx',
    'x374x6xx1',
    '3x48xxx6x',
    'xxxx1xxxx',
    'x5xxx92x7',
    '5xx1x267x',
    'xx95xxx82',
    'xxxxxxxx9',
)

data = (
    'xxx395xxx',
    'xx5xx89x2',
    'xxxx2xxx5',
    '6x2xxxxx7',
    'x84xxx53x',
    '7xxxxx1x6',
    '3xxx6xxxx',
    '5x62xx7xx',
    'xxx831xxx',
)

data = ( # 8/6 2 stars
    '2x6xxxx49',
    'x37xx9xxx',
    '1xx7xxxx6',
    'xxx58x9xx',
    '7x5xxx8x4',
    'xx9x62xxx',
    '9xxxx4xx1',
    'xxx3xx49x',
    '41xxxx2x8')

data = (
    '9xx7x3xx6',
    'x87xx2xxx',
    '15xxxxx9x',
    'xxx6xx82x',
    'xx8xxx1xx',
    'x26xx8xxx',
    'x6xxxxx31',
    'xxx4xx97x',
    '4xx2x1xx8') # 8/30 2 stars

data = (
    '8xx3xxx7x',
    'xx57xxxxx',
    '9xx165x3x',
    '34xxxxxx9',
    'xxx5x4xxx',
    '7xxxxxx83',
    'x8x253xx4',
    'xxxxx65xx',
    'x2xxx1xx6',
) # 9/6/ 2 stars

data = ( 
    'x6x29xx8x',
    'xx8xxxx3x',
    'x1xx78xxx',
    'x217x9xx8',
    '6xxxxxxx3',
    '7xx6x492x',
    'xxx12xx4x',
    'x7xxxx3xx',
    'x5xx86x7x') # 2 stars
#@+node:ekr.20050927122648: *4* << 1-star puzzles >>
data = (
    'x4735xxx9',
    'x5x8x93xx',
    'xx84xx12x',
    '12x57xxx8',
    '7x5xxx2x6',
    '3xxx82x17',
    'x12xx59xx',
    'xx92x6x4x',
    '4xxx1875x',
)

data = (
    '19xxx84xx',
    '7xxx9xxxx',
    'x5xxxx986',
    'x19xxxx6x',
    '8xxxxxxx5',
    'x6xxxx72x',
    '684xxxx3x',
    'xxxx2xxx7',
    'xx26xxx14',
)

data = (
    'xx2xx7xx9',
    'x8x249x3x',
    'x31xx572x',
    'xx9xx8xx1',
    'x65xxx847',
    '4xx7xx2xx',
    'x931xx65x',
    'x5x862x73',
    '8xx5xx4xx') # 1 star

data = ( # 8/10 1 star
    'x6xxx5x19',
    'x9x34xxxx',
    'x8x96x5xx',
    'xxx8x93x1',
    '8x9xxx4x7',
    '2x17x4xxx',
    'xx3x86x4x',
    'xxxx27x6x',
    '47x5xxx8x')
#@+node:ekr.20050803075926.1: *4* class sudokuClass
class sudokuClass:

    '''A class to solve the sudoku puzzle.'''

    @others
#@+node:ekr.20050803075926.2: *5*  ctor (main) & helpers
def __init__ (self,data):
    '''Ctor for sudokuClass.

    data: a sequence of 9 strings of 9 characters each; '1'..'9' are known
    values, any other character (conventionally 'x') is an unknown cell.
    '''
    # g.trace('main')
    self.cells = []  # All cellClass instances, created row-major in initFromData.
    self.cols = []  # 9 columns of values (transpose of self.rows).
    self.colGroups = []  # Column group objects, set in finishInit.
    self.data = data  # The raw puzzle description.
    self.errors = 0  # Incremented by self.error.
    self.excludedGroupNumbers = []  # Square groups already used for guessing.
    self.level = 0  # Recursion depth of solve().
    self.rowGroups = []  # Row group objects, set in finishInit.
    self.rows = []  # 9 rows of values, built in initFromData.
    self.squareGroups = []  # 3x3 square group objects, set in finishInit.
    self.tracing = True  # Enables trace/printCells output.
    self.valid = True  # Cleared when an inconsistent state is detected.
    self.verbose = False  # Extra (currently disabled) trace detail.

    # Check the data and finish the init process.
    self.checkData()
    self.initFromData()
    self.printData()
    self.finishInit()
    # self.dump()
#@+node:ekr.20050803202932: *6* initFromData
def initFromData (self):
    '''Create self.cells, self.rows and self.cols from self.data.'''
    i = 0
    for row in self.data:
        thisRow = []
        j = 0
        for ch in row:
            # A character in '123456789' is a known value; anything else is unknown.
            if ch in digits:    val = ch
            else:               val = None
            self.cells.append(self.cellClass(self,val,i,j))
            thisRow.append(val)
            j += 1
        self.rows.append(thisRow)
        i += 1

    # The columns are the transpose of the rows.
    for j in range(9):  # was xrange: removed in Python 3.
        col = [row[j] for row in self.rows]
        self.cols.append(col)
        self.cols.append(col)
#@+node:ekr.20050803075926.4: *6* finishInit
def finishInit (self):
    '''Create the square, row and column groups, then wire them to the cells.'''
    for i in range(9):  # was xrange: removed in Python 3.
        self.squareGroups.append(self.squareGroupClass(self,i))
        # NOTE(review): rowGroups gets colGroupClass instances and vice
        # versa — this looks swapped; confirm intent before changing it.
        self.rowGroups.append(self.colGroupClass(self,i))
        self.colGroups.append(self.rowGroupClass(self,i))

    for z in self.squareGroups:
        z.finishCreate()
    for z in self.rowGroups:
        z.finishCreate()
    for z in self.colGroups:
        z.finishCreate()

    # Must be done last!
    for z in self.squareGroups:
        z.computeRelatedGroups()
    for z in self.cells:
        z.finishCreate()
#@+node:ekr.20050803121102: *6* checkData
def checkData (self):
    '''Validate that self.data is a 9x9 grid.

    Report the first problem via self.error (and return its result);
    return None if the data is well-formed.
    '''
    rows = len(self.data)
    if rows != 9:
        return self.error('wrong number of rows: %d' % rows)

    # Bug fix: 'i' was previously undefined here (NameError on bad data).
    for i, row in enumerate(self.data):
        cols = len(row)
        if cols != 9:
            return self.error('wrong number of columns in row %d: %d' % (i,cols))
#@+node:ekr.20050804070733: *6* check & helper
def check (self):
    '''Return True if every column, row and square group passes checkGroup.'''
    every = self.colGroups + self.rowGroups + self.squareGroups
    return all(self.checkGroup(group) for group in every)
#@+node:ekr.20050804071049: *7* checkGroup
def checkGroup (self,group):
    '''Return False (with a trace) if any settled value appears twice in group.'''
    seen = []
    for cell in group.cells:
        if len(cell.values) != 1:
            continue  # Unsettled cell: it cannot conflict yet.
        val = cell.values[0]
        if val in seen:
            g.trace('%s appears twice in group %s' % (val,repr(group)))
            return False
        seen.append(val)
    return True
#@+node:ekr.20050804071242: *5* printing & dumping
#@+node:ekr.20050803080858: *6* dump
def dump (self):
    '''Debug dump of group structure. All sections are currently disabled.'''
    if 0:
        print()
        print('groups...')
        for group in self.groups:
            print('group %d, rowsNumbers: %s colNumbers: %s' % (
                group.groupNumber,group.rowNumbers,group.colNumbers))

    if 0:
        print()
        print('row groups...')
        for group in self.rowGroups:
            print('%d %s' % (group.rowNumber, group.rowcol))

    if 0:
        print()
        print('related groups...')
        for group in self.groups:
            print('Groups related to group %d: %s' % (
                group.groupNumber,[g.groupNumber for g in group.relatedGroups]))
#@+node:ekr.20050803121730: *6* printData
def printData (self,tag='initial data'):
    '''Print the 9x9 grid, with a rule after each band of three rows.'''
    print()
    print(tag)
    print()

    i = 0
    for row in self.rows:
        i += 1
        print()
        for ch in row:
            # end=' ' replaces the old Python 2 trailing-comma prints.
            if ch:
                print('  %s  ' % ch, end=' ')
            else:
                print(' ___ ', end=' ')
        if i % 3 == 0 and i < 9:
            print()
            print()
            print('_' * 53)
        print()
    print()
#@+node:ekr.20050803200132: *6* printCells
def printCells (self,tag=''):
    '''Print each cell's remaining possible values, nine cells per line.'''
    print()
    if tag:
        print(tag)
        print()

    i = 0
    for cell in self.cells:
        # end=' ' replaces the old Python 2 trailing-comma prints.
        if len(cell.values) == 9:
            print('%7s' % '1..9', end=' ')
        else:
            print('%7s' % ''.join(cell.values), end=' ')
        i += 1
        if i % 9 == 0:
            print()
#@+node:ekr.20050911112043.1: *5* Utils
#@+node:ekr.20050803095202: *6* groupNumber
def groupNumber (self,row,col):
    '''Map a (row, col) position to its 3x3 square-group index (0..8).'''
    band = row // 3
    stack = col // 3
    return band * 3 + stack
#@+node:ekr.20050803075926.3: *6* error
def error (self,s):
    '''Print an error message and bump the error count.'''
    print('oops', s)
    self.errors += 1
#@+node:ekr.20050803215553: *6* trace
def trace (self,s):
    '''Print s, but only when tracing is enabled.'''
    if self.tracing:
        print(s)
#@+node:ekr.20050803202932.1: *6* isFinished
def isFinished (self):
    '''Return True when every cell has exactly one remaining value.'''
    for cell in self.cells:
        count = len(cell.values)
        # An empty value set means an invalid state, caught earlier.
        assert(count > 0)
        if count > 1:
            return False
    return True
#@+node:ekr.20050911094859: *5* Guesses
#@+node:ekr.20050803203001: *6* findBestGroup
def findBestGroup (self,excludedGroupNumbers):
    '''Return the non-excluded square group with the most (but not all)
    known cells, or None if no such group exists.'''
    bestGroup = None
    bestKnown = 0
    for group in self.squareGroups:
        n = 0 # Number of known cells
        for cell in group.cells:
            if len(cell.values) == 1:
                n += 1
        # Fully-known groups (n == 9) are useless for guessing.
        if 9 > n > bestKnown:
            if group.groupNumber not in excludedGroupNumbers:
                bestGroup = group
                bestKnown = n

    if bestGroup:
        if self.tracing:
            print()
            print('best group %d' % bestGroup.groupNumber)
            if 0:
                if self.verbose:
                    print('unknown cells', end=' ')
                    for cell in bestGroup.unknownCells():
                        print(cell, end=' ')
                    print('unknown vals', end=' ')
                    for val in bestGroup.unknownVals():
                        print(val, end=' ')
                    print()

    return bestGroup
#@+node:ekr.20050803210939: *6* findGuesses
def findGuesses (self):
    '''Return a list of guesses (each a list of g.bunch(cell,val)) covering
    the unknown cells of the best remaining square group.'''
    guesses = []
    group = self.findBestGroup(self.excludedGroupNumbers)
    # Bug fix: test for None *before* using group (the append used to run
    # first, raising AttributeError), and qualify the traced name (it was
    # a bare, undefined 'excludedGroupNumbers').
    if not group:
        g.trace('No groups left to guess: %s' % self.excludedGroupNumbers)
        self.valid = False
        return []
    self.excludedGroupNumbers.append(group.groupNumber)

    # Generate all combinations of cells and unknown vals.
    cells = [cell for cell in group.cells if len(cell.values) > 1]
    vals = []
    for cell in cells:
        for val in cell.values:
            if val not in vals: vals.append(val)
    n = len(vals)
    for i in range(n):  # was xrange: removed in Python 3.
        guess = [] ; j = 0
        for cell in cells:
            bunch = g.bunch(cell=cell,val=vals[(i+j)%n])
            j += 1
            guess.append(bunch)
        if self.isValidGuess(guess):
            guesses.append(guess)

    if not guesses:
        g.trace('No valid guess for group %d' % group.groupNumber)
        self.valid = False
        return []

    if 0: # Another trace is in initFromGuess
        print('level %d guesses...' % self.level)
        for guess in guesses:
            for bunch in guess:
                print(bunch.cell, bunch.val)

    return guesses
#@+node:ekr.20050804060706: *6* isValidGuess
def isValidGuess (self,guess):
    '''Return True if the guess is consistent. Currently a stub.'''

    # NOTE: this early return deliberately disables the check below; the
    # unreachable code is kept as the intended future implementation.
    return True ##

    for bunch in guess:
        if not bunch.cell.valIsPossible(bunch.val):
            return False

    return True
#@+node:ekr.20050803075926.5: *5* solve (main)
def solve (self):

    n = 0 ; self.valid = True
    while not self.errors and self.valid:
        n += 1
        self.progress = 0
        if self.tracing:
            print '*' * 40
            print 'solve: iteration %d at level %d' % (n,self.level)
        if not self.check(): return False
        if self.tracing: self.printCells()
        for cell in self.cells:
            # Reduce the possible values for the cell.
            cell.reduce()
        if not self.valid: break
        for cell in self.cells:
            # Find any values that appear only in one place in a group.
            cell.unique()
        if self.isFinished():
            self.printCells('success!') ; return True
        if self.tracing: self.printCells()
        for cell in self.cells:
            # Remove any possible values that would make other groups impossible.
            cell.removeConflicts()
        if not self.valid: break
        if self.isFinished():
            if self.level == 0: self.printCells('success!')
            return True
        if self.progress == 0:
            << guess an answer >>

    if self.tracing:
        if not self.valid:
            print ; print 'reached invalid state'
        if self.progress == 0:
            print ; print 'no progress'
        self.printCells()
    return False
#@+node:ekr.20050911085945: *6* << guess an answer >>
if self.level < 2:
    # Save the previous data.
    save = [g.bunch(cell=cell,values=cell.values[:]) for cell in self.cells]
    guesses = self.findGuesses()
    if self.tracing:
        print '-'*20,'%d valid guesses' % len(guesses)
    if not guesses:
        return False
    n = 0
    for guess in guesses:
        # Restore the previous state.
        for b in save:
            b.cell.values = b.values[:]
        # Make the guess.
        self.level += 1 ; n += 1
        if self.tracing:
            print ; print '-'*40,'making guess %d at level %d' % (n,self.level)
        for b in guess:
            b.cell.values = str(b.val)
            if self.tracing: g.trace(b.cell,b.val)
        if self.tracing:
            self.printCells()
        # Call ourselves recursively.
        ok = self.solve()
        self.level -= 1
        if ok: return True
    if self.tracing or self.level == 0:
        print 'no solution is possible at level %d' % self.level
    # Restore the previous state.
    for b in save:
        b.cell.values = b.values[:]
    return False
else:
    if self.tracing:
        print 'maximum depth exceeded'
return False
#@+node:ekr.20050911135016: *5* group classes
@ A group is essentially just a collection of cells.
#@+node:ekr.20050911101819: *6* class squareGroupClass
class squareGroupClass:

    @others
#@+node:ekr.20050803121102.2: *7*  ctor
def __init__ (self,sudoku,n):
    '''Ctor for squareGroupClass: 3x3 square n (0..8) of the puzzle.'''
    # g.trace('square',n)
    self.groupNumber = n
    self.main = sudoku  # The owning sudokuClass instance.

    # Set later, in finishCreate and computeRelatedGroups...
    self.cells = []
    self.colNumbers = []
    self.rowNumbers = []
    self.relatedGroups = []
#@+node:ekr.20050911101819.1: *7* __repr__ & __str__
def __repr__ (self):
    '''Return a debug representation of this square group.'''
    return '<square group %d>' % self.groupNumber

__str__ = __repr__  # str() and repr() render identically.
#@+node:ekr.20050803130829: *7* finishCreate
def finishCreate (self):
    '''Bind this square group to its cells and record their row/col numbers.'''
    main = self.main

    mine = [cell for cell in main.cells if cell.groupNumber == self.groupNumber]
    self.cells = mine

    for cell in mine:
        cell.squareGroup = self

    # The distinct row and column numbers covered by this square, sorted.
    self.rowNumbers = sorted({cell.i for cell in mine})
    self.colNumbers = sorted({cell.j for cell in mine})
#@+node:ekr.20050803161504: *7* computeRelatedGroups
def computeRelatedGroups (self):

    '''Compute relatedGroups: the other square groups sharing at least one
    row or column number with this one.'''

    def overlaps (other):
        # True if other spans any of our columns or rows.
        return (
            any(z in self.colNumbers for z in other.colNumbers) or
            any(z in self.rowNumbers for z in other.rowNumbers))

    self.relatedGroups = []
    for other in self.main.squareGroups:
        if other is self:
            continue
        if overlaps(other) and other not in self.relatedGroups:
            self.relatedGroups.append(other)
#@+node:ekr.20050910194752: *6* class colGroupClass
class colGroupClass:

    @others
#@+node:ekr.20050910194752.1: *7* ctor
def __init__ (self,sudoku,j):

    '''Ctor for colGroupClass: the group of all cells in column j.'''

    # g.trace('col',j)
    self.j = j # The column number.
    self.main = sudoku # The top-level sudoku object.

    # Set later, in finishCreate...
    self.cells = [] # All cells in column j.
    self.col = None # main.cols[j].
#@+node:ekr.20050911102800: *7* __repr__ & __str__
def __repr__ (self):

    '''Return a terse description of this column group.'''

    return '<col group %d>' % self.j

__str__ = __repr__ # str() and repr() give the same text.
#@+node:ekr.20050910195107: *7* finishCreate
def finishCreate(self):

    '''Link this column group to its column and to its cells.'''

    board = self.main
    col_n = self.j

    self.col = board.cols[col_n]

    # All cells in column col_n, each pointing back at this group.
    self.cells = [z for z in board.cells if z.j == col_n]
    for z in self.cells:
        z.colGroup = self
#@+node:ekr.20050910194752.2: *6* class rowGroupClass
class rowGroupClass:

    @others
#@+node:ekr.20050910194752.3: *7* ctor
def __init__ (self,sudoku,i):

    '''Ctor for rowGroupClass: the group of all cells in row i.'''

    # g.trace('row',i)
    self.i = i # The row number.
    self.main = sudoku # The top-level sudoku object.

    # Set later, in finishCreate...
    self.cells = [] # All cells in row i.
    self.row = None # main.rows[i].
#@+node:ekr.20050911102800.1: *7* __repr__ & __str__
def __repr__ (self):

    '''Return a terse description of this row group.'''

    return '<row group %d>' % self.i

__str__ = __repr__ # str() and repr() give the same text.
#@+node:ekr.20050910195107.1: *7* finishCreate
def finishCreate(self):

    '''Link this row group to its row and to its cells.'''

    board = self.main
    row_n = self.i

    self.row = board.rows[row_n]

    # All cells in row row_n, each pointing back at this group.
    self.cells = [z for z in board.cells if z.i == row_n]
    for z in self.cells:
        z.rowGroup = self
#@+node:ekr.20050803075926.7: *5* class cellClass
class cellClass:

    '''A class representing what is known about a particular cell.'''

    @others
#@+node:ekr.20050911144450: *6*  birth
#@+node:ekr.20050803081438: *7*  ctor (cell)
def __init__ (self,sudoku,val,i,j):

    '''Ctor for cellClass: the cell at row i, column j of the given sudoku.

    val is the cell's known digit, or None if the cell starts empty.
    '''

    # g.trace('cell',i,j,val)
    self.i = i # Row number.
    self.j = j # Column number.
    self.groupNumber = sudoku.groupNumber(i,j) # Which square group contains us.
    self.main = sudoku # The top-level sudoku object.
    # self.values holds the digits (as strings) still possible for this cell.
    # `digits` is a module-level constant not visible in this node --
    # presumably the sequence of all candidate digits; confirm its definition.
    if val is None:     self.values = [digit for digit in digits]
    else:               self.values = [str(val)]
    self.verbose = self.main.verbose

    # Set by group ctors...
    self.colGroup = None
    self.rowGroup = None
    self.squareGroup = None
#@+node:ekr.20050803200724: *7* __repr__ & __str__
def __repr__ (self):

    '''Return a terse description of this cell, giving its row and column.'''

    return 'cell[%d,%d]' % (self.i, self.j)

__str__ = __repr__ # str() and repr() give the same text.
#@+node:ekr.20050911113403: *7* finishCreate
def finishCreate(self):

    '''Compute colGroups/rowGroups: the square groups whose columns/rows
    include this cell's column/row.'''

    # g.trace(self)
    assert(self.colGroup)
    assert(self.rowGroup)
    assert(self.squareGroup)

    squares = self.main.squareGroups
    self.colGroups = [z for z in squares if self.j in z.colNumbers]
    self.rowGroups = [z for z in squares if self.i in z.rowNumbers]
#@+node:ekr.20050911112043.2: *6* error & trace
def error (self,s):

    '''Delegate error reporting to the top-level sudoku object.'''

    self.main.error(s)

def trace (self,s):

    '''Delegate tracing to the top-level sudoku object.'''

    self.main.trace(s)
#@+node:ekr.20050911092707: *6* reduce
def reduce (self):

    '''Reduce the possible values in self.values: remove an item from
    self.values if any cell in this cell's groups contains only that value.

    Increments self.main.progress or set self.main.valid = False to indicate status.
    '''

    # Nothing to do if the puzzle is already known to be inconsistent.
    if not self.main.valid: return
    n = len(self.values)
    if n == 0: self.main.valid = False
    if n < 2:  return # Empty or already solved: nothing to reduce.

    # Scan the three groups containing this cell.
    for group in (
        self.colGroup,
        self.rowGroup,
        self.squareGroup,
    ):
        for cell in group.cells:
            # A *solved* neighbor excludes its value from this cell.
            if (
                cell is not self and
                len(cell.values) == 1 and 
                cell.values[0] in self.values
            ):
                self.values.remove(cell.values[0])
                n -= 1 
                if n == 0:
                    self.main.valid = False
                    return
                if n == 1:
                    self.setValue(self.values[0])
                    # NOTE(review): the loops keep running after setValue even
                    # though this cell is now solved; setValue has rebound
                    # self.values, so later removals act on the new list.  The
                    # file's own notes say the guessing logic has bugs -- confirm.
                else:
                    self.main.progress += 1
#@+node:ekr.20050911111404: *6* removeConflicts
# This is about the most sophisticated deduction that a human could make.

def removeConflicts (self):

    '''Remove a possible value if assigning to this value would make it
    impossible to satisfy a related group.

    Increments self.main.progress or set self.main.valid = False to indicate status. '''

    if not self.main.valid: return
    n = len(self.values)
    if n == 0: self.main.valid = False
    if n < 2:  return # Empty or already solved: nothing to remove.

    i,j = self.i,self.j

    # The square groups sharing this cell's column/row, excluding its own square.
    colGroups = [group for group in self.colGroups if group != self.squareGroup]
    rowGroups = [group for group in self.rowGroups if group != self.squareGroup]

    # Check for row conflicts.
    for val in self.values:
        for group in rowGroups:
            # Count spots in the related square that could hold val *outside* row i.
            spots = 0
            for cell in group.cells:
                if i != cell.i and val in cell.values:
                    spots += 1
            if spots == 0:
                # The related square must place val in row i, so this cell can't hold it.
                self.trace('row conflict: cell: %s, val: %s, group %s' % (self,val,group))
                # Returning immediately after the remove keeps the for-loop safe.
                self.values.remove(val)
                n -= 1 ; self.main.progress += 1
                if n == 0:
                    self.main.valid = False
                # NOTE(review): unlike the col branch below, this branch never calls
                # self.setValue when n == 1.  The file's own notes say the guessing
                # logic has bugs -- this asymmetry may be one of them; confirm.
                return

    # Check for col conflicts.
    for val in self.values:
        for group in colGroups:
            # Count spots in the related square that could hold val *outside* col j.
            spots = 0
            for cell in group.cells:
                if j != cell.j and val in cell.values:
                    spots += 1
            if spots == 0:
                # The related square must place val in col j, so this cell can't hold it.
                self.trace('col conflict: cell: %s, val: %s, group %s' % (self,val,group))
                self.values.remove(val)
                n -= 1
                if n == 0:
                    self.main.valid = False
                if n == 1:
                    self.setValue(self.values[0])
                else:
                    self.main.progress += 1
                return
#@+node:ekr.20050911094544: *6* setValue
def setValue (self,val):

    '''Record val as the proper value of this cell.

    Sets self.values = [str(val)] and removes val from the values of every
    *other* cell in this cell's three groups, setting self.main.valid = False
    if that empties any of them.
    '''

    values = self.values
    if self.main.tracing:
        g.trace(self,val,values)
    # Both visible callers pass an element of self.values, so val is one of
    # the (string) entries and this assert holds.
    assert(val in values)
    values.remove(val)
    self.main.progress += 1
    self.values = [str(val)] # Rebind: this cell is now solved.

    # Remove val as a possibility from every other cell in our groups.
    for group in (
        self.colGroup,
        self.rowGroup,
        self.squareGroup,
    ):
        for cell in group.cells:
            if cell is not self:
                if val in cell.values:
                    cell.values.remove(str(val))
                    self.main.progress += 1
                    if len(cell.values) == 0:
                        # No values remain: the puzzle state is inconsistent.
                        self.main.valid = False
#@+node:ekr.20050911092707.1: *6* unique
def unique (self):

    '''Set self.values to [val] if val appears in only one place in any of this cells groups.

    Increments self.main.progress or set self.main.valid = False to indicate status.
    '''

    if not self.main.valid: return
    n = len(self.values)
    if n == 0: self.main.valid = False
    if n < 2:  return # Empty or already solved: nothing to do.

    for group in (
        self.colGroup,
        self.rowGroup,
        self.squareGroup,
    ):
        for val in self.values:
            # Count how many cells of the group could still hold val.
            spots = 0
            for cell in group.cells:
                if val in cell.values:
                    spots += 1
            # val is in self.values, and self is in each of its groups.
            assert(spots>0)
            if spots == 1:
                # We have found the only possible place for this value.
                self.setValue(val) # Increments self.main.progress.
                return
#@+node:ekr.20050803134436.1: *6* valIsPossible
def valIsPossible (self,val):

    '''Return True unless some *other* cell in this cell's row or column
    group is already pinned to val.'''

    # g.trace(self,val)
    assert(val is not None)

    def pinned_to_val (other):
        # True if other is a different cell whose only remaining value is val.
        return other is not self and len(other.values) == 1 and other.values[0] == val

    for other in self.rowGroup.cells:
        if pinned_to_val(other):
            if self.main.tracing:
                g.trace('invalid guess: %s in row %d: %s' % (val,self.i,self.rowGroup.cells))
            return False

    for other in self.colGroup.cells:
        if pinned_to_val(other):
            if self.main.tracing:
                g.trace('invalid guess: %s in col %d: %s' % (val,self.j,self.colGroup.cells))
            return False

    return True
#@+node:ekr.20080124063225: *3* sudoku changes
# There are bugs in the guessing logic.
# The following does not fix all of them.

In findGuesses:

    guesses = self.findAllGuesses(vals,cells)
    g.trace('%d raw guess' % len(guesses))
    guesses = [z for z in guesses if self.isValidGuess(z)]

findAllGuesses:def findAllGuesses(self,vals,cells):

    if not vals or not cells:
        return []

    guesses = []
    cell = cells[0]
    for val in vals:
        guess = [g.bunch(cell=cell,val=val)]
        vals2 = [z for z in vals if z != val]
        self.completeGuess(guess,vals2,cells[1:])
        guesses.append(guess)
    return guesses

def completeGuess(self,guess,vals,cells):

    if not vals or not cells:
        return []

    cell = cells[0]
    for val in vals:
        guess.append(g.bunch(cell=cell,val=val))
        vals2 = [z for z in vals if z != val]
        self.completeGuess(guess,vals2,cells[1:])


This works for the following 5* problem:

data = (     # 5 stars
    '1xx2x9xx3',
    'xx6xx3xx7',
    'x3xxxx8xx',
    'xxxx9xxx5',
    '78xxxxx94',
    '9xxx2xxxx',
    'xx1xxxx3x',
    '2xx8xx4xx',
    '8xx5x6xx2',
)
#@+node:ekr.20230925012359.1: *3* script: list all rhythms in a 4/4 measure
# Leo script: list every rhythm (pattern of eighth and quarter notes) in one
# 4/4 measure, split into 'square' and 'jazz' rhythms.  g is Leo's script
# global; @others pulls in the helper functions defined in the child nodes.
g.cls()

@others  # Define helpers

# Grand totals across all groups.
total, total_square, total_jazz = 0, 0, 0

for group in range(5):

    # Group k uses 2*k eighth notes; the rest of the bar is quarter notes.
    n8 = group * 2
    n4 = compute_quarters(n8)
    print('')
    print(f"===== Group {group + 1} =====: {n8} eighth notes, {n4} quarter notes...")
    print('')

    results = find_patterns(n8)
    converted = convert_patterns(results)
    assert len(converted) == len(results)  # Sanity check; 'converted' is otherwise unused.

    # Partition the patterns into square and jazz rhythms.
    square_patterns = [z for z in results if is_square(z)]
    jazz_patterns = [z for z in results if not is_square(z)]

    total += len(results)
    total_square += len(square_patterns)
    total_jazz += len(jazz_patterns)

    print(
        f"{len(results)} total rhythm{g.plural(len(results))}. "
        f"{len(square_patterns)} square, {len(jazz_patterns)} jazz")

    # Dot/dash renderings of each partition.
    square_converted = [convert_pattern(z) for z in square_patterns]
    jazz_converted = [convert_pattern(z) for z in jazz_patterns]

    print('\nSquare:\n')
    for i, z in enumerate(square_converted):
        print(f"{i + 1:2} {z!s}")

    if jazz_patterns:
        print('\nJazz:\n')
        for i, z in enumerate(jazz_converted):
            print(f"{i + 1:2} {z!s}")

print(f"\nGrand totals: {total} patterns, {total_square} square, {total_jazz} jazz")
#@+node:ekr.20230925012359.2: *4* compute_quarters
def compute_quarters(n8) -> int:
    """
    Return the number of quarter notes that fill out a 4/4 measure
    already containing n8 (an even count of) eighth notes.
    """
    assert (n8 % 2) == 0, (n8, (n8 % 2))
    return 4 - n8 // 2
#@+node:ekr.20230925012359.3: *4* convert_pattern and convert_patterns
def convert_pattern(pattern: str) -> str:
    """Convert a string of 1s and 0s to a string of dots and dashes.

    Each '1' (an eighth note) becomes '.'; each '0' (a quarter note) becomes ' _'.
    """
    return pattern.replace('1', '.').replace('0', ' _')

def convert_patterns(patterns: list[str]) -> list[str]:
    """Convert each pattern string to its dot/dash form.

    Fixed annotation: patterns holds strings (as produced by find_patterns),
    not ints.
    """
    return [convert_pattern(pattern) for pattern in patterns]
#@+node:ekr.20230925012359.4: *4* find_patterns
def find_patterns(n8: int) -> list[str]:
    """
    Given a desired number of eighth notes in a 4-beat bar, return the
    padded string representation of all the base-2 numbers (in descending
    order) representing rhythms containing the given number of eighth notes.

    As always in this program, 1s represent an eighth note, 0s represent
    quarter notes.
    """
    # Special case: no eighth notes means four quarter notes.
    if n8 == 0:
        return ['0000']

    # The bar is n8 ones plus the matching number of zeros (quarter notes).
    n_zeros = compute_quarters(n8)
    width = n8 + n_zeros

    # The largest candidate: all n8 ones packed on the left.
    start = int('1' * n8 + '0' * n_zeros, 2)

    # Count down, keeping only numbers with exactly n8 one-bits,
    # left-padded with zeros to the full width.
    return [
        bin(k)[2:].zfill(width)
        for k in range(start, 0, -1)
        if bin(k)[2:].count('1') == n8
    ]
#@+node:ekr.20230925012359.5: *4* is_square
def is_square(s) -> bool:
    """
    Return True if s represents a square rhythm.

    Scan left to right, consuming '0' singly and any other character in
    pairs; the rhythm is jazz (not square) as soon as a pair starts '10'.
    """
    i, size = 0, len(s)
    while i < size:
        if s[i] == '0':
            i += 1
        elif s[i:i + 2] == '10':
            return False
        else:
            i += 2
    return True
#@+node:ekr.20230925012913.1: ** Git
#@+node:ekr.20220319162442.1: *3* script: diff-two-revs-wo-comments
# Leo script: diff two git revisions, ignoring comment-only changes.
g.cls()

# Monkey-patched git-diff-pr command.
import leo.commands.editFileCommands as efc

rev1 = '7fe2cc486153'  # Previous commit of devel.
rev2 = 'c4d109012028'  # Last commit of ekr-clean-comments.
x = efc.GitDiffController(c)

@others

# Monkey-patch, with x bound.
x.make_diff_outlines = make_diff_outlines_ignoring_comments
x.diff_two_revs(rev1, rev2)
#@+node:ekr.20220319162442.2: *4* function: make_diff_outlines_ignoring_comments
def make_diff_outlines_ignoring_comments(c1, c2, fn, rev1='', rev2=''):
    """Create an outline-oriented diff from the *hidden* outlines c1 and c2."""
    # This function is monkey-patched onto x (a GitDiffController) in the
    # parent node, so it uses the script-global x instead of a self parameter.
    self = x
    added, deleted, changed = self.compute_dicts(c1, c2)
    table = (
        (added, 'Added'),
        (deleted, 'Deleted'),
        (changed, 'Changed'))
    for d, kind in table:
        if kind.lower() == 'changed':
            # Strip comments from both sides so comment-only edits diff clean.
            for key in d:
                v1, v2 = d.get(key)
                v1.b = strip_comments(v1.b)
                v2.b = strip_comments(v2.b)
        self.create_compare_node(c1, c2, d, kind, rev1, rev2)
#@+node:ekr.20220319162442.3: *4* function: strip_comments
def strip_comments(aString):
    """
    Strip everything that looks like a comment from aString.
    It's fine, for now, to ignore strings and docstrings.
    """
    kept = []
    for line in g.splitLines(aString):
        if line.strip().startswith('#@'):
            # Retain everything that looks like a sentinel.
            kept.append(line)
            continue
        i = line.find('#')
        if i == -1:
            # No comment at all: keep the line unchanged.
            kept.append(line)
        else:
            # Keep only the code before the comment, dropping comment-only lines.
            head = line[:i]
            if head.strip():
                kept.append(head.rstrip() + '\n')
    return ''.join(kept)
#@+node:ekr.20150416062327.1: ** Gui
#@+node:ekr.20120328102352.6948: *3* @@button set-style
@language rest

@
http://groups.google.com/group/leo-editor/browse_thread/thread/ba9eb63337467d42/a3f3750d0ce6e847
Here's a one line @button node you can add to myLeoSettings.leo

Important:  setStyleSheet *replaces* the previous stylesheet with the
new stylesheet, so you had best set all the attributes of Leo's
default stylesheet.

As an alternative, if w is any Qt widget, w.setStyleSheet(p.b) will
set the stylesheet for that widget only: the top-level stylesheet (the
stylesheet for c.frame.top.leo_ui) remains unchanged.
@c

@language python

c.frame.top.leo_ui.setStyleSheet(p.b)
# c.frame.top.setStyleSheet(p.b)
#@+node:ekr.20051110105027.106: *3* script: Create diagrams using Graphviz and pydot
#@+node:ekr.20051110105027.107: *4* pydot notes by EKR
@nocolor

- I have found it easiest to create pydot objects rather than creating Graphviz strings.  It's the natural way, IMO.

- The pydot documentation is poor.  When you cut through the blah-blah-blah all that is really going on is that you use ctors to create pydot objects.  Typically you specify attributes in the ctors, but there are also getters and setters (various silly redundant flavors) to do this.

- It took me a while to get the difference between names and labels.  Names are essentially object identifiers, and they are restricted to what are basically C identifiers.  Labels are what are shown in nodes.  The default label is the node's name.   It's a bit strange to use strings instead of Python object references, but it's no big deal.

- The documentation for Graphviz is weak.  Very few examples.  It took me a long time to realize that by default Graphviz lays out nodes and edges independently of the order in which they were created.  The ordering="out" argument to the Dot ctor overrides some parts of the layout algorithm so that nodes are laid out in roughly the definition order.  If you want to place nodes yourself, you can specify their exact position.  This would be feasible to do in a script and I haven't done that yet.

In short, Graphviz and pydot are very impressive tools.  The documentation could be improved, but once one gets the hang of things it is fairly easy to get real work done.
#@+node:ekr.20051110105027.108: *4* pydot docs
@nocolor
#@+node:ekr.20051110105027.109: *5* General note about attributes
The original documentation repeats endlessly the same info about attributes.

Attributes can be set in several ways:

set("attributeName")

set_[attribute name], i.e. set_color, set_fontname

object.attributeName = val

Similarly, you can get attribute values with corresponding getters.
#@+node:ekr.20051110105027.110: *5* Cluster(Graph)
class Cluster(Graph) 

Methods:

__init__(self, graph_name='subG', suppress_disconnected=False, **attrs)

graph_name:
    the cluster's name (the string 'cluster' will be always prepended)

suppress_disconnected:
    False: remove from the cluster any disconnected nodes.


Attributes:

attributes = ['pencolor', 'bgcolor', 'labeljust', 'labelloc', 'URL', 'fontcolor', 'fontsize', 'label', 'fontname', 'lp', 'style', 'target', 'color', 'peripheries', 'fillcolor']
#@+node:ekr.20051110105027.111: *5* Common
class Common 
    Common information to several classes.

Should not be directly used, several classes are derived from this one.

char_range(self, a, b)
Generate a list containing a range of characters.

is_ID(self, s)
Checks whether a string is a dot language ID.

Data:

chars_ID = None
parent_graph = None 



#@+node:ekr.20051110105027.112: *5* Dot(Graph)
class Dot(Graph) 
    A container for handling a dot language file.

This class implements methods to write and process a dot language file.


Methods defined here:

__init__(self, **args)


Attributes:

formats = ['ps', 'ps2', 'hpgl', 'pcl', 'mif', 'pic', 'gd', 'gd2', 'gif', 'jpg', 'jpeg', 'png', 'wbmp', 'ismap', 'imap', 'cmap', 'vrml', 'vtx', 'mp', 'fig', ...]
progs = None
#@+node:ekr.20051110105027.113: *6* create and create_xxx
create(self, prog='dot', format='ps')
Creates and returns a Postscript representation of the graph.

create will write the graph to a temporary dot file and process
it with the program given by 'prog' (which defaults to 'twopi'),
reading the Postscript output and returning it as a string if the
operation is successful.
On failure None is returned.

There's also the preferred possibility of using:

        create_'format'(prog='program')

which are automatically defined for all the supported formats.

[create_ps(), create_gif(), create_dia(), ...]
#@+node:ekr.20051110105027.114: *6* write and write_xxx
write(self, path, prog='dot', format='raw')
Writes a graph to a file.

Given a filename 'path' it will open/create and truncate
such file and write on it a representation of the graph
defined by the dot object and in the format specified by
'format'.

The format 'raw' is used to dump the string representation
of the Dot object, without further processing.

The output can be processed by any of graphviz tools, defined
in 'prog', which defaults to 'dot'

Returns True or False according to the success of the write operation.

There's also the preferred possibility of using:

        write_'format'(path, prog='program')

which are automatically defined for all the supported formats.
[write_ps(), write_gif(), write_dia(), ...]
#@+node:ekr.20051110105027.115: *5* Edge
class Edge(__builtin__.object, Common) 
    A graph edge.

This class represents a graph's edge with all its attributes.

edge(src, dst, attribute=value, ...)

src: source node's name
dst: destination node's name


--------------------------------------------------------------------------------
Methods defined here:

__eq__(self, edge)
Compare two edges.

If the parent graph is directed, arcs linking
node A to B are considered equal and A->B != B->A

If the parent graph is undirected, any edge
connecting two nodes is equal to any other
edge connecting the same nodes, A->B == B->A

__init__(self, src, dst, **attrs)

get_destination(self)
Get the edge's destination node name.

get_source(self)
Get the edge's source node name.

parse_node_ref(self, node_str)

set(self, name, value)
Set an attribute value by name.

Given an attribute 'name' it will set its value to 'value'.
There's always the possibility of using the methods:
        set_'name'(value)
which are defined for all the existing attributes.
to_string(self)
Returns a string representation of the edge in dot language.

--------------------------------------------------------------------------------
Data and other attributes defined here:

__dict__ = <dictproxy object>
dictionary for instance variables (if defined)
__weakref__ = <attribute '__weakref__' of 'Edge' objects>
list of weak references to the object (if defined)
attributes = ['style', 'target', 'pos', 'layer', 'tooltip', 'color', 'showboxes', 'URL', 'fontcolor', 'fontsize', 'label', 'fontname', 'comment', 'lp', 'arrowhead', 'arrowsize', 'arrowtail', 'constraint', 'decorate', 'dir', ...]
#@+node:ekr.20051110105027.116: *5* Error
class Error(exceptions.Exception) 
    General error handling class.

Methods defined here:

__init__(self, value)
__str__(self)
#@+node:ekr.20051110105027.117: *5* Graph(Common)
class Graph(__builtin__.object, Common) 
    Class representing a graph in Graphviz's dot language.

This class implements the methods to work on a representation
of a graph in Graphviz's dot language.


Data and other attributes:

__dict__ = <dictproxy object>
dictionary for instance variables (if defined)

__weakref__ = <attribute '__weakref__' of 'Graph' objects>
list of weak references to the object (if defined)

attributes = ['Damping', 'bb', 'center', 'clusterrank', 'compound', 'concentrate', 'defaultdist', 'dim', 'fontpath', 'epsilon', 'layers', 'layersep', 'margin', 'maxiter', 'mclimit', 'mindist', 'pack', 'packmode', 'model', 'page', ...]
#@+node:ekr.20051110105027.118: *6* Graph.__init__
__init__(self, graph_name='G', type='digraph', strict=False, suppress_disconnected=False, simplify=False, **attrs)


graph_name: the graph's name

type: 'graph' or 'digraph'

suppress_disconnected:
    defaults to False, which will remove from the graph any disconnected nodes.

simplify:
    if True it will avoid displaying equal edges, i.e. only one edge between two nodes. removing the duplicated ones.

All the attributes defined in the Graphviz dot language should be supported.

Attributes can be set through the dynamically generated methods:

set_[attribute name], i.e. set_size, set_fontname

or using the instance's attributes:

 Graph.[attribute name], i.e. graph_instance.label, graph_instance.fontname
#@+node:ekr.20051110105027.119: *6* add_edge
add_edge(self, graph_edge)

Adds an edge object to the graph.
#@+node:ekr.20051110105027.120: *6* add_node
add_node(self, graph_node)

Adds a node object to the graph.
#@+node:ekr.20051110105027.121: *6* add_subgraph
add_subgraph(self, sgraph)

Adds a subgraph object to the graph.
#@+node:ekr.20051110105027.122: *6* getters...
get(self, name)
Get an attribute value by name.

get_'name'() is defined for all attributes.

get_edge(self, src, dst)
Retrieved an edge from the graph.
Returns a list, a single Edge, or None

get_edge_list(self)
Returns the list of Edge instances composing the graph.

get_name(self)
Get the graph's name.

get_node(self, name)
Given a node's name the corresponding Node instance will be returned.
Returns a list, a single Node or None.

get_node_list(self)
Returns the list of Node instances composing the graph.

get_simplify(self)
Get whether to simplify or not.

get_strict(self, val)
Get graph's 'strict' mode (True, False).
This option is only valid for top level graphs.

get_subgraph(self, name)
Given a subgraph's name the corresponding Subgraph instance will be returned.
Returns a list of Subgraphs, a single Subgraph or None.

get_subgraph_list(self)
Returns the list of Subgraph instances in the graph.

get_suppress_disconnected(val)
Get if suppress disconnected is set.

get_type(self)
Get the graph's type, 'graph' or 'digraph'.
#@+node:ekr.20051110105027.123: *6* setters...
set(self, name, value)
Set an attribute value by name.

set_'name'(value) are defined for all the existing attributes.

set_graph_parent(self, parent)
Sets a graph and its elements to point to the parent.
Any subgraph added to a parent graph receives a reference to the parent to access some common data.

set_name(self, graph_name)
Set the graph's name.

set_simplify(self, simplify)
Set whether to simplify or not.
 If True it will avoid displaying equal edges.

set_strict(self, val)
Set graph to 'strict' mode.
This option is only valid for top level graphs.

set_suppress_disconnected(val)
Suppress disconnected nodes in the output graph.

set_type(self, graph_type)
Set the graph's type, 'graph' or 'digraph'.

#@+node:ekr.20051110105027.124: *6* toString
to_string(self, indent='')
Returns a string representation of the graph in dot language.
#@+node:ekr.20051110105027.125: *5* Node(Common)
class Node(__builtin__.object, Common) 
    A graph node.

This class represents a graph's node with all its attributes.


Data and attributes:

__dict__ = <dictproxy object>
dictionary for instance variables (if defined)

__weakref__ = <attribute '__weakref__' of 'Node' objects>
list of weak references to the object (if defined)

attributes = ['showboxes', 'URL', 'fontcolor', 'fontsize', 'label', 'fontname', 'comment', 'root', 'toplabel', 'vertices', 'width', 'z', 'bottomlabel', 'distortion', 'fixedsize', 'group', 'height', 'orientation', 'pin', 'rects', ...]
#@+node:ekr.20051110105027.126: *6* Node.__init__
node(name, attribute=value, ...)

name: node's name

All the attributes defined in the Graphviz dot language should be supported.

__init__(self, name, **attrs)
#@+node:ekr.20051110105027.127: *6* get_name
get_name(self)
Get the node's name.
#@+node:ekr.20051110105027.128: *6* set, set_x and set_name
set(self, name, value)
Set an attribute value by name.

Given an attribute 'name' it will set its value to 'value'.

set_'name'(value) is defined for all the existing attributes.

set_name(self, node_name)
Set the node's name.
#@+node:ekr.20051110105027.129: *6* toString
to_string(self)
Returns a string representation of the node in dot language.
#@+node:ekr.20051110105027.130: *5* Subgraph(Graph)
class Subgraph(Graph) 

Methods:

__init__(self, graph_name='subG', suppress_disconnected=False, **attrs)

graph_name:
    the subgraph's name

suppress_disconnected:
    False: removes from the subgraph any disconnected nodes.

Attributes:

attributes = ['Damping', 'bb', 'center', 'clusterrank', 'compound', 'concentrate', 'defaultdist', 'dim', 'fontpath', 'epsilon', 'layers', 'layersep', 'margin', 'maxiter', 'mclimit', 'mindist', 'pack', 'packmode', 'model', 'page', ...]
#@+node:ekr.20051110105027.131: *5* Functions
#@+node:ekr.20051110105027.132: *6* find_graphviz
find_graphviz()

Locate Graphviz's executables in the system.

Attempts  to locate  graphviz's  executables in a Unix system.
It will look for 'dot', 'twopi' and 'neato' in all the directories
specified in the PATH environment variable.
It will return a dictionary containing the program names as keys
and their paths as values.
#@+node:ekr.20051110105027.133: *6* graph_from_adjacency_matrix
graph_from_adjacency_matrix(matrix, node_prefix='', directed=False)

Creates a basic graph out of an adjacency matrix.

The matrix has to be a list of rows of values
representing an adjacency matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
#@+node:ekr.20051110105027.134: *6* graph_from_edges
graph_from_edges(edge_list, node_prefix='', directed=False)

Creates a basic graph out of an edge list.

The edge list has to be a list of tuples representing the nodes connected by the edge.

The values can be anything: bool, int, float, str.

If the graph is undirected by default, it is only
calculated from one of the symmetric halves of the matrix.
#@+node:ekr.20051110105027.135: *6* graph_from_incidence_matrix
graph_from_incidence_matrix(matrix, node_prefix='', directed=False)

Creates a basic graph out of an incidence matrix.

The matrix has to be a list of rows of values
representing an incidence matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
#@+node:ekr.20051110105027.136: *4* @url http://www.research.att.com/sw/tools/graphviz/refs.html
#@+node:ekr.20051110105027.137: *4* @url http://dkbza.org/pydot/pydot.html
#@+node:ekr.20051110105027.138: *4* Write an outline using Graphviz
# NOTE(review): this is a Python 2 script (print statement, bare except);
# it will not run unchanged under Python 3.
import string

try:
    import pydot
except:
    # Bare except: deliberately tolerate any import failure.
    s = "pydot must be installed"
    print s ; es(s,color="red")
    pydot = None

# Leo section reference: expands to the helper definitions below.
<< code >>

if pydot:
    # ordering="out" makes Graphviz lay nodes out roughly in definition order.
    graph = pydot.Dot(simplify=True,ordering="out")
    root = g.findNodeInTree(p,"Root")
    addLeoNodesToGraph(root,graph,top=True)
    graph.write_jpeg(r'c:\prog\test\pydotOut.jpg',prog='dot')
#@+node:ekr.20051110105027.139: *5* << code >>
@others
#@+node:ekr.20051110105027.140: *6* addLeoNodesToGraph
def addLeoNodesToGraph(p,graph,top=False):

    '''Recursively add pydot nodes and edges for p and its subtree to graph.

    Returns the pydot.Node created for p's vnode.
    NOTE(review): relies on old Leo internals such as p.v.vnodeList and
    p.v._parent; confirm these still exist in the Leo version in use.
    '''

    # Create p's vnode.
    thisNode = pydot.Node(name=vnodeRepr(p.v),label=vnodeLabel(p.v))
    graph.add_node(thisNode)

    if p.hasChildren():
        # Add the first child, then chain the remaining siblings together.
        child = p.firstChild()
        childNode = addLeoNodesToGraph(child,graph)
        graph.add_node(childNode)
        edge2 = pydot.Edge(tnodeRepr(p.v),vnodeRepr(child.v))
        graph.add_edge(edge2)

        while child.hasNext():
            next = child.next()
            edge = pydot.Edge(vnodeRepr(child.v),vnodeRepr(next.v),dir="both")
            nextNode = addLeoNodesToGraph(next,graph)
            graph.add_node(nextNode)
            graph.add_edge(edge)
            child = next

    # Draw p's tnode as a box attached to its vnode.
    tnode = pydot.Node(name=tnodeRepr(p.v),shape="box",label=tnodeLabel(p.v))
    edge1 = pydot.Edge(vnodeRepr(p.v),tnodeRepr(p.v),arrowhead="none")
    graph.add_edge(edge1)
    graph.add_node(tnode)

    if 0: # Confusing.
        # Dotted edges from each vnode to its parent vnode.
        if not top and p.v._parent:
            edge = pydot.Edge(vnodeRepr(p.v),vnodeRepr(p.v._parent),
                style="dotted",arrowhead="onormal")
            graph.add_edge(edge)

    if 0: # Marginally useful.
        # Dotted edges from each tnode to all vnodes sharing it.
        for v in p.v.vnodeList:
            edge = pydot.Edge(tnodeRepr(p.v),vnodeRepr(v),
                style="dotted",arrowhead="onormal")
            graph.add_edge(edge)

    return thisNode
#@+node:ekr.20051110105027.141: *6* tnode/vnodeLabel
def tnodeLabel(t):

    '''Return the graph label for tnode t: its id and vnodeList size.'''

    count = len(t.vnodeList)
    return "t %d [%d]" % (id(t), count)

def vnodeLabel(v):

    '''Return the graph label for vnode v: its id and headline.'''

    return "v %d %s" % (id(v), v.h)
#@+node:ekr.20051110105027.142: *6* tnode/vnodeRepr
def dotId(s):

    """Convert s to a C-style identifier by dropping all other characters.

    Fixed: string.letters and string.join are Python 2 only; use
    string.ascii_letters and str.join, which work in Python 2 and 3.
    """

    keep = string.ascii_letters + string.digits + '_'
    return ''.join(ch for ch in s if ch in keep)

def tnodeRepr(t):

    """Return a dot-language name (a C identifier) for tnode t."""

    return "t_%d" % id(t)

def vnodeRepr(v):

    """Return a dot-language name (a C identifier) for vnode v, based on its headline."""

    return "v_%d_%s" % (id(v),dotId(v.h))
#@+node:ekr.20051110105027.143: *5* Root
#@+node:ekr.20051110105027.144: *6* clone
#@+node:ekr.20051110105027.145: *7* Child1
#@+node:ekr.20051110105027.146: *8* GrandChild
#@+node:ekr.20051110105027.147: *7* Child2
#@+node:ekr.20130816100419.23046: *3* script: Full tree preview
@language python

<< docstring >>

from PyQt4 import QtGui, QtCore
from xml.sax.saxutils import escape

def add_html(html, nd):
    """Append HTML for nd and (recursively) its children to the html list.

    Each node becomes a nested <div class='level'> whose headline links to
    the node's UNL; the body text goes into a <pre> element.
    """
    unl = nd.get_UNL()
    headline = escape(nd.h)
    html.append(
        "<div class='level'><div><a href='%s' title='%s'>%s</a></div>"
        % (unl, unl, headline))
    html.append("<pre>%s</pre>" % escape(nd.b))
    for kid in nd.children():
        add_html(html, kid)
    html.append("</div>")

def make_overview(c):
    """Build the overview QTextBrowser for c's selected positions.

    Headline links navigate inside Leo via g.handleUrl; a Ctrl-click on a
    link also closes the overview widget.
    """

    te = QtGui.QTextBrowser()
    te.setReadOnly(True)
    te.setOpenLinks(False)  # so anchorClicked fires instead of navigating

    def anchorClicked(url, c=c, te=te):
        # Follow the clicked UNL inside Leo.
        url = str(url.toString())
        g.handleUrl(url,c=c,p=c.p)
        # ctrl_click is set by the mousePressEvent wrapper below.
        if te.ctrl_click:
            te.deleteLater()

    te.anchorClicked.connect(anchorClicked)

    def mousePressEvent(event, te=te, original=te.mousePressEvent):
        # Remember whether Ctrl was held, then delegate to the stock handler.
        te.ctrl_click = bool(event.modifiers() & QtCore.Qt.ControlModifier)
        original(event)

    te.mousePressEvent = mousePressEvent

    html = ["""<html><head><style>
    .level .level {margin-left: 1.5em}
    a {text-decoration: none; font-size: 120%}
    </style></head><body>"""]

    for nd in c.getSelectedPositions():
        add_html(html, nd)

    html.append("</body></html>")

    # NOTE(review): '\\n' joins with a literal backslash-n, not a newline;
    # this looks like an escaping artifact of this archive -- confirm
    # whether '\n' was intended before reusing this script.
    html = '\\n'.join(html)

    te.setHtml(html)

    return te

class OverviewPaneProvider:
    """Register with c.free_layout so Leo can create Overview panes on demand."""

    def __init__(self, c):
        self.c = c
        # Careful: we may be unit testing.
        if hasattr(c, 'free_layout'):
            splitter = c.free_layout.get_top_splitter()
            if splitter:
                splitter.register_provider(self)

    def ns_provides(self):
        """Return the (name, id) pairs of the panes this provider offers."""
        return [('Overview', '_add_overview_pane')]

    def ns_provide(self, id_):
        """Create and return the overview widget when asked for our id."""
        if id_ == '_add_overview_pane':
            # Bug fix: use self.c, not the script-level global c, so the
            # provider still works when called outside the script namespace.
            w = make_overview(self.c)
            return w

    def ns_title(self, id_):
        """Return the window title for our pane id."""
        if id_ == '_add_overview_pane':
            return "Leo Outline Overview"

    def ns_provider_id(self):
        # used by register_provider() to unregister previously registered
        # providers of the same service
        return "outline overview window"

OverviewPaneProvider(c)
#@+node:ekr.20130816100419.23047: *4* << docstring >>
''' The script sets up Leo to display all the parts of the tree (all bodies
and subheadings) as continuous text, much like a word processor outline.

By Terry Brown

1) Paste the code below into a node, then hit the "run-script" button.

2) Then select a node with some hierarchy, not too much.

3) Then right click on the panel dividers between the tree / body / log
   panes, you should see a context menu with an "Open Window" sub-menu,
   which should contain an "Overview" item.

You should get a continuous view of the hierarchy with clickable
headlines which take you to the node.

You can select multiple nodes in step 2 above, with normal list Ctrl-
or Shift- click operations.  Nodes are shown in the overview in the
order selected.  This is how you'd generate the overview for a whole
outline - i.e. contract the whole outline, click the first top level
node, shift click the last top level node, and then step 3.

You can also embed the overview in a pane in the Leo window by selecting
"Insert" rather than "Open window" in step 3, clicking the action button
and selecting Overview.

This was the low hanging fruit, based on code used in bookmarks.py.  A
refresh button for the outline wouldn't be too hard, but right now
you need to close the window / pane and open it again to refresh.
'''
#@+node:ekr.20071116114235: *3* script: Print all Tango icons in wiki-markup format
# Prints all icons in the Icons/Tango folder in wiki-markup format.
# This allows the icons to be inserted into Leo's body pane,
# provided that the color_markup and add_directives plugins are enabled.
# (Python 2 script: uses print statements.)

import glob

folders = (
    'actions','animations','apps','categories','devices',
    'emblems','emotes','mimetypes','places','status',)

for z in folders:
    theDir = g.os_path_join(g.app.loadDir,'..','Icons','Tango','16x16',z)
    print ; print z
    aList = glob.glob(g.os_path_normpath(g.os_path_join(theDir,'*.*')))
    aList.sort()
    # Note: z is rebound here from the folder name to each file path.
    aList = ['{picture file=%s} %s' % (z,g.shortFileName(z)) for z in aList]
    aList = [str(z) for z in aList]
    s = g.listToString(aList).replace("'",'').replace('[','').replace(']','')
    print s
#@+node:ekr.20140704052551.17946: *3* script: Set icon for .leo files
'''
This leo script _almost_ adds an icon to .leo files in Windows. I say
almost in that on my system it runs without error, the associated registry
key is created and contains the right path, but Windows still doesn't know
what to do with the file.

It's intended to be run after "create-leobat" has been run and the Leo.File
filetype is already present.

Anyone else have some ideas how to improve it?
'''

from _winreg import *  # Python 2 module; named 'winreg' on Python 3
def register_leo_icon():
    '''Tell Windows what icon to use for the  Leo.File filetype (.leo)

    Resources:

http://stackoverflow.com/questions/2331690/how-to-set-a-icon-file-while-creating-file

http://stackoverflow.com/questions/771689/how-can-i-set-an-icon-for-my-own-file-extension
    '''

    icon = "%s\\Icons\\LeoDoc.ico" % g.computeLeoDir()

    g.es("\\nAttempting to register leo icon with .leo files...")

    if g.os_path_exists(icon):
        g.es("Found:", icon)
        # NOTE(review): OpenKey defaults to read access; creating the
        # DefaultIcon subkey under it may need KEY_WRITE -- this may be
        # why the headline says the script "almost" works. Confirm on
        # a real Windows system.
        myTestKey = OpenKey(HKEY_CLASSES_ROOT, "Leo.File")
        iconKey= CreateKey(myTestKey, "DefaultIcon")
        CloseKey(myTestKey)

        SetValue(iconKey, None, REG_SZ, icon)
        CloseKey(iconKey)
        g.es("Registered!")
    else:
        g.es("LeoDoc.ico not in expected location, can't continue.")
#@+node:ekr.20201030065548.5: *3* script: set window to youtube size
# Resize the top-level Leo window to a good size for YouTube screencasts.
height, width = 682, 1264
w = c.frame.top
# Walk up to the outermost (top-level) widget.
while w.parent():
    w = w.parent()
# Fix: use the constants defined above instead of repeating the literals.
w.resize(width, height)
w.move(200, 200)
#@+node:ekr.20150416063804.1: *3* Tk
# These are obsolete, unless Tk is run in a separate process.
#@+node:ekr.20041220080654: *4* script: Prototype: Setting Tk config values safely
# Python 2 prototype: apply widget settings safely, warning about invalid
# option names instead of raising. (<< ... >> below is a Leo section
# reference, so this node is not plain Python.)
import Tkinter as Tk

<< documentation about how to set general options >>
t = Tk.Text()

print '-' * 20

# (key, value) pairs to try; 'xyzzy' is deliberately invalid, and
# 'height' gets a deliberately bad value to exercise the TclError path.
settings = (
    ('height','xyz'),
    ('width',30),
    ('xyzzy',2),
)

widget_keys = t.keys() # List of all valid settings for this widget.
widget_keys.sort()

# Make a list of valid settings, and warn about invalid settings.
valid_settings = []
for key,val in settings:
    if key in widget_keys:
        setting = key,val
        valid_settings.append(setting)
    else:
        s = "'%s' is not a valid Tk option for this widget" % key
        print s ; g.es(s,color='blue')
valid_settings.sort()

print 'before changes...'
for key,val in valid_settings:
    print '%s = %s' % (key,str(t.cget(key)))

for key,val in valid_settings:
    d = {key:val}
    try:
        if 1: # The preferred way, using the 'extended call syntax'.
            # This was introduced in Python 2.0.
            t.configure(**d)
        else: # The Python 1.x way.  Deprecated since Python 2.3.
            apply(t.configure,[],d)
    except Tk.TclError:
        # A valid key may still get a value Tk rejects.
        s = "Tk exception setting '%s' to %s" % (key,repr(val))
        print s ; g.es(s,color='blue')

print 'after changes...'
for key,val in valid_settings:
    print '%s = %s' % (key, str(t.cget(key)))

if 0:
    print ; print 'all keys...'
    for key in widget_keys:
        print '%s = %s' % (key, str(t.cget(key)))
#@+node:ekr.20041220091350: *5* << documentation about how to set general options >>
@nocolor
@
The keyword argument syntax is of course much more elegant, and less error prone. However, for compatibility with existing code, Tkinter still supports the older syntax. You shouldn't use this syntax in new programs, even if it might be tempting in some cases. For example, if you create a custom widget which needs to pass configuration options along to its parent class, you may come up with something like:

@color

    def __init__(self, master, **kw):
        Canvas.__init__(self, master, kw) # kw is a dictionary

@nocolor
This works just fine with the current version of Tkinter, but it may not work with future versions. A more general approach is to use the apply function:
@color

    def __init__(self, master, **kw):
        apply(Canvas.__init__, (self, master), kw)

@nocolor
The apply function takes a function (an unbound method, in this case), a tuple with arguments (which must include self since we're calling an unbound method), and optionally, a dictionary which provides the keyword arguments.

--------- Apply is deprecated ---------

apply( function, args[, keywords]) 

The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence. The function is called with args as the argument list; the number of arguments is the length of the tuple. If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the argument list.

Calling apply() is different from just calling function(args), since in that case there is always exactly one argument. The use of apply() is equivalent to function(*args, **keywords). Use of apply() is not necessary since the ``extended call syntax,'' as used in the last example, is completely equivalent. 

Deprecated since release 2.3. Use the extended call syntax instead, as described above.
#@+node:ekr.20050726101926: *4* script: Prototype: TK: keyPressed
def keyPressed( self, event ):
    """Handle a key press: run Leo's masterCommand, then apply auto-newline,
    auto-completion and auto-tab handling.

    Jython/Swing prototype: `event` is a Java KeyEvent, and the << ... >>
    lines are Leo section references, so this node is not plain Python.
    """
    << create the command >>
    self.kTconsume = self.kRconsume = consume = self.emacs.masterCommand(event,command)
    if consume:
        # Block the event from going elsewhere, like the DocumentModel.
        event.consume()
        return

    kc = event.getKeyChar()
    if self.tab_for_colon and kc == '\n':
        event.consume()
        self.insertPreviousLeadAndNewline()
    # has_key is Python 2 only.
    if self.completers.has_key(kc):
        << handle auto completion >>
    elif kc == '\t' and self.tab_width == -4:
        << handle auto-tabbing >>
#@+node:ekr.20050726101926.1: *5* << create the command >>
# Build the Emacs-style command name from the key event's modifier and key
# text (e.g. "Ctrl T"); when only a modifier was pressed, use it alone.
modifiers = event.getModifiers()
mtxt = event.getKeyModifiersText(modifiers)
ktxt = event.getKeyText(event.getKeyCode())

if mtxt == ktxt:
    command = mtxt
else:
    # Bug fix: parenthesize the % expression. The original
    # '%s %s' % (mtxt,ktxt).strip() applied .strip() to the tuple
    # (AttributeError), not to the formatted string.
    command = ('%s %s' % (mtxt, ktxt)).strip()
#@+node:ekr.20050726101926.2: *5* << handle auto completion >>
# Insert the matching completion text for kc and advance the caret past kc.
editor = self.emacs.editor
doc = editor.getDocument()
pos = editor.getCaretPosition()
try:
    # Don't auto-complete immediately after a quote character.
    pc = doc.getText( pos -1, 1 )
    if pc in ( '"', "'" ): return
except: pass  # NOTE(review): bare except also hides real errors

event.consume()
self.kTconsume = True
self.kRconsume = True
ac = self.completers[ kc ]
doc.insertString( pos, '%s%s' %( kc, ac ), None )
editor.setCaretPosition( pos + 1 )
# Hide Leo's autocompleter box if the body editor has one.
if hasattr(self.emacs.c.frame.body.editor, "autocompleter"):
    self.emacs.c.frame.body.editor.autocompleter.hideAutoBox() 
#@+node:ekr.20050726101926.3: *5* << handle auto-tabbing >>
# Replace the Tab key with four spaces at the caret position.
self.kTconsume = True
self.kRconsume = True
event.consume()
editor = self.emacs.editor
doc = editor.getDocument()
pos = editor.getCaretPosition()
try:
    doc.insertString( pos, " " * 4, None )
except: pass  # NOTE(review): bare except; insertString failure is ignored
#@+node:ekr.20050219073752.1: *4* script: Tk: Add a menu item after Open With
# Insert a 'Test' item into the File menu after Open With, using raw
# Tkinter menu calls (index 3 is position-dependent).
def callback(*args,**keys):
    g.trace('after Open With')

# Get the actual Tkinter menu.
fileMenu = c.frame.menu.getMenu('File')

# Now use raw Tkinter calls to insert the menu.
fileMenu.insert(3,'command',label='Test',command=callback) 
#@+node:ekr.20051011211253: *4* script: Tk: Debugger canvas
# Prototype "debugger" layout: a narrow icon canvas beside a Text widget,
# with a minus-node icon drawn once per text line. (Python 2 / Tkinter;
# @others pulls in getImage and getLineHeight below.)
import Tkinter as Tk
import tkFont

# Cache of name -> Tk.PhotoImage, filled by getImage.
images = {}

@others

h = 500 ; w = 900

top = Tk.Toplevel(None) ; top.title("Debugger")

outer = Tk.Frame(top,height=h,width=w)
outer.pack(expand=1,fill='both')

canvas = Tk.Canvas(outer,background='LightSteelBlue1',width=14)
canvas.pack(side='left',fill='y',expand=0)

text = Tk.Text(outer)
text.pack(side='left',fill='both',expand=1,pady=0,padx=0)

line_h = getLineHeight(text)
# print line_h
image = getImage('minusnode.gif',canvas)

# Draw one icon per text line down the canvas. Note: the loop variable
# `id` shadows the builtin.
y = line_h / 2 - 2
while y < h:
    id = canvas.create_image(4,y,image=image,anchor="nw")
    y += line_h
#@+node:ekr.20051011213138: *5* getImage
def getImage(name, canvas):
    """Return a cached Tk.PhotoImage for the named icon, or None on failure."""
    icon = images.get(name)
    if icon:
        return icon
    # Pre-bind so the error message below works even if path building fails
    # (the original could hit NameError on fullname inside the handler).
    fullname = name
    try:
        fullname = g.os_path_join(g.app.loadDir, "..", "Icons", name)
        fullname = g.os_path_normpath(fullname)
        image = Tk.PhotoImage(master=canvas, file=fullname)
        images[name] = image
        return image
    except Exception:  # was a bare except: don't swallow KeyboardInterrupt.
        g.es("Exception loading: " + fullname)
        g.es_exception()
        return None
#@+node:ekr.20051011215038: *5* getLineHeight
def getLineHeight(text):
    """Return the pixel height of one line in Tk Text widget `text`.

    Falls back to 20 (and logs the exception) if the font metrics
    cannot be read.
    """
    try:
        font = tkFont.Font(font=text.cget('font'))
        return font.metrics()["linespace"]
    except Exception:
        g.es("exception setting outline line height")
        g.es_exception()
        return 20 # default
#@+node:EKR.20040626212434: *4* script: TK: Drawing experiments
#@+node:EKR.20040626212434.1: *5* Rectangles & ovals
# Python 2 Tk demo (xrange, print-less): draw 5000 random rectangles,
# ovals and lines in random blue/green shades.
import Tkinter as Tk
import random as r ; rand = r.randint

top = Tk.Toplevel(None) ; top.title("Drawing")
canvas = Tk.Canvas(top,height="5i",width="9i") # ,background="white")
canvas.pack() ; top.update()

mincolor,maxcolor=125,225

for n in xrange(5000):
    x,y = rand(0,900),rand(0,500)
    w = rand(1,10) ; h = w * r.uniform(0.5,1.5)
    color = "#%02x%02x%02x" % (rand(0,maxcolor/2),rand(mincolor,maxcolor),rand(mincolor,maxcolor))
    kind = rand(1,3)
    sign = rand(-1,1) # rand(0,2)-1
    if kind == 1:
        canvas.create_rectangle(x,y,x+w,y+h,fill=color,width=0)
    elif kind == 2:
        canvas.create_oval(x,y,x+w,y+h,fill=color,width=0)
    else:
        canvas.create_line(x,y,x+sign*5*w,y+5*h,fill=color)
    if 0: # Redrawing slows things down a lot.
        if (n % 1000) == 0: top.update()
#@+node:EKR.20040626212434.2: *5* Lines & arcs
# Python 2 Tk demo: draw a connected chain of random lines and arcs,
# each segment continuing from the previous endpoint.
import Tkinter as Tk
import random as r

top = Tk.Toplevel(None) ; top.title("Drawing")
canvas = Tk.Canvas(top, height = "5i", width = "9i")
canvas.pack() ; top.update()
rand = r.randint

x,y = 10,10
mincolor,maxcolor=125,225

for n in xrange(2000):
    x2,y2 = rand(0,900),rand(0,500)
    color = "#%02x%02x%02x" % (rand(mincolor,maxcolor),rand(mincolor,maxcolor),rand(mincolor,maxcolor))
    width = "%fm" % r.uniform(0.1,0.6)
    canvas.create_line(x,y,x2,y2,fill=color,width=width)
    extent = rand(180,270)
    canvas.create_arc(x,y,x2,y2,outline=color,width=width,style="arc",extent=extent)
    x,y = x2,y2
    # if (n % 1000) == 0: top.update()
#@+node:EKR.20040626213007: *5* Paul Klee
# Python 2 Tk demo: a grid of jittered pastel rectangles, Paul Klee style.
import Tkinter as Tk
import random as r ; rand = r.randint

top = Tk.Toplevel(None) ; top.title("Paul Klee")
canvas = Tk.Canvas(top,height="5i",width="9i") # ,background="white")
canvas.pack() ; top.update()

# Paul Klee
mincolor,maxcolor=125,225
xmax,ymax = 800,400
stipples = [None,"gray75"] # "gray12","gray25","gray50",]
h=w=90
for x in xrange(10,xmax,w):
    for y in xrange(10,ymax,h):
        color = "#%02x%02x%02x" % (rand(mincolor,maxcolor),rand(mincolor,maxcolor),rand(mincolor,maxcolor))
        # Note: `range` shadows the builtin; it is the jitter amplitude.
        range = h/6
        stipple = stipples[rand(0,len(stipples)-1)]
        dx = r.uniform(0.0,range) - range/2
        dy = r.uniform(0.0,range) - range/2
        canvas.create_rectangle(x+dx,y+dy,x+dx+w,y+dy+h,fill=color,width=0,stipple=stipple)
#@+node:EKR.20040627150213: *5* Complex functions
# Python 2 Tk demo: color each pixel by a complex power function of its
# coordinates. (xrange and the final print statement are Python 2 only.)
import Tkinter as Tk
import random as r ; rand = r.randint

mincolor,maxcolor=125,225
xmax,ymax = 300,300
h=w=1
# Random coefficients and exponent for the plotted function.
i = r.uniform(2.0,4.0)
j = r.uniform(-5.0,5.0)
power = r.uniform(1.1,1.2)
    #(1.5,1.7)

top = Tk.Toplevel(None)
top.title("Complex function: (%f,%f)*(x,y)**%f" % (i,j,power))
canvas = Tk.Canvas(top,height="5i",width="9i") # ,background="white")
canvas.pack() ; top.update()

@others

for n in xrange(1):
    i += 0.5
    j += 0.5
    power += 0.01
    # print "i,j,power:",i,j,power
    c = complex(i,j)
    for format,m in (
        #("#%02x%02x%02x",256),
        #("#%03x%03x%03x",256*8),
        ("#%04x%04x%04x",256*256),
    ):
        for x in xrange(0,xmax,1):
            for y in xrange(0,ymax,1):
                # Map pixel (x,y) to a color via z = (c*(x+yi))**power.
                n = complex(x,y)
                z = pow(c*n,power)
                n1 = int(z.real*m) ; n2 = int(z.imag*m)
                color = format % (n1%m,n2%m,abs(n1-n2)%m)
                # canvas.create_rectangle(w*x,h*y,w*x+w,h*y+h,fill=color,width=0)
                canvas.create_line(x,y,x+1,y+1,fill=color,width=1)
        top.update()
print "done"
#@+node:ekr.20040319111213: *4* script: Tk: FilterHoist
# FilterHoist prototype: the star imports bring helpers such as choose,
# registerHandler, plugin_signon and tnode into scope. (Python 2.)
from leoPlugins import *
from leoGlobals import *
from leoNodes import *
import Tkinter
import re
import sys

@others

fhp = None        # the FilterHoist Toplevel window, created lazily
fhp_entry = None  # the pattern Entry widget inside fhp

if 1:
    # Script mode: install the menu item immediately.
    addMenu("none",None)
else:
    # Plugin mode: register for the open/new hooks instead.
    hooks = choose(sys.platform == 'win32',
        ('open2',"new"),
        ('start2','open2',"new"))

    print "hi"

    registerHandler(hooks,addMenu)

    __version__ = ".1"
    plugin_signon(__name__)
#@+node:ekr.20040319111213.1: *5* description
@nocolor

@ This is what it does:

1. Under Outline it puts an Option called 'FilterHoist'
2. Selecting the option pops up an ugly little window.  On it is a section where
you can type in text.  You can close the window with the close button.  You
can activate the functionality with the Filter Button.
3. Filtering will walk the Leo tree, looking for a text match from the Text
field with the Nodes bodyString.
4. After finding some nodes it creates a new node at the root.  Then it clones
the matching nodes under that new node.  A Hoist operation is performed on the
new node.  This gives a view of all matching nodes.

I put this together because I wanted a find that was based in terms of Leo's
nodes.  Find as it is bounces you around the tree (it bothers me).  This brings
the nodes to you and presents them.  As it is, I may work further on this if
people like the idea.  It's possible I might migrate it to the NodeRoundup
plugin, too.

You need Python 2.3; it uses generators in its find method.

That was one motivation for writing this thing, using a generator vs. Recursive
approach to tree walking.
#@+node:ekr.20040319142708: *5* filter
def filter(c,e):
    """Clone every node whose body matches the regex in Entry e under a new
    top-level 'A Filtered Hoist' node, then hoist that node.

    Note: shadows the `filter` builtin and uses the old vnode-era API
    (tnode, rootVnode, setCurrentVnode).
    """
    pat = re.compile(e.get())

    t = tnode('','A Filtered Hoist')
    newRoot = c.rootVnode().insertAfter(t)
    p = c.rootVnode()
    while p:
        if pat.search(p.b):
            clone = p.clone(p)
            clone.moveToLastChildOf(newRoot)
        p = p.threadNext()
    newRoot.moveToRoot(c.rootVnode())
    c.setCurrentVnode(newRoot)
    c.redraw()

    c.hoist()
    fhp.withdraw()  # hide the dialog after a successful filter
#@+node:ekr.20040319142202: *5* old code
#@+node:ekr.20040319111213.2: *6* filter
def OLDfilter(c,e):
    """Superseded version of filter(): collects top-level nodes first, then
    clones regex matches (found via OLDsearch) under a new hoist node."""
    v = c.rootVnode()

    # Collect all top-level siblings.
    nodes = []
    while v:
        nodes.append(v)
        v = v.next()

    regex = re.compile(e.get())
    t = tnode('','A Filtered Hoist')
    ticker = c.rootVnode().insertAfter(t)
    for z in nodes:
        for x in search(z,regex):
            # NOTE(review): calls search(), but only OLDsearch is defined
            # in this file -- confirm which generator was intended.
            clone = x.clone( x )
            clone.moveToNthChildOf(ticker,0)
    c.setCurrentVnode(ticker)
    ticker.moveToRoot(c.rootVnode())
    c.redraw()
    c.hoist()
    fhp.withdraw()
#@+node:ekr.20040319111213.3: *6* search
from __future__ import generators # To make the code work in Python 2.2.

def OLDsearch(vn,regex):
    """Generator: yield nodes in vn's tree whose body matches regex.

    Hand-rolled tree walk that visits children last-to-first (dead code,
    kept for reference; superseded by threadNext in filter()).
    """
    sn = vn 
    while vn != None:
        if regex.search( vn.b ) : yield vn
        nc = vn.numberOfChildren()
        if nc == 0:
            # Leaf: move to the previous sibling, or climb until one exists.
            i = vn.childIndex()
            p = vn.parent()
            if p == None: 
                vn = None
                continue
            if i == 0:
                # First child: walk up looking for an ancestor's previous
                # sibling, stopping at the starting node sn.
                while 1:
                    if p == sn :
                        vn = None
                        break
                    vn = p.back() 
                    if vn == None:
                        p = p.parent()
                        continue
                    break
                continue                                                        
            vn = p.nthChild( i - 1)
            continue
        # Interior node: descend to the last child.
        vn = vn.nthChild( nc - 1 )
#@+node:ekr.20040319111213.4: *5* filterHoist
def filterHoist(c):
    """Create (on first use) and show the FilterHoist dialog for commander c."""
    global fhp
    # Bug fix: fhp_entry must be declared global here. Without it,
    # fhp_entry is a local assigned only in the "if fhp is None" branch,
    # so every later call raised UnboundLocalError at focus_set() below.
    global fhp_entry
    global e

    if fhp is None:

        fhp = Tkinter.Toplevel()
        fhp.title('FilterHoist')

        fhp_entry = e = Tkinter.Entry(fhp)
        e.pack(side="top",fill="both")

        def closeCallback(fhp=fhp):
            fhp.withdraw()

        def filterCallback(c=c,entry=e):
            filter(c,entry)

        b1 = Tkinter.Button(fhp,text='Close',command=closeCallback)
        b2 = Tkinter.Button(fhp,text='Filter',command=filterCallback)
        b1.pack(side="left")
        b2.pack(side="right")

    fhp.geometry('200x200+250+250')
    fhp.deiconify()
    fhp_entry.focus_set()
#@+node:ekr.20040319111213.5: *5* addMenu
def addMenu(tag,keywords):
    """Hook handler: add a FilterHoist item to the Outline menu.

    top() and trace() come from the star import of leoGlobals.
    """
    c = top()

    trace()

    def callback(c=c):
        filterHoist(c)

    table = ("FilterHoist",None,callback),

    c.frame.menu.createMenuItemsFromTable("Outline",table)
#@+node:ekr.20150416072721.1: ** Import & export
#@+node:ekr.20121013084734.16370: *3* script: Recursive import using c.recursiveImport
'''Recursively import all python files in a directory and clean the result.'''

# Latest change: use c.recursiveImport.

c.recursiveImport(
    dir_ = r'C:\prog\pyflakes-0.6.0\pyflakes',  # root of the tree to import
    kind = '@clean', # The new best practice.
    one_file = False,       # import the whole directory tree
    safe_at_file = False,
    theTypes = None, # Same as ['.py']
)
#@+node:ekr.20130810093044.16941: *3* script: Export full contents
@language python

'''
From: Terry <webtourist@gmail.com>

I need to present to people who don't have leo installation, in easily
readable format, the full content of a .leo file, not just the outline, but
all nodes and all contents.

This script only exports selected nodes, so if you want to export
everything, you have to select all the top level nodes, i.e. collapse all
the nodes so only the top level is visible, click the first one, and
shift-click the last one.

It exports to plain text... although you might be able to use the
template to describe HTML, not sure.

Paste the content into a node, then click the script-button button to
create a new button for running this script. The button's name will be the
node's name, what it is doesn't matter but 'export' would be an obvious
choice. Then select the node(s) you want exported, presumably not including
the node containing the script :)

Then it will ask for a file name and whether to include unexpanded nodes.

'''

# template is everything between r""" and second """
# placeholders are H heading B body C children
# use \\n in B and C lines for conditional blank lines

template = r"""H
    B
  * C"""

# NOTE(review): every '\\n' below is a literal backslash-n, not a newline.
# That looks like an escaping artifact of this archive (compare the
# descriptions above, which talk about '\n') -- confirm before reusing.

lines=[]
exp_only = g.app.gui.runAskYesNoCancelDialog(
    c, 'Expanded nodes only?', 'Expanded nodes only?')
if exp_only == 'no':
    exp_only = False

def export_text(p, indent=''):
    """Render p (and recursively its children) through `template`,
    appending output lines to the script-level `lines` list."""
    # Only the leading spaces of indent, without any prefix characters.
    spaces = ' '*(len(indent) - len(indent.lstrip(' ')))

    for line in template.split('\\n'):

        if 'H' in line:
            lines.append(indent + line.replace('H', p.h))
        elif 'B' in line and p.b.strip():
            prefix = line[:line.find('B')].replace('\\\\n', '\\n')
            for i in p.b.strip().split('\\n'):
                lines.append(spaces + prefix + i)
                prefix = line[:line.find('B')].replace('\\\\n', '')
            if line.endswith('\\\\n'):
                lines.append('')
        elif 'C' in line and (not exp_only or p.isExpanded()):
            prefix = line[:line.find('C')].replace('\\\\n', '\\n')
            for child in p.children():
                export_text(child, indent=spaces + prefix)
            if line.endswith('\\\\n'):
                lines.append('')
        elif 'C' not in line and 'B' not in line:
            lines.append(line)

if exp_only != 'cancel':
    for i in c.getSelectedPositions():
        export_text(i)

    filename = g.app.gui.runSaveFileDialog('Save to file')
    # filename = '/home/tbrown/del.txt'

    if filename is not None:
        open(filename,'w').write('\\n'.join(lines))
#@+node:ekr.20051103072643: *3* script: Export to treepad
# simple script to export current node and children as a treepad document
# the file format for treepad 2.x is simple.
# See: "TreePad 2.x File format" at http://www.treepad.com/docs/

# need to start the levels at 0
topLevel = p.level()
fileName = "exported.hjt"
nl = "\n"
# NOTE(review): 'wb' plus str writes would fail on Python 3 -- confirm
# the intended binary/text behavior before reusing.
mode = 'w' if c.config.output_newline=="platform" else 'wb'
try:
    # Fix: use a context manager so the file is closed even when a write
    # fails part-way (the original leaked the handle on any error after
    # open, and referenced an unbound theFile if open itself failed).
    with open(fileName, mode) as theFile:
        theFile.write("<hj-Treepad version 2.7>" + nl)
        for p in p.copy().self_and_subtree():
            theFile.write("dt=text" + nl)
            theFile.write("<node>" + nl)
            theFile.write(p.h + nl)
            theFile.write(repr(p.level() - topLevel) + nl)
            theFile.write(p.b + nl)
            theFile.write("<end node> 5P9i0s8y19Z" + nl)
    g.es("Wrote to file " + fileName,color="blue")
except IOError:
    g.es("Can not open " + fileName,color="blue")
#@+node:ekr.20041126035448: *3* script: Import a file
# Note: the source files contain mixed tabs/blanks, and that's very hard for Leo's imports to handle.

@tabwidth 8
@language python

# Each assignment below overrides the previous one; only the last path
# is actually imported.
path = r"c:\Python23\Lib\site-packages\Pmw\Pmw_1_1\lib\PmwPanedWidget.py"

path = r"c:\prog\PmwPanedWidget.py" # The same file with tabs converted to 8 blanks.

path = r"c:\Python23\Lib\site-packages\Pmw\Pmw_1_1\demos\All.py"

assert g.os_path_exists(path)

c.importCommands.importFilesCommand([path],"@file")
#@+node:ekr.20130526065545.18367: *3* script: Import nodes from an email account
# https://groups.google.com/forum/?fromgroups#!searchin/leo-editor/imaplib/leo-editor/i_U-PBv0Ek0/c0XsmlI_UugJ

import imaplib

def munge(part):
    """Extract the payload text from an IMAP fetch response part."""
    s = g.toUnicode(repr(part[0][1]))
    s = s.strip("'")
    # The repr contains escaped CRLFs; turn them into real newlines.
    return s.replace('\\r\\n','\n')

m = imaplib.IMAP4_SSL('imap.gmail.com',993) # Require SSL: Yes
# << ... >> are placeholders: fill in real credentials before running.
m.login(<< your login name>>,<< your password>>)
print('connected to edreamleo@gmail.com')
# for z in m.list():
    # print(z)
try:
    box = 'Leo/Later' # 'INBOX'
    m.select(box,True) # readonly.
    ok, aList = m.search(None,'ALL')
    if ok == 'OK':
        # One child of the current node per message in the mailbox.
        parent = p.insertAsLastChild()
        parent.h = box
        aList = [g.toUnicode(z) for z in aList]
        nums = aList[0].split()
        for num in nums:
            # ok,data = m.fetch(num,'(RFC822)') # Gets everything.
            ok,part = m.fetch(num,"(UID BODY[TEXT])")
            if ok != 'OK': continue
            print('reading %s %s' % (box,num))
            child = parent.insertAsLastChild()
            body = munge(part)
            # Prepend the From header to the body.
            ok,part = m.fetch(num,"(UID BODY[HEADER.FIELDS (FROM)])")
            if ok == 'OK': body = munge(part) + body
            child.b = '@nocolor\n\n' + body
            # Use the Subject header as the headline when available.
            ok,part = m.fetch(num,"(UID BODY[HEADER.FIELDS (SUBJECT)])")
            if ok == 'OK':
                head = munge(part)
                tag = 'Subject: '
                if head.startswith(tag): head = head[len(tag):]
                child.h = head.replace('\n','')
            else:
                child.h = '%s %s' % (box,num)
finally:
    m.close()
    m.logout()
p.contract()
c.redraw()
print('done')
#@+node:ekr.20110929185034.15716: *3* script: import-org-mode
'''Import each file in the files list after the presently selected node.'''


files = (
    r'c:\Users\edreamleo\test\import-org-mode.txt',
    r'c:\Users\edreamleo\test\import-org-mode.txt',
)

@others

# Import each file, restoring the original selection afterwards.
for fn in files:
    try:
        root = c.p.copy()
        # Fix: use a context manager; the original never closed the file.
        with open(fn) as f:
            s = f.read()
        scan(c,fn,s)
        c.selectPosition(root)
    except IOError:
        print('can not open %s' % fn)
#@+node:ekr.20110929185034.15717: *4* scan
def scan (c,fn,s):
    """Parse org-mode text s (from file fn) into an outline under c.p.

    Lines beginning with '*' start a new node; the number of leading stars
    is the outline level.  Other lines accumulate into the current body.
    """
    last = root = c.p.insertAsLastChild()
    last.h = g.shortFileName(fn)
    level,stack = 0,[root]
    body = ['@others\n']

    for s in g.splitLines(s):  # note: rebinds the s parameter
        if s.startswith('*'):
            # Count leading stars to get the level.
            i,level = 0,0
            while s[i] == '*':
                i += 1 ; level += 1
            if level > len(stack):
                g.trace('bad level',repr(s))
            elif level == len(stack):
                last.b = ''.join(body)
            else:
                # Shallower heading: close the body and pop the stack.
                last.b = ''.join(body)
                stack = stack[:level]
            parent = stack[-1]
            last = parent.insertAsLastChild()
            last.h = s.strip()
            stack.append(last)
            body = []
        else:
            body.append(s)

    # Finish any trailing lines.
    if body:
        last.b = ''.join(body)

    root.contract()
    c.redraw(root)
#@+node:ekr.20130802103517.20480: *3* script: push_to_Gist.txt
# Leo button to publish selected node as Gist.
# https://groups.google.com/forum/?fromgroups=#!topic/leo-editor/KgejcZHiEl0

import requests
import json

# Fix: removed the unused `tmp = g.os.environ['TEMP']` line, which did
# nothing and raised KeyError on platforms without a TEMP variable.
description = "published from Leo"
public = True
filename = p.h      # node headline
content = p.b       # node body
g.es(filename)
print('\n\n--- %s ---' % filename)
payload = {
    'description': description,
    'public': public,
    'files': {
        filename: {'content': content}
        }
    }
print(payload)
# NOTE(review): no Authorization header is sent; unauthenticated gist
# creation may be rejected by the GitHub API -- confirm before relying
# on this script.
r = requests.post('https://api.github.com/gists',  data=json.dumps(payload))
print(r.status_code)
print(r.text)
#@+node:ekr.20110916103731.2459: *3* script: Recursivly create @auto nodes
import os

@others

# Configuration: file extensions to import and the root directory.
types = ('.py',)
theDir =  r'c:\leo.repo\trunk\leo\core'

if  g.os_path_exists(theDir):
    importFiles(theDir,types,recursive=True)
    g.es("done",color="blue")
else:
    g.es("directory does not exist: " + theDir)
#@+node:ekr.20111217090057.12360: *4* cleanEmptyDirs & helper
def cleanEmptyDirs(root):

    '''Remove all @path nodes not containing any @auto nodes.'''
    # Walk the tree in outline order, deleting empty @path subtrees
    # in place. Note: `next` shadows the builtin.
    p = root.copy()
    while p:
        if p.h.startswith('@path') and isEmpty(p):
            next = p.nodeAfterTree()  # position after the doomed subtree
            p.doDelete()
            p = next
        else:
            p.moveToThreadNext()
#@+node:ekr.20111217090057.13163: *5* isEmpty
def isEmpty(p):
    """Return True if p's subtree contains no @auto node."""
    return not any(q.h.startswith('@auto') for q in p.subtree())
#@+node:ekr.20111217090057.12361: *4* computeFiles
def computeFiles(theDir, recursive):
    """Return (dirs, files) for theDir.

    files: paths whose extension is in the script-level `types` list
    (or all files when `types` is empty); dirs: subdirectory paths,
    collected only when `recursive` is true.
    """
    dirs, files = [], []
    for entry in os.listdir(theDir):
        path = g.os_path_join(theDir, entry)
        if not g.os_path_isfile(path):
            if recursive:
                dirs.append(path)
            continue
        _, ext = g.os_path_splitext(entry)
        if not types or ext in types:
            files.append(path)
    return dirs, files
#@+node:ekr.20110916103731.2462: *4* createLastChildOf
def createLastChildOf(p, headline):
    """Append and return a new last child of p whose headline is
    `headline` with backslashes normalized to forward slashes."""
    node = p.insertAsLastChild()
    node.h = headline.replace('\\', '/')
    return node
#@+node:ekr.20110916103731.2461: *4* importDir
def importDir (theDir,types,recursive,root,level=0):
    """Create an @path node for theDir under root with @auto children for
    its files, then recurse into subdirectories.

    The top-level call uses the full path in the headline; deeper levels
    use only the directory's base name.
    """
    dirs,files = computeFiles(theDir,recursive)
    if not dirs and not files:
        return
    path_part = theDir if level==0 else g.os_path_basename(theDir)
    root = createLastChildOf(root,'@path %s' % (path_part))
    c.selectPosition(root)
    for fn in files:
        # Fix: the original bound this to an unused local (p2).
        createLastChildOf(root,'@auto %s' % (
            g.shortFileName(fn)))
    for subdir in sorted(dirs):  # don't shadow the theDir parameter
        importDir(subdir,types,recursive,root,level+1)
#@+node:ekr.20110916103731.2460: *4* importFiles (top-level)
def importFiles (theDir,type=None,recursive=False):
    """Create an 'imported files' node after c.p and populate it with
    @path/@auto nodes for theDir, then remove empty @path nodes.

    Note: the parameter is named `type` (shadowing the builtin); the
    caller passes the script-level `types` tuple through it.
    """
    root = c.p.insertAfter()
    root.h = "imported files"
    try:
        importDir (theDir,type,recursive,root)
        cleanEmptyDirs(root)
        c.contractAllHeadlines()
        c.selectPosition(root)
        root.expand()
        for p in root.subtree():
            p.clearDirty() # Important: don't write automatically.
        c.redraw()
    except Exception:
        g.es("exception in importFiles script")
        g.es_exception()
#@+node:ekr.20060209173725: *3* script: rst-to-leo
'''A script to import rst files into Leo.

This script parses a file containing Restructured text and creates node suitable for the rst3 plugin.

To run, do the following:

1. Create a call to the function ReSt2Leo below.
   The argument to ReST2Leo should be the url (including a local file)
   of a file containing restructure text.

2. Run this node using the Execute Script command or the Run Script button.

The script will create a sibling of this node for each call to ReST2Leo in this node.
For example, this node will create a sibling called 'reStructuredText Demonstration'.
The root headline of the created tree will be the top-level heading of the imported file.
'''

@language python
@tabwidth -4

__version__ = '0.1'
<< version history >>

<< define valid_underline_characters >>

@others

# files to test with:
if 1:
    ReST2Leo('http://docutils.sourceforge.net/docs/user/rst/demo.txt')
if 0:
    ReST2Leo('http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.txt')
if 0:
    ReST2Leo('http://springfed.com/ac/IssueNo0003Info/editform')
#@+node:ekr.20060209173725.1: *4* << define valid_underline_characters >>
# All the characters allowed in a reStructuredText section underline:
# the printable non-alphanumeric 7-bit ASCII characters.
valid_underline_characters = [
    '!','"','#','$','%','&',"'",'(',')','*','+',
    ',','-','.','/',':',';','<','=','>','?','@',
    '[','\\',']','^','_','`','{','|','}','~',
]
#@+node:ekr.20060209175929: *4* << version history >>
@nocolor
@

v 0.1: Kent Tenney with minor mods by EKR.
#@+node:ekr.20060209173725.2: *4* class ParseReST
class ParseReST:
    """Processes chunks of ReST, creating a list of nodes/sections
    """
    @others
#@+node:ekr.20060209173725.3: *5* __init__
def __init__(self, input):
    """Initialize document level variables.

    input is either the whole document as one string (split here on
    newlines) or an already-split list of lines.
    """
    # isinstance replaces the dated `type(input) == type('string')` test.
    if isinstance(input, str):
        self.lines = input.split("\n")
    else:
        self.lines = input

    self.index = 0

    # For each section gather title, contents and underline character.
    # Over-under titles are indicated by 2-character strings for
    # underline_character.  The initial section is the root.
    self.section = {'title':'root', 'contents':[], 'underline_character':'root'}
    # the list of completed sections
    self.sections = []
#@+node:ekr.20060209173725.4: *5* isCharacterLine
def isCharacterLine(self):
    """Return True when the current line is non-empty and consists of a
    single repeated valid underline character."""
    line = self.lines[self.index]
    if not line:
        return False
    first = line[0]
    if first not in valid_underline_characters:
        return False
    # Every character must match the first one.
    return all(ch == first for ch in line)
#@+node:ekr.20060209173725.5: *5* isTransition
def isTransition(self):
    """self.index points at a character line; it is a transition when
    the lines on either side are blank.

    (The original also bound the current line and shadowed the builtin
    `next`; both locals were unused/renamed.)
    """
    prev_line = self.lines[self.index - 1]
    next_line = self.lines[self.index + 1]
    return len(prev_line) == 0 and len(next_line) == 0
#@+node:ekr.20060209173725.6: *5* isUnderline
def isUnderline(self):
    """self.index points at a character line.  It is an underline when
    the previous line is non-empty, no longer than this line, and is
    itself preceded by a blank line."""
    current = self.lines[self.index].strip()
    prev = self.lines[self.index - 1].strip()
    prevprev = self.lines[self.index - 2].strip()
    if not prev or len(prev) > len(current):
        return False
    return len(prevprev) == 0
#@+node:ekr.20060209173725.7: *5* isUnderOverline
def isUnderOverline(self):
    """self.index points at a character line.  It is an over-underline
    when a title (non-empty, no longer than this line) follows, itself
    followed by an identical character line."""
    current = self.lines[self.index].strip()
    title = self.lines[self.index + 1].strip()
    # The overline may be the last line of the document.
    try:
        closing = self.lines[self.index + 2]
    except IndexError:
        return False
    if closing != current:
        return False
    return len(title) > 0 and len(title) <= len(current)
#@+node:ekr.20060209173725.8: *5* isSectionHead
def isSectionHead(self):
    """The current line is a character line: decide whether it marks a
    section heading and, if so, close the current section, start the
    next one, and advance self.index past the heading.

    Returns True when a heading was consumed, a falsy value otherwise
    (transitions and stray character lines are left to the caller).
    http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#sections
    """
    # save typing with aliases
    current = self.lines[self.index]
    prev = self.lines[self.index - 1]
    next = self.lines[self.index + 1]

    # a transition has a blank line before and after
    if self.isTransition():
        return False

    # underline section heading
    if self.isUnderline():
        # Previous to discovering the underline, we appended the
        # section title to the current section; remove it before
        # closing the section.
        self.section['contents'].pop()
        self.closeCurrentSection()
        self.section['underline_character'] = current[0]
        self.section['title'] = prev
        # step index past this line
        self.index += 1
        return True

    # over-under section heading
    if self.isUnderOverline():
        self.closeCurrentSection()
        self.section['underline_character'] = current[0:2]
        # leading whitespace is allowed in over-under style, remove it
        self.section['title'] = next.strip()
        # step index past overline, section title, and underline
        self.index += 3
        return True
    # Not a heading.  (The original had an unreachable `raise` placed
    # after a `return True`; that dead code has been removed.)
    return False
#@+node:ekr.20060209173725.9: *5* closeCurrentSection
def closeCurrentSection(self):
    """Append the finished section to self.sections and reset
    self.section to a fresh empty one for the next heading."""
    self.sections.append(self.section)
    self.section = dict(title='', contents=[], underline_character='')
#@+node:ekr.20060209173725.10: *5* insertTitle
def insertTitle(self, uc, isSubTitle=False):
    """Merge self.sections[1] (the first real section) into
    self.sections[0] (the root) as a title or subtitle.

    The merge works the same for both: deleting sections[1] makes any
    subtitle become the new sections[1].  When isSubTitle is False the
    root's own title is set from the merged section.
    """
    root = self.sections[0]
    first = self.sections[1]
    title = first['title']

    if not isSubTitle:
        root['title'] = title

    # Extend the character line by 4 and indent the title by 2.
    charline = uc[0] * (len(title) + 4)
    root['contents'].extend(['', charline, '  ' + title, charline, ''])

    # Append the merged section's lines individually, not as a list.
    root['contents'].extend(first['contents'])

    del self.sections[1]
#@+node:ekr.20060209173725.11: *5* fixupSections
def fixupSections(self):
    """Make corrections to the list of sections
    to reflect the syntax for 'Title' and 'Subtitle'

    If the first section heading is a unique over/under
    it is a title, and should stay in the root section.

    If the second section heading is a unique over/under
    it is a subtitle and should remain in the root section.

    NOTE(review): insertTitle deletes self.sections[1], so after a
    title has been merged the subtitle candidate sits at index 1, yet
    the code below still examines index 2 -- confirm this is intended.
    """

    def isUnique(uc, start):
        # True when no section at index >= start reuses underline uc.
        index = start
        while index < len(self.sections):
            if self.sections[index]['underline_character'] == uc:
                return False
            index += 1
        return True                

    # self.sections[0] is root, a special case
    underline_first = self.sections[1]['underline_character'] 
    # len > 1 means an over-under style heading (2-character marker).
    if len(underline_first) > 1:
        if isUnique(underline_first, 2):
            # the section head is the document title and must
            # be added to the root section
            self.insertTitle(underline_first)
    if len(self.sections) > 2:
        underline_second = self.sections[2]['underline_character'] 
        if len(underline_second) > 1:
            if isUnique(underline_second, 3):
                # the section head is the document subtitle and must
                # be added to the root section
                self.insertTitle(underline_second, True)
#@+node:ekr.20060209173725.12: *5* contents2String
def contents2String(self):
    """Flatten each section's list of content lines into a single
    newline-joined string, suitable for a Leo body."""
    for sec in self.sections:
        sec['contents'] = '\n'.join(sec['contents'])
#@+node:ekr.20060209173725.13: *5* processLines
def processLines(self):
    """Loop through the lines of ReST input, building a list
    of sections. A section consists of::
        -title
        -contents
        -underline_character
    """
    line_count = len(self.lines)

    while self.index < line_count:
        if self.isCharacterLine() and self.isSectionHead():
            # isCharacterLine() and isSectionHead() do all the housekeeping
            # required. This doesn't look like good style, but I'm not
            # sure how this should be written.
            pass
        else:        
            # Ordinary line: append it to the current section's contents.
            self.section['contents'].append(self.lines[self.index])
            self.index += 1

    self.closeCurrentSection()
    # NOTE(review): sections[0] is the root section, whose
    # underline_character is the 4-character string 'root', so the
    # inner length test is always true; only the outer test matters.
    if len(self.sections) > 1:
        if len(self.sections[0]['underline_character']) > 1:
            self.fixupSections()
    self.contents2String()
    return self.sections
#@+node:ekr.20060209173725.14: *4* class BuildLeo
class BuildLeo:
    """Build a Leo subtree from the section list made by ParseReST."""
    @others
#@+node:ekr.20060209173725.15: *5* __init__
def __init__(self, nodes):
    """Store the sections produced by ParseReST.processLines.

    nodes is a list of dicts, each with the keys
    'underline_character', 'title' and 'contents'.
    """
    self.nodes = nodes
    # Maps each underline_character to the most recent Leo node
    # created at that outline level.
    self.levels = {}
    # Underline characters in level order; 'root' is always first.
    self.underline_characters = ['root']

#@+node:ekr.20060209173725.16: *5* processNodes
def processNodes(self):
    """Step through the list of sections created by ParseReST, creating
    the corresponding Leo nodes under a new sibling of the current node.

    Fix: the original used dict.has_key, which was removed in Python 3;
    the `in` operator works in both versions.
    """
    # Create root node as a sibling of current node.
    root = p.insertAfter()
    self.levels['root'] = root
    root.setBodyString(self.nodes[0]['contents'])
    root.setHeadString(self.nodes[0]['title'])

    # Step through the rest of the nodes.
    for node in self.nodes[1:]:
        uc = node['underline_character']
        if uc in self.levels:
            # This level exists: insert under the parent of that level.
            parent_index = self.underline_characters.index(uc) - 1
            parent_uc = self.underline_characters[parent_index]
            parent = self.levels[parent_uc]
        else:
            # First time this uc is encountered: it starts a new
            # sublevel, whose parent is the most recent (last) level.
            parent = self.levels[self.underline_characters[-1]]
            self.underline_characters.append(uc)
        current = parent.insertAsLastChild()
        self.levels[uc] = current
        current.setHeadString(node['title'])
        current.setBodyString(node['contents'])
#@+node:ekr.20060209173725.17: *4* ReST2Leo
def ReST2Leo(input):
    """A wrapper for ParseReST and BuildLeo.

    input may be a URL (http...), a local file name, or the document
    itself (a pre-split list of lines).

    Fixes: the original never imported HTTPError (a NameError on any
    failed fetch), used Python 2 urllib/print syntax, and left `data`
    unbound when input was not a string.
    """
    data = input  # pass non-string input (a list of lines) straight through
    if isinstance(input, str):
        if input.startswith("http"):
            from urllib.request import urlopen
            from urllib.error import HTTPError
            try:
                # decode: urlopen().read() returns bytes in Python 3.
                data = urlopen(input).read().decode('utf-8', errors='replace')
            except HTTPError:
                print('Unable to open page %s' % input)
                return
        else:
            try:
                data = open(input, 'r').read()
            except IOError:
                print('Unable to open file %s' % input)
                return

    parsed = ParseReST(data)
    sections = parsed.processLines()
    nodes = BuildLeo(sections)
    nodes.processNodes()
    c.redraw()
#@+node:ekr.20211014123559.1: *3* script: web-to-outline
"""
Convert the TeX sources, assumed to be in ~/tex.web, to an outline.

https://mirror.las.iastate.edu/tex-archive/systems/knuth/dist/tex/tex.web
"""
g.cls()
import os
import re
# Read
path = os.path.expanduser('~/tex.web')
with open(path) as f:
    contents = f.read()
# Create root.
last = c.lastTopLevel()
if last.h == 'tex.web':
    last.doDelete()
last = c.lastTopLevel()
root = last.insertAfter()
root.h = 'tex.web'
root.b = '@language tex'
prefix = root.insertAsLastChild()
prefix.h = 'prefix'
# Patterns
at_star_pat = re.compile(r'^@\*(.*?)$')
at_space_pat = re.compile(r'^@ (.*?)$')
at_p_pat = re.compile(r'^@p (.*?)$')
at_sec_pat = re.compile(r'^@<(.*?)@>=(.*?)$')
@others  # Define handlers and helpers.
# Dispatch table: first matching pattern handles the line.
table = (
    (at_star_pat, do_at_star),
    (at_space_pat, do_at_space),
    (at_p_pat, do_p),
    (at_sec_pat, do_sec),
)
count = 0
parents = [('prefix', prefix)]  # Tuples: (kind, p)
for i, s in enumerate(g.splitLines(contents)):
    for pattern, helper in table:
        m = pattern.match(s)
        if m:
            helper(i, m, s)  # m not used at present.
            count += 1
            # Fix: without this break the for-else below also ran,
            # appending every matched heading line to the body as well.
            break
    else:
        # No pattern matched: the line is body text of the last node.
        parent = parents[-1][1]
        parent.b += s
# Finish
root.expand()
c.redraw(root)
print(f"done: {count} pattern{g.plural(count)}")
#@+node:ekr.20211014123559.2: *4* Handlers
def do_at_star(i, m, s):
    # '@*' starts a major part: create a new child of root and reset
    # the parents stack so subsequent nodes attach beneath it.
    global parents
    print(s.rstrip())  # A good progress indicator.
    parent = root.insertAsLastChild()
    parent.h = s.strip()
    parents = [('@*', parent)]  # Always prune the stack.

def do_at_space(i, m, s):
    # '@ ' line: create one node under the current '@*' node.
    new_node('@ ', s)
    
def do_p(i, m, s):
    # '@p' line: create one node under the current '@*' node.
    new_node('@p', s)

def do_sec(i, m, s):
    # '@<...@>=' line: create one node under the current '@*' node.
    new_node('@<', s)
#@+node:ekr.20211014123559.3: *4* new_node
def new_node(kind, h):
    """Create a new node for line h as the last child of the current
    '@*' node, pruning the parents stack back to that node if needed.

    NOTE(review): the `kind` argument is effectively ignored -- the
    original code immediately overwrote it with the kind on top of the
    parents stack.  The parameter is kept so callers are unchanged;
    the clobbering local is now named stack_kind for clarity.
    """
    global parents
    stack_kind = parents[-1][0]
    if stack_kind == '@*':
        parent = parents[-1][1]
    else:
        # Prune the stack back to the '@*' entry.
        parent_tuple = parents[0]
        assert parent_tuple[0] == '@*', parents
        parents = [parent_tuple]
        parent = parent_tuple[1]
    child = parent.insertAsLastChild()
    child.h = h.strip()
    parents.append(('@ ', child))
#@+node:ekr.20211009080137.1: ** Installation
#@+node:ekr.20130812034101.12558: *3* script: Add docutils to python3
@language python

@ Matt Wilkie <maphew@gmail.com>

Here is a recipe using the pip python installer, that adds docutils to
python 3 in about 5 minutes. Ideally the same template/process would be
extended for all of Leo, and wrapped up in a nice package.

Depends on win32 `curl.exe` being available,
http://curl.haxx.se/dlwiz/?type=bin&os=Win32&flav=-&ver=-

There are lots of scary looking warnings and messages emitted to the
console, mostly about unicode and files looked for and not found. A couple
of places I needed to tap [enter] (with no prompt saying that was
necessary). At the "install docutils" stage there was a long pause with
nothing apparent happening, perhaps 3 minutes.

The command shell was a generic windows cmd.exe shell with no python
variables set (e.g. PYTHONPATH, PYTHONHOME, etc.)

Recipe adapted from http://trac.osgeo.org/osgeo4w/wiki/ExternalPythonPackages
@c

pushd c:\\python32

:: test for docutils
python -c "import docutils; dir(docutils)"

::Traceback (most recent call last):
::  File "<string>", line 1, in <module>
::ImportError: No module named docutils

:: install python `distribute`
curl http://python-distribute.org/distribute_setup.py | python

:: install pip
curl --insecure https://raw.github.com/pypa/pip/master/contrib/get-pip.py |
python

::install docutils
.\\scripts\\pip.exe install docutils

:: test that docutils is available
python -c "import docutils; help(docutils)"

::Help on package docutils:
::
::NAME
::    docutils - This is the Docutils (Python Documentation Utilities)
package.
::
::DESCRIPTION
::    Package Structure
::    =================
::: ...snip...
#@+node:ekr.20201030065548.6: *3* script: Speed Ream: install leowapp
@language python
@tabwidth -4
<< docstring >>
<< usage >>
<< commands >>
@others
run()
#@+node:ekr.20201030065548.7: *4* << docstring >>
@language rest
@wrap

"""
install.py

Version 1.0
Copyright David Speed Ream 25 November 2018
This file is released to public domain.


Install leowapp into a virtual python virtual environment on Ubuntu or
Debian.

run python install.py for usage information.

This install does not depend upon or install QT.
This install does require git to be installed.

To use this script to install leowapp in a NOT python virtual environment,
go to the routine:
    run_env_cmd()

and comment out everything except the last two lines. Make sure to examine
the consequences of doing this prior to doing it. Or make a backup first.
    
After this install is completed, new updates to any of the projects
(except tornado) can be pulled from github, getting only the most recent
changes, additions or deletions to any of the project files. After such a
change, the project must be reinstalled again using pip using one of the
script commands. For example, after pulling a change to leowapp, reinstall
leowapp again by:
    starting the virtual environment
    running python install.py leoins

This install method requires manually getting zip files for each project
and placing them into the ./zips folder.

All the flexx project zips come from github flexxui. Leo which comes from
github leo-editor. Tornado comes from tornadoweb.

In the ./zips directory, place the following zip files:
 dialite-master.zip
 flexx-master.zip
 leo-editor-skeleton.zip
 pscript-master.zip
 tornado-master.zip
 webruntime-master.zip

The directory structure when starting this script:

~/anydir
~/anydir/zips
~/anydir/zips/dialite-master.zip
~/anydir/zips/flexx-master.zip
~/anydir/zips/leo-editor-skeleton.zip
~/anydir/zips/pscript-master.zip
~/anydir/zips/tornado-master.zip
~/anydir/zips/webruntime-master.zip

Creating the Python virtual environment:
    This is what I did on Ubuntu 18.10

cd ~/anydir

python3 -m venv env

Prior to running this install script, and always prior to running leowapp,
enter the virtual env by:

cd ~/anydir
source env/bin/activate

After done with work:
cd ~/anydir
deactivate
"""
#@+node:ekr.20201030065548.8: *4* << usage >>
@language rest
@wrap

u = \
"""
Basic Usage:

cd ~/this_directory

python3 install.py
    print this usage message, then:
    list all available steps

python3 install.py show
    show the next step in the sequence

python3 install.py next
    do the next step in the sequence

python3 install.py <step name>
    do the given step

python3 install.py restart
    Restart the process over from the start. This deletes the file data.dat
    from the current directory.

python3 install.py commands
    show some interesting command lines used by this script

"""

#@+node:ekr.20201030065548.9: *4* << commands >>
@language rest
@wrap

"""
The following lines are displayed by entering:
    python install.py commands

"""

cmds = \
"""

unzip, force overwrite, to destination folder.

unzip -o myzip.zip -d /home/me/working_stuff/destination_folder

From an existing empty project folder, create a valid .git folder for an
existing github project.

-mkdir /home/me/stuff/my-project/.git
-cd /home/me/stuff/my-project/.git
-git clone --bare --single-branch -b mybranch --depth=1 --shallow-submodules https://github.com/my-project/my-project.git .

After making a valid .git folder for the project, and after extracting the
source files from a git zip into the project folder, setup the project for
use and get any recent changes to the project from github.

-cd /home/me/stuff/my-project
-git init
-git reset HEAD
-git pull --depth=1 --allow-unrelated-histories https://github.com/my-project/my-project.git mybranch
-git status
-After running git status, if any items are marked 'modified' or 'deleted',
then manually run git: checkout <item>
-keep running status and checkout until all items are up to date

Using pip, install a package downloaded via the github process above into
the current python installation without pulling in other dependencies.
Please note that all the given command line options must be used, even when installing a package for the first time.

-pip install --upgrade --no-deps --force-reinstall /home/me/stuff/my-project
"""
#@+node:ekr.20201030065548.10: *4* imports
from os      import environ
from os      import getcwd
from os      import mkdir
from os      import system
from os      import unlink
from os.path import exists
from os.path import join
from pickle  import dump
from pickle  import load
from sys     import argv
from sys     import exit

"""
from subprocess import Popen, PIPE
"""
#@+node:ekr.20201030065548.11: *4* globals
CWD         = getcwd()
DATA_NAME   = "data.dat"
DATA_FILE   = join(CWD,DATA_NAME)
DIAL_REPO   = "https://github.com/flexxui/dialite.git"
SOURCES     = "sources"
VIRTUAL_ENV ="/home/bridge1/Virtual/leowapp/env"
ar1         = ar2 = ar3 = None

#@+node:ekr.20201030065548.12: *4* data
repo_list = (
(
"dialite",
"master",
"/home/bridge1/Virtual/leowapp/zips/dialite-master.zip",
"https://github.com/flexxui/dialite.git",
),
(
"pscript",
"master",
"/home/bridge1/Virtual/leowapp/zips/pscript-master.zip",
"https://github.com/flexxui/pscript.git",
),
(
"tornado",
"master",
"/home/bridge1/Virtual/leowapp/zips/tornado-master.zip",
"https://github.com/tornadoweb/tornado.git",
),
(
"webruntime",
"master",
"/home/bridge1/Virtual/leowapp/zips/webruntime-master.zip",
"https://github.com/flexxui/webruntime.git",
),
(
"flexx",
"master",
"/home/bridge1/Virtual/leowapp/zips/flexx-master.zip",
"https://github.com/flexxui/flexx.git",
),
(
"leo-editor",
"skeleton",
"/home/bridge1/Virtual/leowapp/zips/leo-editor-skeleton.zip",
"https://github.com/leo-editor/leo-editor.git",
),
)

order = (
"dialzip",
"dialgit",
"dialins",
"pscrzip",
"pscrgit",
"pscrins",
"tornzip",
"torngit",
"tornins",
"webrzip",
"webrgit",
"webrins",
"flexzip",
"flexgit",
"flexins",
"leozip",
"leogit",
"leoins",
"leocpy",
)

data = {
"dialzip":("setup_zip('dialite')",
           "Unzip the dialite zip and place it in the correct directory",
           False),
"dialgit":("setup_git('dialite')",
           "Setup the dialite github repo.",
           False),
"dialins":("install('dialite')",
           "Using pip, install dialite from the sources directory",
           False),
"pscrzip":("setup_zip('pscript')",
           "Unzip the pscript zip and place it in the correct directory",
           False),
"pscrgit":("setup_git('pscript')",
           "Setup the pcscript github repo.",
           False),
"pscrins":("install('pscript')",
           "Using pip, install pscript from the sources directory",
           False),
"tornzip":("setup_zip('tornado')",
           "Unzip the tornado zip and place it in the correct directory",
           False),
"torngit":("setup_git('tornado')",
           "Setup the tornado github repo.",
           False),
"tornins":("install('tornado')",
           "Using pip, install tornado from the sources directory",
           False),
"webrzip":("setup_zip('webruntime')",
           "Unzip the webruntime zip and place it in the correct directory",
           False),
"webrgit":("setup_git('webruntime')",
           "Setup the webruntime github repo.",
           False),
"webrins":("install('webruntime')",
           "Using pip, install webruntime from the sources directory",
           False),
"flexzip":("setup_zip('flexx')",
           "Unzip the flexx zip and place it in the correct directory",
           False),
"flexgit":("setup_git('flexx')",
           "Setup the flexx github repo.",
           False),
"flexins":("install('flexx')",
           "Using pip, install flexx from the sources directory",
           False),
"leozip" :("setup_zip('leo-editor')",
           "Unzip the leo-editor zip and place it in the correct directory",
           False),
"leogit": ("setup_git('leo-editor')",
           "Setup the leo-editor github repo.",
           False),
"leoins": ("install('leo-editor')",
           "Using pip, install leo-editor from the sources directory",
           False),
"leocpy": ("leo_copy_test()",
           "Copy the leo test directory, which is used by leowapp, but"\
           " is not part of the leo github distribution in the master"\
           " branch",
           False),
}

if exists(DATA_FILE):
    with open(DATA_FILE, "rb") as f:
        data = load(f)
#@+node:ekr.20201030065548.13: *4* control routines
def usage():
    # Print the module-level usage string u (defined in << usage >>).
    print(u)

def get_args():
    """Copy up to three command-line arguments into the module globals
    ar1 (the script name), ar2 (the step name) and ar3.

    Arguments that are absent simply leave the corresponding global
    at its default (None).
    """
    global ar1, ar2, ar3
    # Catch only IndexError: the original bare `except:` also hid
    # unrelated bugs.
    try:
        ar1 = argv[0]
        ar2 = argv[1]
        ar3 = argv[2]
    except IndexError:
        pass

def dump_opts():
    """List every step name, padded, with its description."""
    for step in order:
        print("{}: {}".format(step.ljust(7, " "), data[step][1]))
    print("")

def save_data():
    # Pickle the step-status table to DATA_FILE (protocol 0 keeps the
    # file human-readable).
    with open(DATA_FILE, "wb") as f:
        dump(data,f,protocol=0)

def do_step(item):
    """Run the step named item, regardless of whether it has been done
    already; on success record it as done in `data`.

    Fix: the bare `print` statements were Python 2 leftovers -- in
    Python 3 a bare `print` is a no-op expression, so the intended
    blank lines never appeared.
    """
    val = data[item]
    print()
    print("{}: {}".format(item,val[1]))
    print(val[0])
    # NOTE(review): the action string is exec'd; all actions come from
    # the hard-coded `data` table above, not from user input.
    exec(val[0])
    print()
    data[item] = (val[0],val[1],True)

def run():
    """Top-level dispatcher: act on the command-line argument ar2
    (a step name or one of next/steps/show/restart/commands)."""
    # ar2 is the step name.
    global ar2

    get_args()

    if not ar2:
        # If there is no step name, just show usage and the steps
        usage()
        dump_opts()

    elif ar2 in data:
        # If the step name is in the list, just do this step and
        # mark it done
        do_step(ar2)
        save_data()

    if ar2 == "next":
        # do the next step in the process that hasn't been done already,
        # then mark it done
        for item in order:
            if data[item][2] == False:
                do_step(item)
                save_data()
                break

    if ar2 == "steps":
        # list all the steps
        print("")
        dump_opts()

    if ar2 == "show":
        # show the next step in the process that hasn't been done already
        for item in order:
            if data[item][2] == False:
                print("\n{}: {}\n".format(item,data[item][1]))
                break

    if ar2 == "restart":
        # delete data.dat from the current directory
        if exists(DATA_FILE):
            unlink(DATA_FILE)

    if ar2 == "commands":
        # show some interesting command lines used by this script
        print(cmds)
#@+node:ekr.20201030065548.14: *4* run routines
@others
#@+node:ekr.20201030065548.15: *5* setup_dirs
def setup_dirs():
    # Create every directory named in dir_list.
    # NOTE(review): dir_list is not defined anywhere in this script as
    # shown -- calling this would raise NameError; confirm where
    # dir_list is supposed to come from.
    for item in dir_list:
        create_dir(item)
#@+node:ekr.20201030065548.16: *5* leo_copy_test
def leo_copy_test():
    """Placeholder (not yet implemented).

    TODO: Copy the entire directory
    sources/leo-editor-skeleton/test to site-packages/leo/test.
    """
    return None
#@+node:ekr.20201030065548.17: *5* create_dir
def create_dir(item):
    """Create the directory CWD/item unless it already exists."""
    path = join(CWD, item)
    if exists(path):
        return
    print("creating {}".format(path))
    mkdir(path)
#@+node:ekr.20201030065548.18: *5* run_env_cmd
def run_env_cmd(cmd):
    """Run cmd via os.system, but only when the expected virtual
    environment is active; otherwise print an error and exit.

    Fix: the original triggered an undefined name (`barf`) inside a
    bare try/except to detect the missing/mismatched environment;
    environ.get makes the check explicit and catches nothing it
    shouldn't.
    """
    msg = "\n\nERROR! Pip install must be run under the "\
          "virtual environment:\n'{}'!\n\n".format(VIRTUAL_ENV)
    # environ.get avoids a KeyError when no virtualenv is active at all.
    if environ.get('VIRTUAL_ENV') != VIRTUAL_ENV:
        print(msg)
        exit(0)
    print(cmd)
    system(cmd)
#@+node:ekr.20201030065548.19: *5* clone_repo
def clone_repo(name, branch, git_name):
    """Create a bare .git directory for name/branch under the sources
    tree, shallow-cloning just that single branch."""
    git_dir = join(CWD, SOURCES, "{}-{}/.git".format(name, branch))
    if not exists(git_dir):
        mkdir(git_dir)
    cmd = ("cd {} && git clone --bare --single-branch -b {} "
           "--depth=1 --shallow-submodules {} .").format(git_dir, branch, git_name)
    print(cmd)
    system(cmd)
#@+node:ekr.20201030065548.20: *5* setup_git
"""
Clone a bare .git directory for the github repo
"""
def setup_git(repo_name):
    """Clone a bare .git directory for repo_name, then pull its branch.

    Unknown repo names (get_names returns Nones) are silently ignored.
    """
    name, branch, zip_name, git_name = get_names(repo_name)
    if name:
        clone_repo(name, branch, git_name)
        pull_repo(name, branch, git_name)
#@+node:ekr.20201030065548.21: *5* unzip
def unzip(name):
    """Unzip the archive `name` into the sources directory, overwriting
    any existing files (unzip -o).

    Fix: the original had leftover unreachable lines after the return
    (recomputing src/dest); that dead code has been removed.
    """
    dest = join(CWD,SOURCES)
    cmd = "unzip -o {} -d {}".format(name,dest)
    print(cmd)
    system(cmd)
#@+node:ekr.20201030065548.22: *5* get_names
def get_names(repo_name):
    """Look repo_name up in repo_list.

    Returns (name, branch, zip_name, git_name) and prints the entry
    when found; returns four Nones for an unknown repo.
    """
    for name, branch, zip_name, git_name in repo_list:
        if name == repo_name:
            stub = "name:{}\nbranch:{}\nzip: {}\ngit: {}"
            print(stub.format(name, branch, zip_name, git_name))
            return name, branch, zip_name, git_name
    return None, None, None, None
#@+node:ekr.20201030065548.23: *5* setup_zip
"""
Unzip a master zip file into its source directory
"""
def setup_zip(repo_name):
    """Unzip repo_name's master zip into its source directory.

    Unknown repo names (get_names returns Nones) are silently ignored.
    """
    name, branch, zip_name, git_name = get_names(repo_name)
    if name:
        unzip(zip_name)
#@+node:ekr.20201030065548.24: *5* pull_repo
def pull_repo(name, branch, git_name):
    """Run git init plus a shallow pull of the given branch inside the
    repo's unpacked source directory."""
    print("pull_repo")
    repo_dir = join(CWD, SOURCES, "{}-{}".format(name, branch))
    for cmd in (
        "cd {} && git init".format(repo_dir),
        "cd {} && git pull --depth=1 --allow-unrelated-histories {}  {}".format(repo_dir, git_name, branch),
    ):
        print(cmd)
        system(cmd)
#@+node:ekr.20201030065548.25: *5* install
def install(repo_name):
    """pip-reinstall repo_name from its unpacked source directory,
    via run_env_cmd (so only inside the virtual environment)."""
    name, branch, zip_name, git_name = get_names(repo_name)
    if not name:
        return
    src = join(CWD, SOURCES, "{}-{}".format(name, branch))
    run_env_cmd("pip install --upgrade --no-deps --force-reinstall {}".format(src))
#@+node:mhw-debug-1081.20190310203401.1: *3* Windows-only scripts
Scripts only of interest when on Microsoft Windows platform.
@path win

The register- and unregister-leo scripts have been moved to Desktop-integration.leo
#@+node:maphew.20130613230258.2801: *4* @file elevate.py
''' Open a new python interpreter after asking for elevated UAC permissions, and feed it the python script specified on the command line.

    python elevate.py d:\full\path\to\some-script.py {args for some-script}
'''
import sys
import ctypes
import tempfile

# -i : ask python interpreter to stay open when done, to see messages
params = "-i {}".format(' '.join(sys.argv[1:]))
#print(params)

@others

elevate(params)
#@+node:maphew.20130613230258.2803: *5* UAC Elevation
def elevate(params):
    """Re-launch the console Python elevated (UAC 'runas'), passing it
    `params` as its command line.

    NOTE(review): per the Win32 ShellExecute docs the return value is
    > 32 on success; this code only reports the value, it does not
    branch on it.
    """
    hwnd = 0                # parent window
    lpOperation = 'runas'   # force elevated UAC prompt
    lpFile = sys.executable # path to python
    lpFile = lpFile.replace('pythonw.exe', 'python.exe') # force console python, only way to see messages
    lpParameters = params   # arguments to pass to python
    lpDirectory = tempfile.gettempdir() # working dir
    nShowCmd = 1            # window visibility, must be 1 for Leo.

    print(lpFile, lpParameters)
    #g.es(lpFile, lpParameters)
    retcode = ctypes.windll.shell32.ShellExecuteW(hwnd, lpOperation, lpFile, lpParameters, lpDirectory, nShowCmd)
    msg = 'Exit code: {0} - {1}'.format(retcode, ctypes.FormatError(retcode))
    print(msg)
    #g.es(msg)




#@+node:mhw-debug-1081.20190222004721.1: *6* thank_you
def thank_you():
    """Credits (no-op).

    @SnakE (Sergey Gromov):
        ShellExecuteA() for ASCII and ShellExecuteW() for Unicode
        https://forum.dlang.org/post/MPG.221aef3bb709d91989690@news.digitalmars.com
    """
    return None
#@+node:maphew.20130809155103.2863: *4* @auto build-leo.bat
@language batch
@:: Build source distribution and wheel
@:: typically used for deploying to PyPi.org and installing Leo with Pip
@set _p=%prompt%
@set prompt=$H
@echo This can take a 5 or more minutes, be patient with no screen output.
@:: Run from the repository root (three levels up from this script).
pushd %~dp0..\..\..
python setup.py --quiet sdist bdist_wheel
@:: Open the dist folder when the build finishes.
start dist
popd
@set prompt=%_p%
#@+node:ekr.20060824111500: ** jEdit2Py
#@+node:ekr.20060824111500.108: *3* @button jEdit2Py
"""
Convert jEdit description file (an .xml file) to an equivalent .py file.
"""
import string
import textwrap
import xml.sax
import xml.sax.saxutils

# Globals...
files_list = ['openscad.xml']
theDir = g.os_path_abspath(g.os_path_join(g.app.loadDir, '..', 'modes'))
files = [g.os_path_abspath(g.os_path_join(theDir, s)) for s in files_list]

@others

main()

@language python
@tabwidth -4
@pagewidth 80
#@+node:ekr.20060824111500.113: *4* top-level
#@+node:ekr.20160319080102.1: *5* cleanSaxInputString
def cleanSaxInputString(s):
    """Return s with ASCII control characters (except tab, CR, LF) replaced by spaces.

    Works for both bytes and str input: a 256-entry bytes translation
    table maps each unwanted control character (form-feed is 12 decimal)
    to a space; for str input, ordinals above 255 fall outside the table
    and are left unchanged.
    """
    keep = ('\t', '\r', '\n')
    bad = ''.join(chr(i) for i in range(32) if chr(i) not in keep)
    table = bytes.maketrans(bytes(bad, 'utf-8'), b' ' * len(bad))
    return s.translate(table)
#@+node:ekr.20060824111500.115: *5* parse_jEdit_file
def parse_jEdit_file(inputFileName, language):
    """Parse the jEdit mode file and return the resulting Mode, or None on error.

    Fixes vs. the original:
    - `return` inside `finally` silently discarded any in-flight exception;
      the return now happens after normal control flow.
    - bare `except:` narrowed to `except Exception:`.
    - the file is managed with a `with` statement instead of manual close.
    - all failure paths consistently return None.
    """
    if not inputFileName:
        return None
    if not inputFileName.endswith('.xml'):
        inputFileName = inputFileName + '.xml'
    path = g.os_path_join(g.app.loadDir, '../', 'modes', inputFileName)
    path = g.os_path_normpath(path)
    if not g.os_path_exists(path):
        g.es_print('not found:', path)
        return None
    mode = None
    try:
        with open(path, 'r') as f:
            parser = xml.sax.make_parser()
            # Do not include external general entities.
            # The actual feature name is
            # "http://xml.org/sax/features/external-general-entities"
            parser.setFeature(xml.sax.handler.feature_external_ges, 0)
            handler = ContentHandler(inputFileName)
            parser.setContentHandler(handler)
            parser.parse(f)  # Works with Python 3.
            mode = handler.getMode()
    except IOError:
        g.trace(f"can not open {path!r}")
        return None
    except Exception:
        g.es(f"unexpected exception parsing {inputFileName}", color='red')
        g.es_exception()
    return mode
#@+node:ekr.20060824111500.114: *5* convert
def convert(inputFileName, outputFileName):
    """Translate one jEdit .xml mode description into a Python mode file."""
    directory, baseName = g.os_path_split(inputFileName)
    language, extension = g.os_path_splitext(baseName)
    mode = parse_jEdit_file(inputFileName, language)
    if not mode:
        g.trace('FAIL: no mode', language, inputFileName)
        return
    with open(outputFileName, 'w') as f:
        try:
            mode.write(f, language)
            g.es_print('wrote', outputFileName)
        except IOError:
            g.es_print('can not create', outputFileName)
#@+node:ekr.20060824111500.116: *5* munge
def munge(s):
    """Munge a mode name into a valid, lower-case python identifier.

    Letters, digits and '_' are kept (lower-cased); anything else becomes '_'.
    """
    keep = string.ascii_letters + string.digits + '_'
    return ''.join(c.lower() if c in keep else '_' for c in s)
#@+node:ekr.20160319081141.1: *5* main
def main():
    """Convert every file in the global `files` list, reporting exceptions per file."""
    for source in files:
        target = (source[:-4] if source.endswith('.xml') else source) + '.py'
        try:
            convert(source, target)
        except Exception:
            print('Exception creating', target)
            g.es_exception()
    g.es_print('done')
#@+node:ekr.20060824111500.117: *4* class Mode
class Mode:
    """A class representing one jEdit language-description mode."""

    boolAttrs = [  # Boolean attributes: doAttribute converts these to real bools.
        'at_line_start', 'at_whitespace_end', 'at_word_start',
        'exclude_match', 'highlight_digits', 'ignore_case',
        'no_escape', 'no_line_break', 'no_word_break',
    ]
    ruleElements = [  # Elements that start a rule.
        'eol_span', 'eol_span_regexp',
        'import', 'keywords',
        'mark_following', 'mark_previous',
        'seq', 'seq_regexp',
        'span', 'span_regexp',
        'terminate',
    ]

    @others
#@+node:ekr.20060824111500.118: *5*  Mode.__init__
def __init__ (self, contentHandler, fileName):
    """Ctor for the Mode class: record the handler and init all parse state."""
    self.contentHandler = contentHandler
    self.fileName = g.shortFileName(fileName)  # The file from which the mode was imported.
    modeName, junk = g.os_path_splitext(self.fileName)
    self.fileModeName = modeName  # Mode name with original capitalization (used in generated rule names).
    self.modeName = munge(modeName).lower()  # Munged, lower-case mode name (used in generated dict names).
    self.outputFile = None  # The open output file to which Python statements get written.

    # Mode semantics.
    self.attributes = {}  # Mode-level attributes.
    self.handlerCount = 0  # Number of rule functions generated so far.
    self.importedRules = []  # A bunch describing the imported ruleset.
    self.inProps = False  # True: inside a <props> element.
    self.inRules = False  # True: inside a <rules> element.
    self.keywords = None  # The keywords rule of the present ruleset, if any.
    self.modeProperties = []  # Bunches from mode-level <property> elements.
    self.presentProperty = None  # A bunch to be assigned to modeProperties or rulesetProperties.
    self.rule = None  # The rule being parsed, or None.
    self.rulesets = []  # Completed Ruleset objects.
    self.rules = []  # The rules of the present rules element.
    self.rulesetProperties = []  # Bunches from ruleset-level <property> elements.
    self.rulesetAttributes = {}  # The attributes of the present rules element.
#@+node:ekr.20060824111500.119: *5*  Mode.__str__ & __repr__
def __str__ (self):
    """Return a terse description of this Mode for debugging."""
    return "<Mode for %s>" % self.fileName

__repr__ = __str__
#@+node:ekr.20060824111500.153: *5* Mode.error
def error (self, message):
    """Forward an error to the content handler (which bumps its error count)."""
    self.contentHandler.error(message)
#@+node:ekr.20060824111500.154: *5* Mode: Getters
def getAttributes (self):
    """Return the mode-level attributes dict."""
    return self.attributes

def getAttributesForRuleset (self, ruleset):
    """Return the attributes dict of the given ruleset bunch."""
    return ruleset.attributes

def getFileName (self):
    """Return the short name of the file the mode was imported from."""
    return self.fileName

def getKeywords (self, n, ruleset):
    """Return the list for key 'keyword<n>' in the ruleset's keywords dict, or []."""
    table = ruleset.keywords
    return table.get(f"keyword{n}", []) if table else []

def getLanguage (self):
    """Return the language name implied by the mode's file name."""
    junk, baseName = g.os_path_split(self.fileName)
    language, junk = g.os_path_splitext(baseName)
    return language

def getPropertiesForMode (self):
    """Return the mode-level properties.

    NOTE(review): self.props is never assigned in __init__ (which sets
    self.modeProperties) — confirm which attribute is intended.
    """
    return self.props

def getPropertiesForRuleset (self, name=''):
    """Return the properties list of the named ruleset, or []."""
    ruleset = self.getRuleset(name)
    return ruleset.properties if ruleset else []

def getRuleset(self, name=''):
    """Return the named ruleset; the main (first) ruleset when name is empty."""
    if not name:
        return self.rulesets[0] # Return the main ruleset.
    target = name.lower()
    for ruleset in self.rulesets:
        if ruleset.name.lower() == target:
            return ruleset
    return None

def getRulesets(self):
    """Return the list of all rulesets."""
    return self.rulesets

def getRulesForRuleset (self, name=''):
    """Return the rules list of the named ruleset, or []."""
    ruleset = self.getRuleset(name)
    return ruleset.rules if ruleset else []
#@+node:ekr.20220927115543.1: *5* Mode: Handlers
#@+node:ekr.20060824111500.150: *6* Mode.doAttribute
def doAttribute (self, name, val):
    """Record one xml attribute in the dict matching the current parse state.

    Boolean attributes (boolAttrs) are converted to real bools; all other
    values stay strings with their original case.
    """
    key = str(name.lower())
    if key in self.boolAttrs:
        value = val.lower() == 'true'
    else:
        value = str(val)  # Do NOT lower this value!
    # Priority: current rule, then open property, then ruleset, then mode.
    if self.rule:
        self.rule.attributes[key] = value
    elif self.presentProperty:
        self.presentProperty.get('attributes')[key] = value
    elif self.inRules:
        self.rulesetAttributes[key] = value
    else:
        self.attributes[key] = value
#@+node:ekr.20060824111500.151: *6* Mode.doContent
def doContent (self, elementName, content):
    """Attach character data to the current rule.

    For a keywords rule each content word becomes a key of keywordsDict,
    mapped to the element name (the keyword class, e.g. 'keyword1').
    For any other rule the content accumulates in rule.contents[name].
    """
    if not content:
        return
    name = str(elementName.lower())
    if self.inRule('keywords'):
        d = self.rule.keywordsDict
        d [ content ] = name
    elif self.rule:
        d = self.rule.contents
        s = d.get(name, '')
        d [name] = s + g.toUnicode(content)
        # NOTE(review): this aliases the rule's contents dict onto the Mode
        # itself — confirm self.contents is ever read.
        self.contents = d
#@+node:ekr.20060824111500.152: *6* Mode.endElement
def endElement (self, elementName):
    """Close a mode-level element: finish rulesets, properties and rules.

    Bug fix: the original set self.inProps = True on </props> (a copy/paste
    from startElement); leaving a props element must clear the flag.
    """
    name = elementName.lower()

    if name == 'props':
        self.inProps = False  # Was True: copy/paste bug from startElement.
    if name == 'rules':
        self.inRules = False
        # Freeze the accumulated state into a Ruleset.
        ruleset = Ruleset(self.rulesetAttributes, self.keywords, self.rulesetProperties, self.rules)
        self.rulesets.append(ruleset)
    if name == 'property':
        bunch = self.presentProperty
        if bunch:
            if self.inRules:
                self.rulesetProperties.append(bunch)
            else:
                self.modeProperties.append(bunch)
        else:
            self.error(f"end {name} not matched by start {name}")
        self.presentProperty = None
    if name in self.ruleElements:
        if self.inRule(name):
            self.rules.append(self.rule)
            self.rule = None
        else:
            self.error(f"end {name} not matched by start {name}")
#@+node:ekr.20060824111500.155: *6* Mode.inRule
def inRule (self, elementName):
    """True if the rule currently being parsed has the given element name."""
    rule = self.rule
    return rule and rule.name == elementName
#@+node:ekr.20060824111500.156: *6* Mode.startElement
def startElement (self, elementName):
    """Open a mode-level element: props, rules, property, or a rule element."""
    name = elementName.lower()

    if name == 'props':
        self.inProps = True
    if name == 'rules':
        self.inRules = True
        # NOTE(review): these reset dicts created in __init__ to *lists*;
        # endElement actually consumes self.rulesetAttributes (a dict),
        # which is NOT reset here — confirm the attributes/keywords resets
        # are intentional.
        self.attributes=[]
        self.keywords=[]
        self.rulesetProperties=[]
        self.rules=[]
    if name == 'property':
        if self.inProps:
            # NOTE(review): g.bunch (lower case) — elsewhere this file uses
            # g.Bunch; confirm both exist in the g module.
            self.presentProperty = g.bunch(name=name, attributes={})
        else:
            self.error('property not in props element')
    if name in self.ruleElements:
        if self.inRules:
            self.rule = Rule(name=name)
            if name == 'keywords':
                # The keywords rule doubles as the ruleset's keywords table.
                self.keywords = self.rule
        else:
            self.error(f"{name} not in rules element")
#@+node:ekr.20060824111500.120: *5* Mode: Output...
# These methods generate the output file.
#@+node:ekr.20060824111500.121: *6* Mode.escapeString & quoteString
def escapeString (self, s):
    '''Return string s enclosed in double quotes, with backslashes, double
    quotes and tabs escaped so the result is a valid Python string literal.
    Lists/tuples are joined first; empty input yields "".'''
    if not s:
        return '""'
    if isinstance(s, (list, tuple)):
        s = ''.join(s)
    s = g.toUnicode(s)
    # Order is important: escape backslashes first.
    # Don't use an f-string here!
    return '"%s"' % s.replace('\\', '\\\\').replace('"', '\\"').replace('\t', '\\t')

quoteString = escapeString
#@+node:ekr.20060827162343: *6* Mode.fullDelegate
def fullDelegate (self, delegate):
    """Return the fully-qualified, lower-case delegate ruleset name.

    Empty input returns ''; a name already containing '::' is returned
    lower-cased; otherwise the mode's file name is prefixed.
    """
    if not delegate:
        return ''
    qualified = delegate.lower()
    if '::' in qualified:
        return qualified
    return f"{self.fileModeName.lower()}::{qualified}"
#@+node:ekr.20060824111500.122: *6* Mode.put
def put (self, s):
    """Write s to the output file (self.outputFile is set by Mode.write)."""
    self.outputFile.write(s)
#@+node:ekr.20060824111500.123: *6* Mode.putAttributes
def putAttributes (self):
    """Write one attributes dict per ruleset, then attributesDictDict.

    Fix: compare against the singletons True/None with `is`, not `==`
    (PEP 8 / E711 / E712); behavior is unchanged for the defaults used here.
    """
    dd = {}
    data = (  # (key, default); the default's type selects the conversion below.
        ('default', 'null'),
        ('digit_re', ''),
        ('escape', ''),
        ('highlight_digits', True),
        ('ignore_case', True),
        ('no_word_sep', None), # could be false or ''.
    )
    for ruleset in self.rulesets:
        d = {}
        prefix = f"{self.modeName}_{ruleset.name}"
        self.put(f"# Attributes dict for {prefix} ruleset.\n")
        for key, default in data:
            val = ruleset.attributes.get(key, default)
            if default is True:
                # Boolean attributes become the strings 'true'/'false'.
                val = 'true' if val else 'false'
            elif default is None:
                if val and val.lower() == 'false':
                    val = ''
                else:
                    val = val or ''
            d[key] = val
        self.putDict(f"{prefix}_attributes_dict", d)
        dd[prefix] = f"{prefix}_attributes_dict"

    self.put(f"# Dictionary of attributes dictionaries for {self.modeName} mode.\n")
    self.putDict('attributesDictDict', dd, escape=False)
#@+node:ekr.20060824111500.124: *6* Mode.putDict & putDictOfLists
def putDict (self, name, theDict, escape=True):
    """Write `name = {...}` with sorted, escaped keys.

    When escape is False the values are written verbatim (identifiers).
    """
    esc = self.escapeString
    escVal = self.escapeString if escape else (lambda a: a)
    keys = sorted(g.toUnicode(z) for z in set(theDict.keys()) if z)
    body = ''.join(
        f"    {esc(key)}: {escVal(theDict.get(key))},\n" for key in keys
    )
    if body:
        body = '\n' + body
    self.put(f"{name} = {{{body}}}\n\n")

def putDictOfLists (self, name, theDict, strings=False):
    """Write `name = {key: [v1, v2, ...], ...}` with sorted, escaped keys.

    When strings is True each value is double-quoted — not completely
    general, but it works for the import dict, and that's good enough.
    """
    esc = self.escapeString
    keys = sorted(g.toUnicode(z) for z in set(theDict.keys()) if z)
    entries = []
    for key in keys:
        if strings:
            values = ', '.join(f'"{z}"' for z in theDict.get(key))
        else:
            values = ', '.join(f"{z}" for z in theDict.get(key))
        entries.append(f"    {esc(key)}: [{values}],\n")
    body = ''.join(entries)
    if body:
        body = '\n' + body
    self.put(f"{name} = {{{body}}}\n\n")
#@+node:ekr.20060824111500.125: *6* Mode.putImportDict
def putImportDict (self):
    """Write importDict: ruleset prefix -> list of delegated ruleset names."""
    d = {}
    for ruleset in self.rulesets:
        prefix = f"{self.modeName}_{ruleset.name}"
        for rule in ruleset.rules:
            if rule.name != 'import':
                continue
            delegate = rule.getStrAttrib('delegate').lower()
            if not delegate:
                continue
            qualified = delegate if '::' in delegate else f"{prefix}::{delegate}"
            entries = d.get(prefix, [])
            if qualified not in entries:
                entries.append(qualified)
                d[prefix] = entries

    self.put(f"# Import dict for {self.modeName} mode.\n")
    self.putDictOfLists('importDict', d, strings=True)
#@+node:ekr.20060824111500.126: *6* Mode.putKeywordsData
def putKeywordsData (self):
    """Write one keywords dict per ruleset, then keywordsDictDict."""
    dd = {}
    for ruleset in self.rulesets:
        prefix = f"{self.modeName}_{ruleset.name}"
        prefix = g.toUnicode(prefix)
        ignore_case = ruleset.attributes.get('ignore_case', 'false')
        if not isinstance(ignore_case, bool):
            # NOTE(review): this reads inverted ('false' -> True). It only
            # fires for the string default above, since doAttribute stores
            # real bools for boolAttrs — confirm the intent.
            ignore_case = ignore_case.lower() == 'false'
        self.put(f"# Keywords dict for {prefix} ruleset.\n")
        for rule in ruleset.rules:
            if rule.name == 'keywords':
                # d = rule.keywordsDict
                d = {
                    g.toUnicode(key): g.toUnicode(val)
                        for key, val in rule.keywordsDict.items()
                            if key.strip()
                }
                # Iterate a sorted copy of the keys so deletion below is safe.
                for key in sorted(set(d.keys())):
                    if ' ' in key or '\t' in key:
                        del d [key]
                        g.es_print(f"Ignoring keyword containing whitespace: {key!r}")
                    elif ignore_case: # New in 4.4.1 final.
                        # Downcase all keys.
                        val = d.get(key)
                        key2 = key.lower()
                        if key2 != key:
                            if key in d: del d[key]
                        d[key2] = val
                break
        else:
            d = {}  # No keywords rule in this ruleset.
        self.putDict('%s_keywords_dict' % (prefix), d)
        dd [ prefix ] = '%s_keywords_dict' % (prefix)

    self.put(f"# Dictionary of keywords dictionaries for {self.modeName} mode.\n")
    self.putDict('keywordsDictDict', dd, escape=False)
#@+node:ekr.20060824111500.128: *6* Mode.putModeProperties
def putModeProperties (self, language):
    """Write the module-level `properties` dict for the mode."""
    props = {}
    self.put(f"# Properties for {language} mode.\n")
    for bunch in self.modeProperties:
        attrs = bunch.attributes
        props[attrs.get('name')] = attrs.get('value')
    self.putDict('properties', props)
#@+node:ekr.20060824111500.129: *6* Mode.putRule & rule creators
def putRule (self, rule):
    """Dispatch to the output creator for the rule and bump handlerCount.

    Returns the creator's result: the leadin character(s), 'keywords',
    or '' when no rule function was generated.
    """
    creators = {
        'eol_span':         self.putEolSpan,
        'eol_span_regexp':  self.putEolSpanRegexp,
        'import':           self.putImport,
        'keywords':         self.putKeywords,
        'mark_following':   self.putMarkFollowing,
        'mark_previous':    self.putMarkPrevious,
        'seq':              self.putSeq,
        'seq_regexp':       self.putSeqRegexp,
        'span':             self.putSpan,
        'span_regexp':      self.putSpanRegexp,
        'terminate':        self.putTerminate,
    }
    creator = creators.get(rule.name, self.putBadRule)
    result = creator(rule)
    self.handlerCount += 1
    return result
#@+node:ekr.20060824111500.130: *7* Mode.putBadRule
def putBadRule (self, rule):
    """Report (in the generated file) a rule element with no output creator."""
    message = f"\n\n# *****no output creator for {rule.name}*****"
    self.put(message)
#@+node:ekr.20060824111500.131: *7* Mode.putEolSpan
def putEolSpan (self, rule):
    """Write the rule function for an eol_span element; return its leadin char.

    Fix: at_word_start was filled from getBoolAttrib('at_word_end'), an
    attribute jEdit never defines (and not in boolAttrs), so it was always
    'False'. It now reads 'at_word_start'.
    """
    quote = self.quoteString
    seq = rule.getSeq(rule.name)

    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_eol_span(s, i, kind=%s, seq=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s,
        delegate=%s, exclude_match=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_start'),  # was 'at_word_end'
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
        rule.getBoolAttrib('exclude_match'),
    )
    self.put(textwrap.dedent(s))
    return seq[0]
#@+node:ekr.20060824111500.132: *7* Mode.putEolSpanRegexp
def putEolSpanRegexp (self, rule):
    """Write the rule function for an eol_span_regexp element; return its hash char.

    Fix: at_word_start was filled from getBoolAttrib('at_word_end'), an
    attribute jEdit never defines, so it was always 'False'.
    """
    quote = self.quoteString
    seq = rule.getSeq(rule.name)
    hash_char = rule.getStrAttrib('hash_char') or seq[0]

    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_eol_span_regexp(s, i, kind=%s, regexp=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s,
        delegate=%s, exclude_match=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_start'),  # was 'at_word_end'
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
        rule.getBoolAttrib('exclude_match'),
    )
    self.put(textwrap.dedent(s))
    return hash_char
#@+node:ekr.20060824111500.133: *7* Mode.putImport
# Do nothing here: putImportDict creates x.importDict.

def putImport (self, rule):
    """Emit nothing for an import element (importDict handles imports)."""
    # Cancel putRule's increment: this method generated no rule.
    self.handlerCount -= 1
    return ''
#@+node:ekr.20060824111500.134: *7* Mode.putKeywords
def putKeywords (self, rule):
    """Write the keywords rule function; return the 'keywords' marker."""
    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_keywords(s, i)''' % (self.fileModeName, self.handlerCount)
    self.put(textwrap.dedent(s))
    return 'keywords'
#@+node:ekr.20060824111500.135: *7* Mode.putMarkFollowing
def putMarkFollowing (self, rule):
    """Write the rule function for a mark_following element; return its leadin char.

    Fix: at_word_start was filled from getBoolAttrib('at_word_end'), an
    attribute jEdit never defines, so it was always 'False'.
    """
    quote = self.quoteString
    seq = rule.getSeq(rule.name)

    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_mark_following(s, i, kind=%s, pattern=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s, exclude_match=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_start'),  # was 'at_word_end'
        rule.getBoolAttrib('exclude_match'),
    )
    self.put(textwrap.dedent(s))
    return seq[0]
#@+node:ekr.20060824111500.136: *7* Mode.putMarkPrevious
def putMarkPrevious (self, rule):
    """Write the rule function for a mark_previous element; return its leadin char.

    Fix: at_word_start was filled from getBoolAttrib('at_word_end'), an
    attribute jEdit never defines, so it was always 'False'.
    """
    quote = self.quoteString
    seq = rule.getSeq(rule.name)

    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_mark_previous(s, i, kind=%s, pattern=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s, exclude_match=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_start'),  # was 'at_word_end'
        rule.getBoolAttrib('exclude_match'),
    )
    self.put(textwrap.dedent(s))
    return seq[0]
#@+node:ekr.20060824111500.137: *7* Mode.putSeq
def putSeq (self, rule):
    """Write the rule function for a seq element; return its leadin char.

    Fix: at_word_start was filled from getBoolAttrib('at_word_end'), an
    attribute jEdit never defines, so it was always 'False'.
    """
    quote = self.quoteString
    seq = rule.getSeq(rule.name)

    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_seq(s, i, kind=%s, seq=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s, delegate=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_start'),  # was 'at_word_end'
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
    )
    self.put(textwrap.dedent(s))
    return seq[0]
#@+node:ekr.20060824111500.138: *7* Mode.putSeqRegexp
def putSeqRegexp (self, rule):
    """Write the rule function for a seq_regexp element; return its hash char.

    Fix: at_word_start was filled from getBoolAttrib('at_word_end'), an
    attribute jEdit never defines, so it was always 'False'.
    """
    quote = self.quoteString
    seq = rule.getSeq(rule.name)
    hash_char = rule.getStrAttrib('hash_char') or seq[0]

    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind=%s, regexp=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s, delegate=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_start'),  # was 'at_word_end'
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
    )
    self.put(textwrap.dedent(s))
    return hash_char
#@+node:ekr.20060824111500.139: *7* Mode.putSpan
def putSpan (self, rule):
    """Write the rule function for a span element; return its leadin char.

    Fix: at_word_start was filled from getBoolAttrib('at_word_end'), an
    attribute jEdit never defines, so it was always 'False'.
    """
    quote = self.quoteString
    begin, end = rule.getSpan()

    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_span(s, i, kind=%s, begin=%s, end=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s,
        delegate=%s, exclude_match=%s,
        no_escape=%s, no_line_break=%s, no_word_break=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(begin), quote(end),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_start'),  # was 'at_word_end'
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
        rule.getBoolAttrib('exclude_match'),
        rule.getBoolAttrib('no_escape'),
        rule.getBoolAttrib('no_line_break'),
        rule.getBoolAttrib('no_word_break'),
    )
    self.put(textwrap.dedent(s))
    return begin[0]
#@+node:ekr.20060824111500.140: *7* Mode.putSpanRegexp
def putSpanRegexp (self, rule):
    """Write the rule function for a span_regexp element; return its hash char.

    Fix: at_word_start was filled from getBoolAttrib('at_word_end'), an
    attribute jEdit never defines, so it was always 'False'.
    """
    quote = self.quoteString
    begin, end = rule.getSpan()
    hash_char = rule.getStrAttrib('hash_char') or begin[0]

    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_span_regexp(s, i, kind=%s, begin=%s, end=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s,
        delegate=%s, exclude_match=%s,
        no_escape=%s, no_line_break=%s, no_word_break=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(begin), quote(end),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_start'),  # was 'at_word_end'
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
        rule.getBoolAttrib('exclude_match'),
        rule.getBoolAttrib('no_escape'),
        rule.getBoolAttrib('no_line_break'),
        rule.getBoolAttrib('no_word_break'),
    )
    self.put(textwrap.dedent(s))
    return hash_char
#@+node:ekr.20060824111500.141: *7* Mode.putTerminate
def putTerminate (self, rule):
    """Write the rule function for a terminate element.

    Returns None (terminate rules have no leadin character), so putRules
    skips the rules-dict entry. Fix: `n == None` → `n is None` (PEP 8 E711).
    """
    quote = self.quoteString

    n = rule.getIntAttrib('at_char')
    if n is None:
        return
    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_terminate(s, i, kind=%s, at_char=%d)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        n,
    )
    self.put(textwrap.dedent(s))
#@+node:ekr.20060824111500.143: *6* Mode.write
def write (self, theFile, language):
    """Write the complete Python mode file for `language` to theFile."""
    # Compute all the letters that can occur in a keyword.
    self.keywordChars = [ch for ch in string.ascii_letters + string.digits + '@']
    for ruleset in self.rulesets:
        for rule in ruleset.rules:
            d = rule.keywordsDict
            for key in list(d.keys()):
                key = g.toUnicode(key)
                for ch in key:
                    if ch not in self.keywordChars and ch not in (' ', '\t', '\n'):
                        self.keywordChars.append(ch)

    self.keywordChars = ''.join(list(set(self.keywordChars)))
    self.outputFile = theFile
    # Emit the sections in the order the colorizer expects.
    self.put('# Leo colorizer control file for %s mode.\n' % language)
    self.put('# This file is in the public domain.\n\n')
    self.putModeProperties(language)
    self.putAttributes()
    self.putKeywordsData()
    self.putRules()
    self.putImportDict()
#@+node:ekr.20060824111500.142: *6* Mode.putRules
def putRules (self):

    '''Create all rule matchers, a rules dict for each ruleset and x.rulesDictDict.'''

    d = {} ; d2Count = 0
    for ruleset in self.rulesets:
        d2 = {}  # Maps leadin character -> list of rule-function names.
        prefix = f"{self.modeName}_{ruleset.name}"
        self.put(f"# Rules for {prefix} ruleset.\n")
        for rule in ruleset.rules:
            # putRule writes the function and returns its leadin character(s),
            # 'keywords', or '' (no rule generated).
            ch = self.putRule(rule)
            self.put('\n')
            if ch == 'keywords':
                # A keywords rule can start at any keyword character.
                for ch in self.keywordChars:
                    theList = d2.get(ch, [])
                    theList.append(f"{self.fileModeName}_rule{self.handlerCount-1}")
                    d2 [ch] = theList
            elif ch:
                theList = d2.get(ch, [])
                theList.append(f"{self.fileModeName}_rule{self.handlerCount-1}")
                d2 [ch] = theList
        # Create the rules dict for the ruleset.
        self.put(f"\n# Rules dict for {prefix} ruleset.\n")
        d2Count += 1
        name = f"rulesDict{d2Count}"
        self.putDictOfLists(name, d2)
        d [prefix] = name
    # Create rulesDictDict.
    self.put(f"# x.rulesDictDict for {self.modeName} mode.\n")
    self.putDict('rulesDictDict', d, escape=False)
#@+node:ekr.20060824111500.144: *5* Mode: Printing...
# Not used.
#@+node:ekr.20060824111500.145: *6* Mode.printModeAttributes, printRulesetAttributes & printAttributesHelper
def printModeAttributes (self):
    """Print the mode-level attributes (debug helper)."""
    self.printAttributesHelper('mode attributes', self.attributes)

def printRulesetAttributes (self, ruleset, tag=None):
    """Print one ruleset's attributes, tagged 'main ruleset' by default."""
    self.printAttributesHelper(tag or 'main ruleset', ruleset.attributes)

def printAttributesHelper (self, kind, attrs):
    """Print one attributes dict with an aligned tag."""
    print('%-15s' % (kind), 'attrs:', attrs)
#@+node:ekr.20060824111500.146: *6* Mode.printProperty
def printProperty (self, theProperty):
    """Print a property bunch's attributes, if it has any (debug helper)."""
    attrs = theProperty.attributes
    if attrs:
        self.printAttributesHelper('property', attrs)
#@+node:ekr.20060824111500.147: *6* Mode.printRule
def printRule (self, rule):
    """Print a summary of one rule bunch (debug helper).

    Keywords rules print a histogram of keyword classes; other rules
    print their attributes and contents, when present.
    """
    if rule.name == 'keywords':
        print('%-15s' % ('rule:keywords'))
        counts = {}
        for word, kind in rule.keywordsDict.items():
            counts[kind] = counts.get(kind, 0) + 1
        for kind in sorted(counts.keys()):
            print(f"{kind}: {counts.get(kind)}")
        print()
        return
    attrs = rule.attributes
    contents = rule.contents
    if attrs or contents:
        print('%-15s' % ('rule:' + rule.name))
        if attrs:
            print('attrs:', attrs)
        if contents:
            print('contents:', contents)
        print()
#@+node:ekr.20060824111500.148: *6* Mode.printRuleset
def printRuleset (self, ruleset, tag):
    """Print one ruleset's attributes and rules (debug helper).

    Bug fix: the original iterated self.rulesets[0].rules, ignoring the
    `ruleset` argument; it now prints the rules of the given ruleset.
    """
    self.printRulesetAttributes(ruleset, tag)
    for rule in ruleset.rules:
        self.printRule(rule)
#@+node:ekr.20060824111500.159: *4* class Rule
class Rule:
    """A class representing one xml rule."""

    def __init__ (self, name):
        self.attributes = {}  # Attribute name -> value (real bools for boolAttrs).
        self.contents = {}  # Element name -> accumulated character data.
        self.keywordsDict = {}  # Keyword -> keyword class ('keyword1', ...).
        self.name = name  # The rule element's tag, e.g. 'span' or 'seq'.

    def __str__ (self):
        # NOTE(review): 'conents' below is a typo for 'contents' in this
        # debug representation; left unchanged here since it is a runtime string.
        return (
            f"<Rule {self.name}\n"
            f"   attr: {g.dictToString(self.attributes)}\n"
            f"conents: {g.dictToString(self.contents)}"
        )

    __repr__ = __str__

    @others
#@+node:ekr.20060824111500.161: *5* rule.getters
def getBoolAttrib(self, name):
    """Return 'True'/'False' (strings, for code generation) for the attribute."""
    return 'True' if self.attributes.get(name) else 'False'

def getIntAttrib(self, name):
    """Return the attribute as an int, or None when missing or invalid."""
    raw = self.attributes.get(name)
    if raw is None:
        return None
    try:
        return int(raw)
    except ValueError:
        g.trace(f"bad int argument: {name} = {raw!r}")
        return None

def getSpan (self):
    """Return the (begin, end) contents of a span rule ('' when missing)."""
    return self.contents.get('begin', ''), self.contents.get('end', '')

def getStrAttrib(self, name):
    """Return the attribute as a string ('' when missing)."""
    return str(self.attributes.get(name, ''))

def getSeq(self, kind):
    """Return the contents accumulated for the given kind ('' when missing)."""
    return self.contents.get(kind, '')
#@+node:ekr.20060824111500.157: *4* class Ruleset
class Ruleset:
    """One <rules> element of a jEdit mode: attributes, properties and rules."""

    def __init__ (self, attributes, keywords, properties, rules):
        # Mutable arguments are copied so later parsing can't mutate this ruleset.
        self.name = munge(attributes.get('set', 'main'))  # 'main' for the unnamed ruleset.
        self.attributes = attributes.copy()  # A dict.
        self.properties = properties[:]  # A list.
        self.keywords = keywords  # A bunch.
        self.rules = rules[:]  # A list.
        self.defaultColor = self.attributes.get('default')

    def __str__ (self):
        return f"<ruleset {self.name}>"

    __repr__ = __str__
#@+node:ekr.20060824111500.162: *4* class ContentHandler(xml.sax.saxutils.XMLGenerator)
class ContentHandler (xml.sax.saxutils.XMLGenerator):

    '''A sax content handler class that handles jEdit language-description files.

    Creates a Mode that can be retrieved using the getMode method.'''

    # Methods are defined in the child nodes below.
    @others
#@+node:ekr.20060824111500.163: *5*  __init__ & helpers
def __init__ (self, inputFileName):
    """Ctor for ContentHandler: init the sax base class and all parse state."""
    # Init the base class.
    xml.sax.saxutils.XMLGenerator.__init__(self)
    self.inputFileName = inputFileName  # Name of the .xml mode file being parsed.

    # Options...
    self.ignoreWs = True  # True: don't print contents with only ws.
    self.newLineAfterStartElement = [  # Elements followed by a blank line in debug printing.
        'keywords', 'mode', 'props', 'property', 'rules', 'span', 'eol_span',
    ]

    # Semantics: most of these should be mode ivars.
    self.elementStack = []  # Stack of open element names (see startElement/endElement).
    self.errors = 0  # XML error count; nonzero makes getMode return None.
    self.mode = None # The present mode, or None if outside all modes.
    self.modes = [] # All modes defined here or by imports.
#@+node:ekr.20060824111500.164: *5* helpers
#@+node:ekr.20060824111500.165: *6* attrsToList
def attrsToList (self, attrs):
    """Convert a sax Attributes object to a list of g.Bunches with name/val.

    attrs: an Attributes item passed to startElement.
    """
    return [g.Bunch(name=n, val=attrs.getValue(n)) for n in attrs.getNames()]
#@+node:ekr.20060824111500.166: *6* attrsToString
def attrsToString (self, attrs, sep='\n'):
    """Convert the attributes to a single string.

    attrs: an Attributes item passed to startElement.
    sep: the separator character between attributes.
    """
    pieces = [
        f'{bunch.name}="{bunch.val}"' for bunch in self.attrsToList(attrs)
    ]
    return sep.join(pieces)
#@+node:ekr.20060824111500.168: *6* error
def error (self, message):
    """Report an XML error and bump the error count (which voids the mode)."""
    self.errors += 1
    print(f"\n\nXML error: {message}\n")
#@+node:ekr.20060824111500.169: *6* printStartElement
def printStartElement(self, name, attrs):
    """Print a start tag and its attributes (debug helper)."""
    name_s = g.toEncodedString(name, "ascii").strip()
    if attrs.getLength() > 0:
        attrs_s = self.attrsToString(attrs, sep=' ')
        # NOTE(review): no closing '>' is printed on this branch — confirm
        # whether that is intentional.
        print(f"<{name_s} {attrs_s}")
    else:
        print(f"<{name_s}>")

    if name.lower() in self.newLineAfterStartElement:
        print('')
#@+node:ekr.20060824111500.171: *5* sax over-rides
#@+node:ekr.20060824111500.172: *6*  Do nothing...
# Expected calls: required overrides with nothing to do...
def endDocument(self):
    """Required sax override: nothing to do."""

def startDocument(self):
    """Required sax override: nothing to do."""

# Trace unexpected calls...
def ignorableWhitespace(self):
    """Unexpected sax event: trace it."""
    g.trace()

def processingInstruction (self, target, data):
    """Unexpected sax event: trace it."""
    g.trace()

def skippedEntity(self, name):
    """Unexpected sax event: trace the entity name."""
    g.trace(name)

def startElementNS(self, name, qname, attrs):
    """Unexpected sax event: trace the element name."""
    g.trace(name)

def endElementNS(self, name, qname):
    """Unexpected sax event: trace the element name."""
    g.trace(name)
#@+node:ekr.20060824111500.176: *6* characters
def characters(self, content):
    """Handle SAX character data by forwarding it to the current mode."""
    content = content.replace('\r', '')
    stripped = content.strip()
    if stripped:
        content = stripped
    content = g.toEncodedString(content, "ascii")
    if self.elementStack:
        elementName = self.elementStack[-1].lower()
    else:
        elementName = '<no element name>'
    if not self.mode:
        self.error('characters outside of mode')
    else:
        self.mode.doContent(elementName, content)
#@+node:ekr.20060824111500.177: *6* elements & helpers
def endElement(self, name):
    """SAX callback: dispatch the end-element event, then pop the stack."""
    self.doEndElement(name)
    popped = self.elementStack.pop()
    assert name == popped

def startElement(self, name, attrs):
    """SAX callback: push the element name, then dispatch the event."""
    self.elementStack.append(name)
    self.doStartElement(name, attrs)
#@+node:ekr.20060824111500.179: *7* doStartElement
def doStartElement (self, elementName, attrs):
    """Dispatch a start-element event to the current mode.

    A 'mode' element creates the Mode object; any other element (or any
    attribute) seen outside a mode is reported as an error.
    """
    elementName = elementName.lower()
    if elementName == 'mode':
        if self.mode:
            self.error('Multiple modes')
        else:
            self.mode = Mode(self, self.inputFileName)
        return
    if not self.mode:
        self.error(f"Start element appears outside of Mode:{elementName}")
        for bunch in self.attrsToList(attrs):
            self.error(f"Attribute appears outside of Mode:{bunch.name}")
        return
    self.mode.startElement(elementName)
    for bunch in self.attrsToList(attrs):
        self.mode.doAttribute(bunch.name, bunch.val)
#@+node:ekr.20060824111500.180: *7* doEndElement
def doEndElement (self, elementName):
    """Forward an end-element event to the current mode, or record an error."""
    if not self.mode:
        self.error(f"End element appears outside of Mode:{elementName}")
    else:
        self.mode.endElement(elementName)
#@+node:ekr.20060824111500.181: *5* getMode
def getMode (self):
    """Return the parsed mode, or None if any errors were recorded."""
    if self.errors:
        return None
    return self.mode
#@+node:ekr.20060824111500.1: *3* @ignore Colorizing test scripts
#@+node:ekr.20060824111500.2: *4* @button colorize
# Colorize the body pane, then report how many tag ranges Tk created.
# Bug fix: the original used Python 2 print statements, which are syntax
# errors in Python 3 (the rest of this file already uses f-strings).
w = c.frame.body.bodyCtrl  # The Tk body widget.
c.frame.body.colorizer.colorize(p)
for name in w.tag_names():
    theList = w.tag_ranges(name)
    if theList:
        print('tag', name, len(theList))
#@+node:ekr.20060824111500.3: *4* @button red:f1
# Configure a red Tk text tag, then apply both 'red' and 'f1' tags to the
# two-character span around the insert point.
w = c.frame.body.bodyCtrl
w.tag_configure('red',background='red')
w.tag_add('red','insert-1c','insert+1c')
w.tag_add('f1','insert-1c','insert+1c')
#@+node:ekr.20060824111500.4: *4* @button blue:f2
# Configure a blue Tk text tag, then apply both 'blue' and 'f2' tags to the
# two-character span around the insert point.
w = c.frame.body.bodyCtrl
w.tag_configure('blue',background='blue')
w.tag_add('blue','insert-1c','insert+1c')
w.tag_add('f2','insert-1c','insert+1c')
#@+node:ekr.20060824111500.5: *4* @button print tags
# Print the number of (start, end) tag ranges per tag name, and the total.
# Bug fix: converted Python 2 print statements to Python 3 calls; '// 2'
# preserves the integer division the original relied on (tag_ranges returns
# start/end index pairs, so half its length is the range count).
w = c.frame.body.bodyCtrl
print('-' * 20)
total = 0
for name in w.tag_names():
    theList = w.tag_ranges(name)
    if theList:
        print(name, len(theList) // 2)
        total += len(theList) // 2
print('total tag ranges', total)
#@+node:ekr.20060824111500.6: *4* @button remove tags
# Remove every tag range from the body widget, reporting each tag as it goes.
# Bug fix: converted the Python 2 print statement to a Python 3 call; also
# use the already-unpacked (a, b) pair instead of re-indexing theList.
w = c.frame.body.bodyCtrl
for name in w.tag_names():
    theList = w.tag_ranges(name)
    if theList:
        print('removing', name, len(theList))
        while theList:
            a, b = theList[0], theList[1]
            w.tag_remove(name, a, b)
            theList = theList[2:]
#@+node:ekr.20060824111500.7: *4* @button print f1
# Print each (start, end) range carrying the 'f1' tag.
# Bug fix: converted the Python 2 print statement to a Python 3 call.
w = c.frame.body.bodyCtrl

def f1(a, b):
    """Report one tag range."""
    print('f1', 'a', a, 'b', b)

theList = w.tag_ranges('f1')
while theList:
    a, b = theList[0], theList[1]
    f1(a, b)
    theList = theList[2:]
#@+node:ekr.20060824111500.8: *4* test
@color

abdddddddddddddc
xyz

<< section ref >>

@nocolor
#@+node:ekr.20060824111500.9: *4* latex keywords
#If you see two identical words, with minor capitalization differences
#DO NOT ASSUME that they are the same word. For example \vert produces
#a single vertical line and \Vert produces a double vertical line

latex_special_keyword_characters = "@(){}%"  # Single characters that form LaTeX keywords when escaped (see the "\\@" etc. entries below).

# This is a comment.
'This is a string' # One more comment. Still fast.  Oh joy.

# LaTeX keywords recognized by the colorizer.
# Bug fix: the original was missing a comma after "\\hyphenation", so Python's
# implicit string concatenation silently merged it with "\\Im" into the single
# bogus entry "\\hyphenation\\Im", dropping both keywords.
# NOTE(review): several entries lack the leading backslash ("csc",
# "includeonly", "indent", "nwarrow", "longrightarrow", "theta") — presumably
# typos, but left as-is pending confirmation against a LaTeX command list.
latex_keywords = [
    # Special keywords.
    "\\%", # 11/9/03
    "\\@", "\\(", "\\)", "\\{", "\\}",
    #A
    "\\acute", "\\addcontentsline", "\\addtocontents", "\\addtocounter", "\\address",
    "\\addtolength", "\\addvspace", "\\AE", "\\ae", "\\aleph", "\\alph", "\\angle",
    "\\appendix",
    "\\approx", "\\arabic", "\\arccos", "\\arcsin", "\\arctan", "\\ast", "\\author",
    #B
    "\\b", "\\backmatter", "\\backslash", "\\bar", "\\baselineskip", "\\baselinestretch",
    "\\begin", "\\beta", "\\bezier", "\\bf", "\\bfseries", "\\bibitem", "\\bigcap",
    "\\bigcup", "\\bigodot", "\\bigoplus", "\\bigotimes", "\\bigskip", "\\biguplus",
    "\\bigvee", "\\bigwedge", "\\bmod", "\\boldmath", "\\Box", "\\breve", "\\bullet",
    #C
    "\\c", "\\cal", "\\caption", "\\cdot", "\\cdots", "\\centering", "\\chapter",
    "\\check", "\\chi", "\\circ", "\\circle", "\\cite", "\\cleardoublepage", "\\clearpage",
    "\\cline", "\\closing", "\\clubsuit", "\\coprod", "\\copywright", "\\cos", "\\cosh",
    "\\cot", "\\coth", "csc",
    #D
    "\\d", "\\dag", "\\dashbox", "\\date", "\\ddag", "\\ddot", "\\ddots", "\\decl",
    "\\deg", "\\Delta",
    "\\delta", "\\depthits", "\\det",
    "\\DH", "\\dh", "\\Diamond", "\\diamondsuit", "\\dim", "\\div", "\\DJ", "\\dj",
    "\\documentclass", "\\documentstyle",
    "\\dot", "\\dotfil", "\\downarrow",
    #E
    "\\ell", "\\em", "\\emph", "\\end", "\\enlargethispage", "\\ensuremath",
    "\\enumi", "\\enuii", "\\enumiii", "\\enuiv", "\\epsilon", "\\equation", "\\equiv",
    "\\eta", "\\example", "\\exists", "\\exp",
    #F
    "\\fbox", "\\figure", "\\flat", "\\flushbottom", "\\fnsymbol", "\\footnote",
    "\\footnotemark", "\\fotenotesize",
    "\\footnotetext", "\\forall", "\\frac", "\\frame", "\\framebox", "\\frenchspacing",
    "\\frontmatter",
    #G
    "\\Gamma", "\\gamma", "\\gcd", "\\geq", "\\gg", "\\grave", "\\guillemotleft",
    "\\guillemotright", "\\guilsinglleft", "\\guilsinglright",
    #H
    "\\H", "\\hat", "\\hbar", "\\heartsuit", "\\heightits", "\\hfill", "\\hline", "\\hom",
    "\\hrulefill", "\\hspace", "\\huge", "\\Huge", "\\hyphenation",
    #I
    "\\Im", "\\imath", "\\include", "includeonly", "indent", "\\index", "\\inf", "\\infty",
    "\\input", "\\int", "\\iota", "\\it", "\\item", "\\itshape",
    #J
    "\\jmath", "\\Join",
    #K
    "\\k", "\\kappa", "\\ker", "\\kill",
    #L
    "\\label", "\\Lambda", "\\lambda", "\\langle", "\\large", "\\Large", "\\LARGE",
    "\\LaTeX", "\\LaTeXe",
    "\\ldots", "\\leadsto", "\\left", "\\Leftarrow", "\\leftarrow", "\\lefteqn", "\\leq",
    "\\lg", "\\lhd", "\\lim", "\\liminf", "\\limsup", "\\line", "\\linebreak",
    "\\linethickness", "\\linewidth", "\\listfiles",
    "\\ll", "\\ln", "\\location", "\\log", "\\Longleftarrow", "\\longleftarrow",
    "\\Longrightarrow", "longrightarrow",
    #M
    "\\mainmatter", "\\makebox", "\\makeglossary", "\\makeindex","\\maketitle", "\\markboth", "\\markright",
    "\\mathbf", "\\mathcal", "\\mathit", "\\mathnormal", "\\mathop",
    "\\mathrm", "\\mathsf", "\\mathtt", "\\max", "\\mbox", "\\mdseries", "\\medskip",
    "\\mho", "\\min", "\\mp", "\\mpfootnote", "\\mu", "\\multicolumn", "\\multiput",
    #N
    "\\nabla", "\\natural", "\\nearrow", "\\neq", "\\newcommand", "\\newcounter",
    "\\newenvironment", "\\newfont",
    "\\newlength", "\\newline", "\\newpage", "\\newsavebox", "\\newtheorem", "\\NG", "\\ng",
    "\\nocite", "\\noindent", "\\nolinbreak", "\\nopagebreak", "\\normalsize",
    "\\not", "\\nu", "nwarrow",
    #O
    "\\Omega", "\\omega", "\\onecolumn", "\\oint", "\\opening", "\\oval",
    "\\overbrace", "\\overline",
    #P
    "\\P", "\\page", "\\pagebreak", "\\pagenumbering", "\\pageref", "\\pagestyle",
    "\\par", "\\parbox", "\\paragraph", "\\parindent", "\\parskip", "\\part",
    "\\partial", "\\per", "\\Phi", "\\phi", "\\Pi", "\\pi", "\\pm",
    "\\pmod", "\\pounds", "\\prime", "\\printindex", "\\prod", "\\propto", "\\protext",
    "\\providecomamnd", "\\Psi", "\\psi", "\\put",
    #Q
    "\\qbezier", "\\quoteblbase", "\\quotesinglbase",
    #R
    "\\r", "\\raggedbottom", "\\raggedleft", "\\raggedright", "\\raisebox", "\\rangle",
    "\\Re", "\\ref", "\\renewcommand", "\\renewenvironment", "\\rhd", "\\rho", "\\right",
    "\\Rightarrow", "\\rightarrow", "\\rm", "\\rmfamily",
    "\\Roman", "\\roman", "\\rule",
    #S
    "\\s", "\\samepage", "\\savebox", "\\sbox", "\\sc", "\\scriptsize", "\\scshape",
    "\\searrow", "\\sec", "\\section",
    "\\setcounter", "\\setlength", "\\settowidth", "\\settodepth", "\\settoheight",
    "\\settowidth", "\\sf", "\\sffamily", "\\sharp", "\\shortstack", "\\Sigma", "\\sigma",
    "\\signature", "\\sim", "\\simeq", "\\sin", "\\sinh", "\\sl", "\\SLiTeX",
    "\\slshape", "\\small", "\\smallskip", "\\spadesuit", "\\sqrt", "\\sqsubset",
    "\\sqsupset", "\\SS",
    "\\stackrel", "\\star", "\\subsection", "\\subset",
    "\\subsubsection", "\\sum", "\\sup", "\\supressfloats", "\\surd", "\\swarrow",
    #T
    "\\t", "\\table", "\\tableofcontents", "\\tabularnewline", "\\tan", "\\tanh",
    "\\tau", "\\telephone", "\\TeX", "\\textbf",
    "\\textbullet", "\\textcircled", "\\textcompworkmark", "\\textemdash",
    "\\textendash", "\\textexclamdown", "\\textheight", "\\textquestiondown",
    "\\textquoteblleft", "\\textquoteblright", "\\textquoteleft",
    "\\textperiod", "\\textquotebl", "\\textquoteright", "\\textmd", "\\textit", "\\textrm",
    "\\textsc", "\\textsl", "\\textsf", "\\textsuperscript", "\\texttt", "\\textup",
    "\\textvisiblespace", "\\textwidth", "\\TH", "\\th", "\\thanks", "\\thebibligraphy",
    "\\Theta", "theta",
    "\\tilde", "\\thinlines",
    "\\thispagestyle", "\\times", "\\tiny", "\\title", "\\today", "\\totalheightits",
    "\\triangle", "\\tt",
    "\\ttfamily", "\\twocoloumn", "\\typeout", "\\typein",
    #U
    "\\u", "\\underbrace", "\\underline", "\\unitlength", "\\unlhd", "\\unrhd", "\\Uparrow",
    "\\uparrow", "\\updownarrow", "\\upshape", "\\Upsilon", "\\upsilon", "\\usebox",
    "\\usecounter", "\\usepackage",
    #V
    "\\v", "\\value", "\\varepsilon", "\\varphi", "\\varpi", "\\varrho", "\\varsigma",
    "\\vartheta", "\\vdots", "\\vec", "\\vector", "\\verb", "\\Vert", "\\vert", "\\vfill",
    "\\vline", "\\vphantom", "\\vspace",
    #W
    "\\widehat", "\\widetilde", "\\widthits", "\\wp",
    #X
    "\\Xi", "\\xi",
    #Z
    "\\zeta" ]

# A test at the very end.  This is jolly.

# LaTeX keywords recognized by the colorizer (second copy of this test data).
# Bug fix: the original was missing a comma after "\\hyphenation", so Python's
# implicit string concatenation silently merged it with "\\Im" into the single
# bogus entry "\\hyphenation\\Im", dropping both keywords.
# NOTE(review): several entries lack the leading backslash ("csc",
# "includeonly", "indent", "nwarrow", "longrightarrow", "theta") — presumably
# typos, but left as-is pending confirmation against a LaTeX command list.
latex_keywords = [
    # Special keywords.
    "\\%", # 11/9/03
    "\\@", "\\(", "\\)", "\\{", "\\}",
    #A
    "\\acute", "\\addcontentsline", "\\addtocontents", "\\addtocounter", "\\address",
    "\\addtolength", "\\addvspace", "\\AE", "\\ae", "\\aleph", "\\alph", "\\angle",
    "\\appendix",
    "\\approx", "\\arabic", "\\arccos", "\\arcsin", "\\arctan", "\\ast", "\\author",
    #B
    "\\b", "\\backmatter", "\\backslash", "\\bar", "\\baselineskip", "\\baselinestretch",
    "\\begin", "\\beta", "\\bezier", "\\bf", "\\bfseries", "\\bibitem", "\\bigcap",
    "\\bigcup", "\\bigodot", "\\bigoplus", "\\bigotimes", "\\bigskip", "\\biguplus",
    "\\bigvee", "\\bigwedge", "\\bmod", "\\boldmath", "\\Box", "\\breve", "\\bullet",
    #C
    "\\c", "\\cal", "\\caption", "\\cdot", "\\cdots", "\\centering", "\\chapter",
    "\\check", "\\chi", "\\circ", "\\circle", "\\cite", "\\cleardoublepage", "\\clearpage",
    "\\cline", "\\closing", "\\clubsuit", "\\coprod", "\\copywright", "\\cos", "\\cosh",
    "\\cot", "\\coth", "csc",
    #D
    "\\d", "\\dag", "\\dashbox", "\\date", "\\ddag", "\\ddot", "\\ddots", "\\decl",
    "\\deg", "\\Delta",
    "\\delta", "\\depthits", "\\det",
    "\\DH", "\\dh", "\\Diamond", "\\diamondsuit", "\\dim", "\\div", "\\DJ", "\\dj",
    "\\documentclass", "\\documentstyle",
    "\\dot", "\\dotfil", "\\downarrow",
    #E
    "\\ell", "\\em", "\\emph", "\\end", "\\enlargethispage", "\\ensuremath",
    "\\enumi", "\\enuii", "\\enumiii", "\\enuiv", "\\epsilon", "\\equation", "\\equiv",
    "\\eta", "\\example", "\\exists", "\\exp",
    #F
    "\\fbox", "\\figure", "\\flat", "\\flushbottom", "\\fnsymbol", "\\footnote",
    "\\footnotemark", "\\fotenotesize",
    "\\footnotetext", "\\forall", "\\frac", "\\frame", "\\framebox", "\\frenchspacing",
    "\\frontmatter",
    #G
    "\\Gamma", "\\gamma", "\\gcd", "\\geq", "\\gg", "\\grave", "\\guillemotleft",
    "\\guillemotright", "\\guilsinglleft", "\\guilsinglright",
    #H
    "\\H", "\\hat", "\\hbar", "\\heartsuit", "\\heightits", "\\hfill", "\\hline", "\\hom",
    "\\hrulefill", "\\hspace", "\\huge", "\\Huge", "\\hyphenation",
    #I
    "\\Im", "\\imath", "\\include", "includeonly", "indent", "\\index", "\\inf", "\\infty",
    "\\input", "\\int", "\\iota", "\\it", "\\item", "\\itshape",
    #J
    "\\jmath", "\\Join",
    #K
    "\\k", "\\kappa", "\\ker", "\\kill",
    #L
    "\\label", "\\Lambda", "\\lambda", "\\langle", "\\large", "\\Large", "\\LARGE",
    "\\LaTeX", "\\LaTeXe",
    "\\ldots", "\\leadsto", "\\left", "\\Leftarrow", "\\leftarrow", "\\lefteqn", "\\leq",
    "\\lg", "\\lhd", "\\lim", "\\liminf", "\\limsup", "\\line", "\\linebreak",
    "\\linethickness", "\\linewidth", "\\listfiles",
    "\\ll", "\\ln", "\\location", "\\log", "\\Longleftarrow", "\\longleftarrow",
    "\\Longrightarrow", "longrightarrow",
    #M
    "\\mainmatter", "\\makebox", "\\makeglossary", "\\makeindex","\\maketitle", "\\markboth", "\\markright",
    "\\mathbf", "\\mathcal", "\\mathit", "\\mathnormal", "\\mathop",
    "\\mathrm", "\\mathsf", "\\mathtt", "\\max", "\\mbox", "\\mdseries", "\\medskip",
    "\\mho", "\\min", "\\mp", "\\mpfootnote", "\\mu", "\\multicolumn", "\\multiput",
    #N
    "\\nabla", "\\natural", "\\nearrow", "\\neq", "\\newcommand", "\\newcounter",
    "\\newenvironment", "\\newfont",
    "\\newlength", "\\newline", "\\newpage", "\\newsavebox", "\\newtheorem", "\\NG", "\\ng",
    "\\nocite", "\\noindent", "\\nolinbreak", "\\nopagebreak", "\\normalsize",
    "\\not", "\\nu", "nwarrow",
    #O
    "\\Omega", "\\omega", "\\onecolumn", "\\oint", "\\opening", "\\oval",
    "\\overbrace", "\\overline",
    #P
    "\\P", "\\page", "\\pagebreak", "\\pagenumbering", "\\pageref", "\\pagestyle",
    "\\par", "\\parbox", "\\paragraph", "\\parindent", "\\parskip", "\\part",
    "\\partial", "\\per", "\\Phi", "\\phi", "\\Pi", "\\pi", "\\pm",
    "\\pmod", "\\pounds", "\\prime", "\\printindex", "\\prod", "\\propto", "\\protext",
    "\\providecomamnd", "\\Psi", "\\psi", "\\put",
    #Q
    "\\qbezier", "\\quoteblbase", "\\quotesinglbase",
    #R
    "\\r", "\\raggedbottom", "\\raggedleft", "\\raggedright", "\\raisebox", "\\rangle",
    "\\Re", "\\ref", "\\renewcommand", "\\renewenvironment", "\\rhd", "\\rho", "\\right",
    "\\Rightarrow", "\\rightarrow", "\\rm", "\\rmfamily",
    "\\Roman", "\\roman", "\\rule",
    #S
    "\\s", "\\samepage", "\\savebox", "\\sbox", "\\sc", "\\scriptsize", "\\scshape",
    "\\searrow", "\\sec", "\\section",
    "\\setcounter", "\\setlength", "\\settowidth", "\\settodepth", "\\settoheight",
    "\\settowidth", "\\sf", "\\sffamily", "\\sharp", "\\shortstack", "\\Sigma", "\\sigma",
    "\\signature", "\\sim", "\\simeq", "\\sin", "\\sinh", "\\sl", "\\SLiTeX",
    "\\slshape", "\\small", "\\smallskip", "\\spadesuit", "\\sqrt", "\\sqsubset",
    "\\sqsupset", "\\SS",
    "\\stackrel", "\\star", "\\subsection", "\\subset",
    "\\subsubsection", "\\sum", "\\sup", "\\supressfloats", "\\surd", "\\swarrow",
    #T
    "\\t", "\\table", "\\tableofcontents", "\\tabularnewline", "\\tan", "\\tanh",
    "\\tau", "\\telephone", "\\TeX", "\\textbf",
    "\\textbullet", "\\textcircled", "\\textcompworkmark", "\\textemdash",
    "\\textendash", "\\textexclamdown", "\\textheight", "\\textquestiondown",
    "\\textquoteblleft", "\\textquoteblright", "\\textquoteleft",
    "\\textperiod", "\\textquotebl", "\\textquoteright", "\\textmd", "\\textit", "\\textrm",
    "\\textsc", "\\textsl", "\\textsf", "\\textsuperscript", "\\texttt", "\\textup",
    "\\textvisiblespace", "\\textwidth", "\\TH", "\\th", "\\thanks", "\\thebibligraphy",
    "\\Theta", "theta",
    "\\tilde", "\\thinlines",
    "\\thispagestyle", "\\times", "\\tiny", "\\title", "\\today", "\\totalheightits",
    "\\triangle", "\\tt",
    "\\ttfamily", "\\twocoloumn", "\\typeout", "\\typein",
    #U
    "\\u", "\\underbrace", "\\underline", "\\unitlength", "\\unlhd", "\\unrhd", "\\Uparrow",
    "\\uparrow", "\\updownarrow", "\\upshape", "\\Upsilon", "\\upsilon", "\\usebox",
    "\\usecounter", "\\usepackage",
    #V
    "\\v", "\\value", "\\varepsilon", "\\varphi", "\\varpi", "\\varrho", "\\varsigma",
    "\\vartheta", "\\vdots", "\\vec", "\\vector", "\\verb", "\\Vert", "\\vert", "\\vfill",
    "\\vline", "\\vphantom", "\\vspace",
    #W
    "\\widehat", "\\widetilde", "\\widthits", "\\wp",
    #X
    "\\Xi", "\\xi",
    #Z
    "\\zeta" ]

# A test at the very end.  This is jolly.

# LaTeX keywords recognized by the colorizer (third copy of this test data).
# Bug fix: the original was missing a comma after "\\hyphenation", so Python's
# implicit string concatenation silently merged it with "\\Im" into the single
# bogus entry "\\hyphenation\\Im", dropping both keywords.
# NOTE(review): several entries lack the leading backslash ("csc",
# "includeonly", "indent", "nwarrow", "longrightarrow", "theta") — presumably
# typos, but left as-is pending confirmation against a LaTeX command list.
latex_keywords = [
    # Special keywords.
    "\\%", # 11/9/03
    "\\@", "\\(", "\\)", "\\{", "\\}",
    #A
    "\\acute", "\\addcontentsline", "\\addtocontents", "\\addtocounter", "\\address",
    "\\addtolength", "\\addvspace", "\\AE", "\\ae", "\\aleph", "\\alph", "\\angle",
    "\\appendix",
    "\\approx", "\\arabic", "\\arccos", "\\arcsin", "\\arctan", "\\ast", "\\author",
    #B
    "\\b", "\\backmatter", "\\backslash", "\\bar", "\\baselineskip", "\\baselinestretch",
    "\\begin", "\\beta", "\\bezier", "\\bf", "\\bfseries", "\\bibitem", "\\bigcap",
    "\\bigcup", "\\bigodot", "\\bigoplus", "\\bigotimes", "\\bigskip", "\\biguplus",
    "\\bigvee", "\\bigwedge", "\\bmod", "\\boldmath", "\\Box", "\\breve", "\\bullet",
    #C
    "\\c", "\\cal", "\\caption", "\\cdot", "\\cdots", "\\centering", "\\chapter",
    "\\check", "\\chi", "\\circ", "\\circle", "\\cite", "\\cleardoublepage", "\\clearpage",
    "\\cline", "\\closing", "\\clubsuit", "\\coprod", "\\copywright", "\\cos", "\\cosh",
    "\\cot", "\\coth", "csc",
    #D
    "\\d", "\\dag", "\\dashbox", "\\date", "\\ddag", "\\ddot", "\\ddots", "\\decl",
    "\\deg", "\\Delta",
    "\\delta", "\\depthits", "\\det",
    "\\DH", "\\dh", "\\Diamond", "\\diamondsuit", "\\dim", "\\div", "\\DJ", "\\dj",
    "\\documentclass", "\\documentstyle",
    "\\dot", "\\dotfil", "\\downarrow",
    #E
    "\\ell", "\\em", "\\emph", "\\end", "\\enlargethispage", "\\ensuremath",
    "\\enumi", "\\enuii", "\\enumiii", "\\enuiv", "\\epsilon", "\\equation", "\\equiv",
    "\\eta", "\\example", "\\exists", "\\exp",
    #F
    "\\fbox", "\\figure", "\\flat", "\\flushbottom", "\\fnsymbol", "\\footnote",
    "\\footnotemark", "\\fotenotesize",
    "\\footnotetext", "\\forall", "\\frac", "\\frame", "\\framebox", "\\frenchspacing",
    "\\frontmatter",
    #G
    "\\Gamma", "\\gamma", "\\gcd", "\\geq", "\\gg", "\\grave", "\\guillemotleft",
    "\\guillemotright", "\\guilsinglleft", "\\guilsinglright",
    #H
    "\\H", "\\hat", "\\hbar", "\\heartsuit", "\\heightits", "\\hfill", "\\hline", "\\hom",
    "\\hrulefill", "\\hspace", "\\huge", "\\Huge", "\\hyphenation",
    #I
    "\\Im", "\\imath", "\\include", "includeonly", "indent", "\\index", "\\inf", "\\infty",
    "\\input", "\\int", "\\iota", "\\it", "\\item", "\\itshape",
    #J
    "\\jmath", "\\Join",
    #K
    "\\k", "\\kappa", "\\ker", "\\kill",
    #L
    "\\label", "\\Lambda", "\\lambda", "\\langle", "\\large", "\\Large", "\\LARGE",
    "\\LaTeX", "\\LaTeXe",
    "\\ldots", "\\leadsto", "\\left", "\\Leftarrow", "\\leftarrow", "\\lefteqn", "\\leq",
    "\\lg", "\\lhd", "\\lim", "\\liminf", "\\limsup", "\\line", "\\linebreak",
    "\\linethickness", "\\linewidth", "\\listfiles",
    "\\ll", "\\ln", "\\location", "\\log", "\\Longleftarrow", "\\longleftarrow",
    "\\Longrightarrow", "longrightarrow",
    #M
    "\\mainmatter", "\\makebox", "\\makeglossary", "\\makeindex","\\maketitle", "\\markboth", "\\markright",
    "\\mathbf", "\\mathcal", "\\mathit", "\\mathnormal", "\\mathop",
    "\\mathrm", "\\mathsf", "\\mathtt", "\\max", "\\mbox", "\\mdseries", "\\medskip",
    "\\mho", "\\min", "\\mp", "\\mpfootnote", "\\mu", "\\multicolumn", "\\multiput",
    #N
    "\\nabla", "\\natural", "\\nearrow", "\\neq", "\\newcommand", "\\newcounter",
    "\\newenvironment", "\\newfont",
    "\\newlength", "\\newline", "\\newpage", "\\newsavebox", "\\newtheorem", "\\NG", "\\ng",
    "\\nocite", "\\noindent", "\\nolinbreak", "\\nopagebreak", "\\normalsize",
    "\\not", "\\nu", "nwarrow",
    #O
    "\\Omega", "\\omega", "\\onecolumn", "\\oint", "\\opening", "\\oval",
    "\\overbrace", "\\overline",
    #P
    "\\P", "\\page", "\\pagebreak", "\\pagenumbering", "\\pageref", "\\pagestyle",
    "\\par", "\\parbox", "\\paragraph", "\\parindent", "\\parskip", "\\part",
    "\\partial", "\\per", "\\Phi", "\\phi", "\\Pi", "\\pi", "\\pm",
    "\\pmod", "\\pounds", "\\prime", "\\printindex", "\\prod", "\\propto", "\\protext",
    "\\providecomamnd", "\\Psi", "\\psi", "\\put",
    #Q
    "\\qbezier", "\\quoteblbase", "\\quotesinglbase",
    #R
    "\\r", "\\raggedbottom", "\\raggedleft", "\\raggedright", "\\raisebox", "\\rangle",
    "\\Re", "\\ref", "\\renewcommand", "\\renewenvironment", "\\rhd", "\\rho", "\\right",
    "\\Rightarrow", "\\rightarrow", "\\rm", "\\rmfamily",
    "\\Roman", "\\roman", "\\rule",
    #S
    "\\s", "\\samepage", "\\savebox", "\\sbox", "\\sc", "\\scriptsize", "\\scshape",
    "\\searrow", "\\sec", "\\section",
    "\\setcounter", "\\setlength", "\\settowidth", "\\settodepth", "\\settoheight",
    "\\settowidth", "\\sf", "\\sffamily", "\\sharp", "\\shortstack", "\\Sigma", "\\sigma",
    "\\signature", "\\sim", "\\simeq", "\\sin", "\\sinh", "\\sl", "\\SLiTeX",
    "\\slshape", "\\small", "\\smallskip", "\\spadesuit", "\\sqrt", "\\sqsubset",
    "\\sqsupset", "\\SS",
    "\\stackrel", "\\star", "\\subsection", "\\subset",
    "\\subsubsection", "\\sum", "\\sup", "\\supressfloats", "\\surd", "\\swarrow",
    #T
    "\\t", "\\table", "\\tableofcontents", "\\tabularnewline", "\\tan", "\\tanh",
    "\\tau", "\\telephone", "\\TeX", "\\textbf",
    "\\textbullet", "\\textcircled", "\\textcompworkmark", "\\textemdash",
    "\\textendash", "\\textexclamdown", "\\textheight", "\\textquestiondown",
    "\\textquoteblleft", "\\textquoteblright", "\\textquoteleft",
    "\\textperiod", "\\textquotebl", "\\textquoteright", "\\textmd", "\\textit", "\\textrm",
    "\\textsc", "\\textsl", "\\textsf", "\\textsuperscript", "\\texttt", "\\textup",
    "\\textvisiblespace", "\\textwidth", "\\TH", "\\th", "\\thanks", "\\thebibligraphy",
    "\\Theta", "theta",
    "\\tilde", "\\thinlines",
    "\\thispagestyle", "\\times", "\\tiny", "\\title", "\\today", "\\totalheightits",
    "\\triangle", "\\tt",
    "\\ttfamily", "\\twocoloumn", "\\typeout", "\\typein",
    #U
    "\\u", "\\underbrace", "\\underline", "\\unitlength", "\\unlhd", "\\unrhd", "\\Uparrow",
    "\\uparrow", "\\updownarrow", "\\upshape", "\\Upsilon", "\\upsilon", "\\usebox",
    "\\usecounter", "\\usepackage",
    #V
    "\\v", "\\value", "\\varepsilon", "\\varphi", "\\varpi", "\\varrho", "\\varsigma",
    "\\vartheta", "\\vdots", "\\vec", "\\vector", "\\verb", "\\Vert", "\\vert", "\\vfill",
    "\\vline", "\\vphantom", "\\vspace",
    #W
    "\\widehat", "\\widetilde", "\\widthits", "\\wp", # This is a test.
    #X
    "\\Xi", "\\xi",
    #Z
    "\\zeta" ]

# A test at the very end.  This is jolly.
#@+node:ekr.20060824111500.10: *3* Colorizer tests
#@+node:ekr.20060824111500.11: *4* C
@color
@language c

#include abc

// tests

/* test
end of test. */

for (i = 1; i < 6; i++) {
    continue
#@+node:ekr.20060824111500.12: *4* Python
@color

<< test >>

@doc test
another line

@c

'test'

'''test'''

#@verbatim
# @nocolor

for i = 1;
    yield
#@+node:ekr.20060824111500.13: *4* php.py
# Leo colorizer control file for php mode.
# This file is in the public domain.

# Properties for php mode.
# NOTE(review): the comment delimiters are HTML's ("<!--"/"-->"), presumably
# because the top-level ruleset starts out in markup — confirm against the
# jEdit php mode this was generated from.
properties = {
	"commentEnd": "-->",
	"commentStart": "<!--",
	"indentCloseBrackets": "}",
	"indentOpenBrackets": "{",
	"lineUpClosingBracket": "true",
}

# Attributes dict for php_main ruleset.
php_main_attributes_dict = {
	"default": "null",
	"digit_re": "",
	"highlight_digits": "true",
	"ignore_case": "true",
	"no_word_sep": "",
}

# Attributes dict for php_tags ruleset.
php_tags_attributes_dict = {
	"default": "MARKUP",
	"digit_re": "",
	"highlight_digits": "true",
	"ignore_case": "true",
	"no_word_sep": "",
}

# Attributes dict for php_tags_literal ruleset.
php_tags_literal_attributes_dict = {
	"default": "LITERAL1",
	"digit_re": "",
	"highlight_digits": "true",
	"ignore_case": "true",
	"no_word_sep": "",
}

# Attributes dict for php_php ruleset.
php_php_attributes_dict = {
	"default": "LITERAL1",
	"digit_re": "",
	"highlight_digits": "true",
	"ignore_case": "true",
	"no_word_sep": "",
}

# Attributes dict for php_php_literal ruleset.
php_php_literal_attributes_dict = {
	"default": "LITERAL1",
	"digit_re": "",
	"highlight_digits": "true",
	"ignore_case": "true",
	"no_word_sep": "",
}

# Attributes dict for php_javascript ruleset.
php_javascript_attributes_dict = {
	"default": "MARKUP",
	"digit_re": "",
	"highlight_digits": "true",
	"ignore_case": "true",
	"no_word_sep": "",
}

# Attributes dict for php_javascript_php ruleset.
php_javascript_php_attributes_dict = {
	"default": "MARKUP",
	"digit_re": "",
	"highlight_digits": "true",
	"ignore_case": "true",
	"no_word_sep": "",
}

# Attributes dict for php_phpdoc ruleset.
php_phpdoc_attributes_dict = {
	"default": "COMMENT3",
	"digit_re": "",
	"highlight_digits": "true",
	"ignore_case": "true",
	"no_word_sep": "",
}

# Dictionary of attributes dictionaries for php mode.
# Maps each ruleset name to its attributes dict defined above.
attributesDictDict = {
	"php_javascript": php_javascript_attributes_dict,
	"php_javascript_php": php_javascript_php_attributes_dict,
	"php_main": php_main_attributes_dict,
	"php_php": php_php_attributes_dict,
	"php_php_literal": php_php_literal_attributes_dict,
	"php_phpdoc": php_phpdoc_attributes_dict,
	"php_tags": php_tags_attributes_dict,
	"php_tags_literal": php_tags_literal_attributes_dict,
}

# The markup-level rulesets below define no keywords of their own.

# Keywords dict for php_main ruleset.
php_main_keywords_dict = {}

# Keywords dict for php_tags ruleset.
php_tags_keywords_dict = {}

# Keywords dict for php_tags_literal ruleset.
php_tags_literal_keywords_dict = {}

# Keywords dict for php_php ruleset.
php_php_keywords_dict = {
	"COM_invoke": "keyword2",
	"COM_load": "keyword2",
	"__CLASS__": "keyword3",
	"__FILE__": "keyword3",
	"__FUNCTION__": "keyword3",
	"__LINE__": "keyword3",
	"__METHOD__": "keyword3",
	"abs": "keyword2",
	"abstract": "keyword1",
	"accept_connect": "keyword2",
	"acos": "keyword2",
	"add": "keyword2",
	"add_iovec": "keyword2",
	"addaction": "keyword2",
	"addcolor": "keyword2",
	"addcslashes": "keyword2",
	"addentry": "keyword2",
	"addfill": "keyword2",
	"addshape": "keyword2",
	"addslashes": "keyword2",
	"addstring": "keyword2",
	"align": "keyword2",
	"and": "operator",
	"apache_child_terminate": "keyword2",
	"apache_lookup_uri": "keyword2",
	"apache_note": "keyword2",
	"apache_sub_req": "keyword2",
	"array": "keyword1",
	"array_combine": "keyword2",
	"array_count_values": "keyword2",
	"array_diff": "keyword2",
	"array_diff_assoc": "keyword2",
	"array_diff_uassoc": "keyword2",
	"array_filter": "keyword2",
	"array_flip": "keyword2",
	"array_intersect": "keyword2",
	"array_intersect_assoc": "keyword2",
	"array_keys": "keyword2",
	"array_map": "keyword2",
	"array_merge": "keyword2",
	"array_merge_recursive": "keyword2",
	"array_multisort": "keyword2",
	"array_pad": "keyword2",
	"array_pop": "keyword2",
	"array_push": "keyword2",
	"array_rand": "keyword2",
	"array_reduce": "keyword2",
	"array_reverse": "keyword2",
	"array_search": "keyword2",
	"array_shift": "keyword2",
	"array_slice": "keyword2",
	"array_splice": "keyword2",
	"array_sum": "keyword2",
	"array_udiff": "keyword2",
	"array_udiff_assoc": "keyword2",
	"array_udiff_uassoc": "keyword2",
	"array_unique": "keyword2",
	"array_unshift": "keyword2",
	"array_values": "keyword2",
	"array_walk": "keyword2",
	"array_walk_recursive": "keyword2",
	"arsort": "keyword2",
	"as": "keyword1",
	"asin": "keyword2",
	"asort": "keyword2",
	"aspell_check": "keyword2",
	"aspell_check_raw": "keyword2",
	"aspell_new": "keyword2",
	"aspell_suggest": "keyword2",
	"assert": "keyword2",
	"assert_options": "keyword2",
	"atan": "keyword2",
	"atan2": "keyword2",
	"base64_decode": "keyword2",
	"base64_encode": "keyword2",
	"base_convert": "keyword2",
	"basename": "keyword2",
	"bcadd": "keyword2",
	"bccomp": "keyword2",
	"bcdiv": "keyword2",
	"bcmod": "keyword2",
	"bcmul": "keyword2",
	"bcpow": "keyword2",
	"bcscale": "keyword2",
	"bcsqrt": "keyword2",
	"bcsub": "keyword2",
	"bin2hex": "keyword2",
	"bind": "keyword2",
	"bindec": "keyword2",
	"bindtextdomain": "keyword2",
	"break": "keyword1",
	"build_iovec": "keyword2",
	"bzclose": "keyword2",
	"bzcompress": "keyword2",
	"bzdecompress": "keyword2",
	"bzerrno": "keyword2",
	"bzerror": "keyword2",
	"bzerrstr": "keyword2",
	"bzflush": "keyword2",
	"bzopen": "keyword2",
	"bzread": "keyword2",
	"bzwrite": "keyword2",
	"call_user_func": "keyword2",
	"call_user_func_array": "keyword2",
	"call_user_method": "keyword2",
	"call_user_method_array": "keyword2",
	"case": "keyword1",
	"catch": "keyword1",
	"ccvs_add": "keyword2",
	"ccvs_auth": "keyword2",
	"ccvs_command": "keyword2",
	"ccvs_count": "keyword2",
	"ccvs_delete": "keyword2",
	"ccvs_done": "keyword2",
	"ccvs_init": "keyword2",
	"ccvs_lookup": "keyword2",
	"ccvs_new": "keyword2",
	"ccvs_report": "keyword2",
	"ccvs_return": "keyword2",
	"ccvs_reverse": "keyword2",
	"ccvs_sale": "keyword2",
	"ccvs_status": "keyword2",
	"ccvs_textvalue": "keyword2",
	"ccvs_void": "keyword2",
	"ceil": "keyword2",
	"chdir": "keyword2",
	"checkdate": "keyword2",
	"checkdnsrr": "keyword2",
	"chgrp": "keyword2",
	"chmod": "keyword2",
	"chop": "keyword2",
	"chown": "keyword2",
	"chr": "keyword2",
	"chroot": "keyword2",
	"chunk_split": "keyword2",
	"class": "keyword1",
	"class_exists": "keyword2",
	"clearstatcache": "keyword2",
	"clone": "keyword1",
	"close": "keyword2",
	"closedir": "keyword2",
	"closelog": "keyword2",
	"com_get": "keyword2",
	"com_propget": "keyword2",
	"com_propput": "keyword2",
	"com_propset": "keyword2",
	"com_set": "keyword2",
	"compact": "keyword2",
	"confirm_cybermut_compiled": "keyword2",
	"confirm_extname_compiled": "keyword2",
	"connect": "keyword2",
	"connection_aborted": "keyword2",
	"connection_status": "keyword2",
	"const": "keyword1",
	"constant": "keyword2",
	"continue": "keyword1",
	"convert_cyr_string": "keyword2",
	"convert_uudecode": "keyword2",
	"convert_uuencode": "keyword2",
	"copy": "keyword2",
	"cos": "keyword2",
	"count": "keyword2",
	"count_chars": "keyword2",
	"cpdf_add_annotation": "keyword2",
	"cpdf_add_outline": "keyword2",
	"cpdf_arc": "keyword2",
	"cpdf_begin_text": "keyword2",
	"cpdf_circle": "keyword2",
	"cpdf_clip": "keyword2",
	"cpdf_close": "keyword2",
	"cpdf_closepath": "keyword2",
	"cpdf_closepath_fill_stroke": "keyword2",
	"cpdf_closepath_stroke": "keyword2",
	"cpdf_continue_text": "keyword2",
	"cpdf_curveto": "keyword2",
	"cpdf_end_text": "keyword2",
	"cpdf_fill": "keyword2",
	"cpdf_fill_stroke": "keyword2",
	"cpdf_finalize": "keyword2",
	"cpdf_finalize_page": "keyword2",
	"cpdf_global_set_document_limits": "keyword2",
	"cpdf_import_jpeg": "keyword2",
	"cpdf_lineto": "keyword2",
	"cpdf_moveto": "keyword2",
	"cpdf_newpath": "keyword2",
	"cpdf_open": "keyword2",
	"cpdf_output_buffer": "keyword2",
	"cpdf_page_init": "keyword2",
	"cpdf_place_inline_image": "keyword2",
	"cpdf_rect": "keyword2",
	"cpdf_restore": "keyword2",
	"cpdf_rlineto": "keyword2",
	"cpdf_rmoveto": "keyword2",
	"cpdf_rotate": "keyword2",
	"cpdf_rotate_text": "keyword2",
	"cpdf_save": "keyword2",
	"cpdf_save_to_file": "keyword2",
	"cpdf_scale": "keyword2",
	"cpdf_set_action_url": "keyword2",
	"cpdf_set_char_spacing": "keyword2",
	"cpdf_set_creator": "keyword2",
	"cpdf_set_current_page": "keyword2",
	"cpdf_set_font": "keyword2",
	"cpdf_set_font_directories": "keyword2",
	"cpdf_set_font_map_file": "keyword2",
	"cpdf_set_horiz_scaling": "keyword2",
	"cpdf_set_keywords": "keyword2",
	"cpdf_set_leading": "keyword2",
	"cpdf_set_page_animation": "keyword2",
	"cpdf_set_subject": "keyword2",
	"cpdf_set_text_matrix": "keyword2",
	"cpdf_set_text_pos": "keyword2",
	"cpdf_set_text_rendering": "keyword2",
	"cpdf_set_text_rise": "keyword2",
	"cpdf_set_title": "keyword2",
	"cpdf_set_viewer_preferences": "keyword2",
	"cpdf_set_word_spacing": "keyword2",
	"cpdf_setdash": "keyword2",
	"cpdf_setflat": "keyword2",
	"cpdf_setgray": "keyword2",
	"cpdf_setgray_fill": "keyword2",
	"cpdf_setgray_stroke": "keyword2",
	"cpdf_setlinecap": "keyword2",
	"cpdf_setlinejoin": "keyword2",
	"cpdf_setlinewidth": "keyword2",
	"cpdf_setmiterlimit": "keyword2",
	"cpdf_setrgbcolor": "keyword2",
	"cpdf_setrgbcolor_fill": "keyword2",
	"cpdf_setrgbcolor_stroke": "keyword2",
	"cpdf_show": "keyword2",
	"cpdf_show_xy": "keyword2",
	"cpdf_stringwidth": "keyword2",
	"cpdf_stroke": "keyword2",
	"cpdf_text": "keyword2",
	"cpdf_translate": "keyword2",
	"crack_check": "keyword2",
	"crack_closedict": "keyword2",
	"crack_getlastmessage": "keyword2",
	"crack_opendict": "keyword2",
	"crash": "keyword2",
	"crc32": "keyword2",
	"create_function": "keyword2",
	"crypt": "keyword2",
	"ctype_alnum": "keyword2",
	"ctype_alpha": "keyword2",
	"ctype_cntrl": "keyword2",
	"ctype_digit": "keyword2",
	"ctype_graph": "keyword2",
	"ctype_lower": "keyword2",
	"ctype_print": "keyword2",
	"ctype_punct": "keyword2",
	"ctype_space": "keyword2",
	"ctype_upper": "keyword2",
	"ctype_xdigit": "keyword2",
	"curl_close": "keyword2",
	"curl_errno": "keyword2",
	"curl_error": "keyword2",
	"curl_exec": "keyword2",
	"curl_getinfo": "keyword2",
	"curl_init": "keyword2",
	"curl_setopt": "keyword2",
	"curl_version": "keyword2",
	"current": "keyword2",
	"cv_add": "keyword2",
	"cv_auth": "keyword2",
	"cv_command": "keyword2",
	"cv_count": "keyword2",
	"cv_delete": "keyword2",
	"cv_done": "keyword2",
	"cv_init": "keyword2",
	"cv_lookup": "keyword2",
	"cv_new": "keyword2",
	"cv_report": "keyword2",
	"cv_return": "keyword2",
	"cv_reverse": "keyword2",
	"cv_sale": "keyword2",
	"cv_status": "keyword2",
	"cv_textvalue": "keyword2",
	"cv_void": "keyword2",
	"cybercash_base64_decode": "keyword2",
	"cybercash_base64_encode": "keyword2",
	"cybercash_decr": "keyword2",
	"cybercash_encr": "keyword2",
	"cybermut_creerformulairecm": "keyword2",
	"cybermut_creerreponsecm": "keyword2",
	"cybermut_testmac": "keyword2",
	"date": "keyword2",
	"dba_close": "keyword2",
	"dba_delete": "keyword2",
	"dba_exists": "keyword2",
	"dba_fetch": "keyword2",
	"dba_firstkey": "keyword2",
	"dba_insert": "keyword2",
	"dba_nextkey": "keyword2",
	"dba_open": "keyword2",
	"dba_optimize": "keyword2",
	"dba_popen": "keyword2",
	"dba_replace": "keyword2",
	"dba_sync": "keyword2",
	"dbase_add_record": "keyword2",
	"dbase_close": "keyword2",
	"dbase_create": "keyword2",
	"dbase_delete_record": "keyword2",
	"dbase_get_record": "keyword2",
	"dbase_get_record_with_names": "keyword2",
	"dbase_numfields": "keyword2",
	"dbase_numrecords": "keyword2",
	"dbase_open": "keyword2",
	"dbase_pack": "keyword2",
	"dbase_replace_record": "keyword2",
	"dblist": "keyword2",
	"dbmclose": "keyword2",
	"dbmdelete": "keyword2",
	"dbmexists": "keyword2",
	"dbmfetch": "keyword2",
	"dbmfirstkey": "keyword2",
	"dbminsert": "keyword2",
	"dbmnextkey": "keyword2",
	"dbmopen": "keyword2",
	"dbmreplace": "keyword2",
	"dbx_close": "keyword2",
	"dbx_cmp_asc": "keyword2",
	"dbx_cmp_desc": "keyword2",
	"dbx_connect": "keyword2",
	"dbx_error": "keyword2",
	"dbx_query": "keyword2",
	"dbx_sort": "keyword2",
	"dcgettext": "keyword2",
	"debug_backtrace": "keyword2",
	"debug_print_backtrace": "keyword2",
	"decbin": "keyword2",
	"dechex": "keyword2",
	"declare": "keyword1",
	"decoct": "keyword2",
	"default": "keyword1",
	"define": "keyword2",
	"define_syslog_variables": "keyword2",
	"defined": "keyword2",
	"deg2rad": "keyword2",
	"delete_iovec": "keyword2",
	"dgettext": "keyword2",
	"die": "keyword2",
	"dir": "keyword2",
	"dirname": "keyword2",
	"diskfreespace": "keyword2",
	"display_disabled_function": "keyword2",
	"dl": "keyword2",
	"do": "keyword1",
	"domxml_add_root": "keyword2",
	"domxml_attributes": "keyword2",
	"domxml_children": "keyword2",
	"domxml_dumpmem": "keyword2",
	"domxml_elem_get_attribute": "keyword2",
	"domxml_elem_set_attribute": "keyword2",
	"domxml_get_attribute": "keyword2",
	"domxml_getattr": "keyword2",
	"domxml_new_child": "keyword2",
	"domxml_new_xmldoc": "keyword2",
	"domxml_node": "keyword2",
	"domxml_node_attributes": "keyword2",
	"domxml_node_children": "keyword2",
	"domxml_node_new_child": "keyword2",
	"domxml_node_set_content": "keyword2",
	"domxml_node_unlink_node": "keyword2",
	"domxml_root": "keyword2",
	"domxml_set_attribute": "keyword2",
	"domxml_setattr": "keyword2",
	"domxml_unlink_node": "keyword2",
	"domxml_version": "keyword2",
	"doubleval": "keyword2",
	"drawarc": "keyword2",
	"drawcircle": "keyword2",
	"drawcubic": "keyword2",
	"drawcubicto": "keyword2",
	"drawcurve": "keyword2",
	"drawcurveto": "keyword2",
	"drawglyph": "keyword2",
	"drawline": "keyword2",
	"drawlineto": "keyword2",
	"each": "keyword2",
	"easter_date": "keyword2",
	"easter_days": "keyword2",
	"echo": "keyword1",
	"else": "keyword1",
	"elseif": "keyword1",
	"empty": "keyword1",
	"end": "keyword2",
	"endfor": "keyword1",
	"endforeach": "keyword1",
	"endif": "keyword1",
	"endswitch": "keyword1",
	"endwhile": "keyword1",
	"ereg": "keyword2",
	"ereg_replace": "keyword2",
	"eregi": "keyword2",
	"eregi_replace": "keyword2",
	"error_log": "keyword2",
	"error_reporting": "keyword2",
	"escapeshellarg": "keyword2",
	"escapeshellcmd": "keyword2",
	"exec": "keyword2",
	"exit": "keyword2",
	"exp": "keyword2",
	"explode": "keyword2",
	"extends": "keyword1",
	"extension_loaded": "keyword2",
	"extract": "keyword2",
	"ezmlm_hash": "keyword2",
	"false": "keyword3",
	"fbsql": "keyword2",
	"fbsql_affected_rows": "keyword2",
	"fbsql_autocommit": "keyword2",
	"fbsql_close": "keyword2",
	"fbsql_commit": "keyword2",
	"fbsql_connect": "keyword2",
	"fbsql_create_db": "keyword2",
	"fbsql_data_seek": "keyword2",
	"fbsql_database": "keyword2",
	"fbsql_database_password": "keyword2",
	"fbsql_db_query": "keyword2",
	"fbsql_drop_db": "keyword2",
	"fbsql_errno": "keyword2",
	"fbsql_error": "keyword2",
	"fbsql_fetch_array": "keyword2",
	"fbsql_fetch_assoc": "keyword2",
	"fbsql_fetch_field": "keyword2",
	"fbsql_fetch_lengths": "keyword2",
	"fbsql_fetch_object": "keyword2",
	"fbsql_fetch_row": "keyword2",
	"fbsql_field_flags": "keyword2",
	"fbsql_field_len": "keyword2",
	"fbsql_field_name": "keyword2",
	"fbsql_field_seek": "keyword2",
	"fbsql_field_table": "keyword2",
	"fbsql_field_type": "keyword2",
	"fbsql_free_result": "keyword2",
	"fbsql_hostname": "keyword2",
	"fbsql_insert_id": "keyword2",
	"fbsql_list_dbs": "keyword2",
	"fbsql_list_fields": "keyword2",
	"fbsql_list_tables": "keyword2",
	"fbsql_next_result": "keyword2",
	"fbsql_num_fields": "keyword2",
	"fbsql_num_rows": "keyword2",
	"fbsql_password": "keyword2",
	"fbsql_pconnect": "keyword2",
	"fbsql_query": "keyword2",
	"fbsql_result": "keyword2",
	"fbsql_rollback": "keyword2",
	"fbsql_select_db": "keyword2",
	"fbsql_start_db": "keyword2",
	"fbsql_stop_db": "keyword2",
	"fbsql_username": "keyword2",
	"fbsql_warnings": "keyword2",
	"fclose": "keyword2",
	"fd_alloc": "keyword2",
	"fd_clear": "keyword2",
	"fd_dealloc": "keyword2",
	"fd_isset": "keyword2",
	"fd_set": "keyword2",
	"fd_zero": "keyword2",
	"fdf_add_template": "keyword2",
	"fdf_close": "keyword2",
	"fdf_create": "keyword2",
	"fdf_get_file": "keyword2",
	"fdf_get_status": "keyword2",
	"fdf_get_value": "keyword2",
	"fdf_next_field_name": "keyword2",
	"fdf_open": "keyword2",
	"fdf_save": "keyword2",
	"fdf_set_ap": "keyword2",
	"fdf_set_file": "keyword2",
	"fdf_set_flags": "keyword2",
	"fdf_set_javascript_action": "keyword2",
	"fdf_set_opt": "keyword2",
	"fdf_set_status": "keyword2",
	"fdf_set_submit_form_action": "keyword2",
	"fdf_set_value": "keyword2",
	"feof": "keyword2",
	"fetch_iovec": "keyword2",
	"fflush": "keyword2",
	"fgetc": "keyword2",
	"fgetcsv": "keyword2",
	"fgets": "keyword2",
	"fgetss": "keyword2",
	"file": "keyword2",
	"file_exists": "keyword2",
	"file_get_contents": "keyword2",
	"file_put_contents": "keyword2",
	"fileatime": "keyword2",
	"filectime": "keyword2",
	"filegroup": "keyword2",
	"fileinode": "keyword2",
	"filemtime": "keyword2",
	"fileowner": "keyword2",
	"fileperms": "keyword2",
	"filepro": "keyword2",
	"filepro_fieldcount": "keyword2",
	"filepro_fieldname": "keyword2",
	"filepro_fieldtype": "keyword2",
	"filepro_fieldwidth": "keyword2",
	"filepro_retrieve": "keyword2",
	"filepro_rowcount": "keyword2",
	"filesize": "keyword2",
	"filetype": "keyword2",
	"final": "keyword1",
	"floatval": "keyword2",
	"flock": "keyword2",
	"floor": "keyword2",
	"flush": "keyword2",
	"fopen": "keyword2",
	"fopenstream": "keyword2",
	"for": "keyword1",
	"foreach": "keyword1",
	"fpassthru": "keyword2",
	"fputs": "keyword2",
	"fread": "keyword2",
	"free_iovec": "keyword2",
	"frenchtojd": "keyword2",
	"fribidi_log2vis": "keyword2",
	"fscanf": "keyword2",
	"fseek": "keyword2",
	"fsockopen": "keyword2",
	"fstat": "keyword2",
	"ftell": "keyword2",
	"ftp_alloc": "keyword2",
	"ftp_cdup": "keyword2",
	"ftp_chdir": "keyword2",
	"ftp_connect": "keyword2",
	"ftp_delete": "keyword2",
	"ftp_exec": "keyword2",
	"ftp_fget": "keyword2",
	"ftp_fput": "keyword2",
	"ftp_get": "keyword2",
	"ftp_login": "keyword2",
	"ftp_mdtm": "keyword2",
	"ftp_mkdir": "keyword2",
	"ftp_nlist": "keyword2",
	"ftp_pasv": "keyword2",
	"ftp_put": "keyword2",
	"ftp_pwd": "keyword2",
	"ftp_quit": "keyword2",
	"ftp_rawlist": "keyword2",
	"ftp_rename": "keyword2",
	"ftp_rmdir": "keyword2",
	"ftp_site": "keyword2",
	"ftp_size": "keyword2",
	"ftp_ssl_connect": "keyword2",
	"ftp_systype": "keyword2",
	"ftruncate": "keyword2",
	"func_get_arg": "keyword2",
	"func_get_args": "keyword2",
	"func_num_args": "keyword2",
	"function": "keyword1",
	"function_exists": "keyword2",
	"fwrite": "keyword2",
	"gd_info": "keyword2",
	"get_all_headers": "keyword2",
	"get_browser": "keyword2",
	"get_cfg_var": "keyword2",
	"get_class": "keyword2",
	"get_class_methods": "keyword2",
	"get_class_vars": "keyword2",
	"get_current_user": "keyword2",
	"get_declared_classes": "keyword2",
	"get_declared_interfaces": "keyword2",
	"get_defined_functions": "keyword2",
	"get_defined_vars": "keyword2",
	"get_extension_funcs": "keyword2",
	"get_headers": "keyword2",
	"get_html_translation_table": "keyword2",
	"get_included_files": "keyword2",
	"get_loaded_extensions": "keyword2",
	"get_magic_quotes_gpc": "keyword2",
	"get_magic_quotes_runtime": "keyword2",
	"get_meta_tags": "keyword2",
	"get_object_vars": "keyword2",
	"get_parent_class": "keyword2",
	"get_required_files": "keyword2",
	"get_resource_type": "keyword2",
	"getallheaders": "keyword2",
	"getascent": "keyword2",
	"getcwd": "keyword2",
	"getdate": "keyword2",
	"getdescent": "keyword2",
	"getenv": "keyword2",
	"getheight": "keyword2",
	"gethostbyaddr": "keyword2",
	"gethostbyname": "keyword2",
	"gethostbynamel": "keyword2",
	"getimagesize": "keyword2",
	"getlastmod": "keyword2",
	"getleading": "keyword2",
	"getmxrr": "keyword2",
	"getmyinode": "keyword2",
	"getmypid": "keyword2",
	"getmyuid": "keyword2",
	"getopt": "keyword2",
	"getpeername": "keyword2",
	"getprotobyname": "keyword2",
	"getprotobynumber": "keyword2",
	"getrandmax": "keyword2",
	"getrusage": "keyword2",
	"getservbyname": "keyword2",
	"getservbyport": "keyword2",
	"getshape1": "keyword2",
	"getshape2": "keyword2",
	"getsockname": "keyword2",
	"getsockopt": "keyword2",
	"gettext": "keyword2",
	"gettimeofday": "keyword2",
	"gettype": "keyword2",
	"getwidth": "keyword2",
	"global": "keyword1",
	"gmdate": "keyword2",
	"gmmktime": "keyword2",
	"gmp_abs": "keyword2",
	"gmp_add": "keyword2",
	"gmp_and": "keyword2",
	"gmp_clrbit": "keyword2",
	"gmp_cmp": "keyword2",
	"gmp_com": "keyword2",
	"gmp_div": "keyword2",
	"gmp_div_q": "keyword2",
	"gmp_div_qr": "keyword2",
	"gmp_div_r": "keyword2",
	"gmp_divexact": "keyword2",
	"gmp_fact": "keyword2",
	"gmp_gcd": "keyword2",
	"gmp_gcdext": "keyword2",
	"gmp_hamdist": "keyword2",
	"gmp_init": "keyword2",
	"gmp_intval": "keyword2",
	"gmp_invert": "keyword2",
	"gmp_jacobi": "keyword2",
	"gmp_legendre": "keyword2",
	"gmp_mod": "keyword2",
	"gmp_mul": "keyword2",
	"gmp_neg": "keyword2",
	"gmp_or": "keyword2",
	"gmp_perfect_square": "keyword2",
	"gmp_popcount": "keyword2",
	"gmp_pow": "keyword2",
	"gmp_powm": "keyword2",
	"gmp_prob_prime": "keyword2",
	"gmp_random": "keyword2",
	"gmp_scan0": "keyword2",
	"gmp_scan1": "keyword2",
	"gmp_setbit": "keyword2",
	"gmp_sign": "keyword2",
	"gmp_sqrt": "keyword2",
	"gmp_sqrtrem": "keyword2",
	"gmp_strval": "keyword2",
	"gmp_sub": "keyword2",
	"gmp_xor": "keyword2",
	"gmstrftime": "keyword2",
	"gregoriantojd": "keyword2",
	"gzclose": "keyword2",
	"gzcompress": "keyword2",
	"gzdeflate": "keyword2",
	"gzencode": "keyword2",
	"gzeof": "keyword2",
	"gzfile": "keyword2",
	"gzgetc": "keyword2",
	"gzgets": "keyword2",
	"gzgetss": "keyword2",
	"gzinflate": "keyword2",
	"gzopen": "keyword2",
	"gzpassthru": "keyword2",
	"gzputs": "keyword2",
	"gzread": "keyword2",
	"gzrewind": "keyword2",
	"gzseek": "keyword2",
	"gztell": "keyword2",
	"gzuncompress": "keyword2",
	"gzwrite": "keyword2",
	"header": "keyword2",
	"headers_list": "keyword2",
	"headers_sent": "keyword2",
	"hebrev": "keyword2",
	"hebrevc": "keyword2",
	"hexdec": "keyword2",
	"highlight_file": "keyword2",
	"highlight_string": "keyword2",
	"htmlentities": "keyword2",
	"htmlspecialchars": "keyword2",
	"http_build_query": "keyword2",
	"hw_array2objrec": "keyword2",
	"hw_changeobject": "keyword2",
	"hw_children": "keyword2",
	"hw_childrenobj": "keyword2",
	"hw_close": "keyword2",
	"hw_connect": "keyword2",
	"hw_connection_info": "keyword2",
	"hw_cp": "keyword2",
	"hw_deleteobject": "keyword2",
	"hw_docbyanchor": "keyword2",
	"hw_docbyanchorobj": "keyword2",
	"hw_document_attributes": "keyword2",
	"hw_document_bodytag": "keyword2",
	"hw_document_content": "keyword2",
	"hw_document_setcontent": "keyword2",
	"hw_document_size": "keyword2",
	"hw_dummy": "keyword2",
	"hw_edittext": "keyword2",
	"hw_error": "keyword2",
	"hw_errormsg": "keyword2",
	"hw_free_document": "keyword2",
	"hw_getanchors": "keyword2",
	"hw_getanchorsobj": "keyword2",
	"hw_getandlock": "keyword2",
	"hw_getcgi": "keyword2",
	"hw_getchildcoll": "keyword2",
	"hw_getchildcollobj": "keyword2",
	"hw_getchilddoccoll": "keyword2",
	"hw_getchilddoccollobj": "keyword2",
	"hw_getobject": "keyword2",
	"hw_getobjectbyftquery": "keyword2",
	"hw_getobjectbyftquerycoll": "keyword2",
	"hw_getobjectbyftquerycollobj": "keyword2",
	"hw_getobjectbyftqueryobj": "keyword2",
	"hw_getobjectbyquery": "keyword2",
	"hw_getobjectbyquerycoll": "keyword2",
	"hw_getobjectbyquerycollobj": "keyword2",
	"hw_getobjectbyqueryobj": "keyword2",
	"hw_getparents": "keyword2",
	"hw_getparentsobj": "keyword2",
	"hw_getrellink": "keyword2",
	"hw_getremote": "keyword2",
	"hw_getremotechildren": "keyword2",
	"hw_getsrcbydestobj": "keyword2",
	"hw_gettext": "keyword2",
	"hw_getusername": "keyword2",
	"hw_identify": "keyword2",
	"hw_incollections": "keyword2",
	"hw_info": "keyword2",
	"hw_inscoll": "keyword2",
	"hw_insdoc": "keyword2",
	"hw_insertanchors": "keyword2",
	"hw_insertdocument": "keyword2",
	"hw_insertobject": "keyword2",
	"hw_mapid": "keyword2",
	"hw_modifyobject": "keyword2",
	"hw_mv": "keyword2",
	"hw_new_document": "keyword2",
	"hw_new_document_from_file": "keyword2",
	"hw_objrec2array": "keyword2",
	"hw_output_document": "keyword2",
	"hw_pconnect": "keyword2",
	"hw_pipecgi": "keyword2",
	"hw_pipedocument": "keyword2",
	"hw_root": "keyword2",
	"hw_setlinkroot": "keyword2",
	"hw_stat": "keyword2",
	"hw_unlock": "keyword2",
	"hw_who": "keyword2",
	"ibase_blob_add": "keyword2",
	"ibase_blob_cancel": "keyword2",
	"ibase_blob_close": "keyword2",
	"ibase_blob_create": "keyword2",
	"ibase_blob_echo": "keyword2",
	"ibase_blob_get": "keyword2",
	"ibase_blob_import": "keyword2",
	"ibase_blob_info": "keyword2",
	"ibase_blob_open": "keyword2",
	"ibase_close": "keyword2",
	"ibase_commit": "keyword2",
	"ibase_connect": "keyword2",
	"ibase_errmsg": "keyword2",
	"ibase_execute": "keyword2",
	"ibase_fetch_object": "keyword2",
	"ibase_fetch_row": "keyword2",
	"ibase_field_info": "keyword2",
	"ibase_free_query": "keyword2",
	"ibase_free_result": "keyword2",
	"ibase_num_fields": "keyword2",
	"ibase_pconnect": "keyword2",
	"ibase_prepare": "keyword2",
	"ibase_query": "keyword2",
	"ibase_rollback": "keyword2",
	"ibase_timefmt": "keyword2",
	"ibase_trans": "keyword2",
	"icap_create_calendar": "keyword2",
	"icap_delete_calendar": "keyword2",
	"icap_delete_event": "keyword2",
	"icap_fetch_event": "keyword2",
	"icap_list_alarms": "keyword2",
	"icap_list_events": "keyword2",
	"icap_open": "keyword2",
	"icap_popen": "keyword2",
	"icap_rename_calendar": "keyword2",
	"icap_reopen": "keyword2",
	"icap_snooze": "keyword2",
	"icap_store_event": "keyword2",
	"iconv": "keyword2",
	"iconv_get_encoding": "keyword2",
	"iconv_set_encoding": "keyword2",
	"idate": "keyword2",
	"if": "keyword1",
	"ifx_affected_rows": "keyword2",
	"ifx_blobinfile_mode": "keyword2",
	"ifx_byteasvarchar": "keyword2",
	"ifx_close": "keyword2",
	"ifx_connect": "keyword2",
	"ifx_copy_blob": "keyword2",
	"ifx_create_blob": "keyword2",
	"ifx_create_char": "keyword2",
	"ifx_do": "keyword2",
	"ifx_error": "keyword2",
	"ifx_errormsg": "keyword2",
	"ifx_fetch_row": "keyword2",
	"ifx_fieldproperties": "keyword2",
	"ifx_fieldtypes": "keyword2",
	"ifx_free_blob": "keyword2",
	"ifx_free_char": "keyword2",
	"ifx_free_result": "keyword2",
	"ifx_get_blob": "keyword2",
	"ifx_get_char": "keyword2",
	"ifx_getsqlca": "keyword2",
	"ifx_htmltbl_result": "keyword2",
	"ifx_nullformat": "keyword2",
	"ifx_num_fields": "keyword2",
	"ifx_num_rows": "keyword2",
	"ifx_pconnect": "keyword2",
	"ifx_prepare": "keyword2",
	"ifx_query": "keyword2",
	"ifx_textasvarchar": "keyword2",
	"ifx_update_blob": "keyword2",
	"ifx_update_char": "keyword2",
	"ifxus_close_slob": "keyword2",
	"ifxus_create_slob": "keyword2",
	"ifxus_free_slob": "keyword2",
	"ifxus_open_slob": "keyword2",
	"ifxus_read_slob": "keyword2",
	"ifxus_seek_slob": "keyword2",
	"ifxus_tell_slob": "keyword2",
	"ifxus_write_slob": "keyword2",
	"ignore_user_abort": "keyword2",
	"iis_addserver": "keyword2",
	"iis_getdirsecurity": "keyword2",
	"iis_getscriptmap": "keyword2",
	"iis_getserverbycomment": "keyword2",
	"iis_getserverbypath": "keyword2",
	"iis_getserverright": "keyword2",
	"iis_getservicestate": "keyword2",
	"iis_removeserver": "keyword2",
	"iis_setappsettings": "keyword2",
	"iis_setdirsecurity": "keyword2",
	"iis_setscriptmap": "keyword2",
	"iis_setserverright": "keyword2",
	"iis_startserver": "keyword2",
	"iis_startservice": "keyword2",
	"iis_stopserver": "keyword2",
	"iis_stopservice": "keyword2",
	"image2wbmp": "keyword2",
	"image_type_to_extension": "keyword2",
	"imagealphablending": "keyword2",
	"imagearc": "keyword2",
	"imagechar": "keyword2",
	"imagecharup": "keyword2",
	"imagecolorallocate": "keyword2",
	"imagecolorat": "keyword2",
	"imagecolorclosest": "keyword2",
	"imagecolorclosestalpha": "keyword2",
	"imagecolorclosesthwb": "keyword2",
	"imagecolordeallocate": "keyword2",
	"imagecolorexact": "keyword2",
	"imagecolorexactalpha": "keyword2",
	"imagecolormatch": "keyword2",
	"imagecolorresolve": "keyword2",
	"imagecolorresolvealpha": "keyword2",
	"imagecolorset": "keyword2",
	"imagecolorsforindex": "keyword2",
	"imagecolorstotal": "keyword2",
	"imagecolortransparent": "keyword2",
	"imagecopy": "keyword2",
	"imagecopymerge": "keyword2",
	"imagecopymergegray": "keyword2",
	"imagecopyresampled": "keyword2",
	"imagecopyresized": "keyword2",
	"imagecreate": "keyword2",
	"imagecreatefromgif": "keyword2",
	"imagecreatefromjpeg": "keyword2",
	"imagecreatefrompng": "keyword2",
	"imagecreatefromstring": "keyword2",
	"imagecreatefromwbmp": "keyword2",
	"imagecreatefromxbm": "keyword2",
	"imagecreatefromxpm": "keyword2",
	"imagecreatetruecolor": "keyword2",
	"imagedashedline": "keyword2",
	"imagedestroy": "keyword2",
	"imageellipse": "keyword2",
	"imagefill": "keyword2",
	"imagefilledarc": "keyword2",
	"imagefilledellipse": "keyword2",
	"imagefilledpolygon": "keyword2",
	"imagefilledrectangle": "keyword2",
	"imagefilltoborder": "keyword2",
	"imagefilter": "keyword2",
	"imagefontheight": "keyword2",
	"imagefontwidth": "keyword2",
	"imagegammacorrect": "keyword2",
	"imagegif": "keyword2",
	"imageinterlace": "keyword2",
	"imagejpeg": "keyword2",
	"imagelayereffect": "keyword2",
	"imageline": "keyword2",
	"imageloadfont": "keyword2",
	"imagepalettecopy": "keyword2",
	"imagepng": "keyword2",
	"imagepolygon": "keyword2",
	"imagepsbbox": "keyword2",
	"imagepscopyfont": "keyword2",
	"imagepsencodefont": "keyword2",
	"imagepsextendfont": "keyword2",
	"imagepsfreefont": "keyword2",
	"imagepsloadfont": "keyword2",
	"imagepsslantfont": "keyword2",
	"imagepstext": "keyword2",
	"imagerectangle": "keyword2",
	"imagerotate": "keyword2",
	"imagesetbrush": "keyword2",
	"imagesetpixel": "keyword2",
	"imagesetstyle": "keyword2",
	"imagesetthickness": "keyword2",
	"imagesettile": "keyword2",
	"imagestring": "keyword2",
	"imagestringup": "keyword2",
	"imagesx": "keyword2",
	"imagesy": "keyword2",
	"imagetruecolortopalette": "keyword2",
	"imagettfbbox": "keyword2",
	"imagettftext": "keyword2",
	"imagetypes": "keyword2",
	"imagewbmp": "keyword2",
	"imap_8bit": "keyword2",
	"imap_alerts": "keyword2",
	"imap_append": "keyword2",
	"imap_base64": "keyword2",
	"imap_binary": "keyword2",
	"imap_body": "keyword2",
	"imap_bodystruct": "keyword2",
	"imap_check": "keyword2",
	"imap_clearflag_full": "keyword2",
	"imap_close": "keyword2",
	"imap_create": "keyword2",
	"imap_createmailbox": "keyword2",
	"imap_delete": "keyword2",
	"imap_deletemailbox": "keyword2",
	"imap_errors": "keyword2",
	"imap_expunge": "keyword2",
	"imap_fetch_overview": "keyword2",
	"imap_fetchbody": "keyword2",
	"imap_fetchheader": "keyword2",
	"imap_fetchstructure": "keyword2",
	"imap_fetchtext": "keyword2",
	"imap_get_quota": "keyword2",
	"imap_getmailboxes": "keyword2",
	"imap_getsubscribed": "keyword2",
	"imap_header": "keyword2",
	"imap_headerinfo": "keyword2",
	"imap_headers": "keyword2",
	"imap_last_error": "keyword2",
	"imap_list": "keyword2",
	"imap_listmailbox": "keyword2",
	"imap_listsubscribed": "keyword2",
	"imap_lsub": "keyword2",
	"imap_mail": "keyword2",
	"imap_mail_compose": "keyword2",
	"imap_mail_copy": "keyword2",
	"imap_mail_move": "keyword2",
	"imap_mailboxmsginfo": "keyword2",
	"imap_mime_header_decode": "keyword2",
	"imap_msgno": "keyword2",
	"imap_num_msg": "keyword2",
	"imap_num_recent": "keyword2",
	"imap_open": "keyword2",
	"imap_ping": "keyword2",
	"imap_popen": "keyword2",
	"imap_qprint": "keyword2",
	"imap_rename": "keyword2",
	"imap_renamemailbox": "keyword2",
	"imap_reopen": "keyword2",
	"imap_rfc822_parse_adrlist": "keyword2",
	"imap_rfc822_parse_headers": "keyword2",
	"imap_rfc822_write_address": "keyword2",
	"imap_scan": "keyword2",
	"imap_scanmailbox": "keyword2",
	"imap_search": "keyword2",
	"imap_set_quota": "keyword2",
	"imap_setflag_full": "keyword2",
	"imap_sort": "keyword2",
	"imap_status": "keyword2",
	"imap_subscribe": "keyword2",
	"imap_uid": "keyword2",
	"imap_undelete": "keyword2",
	"imap_unsubscribe": "keyword2",
	"imap_utf7_decode": "keyword2",
	"imap_utf7_encode": "keyword2",
	"imap_utf8": "keyword2",
	"implements": "keyword1",
	"implode": "keyword2",
	"in_array": "keyword2",
	"include": "keyword1",
	"include_once": "keyword1",
	"ingres_autocommit": "keyword2",
	"ingres_close": "keyword2",
	"ingres_commit": "keyword2",
	"ingres_connect": "keyword2",
	"ingres_fetch_array": "keyword2",
	"ingres_fetch_object": "keyword2",
	"ingres_fetch_row": "keyword2",
	"ingres_field_length": "keyword2",
	"ingres_field_name": "keyword2",
	"ingres_field_nullable": "keyword2",
	"ingres_field_precision": "keyword2",
	"ingres_field_scale": "keyword2",
	"ingres_field_type": "keyword2",
	"ingres_num_fields": "keyword2",
	"ingres_num_rows": "keyword2",
	"ingres_pconnect": "keyword2",
	"ingres_query": "keyword2",
	"ingres_rollback": "keyword2",
	"ini_alter": "keyword2",
	"ini_get": "keyword2",
	"ini_restore": "keyword2",
	"ini_set": "keyword2",
	"instanceof": "operator",
	"interface": "keyword1",
	"intval": "keyword2",
	"ip2long": "keyword2",
	"iptcembed": "keyword2",
	"iptcparse": "keyword2",
	"ircg_channel_mode": "keyword2",
	"ircg_disconnect": "keyword2",
	"ircg_html_encode": "keyword2",
	"ircg_ignore_add": "keyword2",
	"ircg_ignore_del": "keyword2",
	"ircg_is_conn_alive": "keyword2",
	"ircg_join": "keyword2",
	"ircg_kick": "keyword2",
	"ircg_lookup_format_messages": "keyword2",
	"ircg_msg": "keyword2",
	"ircg_nick": "keyword2",
	"ircg_nickname_escape": "keyword2",
	"ircg_nickname_unescape": "keyword2",
	"ircg_notice": "keyword2",
	"ircg_part": "keyword2",
	"ircg_pconnect": "keyword2",
	"ircg_register_format_messages": "keyword2",
	"ircg_set_current": "keyword2",
	"ircg_topic": "keyword2",
	"ircg_whois": "keyword2",
	"is_array": "keyword2",
	"is_bool": "keyword2",
	"is_dir": "keyword2",
	"is_double": "keyword2",
	"is_executable": "keyword2",
	"is_file": "keyword2",
	"is_float": "keyword2",
	"is_int": "keyword2",
	"is_integer": "keyword2",
	"is_link": "keyword2",
	"is_long": "keyword2",
	"is_null": "keyword2",
	"is_numeric": "keyword2",
	"is_object": "keyword2",
	"is_readable": "keyword2",
	"is_real": "keyword2",
	"is_resource": "keyword2",
	"is_scalar": "keyword2",
	"is_string": "keyword2",
	"is_subclass_of": "keyword2",
	"is_uploaded_file": "keyword2",
	"is_writable": "keyword2",
	"is_writeable": "keyword2",
	"isset": "keyword1",
	"java_last_exception_clear": "keyword2",
	"java_last_exception_get": "keyword2",
	"jddayofweek": "keyword2",
	"jdmonthname": "keyword2",
	"jdtofrench": "keyword2",
	"jdtogregorian": "keyword2",
	"jdtojewish": "keyword2",
	"jdtojulian": "keyword2",
	"jdtounix": "keyword2",
	"jewishtojd": "keyword2",
	"join": "keyword2",
	"jpeg2wbmp": "keyword2",
	"juliantojd": "keyword2",
	"key": "keyword2",
	"krsort": "keyword2",
	"ksort": "keyword2",
	"labelframe": "keyword2",
	"lcg_value": "keyword2",
	"ldap_8859_to_t61": "keyword2",
	"ldap_add": "keyword2",
	"ldap_bind": "keyword2",
	"ldap_close": "keyword2",
	"ldap_compare": "keyword2",
	"ldap_connect": "keyword2",
	"ldap_count_entries": "keyword2",
	"ldap_delete": "keyword2",
	"ldap_dn2ufn": "keyword2",
	"ldap_err2str": "keyword2",
	"ldap_errno": "keyword2",
	"ldap_error": "keyword2",
	"ldap_explode_dn": "keyword2",
	"ldap_first_attribute": "keyword2",
	"ldap_first_entry": "keyword2",
	"ldap_first_reference": "keyword2",
	"ldap_free_result": "keyword2",
	"ldap_get_attributes": "keyword2",
	"ldap_get_dn": "keyword2",
	"ldap_get_entries": "keyword2",
	"ldap_get_option": "keyword2",
	"ldap_get_values": "keyword2",
	"ldap_get_values_len": "keyword2",
	"ldap_list": "keyword2",
	"ldap_mod_add": "keyword2",
	"ldap_mod_del": "keyword2",
	"ldap_mod_replace": "keyword2",
	"ldap_modify": "keyword2",
	"ldap_next_attribute": "keyword2",
	"ldap_next_entry": "keyword2",
	"ldap_next_reference": "keyword2",
	"ldap_parse_reference": "keyword2",
	"ldap_parse_result": "keyword2",
	"ldap_read": "keyword2",
	"ldap_rename": "keyword2",
	"ldap_search": "keyword2",
	"ldap_set_option": "keyword2",
	"ldap_t61_to_8859": "keyword2",
	"ldap_unbind": "keyword2",
	"leak": "keyword2",
	"levenshtein": "keyword2",
	"link": "keyword2",
	"linkinfo": "keyword2",
	"list": "keyword1",
	"listen": "keyword2",
	"localeconv": "keyword2",
	"localtime": "keyword2",
	"log": "keyword2",
	"log10": "keyword2",
	"long2ip": "keyword2",
	"lstat": "keyword2",
	"ltrim": "keyword2",
	"magic_quotes_runtime": "keyword2",
	"mail": "keyword2",
	"max": "keyword2",
	"mb_convert_case": "keyword2",
	"mb_strtolower": "keyword2",
	"mb_strtoupper": "keyword2",
	"mcal_append_event": "keyword2",
	"mcal_close": "keyword2",
	"mcal_create_calendar": "keyword2",
	"mcal_date_compare": "keyword2",
	"mcal_date_valid": "keyword2",
	"mcal_day_of_week": "keyword2",
	"mcal_day_of_year": "keyword2",
	"mcal_days_in_month": "keyword2",
	"mcal_delete_calendar": "keyword2",
	"mcal_delete_event": "keyword2",
	"mcal_event_add_attribute": "keyword2",
	"mcal_event_init": "keyword2",
	"mcal_event_set_alarm": "keyword2",
	"mcal_event_set_category": "keyword2",
	"mcal_event_set_class": "keyword2",
	"mcal_event_set_description": "keyword2",
	"mcal_event_set_end": "keyword2",
	"mcal_event_set_recur_daily": "keyword2",
	"mcal_event_set_recur_monthly_mday": "keyword2",
	"mcal_event_set_recur_monthly_wday": "keyword2",
	"mcal_event_set_recur_none": "keyword2",
	"mcal_event_set_recur_weekly": "keyword2",
	"mcal_event_set_recur_yearly": "keyword2",
	"mcal_event_set_start": "keyword2",
	"mcal_event_set_title": "keyword2",
	"mcal_fetch_current_stream_event": "keyword2",
	"mcal_fetch_event": "keyword2",
	"mcal_is_leap_year": "keyword2",
	"mcal_list_alarms": "keyword2",
	"mcal_list_events": "keyword2",
	"mcal_next_recurrence": "keyword2",
	"mcal_open": "keyword2",
	"mcal_popen": "keyword2",
	"mcal_rename_calendar": "keyword2",
	"mcal_reopen": "keyword2",
	"mcal_snooze": "keyword2",
	"mcal_store_event": "keyword2",
	"mcal_time_valid": "keyword2",
	"mcal_week_of_year": "keyword2",
	"mcrypt_cbc": "keyword2",
	"mcrypt_cfb": "keyword2",
	"mcrypt_create_iv": "keyword2",
	"mcrypt_decrypt": "keyword2",
	"mcrypt_ecb": "keyword2",
	"mcrypt_enc_get_algorithms_name": "keyword2",
	"mcrypt_enc_get_block_size": "keyword2",
	"mcrypt_enc_get_iv_size": "keyword2",
	"mcrypt_enc_get_key_size": "keyword2",
	"mcrypt_enc_get_modes_name": "keyword2",
	"mcrypt_enc_get_supported_key_sizes": "keyword2",
	"mcrypt_enc_is_block_algorithm": "keyword2",
	"mcrypt_enc_is_block_algorithm_mode": "keyword2",
	"mcrypt_enc_is_block_mode": "keyword2",
	"mcrypt_enc_self_test": "keyword2",
	"mcrypt_encrypt": "keyword2",
	"mcrypt_generic": "keyword2",
	"mcrypt_generic_deinit": "keyword2",
	"mcrypt_generic_end": "keyword2",
	"mcrypt_generic_init": "keyword2",
	"mcrypt_get_block_size": "keyword2",
	"mcrypt_get_cipher_name": "keyword2",
	"mcrypt_get_iv_size": "keyword2",
	"mcrypt_get_key_size": "keyword2",
	"mcrypt_list_algorithms": "keyword2",
	"mcrypt_list_modes": "keyword2",
	"mcrypt_module_close": "keyword2",
	"mcrypt_module_get_algo_block_size": "keyword2",
	"mcrypt_module_get_algo_key_size": "keyword2",
	"mcrypt_module_get_supported_key_sizes": "keyword2",
	"mcrypt_module_is_block_algorithm": "keyword2",
	"mcrypt_module_is_block_algorithm_mode": "keyword2",
	"mcrypt_module_is_block_mode": "keyword2",
	"mcrypt_module_open": "keyword2",
	"mcrypt_module_self_test": "keyword2",
	"mcrypt_ofb": "keyword2",
	"md5": "keyword2",
	"md5_file": "keyword2",
	"mdecrypt_generic": "keyword2",
	"metaphone": "keyword2",
	"method_exists": "keyword2",
	"mhash": "keyword2",
	"mhash_count": "keyword2",
	"mhash_get_block_size": "keyword2",
	"mhash_get_hash_name": "keyword2",
	"mhash_keygen_s2k": "keyword2",
	"microtime": "keyword2",
	"min": "keyword2",
	"ming_setcubicthreshold": "keyword2",
	"ming_setscale": "keyword2",
	"mkdir": "keyword2",
	"mktime": "keyword2",
	"move": "keyword2",
	"move_uploaded_file": "keyword2",
	"movepen": "keyword2",
	"movepento": "keyword2",
	"moveto": "keyword2",
	"msql": "keyword2",
	"msql_affected_rows": "keyword2",
	"msql_close": "keyword2",
	"msql_connect": "keyword2",
	"msql_create_db": "keyword2",
	"msql_createdb": "keyword2",
	"msql_data_seek": "keyword2",
	"msql_db_query": "keyword2",
	"msql_dbname": "keyword2",
	"msql_drop_db": "keyword2",
	"msql_dropdb": "keyword2",
	"msql_error": "keyword2",
	"msql_fetch_array": "keyword2",
	"msql_fetch_field": "keyword2",
	"msql_fetch_object": "keyword2",
	"msql_fetch_row": "keyword2",
	"msql_field_flags": "keyword2",
	"msql_field_len": "keyword2",
	"msql_field_name": "keyword2",
	"msql_field_seek": "keyword2",
	"msql_field_table": "keyword2",
	"msql_field_type": "keyword2",
	"msql_fieldflags": "keyword2",
	"msql_fieldlen": "keyword2",
	"msql_fieldname": "keyword2",
	"msql_fieldtable": "keyword2",
	"msql_fieldtype": "keyword2",
	"msql_free_result": "keyword2",
	"msql_freeresult": "keyword2",
	"msql_list_dbs": "keyword2",
	"msql_list_fields": "keyword2",
	"msql_list_tables": "keyword2",
	"msql_listdbs": "keyword2",
	"msql_listfields": "keyword2",
	"msql_listtables": "keyword2",
	"msql_num_fields": "keyword2",
	"msql_num_rows": "keyword2",
	"msql_numfields": "keyword2",
	"msql_numrows": "keyword2",
	"msql_pconnect": "keyword2",
	"msql_query": "keyword2",
	"msql_regcase": "keyword2",
	"msql_result": "keyword2",
	"msql_select_db": "keyword2",
	"msql_selectdb": "keyword2",
	"msql_tablename": "keyword2",
	"mssql_affected_rows": "keyword2",
	"mssql_close": "keyword2",
	"mssql_connect": "keyword2",
	"mssql_data_seek": "keyword2",
	"mssql_fetch_array": "keyword2",
	"mssql_fetch_batch": "keyword2",
	"mssql_fetch_field": "keyword2",
	"mssql_fetch_object": "keyword2",
	"mssql_fetch_row": "keyword2",
	"mssql_field_length": "keyword2",
	"mssql_field_name": "keyword2",
	"mssql_field_seek": "keyword2",
	"mssql_field_type": "keyword2",
	"mssql_free_result": "keyword2",
	"mssql_get_last_message": "keyword2",
	"mssql_min_client_severity": "keyword2",
	"mssql_min_error_severity": "keyword2",
	"mssql_min_message_severity": "keyword2",
	"mssql_min_server_severity": "keyword2",
	"mssql_next_result": "keyword2",
	"mssql_num_fields": "keyword2",
	"mssql_num_rows": "keyword2",
	"mssql_pconnect": "keyword2",
	"mssql_query": "keyword2",
	"mssql_result": "keyword2",
	"mssql_rows_affected": "keyword2",
	"mssql_select_db": "keyword2",
	"mt_getrandmax": "keyword2",
	"mt_rand": "keyword2",
	"mt_srand": "keyword2",
	"multcolor": "keyword2",
	"muscat_close": "keyword2",
	"muscat_get": "keyword2",
	"muscat_give": "keyword2",
	"muscat_setup": "keyword2",
	"muscat_setup_net": "keyword2",
	"mysql": "keyword2",
	"mysql_affected_rows": "keyword2",
	"mysql_close": "keyword2",
	"mysql_connect": "keyword2",
	"mysql_create_db": "keyword2",
	"mysql_createdb": "keyword2",
	"mysql_data_seek": "keyword2",
	"mysql_db_name": "keyword2",
	"mysql_db_query": "keyword2",
	"mysql_dbname": "keyword2",
	"mysql_drop_db": "keyword2",
	"mysql_dropdb": "keyword2",
	"mysql_errno": "keyword2",
	"mysql_error": "keyword2",
	"mysql_escape_string": "keyword2",
	"mysql_fetch_array": "keyword2",
	"mysql_fetch_assoc": "keyword2",
	"mysql_fetch_field": "keyword2",
	"mysql_fetch_lengths": "keyword2",
	"mysql_fetch_object": "keyword2",
	"mysql_fetch_row": "keyword2",
	"mysql_field_flags": "keyword2",
	"mysql_field_len": "keyword2",
	"mysql_field_name": "keyword2",
	"mysql_field_seek": "keyword2",
	"mysql_field_table": "keyword2",
	"mysql_field_type": "keyword2",
	"mysql_fieldflags": "keyword2",
	"mysql_fieldlen": "keyword2",
	"mysql_fieldname": "keyword2",
	"mysql_fieldtable": "keyword2",
	"mysql_fieldtype": "keyword2",
	"mysql_free_result": "keyword2",
	"mysql_freeresult": "keyword2",
	"mysql_get_client_info": "keyword2",
	"mysql_get_host_info": "keyword2",
	"mysql_get_proto_info": "keyword2",
	"mysql_get_server_info": "keyword2",
	"mysql_insert_id": "keyword2",
	"mysql_list_dbs": "keyword2",
	"mysql_list_fields": "keyword2",
	"mysql_list_tables": "keyword2",
	"mysql_listdbs": "keyword2",
	"mysql_listfields": "keyword2",
	"mysql_listtables": "keyword2",
	"mysql_num_fields": "keyword2",
	"mysql_num_rows": "keyword2",
	"mysql_numfields": "keyword2",
	"mysql_numrows": "keyword2",
	"mysql_pconnect": "keyword2",
	"mysql_query": "keyword2",
	"mysql_result": "keyword2",
	"mysql_select_db": "keyword2",
	"mysql_selectdb": "keyword2",
	"mysql_tablename": "keyword2",
	"mysql_unbuffered_query": "keyword2",
	"natcasesort": "keyword2",
	"natsort": "keyword2",
	"new": "keyword1",
	"new_xmldoc": "keyword2",
	"next": "keyword2",
	"nextframe": "keyword2",
	"nl2br": "keyword2",
	"notes_body": "keyword2",
	"notes_copy_db": "keyword2",
	"notes_create_db": "keyword2",
	"notes_create_note": "keyword2",
	"notes_drop_db": "keyword2",
	"notes_find_note": "keyword2",
	"notes_header_info": "keyword2",
	"notes_list_msgs": "keyword2",
	"notes_mark_read": "keyword2",
	"notes_mark_unread": "keyword2",
	"notes_nav_create": "keyword2",
	"notes_search": "keyword2",
	"notes_unread": "keyword2",
	"notes_version": "keyword2",
	"null": "keyword3",
	"number_format": "keyword2",
	"ob_end_clean": "keyword2",
	"ob_end_flush": "keyword2",
	"ob_get_clean": "keyword2",
	"ob_get_contents": "keyword2",
	"ob_get_flush": "keyword2",
	"ob_get_length": "keyword2",
	"ob_gzhandler": "keyword2",
	"ob_iconv_handler": "keyword2",
	"ob_implicit_flush": "keyword2",
	"ob_list_handlers": "keyword2",
	"ob_start": "keyword2",
	"ocibindbyname": "keyword2",
	"ocicancel": "keyword2",
	"ocicollappend": "keyword2",
	"ocicollassign": "keyword2",
	"ocicollassignelem": "keyword2",
	"ocicolldateappendelem": "keyword2",
	"ocicolldateassignelem": "keyword2",
	"ocicolldategetelem": "keyword2",
	"ocicollgetelem": "keyword2",
	"ocicollmax": "keyword2",
	"ocicollsize": "keyword2",
	"ocicolltrim": "keyword2",
	"ocicolumnisnull": "keyword2",
	"ocicolumnname": "keyword2",
	"ocicolumnprecision": "keyword2",
	"ocicolumnscale": "keyword2",
	"ocicolumnsize": "keyword2",
	"ocicolumntype": "keyword2",
	"ocicolumntyperaw": "keyword2",
	"ocicommit": "keyword2",
	"ocidefinebyname": "keyword2",
	"ocierror": "keyword2",
	"ociexecute": "keyword2",
	"ocifetch": "keyword2",
	"ocifetchinto": "keyword2",
	"ocifetchstatement": "keyword2",
	"ocifreecoll": "keyword2",
	"ocifreecursor": "keyword2",
	"ocifreedesc": "keyword2",
	"ocifreestatement": "keyword2",
	"ociinternaldebug": "keyword2",
	"ociloadlob": "keyword2",
	"ocilogoff": "keyword2",
	"ocilogon": "keyword2",
	"ocinewcollection": "keyword2",
	"ocinewcursor": "keyword2",
	"ocinewdescriptor": "keyword2",
	"ocinlogon": "keyword2",
	"ocinumcols": "keyword2",
	"ociparse": "keyword2",
	"ociplogon": "keyword2",
	"ociresult": "keyword2",
	"ocirollback": "keyword2",
	"ocirowcount": "keyword2",
	"ocisavelob": "keyword2",
	"ocisavelobfile": "keyword2",
	"ociserverversion": "keyword2",
	"ocisetprefetch": "keyword2",
	"ocistatementtype": "keyword2",
	"ociwritelobtofile": "keyword2",
	"octdec": "keyword2",
	"odbc_autocommit": "keyword2",
	"odbc_binmode": "keyword2",
	"odbc_close": "keyword2",
	"odbc_close_all": "keyword2",
	"odbc_columnprivileges": "keyword2",
	"odbc_columns": "keyword2",
	"odbc_commit": "keyword2",
	"odbc_connect": "keyword2",
	"odbc_cursor": "keyword2",
	"odbc_data_source": "keyword2",
	"odbc_do": "keyword2",
	"odbc_error": "keyword2",
	"odbc_errormsg": "keyword2",
	"odbc_exec": "keyword2",
	"odbc_execute": "keyword2",
	"odbc_fetch_array": "keyword2",
	"odbc_fetch_into": "keyword2",
	"odbc_fetch_object": "keyword2",
	"odbc_fetch_row": "keyword2",
	"odbc_field_len": "keyword2",
	"odbc_field_name": "keyword2",
	"odbc_field_num": "keyword2",
	"odbc_field_precision": "keyword2",
	"odbc_field_scale": "keyword2",
	"odbc_field_type": "keyword2",
	"odbc_foreignkeys": "keyword2",
	"odbc_free_result": "keyword2",
	"odbc_gettypeinfo": "keyword2",
	"odbc_longreadlen": "keyword2",
	"odbc_next_result": "keyword2",
	"odbc_num_fields": "keyword2",
	"odbc_num_rows": "keyword2",
	"odbc_pconnect": "keyword2",
	"odbc_prepare": "keyword2",
	"odbc_primarykeys": "keyword2",
	"odbc_procedurecolumns": "keyword2",
	"odbc_procedures": "keyword2",
	"odbc_result": "keyword2",
	"odbc_result_all": "keyword2",
	"odbc_rollback": "keyword2",
	"odbc_setoption": "keyword2",
	"odbc_specialcolumns": "keyword2",
	"odbc_statistics": "keyword2",
	"odbc_tableprivileges": "keyword2",
	"odbc_tables": "keyword2",
	"old_function": "keyword1",
	"open_listen_sock": "keyword2",
	"opendir": "keyword2",
	"openlog": "keyword2",
	"openssl_error_string": "keyword2",
	"openssl_free_key": "keyword2",
	"openssl_get_privatekey": "keyword2",
	"openssl_get_publickey": "keyword2",
	"openssl_open": "keyword2",
	"openssl_pkcs7_decrypt": "keyword2",
	"openssl_pkcs7_encrypt": "keyword2",
	"openssl_pkcs7_sign": "keyword2",
	"openssl_pkcs7_verify": "keyword2",
	"openssl_seal": "keyword2",
	"openssl_sign": "keyword2",
	"openssl_verify": "keyword2",
	"openssl_x509_checkpurpose": "keyword2",
	"openssl_x509_free": "keyword2",
	"openssl_x509_parse": "keyword2",
	"openssl_x509_read": "keyword2",
	"or": "operator",
	"ora_bind": "keyword2",
	"ora_close": "keyword2",
	"ora_columnname": "keyword2",
	"ora_columnsize": "keyword2",
	"ora_columntype": "keyword2",
	"ora_commit": "keyword2",
	"ora_commitoff": "keyword2",
	"ora_commiton": "keyword2",
	"ora_do": "keyword2",
	"ora_error": "keyword2",
	"ora_errorcode": "keyword2",
	"ora_exec": "keyword2",
	"ora_fetch": "keyword2",
	"ora_fetch_into": "keyword2",
	"ora_getcolumn": "keyword2",
	"ora_logoff": "keyword2",
	"ora_logon": "keyword2",
	"ora_numcols": "keyword2",
	"ora_numrows": "keyword2",
	"ora_open": "keyword2",
	"ora_parse": "keyword2",
	"ora_plogon": "keyword2",
	"ora_rollback": "keyword2",
	"orbit_caught_exception": "keyword2",
	"orbit_exception_id": "keyword2",
	"orbit_exception_value": "keyword2",
	"orbit_get_repository_id": "keyword2",
	"orbit_load_idl": "keyword2",
	"ord": "keyword2",
	"output": "keyword2",
	"ovrimos_close": "keyword2",
	"ovrimos_close_all": "keyword2",
	"ovrimos_commit": "keyword2",
	"ovrimos_connect": "keyword2",
	"ovrimos_cursor": "keyword2",
	"ovrimos_exec": "keyword2",
	"ovrimos_execute": "keyword2",
	"ovrimos_fetch_into": "keyword2",
	"ovrimos_fetch_row": "keyword2",
	"ovrimos_field_len": "keyword2",
	"ovrimos_field_name": "keyword2",
	"ovrimos_field_num": "keyword2",
	"ovrimos_field_type": "keyword2",
	"ovrimos_free_result": "keyword2",
	"ovrimos_longreadlen": "keyword2",
	"ovrimos_num_fields": "keyword2",
	"ovrimos_num_rows": "keyword2",
	"ovrimos_prepare": "keyword2",
	"ovrimos_result": "keyword2",
	"ovrimos_result_all": "keyword2",
	"ovrimos_rollback": "keyword2",
	"pack": "keyword2",
	"parse_ini_file": "keyword2",
	"parse_str": "keyword2",
	"parse_url": "keyword2",
	"passthru": "keyword2",
	"pathinfo": "keyword2",
	"pclose": "keyword2",
	"pdf_add_annotation": "keyword2",
	"pdf_add_bookmark": "keyword2",
	"pdf_add_launchlink": "keyword2",
	"pdf_add_locallink": "keyword2",
	"pdf_add_note": "keyword2",
	"pdf_add_outline": "keyword2",
	"pdf_add_pdflink": "keyword2",
	"pdf_add_thumbnail": "keyword2",
	"pdf_add_weblink": "keyword2",
	"pdf_arc": "keyword2",
	"pdf_arcn": "keyword2",
	"pdf_attach_file": "keyword2",
	"pdf_begin_page": "keyword2",
	"pdf_begin_pattern": "keyword2",
	"pdf_begin_template": "keyword2",
	"pdf_circle": "keyword2",
	"pdf_clip": "keyword2",
	"pdf_close": "keyword2",
	"pdf_close_image": "keyword2",
	"pdf_close_pdi": "keyword2",
	"pdf_close_pdi_page": "keyword2",
	"pdf_closepath": "keyword2",
	"pdf_closepath_fill_stroke": "keyword2",
	"pdf_closepath_stroke": "keyword2",
	"pdf_concat": "keyword2",
	"pdf_continue_text": "keyword2",
	"pdf_curveto": "keyword2",
	"pdf_delete": "keyword2",
	"pdf_end_page": "keyword2",
	"pdf_end_pattern": "keyword2",
	"pdf_end_template": "keyword2",
	"pdf_endpath": "keyword2",
	"pdf_fill": "keyword2",
	"pdf_fill_stroke": "keyword2",
	"pdf_findfont": "keyword2",
	"pdf_get_buffer": "keyword2",
	"pdf_get_font": "keyword2",
	"pdf_get_fontname": "keyword2",
	"pdf_get_fontsize": "keyword2",
	"pdf_get_image_height": "keyword2",
	"pdf_get_image_width": "keyword2",
	"pdf_get_parameter": "keyword2",
	"pdf_get_pdi_parameter": "keyword2",
	"pdf_get_pdi_value": "keyword2",
	"pdf_get_value": "keyword2",
	"pdf_initgraphics": "keyword2",
	"pdf_lineto": "keyword2",
	"pdf_makespotcolor": "keyword2",
	"pdf_moveto": "keyword2",
	"pdf_new": "keyword2",
	"pdf_open": "keyword2",
	"pdf_open_ccitt": "keyword2",
	"pdf_open_file": "keyword2",
	"pdf_open_gif": "keyword2",
	"pdf_open_image": "keyword2",
	"pdf_open_image_file": "keyword2",
	"pdf_open_jpeg": "keyword2",
	"pdf_open_memory_image": "keyword2",
	"pdf_open_pdi": "keyword2",
	"pdf_open_pdi_page": "keyword2",
	"pdf_open_png": "keyword2",
	"pdf_open_tiff": "keyword2",
	"pdf_place_image": "keyword2",
	"pdf_place_pdi_page": "keyword2",
	"pdf_rect": "keyword2",
	"pdf_restore": "keyword2",
	"pdf_rotate": "keyword2",
	"pdf_save": "keyword2",
	"pdf_scale": "keyword2",
	"pdf_set_border_color": "keyword2",
	"pdf_set_border_dash": "keyword2",
	"pdf_set_border_style": "keyword2",
	"pdf_set_char_spacing": "keyword2",
	"pdf_set_duration": "keyword2",
	"pdf_set_font": "keyword2",
	"pdf_set_horiz_scaling": "keyword2",
	"pdf_set_info": "keyword2",
	"pdf_set_info_author": "keyword2",
	"pdf_set_info_creator": "keyword2",
	"pdf_set_info_keywords": "keyword2",
	"pdf_set_info_subject": "keyword2",
	"pdf_set_info_title": "keyword2",
	"pdf_set_leading": "keyword2",
	"pdf_set_parameter": "keyword2",
	"pdf_set_text_pos": "keyword2",
	"pdf_set_text_rendering": "keyword2",
	"pdf_set_text_rise": "keyword2",
	"pdf_set_transition": "keyword2",
	"pdf_set_value": "keyword2",
	"pdf_set_word_spacing": "keyword2",
	"pdf_setcolor": "keyword2",
	"pdf_setdash": "keyword2",
	"pdf_setflat": "keyword2",
	"pdf_setfont": "keyword2",
	"pdf_setgray": "keyword2",
	"pdf_setgray_fill": "keyword2",
	"pdf_setgray_stroke": "keyword2",
	"pdf_setlinecap": "keyword2",
	"pdf_setlinejoin": "keyword2",
	"pdf_setlinewidth": "keyword2",
	"pdf_setmatrix": "keyword2",
	"pdf_setmiterlimit": "keyword2",
	"pdf_setpolydash": "keyword2",
	"pdf_setrgbcolor": "keyword2",
	"pdf_setrgbcolor_fill": "keyword2",
	"pdf_setrgbcolor_stroke": "keyword2",
	"pdf_show": "keyword2",
	"pdf_show_boxed": "keyword2",
	"pdf_show_xy": "keyword2",
	"pdf_skew": "keyword2",
	"pdf_stringwidth": "keyword2",
	"pdf_stroke": "keyword2",
	"pdf_translate": "keyword2",
	"pfpro_cleanup": "keyword2",
	"pfpro_init": "keyword2",
	"pfpro_process": "keyword2",
	"pfpro_process_raw": "keyword2",
	"pfpro_version": "keyword2",
	"pfsockopen": "keyword2",
	"pg_client_encoding": "keyword2",
	"pg_clientencoding": "keyword2",
	"pg_close": "keyword2",
	"pg_cmdtuples": "keyword2",
	"pg_connect": "keyword2",
	"pg_convert": "keyword2",
	"pg_dbname": "keyword2",
	"pg_delete": "keyword2",
	"pg_end_copy": "keyword2",
	"pg_errormessage": "keyword2",
	"pg_exec": "keyword2",
	"pg_fetch_all": "keyword2",
	"pg_fetch_array": "keyword2",
	"pg_fetch_assoc": "keyword2",
	"pg_fetch_object": "keyword2",
	"pg_fetch_row": "keyword2",
	"pg_fieldisnull": "keyword2",
	"pg_fieldname": "keyword2",
	"pg_fieldnum": "keyword2",
	"pg_fieldprtlen": "keyword2",
	"pg_fieldsize": "keyword2",
	"pg_fieldtype": "keyword2",
	"pg_freeresult": "keyword2",
	"pg_get_notify": "keyword2",
	"pg_get_pid": "keyword2",
	"pg_getlastoid": "keyword2",
	"pg_host": "keyword2",
	"pg_insert": "keyword2",
	"pg_loclose": "keyword2",
	"pg_locreate": "keyword2",
	"pg_loexport": "keyword2",
	"pg_loimport": "keyword2",
	"pg_loopen": "keyword2",
	"pg_loread": "keyword2",
	"pg_loreadall": "keyword2",
	"pg_lounlink": "keyword2",
	"pg_lowrite": "keyword2",
	"pg_meta_data": "keyword2",
	"pg_numfields": "keyword2",
	"pg_numrows": "keyword2",
	"pg_options": "keyword2",
	"pg_pconnect": "keyword2",
	"pg_ping": "keyword2",
	"pg_port": "keyword2",
	"pg_put_line": "keyword2",
	"pg_result": "keyword2",
	"pg_result_seek": "keyword2",
	"pg_select": "keyword2",
	"pg_set_client_encoding": "keyword2",
	"pg_setclientencoding": "keyword2",
	"pg_trace": "keyword2",
	"pg_tty": "keyword2",
	"pg_unescape_bytea": "keyword2",
	"pg_untrace": "keyword2",
	"pg_update": "keyword2",
	"php_logo_guid": "keyword2",
	"php_sapi_name": "keyword2",
	"php_uname": "keyword2",
	"phpcredits": "keyword2",
	"phpinfo": "keyword2",
	"phpversion": "keyword2",
	"pi": "keyword2",
	"png2wbmp": "keyword2",
	"popen": "keyword2",
	"pos": "keyword2",
	"posix_ctermid": "keyword2",
	"posix_getcwd": "keyword2",
	"posix_getegid": "keyword2",
	"posix_geteuid": "keyword2",
	"posix_getgid": "keyword2",
	"posix_getgrgid": "keyword2",
	"posix_getgrnam": "keyword2",
	"posix_getgroups": "keyword2",
	"posix_getlogin": "keyword2",
	"posix_getpgid": "keyword2",
	"posix_getpgrp": "keyword2",
	"posix_getpid": "keyword2",
	"posix_getppid": "keyword2",
	"posix_getpwnam": "keyword2",
	"posix_getpwuid": "keyword2",
	"posix_getrlimit": "keyword2",
	"posix_getsid": "keyword2",
	"posix_getuid": "keyword2",
	"posix_isatty": "keyword2",
	"posix_kill": "keyword2",
	"posix_mkfifo": "keyword2",
	"posix_setegid": "keyword2",
	"posix_seteuid": "keyword2",
	"posix_setgid": "keyword2",
	"posix_setpgid": "keyword2",
	"posix_setsid": "keyword2",
	"posix_setuid": "keyword2",
	"posix_times": "keyword2",
	"posix_ttyname": "keyword2",
	"posix_uname": "keyword2",
	"pow": "keyword2",
	"preg_grep": "keyword2",
	"preg_match": "keyword2",
	"preg_match_all": "keyword2",
	"preg_quote": "keyword2",
	"preg_replace": "keyword2",
	"preg_replace_callback": "keyword2",
	"preg_split": "keyword2",
	"prev": "keyword2",
	"print_r": "keyword2",
	"printer_abort": "keyword2",
	"printer_close": "keyword2",
	"printer_create_brush": "keyword2",
	"printer_create_dc": "keyword2",
	"printer_create_font": "keyword2",
	"printer_create_pen": "keyword2",
	"printer_delete_brush": "keyword2",
	"printer_delete_dc": "keyword2",
	"printer_delete_font": "keyword2",
	"printer_delete_pen": "keyword2",
	"printer_draw_bmp": "keyword2",
	"printer_draw_chord": "keyword2",
	"printer_draw_elipse": "keyword2",
	"printer_draw_line": "keyword2",
	"printer_draw_pie": "keyword2",
	"printer_draw_rectangle": "keyword2",
	"printer_draw_roundrect": "keyword2",
	"printer_draw_text": "keyword2",
	"printer_end_doc": "keyword2",
	"printer_end_page": "keyword2",
	"printer_get_option": "keyword2",
	"printer_list": "keyword2",
	"printer_logical_fontheight": "keyword2",
	"printer_open": "keyword2",
	"printer_select_brush": "keyword2",
	"printer_select_font": "keyword2",
	"printer_select_pen": "keyword2",
	"printer_set_option": "keyword2",
	"printer_start_doc": "keyword2",
	"printer_start_page": "keyword2",
	"printer_write": "keyword2",
	"printf": "keyword2",
	"private": "keyword1",
	"protected": "keyword1",
	"pspell_add_to_personal": "keyword2",
	"pspell_add_to_session": "keyword2",
	"pspell_check": "keyword2",
	"pspell_clear_session": "keyword2",
	"pspell_config_create": "keyword2",
	"pspell_config_ignore": "keyword2",
	"pspell_config_mode": "keyword2",
	"pspell_config_personal": "keyword2",
	"pspell_config_repl": "keyword2",
	"pspell_config_runtogether": "keyword2",
	"pspell_config_save_repl": "keyword2",
	"pspell_new": "keyword2",
	"pspell_new_config": "keyword2",
	"pspell_new_personal": "keyword2",
	"pspell_save_wordlist": "keyword2",
	"pspell_store_replacement": "keyword2",
	"pspell_suggest": "keyword2",
	"public": "keyword1",
	"putenv": "keyword2",
	"qdom_error": "keyword2",
	"qdom_tree": "keyword2",
	"quoted_printable_decode": "keyword2",
	"quotemeta": "keyword2",
	"rad2deg": "keyword2",
	"rand": "keyword2",
	"range": "keyword2",
	"rawurldecode": "keyword2",
	"rawurlencode": "keyword2",
	"read": "keyword2",
	"read_exif_data": "keyword2",
	"readdir": "keyword2",
	"readfile": "keyword2",
	"readgzfile": "keyword2",
	"readline": "keyword2",
	"readline_add_history": "keyword2",
	"readline_clear_history": "keyword2",
	"readline_completion_function": "keyword2",
	"readline_info": "keyword2",
	"readline_list_history": "keyword2",
	"readline_read_history": "keyword2",
	"readline_write_history": "keyword2",
	"readlink": "keyword2",
	"readv": "keyword2",
	"realpath": "keyword2",
	"recode": "keyword2",
	"recode_file": "keyword2",
	"recode_string": "keyword2",
	"recv": "keyword2",
	"recvfrom": "keyword2",
	"recvmsg": "keyword2",
	"register_shutdown_function": "keyword2",
	"register_tick_function": "keyword2",
	"remove": "keyword2",
	"rename": "keyword2",
	"require": "keyword1",
	"require_once": "keyword1",
	"reset": "keyword2",
	"restore_error_handler": "keyword2",
	"return": "keyword1",
	"rewind": "keyword2",
	"rewinddir": "keyword2",
	"rmdir": "keyword2",
	"rotate": "keyword2",
	"rotateto": "keyword2",
	"round": "keyword2",
	"rsort": "keyword2",
	"rtrim": "keyword2",
	"satellite_caught_exception": "keyword2",
	"satellite_exception_id": "keyword2",
	"satellite_exception_value": "keyword2",
	"satellite_get_repository_id": "keyword2",
	"satellite_load_idl": "keyword2",
	"save": "keyword2",
	"savetofile": "keyword2",
	"scale": "keyword2",
	"scaleto": "keyword2",
	"scandir": "keyword2",
	"select": "keyword2",
	"sem_acquire": "keyword2",
	"sem_get": "keyword2",
	"sem_release": "keyword2",
	"send": "keyword2",
	"sendmsg": "keyword2",
	"sendto": "keyword2",
	"serialize": "keyword2",
	"session_cache_limiter": "keyword2",
	"session_decode": "keyword2",
	"session_destroy": "keyword2",
	"session_encode": "keyword2",
	"session_get_cookie_params": "keyword2",
	"session_id": "keyword2",
	"session_is_registered": "keyword2",
	"session_module_name": "keyword2",
	"session_name": "keyword2",
	"session_register": "keyword2",
	"session_save_path": "keyword2",
	"session_set_cookie_params": "keyword2",
	"session_set_save_handler": "keyword2",
	"session_start": "keyword2",
	"session_unregister": "keyword2",
	"session_unset": "keyword2",
	"session_write_close": "keyword2",
	"set_content": "keyword2",
	"set_error_handler": "keyword2",
	"set_file_buffer": "keyword2",
	"set_iovec": "keyword2",
	"set_magic_quotes_runtime": "keyword2",
	"set_nonblock": "keyword2",
	"set_socket_blocking": "keyword2",
	"set_time_limit": "keyword2",
	"setaction": "keyword2",
	"setbackground": "keyword2",
	"setbounds": "keyword2",
	"setcolor": "keyword2",
	"setcookie": "keyword2",
	"setdepth": "keyword2",
	"setdimension": "keyword2",
	"setdown": "keyword2",
	"setfont": "keyword2",
	"setframes": "keyword2",
	"setheight": "keyword2",
	"sethit": "keyword2",
	"setindentation": "keyword2",
	"setleftfill": "keyword2",
	"setleftmargin": "keyword2",
	"setline": "keyword2",
	"setlinespacing": "keyword2",
	"setlocale": "keyword2",
	"setmargins": "keyword2",
	"setmatrix": "keyword2",
	"setname": "keyword2",
	"setover": "keyword2",
	"setrate": "keyword2",
	"setratio": "keyword2",
	"setrightfill": "keyword2",
	"setrightmargin": "keyword2",
	"setsockopt": "keyword2",
	"setspacing": "keyword2",
	"settype": "keyword2",
	"setup": "keyword2",
	"sha1": "keyword2",
	"sha1_file": "keyword2",
	"shell_exec": "keyword2",
	"shm_attach": "keyword2",
	"shm_detach": "keyword2",
	"shm_get_var": "keyword2",
	"shm_put_var": "keyword2",
	"shm_remove": "keyword2",
	"shm_remove_var": "keyword2",
	"shmop_close": "keyword2",
	"shmop_delete": "keyword2",
	"shmop_open": "keyword2",
	"shmop_read": "keyword2",
	"shmop_size": "keyword2",
	"shmop_write": "keyword2",
	"show_source": "keyword2",
	"shuffle": "keyword2",
	"shutdown": "keyword2",
	"signal": "keyword2",
	"similar_text": "keyword2",
	"sin": "keyword2",
	"sizeof": "keyword2",
	"skewx": "keyword2",
	"skewxto": "keyword2",
	"skewy": "keyword2",
	"skewyto": "keyword2",
	"sleep": "keyword2",
	"snmp_get_quick_print": "keyword2",
	"snmp_set_quick_print": "keyword2",
	"snmpget": "keyword2",
	"snmprealwalk": "keyword2",
	"snmpset": "keyword2",
	"snmpwalk": "keyword2",
	"snmpwalkoid": "keyword2",
	"socket": "keyword2",
	"socket_get_status": "keyword2",
	"socket_set_blocking": "keyword2",
	"socket_set_timeout": "keyword2",
	"socketpair": "keyword2",
	"sort": "keyword2",
	"soundex": "keyword2",
	"split": "keyword2",
	"spliti": "keyword2",
	"sprintf": "keyword2",
	"sql_regcase": "keyword2",
	"sqrt": "keyword2",
	"srand": "keyword2",
	"sscanf": "keyword2",
	"stat": "keyword2",
	"static": "keyword1",
	"str_pad": "keyword2",
	"str_repeat": "keyword2",
	"str_replace": "keyword2",
	"str_rot13": "keyword2",
	"str_split": "keyword2",
	"str_word_count": "keyword2",
	"strcasecmp": "keyword2",
	"strchr": "keyword2",
	"strcmp": "keyword2",
	"strcoll": "keyword2",
	"strcspn": "keyword2",
	"stream_context_create": "keyword2",
	"stream_context_set_option": "keyword2",
	"stream_context_set_params": "keyword2",
	"stream_filter_append": "keyword2",
	"stream_filter_prepend": "keyword2",
	"stream_get_status": "keyword2",
	"stream_select": "keyword2",
	"stream_set_blocking": "keyword2",
	"stream_set_timeout": "keyword2",
	"streammp3": "keyword2",
	"strerror": "keyword2",
	"strftime": "keyword2",
	"strip_tags": "keyword2",
	"stripcslashes": "keyword2",
	"stripos": "keyword2",
	"stripslashes": "keyword2",
	"stristr": "keyword2",
	"strlen": "keyword2",
	"strnatcasecmp": "keyword2",
	"strnatcmp": "keyword2",
	"strncasecmp": "keyword2",
	"strncmp": "keyword2",
	"strpbrk": "keyword2",
	"strpos": "keyword2",
	"strrchr": "keyword2",
	"strrev": "keyword2",
	"strrpos": "keyword2",
	"strspn": "keyword2",
	"strstr": "keyword2",
	"strtok": "keyword2",
	"strtolower": "keyword2",
	"strtotime": "keyword2",
	"strtoupper": "keyword2",
	"strtr": "keyword2",
	"strval": "keyword2",
	"substr": "keyword2",
	"substr_compare": "keyword2",
	"substr_count": "keyword2",
	"substr_replace": "keyword2",
	"swf_actiongeturl": "keyword2",
	"swf_actiongotoframe": "keyword2",
	"swf_actiongotolabel": "keyword2",
	"swf_actionnextframe": "keyword2",
	"swf_actionplay": "keyword2",
	"swf_actionprevframe": "keyword2",
	"swf_actionsettarget": "keyword2",
	"swf_actionstop": "keyword2",
	"swf_actiontogglequality": "keyword2",
	"swf_actionwaitforframe": "keyword2",
	"swf_addbuttonrecord": "keyword2",
	"swf_addcolor": "keyword2",
	"swf_closefile": "keyword2",
	"swf_definebitmap": "keyword2",
	"swf_definefont": "keyword2",
	"swf_defineline": "keyword2",
	"swf_definepoly": "keyword2",
	"swf_definerect": "keyword2",
	"swf_definetext": "keyword2",
	"swf_endbutton": "keyword2",
	"swf_enddoaction": "keyword2",
	"swf_endshape": "keyword2",
	"swf_endsymbol": "keyword2",
	"swf_fontsize": "keyword2",
	"swf_fontslant": "keyword2",
	"swf_fonttracking": "keyword2",
	"swf_getbitmapinfo": "keyword2",
	"swf_getfontinfo": "keyword2",
	"swf_getframe": "keyword2",
	"swf_labelframe": "keyword2",
	"swf_lookat": "keyword2",
	"swf_modifyobject": "keyword2",
	"swf_mulcolor": "keyword2",
	"swf_nextid": "keyword2",
	"swf_oncondition": "keyword2",
	"swf_openfile": "keyword2",
	"swf_ortho": "keyword2",
	"swf_ortho2": "keyword2",
	"swf_perspective": "keyword2",
	"swf_placeobject": "keyword2",
	"swf_polarview": "keyword2",
	"swf_popmatrix": "keyword2",
	"swf_posround": "keyword2",
	"swf_pushmatrix": "keyword2",
	"swf_removeobject": "keyword2",
	"swf_rotate": "keyword2",
	"swf_scale": "keyword2",
	"swf_setfont": "keyword2",
	"swf_setframe": "keyword2",
	"swf_shapearc": "keyword2",
	"swf_shapecurveto": "keyword2",
	"swf_shapecurveto3": "keyword2",
	"swf_shapefillbitmapclip": "keyword2",
	"swf_shapefillbitmaptile": "keyword2",
	"swf_shapefilloff": "keyword2",
	"swf_shapefillsolid": "keyword2",
	"swf_shapelinesolid": "keyword2",
	"swf_shapelineto": "keyword2",
	"swf_shapemoveto": "keyword2",
	"swf_showframe": "keyword2",
	"swf_startbutton": "keyword2",
	"swf_startdoaction": "keyword2",
	"swf_startshape": "keyword2",
	"swf_startsymbol": "keyword2",
	"swf_textwidth": "keyword2",
	"swf_translate": "keyword2",
	"swf_viewport": "keyword2",
	"swfaction": "keyword2",
	"swfbitmap": "keyword2",
	"swfbutton": "keyword2",
	"swfbutton_keypress": "keyword2",
	"swffill": "keyword2",
	"swffont": "keyword2",
	"swfgradient": "keyword2",
	"swfmorph": "keyword2",
	"swfmovie": "keyword2",
	"swfshape": "keyword2",
	"swfsprite": "keyword2",
	"swftext": "keyword2",
	"swftextfield": "keyword2",
	"switch": "keyword1",
	"sybase_affected_rows": "keyword2",
	"sybase_close": "keyword2",
	"sybase_connect": "keyword2",
	"sybase_data_seek": "keyword2",
	"sybase_fetch_array": "keyword2",
	"sybase_fetch_field": "keyword2",
	"sybase_fetch_object": "keyword2",
	"sybase_fetch_row": "keyword2",
	"sybase_field_seek": "keyword2",
	"sybase_free_result": "keyword2",
	"sybase_get_last_message": "keyword2",
	"sybase_min_client_severity": "keyword2",
	"sybase_min_error_severity": "keyword2",
	"sybase_min_message_severity": "keyword2",
	"sybase_min_server_severity": "keyword2",
	"sybase_num_fields": "keyword2",
	"sybase_num_rows": "keyword2",
	"sybase_pconnect": "keyword2",
	"sybase_query": "keyword2",
	"sybase_result": "keyword2",
	"sybase_select_db": "keyword2",
	"symlink": "keyword2",
	"syslog": "keyword2",
	"system": "keyword2",
	"tan": "keyword2",
	"tempnam": "keyword2",
	"textdomain": "keyword2",
	"throw": "keyword1",
	"time": "keyword2",
	"time_nanosleep": "keyword2",
	"tmpfile": "keyword2",
	"touch": "keyword2",
	"trigger_error": "keyword2",
	"trim": "keyword2",
	"true": "keyword3",
	"try": "keyword1",
	"uasort": "keyword2",
	"ucfirst": "keyword2",
	"ucwords": "keyword2",
	"udm_add_search_limit": "keyword2",
	"udm_alloc_agent": "keyword2",
	"udm_api_version": "keyword2",
	"udm_clear_search_limits": "keyword2",
	"udm_errno": "keyword2",
	"udm_error": "keyword2",
	"udm_find": "keyword2",
	"udm_free_agent": "keyword2",
	"udm_free_ispell_data": "keyword2",
	"udm_free_res": "keyword2",
	"udm_get_doc_count": "keyword2",
	"udm_get_res_field": "keyword2",
	"udm_get_res_param": "keyword2",
	"udm_load_ispell_data": "keyword2",
	"udm_set_agent_param": "keyword2",
	"uksort": "keyword2",
	"umask": "keyword2",
	"uniqid": "keyword2",
	"unixtojd": "keyword2",
	"unlink": "keyword2",
	"unpack": "keyword2",
	"unregister_tick_function": "keyword2",
	"unserialize": "keyword2",
	"unset": "keyword2",
	"urldecode": "keyword2",
	"urlencode": "keyword2",
	"user_error": "keyword2",
	"usleep": "keyword2",
	"usort": "keyword2",
	"utf8_decode": "keyword2",
	"utf8_encode": "keyword2",
	"var": "keyword1",
	"var_dump": "keyword2",
	"velocis_autocommit": "keyword2",
	"velocis_close": "keyword2",
	"velocis_commit": "keyword2",
	"velocis_connect": "keyword2",
	"velocis_exec": "keyword2",
	"velocis_fetch": "keyword2",
	"velocis_fieldname": "keyword2",
	"velocis_fieldnum": "keyword2",
	"velocis_freeresult": "keyword2",
	"velocis_off_autocommit": "keyword2",
	"velocis_result": "keyword2",
	"velocis_rollback": "keyword2",
	"virtual": "keyword2",
	"vpopmail_add_alias_domain": "keyword2",
	"vpopmail_add_alias_domain_ex": "keyword2",
	"vpopmail_add_domain": "keyword2",
	"vpopmail_add_domain_ex": "keyword2",
	"vpopmail_add_user": "keyword2",
	"vpopmail_auth_user": "keyword2",
	"vpopmail_del_domain": "keyword2",
	"vpopmail_del_domain_ex": "keyword2",
	"vpopmail_del_user": "keyword2",
	"vpopmail_error": "keyword2",
	"vpopmail_passwd": "keyword2",
	"vpopmail_set_user_quota": "keyword2",
	"wddx_add_vars": "keyword2",
	"wddx_deserialize": "keyword2",
	"wddx_packet_end": "keyword2",
	"wddx_packet_start": "keyword2",
	"wddx_serialize_value": "keyword2",
	"wddx_serialize_vars": "keyword2",
	"while": "keyword1",
	"wordwrap": "keyword2",
	"write": "keyword2",
	"writev": "keyword2",
	"xml_error_string": "keyword2",
	"xml_get_current_byte_index": "keyword2",
	"xml_get_current_column_number": "keyword2",
	"xml_get_current_line_number": "keyword2",
	"xml_get_error_code": "keyword2",
	"xml_parse": "keyword2",
	"xml_parse_into_struct": "keyword2",
	"xml_parser_create": "keyword2",
	"xml_parser_create_ns": "keyword2",
	"xml_parser_free": "keyword2",
	"xml_parser_get_option": "keyword2",
	"xml_parser_set_option": "keyword2",
	"xml_set_character_data_handler": "keyword2",
	"xml_set_default_handler": "keyword2",
	"xml_set_element_handler": "keyword2",
	"xml_set_end_namespace_decl_handler": "keyword2",
	"xml_set_external_entity_ref_handler": "keyword2",
	"xml_set_notation_decl_handler": "keyword2",
	"xml_set_object": "keyword2",
	"xml_set_processing_instruction_handler": "keyword2",
	"xml_set_start_namespace_decl_handler": "keyword2",
	"xml_set_unparsed_entity_decl_handler": "keyword2",
	"xmldoc": "keyword2",
	"xmldocfile": "keyword2",
	"xmltree": "keyword2",
	"xpath_eval": "keyword2",
	"xpath_eval_expression": "keyword2",
	"xptr_eval": "keyword2",
	"xslt_closelog": "keyword2",
	"xslt_create": "keyword2",
	"xslt_errno": "keyword2",
	"xslt_error": "keyword2",
	"xslt_fetch_result": "keyword2",
	"xslt_free": "keyword2",
	"xslt_openlog": "keyword2",
	"xslt_output_begintransform": "keyword2",
	"xslt_output_endtransform": "keyword2",
	"xslt_process": "keyword2",
	"xslt_run": "keyword2",
	"xslt_set_base": "keyword2",
	"xslt_set_encoding": "keyword2",
	"xslt_set_error_handler": "keyword2",
	"xslt_set_sax_handler": "keyword2",
	"xslt_set_scheme_handler": "keyword2",
	"xslt_transform": "keyword2",
	"yaz_addinfo": "keyword2",
	"yaz_ccl_conf": "keyword2",
	"yaz_ccl_parse": "keyword2",
	"yaz_close": "keyword2",
	"yaz_connect": "keyword2",
	"yaz_database": "keyword2",
	"yaz_element": "keyword2",
	"yaz_errno": "keyword2",
	"yaz_error": "keyword2",
	"yaz_hits": "keyword2",
	"yaz_itemorder": "keyword2",
	"yaz_present": "keyword2",
	"yaz_range": "keyword2",
	"yaz_record": "keyword2",
	"yaz_scan": "keyword2",
	"yaz_scan_result": "keyword2",
	"yaz_search": "keyword2",
	"yaz_syntax": "keyword2",
	"yaz_wait": "keyword2",
	"yp_all": "keyword2",
	"yp_cat": "keyword2",
	"yp_err_string": "keyword2",
	"yp_errno": "keyword2",
	"yp_first": "keyword2",
	"yp_get_default_domain": "keyword2",
	"yp_master": "keyword2",
	"yp_match": "keyword2",
	"yp_next": "keyword2",
	"yp_order": "keyword2",
	"zend_logo_guid": "keyword2",
	"zend_test_func": "keyword2",
	"zend_version": "keyword2",
	"zzip_close": "keyword2",
	"zzip_closedir": "keyword2",
	"zzip_entry_compressedsize": "keyword2",
	"zzip_entry_compressionmethod": "keyword2",
	"zzip_entry_filesize": "keyword2",
	"zzip_entry_name": "keyword2",
	"zzip_open": "keyword2",
	"zzip_opendir": "keyword2",
	"zzip_read": "keyword2",
	"zzip_readdir": "keyword2",
}

# Keywords dict for php_php_literal ruleset (this ruleset defines no keywords).
php_php_literal_keywords_dict = {}

# Keywords dict for php_javascript ruleset (this ruleset defines no keywords).
php_javascript_keywords_dict = {}

# Keywords dict for php_javascript_php ruleset (this ruleset defines no keywords).
php_javascript_php_keywords_dict = {}

# Keywords dict for php_phpdoc ruleset.
# Every recognised phpdoc tag maps to the same token kind, so build the
# mapping with a comprehension over the tag list instead of spelling out
# each pair by hand.
php_phpdoc_keywords_dict = {
    tag: "label"
    for tag in (
        "@abstract",
        "@access",
        "@author",
        "@category",
        "@copyright",
        "@deprecated",
        "@example",
        "@filesource",
        "@final",
        "@global",
        "@id",
        "@ignore",
        "@inheritdoc",
        "@internal",
        "@license",
        "@link",
        "@name",
        "@package",
        "@param",
        "@return",
        "@see",
        "@since",
        "@source",
        "@static",
        "@staticvar",
        "@subpackage",
        "@toc",
        "@todo",
        "@tutorial",
        "@uses",
        "@var",
        "@version",
    )
}

# Dictionary of keywords dictionaries for php mode: maps each ruleset
# name to its keywords dict so the colorizer can look them up at runtime.
keywordsDictDict = dict(
    php_javascript=php_javascript_keywords_dict,
    php_javascript_php=php_javascript_php_keywords_dict,
    php_main=php_main_keywords_dict,
    php_php=php_php_keywords_dict,
    php_php_literal=php_php_literal_keywords_dict,
    php_phpdoc=php_phpdoc_keywords_dict,
    php_tags=php_tags_keywords_dict,
    php_tags_literal=php_tags_literal_keywords_dict,
)

# Rules for php_main ruleset.
# Each rule function probes string s at index i via one of the colorer's
# matcher methods and returns that method's result.  The dict at the end
# routes rules by first character; rules sharing a key are listed with the
# more specific begin-string first ("<?php" before "<?" before "<").

def rule0(colorer, s, i):
    # <?php ... ?> block: markup span, contents delegated to ruleset PHP.
    return colorer.match_span(s, i, kind="markup", begin="<?php", end="?>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule1(colorer, s, i):
    # <? ... ?> short open tag, also delegated to ruleset PHP.
    return colorer.match_span(s, i, kind="markup", begin="<?", end="?>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule2(colorer, s, i):
    # ASP-style <%= ... %> echo block, delegated to ruleset PHP.
    return colorer.match_span(s, i, kind="markup", begin="<%=", end="%>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule3(colorer, s, i):
    # HTML comment <!-- ... -->, no delegate.
    return colorer.match_span(s, i, kind="comment1", begin="<!--", end="-->",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule4(colorer, s, i):
    # <SCRIPT LANGUAGE="PHP"> ... </SCRIPT> (regexp begin), delegated to PHP.
    return colorer.match_span_regexp(s, i, kind="markup", begin="<SCRIPT\\s+LANGUAGE=\"?PHP\"?>", end="</SCRIPT>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule5(colorer, s, i):
    # Any other <SCRIPT ... </SCRIPT> block: delegated to JAVASCRIPT.
    return colorer.match_span(s, i, kind="markup", begin="<SCRIPT", end="</SCRIPT>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="JAVASCRIPT",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule6(colorer, s, i):
    # <STYLE ... </STYLE> block: delegated to the CSS ruleset of html mode.
    return colorer.match_span(s, i, kind="markup", begin="<STYLE", end="</STYLE>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="html::CSS",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule7(colorer, s, i):
    # <!DOCTYPE-style declaration <! ... >: delegated to xml mode's DTD-TAGS.
    return colorer.match_span(s, i, kind="keyword2", begin="<!", end=">",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="xml::DTD-TAGS",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule8(colorer, s, i):
    # Generic tag < ... >: delegated to the TAGS ruleset.
    return colorer.match_span(s, i, kind="markup", begin="<", end=">",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="TAGS",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule9(colorer, s, i):
    # HTML entity &...;  — no_word_break=True so a bare '&' in text
    # does not swallow the rest of the line.
    return colorer.match_span(s, i, kind="literal2", begin="&", end=";",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=True)

# Rules dict for main ruleset.
rulesDict1 = {
	"&": [rule9,],
	"<": [rule0,rule1,rule2,rule3,rule4,rule5,rule6,rule7,rule8,],
}

# Rules for php_tags ruleset.
# Active inside an HTML tag (delegate "TAGS" from rule8 above): PHP escapes
# are still recognized, attribute values are colored as literals.

def rule10(colorer, s, i):
    # <?php ... ?> inside a tag: delegated to ruleset PHP.
    return colorer.match_span(s, i, kind="markup", begin="<?php", end="?>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule11(colorer, s, i):
    # <? ... ?> short open tag inside a tag: delegated to ruleset PHP.
    return colorer.match_span(s, i, kind="markup", begin="<?", end="?>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule12(colorer, s, i):
    # <%= ... %> echo block inside a tag: delegated to ruleset PHP.
    return colorer.match_span(s, i, kind="markup", begin="<%=", end="%>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule13(colorer, s, i):
    # Double-quoted attribute value; delegated to TAGS_LITERAL so embedded
    # PHP escapes are still colored.
    return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="TAGS_LITERAL",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule14(colorer, s, i):
    # Single-quoted attribute value; same delegate as rule13.
    return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="TAGS_LITERAL",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule15(colorer, s, i):
    # '=' between attribute name and value.
    return colorer.match_seq(s, i, kind="operator", seq="=",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

# Rules dict for tags ruleset.
rulesDict2 = {
	"\"": [rule13,],
	"'": [rule14,],
	"<": [rule10,rule11,rule12,],
	"=": [rule15,],
}

# Rules for php_tags_literal ruleset.
# Active inside a quoted attribute value (delegate "TAGS_LITERAL"): only
# PHP escape spans are recognized; everything else keeps the literal color.

def rule16(colorer, s, i):
    # <?php ... ?> inside an attribute value: delegated to ruleset PHP.
    return colorer.match_span(s, i, kind="markup", begin="<?php", end="?>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule17(colorer, s, i):
    # <? ... ?> short open tag inside an attribute value.
    return colorer.match_span(s, i, kind="markup", begin="<?", end="?>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule18(colorer, s, i):
    # <%= ... %> echo block inside an attribute value.
    return colorer.match_span(s, i, kind="markup", begin="<%=", end="%>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

# Rules dict for tags_literal ruleset.
rulesDict3 = {
	"<": [rule16,rule17,rule18,],
}

# Rules for php_php ruleset.
# Active inside PHP code (delegate "PHP").  rule19-rule26 are spans
# (comments, strings, heredoc); rule27 marks $variables; rule28-rule54 are
# single/double-character operator sequences; rule55 colors the identifier
# before '(' as a function name; rule56 matches keywords.

def rule19(colorer, s, i):
    # /** ... */ doc comment: delegated to the PHPDOC ruleset.
    return colorer.match_span(s, i, kind="comment3", begin="/**", end="*/",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHPDOC",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule20(colorer, s, i):
    # /* ... */ plain block comment (must come after rule19 under "/").
    return colorer.match_span(s, i, kind="comment1", begin="/*", end="*/",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule21(colorer, s, i):
    # Double-quoted string: $variables inside are colored via PHP_LITERAL.
    return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP_LITERAL",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule22(colorer, s, i):
    # Single-quoted string: no interpolation, so no delegate.
    return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule23(colorer, s, i):
    # Backtick (shell-execution) string: interpolates like "..." strings.
    return colorer.match_span(s, i, kind="literal1", begin="`", end="`",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP_LITERAL",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule24(colorer, s, i):
    # // line comment (after rule19/rule20 under "/").
    return colorer.match_eol_span(s, i, kind="comment2", seq="//",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False)

def rule25(colorer, s, i):
    # # line comment.
    return colorer.match_eol_span(s, i, kind="comment1", seq="#",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False)

def rule26(colorer, s, i):
    # Heredoc <<<NAME ... NAME: the end string "$1" back-references the
    # identifier captured by the begin regexp ([[: ... :]] are POSIX classes,
    # jEdit-mode style).
    return colorer.match_span_regexp(s, i, kind="literal1", begin="<<<[[:space:]'\"]*([[:alnum:]_]+)[[:space:]'\"]*", end="$1",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP_LITERAL",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule27(colorer, s, i):
    # $variable: color from '$' to the next syntax token.
    return colorer.match_mark_following(s, i, kind="keyword3", pattern="$",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=False)

# rule28-rule54: operator sequences.
# NOTE(review): rule28 and rule33 are identical "=" matchers and both are
# registered under "=" in rulesDict4 — a harmless generator artifact.

def rule28(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="=",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule29(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="->",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule30(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="!",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule31(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=">=",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule32(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="<=",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule33(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="=",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule34(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="+",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule35(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="-",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule36(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="/",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule37(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="*",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule38(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=">",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule39(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="<",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule40(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="%",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule41(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="&",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule42(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="|",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule43(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="^",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule44(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="~",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule45(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=".",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule46(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="}",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule47(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="{",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule48(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=",",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule49(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=";",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule50(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="]",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule51(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="[",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule52(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="?",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule53(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="@",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule54(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=":",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule55(colorer, s, i):
    # Color the identifier preceding '(' as a function name; the '(' itself
    # is excluded from the highlight (exclude_match=True).
    return colorer.match_mark_previous(s, i, kind="function", pattern="(",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=True)

def rule56(colorer, s, i):
    # Keyword lookup against php_php_keywords_dict (via keywordsDictDict).
    return colorer.match_keywords(s, i)

# Rules dict for php ruleset.
# Keyed by first character; every identifier-start character routes to the
# keyword matcher rule56.
rulesDict4 = {
	"!": [rule30,],
	"\"": [rule21,],
	"#": [rule25,],
	"$": [rule27,],
	"%": [rule40,],
	"&": [rule41,],
	"'": [rule22,],
	"(": [rule55,],
	"*": [rule37,],
	"+": [rule34,],
	",": [rule48,],
	"-": [rule29,rule35,],
	".": [rule45,],
	"/": [rule19,rule20,rule24,rule36,],
	"0": [rule56,],
	"1": [rule56,],
	"2": [rule56,],
	"3": [rule56,],
	"4": [rule56,],
	"5": [rule56,],
	"6": [rule56,],
	"7": [rule56,],
	"8": [rule56,],
	"9": [rule56,],
	":": [rule54,],
	";": [rule49,],
	"<": [rule26,rule32,rule39,],
	"=": [rule28,rule33,],
	">": [rule31,rule38,],
	"?": [rule52,],
	"@": [rule53,rule56,],
	"A": [rule56,],
	"B": [rule56,],
	"C": [rule56,],
	"D": [rule56,],
	"E": [rule56,],
	"F": [rule56,],
	"G": [rule56,],
	"H": [rule56,],
	"I": [rule56,],
	"J": [rule56,],
	"K": [rule56,],
	"L": [rule56,],
	"M": [rule56,],
	"N": [rule56,],
	"O": [rule56,],
	"P": [rule56,],
	"Q": [rule56,],
	"R": [rule56,],
	"S": [rule56,],
	"T": [rule56,],
	"U": [rule56,],
	"V": [rule56,],
	"W": [rule56,],
	"X": [rule56,],
	"Y": [rule56,],
	"Z": [rule56,],
	"[": [rule51,],
	"]": [rule50,],
	"^": [rule43,],
	"_": [rule56,],
	"`": [rule23,],
	"a": [rule56,],
	"b": [rule56,],
	"c": [rule56,],
	"d": [rule56,],
	"e": [rule56,],
	"f": [rule56,],
	"g": [rule56,],
	"h": [rule56,],
	"i": [rule56,],
	"j": [rule56,],
	"k": [rule56,],
	"l": [rule56,],
	"m": [rule56,],
	"n": [rule56,],
	"o": [rule56,],
	"p": [rule56,],
	"q": [rule56,],
	"r": [rule56,],
	"s": [rule56,],
	"t": [rule56,],
	"u": [rule56,],
	"v": [rule56,],
	"w": [rule56,],
	"x": [rule56,],
	"y": [rule56,],
	"z": [rule56,],
	"{": [rule47,],
	"|": [rule42,],
	"}": [rule46,],
	"~": [rule44,],
}

# Rules for php_php_literal ruleset.

def rule57(colorer, s, i):
    """Color a '$variable' reference inside a PHP string literal (keyword3)."""
    return colorer.match_mark_following(
        s,
        i,
        kind="keyword3",
        pattern="$",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        exclude_match=False,
    )

# Rules dict for php_literal ruleset.
# Inside interpolating strings only '$' triggers a rule (rule57 above).
rulesDict5 = {
	"$": [rule57,],
}

# Rules for php_javascript ruleset.

def rule58(colorer, s, i):
    """Close of a <SCRIPT ...> tag: color '>' as markup, then hand the
    following text to the JAVASCRIPT+PHP ruleset."""
    return colorer.match_seq(
        s,
        i,
        kind="markup",
        seq=">",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="JAVASCRIPT+PHP",
    )

def rule59(colorer, s, i):
    """'SRC=' attribute inside a <SCRIPT> tag: color it as markup and
    switch back to the HTML rules for the attribute value."""
    return colorer.match_seq(
        s,
        i,
        kind="markup",
        seq="SRC=",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="BACK_TO_HTML",
    )

# Rules dict for javascript ruleset.
# Keyed by first character: '>' ends the open tag (rule58), 'S' starts a
# possible 'SRC=' attribute (rule59).
rulesDict6 = {
	">": [rule58,],
	"S": [rule59,],
}

# Rules for php_javascript_php ruleset.
# PHP escapes recognized while colorizing JavaScript; note the delegates use
# the mode-qualified name "php::PHP" (cross-mode reference), unlike the
# plain "PHP" used in the main ruleset.

def rule60(colorer, s, i):
    # <?php ... ?> block inside JavaScript.
    return colorer.match_span(s, i, kind="markup", begin="<?php", end="?>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="php::PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule61(colorer, s, i):
    # <? ... ?> short open tag inside JavaScript.
    return colorer.match_span(s, i, kind="markup", begin="<?", end="?>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="php::PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule62(colorer, s, i):
    # <%= ... %> echo block inside JavaScript.
    return colorer.match_span(s, i, kind="markup", begin="<%=", end="%>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="php::PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)


# Rules dict for javascript_php ruleset.
rulesDict7 = {
	"<": [rule60,rule61,rule62,],
}

# Rules for php_phpdoc ruleset.
# Active inside /** ... */ doc comments (delegate "PHPDOC" from rule19).
# rule70 matches the @tag keywords from php_phpdoc_keywords_dict; rules
# 66-68 keep bare '<<', '<=', '< ' from being mistaken for tag starts by
# being listed before the generic '<' span (rule69) under the '<' key.

def rule63(colorer, s, i):
    # '{' kept in the doc-comment color (e.g. inline {@link ...} braces
    # — presumably; TODO confirm against Leo's phpdoc handling).
    return colorer.match_seq(s, i, kind="comment3", seq="{",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule64(colorer, s, i):
    # Leading '*' of each doc-comment line stays comment-colored.
    return colorer.match_seq(s, i, kind="comment3", seq="*",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule65(colorer, s, i):
    # <!-- ... --> inside a doc comment.
    return colorer.match_span(s, i, kind="comment2", begin="<!--", end="-->",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule66(colorer, s, i):
    return colorer.match_seq(s, i, kind="comment3", seq="<<",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule67(colorer, s, i):
    return colorer.match_seq(s, i, kind="comment3", seq="<=",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule68(colorer, s, i):
    return colorer.match_seq(s, i, kind="comment3", seq="< ",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule69(colorer, s, i):
    # HTML tag inside a doc comment: delegated to xml mode's TAGS ruleset;
    # no_line_break=True so an unterminated '<' cannot run past the line.
    return colorer.match_span(s, i, kind="markup", begin="<", end=">",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="xml::TAGS",exclude_match=False,
        no_escape=False, no_line_break=True, no_word_break=False)

def rule70(colorer, s, i):
    # @tag keyword lookup against php_phpdoc_keywords_dict.
    return colorer.match_keywords(s, i)

# Rules dict for phpdoc ruleset.
rulesDict8 = {
	"*": [rule64,],
	"0": [rule70,],
	"1": [rule70,],
	"2": [rule70,],
	"3": [rule70,],
	"4": [rule70,],
	"5": [rule70,],
	"6": [rule70,],
	"7": [rule70,],
	"8": [rule70,],
	"9": [rule70,],
	"<": [rule65,rule66,rule67,rule68,rule69,],
	"@": [rule70,],
	"A": [rule70,],
	"B": [rule70,],
	"C": [rule70,],
	"D": [rule70,],
	"E": [rule70,],
	"F": [rule70,],
	"G": [rule70,],
	"H": [rule70,],
	"I": [rule70,],
	"J": [rule70,],
	"K": [rule70,],
	"L": [rule70,],
	"M": [rule70,],
	"N": [rule70,],
	"O": [rule70,],
	"P": [rule70,],
	"Q": [rule70,],
	"R": [rule70,],
	"S": [rule70,],
	"T": [rule70,],
	"U": [rule70,],
	"V": [rule70,],
	"W": [rule70,],
	"X": [rule70,],
	"Y": [rule70,],
	"Z": [rule70,],
	"_": [rule70,],
	"a": [rule70,],
	"b": [rule70,],
	"c": [rule70,],
	"d": [rule70,],
	"e": [rule70,],
	"f": [rule70,],
	"g": [rule70,],
	"h": [rule70,],
	"i": [rule70,],
	"j": [rule70,],
	"k": [rule70,],
	"l": [rule70,],
	"m": [rule70,],
	"n": [rule70,],
	"o": [rule70,],
	"p": [rule70,],
	"q": [rule70,],
	"r": [rule70,],
	"s": [rule70,],
	"t": [rule70,],
	"u": [rule70,],
	"v": [rule70,],
	"w": [rule70,],
	"x": [rule70,],
	"y": [rule70,],
	"z": [rule70,],
	"{": [rule63,],
}

# x.rulesDictDict for php mode.
# Maps each ruleset name to its first-character dispatch table above.
rulesDictDict = {
	"php_javascript": rulesDict6,
	"php_javascript_php": rulesDict7,
	"php_main": rulesDict1,
	"php_php": rulesDict4,
	"php_php_literal": rulesDict5,
	"php_phpdoc": rulesDict8,
	"php_tags": rulesDict2,
	"php_tags_literal": rulesDict3,
}

# Import dict for php mode.
# NOTE(review): presumably appends javascript mode's main rules to the
# php_javascript_php ruleset (jEdit-style IMPORT) — confirm in Leo's
# colorizer before relying on it.
importDict = {
	"php_javascript_php": ["javascript_main",],
}

#@+node:ekr.20060824111500.14: *3* jEdit docs...
@nocolor
#@+node:ekr.20060824111500.15: *4* @url http://www.jedit.org/42docs/users-guide/writing-modes-part.html
#@+node:ekr.20060824111500.16: *4* Rule ordering
You might encounter this very common pitfall when writing your own modes.

Since jEdit checks buffer text against parser rules in the order they appear in
the ruleset, more specific rules must be placed before generalized ones,
otherwise the generalized rules will catch everything.
#@+node:ekr.20060824111500.17: *4* Attributes
#@+node:ekr.20060824111500.18: *5*  Documentation of attributes
#@+node:ekr.20060824111500.19: *6* AT_xxx
#@+node:ekr.20060824111500.20: *7* AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.21: *7* AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.22: *7* AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.23: *6* AT_CHAR (int)
The number of characters to terminate after.

For terminate only.
#@+node:ekr.20060824111500.24: *6* EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@+node:ekr.20060824111500.25: *6* NO_xxx
#@+node:ekr.20060824111500.26: *7* NO_WORD_BREAK (bool)
If set to TRUE, the span will not cross word breaks.

For 'span' only.
#@+node:ekr.20060824111500.27: *7* NO_LINE_BREAK (bool)
If set to TRUE, the span will not cross line breaks.

For 'span' only.
#@+node:ekr.20060824111500.28: *7* NO_ESCAPE (bool)
If set to TRUE, the ruleset's escape character will have no effect before the
span's end string. Otherwise, the presence of the escape character will cause
that occurrence of the end string to be ignored.

For 'span' only.
#@+node:ekr.20060824111500.29: *6*  For 'rules' only
#@+node:ekr.20060824111500.30: *7* DEFAULT
The token type for text which doesn't match any specific rule. Default is NULL.
See the section called “Token Types” for a list of token types.
#@+node:ekr.20060824111500.31: *7* DIGIT_RE (re) & HIGHLIGHT_DIGITS  (bool)
If the HIGHLIGHT_DIGITS attribute is set to TRUE, jEdit will attempt to highlight numbers in this ruleset.

Any word consisting entirely of digits (0-9) will be highlighted with the DIGIT token type.

A word that contains other letters in addition to digits will be highlighted with the DIGIT token type only if it matches the regular expression specified in the DIGIT_RE attribute. If this attribute is not specified, it will not be highlighted.

Here is an example DIGIT_RE regular expression that highlights Java-style numeric literals (normal numbers, hexadecimals prefixed with 0x, numbers suffixed with various type indicators, and floating point literals containing an exponent):

DIGIT_RE="(0x[[:xdigit:]]+|[[:digit:]]+(e[[:digit:]]*)?)[lLdDfF]?"
#@+node:ekr.20060824111500.32: *7* IGNORE_CASE (bool)
If set to FALSE, matches will be case sensitive.
Otherwise, case will not matter. Default is TRUE.
#@+node:ekr.20060824111500.33: *7* SET
The name of this ruleset. All rulesets other than the first must have a name.
#@+node:ekr.20060824111500.34: *7* NO_WORD_SEP
Any non-alphanumeric character not in this list is treated as a word separator for the purposes of syntax highlighting.
#@+node:ekr.20060824111500.35: *6* DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@+node:ekr.20060824111500.36: *6* HASH_CHAR (char) For 'regx'
Attribute           Elements used in
---------           ----------------
HASH_CHAR           EOL_SPAN_REGEXP,SPAN_REGEXP,SEQ_REGEXP

It must be set to the first character that the regular expression matches. This
rules out using regular expressions which can match more than one character at
the start position. The regular expression match cannot span more than one line,
either.
#@+node:ekr.20060824111500.37: *6* TYPE (Token Types)
The token type to highlight the text with.

Parser rules can highlight tokens using any of the following token types:

NULL - no special highlighting
COMMENT1,COMMENT2,COMMENT3,COMMENT4
FUNCTION
KEYWORD1,KEYWORD2,KEYWORD3,KEYWORD4
LABEL
LITERAL1,LITERAL2,LITERAL3,LITERAL4
MARKUP
OPERATOR
#@+node:ekr.20060824111500.38: *5* Attributes for 'begin' & 'end'
Attribute           Elements used in
---------           ----------------
AT_LINE_START       BEGIN,END
AT_WHITESPACE_END   BEGIN,END
AT_WORD_START       BEGIN,END
#@+node:ekr.20060824111500.39: *5* Attributes for 'import': delegate
The only required attribute DELEGATE must be set to the name of a ruleset. To
import a ruleset defined in the current mode, just specify its name. To import a
ruleset defined in another mode, specify a name of the form mode::ruleset. Note
that the first (unnamed) ruleset in a mode is called “MAIN”.

One quirk is that the definition of the imported ruleset is not copied to the
location of the IMPORT tag, but rather to the end of the containing ruleset.
This has implications with rule-ordering; see the section called “Rule Ordering
Requirements”.
#@+node:ekr.20060824111500.40: *5* Attributes for 'mark'
Attribute           Elements used in
---------           ----------------
AT_LINE_START       MARK_FOLLOWING,MARK_PREVIOUS
AT_WHITESPACE_END   MARK_FOLLOWING,MARK_PREVIOUS
AT_WORD_START       MARK_FOLLOWING,MARK_PREVIOUS 
EXCLUDE_MATCH       MARK_FOLLOWING,MARK_PREVIOUS
TYPE                MARK_FOLLOWING,MARK_PREVIOUS
#@+node:ekr.20060824111500.41: *5* Attributes for 'property': name, value
Attribute           Elements used in
---------           ----------------
NAME                PROPERTY
VALUE               PROPERTY
#@+node:ekr.20060824111500.42: *5* Attributes for 'rules'
Attribute           Elements used in
---------           ----------------
DEFAULT             RULES
DIGIT_RE            RULES
HIGHLIGHT_DIGITS    RULES
IGNORE_CASE         RULES
SET                 RULES
NO_WORD_SEP         RULES

The RULES tag supports the following attributes, all of which are optional: 

SET the name of this ruleset. All rulesets other than the first must have a
name.

IGNORE_CASE if set to FALSE, matches will be case sensitive. Otherwise, case
will not matter. Default is TRUE.

NO_WORD_SEP Any non-alphanumeric character not in this list is treated as a word
separator for the purposes of syntax highlighting.

DEFAULT The token type for text which doesn't match any specific rule. Default
is NULL. See the section called “Token Types” for a list of token types.

HIGHLIGHT_DIGITS DIGIT_RE

If the HIGHLIGHT_DIGITS attribute is set to TRUE, jEdit will attempt to
highlight numbers in this ruleset.

Any word consisting entirely of digits (0-9) will be highlighted with the DIGIT
token type. A word that contains other letters in addition to digits will be
highlighted with the DIGIT token type only if it matches the regular expression
specified in the DIGIT_RE attribute. If this attribute is not specified, it will
not be highlighted.
#@+node:ekr.20060824111500.43: *5* Attributes for 'span'
all_spans   = EOL_SPAN,EOL_SPAN_REGEXP,SPAN,SPAN_REGEXP
plain_spans = SPAN,SPAN_REGEXP
regx_spans  = EOL_SPAN_REGEXP,SPAN_REGEXP

Attribute           Elements used in
---------           ----------------
AT_LINE_START       all_spans
AT_WHITESPACE_END   all_spans
AT_WORD_START       all_spans
DELEGATE            all_spans
EXCLUDE_MATCH       all_spans
HASH_CHAR           regx_spans  (must be specified)
NO_ESCAPE           plain_spans
NO_LINE_BREAK       plain_spans
NO_WORD_BREAK       plain_spans
TYPE                all_spans
#@+node:ekr.20060824111500.44: *5* Attributes for 'seq'
Attribute           Elements used in
---------           ----------------
AT_LINE_START       SEQ,SEQ_REGEXP
AT_WHITESPACE_END   SEQ,SEQ_REGEXP
AT_WORD_START       SEQ,SEQ_REGEXP
DELEGATE            SEQ,SEQ_REGEXP
HASH_CHAR           SEQ_REGEXP (must be specified)
TYPE                SEQ,SEQ_REGEXP
#@+node:ekr.20060824111500.45: *4* Elements
@language html
@color
#@+node:ekr.20060824111500.46: *5* Spans and seqs...
#@+node:ekr.20060824111500.47: *6* eol_span
An EOL_SPAN is similar to a SPAN except that highlighting stops at the end of
the line, and no end sequence needs to be specified. The text to match is
specified between the opening and closing EOL_SPAN tags.

The following attributes are supported:

TYPE - The token type to highlight the span with. See the section called “Token
Types” for a list of token types.

AT_LINE_START - If set to TRUE, the span will only be highlighted if the start
sequence occurs at the beginning of a line.

AT_WHITESPACE_END - If set to TRUE, the span will only be highlighted if the
sequence is the first non-whitespace text in the line.

AT_WORD_START - If set to TRUE, the span will only be highlighted if the start
sequence occurs at the beginning of a word.

DELEGATE - text inside the span will be highlighted with the specified ruleset.
To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

EXCLUDE_MATCH - If set to TRUE, the start and end sequences will not be
highlighted, only the text between them will.

Here is an EOL_SPAN that highlights C++ comments:

<EOL_SPAN TYPE="COMMENT1">//</EOL_SPAN>
#@+node:ekr.20060824111500.48: *6* eol_span_regexp
The EOL_SPAN_REGEXP rule is similar to the EOL_SPAN rule except the match
sequence is taken to be a regular expression.

In addition to the attributes supported by the EOL_SPAN tag, the HASH_CHAR
attribute must be specified. It must be set to the first character that the
regular expression matches. This rules out using regular expressions which can
match more than one character at the start position. The regular expression
match cannot span more than one line, either.

Here is an EOL_SPAN_REGEXP that highlights MS-DOS batch file comments, which
start with REM, followed by any whitespace character, and extend until the end
of the line:

<EOL_SPAN_REGEXP AT_WHITESPACE_END="TRUE" HASH_CHAR="R" TYPE="COMMENT1">REM\s</EOL_SPAN_REGEXP>
#@+node:ekr.20060824111500.49: *6* mark_following
The MARK_FOLLOWING rule, which must be placed inside a RULES tag, highlights
from the start of the match to the next syntax token. The text to match is
specified between opening and closing MARK_FOLLOWING tags.

The following attributes are supported:

TYPE - The token type to highlight the text with. See the section called “Token
Types” for a list of token types.

AT_LINE_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a line.

AT_WHITESPACE_END - If set to TRUE, the sequence will only be highlighted if it
is the first non-whitespace text in the line.

AT_WORD_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a word.

EXCLUDE_MATCH - If set to TRUE, the match will not be highlighted, only the text
after it will.

Here is a rule that highlights variables in Unix shell scripts (“$CLASSPATH”,“$IFS”, etc):

<MARK_FOLLOWING TYPE="KEYWORD2">$</MARK_FOLLOWING>
#@+node:ekr.20060824111500.50: *6* mark_previous
The MARK_PREVIOUS rule, which must be placed inside a RULES tag, highlights from
the end of the previous syntax token to the matched text. The text to match is
specified between opening and closing MARK_PREVIOUS tags. The following
attributes are supported:

TYPE - The token type to highlight the text with. See the section called “Token
Types” for a list of token types.

AT_LINE_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a line.

AT_WHITESPACE_END - If set to TRUE, the sequence will only be highlighted if it
is the first non-whitespace text in the line.

AT_WORD_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a word.

EXCLUDE_MATCH - If set to TRUE, the match will not be highlighted, only the text
before it will.

Here is a rule that highlights labels in Java mode (for example, “XXX:”):

<MARK_PREVIOUS AT_WHITESPACE_END="TRUE" EXCLUDE_MATCH="TRUE">:</MARK_PREVIOUS>
#@+node:ekr.20060824111500.51: *6* seq
The SEQ rule, which must be placed inside a RULES tag, highlights fixed
sequences of text. The text to highlight is specified between opening and
closing SEQ tags. The following attributes are supported:

TYPE - the token type to highlight the sequence with. See the section called
“Token Types” for a list of token types.

AT_LINE_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a line.

AT_WHITESPACE_END - If set to TRUE, the sequence will only be highlighted if it
is the first non-whitespace text in the line.

AT_WORD_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a word.

DELEGATE - if this attribute is specified, all text after the sequence will be
highlighted using this ruleset. To delegate to a ruleset defined in the current
mode, just specify its name. To delegate to a ruleset defined in another mode,
specify a name of the form mode::ruleset. Note that the first (unnamed) ruleset
in a mode is called “MAIN”.

The following rules highlight a few Java operators:

<SEQ TYPE="OPERATOR">+</SEQ>
<SEQ TYPE="OPERATOR">-</SEQ>
<SEQ TYPE="OPERATOR">*</SEQ>
<SEQ TYPE="OPERATOR">/</SEQ>
#@+node:ekr.20060824111500.52: *6* seq_regexp
The SEQ_REGEXP rule is similar to the SEQ rule except the match sequence is
taken to be a regular expression.

In addition to the attributes supported by the SEQ tag, the HASH_CHAR attribute
must be specified. It must be set to the first character that the regular
expression matches. This rules out using regular expressions which can match
more than one character at the start position. The regular expression match
cannot span more than one line, either.

Here is an example of a SEQ_REGEXP rule that highlights Perl's matcher
constructions such as m/(.+):(\d+):(.+)/:

<SEQ_REGEXP TYPE="MARKUP"
    HASH_CHAR="m"
    AT_WORD_START="TRUE"
>m([[:punct:]])(?:.*?[^\\])*?\1[sgiexom]*</SEQ_REGEXP>
#@+node:ekr.20060824111500.53: *6* span
The SPAN rule, which must be placed inside a RULES tag, highlights text between
a start and end string. The start and end strings are specified inside child
elements of the SPAN tag. The following attributes are supported:

TYPE - The token type to highlight the span with. See the section called “Token
Types” for a list of token types.

AT_LINE_START - If set to TRUE, the span will only be highlighted if the start
sequence occurs at the beginning of a line.

AT_WHITESPACE_END - If set to TRUE, the span will only be highlighted if the
start sequence is the first non-whitespace text in the line.

AT_WORD_START - If set to TRUE, the span will only be highlighted if the start
sequence occurs at the beginning of a word.

DELEGATE - text inside the span will be highlighted with the specified ruleset.
To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

EXCLUDE_MATCH - If set to TRUE, the start and end sequences will not be
highlighted, only the text between them will.

NO_ESCAPE - If set to TRUE, the ruleset's escape character will have no effect
before the span's end string. Otherwise, the presence of the escape character
will cause that occurrence of the end string to be ignored.

NO_LINE_BREAK - If set to TRUE, the span will not cross line breaks.

NO_WORD_BREAK - If set to TRUE, the span will not cross word breaks.

Note that the AT_LINE_START, AT_WHITESPACE_END and AT_WORD_START attributes can
also be used on the BEGIN and END elements. Setting these attributes to the same
value on both elements has the same effect as setting them on the SPAN element.

Here is a SPAN that highlights Java string literals, which cannot include line breaks:

<SPAN TYPE="LITERAL1" NO_LINE_BREAK="TRUE">
  <BEGIN>"</BEGIN>
  <END>"</END>
</SPAN>

Here is a SPAN that highlights Java documentation comments by delegating to the “JAVADOC” ruleset defined elsewhere in the current mode:

<SPAN TYPE="COMMENT2" DELEGATE="JAVADOC">
  <BEGIN>/**</BEGIN>
  <END>*/</END>
</SPAN>

Here is a SPAN that highlights HTML cascading stylesheets inside <STYLE> tags by delegating to the main ruleset in the CSS edit mode:

<SPAN TYPE="MARKUP" DELEGATE="css::MAIN">
  <BEGIN>&lt;style&gt;</BEGIN>
  <END>&lt;/style&gt;</END>
</SPAN>
#@+node:ekr.20060824111500.54: *6* span_regexp
The SPAN_REGEXP rule is similar to the SPAN rule except the start sequence is
taken to be a regular expression.

In addition to the attributes supported by the SPAN tag, the HASH_CHAR attribute
must be specified. It must be set to the first character that the regular
expression matches. This rules out using regular expressions which can match
more than one character at the start position. The regular expression match
cannot span more than one line, either.

Any text matched by groups in the BEGIN regular expression is substituted in the
END string. See below for an example of where this is useful. 

Here is a SPAN_REGEXP rule that highlights “read-ins” in shell scripts:

<SPAN_REGEXP HASH_CHAR="<" TYPE="LITERAL1" DELEGATE="LITERAL">
    <BEGIN><![CDATA[<<[[:space:]'"]*([[:alnum:]_]+)[[:space:]'"]*]]></BEGIN>
    <END>$1</END>
</SPAN_REGEXP>

Here is a SPAN_REGEXP rule that highlights constructs placed between <#ftl and
>, as long as the <#ftl is followed by a word break:

<SPAN_REGEXP TYPE="KEYWORD1" HASH_CHAR="&lt;" DELEGATE="EXPRESSION">
    <BEGIN>&lt;#ftl\&gt;</BEGIN>
    <END>&gt;</END>
</SPAN_REGEXP>
#@+node:ekr.20060824111500.55: *5* All others...
#@+node:ekr.20060824111500.56: *6* import
The IMPORT tag, which must be placed inside a RULES tag, loads all rules defined
in a given ruleset into the current ruleset; in other words, it has the same
effect as copying and pasting the imported ruleset.

The only required attribute DELEGATE must be set to the name of a ruleset. To
import a ruleset defined in the current mode, just specify its name. To import a
ruleset defined in another mode, specify a name of the form mode::ruleset. Note
that the first (unnamed) ruleset in a mode is called “MAIN”.

One quirk is that the definition of the imported ruleset is not copied to the
location of the IMPORT tag, but rather to the end of the containing ruleset.
This has implications with rule-ordering; see the section called “Rule Ordering
Requirements”.

Here is an example from the PHP mode, which extends the inline JavaScript
highlighting to support embedded PHP:


   <RULES SET="JAVASCRIPT+PHP">

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;?php</BEGIN>
       <END>?&gt;</END>
   </SPAN>

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;?</BEGIN>
       <END>?&gt;</END>
   </SPAN>

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;%=</BEGIN>
       <END>%&gt;</END>
   </SPAN>

   <IMPORT DELEGATE="javascript::MAIN"/>
</RULES>
#@+node:ekr.20060824111500.57: *6* keywords (done)
The KEYWORDS tag, which must be placed inside a RULES tag and can only appear
once, specifies a list of keywords to highlight. Keywords are similar to SEQs,
except that SEQs match anywhere in the text, whereas keywords only match whole
words. Words are considered to be runs of text separated by non-alphanumeric
characters.

The KEYWORDS tag does not define any attributes.

Each child element of the KEYWORDS tag is an element whose name is a token type,
and whose content is the keyword to highlight.

<KEYWORDS>
  <KEYWORD1>if</KEYWORD1>
  <KEYWORD1>else</KEYWORD1>
  <KEYWORD3>int</KEYWORD3>
  <KEYWORD3>void</KEYWORD3>
</KEYWORDS>
#@+node:ekr.20060824111500.58: *6* mode (done)
Each mode definition must begin with the following:

<?xml version="1.0"?>
<!DOCTYPE MODE SYSTEM "xmode.dtd">

Each mode definition must also contain exactly one MODE tag. All other tags (PROPS, RULES) must be placed inside the MODE tag. The MODE tag does not have any defined attributes. Here is an example:

<MODE>
    ... mode definition goes here ...
</MODE>
#@+node:ekr.20060824111500.59: *6* props & property  (for auto-indent)
The PROPS tag and the PROPERTY tags inside it are used to define mode-specific
properties. Each PROPERTY tag must have a NAME attribute set to the property's
name, and a VALUE attribute with the property's value.

All buffer-local properties listed in the section called “Buffer-Local
Properties” may be given values in edit modes.

The following mode properties specify commenting strings:

* commentEnd - the comment end string, used by the Range Comment command.
* commentStart - the comment start string, used by the Range Comment command.
* lineComment - the line comment string, used by the Line Comment command. 

When performing auto indent, a number of mode properties determine the resulting indent level:

* The line and the one before it are scanned for brackets listed in the
indentCloseBrackets and indentOpenBrackets properties. Opening brackets in the
previous line increase indent.

If lineUpClosingBracket is set to true, then closing brackets on the current
line will line up with the line containing the matching opening bracket. For
example, in Java mode lineUpClosingBracket is set to true, resulting in brackets
being indented like so:

{
    // Code
    {
        // More code
    }
}

If lineUpClosingBracket is set to false, the line after a closing bracket will
be lined up with the line containing the matching opening bracket. For example,
in Lisp mode lineUpClosingBracket is set to false, resulting in brackets being
indented like so:

(foo 'a-parameter
    (crazy-p)
    (bar baz ()))
(print "hello world")

* If the previous line contains no opening brackets, or if the
doubleBracketIndent property is set to true, the previous line is checked
against the regular expressions in the indentNextLine and indentNextLines
properties. If the previous line matches the former, the indent of the current
line is increased and the subsequent line is shifted back again. If the previous
line matches the latter, the indent of the current and subsequent lines is
increased.

In Java mode, for example, the indentNextLine property is set to match control
structures such as “if”, “else”, “while”, and so on.

The doubleBracketIndent property, if set to the default of false, results in code indented like so:

while(objects.hasNext())
{
    Object next = objects.hasNext();
    if(next instanceof Paintable)
        next.paint(g);
}

On the other hand, setting this property to “true” will give the following result:

while(objects.hasNext())
    {
        Object next = objects.hasNext();
        if(next instanceof Paintable)
            next.paint(g);
    }

Here is the complete <PROPS> tag for Java mode:

<PROPS>
    <PROPERTY NAME="commentStart" VALUE="/*" />
    <PROPERTY NAME="commentEnd" VALUE="*/" />
    <PROPERTY NAME="lineComment" VALUE="//" />
    <PROPERTY NAME="wordBreakChars" VALUE=",+-=&lt;&gt;/?^&amp;*" />

    <!-- Auto indent -->
    <PROPERTY NAME="indentOpenBrackets" VALUE="{" />
    <PROPERTY NAME="indentCloseBrackets" VALUE="}" />
    <PROPERTY NAME="indentNextLine"
    	VALUE="\s*(((if|while)\s*\(|else\s*|else\s+if\s*\(|for\s*\(.*\))[^{;]*)" />
    <!-- set this to 'true' if you want to use GNU coding style -->
    <PROPERTY NAME="doubleBracketIndent" VALUE="false" />
    <PROPERTY NAME="lineUpClosingBracket" VALUE="true" />
</PROPS>
#@+node:ekr.20060824111500.60: *6* rules
RULES tags must be placed inside the MODE tag. Each RULES tag defines a ruleset.
A ruleset consists of a number of parser rules, with each parser rule specifying
how to highlight a specific syntax token. There must be at least one ruleset in
each edit mode. There can also be more than one, with different rulesets being
used to highlight different parts of a buffer (for example, in HTML mode, one
rule set highlights HTML tags, and another highlights inline JavaScript). For
information about using more than one ruleset, see the section called “The SPAN
Tag”.

The RULES tag supports the following attributes, all of which are optional: 

SET the name of this ruleset. All rulesets other than the first must have a
name.

IGNORE_CASE if set to FALSE, matches will be case sensitive. Otherwise, case
will not matter. Default is TRUE.

NO_WORD_SEP Any non-alphanumeric character not in this list is treated as a word
separator for the purposes of syntax highlighting.

DEFAULT The token type for text which doesn't match any specific rule. Default
is NULL. See the section called “Token Types” for a list of token types.

HIGHLIGHT_DIGITS DIGIT_RE

If the HIGHLIGHT_DIGITS attribute is set to TRUE, jEdit will attempt to
highlight numbers in this ruleset.

Any word consisting entirely of digits (0-9) will be highlighted with the DIGIT
token type. A word that contains other letters in addition to digits will be
highlighted with the DIGIT token type only if it matches the regular expression
specified in the DIGIT_RE attribute. If this attribute is not specified, it will
not be highlighted.

Here is an example DIGIT_RE regular expression that highlights Java-style
numeric literals (normal numbers, hexadecimals prefixed with 0x, numbers
suffixed with various type indicators, and floating point literals containing an
exponent):

DIGIT_RE="(0x[[:xdigit:]]+|[[:digit:]]+(e[[:digit:]]*)?)[lLdDfF]?"

Here is an example RULES tag:

<RULES IGNORE_CASE="FALSE" HIGHLIGHT_DIGITS="TRUE">
    ... parser rules go here ...
</RULES>
#@+node:ekr.20060824111500.61: *6* terminate
The TERMINATE rule, which must be placed inside a RULES tag, specifies that
parsing should stop after the specified number of characters have been read from
a line.

The number of characters to terminate after should be specified with the AT_CHAR
attribute. Here is an example:

<TERMINATE AT_CHAR="1" />

This rule is used in Patch mode, for example, because only the first character
of each line affects highlighting.
#@+node:ekr.20060824111500.62: *3* Refactored jEdit docs...
@nocolor
#@+node:ekr.20060824111500.63: *4* @url http://www.jedit.org/42docs/users-guide/writing-modes-part.html
#@+node:ekr.20060824111500.64: *4* Rule ordering
You might encounter this very common pitfall when writing your own modes.

Since jEdit checks buffer text against parser rules in the order they appear in
the ruleset, more specific rules must be placed before generalized ones,
otherwise the generalized rules will catch everything.
#@+node:ekr.20060824111500.65: *4* Attributes
#@+node:ekr.20060824111500.66: *5* AT_CHAR (int)
The number of characters to terminate after.

For terminate only.
#@+node:ekr.20060824111500.19: *5* AT_xxx
#@+node:ekr.20060824111500.20: *6* AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.21: *6* AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.22: *6* AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.35: *5* DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@+node:ekr.20060824111500.24: *5* EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@+node:ekr.20060824111500.67: *5* For rules only
#@+node:ekr.20060824111500.68: *6* DEFAULT (token type)
The token type for text which doesn't match any specific rule. Default is NULL.
See the section called “Token Types” for a list of token types.

For 'rules' only.
#@+node:ekr.20060824111500.69: *6* HIGHLIGHT_DIGITS  &DIGITS_RE (bool)
If the HIGHLIGHT_DIGITS attribute is set to TRUE, jEdit will attempt to highlight numbers in this ruleset.

Any word consisting entirely of digits (0-9) will be highlighted with the DIGIT token type.

A word that contains other letters in addition to digits will be highlighted with the DIGIT token type only if it matches the regular expression specified in the DIGIT_RE attribute. If this attribute is not specified, it will not be highlighted.

For 'rules' only.

Example: a DIGIT_RE regular expression that highlights Java-style
numeric literals (normal numbers, hexadecimals prefixed with 0x, numbers
suffixed with various type indicators, and floating point literals containing an
exponent):

DIGIT_RE="(0x[[:xdigit:]]+|[[:digit:]]+(e[[:digit:]]*)?)[lLdDfF]?"
#@+node:ekr.20060824111500.70: *6* IGNORE_CASE (bool)
If set to FALSE, matches will be case sensitive.
Otherwise, case will not matter. Default is TRUE.

For 'rules' only.
#@+node:ekr.20060824111500.71: *6* SET (string)
The name of this ruleset. All rulesets other than the first must have a name.

For 'rules' only.
#@+node:ekr.20060824111500.72: *6* NO_WORD_SEP
Any non-alphanumeric character not in this list is treated as a word separator
for the purposes of syntax highlighting.

For 'rules' only.
#@+node:ekr.20060824111500.73: *5* HASH_CHAR (char)
The first character that the regular expression matches.

This rules out using regular expressions which can match more than one character
at the start position. The regular expression match cannot span more than one
line, either.

Required for eol_span_regexp, span_regexp, seq_regexp.
#@+node:ekr.20060824111500.74: *5* NAME and VALUE
Each PROPERTY tag must have a NAME attribute set to the property's name, and a
VALUE attribute with the property's value.

For property only.
#@+node:ekr.20060824111500.25: *5* NO_xxx
#@+node:ekr.20060824111500.26: *6* NO_WORD_BREAK (bool)
If set to TRUE, the span will not cross word breaks.

For 'span' only.
#@+node:ekr.20060824111500.27: *6* NO_LINE_BREAK (bool)
If set to TRUE, the span will not cross line breaks.

For 'span' only.
#@+node:ekr.20060824111500.28: *6* NO_ESCAPE (bool)
If set to TRUE, the ruleset's escape character will have no effect before the
span's end string. Otherwise, the presence of the escape character will cause
that occurrence of the end string to be ignored.

For 'span' only.
#@+node:ekr.20060824111500.75: *5* TYPE (Token Types)
The token type to highlight the text with.

Parser rules can highlight tokens using any of the following token types:

NULL - no special highlighting
COMMENT1,COMMENT2,COMMENT3,COMMENT4
FUNCTION
KEYWORD1,KEYWORD2,KEYWORD3,KEYWORD4
LABEL
LITERAL1,LITERAL2,LITERAL3,LITERAL4
MARKUP
OPERATOR
#@+node:ekr.20060824111500.76: *4* Elements (children are attributes)
@language html
@color

All rules must be contained in the RULES element.
#@+node:ekr.20060824111500.77: *5* begin & end
#@+node:ekr.20060824111500.19: *6* AT_xxx
#@+node:ekr.20060824111500.20: *7* AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.21: *7* AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.22: *7* AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.78: *5* eol_span (can use DELEGATE)
An EOL_SPAN is similar to a SPAN except that highlighting stops at the end of
the line, and no end sequence needs to be specified. The text to match is
specified between the opening and closing EOL_SPAN tags.

Attributes: TYPE, AT_xxx, DELEGATE, EXCLUDE_MATCH

Here is an EOL_SPAN that highlights C++ comments:

<EOL_SPAN TYPE="COMMENT1">//</EOL_SPAN>
#@+node:ekr.20060824111500.19: *6* AT_xxx
#@+node:ekr.20060824111500.20: *7* AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.21: *7* AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.22: *7* AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.35: *6* DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@+node:ekr.20060824111500.24: *6* EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@+node:ekr.20060824111500.79: *5* eol_span_regexp (can use DELEGATE)
The EOL_SPAN_REGEXP rule is similar to the EOL_SPAN rule except the match
sequence is taken to be a regular expression.

Attributes: TYPE, AT_xxx, DELEGATE, EXCLUDE_MATCH, HASH_CHAR(required)

An EOL_SPAN_REGEXP that highlights MS-DOS batch file comments, which
start with REM, followed by any whitespace character, and extend until the end
of the line:

<EOL_SPAN_REGEXP AT_WHITESPACE_END="TRUE" HASH_CHAR="R" TYPE="COMMENT1">REM\s</EOL_SPAN_REGEXP>
#@+node:ekr.20060824111500.19: *6* AT_xxx
#@+node:ekr.20060824111500.20: *7* AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.21: *7* AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.22: *7* AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.35: *6* DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@+node:ekr.20060824111500.24: *6* EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@+node:ekr.20060824111500.73: *6* HASH_CHAR (char)
The first character that the regular expression matches.

This rules out using regular expressions which can match more than one character
at the start position. The regular expression match cannot span more than one
line, either.

Required for eol_span_regexp, span_regexp, seq_regexp.
#@+node:ekr.20060824111500.80: *5* import
The IMPORT tag loads all rules defined in a given ruleset into the current
ruleset; in other words, it has the same effect as copying and pasting the
imported ruleset.

Attributes: DELEGATE (required)

An example from the PHP mode, which extends the inline JavaScript
highlighting to support embedded PHP:

<RULES SET="JAVASCRIPT+PHP">

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;?php</BEGIN>
       <END>?&gt;</END>
   </SPAN>

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;?</BEGIN>
       <END>?&gt;</END>
   </SPAN>

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;%=</BEGIN>
       <END>%&gt;</END>
   </SPAN>

   <IMPORT DELEGATE="javascript::MAIN"/>
</RULES>
#@+node:ekr.20060824111500.35: *6* DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@+node:ekr.20060824111500.81: *5* keywords
The KEYWORDS tag can only appear once. It specifies a list of keywords to
highlight.

Keywords are similar to SEQs, except that SEQs match anywhere in the
text, whereas keywords only match whole words. Words are considered to be runs
of text separated by non-alphanumeric characters.

Attributes:  None.

Each child element of the KEYWORDS tag is an element whose name is a token type,
and whose content is the keyword to highlight.

Example:

<KEYWORDS>
  <KEYWORD1>if</KEYWORD1>
  <KEYWORD1>else</KEYWORD1>
  <KEYWORD3>int</KEYWORD3>
  <KEYWORD3>void</KEYWORD3>
</KEYWORDS>
#@+node:ekr.20060824111500.82: *5* mark_following & mark_previous
The MARK_FOLLOWING rule highlights from the start of the match to the next syntax
token. The text to match is specified between opening and closing MARK_FOLLOWING
tags.

The MARK_PREVIOUS rule highlights from the end of the previous syntax token to
the matched text. The text to match is specified between opening and closing
MARK_PREVIOUS tags.

Attributes: TYPE, AT_xxx, EXCLUDE_MATCH

Example:

<MARK_FOLLOWING TYPE="KEYWORD2">$</MARK_FOLLOWING>
#@+node:ekr.20060824111500.19: *6* AT_xxx
#@+node:ekr.20060824111500.20: *7* AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.21: *7* AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.22: *7* AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.24: *6* EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@+node:ekr.20060824111500.83: *5* mode
Each mode definition must begin with the following:

<?xml version="1.0"?>
<!DOCTYPE MODE SYSTEM "xmode.dtd">

Each mode definition must also contain exactly one MODE tag.  All other tags (PROPS, RULES) must be placed inside the MODE tag.

Attributes: None

<MODE>
    ... mode definition goes here ...
</MODE>
#@+node:ekr.20060824111500.84: *5* props & property
The PROPS tag and the PROPERTY tags define mode-specific properties.

Attributes: NAME, VALUE

All buffer-local properties may be given values in edit modes.

EKR: must support at least commentStart, commentEnd, lineComment, and wordBreakChars attributes.

Here is the complete <PROPS> tag for Java mode:

<PROPS>
    <PROPERTY NAME="commentStart" VALUE="/*" />
    <PROPERTY NAME="commentEnd" VALUE="*/" />
    <PROPERTY NAME="lineComment" VALUE="//" />
    <PROPERTY NAME="wordBreakChars" VALUE=",+-=&lt;&gt;/?^&amp;*" />

    <!-- Auto indent -->
    <PROPERTY NAME="indentOpenBrackets" VALUE="{" />
    <PROPERTY NAME="indentCloseBrackets" VALUE="}" />
    <PROPERTY NAME="indentNextLine"
    	VALUE="\s*(((if|while)\s*\(|else\s*|else\s+if\s*\(|for\s*\(.*\))[^{;]*)" />
    <!-- set this to 'true' if you want to use GNU coding style -->
    <PROPERTY NAME="doubleBracketIndent" VALUE="false" />
    <PROPERTY NAME="lineUpClosingBracket" VALUE="true" />
</PROPS>
#@+node:ekr.20060824111500.74: *6* NAME and VALUE
Each PROPERTY tag must have a NAME attribute set to the property's name, and a
VALUE attribute with the property's value.

For property only.
#@+node:ekr.20060824111500.85: *6* Properties for comment strings
The following mode properties specify commenting strings:

commentEnd - the comment end string, used by the Range Comment command.

commentStart - the comment start string, used by the Range Comment command.

lineComment - the line comment string, used by the Line Comment command. 
#@+node:ekr.20060824111500.86: *6* Properties for auto-indent
When performing auto indent, a number of mode properties determine the resulting indent level:



#@+node:ekr.20060824111500.87: *7* indentCloseBrackets and indentOpenBrackets
The line and the one before it are scanned for brackets listed in the
indentCloseBrackets and indentOpenBrackets properties. Opening brackets in the
previous line increase indent.

If lineUpClosingBracket is set to true, then closing brackets on the current
line will line up with the line containing the matching opening bracket. For
example, in Java mode lineUpClosingBracket is set to true, resulting in brackets
being indented like so:

{
    // Code
    {
        // More code
    }
}

If lineUpClosingBracket is set to false, the line after a closing bracket will
be lined up with the line containing the matching opening bracket. For example,
in Lisp mode lineUpClosingBracket is set to false, resulting in brackets being
indented like so:

(foo 'a-parameter
    (crazy-p)
    (bar baz ()))
(print "hello world")
#@+node:ekr.20060824111500.88: *7* doubleBracketIndent
If the previous line contains no opening brackets, or if the
doubleBracketIndent property is set to true, the previous line is checked
against the regular expressions in the indentNextLine and indentNextLines
properties. If the previous line matches the former, the indent of the current
line is increased and the subsequent line is shifted back again. If the previous
line matches the latter, the indent of the current and subsequent lines is
increased.

In Java mode, for example, the indentNextLine property is set to match control
structures such as “if”, “else”, “while”, and so on.

The doubleBracketIndent property, if set to the default of false, results in code indented like so::

    while(objects.hasNext())
    {
        Object next = objects.hasNext();
        if(next instanceof Paintable)
            next.paint(g);
    }

On the other hand, setting this property to “true” will give the following result::

    while(objects.hasNext())
        {
            Object next = objects.hasNext();
            if(next instanceof Paintable)
                next.paint(g);
        }
#@+node:ekr.20060824111500.89: *6* Buffer-Local Properties
Buffer-local properties provide an alternate way to change editor settings on a
per-buffer basis. While changes made in the Buffer Options dialog box are lost
after the buffer is closed, buffer-local properties take effect each time the
file is opened, because they are embedded in the file itself. 

When jEdit loads a file, it checks the first and last 10 lines for
colon-enclosed name/value pairs. For example, placing the following in a buffer
changes the indent width to 4 characters, enables soft tabs, and activates the
Perl edit mode: 

:indentSize=4:noTabs=true:mode=perl:

Adding buffer-local properties to a buffer takes effect after the next time the
buffer is saved. 
#@+node:ekr.20060824111500.90: *7* collapseFolds
Folds with a level of this or higher will be collapsed when the buffer is
opened. If set to zero, all folds will be expanded initially. See the section
called “Folding”.

#@+node:ekr.20060824111500.91: *7* deepIndent
When set to “true”, multiple-line expressions delimited by parentheses are aligned like so::

    retVal.x = (int)(horizontalOffset
        + Chunk.offsetToX(info.chunks,
                          offset));

With this setting disabled, the text would look like so::

    retVal.x = (int)(horizontalOffset
        + Chunk.offsetToX(info.chunks,
        offset));
#@+node:ekr.20060824111500.92: *7* folding
The fold mode; one of “none”, “indent”, “explicit”, or the name of a plugin
folding mode. See the section called “Folding”.
#@+node:ekr.20060824111500.93: *7* indentSize
The width, in characters, of one indent. Must be an integer greater than 0. See
the section called “Tabbing and Indentation”.
#@+node:ekr.20060824111500.94: *7* maxLineLen
The maximum line length and wrap column position. Inserting text beyond this
column will automatically insert a line break at the appropriate position. See
the section called “Inserting and Deleting Text”.

#@+node:ekr.20060824111500.95: *7* mode
The default edit mode for the buffer. See the section called “Edit Modes”. 

#@+node:ekr.20060824111500.96: *7* noTabs
If set to “true”, soft tabs (multiple space characters) will be used instead of
“real” tabs. See the section called “Tabbing and Indentation”.

#@+node:ekr.20060824111500.97: *7* noWordSep
A list of non-alphanumeric characters that are not to be treated as word
separators. Global default is “_”.

tabSize — The tab width. Must be an integer greater than 0. See the section
called “Tabbing and Indentation”.

#@+node:ekr.20060824111500.98: *7* wordBreakChars
Characters, in addition to spaces and tabs, at which lines may be split when
word wrapping. See the section called “Inserting and Deleting Text”.

#@+node:ekr.20060824111500.99: *7* wrap
The word wrap mode; one of “none”, “soft”, or “hard”. See the section called
“Wrapping Long Lines”.
#@+node:ekr.20060824111500.100: *5* rules
For information about using more than one ruleset, see the section called “The SPAN Tag”.

Attributes: SET, IGNORE_CASE, NO_WORD_SEP, DEFAULT, HIGHLIGHT_DIGITS DIGIT_RE

<RULES IGNORE_CASE="FALSE" HIGHLIGHT_DIGITS="TRUE">
    ... parser rules go here ...
</RULES>
#@+node:ekr.20060824111500.67: *6* For rules only
#@+node:ekr.20060824111500.68: *7* DEFAULT (token type)
The token type for text which doesn't match any specific rule. Default is NULL.
See the section called “Token Types” for a list of token types.

For 'rules' only.
#@+node:ekr.20060824111500.69: *7* HIGHLIGHT_DIGITS  &DIGITS_RE (bool)
If the HIGHLIGHT_DIGITS attribute is set to TRUE, jEdit will attempt to highlight numbers in this ruleset.

Any word consisting entirely of digits (0-9) will be highlighted with the DIGIT token type.

A word that contains other letters in addition to digits will be highlighted with the DIGIT token type only if it matches the regular expression specified in the DIGIT_RE attribute. If this attribute is not specified, it will not be highlighted.

For 'rules' only.

Example: a DIGIT_RE regular expression that highlights Java-style
numeric literals (normal numbers, hexadecimals prefixed with 0x, numbers
suffixed with various type indicators, and floating point literals containing an
exponent):

DIGIT_RE="(0x[[:xdigit:]]+|[[:digit:]]+(e[[:digit:]]*)?)[lLdDfF]?"
#@+node:ekr.20060824111500.70: *7* IGNORE_CASE (bool)
If set to FALSE, matches will be case sensitive.
Otherwise, case will not matter. Default is TRUE.

For 'rules' only.
#@+node:ekr.20060824111500.71: *7* SET (string)
The name of this ruleset. All rulesets other than the first must have a name.

For 'rules' only.
#@+node:ekr.20060824111500.72: *7* NO_WORD_SEP
Any non-alphanumeric character not in this list is treated as a word separator
for the purposes of syntax highlighting.

For 'rules' only.
#@+node:ekr.20060824111500.101: *5* seq (can use DELEGATE)
The SEQ rule highlights fixed sequences of text. The text to highlight is
specified between opening and closing SEQ tags. The following attributes are
supported:

Attributes: TYPE, AT_xxx, DELEGATE

Examples:

<SEQ TYPE="OPERATOR">+</SEQ>
<SEQ TYPE="OPERATOR">-</SEQ>
<SEQ TYPE="OPERATOR">*</SEQ>
<SEQ TYPE="OPERATOR">/</SEQ>
#@+node:ekr.20060824111500.19: *6* AT_xxx
#@+node:ekr.20060824111500.20: *7* AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.21: *7* AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.22: *7* AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.35: *6* DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@+node:ekr.20060824111500.102: *5* seq_regexp (can use DELEGATE)
The SEQ_REGEXP rule is similar to the SEQ rule except the match sequence is
taken to be a regular expression.

Attributes: TYPE, AT_xxx, DELEGATE, HASH_CHAR(required)

Example: a SEQ_REGEXP rule that highlights Perl's matcher
constructions such as m/(.+):(\d+):(.+)/:

<SEQ_REGEXP TYPE="MARKUP"
    HASH_CHAR="m"
    AT_WORD_START="TRUE"
>m([[:punct:]])(?:.*?[^\\])*?\1[sgiexom]*</SEQ_REGEXP>
#@+node:ekr.20060824111500.19: *6* AT_xxx
#@+node:ekr.20060824111500.20: *7* AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.21: *7* AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.22: *7* AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.35: *6* DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@+node:ekr.20060824111500.103: *5* span  (can use DELEGATE)
The SPAN rule highlights text between a start and end string. The start and end
strings are specified inside child elements of the SPAN tag.

Attributes: TYPE, AT_xxx, DELEGATE, EXCLUDE_MATCH, NO_xxx,

Example: a SPAN that highlights Java string literals, which cannot include line breaks:

<SPAN TYPE="LITERAL1" NO_LINE_BREAK="TRUE">
  <BEGIN>"</BEGIN>
  <END>"</END>
</SPAN>

Example: a SPAN that highlights Java documentation comments by delegating to the
“JAVADOC” ruleset defined elsewhere in the current mode:

<SPAN TYPE="COMMENT2" DELEGATE="JAVADOC">
  <BEGIN>/**</BEGIN>
  <END>*/</END>
</SPAN>

Example: a SPAN that highlights HTML cascading stylesheets inside <STYLE> tags by
delegating to the main ruleset in the CSS edit mode:

<SPAN TYPE="MARKUP" DELEGATE="css::MAIN">
  <BEGIN>&lt;style&gt;</BEGIN>
  <END>&lt;/style&gt;</END>
</SPAN>
#@+node:ekr.20060824111500.19: *6* AT_xxx
#@+node:ekr.20060824111500.20: *7* AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.21: *7* AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.22: *7* AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.35: *6* DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@+node:ekr.20060824111500.24: *6* EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@+node:ekr.20060824111500.25: *6* NO_xxx
#@+node:ekr.20060824111500.26: *7* NO_WORD_BREAK (bool)
If set to TRUE, the span will not cross word breaks.

For 'span' only.
#@+node:ekr.20060824111500.27: *7* NO_LINE_BREAK (bool)
If set to TRUE, the span will not cross line breaks.

For 'span' only.
#@+node:ekr.20060824111500.28: *7* NO_ESCAPE (bool)
If set to TRUE, the ruleset's escape character will have no effect before the
span's end string. Otherwise, the presence of the escape character will cause
that occurrence of the end string to be ignored.

For 'span' only.
#@+node:ekr.20060824111500.104: *5* span_regexp  (can use DELEGATE)
The SPAN_REGEXP rule is similar to the SPAN rule except the start sequence is
a regular expression.

Attributes: TYPE, AT_xxx, DELEGATE, EXCLUDE_MATCH, NO_xxx, HASH_CHAR (required).

Any text matched by groups in the BEGIN regular expression is substituted in the
END string. See below for an example of where this is useful. 

Example: a SPAN_REGEXP rule that highlights “read-ins” in shell scripts:

<SPAN_REGEXP HASH_CHAR="<" TYPE="LITERAL1" DELEGATE="LITERAL">
    <BEGIN><![CDATA[<<[[:space:]'"]*([[:alnum:]_]+)[[:space:]'"]*]]></BEGIN>
    <END>$1</END>
</SPAN_REGEXP>

Example: a SPAN_REGEXP rule that highlights constructs placed between <#ftl and
>, as long as the <#ftl is followed by a word break:

<SPAN_REGEXP TYPE="KEYWORD1" HASH_CHAR="&lt;" DELEGATE="EXPRESSION">
    <BEGIN>&lt;#ftl\&gt;</BEGIN>
    <END>&gt;</END>
</SPAN_REGEXP>
#@+node:ekr.20060824111500.19: *6* AT_xxx
#@+node:ekr.20060824111500.20: *7* AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.21: *7* AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.22: *7* AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@+node:ekr.20060824111500.35: *6* DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@+node:ekr.20060824111500.24: *6* EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@+node:ekr.20060824111500.25: *6* NO_xxx
#@+node:ekr.20060824111500.26: *7* NO_WORD_BREAK (bool)
If set to TRUE, the span will not cross word breaks.

For 'span' only.
#@+node:ekr.20060824111500.27: *7* NO_LINE_BREAK (bool)
If set to TRUE, the span will not cross line breaks.

For 'span' only.
#@+node:ekr.20060824111500.28: *7* NO_ESCAPE (bool)
If set to TRUE, the ruleset's escape character will have no effect before the
span's end string. Otherwise, the presence of the escape character will cause
that occurrence of the end string to be ignored.

For 'span' only.
#@+node:ekr.20060824111500.73: *6* HASH_CHAR (char)
The first character that the regular expression matches.

This rules out using regular expressions which can match more than one character
at the start position. The regular expression match cannot span more than one
line, either.

Required for eol_span_regexp, span_regexp, seq_regexp.
#@+node:ekr.20060824111500.105: *5* terminate
The TERMINATE rule specifies that parsing should stop after the specified number
of characters have been read from a line.

The number of characters to terminate after should be specified with the AT_CHAR
attribute. Here is an example:

<TERMINATE AT_CHAR="1" />

This rule is used in Patch mode, for example, because only the first character
of each line affects highlighting.
#@+node:ekr.20060824111500.66: *6* AT_CHAR (int)
The number of characters to terminate after.

For terminate only.
#@+node:ekr.20060824111500.106: *3* script: import from modes
# Import every Leo mode module from the modes directory and report
# how many imported cleanly.  (Python 3: print statements converted
# to print() calls.)
import glob

print('-' * 20)
path = r'c:\prog\tigris-cvs\leo\modes'

errors = ['cil',] # End keyword not matched by start.

if 0:
    files = ['python','php']
else:
    files = glob.glob(r'c:\prog\tigris-cvs\leo\modes\*.py')
    files = [g.os_path_split(f)[1] for f in files]
    files = [g.os_path_splitext(f)[0] for f in files]
    # for f in files: print(str(f))

if 1:
    good, bad, skipped = 0,0,0
    for modeName in (files):
        if modeName in errors:
            skipped += 1 ; continue
        mode = g.importFromPath (modeName,path)
        if mode:
            good += 1
            if 0:
                for s in ('properties','rulesDict','importDict'):
                    print(hasattr(mode,s), modeName, s)
        else: bad += 1
    print('good: %d, bad: %d, skipped: %d' % (good,bad,skipped))
#@+node:ekr.20211009081123.1: ** Leo find
#@+node:ekr.20060801093639: *3* script: Find @file nodes
# Print the full path of the nearest ancestor @<file> node.
# (Python 3: print statement converted to print() call.)
current = c.currentPosition()
for p in current.self_and_parents():
    if p.isAnyAtFileNode():
        d = c.scanAllDirectives(p)
        path = d.get('path')
        print(g.os_path_join(path, p.atFileNodeName()))
        break
#@+node:ekr.20130920214241.12461: *3* script: Find all changed methods
# The list of all changed methods in the grand reorg at revs 6016-6020
aList = [
'color','configureBorder','configureFont',
'createBindings','createFindPanel','createFindTab','createFrame',
'createRootWindow',
'disable','enable','forceLogUpdate',
    # Comma added after 'forceLogUpdate': implicit string concatenation
    # had fused it with 'getFont' into the bogus name 'forceLogUpdategetFont'.
'getFont','getFontConfig','getFrame',
'headWidth','interrupt','isEnabled','isSameColorState',
'kill','killGui','killPopupMenu',
'onActivate','onActivateLog',
'recreateRootWindow','restoreAllState',
'saveAllState','setBindings','setCanvasBindings','setColorFromConfig',
'setDisabledHeadlineColors','setEditHeadlineColors','setEditLabelState',
'setFocus','setFont','setFontFromConfig',
'setMinibufferBindings','setTabBindings',
'setUnselectedHeadlineColors','setUnselectedLabelState',
'setWidgetFontFromConfig',
'widthInPixels',
]
g.cls()
seen = set()
p = g.findNodeAnywhere(c,'Plugins')
# Report the first node in the Plugins tree mentioning each '.name'.
for p in p.subtree():
    for name in aList[:]:  # Iterate a copy: aList is mutated below.
        i = 0
        s = p.b
        while i < len(s):
            progress = i
            i = p.b.find('.'+name,i)
            if i == -1: break
            if g.match_word(s,i,'.'+name):
                print('%s %s' % (name,p.h))
                seen.add(name)
                aList.remove(name)
                break
            else:
                i += 1
            assert progress < i,(i,progress)
for z in sorted(seen):
    print(z)
print('**done')
#@+node:ekr.20130810093044.16955: *3* script: Find all comments from modes (slow)
@language python

'''Slow script.'''

import glob
import imp
# NOTE(review): the imp module is deprecated and removed in Python 3.12;
# the import_module helper below should migrate to importlib — confirm.

@others

# Disabled driver: builds a language -> comment-delims table by importing
# every mode module.  (The other script is much faster.)
if 0: # The other script is much faster.
    
    keys = ("lineComment","commentStart","commentEnd",)
    d = {}
        # Keys are language names.
        # Values are a list of comment delims, in keys order.
    
    paths,modes_path = get_paths()
    for path in paths:
        module_name = g.shortFileName(path)[:-3]
        module = import_module(module_name,modes_path)
        aList = []
        for key in keys:
            val = module.properties.get(key)
            if val: aList.append(val)
        d[module_name] = aList
    
    print('-'* 20)
    print('language_delims_dict')
    for key in sorted(d):
        print('%16s: "%s"' % ('"%s"' % (key),' '.join(d.get(key))))
#@+node:ekr.20130810093044.16956: *4* get_paths
def get_paths():
    """Return (paths, modes_path): mode module files and their directory,
    excluding __init__.py."""
    modes_dir = g.os_path_finalize_join(g.app.loadDir, '..', 'modes')
    candidates = glob.glob(g.os_path_finalize_join(modes_dir, '*.py'))
    paths = [fn for fn in candidates if not fn.endswith('__init__.py')]
    return paths, modes_dir
#@+node:ekr.20130810093044.16957: *4* import_module
def import_module(module_name, modes_path):
    """Import *module_name* (a .py file in *modes_path*) and return the module.

    Ported from the imp module, which is deprecated and was removed in
    Python 3.12, to importlib.
    """
    import importlib.util
    import os
    file_path = os.path.join(modes_path, module_name + '.py')
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
#@+node:ekr.20051110105027.159: *3* script: Find and replace all functions in leoGlobals.py
import string

@others

# Pass 1: collect the names of the functions defined in leoGlobals.py.
if 1:
    << set nameList to the list of functions in leoGlobals.py >>
else:
    p = g.findNodeAnywhere("@file leoGlobals.py")
    nameList = findFunctionsInTree(p)

    nameList.sort() ; g.enl()
    for name in nameList: g.es("'%s'," % name)

    s = "%d functions in leoGlobals.py" % len(nameList)
    g.es_print(s)

# Pass 2: prepend 'g.' to calls of those functions in the Code tree.
if 0:
    p = g.findTopLevelNode(c,"Code")
    g.enl() ; g.enl()
    count = prependNamesInTree(p,nameList,"g.",replace=True) # Just prints if replace==False.
    s = "%d --- done --- " % count
    g.es_print(s)
#@+node:ekr.20051110105027.160: *4* findFunctionsInTree
def findFunctionsInTree(p):
    """Return the unique function names defined anywhere in p's tree."""
    result = []
    for node in p.self_and_subtree():
        for name in findDefs(node.b):
            if name not in result:
                result.append(name)
    return result
#@+node:ekr.20051110105027.161: *4* findDefs
def findDefs(body):
    """Return the names of top-level defs in body.

    Return [] as soon as a class or an __init__ method is seen:
    classes (and their methods) are defined in a single node.
    """
    result = []
    for line in body.split('\n'):
        i = g.skip_ws(line, 0)
        if g.match(line, i, "class"):
            return []  # The classes are defined in a single node.
        if not g.match(line, i, "def"):
            continue
        i = g.skip_ws(line, i + 3)
        j = g.skip_c_id(line, i)
        if j <= i:
            continue
        name = line[i:j]
        if g.match(name, 0, "__init__"):
            return []  # Disallow other class methods.
        result.append(name)
    return result
#@+node:ekr.20051110105027.162: *4* prependNamesInTree
def prependNamesInTree(p,nameList,prefix,replace=False):

    c = p.c

    assert(len(prefix) > 0)
    ch1 = string.letters + '_'
    ch2 = string.letters + string.digits + '_'
    def_s = "def " ; def_n = len(def_s)
    prefix_n = len(prefix)
    total = 0
    for p in p.self_and_subtree():
        count = 0 ; s = p.b
        printFlag = False
        if s:
            for name in nameList:
                i = 0 ; n = len(name)
                while 1:
                    << look for name followed by '(' >>
            if count and replace:
                if 0:
                    << print before and after >>
                c.setBodyString(p,s)
                p.setDirty()
        g.es("%3d %s" % (count,p.h))
        total += count
    c.redraw()

    return total
#@+node:ekr.20051110105027.163: *5* << look for name followed by '(' >>
i = s.find(name,i)
if i == -1:
    break
elif g.match(s,i-1,'.'):
    i += n # Already an attribute.
elif g.match(s,i-prefix_n,prefix):
    i += n # Already preceded by the prefix.
elif g.match(s,i-def_n,def_s):
    i += n # preceded by "def"
elif i > 0 and s[i-1] in ch1:
    i += n # Not a word match.
elif i+n < len(s) and s[i+n] in ch2:
    i += n # Not a word match.
else:
    j = i + n
    j = g.skip_ws(s,j)
    if j >= len(s) or s[j] != '(':
        i += n
    else: # Replace name by prefix+name
        s = s[:i] + prefix + name + s[i+n:]
        i += n ; count += 1
        # g.es('.',newline=False)
        if 1:
            if not printFlag:
                printFlag = True
                # print p.h
            print g.get_line(s,i-n)
#@+node:ekr.20051110105027.164: *5* << print before and after >>
# Leo section body: show the node's body before and after replacement.
# (Python 3: print statements converted to print() calls.)
print("-"*10, count, p.h)
print("before...")
print(p.b)
print("-"*10, "after...")
print(s)
#@+node:ekr.20051110105027.165: *4* << set nameList to the list of functions in leoGlobals.py >>
# The leoGlobals.py function names to be given the 'g.' prefix.
nameList = (
'alert',
'angleBrackets',
'appendToList',
'callerName',
'CheckVersion',
'choose',
'clearAllIvars',
'clear_stats',
'collectGarbage',
'computeLeadingWhitespace',
'computeWidth',
'computeWindowTitle',
'createTopologyList',
'create_temp_name',
'disableIdleTimeHook',
'doHook',
'dump',
'ecnl',
'ecnls',
'enableIdleTimeHook',
'enl',
'ensure_extension',
'es',
'esDiffTime',
'es_error',
'es_event_exception',
'es_exception',
'escaped',
'executeScript',
'file_date',
'findNodeAnywhere',
'findTopLevelNode',
'findNodeInTree',
'findReference',
'find_line_start',
'find_on_line',
'flattenList',
'funcToMethod',
'getBaseDirectory',
'getOutputNewline',
'getTime',
'get_Sherlock_args',
'get_directives_dict',
'get_leading_ws',
'get_line',
'get_line_after',
'getpreferredencoding',
'idleTimeHookHandler',
# 'importFromPath',
'initScriptFind',
'init_sherlock',
'init_trace',
'isUnicode',
'isValidEncoding',
'is_c_id',
'is_nl',
'is_special',
'is_ws',
'is_ws_or_nl',
'joinLines',
'listToString',
'makeAllNonExistentDirectories',
'makeDict',
'match',
'match_c_word',
'match_ignoring_case',
'match_word',
'module_date',
'openWithFileName',
'optimizeLeadingWhitespace',
'os_path_abspath',
'os_path_basename',
'os_path_dirname',
'os_path_exists',
'os_path_getmtime',
'os_path_isabs',
'os_path_isdir',
'os_path_isfile',
'os_path_join',
'os_path_norm',
'os_path_normcase',
'os_path_normpath',
'os_path_split',
'os_path_splitext',
'pause',
'plugin_date',
'plugin_signon',
'printDiffTime',
'printGc',
'printGcRefs',
'printGlobals',
'printLeoModules',
'print_bindings',
'print_stats',
'readlineForceUnixNewline',
'redirectStderr',
'redirectStdout',
'removeLeadingWhitespace',
'removeTrailingWs',
'reportBadChars',
'restoreStderr',
'restoreStdout',
'sanitize_filename',
'scanAtEncodingDirective',
'scanAtFileOptions',
'scanAtLineendingDirective',
'scanAtPagewidthDirective',
'scanAtRootOptions',
'scanAtTabwidthDirective',
'scanDirectives',
'scanError',
'scanf',
'set_delims_from_language',
'set_delims_from_string',
'set_language',
'shortFileName',
'skip_blank_lines',
'skip_block_comment',
'skip_braces',
'skip_c_id',
'skip_heredoc_string',
'skip_leading_ws',
'skip_leading_ws_with_indent',
'skip_line',
'skip_long',
'skip_matching_delims',
'skip_nl',
'skip_non_ws',
'skip_parens',
'skip_pascal_begin_end',
'skip_pascal_block_comment',
'skip_pascal_braces',
'skip_pascal_string',
'skip_php_braces',
'skip_pp_directive',
'skip_pp_if',
'skip_pp_part',
'skip_python_string',
'skip_string',
'skip_to_char',
'skip_to_end_of_line',
'skip_to_semicolon',
'skip_typedef',
'skip_ws',
'skip_ws_and_nl',
'splitLines',
'stat',
'stdErrIsRedirected',
'stdOutIsRedirected',
'toEncodedString',
'toUnicode',
'toUnicodeFileEncoding',
'top',
'trace',
'trace_tag',
'update_file_if_changed',
'utils_rename',
'windows',
'wrap_lines')
#@+node:ekr.20051110105027.150: *3* script: Find cr/lf in a directory
import fnmatch, os

def findDosFile(pattern, dirname):

    """Print the CRLF count for each text file in dirname matching pattern."""

    files = os.listdir(dirname)
    names = fnmatch.filter(files, pattern)
    for name in names:
        path = g.os_path_join(dirname, name)
        if g.os_path_isfile(path):
            data = open(path, 'rb').read()  # renamed: 'bytes' shadowed the builtin
            # Use bytes patterns: the file was opened in binary mode, so
            # str arguments to count()/in would raise TypeError in Python 3.
            count = data.count(b'\r\n')
            if b'\0' not in data and count:
                print("%4d %s" % (count, path))

# Raw string: avoid accidental backslash escapes in the Windows path.
directory = r"c:\prog\leoCvs\leo"
print() ; findDosFile("*", directory)
#@+node:ekr.20070124092048: *3* script: Find entries in k.guiBindNamesDict
# A script to find uses of the names defined in k.guiBindNamesDict in Leo's core.
# (Python 3: print statement converted to a print() call.)

k = c.k ; d = k.guiBindNamesDict
keys = [z for z in d.values() if z not in k.tkNamesList]
keys.sort()
h = ' tkKeys.defineSpecialKeys'
for key in keys:
    for p in c.all_positions():
        if p.h != h and p.h.find('keywords') == -1:
            s = p.b
            for z in ('"%s"' % (key), "'%s'" % (key)):
                if s.find(z) != -1:
                    print('%20s %s' % (z,p.h))
#@+node:ekr.20060509121738.1: *3* script: Find longest body text
# Select the node with the longest body text.  Used for testing the new colorizer.

largest = p.copy()
largest_len = len(p.b)

for node in p.self_and_subtree():
    if len(node.b) > largest_len:
        largest_len = len(node.b)
        largest = node.copy()

c.selectPosition(largest)
c.redraw()
#@+node:ekr.20060509121738.2: *3* script: Find most colorizer tags
# For testing the new colorizer.
# (Python 3: print statement converted to a print() call.)

def tags(p):
    """Select p and return the total number of colorizer tag ranges in its body."""
    c.selectPosition(p)
    w = c.frame.body.bodyCtrl
    names = w.tag_names()
    total = 0
    for name in names:
        theList = w.tag_ranges(name)
        if theList:
            print(name, w.tag_ranges(name))
            total += len(theList)
    return total

pmax = p.copy()
n = tags(p) # len(p.b)

for p in p.self_and_subtree():
    # if len(p.b) > n:
    n2 = tags(p)
    if n2 > n:
        n = n2
        pmax = p.copy()

c.selectPosition(pmax)
c.redraw()
#@+node:ekr.20070213074001.1: *3* script: Find w.xxx
# Count 'w.<attr>' references in the Code tree.
import string
import leoTkinterFrame
words = {}
word_chars = string.ascii_letters + string.digits + '_'
p = g.findTopLevelNode(c,'Code')
p1 = p.copy()
baseClass = leoTkinterFrame.leoTkTextWidget
allMatches = True
seen = {}
for p in p.self_and_subtree():
    if seen.get(p.v): continue
    seen[p.v] = True
    s = p.b
    i = 0
    while 1:
        j = s.find('w.',i)
        if j == -1: break
        ch = s[j-1]
        if j == 0 or ch not in word_chars:
            j += 2
            k = g.skip_c_id(s,j)
            word = s[j:k]
            if allMatches or not hasattr(baseClass,word):
                words[word] = 1 + words.get(word,0)
            i = k
        else:
            i += 2
# Python 3: dict views have no .sort(); use sorted().  print() calls.
keys = sorted(words)
aList = ['%3d %s' % (words.get(key),str(key)) for key in keys]
print(g.listToString(aList))
g.es('searched %s' % p1.h)
#@+node:ekr.20220318141823.1: *3* script: find trailing comments
"""
Find and mark all nodes containing underindented trailing comments in c's outline.

Such comments have the form:
    
    .. some code ..
        A trailing, overindented comment.
"""
g.cls()
import re
pattern = re.compile(r'\w+\s*=\s\w+')

def do_node(p):
    # Mark p and return True when a comment line is indented more deeply
    # than the assignment-like line just before it.
    global count
    assigned_prev = False
    prev_indent = 0
    for line in g.splitLines(p.b):
        indent = g.computeLeadingWhitespaceWidth(line, tab_width=-4)
        if line.strip().startswith('#'):
            if assigned_prev and indent > prev_indent:
                # Found a likely trailing comment.
                p.setMarked()
                count += 1
                return True
            assigned_prev = False
        else:
            prev_indent = indent
            assigned_prev = pattern.search(line)
    return False

count = 0
c.clearAllMarked()
for p in c.all_unique_positions():
    do_node(p)
print(f"found {count} nodes.")
#@+node:ekr.20190106073221.1: *3* script: Find-chains (EKR)
'''To be run in leoPy.leo: find Leo's important chains.'''
g.cls()
import re
# bases: work list of prefixes still to expand; chains: all chains found.
bases, chains = ['self.', 'c.', 'g.'], set()
# Regex fragment matching a single Python identifier.
id_pat = r'\w[\w0-9]*'

def get_lines():
    """Return the unique lines of the outline that contain a two-dot chain."""
    pat = re.compile(r'%s\.%s\.\w' % (id_pat, id_pat))
    found = set()
    for node in c.all_unique_positions():
        for line in g.splitLines(node.b):
            if pat.search(line):
                found.add(line)
    result = list(found)
    print('%s lines' % len(result))
    return result

s = ''.join(get_lines())

def find_chains(base):
    """Record base, then scan s for one-level-longer chains and queue them."""
    chains.add(base)
    escaped = base.replace('.', r'\.')
    pat = re.compile(r'\b(%s%s\.)(%s)' % (escaped, id_pat, id_pat))
    for m in re.finditer(pat, s) or []:
        chain, follow = m.group(1), m.group(2)
        if chain in chains: # and follow not in methods:
            continue
        bases.append(chain)
        chains.add(chain)
            
def munge_chains(chains):
    '''Remove simple ivars and use c instead of self.c.'''
    stripped = [z.rstrip('.') for z in sorted(chains)]
    # Replace the 'self.c.' prefix with plain 'c.'.
    munged = set()
    for z in stripped:
        munged.add(z[5:] if z.startswith('self.c.') else z)
    result = []
    for z in sorted(munged):
        if z.count('.') == 0:
            continue  # Drop bare names.
        if z.startswith('self.') and z.count('.') <= 1:
            continue  # Drop simple ivars.
        result.append(z)
    return result

# Expand every base; find_chains appends newly-found chains to bases.
while bases:
    find_chains(bases.pop())
chains = munge_chains(chains)
g.printObj(chains, tag='%s chains' % len(chains))
#@+node:ekr.20190106073221.2: *4* standard methods
# Names of common builtin container/string methods, used (when enabled via the
# commented-out test in find_chains) to filter follow-on names out of chains.
if 0:
    methods = (
        'copy', # general method.
        'add', # set methods
        'append', 'extend', 'insert', 'pop', 'remove', 'split', 'sort', # list methods
        'get',  'keys', 'items', 'values', # dict methods.
        'endswith', 'finditer', 'lower', 'strip', 'lstrip', 'rstrip', 'startswith', 'upper',
            # string methods.
    )
#@+node:ekr.20180529105204.1: *3* script: get-child-headlines
# Append each child's headline to this node's body as a '- ' list item.
parts = [p.b]
for child in p.children():
    parts.append('\n- ' + child.h)
p.b = ''.join(parts)
#@+node:ekr.20211009080112.1: ** Leo introspection
#@+node:ekr.20120328102352.6945: *3* @@button create specialized find buttons
@language python

<< documentation >>

from leo.plugins.mod_scripting import scriptingController

sc = scriptingController(c)

# Use the selected body text as the transform code when a selection exists;
# otherwise use the entire node body, naming the button after the headline.
if c.frame.body.hasSelection():
    code = c.frame.body.getSelectedText()
    heading = 'fix'
else:
    code,heading = p.b,p.h

def transform(c=c,code=code):
    """Run `code` against the selected body text, then replace the selection
    with the (possibly rebound) string s.

    NOTE(review): under Python 3, exec cannot rebind the *local* s; this
    presumably dates from Python 2 — verify before relying on it.
    The exec'd text comes from the user's own outline, never untrusted input.
    """
    w = c.frame.body
    s = w.getSelectedText()
    g.es(s)
    exec(code)
    g.es(s)
    w.deleteTextSelection()
    i = w.getInsertPoint()
    w.insert(i,s)
    p.b = w.getAllText()
    w.setInsertPoint(i)

sc.createIconButton(
    heading,
    command = transform,
    shortcut = None,
    statusLine = 'Make filter button',
    bg = "LightBlue"
)
#@+node:ekr.20120328102352.6949: *4* << documentation >>
@language rest
@
http://groups.google.com/group/leo-editor/browse_thread/thread/d21349c52dabd066

Ever find that you have a whole lot of:

.. sourcecode:: py

  rec[f['analyte']] ... rec[f['sample_type']] ...

expressions in your code, and now things have changed and you want them all
to be:

.. sourcecode:: py

  row.Analyte ... row.Sample_Type ...

basically if str variable s was::

  rec[f['analyte']]

then you want to perform:

.. sourcecode:: py

  s = "row."+s.split("'")[1].title()

on each one. In general it would be nice to be able to use a python
expression when search and replace doesn't cut it.

The button code below creates a button, ``fac``, which, when pressed,
creates another button, with some name you choose, which, when pressed,
executes some python code to fix the selected text in the body.

You can define the code to be executed in two ways, either in its own node:

 - insert a new node with a headline which describes the refactor
 - enter code in the node which modifies the string variable ``s``,
   which is initially set to the selected text in the body
 - press the ``fac`` button, which creates a new button named
   after this code node
 - select each offending piece of text and press the button created
   in the previous step to fix

or

 - type some code modifying ``s`` right in the body you're working on
 - press the ``fac`` button, which creates a new button named "fix"
 - select each offending piece of text and press the button created
   in the previous step to fix

Notes:

 - unlike regular button nodes, changing the code after the
   button's created (first option above) doesn't change the code
   executed by the button
 - replacing the selected text makes Leo reposition the insert point at
   the top of the window; this is annoying but unrelated to this code
#@+node:ekr.20161023110345.1: *3* For #325: Simplify the organization of commands
# https://github.com/leo-editor/leo-editor/issues/325
Simplify the organization of commands
# This issue has been closed and abandoned.
#@+node:ekr.20161023051437.1: *4* script: set headlines to command names
'''
For each node p in commanderCommands.py, search p for @cmd decorators,
replacing the headline with command names.
'''
g.cls()
import re
root = g.findNodeAnywhere(c, '@file ../commands/commanderCommands.py')
# Capture the argument of each @cmd(...) decorator (the command name).
pattern = re.compile(r'@cmd\((.*)\)') # [\'\"]
for p in root.self_and_subtree():
    # print('%4s %s' % (len(p.b), p.h))
    matches = list(pattern.finditer(p.b))
    if matches:
        # print('%25s %s' % (m.group(1), p.h))
        # Join multiple command names in one node with ' & '.
        p.h = ' & '.join([m.group(1) for m in matches])
        print(p.h)
print('done')
#@+node:ekr.20161023051453.1: *4* script: inject functions into commander
'''
For each node p in commanderCommands.py, insert the line:

    commander.def_name = def_name

after all function/method definitions.
'''
g.cls()
import re
root = g.findNodeAnywhere(c, '@file ../commands/commanderCommands.py')
assert root
# Capture the name of each def.
pattern = re.compile(r'\bdef(\s)+(\w+)')
for p in root.self_and_subtree():
    # print(p.h)
    lines = []
    for m in pattern.finditer(p.b):
        name = m.group(2)
        lines.append('commander.%s = %s\n' % (name, name))
    if lines:
        print(''.join(lines))
        # Append the injection lines after the node's existing body.
        p.b = '%s\n\n%s' % (p.b.rstrip(), ''.join(lines))
print('done')
#@+node:ekr.20161023044018.1: *4* script: find all calls to commander methods
'''Find all calls to commander methods in the present file.'''
g.cls()
import re
# Matches calls like c.foo(...), c1.foo(...), c2.foo(...); group(1) is the method name.
pattern = re.compile(r'c[12]*\.(\w+)\s*\(.*\)')
lines = set()
names = set()
for p in c.all_unique_nodes():
    for m in pattern.finditer(p.b):
        lines.add(m.group(0))
        names.add(m.group(1))
        # print('%30s %s' % (m.group(0), p.h))
for line in sorted(lines):
    print(line)
for name in sorted(names):
    print(name)
#@+node:ekr.20161023110341.1: *4* script: find all external calls to commander
'''Find all calls to commander methods in the present file.'''
g.cls()
import re
pattern = re.compile(r'\bc[12]*\.(\w+)\s*\(.*\)')
table = (
    '@file leoCommands.py',
    '@file ../commands/commanderCommands.py',
    '@file ../external/codewise.py',
        # Uses c in non-standard ways.
)
exclude = set() # Don't include commands defined in commanderCommands.py
seen = set()
lines = set()
names = set()
p = g.findTopLevelNode(c, 'Code')
assert p
while p:
    if p.h in table or p.h in seen:
        if p.h not in seen:
            seen.add(p.h)
            print('Skipping %s' % p.h)
        p.moveToNodeAfterTree()
    else:
        if p.isAnyAtFileNode():
            seen.add(p.h)
            # print('%s' % p.h)
        for m in pattern.finditer(p.b):
            lines.add(m.group(0))
            names.add(m.group(1))
            # print('%30s %s' % (p.h[:30], m.group(0)))
        p.moveToThreadNext()
# commanderCommands.py should contain only commands, and *local* helpers.
p = g.findNodeAnywhere(c, '@file ../commands/commanderCommands.py')
assert p
pattern = re.compile(r'\bdef\s+(\w+)\s*\(') # (.*\):')
print('exclusions...')
for p in p.subtree():
    for m in pattern.finditer(p.b):
        exclude.add(m.group(1))
        # print('%40s %s' % (m.group(0)[:40], p.h))
        # print(m.group(1))
if 0:
    print('\nexclude...')
    for z in sorted(exclude):
        print(z)
if 0:
    print('\nlines...')
    for z in sorted(lines):
        print(z)
if 1:
    print('\nnames...')
    for z in sorted(names - exclude):
        print(z)
#@+node:EKR.20040517074600.8: *3* script: Count pages
nodes = 0 ; lines = 0
for p in c.all_unique_positions():
    nodes += 1
    lines += len(g.splitLines(p.b))

# Heuristic: each node costs ~10 lines of overhead; 50 lines per page.
# NOTE(review): under Python 3 this is float division; %d below truncates it.
pages = ((nodes * 10) + lines) / 50
s = "%d nodes,  %d lines, %d pages" % (nodes,lines,pages)
print(s); g.es(s)
#@+node:ekr.20051110105027.104: *3* script: Count separate nodes
# p = g.findTopLevelNode("Code")

# Count positions vs distinct vnodes (clones share a vnode).
# A set is the right structure here: the original used a dict whose
# values were never read, plus the redundant len(d.keys()).
vnodes = set()
count = 0
for p in p.self_and_subtree():
    vnodes.add(p.v)
    count += 1

s = "%4s: %d vnodes, %d distinct" % ("Code", count, len(vnodes))
g.es_print(s)

vnodes = set()
count = 0
for p in c.all_positions():
    vnodes.add(p.v)
    count += 1

s = "%4s: %d vnodes, %d distinct" % ("All", count, len(vnodes))
g.es_print(s)
#@+node:ekr.20051110105027.105: *3* script: Count total, visible nodes
# Count all positions, and the subset currently visible in the outline pane.
total, visible = 0, 0

for p in c.all_positions():
    total += 1

p = c.rootPosition()
while p:
    visible += 1
    p.moveToVisNext(c)

# Python 3 print function (was a Python 2 print statement).
print("total,visible", total, visible)
#@+node:ekr.20070930042719: *3* script: Create @menus tree from menu tables
'''Create @menus tree from Leo's internal menu tables.'''
# Convert standard tables to list of @item nodes
m = c.frame.menu
# A representation of code/data in defineMenuTables and createMenusFromTables.
@others
data = (
    ('File',[
        (None,m.fileMenuTopTable),
        ('Open &With...',[]), ###
        (None,m.fileMenuTop2Table),
        ('&Read/Write...',m.fileMenuReadWriteMenuTable),
        ('Tan&gle...',m.fileMenuTangleMenuTable),
        ('&Untangle...',m.fileMenuUntangleMenuTable),
        ('&Import...',m.fileMenuImportMenuTable),
        ('&Export...',m.fileMenuExportMenuTable),
        (None,m.fileMenuTop3MenuTable),
        ]),
    ('Edit',[
        (None,m.editMenuTopTable),
        ('Edit &Body...',m.editMenuEditBodyTable),
        ('Edit &Headline...',m.editMenuEditHeadlineTable),
        ('&Find...',m.editMenuFindMenuTable),
        (None,m.editMenuTop2Table),
        ]),
    ('Outline',[
        (None,c.frame.menu.outlineMenuTopMenuTable),
        ('Chec&k...',m.outlineMenuCheckOutlineMenuTable),
        ('E&xpand/Contract...',m.outlineMenuExpandContractMenuTable),
        ('&Move...',m.outlineMenuMoveMenuTable),
        ('M&ark...',m.outlineMenuMarkMenuTable),
        ('&Go To...',m.outlineMenuGoToMenuTable),
        ]),
    ('Plugins',[
        ]), # A placeholder.
    ('Cmds',[
        ('&Abbrev...',m.cmdsMenuAbbrevTable),
        ('Body E&ditors...',m.cmdsMenuBodyEditorsTable),
        ('&Buffers...',m.cmdsMenuBuffersTable),
        ('&Chapters...',m.cmdsMenuChaptersTable),
        ('C&ursor/Selection...',[]), ### Has several submenus...Must be placed by hand.
            ('Cursor &Back...',m.cursorMenuBackTable),
            ('Cursor Back &Extend Selection...',m.cursorMeuuBackExtendTable),
            ('Cursor Back Extend &to...',m.cursorMenuExtendTable),
            ('Cursor &Forward...',m.cursorMenuForwardTable),
            ('Cursor Forward E&xtend Selection...',m.cursorMenuForwardExtendTable),
        ('&Focus...',m.cmdsMenuFocusTable),
        ('&Macro...',m.cmdsMenuMacroTable),
        ('M&inibuffer...',m.cmdsMenuMinibufferTable),
        ('&Pickers...',m.cmdsMenuPickersTable),
        ('&Rectangles...',m.cmdsMenuRectanglesTable),
        ('Re&gisters...',m.cmdsMenuRegistersTable),
        ('R&un Script/Tests...',m.cmdsMenuRunTable),
        ('Scr&olling...',m.cmdsMenuScrollTable),
        ('Spell C&heck...',m.cmdsMenuSpellCheckTable),
        ('&Text Commands...',m.cmdsMenuTextTable),
        ('Toggle Setti&ngs...',m.cmdsMenuToggleTable),
        ]),
    ('Window',[
        (None,m.windowMenuTopTable),
        ]),
    ('Help',[
        (None,m.helpMenuTable),
        ]),
)

for menuName,tables in data:
    # print menuName,tables
    p2 = p.insertAsLastChild()
    p2.initHeadString('@menu '+menuName)
    for aTuple in tables:
        subMenuName,aList = aTuple
        if subMenuName:
            p3 = p2.insertAsLastChild()
            p3.initHeadString('@menu '+subMenuName)
        else:
            p3 = p2
        for z in aList:
            p4 = p3.insertAsLastChild()
            setNode(p4,z)
c.redraw()
#@+node:ekr.20070930042719.1: *4* setNode
def setNode (p,data):
    """Init node p from data.

    data may be:
      - a string: the headline text,
      - a 1-tuple/list: (head,),
      - a 2-tuple/list: (body, head).
    Anything else is reported via g.trace.
    """
    head = body = None

    # isinstance is the idiomatic (and subclass-friendly) type test;
    # the original compared type() objects directly.
    if isinstance(data, str):
        head = data
    elif isinstance(data, (tuple, list)):
        if   len(data) == 1: head = data[0]
        elif len(data) == 2: body, head = data
        else: g.trace('bad tuple: ',repr(data))
    else: g.trace('bad data: ',repr(data))

    if head and head.strip():
        p.initHeadString('@item ' + head.strip())
    if body and body.strip():
        p.setTnodeText(body.strip())
#@+node:ekr.20130810093044.16971: *3* script: Display function call hierarchy in Leo
@language python
"""
From Brian Theado

The other day I stumbled across Ville's code in scripts.leo which displays the
output of python's trace module in a leo outline. The output of the trace module
is not very friendly and I didn't find the result very usable. I was inspired to
write some code to translate the output so the tree of function calls is
displayed via Leo headlines. Thanks to Ville for sharing that code. I never
would have figured this out without that starting point.

Just copy (Ctrl-Shift-V) the child outline into a leo outline and hit ctrl-b on
the "call tree" node. The execution tree of the 'scroll-outline-up-line'
minibuffer command will be displayed to stdout and also as a tree of leo
headlines.
"""

import trace

@others

# http://docs.python.org/library/trace.html documents trace module.
# countcallers=1 records (caller, callee) pairs rather than line counts.
tracer = trace.Trace(countcallers=1)

# Trace a minibuffer command.
# Any function call will work. Leo's minibuffer commands are easily discoverable
# via tab completion and the 'show-commands' command.

#tracer.runfunc(c.executeMinibufferCommand, 'goto-prev-node')
tracer.runfunc(c.executeMinibufferCommand, 'scroll-outline-up-line')

# Insert the call tree as children of a new 'trace session' node.
top = p.insertAsLastChild().copy()
top.h = 'trace session'
displayCalltree(top, tracer.results().callers.keys())
c.redraw()

#@+node:ekr.20130810093044.16972: *4* displayCalltree
def displayCalltree(p, callinfo):
   '''
   Converts the function call hierarchy in 'callinfo' into a tree of function
   calls.  The function call tree is displayed to stdout as indented text
   and is inserted as a tree of leo nodes rooted at the given position 'p'

   callinfo is an iterable of (caller, callee) pairs; each element is
   presumably a (filename, module, funcname) tuple as produced by
   trace.CoverageResults.callers — confirm against the caller.
   '''
   callers = [k[0] for k in callinfo]
   callees = [k[1] for k in callinfo]

   # The first set of children will be those that don't have any callers
   # listed in callinfo
   toplevels = list(set(callers) - set(callees))
   positions = {}
   path = []

   # Depth-first traversal of the call hierarchy represented by 'callinfo'
   # 'levels' is a stack which grows during descend and shrinks
   # during ascend.  Each element of 'levels' is a list of unprocessed
   # siblings of each other
   levels = [toplevels]
   while len(levels) > 0:
       while len(levels[-1]) > 0:
           # Process the first element in the 'deepest' (i.e. last) list of siblings
           cur = levels[-1][0]
           levels[-1] = levels[-1][1:]
           indent = " " * 4 * (len(levels)-1)
           if cur not in path:
               if cur in positions.keys():
                   # Function already seen, so make a clone
                   clone = positions[cur].clone()
                   clone.moveToLastChildOf(p)
                   print (indent + "%s %s ..." % cur[1:])
               else:
                   # Haven't seen this function, so insert a new headline
                   p = p.insertAsLastChild().copy()
                   p.h = "%s %s" % cur[1:]
                   print (indent + p.h)

                   # Remember the position so it can be cloned if seen again
                   positions[cur] = p

                   # Find all callees of this function and descend
                   levels.append([c[1] for c in callinfo if c[0] == cur])
                   path.append(cur)
           else:
               # 'cur' is already on the active call path: a recursive call.
               r = p.insertAsLastChild().copy()
               r.h = "(recursive call) %s %s" % (cur[1], cur[2])
               print(indent + r.h + "...")

       # Ascend back up one level
       path = path[0:-1]
       p = p.parent()
       levels = levels[0:-1]
#@+node:ekr.20040311090054: *3* script: Dump fileIndex
# Print each vnode's gnx (Python 3 print function; was a Python 2 print statement).
for p in c.all_positions():
    print(p.v.fileIndex)
#@+node:ekr.20160428073540.1: *3* script: How long does it take to search LeoPy.leo?
@language python
# 7559 nodes: 0.075 sec.
import time
# time_ns gives integer timestamps, avoiding float rounding.
t1 = time.time_ns()
n = 0
for p in c.all_unique_positions():
    n += 1
    if p.h.startswith('@chapter '):
        pass
t2 = time.time_ns()
# Convert nanoseconds to milliseconds for display.
timestr = f'{n} nodes, {(t2 - t1) / 1e6:0.2f} ms'
g.es(timestr)
#@+node:ekr.20111017085134.16018: *3* script: Inspect modules
import inspect

fn = g.os_path_finalize_join(g.app.loadDir,'leoNodes.py')

m = __import__ ('leoNodes')
# print(m)

classes = inspect.getmembers(m,inspect.isclass)
# print(classes)
print('='*20)
for z in classes:
    name,value = z
    print(name)
    members = inspect.getmembers(value)
    print('members of',name)
    for name2,value2 in members:
        if False: # not name2.startswith('__'):
            print('  ',name2)
        if name2 == '__init__':
            print('__init__',value2)
            if inspect.isfunction(value2):
                init_members = inspect.getmembers(value2)
                print('init members')
                for name3,value3 in init_members:
                    if not name3.startswith('__'):
                        print('    ',name3)
#@+node:ekr.20051216152812: *3* script: Obsolete: Insert begin/endUpdate
# Leo no longer uses begin/endUpdate.
u = c.undoer
w = c.frame.body.bodyCtrl
s1 = '''\
    c.beginUpdate()
    try:'''
s2 = '''\
    finally:
        c.endUpdate()'''

b = u.beforeChangeNodeContents(p)
i, j = g.app.gui.getSelectionRange(w)
if i != j:
    s = w.get(i,j)
    s = ''.join(['\t'+line for line in g.splitLines(s)])
    w.delete(i,j)
    w.insert(i,s1+'\n'+s+'\n'+s2)
else:
    w.insert(i,s1+'\n\t\t\n'+s2)
u.afterChangeNodeContents(p,'add-begin/endUpdate',b)
#@+node:ekr.20120525155800.10870: *3* script: Print all docstrings from a module (obsolete)
import leo.core.leoTest as leoTest
import types

def isClass(obj):
    '''Return True if obj is a class, that is, an instance of type.'''
    result = isinstance(obj, type)
    return result

specialDictNames = ('__builtins__','__doc__','__name__','__file__','__module__')

def printDoc(x,s):
    '''Print the docstring length of x followed by the name s,
    or blanks when x has no docstring.'''
    doc = getattr(x, "__doc__", None)
    if doc:
        g.pr("%4d %s" % (len(doc), s))
    else:
        g.pr("%4s %s" % (' ', s))

g.pr('-' * 60)
g.pr("%4d %s" % (len(leoTest.__doc__),"leoTest"))

if 1:
    for s in leoTest.__dict__:
        if s not in specialDictNames:
            x = getattr(leoTest,s)
            if type(x) != types.ModuleType:
                printDoc(x,s)
                # if type(x) == types.ClassType:
                if isClass(x):
                    for s2 in x.__dict__:
                        x2 = getattr(x,s2)
                        if s2 not in specialDictNames:
                            g.pr(' '*4,)
                            printDoc(x2,s2)
else:
    << print names sorted by type >>
#@+node:ekr.20120525155800.10871: *4* << print names sorted by type >>
for theType,typeName in (
    (types.ModuleType,"modules"),
    (types.ClassType,"classes"),
    (types.FunctionType,"functions"),
):

    g.pr("\n%s..." % typeName)
    for s in leoTest.__dict__:

        if s not in specialDictNames:
            x = getattr(leoTest,s)
            if type(x) == theType:
                printDoc(x,s)
                if theType == types.ClassType:
                    g.pr("\tmethods...")
                    for s2 in x.__dict__:
                        x2 = getattr(x,s2)
                        if s2 not in specialDictNames:
                            g.pr("\t",newline=False)
                            printDoc(x2,s2)
#@+node:ekr.20050704172623: *3* script: Print all headlines, properly indented
# Print every headline, indented by outline level
# (Python 3 print function; was a Python 2 print statement).
for p in c.all_positions():
    print(p.level() * ' ', p.h)
#@+node:ekr.20040915080419: *3* script: Print all uAs (unknown attributes)
for p in c.all_positions():
    h = p.h
    if hasattr(p.v,'unknownAttributes'):
        print('v',h,p.v.unknownAttributes)
#@+node:ekr.20161021172753.1: *3* script: Print commands & docstrings
'''print a summary, sorted by class, of commands & their docstrings.'''
g.cls()
print_class = False
    # Print class in Full mode.
    # False is useful for spell-checking docstrings.
full = True
    # True: print entire docstring. False: print summary.
d = c.commandsDict
    # Keys are command names; values are functions

# Group commands by class.
groups = {}
for command in sorted(d.keys()):
    f = d.get(command)
    key = f.__self__.__class__.__name__ if hasattr(f,'__self__') else f.__name__
    aList = groups.get(key,[])
    aList.append((command,f),)
    groups[key] = aList

# Print groups.
if full:
    root = p.insertAsLastChild()
    root.h = 'Docstrings'
for group in sorted(groups.keys()):
    if full:
        parent = root.insertAsLastChild()
        parent.h = group
    else:
        print('GROUP %s...' % (group))
    aList = groups.get(group)
    for command,f in sorted(aList):
        fname = f.__name__ or ''
        doc = f.__doc__ or ''
        if full:
            lines = g.splitLines(doc)
            if len(lines) == 1:
                s = "'''%s'''" % doc
            else:
                s = ''.join([z.lstrip() for z in g.splitLines(doc)])
                s = s.rstrip() + '\n'
                s = "'''\n%s'''" % s
            if print_class:
                s = '%s\n%s' % (fname,s)
            child = parent.insertAsLastChild()
            child.h = command
            child.b = s
        else:
            print('%40s:%4s %s' % (command,len(doc),fname))
if full:
    c.redraw()
print('%s commands' % (len(list(d.keys()))))
#@+node:EKR.20040517074600.13: *3* script: Print default font
# Print the family and weight of Leo's default font
# (Python 3 print function; was a Python 2 print statement).
font = g.app.config.defaultFont

print(font.cget("family"), font.cget("weight"))
#@+node:ekr.20131030082936.19134: *3* script: Print functions defined in leoGlobals.py
print("Names defined in leoGlobals.py",color="purple")
for name in sorted(g.__dict__.keys()):
    print(name)
#@+node:ekr.20130810093044.16954: *3* script: Print global data structures from in modes/*.py files
'''Create global data structures from modes/*.py files.'''

import glob
import imp

g.cls()

theDir = g.os_path_finalize_join(g.app.loadDir,'..','modes','*.py')
aList = glob.glob(theDir)

theDir = g.os_path_finalize_join(g.app.loadDir,'..','modes')

# print('-'*40)
known_keys = list(g.app.language_delims_dict.keys())
new_languages = {}

for z in aList:
    name = g.os_path_basename(z)
    name2 = name[:-3]
    if name2 in known_keys or name2.startswith('__'):
        if 0: print('ignore: %s' % (name2))
    else:
        try:
            theFile, pathname, description = imp.find_module(name2,[theDir])
            m = imp.load_module(name2, theFile, pathname, description)
            if hasattr(m,'properties'):
                # new_languages.append(name2)
                new_languages[name2] = m
            else:
                print('no properties: %s %s' % (name2,m))
        except Exception:
            g.es_exception()
            
print('%s new languages\n' % (len(list(new_languages.keys()))))
    
for key in sorted(new_languages.keys()):
    m = new_languages.get(key)
    aList2 = [m.properties.get(z)
        for z in ('lineComment','commentStart','commentEnd')
            if m.properties.get(z)]
    print('%-20s : "%s",' % (
        '"%s"' % (key),
        ' '.join(aList2)))
    # computed[name2] = ' '.join(aList2)
       
if 0:
    mismatches = 0
    print()
    for z in known_keys:
        val = g.app.language_delims_dict.get(z)
        val2 = computed.get(z)
        if not val:
            print('no val',z)
        elif not val2:
            print('no val2',z)
        elif val != val2:
            mismatches += 1
            print('mismatch for %s. expected %s got %s' % (z,repr(val),repr(val2)))
            print(repr(val))
            print(repr(val2))
    print('%s mismatches' % mismatches)
#@+node:ekr.20040717121014: *3* script: Print gnx
# Python 3 print function (was a Python 2 print statement).
print("gnx", p.v.fileIndex, p.h)
#@+node:ekr.20141105055521.15: *3* script: Print gnxs & gnxDict
@language python
'''A script used while investigating this bug.'''
# g.cls()
d = {}
x = g.app.nodeIndices
result = []
for v in c.all_unique_nodes():
    gnx = v.fileIndex
    assert isinstance(gnx, str),gnx
    d [gnx] = v
    result.append('%s %s' % (gnx,v))
print('%s v.fileIndex\'s...' % len(result))
print('\n'.join(sorted(result)))
if 1:
    d = c.fileCommands.gnxDict
    print('old: %s fc.gnxDict keys...' % len(list(d.keys())))
    for key in sorted(d.keys()): 
        print('%s %s' % (key,d.get(key)))
if 1:
    c.recreateGnxDict()
    d = c.fileCommands.gnxDict
    print('new: %s fc.gnxDict keys...' % len(list(d.keys())))
    for key in sorted(d.keys()): 
        print('%s %s' % (key,d.get(key)))
#@+node:ekr.20111017085134.16022: *3* script: Print long lines
# This works, but splitting long lines by hand
# is the very essence of stupidity.

# Don't use this script!
# Use a *reliable* pep8 tool instead.

g.es('finding long lines in',p.h)
found = False
while p and not found:
    for line in g.splitLines(p.b):
        if len(line) > 80:
            found = True
            g.es('long line in',p.h)
            c.selectPosition(p)
            break
    else:
        p.moveToThreadNext()
g.es('done')
#@+node:ekr.20111017085134.16020: *3* script: Print max nesting level
n = 0

for p in c.all_positions():
    n = max(n,p.level())

g.es('n',n)

last = 0
delta = 0
d = {}

for p in c.all_positions():
    n = p.level()
    if n < last:
        delta = max(delta,last-n)
        d [last-n] = d.get(last-n,0) + 1
    last = n

g.es('delta',delta)
g.es('d',d)
#@+node:ekr.20120525155800.10867: *3* script: Print missing docstrings (obsolete)
'''print all commands that lack docstrings.'''
g.cls()
d = c.commandsDict
    # Keys are command names; values are functions

# Generated callbacks never carry docstrings; don't report them.
ignore = ['minibufferCallback','enterModeCallback',]

aList = [ d.get(z).__name__ for z in d
    if (not d.get(z).__doc__ or not d.get(z).__doc__.strip())
        and not d.get(z).__name__ in ignore]

# g.cls()
print()
for z in sorted(set(aList)):
    print(z)

print('done')
#@+node:EKR.20040613162717: *3* script: Print newline stats
# Count CR and LF characters in leo.py.
path = g.os_path_join(g.app.loadDir,"leo.py")

try:
    # Binary mode so \r\n line endings are preserved; the with-statement
    # closes the file even on error.
    with open(path, "rb") as f:
        s = f.read()
    # Under Python 3, iterating bytes yields ints, so the old
    # `for ch in s: if ch == '\r'` loop never matched; count bytes directly.
    cr = s.count(b'\r')
    nl = s.count(b'\n')
    m = "cr %d, nl %d %s" % (cr, nl, path)
    print(m)
    g.es(m)
except IOError:
    print("can not open", path)
#@+node:ekr.20170120110948.4: *3* script: Print Qt color names
# This script prints the list of known Qt names. Qt seems to ignore case.
from leo.core.leoQt import QtGui
# g.u is Leo's py2-era unicode helper — presumably a no-op on Python 3; verify.
aList = sorted([g.u(z) for z in QtGui.QColor().colorNames()])
print('\n'.join(aList))
#@+node:ekr.20051110105027.151: *3* script: Print statistics using dis module
# routines to gather static statistics about opcodes based on dis module.
import compiler
import dis
import os
import string
import sys
import types

@others
#@+node:ekr.20051110105027.152: *4* go
def go():

    dir = "c:/prog/leoCVS/leo/"
    modules = getModules(dir)
    stats = [0] * 256
    try:
        # Importing these might start leo itself and hang idle.
        modules.remove("leo")
        modules.remove("openLeo")
        modules.remove("openEkr")
        modules.remove("setup")
    except: pass
    # print modules

    for m in modules:
        try:
            print "module:", m
            exec("import " + m)
            a = eval(m)
            any(a,stats)
        except:
            import traceback ; traceback.print_exc()
            print "----- no matching class in", m

    g.print_stats(stats)
#@+node:ekr.20051110105027.153: *4* getFiles
def getFiles (dir):
    '''Return the full paths of all .py files in dir.'''
    result = []
    for name in os.listdir(dir):
        head, tail = g.os_path_split(name)
        root, ext = g.os_path_splitext(tail)
        if ext == ".py":
            result.append(g.os_path_join(dir, name))
    return result
#@+node:ekr.20051110105027.154: *4* getModules
def getModules (dir):

    """Return the list of module names (sans .py extension) of Python files in dir."""

    files = []

    try:
        allFiles = os.listdir(dir)
        for f in allFiles:
            head,tail = g.os_path_split(f)
            fn,ext = g.os_path_splitext(tail)
            if ext==".py":
                files.append(fn)
    except OSError:
        # Catch only filesystem errors; the old bare `except` silently hid
        # real bugs. Return whatever was collected so far.
        pass

    return files
#@+node:ekr.20051110105027.155: *4* any
def any(x,stats,printName = 0):
    # based on dis.dis()
    """Gathers statistics for classes, methods, functions, or code."""
    if not x:
        return
    if type(x) is types.InstanceType:
        x = x.__class__
    if hasattr(x, 'im_func'):
        x = x.im_func
    if hasattr(x, 'func_code'):
        x = x.func_code
    if hasattr(x, '__dict__'):
        items = x.__dict__.items()
        items.sort()
        for name, x1 in items:
            if type(x1) in (types.MethodType,
                            types.FunctionType,
                            types.CodeType):
                if printName: print name
                try:
                    any(x1,stats)
                except TypeError, msg:
                    print "Sorry:", msg
    elif hasattr(x, 'co_code'):
        code(x,stats)
    else:
        raise TypeError, \
              "don't know how to disassemble %s objects" % \
              type(x).__name__
#@+node:ekr.20051110105027.156: *4* code
def code (co, stats):
    """Gather static count statistics for a code object.

    Increments stats[opcode] for every opcode in co.co_code.
    NOTE(review): Python 2 era — assumes co.co_code is a str of 1-char
    opcodes (hence ord(c)); under Python 3 iterating bytes yields ints,
    and the 1-or-3-byte stepping predates the fixed 2-byte wordcode format.
    """

    codeList = co.co_code
    # Count the number of occurances of each opcode.
    i = 0 ;  n = len(codeList)
    while i < n:
        c = codeList[i]
        op = ord(c)
        stats[op] += 1
        i = i+1
        # Opcodes at or above HAVE_ARGUMENT carry a 2-byte argument.
        if op >= dis.HAVE_ARGUMENT:
            i = i+2
#@+node:ekr.20051110105027.157: *4* print_stats
def print_stats (stats):
    """Print per-opcode counts in descending order, then the total.

    NOTE(review): Python 2 only — uses xrange, string.rjust, and print
    statements.
    """

    stats2 = [] ; total = 0
    for i in xrange(0,256):
        if stats[i] > 0:
            stats2.append((stats[i],i))
        total += stats[i]

    # Sort by count ascending, then reverse for descending output.
    stats2.sort()
    stats2.reverse()
    for stat,i in stats2:
        print string.rjust(repr(stat),6), dis.opname[i]
    print "total", total
#@+node:ekr.20201030065548.3: *3* script: Print summary of c.config.settingsDict
d = c.config.settingsDict
sources = list(set([gs.path for gs in d.d.values() if gs.path is not None]))
    # Yes, gs.path can be None.
#
# Compute the summary dict.
summary = {}
for path in sources:
    inner_d = g.TypedDict(
        name=g.shortFileName(path),
        keyType=str,
        valType=g.GeneralSetting)
    summary [path] = inner_d
    for key, val in d.items():
        if val.path == path:
            inner_d [key] = val
#
# Show the summary dict.
if 1:
    # Brief: good for quck checks.
    g.printObj([str(z) for z in summary.values()])
else:
    # Complete. Good for deep debugging.
    g.printObj(summary)
#@+node:ekr.20041124144944: *3* script: Print sys.path
# Print each sys.path entry, prefixed by whether it exists on disk
# (Python 3 print function; was a Python 2 print statement).
import os
import sys
for s in sys.path:
    exists = os.path.exists(s)
    print("%5s %s" % (exists, s))
#@+node:ekr.20040322120331: *3* script: Print tnodeList's
# Print any vnodes that still carry the legacy tnodeList attribute
# (Python 3 print function; was a Python 2 print statement).
print('-' * 20)
for p in c.all_positions():
    if hasattr(p.v, "tnodeList"):
        print(p, p.v.tnodeList)
#@+node:ekr.20150415144952.1: *3* script: Print whether focus widget supports high-level interface
'''Determines whether the focus widget supports the high-level interface.'''
g.cls()
import leo.plugins.qtGui as qtGui
import leo.core.leoFrame as leoFrame
import PyQt4.QtGui as QtGui
if 0:
    c.logWantsFocusNow()
elif 1: # Edit a headline
    c.editHeadline(p)
    # c.redraw()
body_w = c.frame.body.bodyCtrl.widget
w = QtGui.QApplication.focusWidget()
print('Focus','isBodyCtrl',w == body_w,w)
tree = c.frame.tree
# Walk up the widget's parent chain until a wrapped text widget is found.
# NOTE(review): if focusWidget() returned None, the loop never runs and the
# 'if wrapper:' test below raises NameError -- confirm whether intended.
while w:
    # isText = g.app.gui.isTextWidget(w)
    if isinstance(w,QtGui.QLineEdit):
        wrapper = tree.getWrapper(w,item=None)
        if isinstance(wrapper,qtGui.leoQtBaseTextWidget):
            print('QLineEdit has wrapper',w,wrapper)
        else:
            print('QLineEdit: NO wrapper',w)
            # wrapper = tree.headlineWrapper(c,item=None,name='find-head-wrapper',widget=w)
            # print('QLineEdit NEW wrapper',w,wrapper)
        break
    elif isinstance(w,QtGui.QTextEdit):
        wrapper = getattr(w,'leo_wrapper',None)
        if wrapper:
            if isinstance(wrapper,qtGui.leoQtBaseTextWidget):
                print('QTextEdit has text wrapper',w,wrapper)
            elif isinstance(wrapper,qtGui.leoQtLog):
                logCtrl = wrapper.widget # same as wrapper.logCtrl
                print('QtTextEdit has log wrapper',w,logCtrl)
            else:
                print('unknown wrapper',wrapper)
        else:
            # No wrapper yet: create one on the fly.
            wrapper = qtGui.leoQTextEditWidget(w,'find-wrapper',c=c)
            print('QTextEdit NEW wrapper',w,wrapper)
        break
    print('Fail',w)
    wrapper = False
    if hasattr(w,'parent'):
        w = w.parent()
    else:
        print('no parent',w)
        break
if wrapper:
    print('is searchable (has wrapper)',w,wrapper)
    # Make sure wrapper supports the high-level interface.
    print('high level?',isinstance(wrapper,leoFrame.HighLevelInterface))
    # Spot-check the key high-level-interface methods.
    table = (
        'insert',
        'getAllText',
        'setAllText',
        'setInsertPoint',
        'setSelectionRange',
    )
    for ivar in table:
        print(bool(getattr(wrapper,ivar,None)),ivar)
#@+node:ekr.20041019080125: *3* script: Report loaded plugins
# List all loaded plugins.  NOTE(review): Python 2 print statement.
print "Loaded plugins..."

for s in g.app.loadedPlugins:
    print s
#@+node:ekr.20041013101029: *3* script: Rexex find in headline
# Run this script from a scriptButton.
<< about this script >>
import re

def headfind():
    """Find and go to a headline matching the selected text (or clipboard).

    - GO to a found headline beginning with the selected text or clipboard
      buffer, or when the headline begins with '@' + word, or with the fixed
      prefix in sMyOwnPrefix.
    - EXCEPT when the found string names an '-info' node (followed by ' -info'
      and appearing after a space or at the start of the headline):
      then just SHOW the info node's body text in the cleared Log pane.
    """
    s = c.frame.body.getSelectedText() or g.app.gui.getTextFromClipboard()
    if s:
        if len(s) == 1: s = "index -info" #if select is one char try to goto this named index node
        s = re.escape(s.lower())
        sUseLogTrigger = re.escape(" -info")
        sMyOwnPrefix = re.escape("FOLLOWING FILE IS: ").lower()
        sAllowablePrefixRe = "\@([A-Za-z][A-Za-z0-9\-]+) "
            #@verbatim
            # @ char, followed by alpha,some alphanum or dash chars, then space ...matches Leo special nodes
        for p in c.all_positions():
            # Alternation of all accepted headline forms.
            srch="(^%s%s|^%s%s|^%s| %s%s)" % (sMyOwnPrefix,s,sAllowablePrefixRe,s,s,s,sUseLogTrigger) #all re
            if re.findall(srch,p.h.lower()):
                g.es("found " + s)
                sUseLogTrigger_srch="(^| )%s%s" % (s,sUseLogTrigger) #first just Log trigger re
                if re.findall(sUseLogTrigger_srch,p.h.lower()):
                    # NOTE(review): body2 is unused; the g.es that displayed it
                    # is commented out below, so only the Log pane is cleared.
                    body2=p.b
                    c.frame.log.logCtrl.delete("1.0","end"); # clear Log pane before message
                    # g.es(body2,color="orange")
                    return
                else:
                    c.frame.tree.expandAllAncestors(p)
                    c.selectVnode(p)
                    c.redraw()
                    return

        g.es("no headline matches '%s'" % (s),color="blue")
    else:
        g.es("no selected text & clipboard empty",color="blue")

headfind()
#@+node:ekr.20041013101029.1: *4* << about this script >>
@ PREFIXES: Now will jump to any headline where search is preceded by an @+chars+space
and alternatively a fixed prefix string+space.

CLEAR LOG: Now also clears Log for display of -info nodes. 

NOTE: I already had a file with a bunch of text files, each preceded by "THE FOLLOWING
FILE IS: ", and a list of these files at the top of everything. After globally
changing these lines with "- " (and at the first line), I imported the flattened outline...
and "there you go" — an index-driven Leo version. :)

bill p
#@+node:ekr.20130802103517.20482: *3* script: Show Call hierarchy w/ trace module
@language python
@tabwidth -4

"""
Create a Leo suboutline containing summary of a previous trace session.

Run (ctrl+b) this script after::

    cd ~/leo-editor
    python -m trace --trackcalls launchLeo.py --gui=qt >trace.txt

"""
from __future__ import print_function
import os
g.cls()
f = g.os_path_finalize_join(g.app.loadDir,'..','..','trace.txt')
tr = open(os.path.expanduser(f))
top = p.insertAfter()
top.h = 'trace session'
# cur: current '***' file node; no: current '-->' callee node; n: line count.
cur,no,n = None,None,0
for l in tr:
    if l.startswith('***'):
        cur = top.insertAsLastChild().copy()
        cur.h = os.path.basename(l.split()[1])
    elif l.startswith('  -->'):
        # NOTE(review): raises AttributeError if a '-->' line precedes the
        # first '***' line (cur is still None) -- assumed not to happen.
        no = cur.insertAsLastChild().copy()
        no.h = os.path.basename(l.split()[1].strip())
    elif no:
        no.b += l.strip() + '\n'
    n += 1
    # Progress dot every 100 trace lines.
    if (n % 100) == 0:
        print(".",end='')
c.redraw()
#@+node:EKR.20040517074600.10: *3* script: Show font
# Show the body font computed from the body_text_font_* settings.
body = c.frame.body.bodyCtrl

font = c.config.getFontFromParams(
        "body_text_font_family", "body_text_font_size",
        "body_text_font_slant",  "body_text_font_weight",
        tag = "body")

print(font)
print(body)

# body.configure(font=font)
#@+node:EKR.20040517074600.11: *3* script: Show settings
# Print the font settings of the body, log and tree panes.
# NOTE(review): Python 2 only (tkFont, print statement, `x` backtick repr).
import tkFont

@others

# Body pane.
fn = c.frame.body.cget("font")
font = tkFont.Font(font=fn)
name,size,slant,weight = getFontSettings(font)
print "body:",fn,name,size,slant,weight

if 0:

    # Log pane.
    fn = c.frame.log.getFontConfig()
    font = tkFont.Font(font=fn)
    name,size,slant,weight = getFontSettings(font)
    g.es("log:" + name + "," + `size` + "," + slant + "," + weight)

    # Tree pane.
    font = c.frame.tree.getFont()
    name,size,slant,weight = getFontSettings(font)
    g.es("head:" + name + "," + `size` + "," + slant + "," + weight)
#@+node:EKR.20040517074600.12: *4* getFontSettings
def getFontSettings (font):

    """Return the (family, size, slant, weight) settings of a Tk font."""
    keys = ("family", "size", "slant", "weight")
    name, size, slant, weight = (font.cget(key) for key in keys)
    return name, size, slant, weight
#@+node:ekr.20150416071458.1: ** Leo nodes
#@+node:ekr.20060824181946: *3* @@button convert-to-at-shadow
"""
Look for @thin files in the current subtree.
Convert those thin files into a file with a shadow file,
if this shadow file does not exist already.

FIXME: the line end convention is currently changed:
      unix lineendings are converted to DOS lineendings,
      if files are converted on Windows.
      Not sure if that is a problem or not.
"""

# This script is deprecated because @shadow is deprecated.
# NOTE(review): Python 2 only -- uses the removed file() builtin below.

import mod_shadow_core, os, shutil

def marker_from_extension(filename):
    # Return the single-line comment delimiter for filename, suffixed with '@'.
    marker = g.comment_delims_from_extension(filename)[0]
    return marker and marker + '@'

shadow_subdir = c.config.getString("shadow_subdir").strip()
if not shadow_subdir: assert False,'No shadow_subdir setting'
prefix = c.config.getString("shadow_prefix")

for p in p.self_and_subtree():
   h = p.h.strip()
   if h.startswith("@thin"):
       start = h.find("@thin") + len("@thin")
       leofiledir = os.path.split(c.mFileName)[0]
       filename = h[start:].strip()
       fullfilename = os.path.join(leofiledir, filename)
       theDir = os.path.split(fullfilename)[0]
       leoFolder = os.path.join(leofiledir, theDir, shadow_subdir)
       # Create the shadow subdirectory if needed.
       if not os.path.exists(leoFolder):
           os.mkdir(leoFolder)
           assert os.path.exists(leoFolder)
       else:
           assert os.path.isdir(leoFolder)
       junk, name = os.path.split(filename)
       newname = os.path.join(leoFolder, prefix + name)
       # Never overwrite an existing shadow file.
       if os.path.exists(newname):
           continue
       g.es( "renaming %s to %s" % (filename, newname))
       # Copy original to the shadow location, then recreate the public
       # file (empty, then filled without sentinels).
       shutil.copy2(fullfilename, newname)
       os.unlink(fullfilename)
       f = file(fullfilename, "w")
       f.close()
       mod_shadow_core.copy_file_removing_sentinels(
           sourcefilename=newname,
           targetfilename=fullfilename,
           marker_from_extension = marker_from_extension)
       g.es("File %s is now shadowed" % filename)
#@+node:ville.20090508224531.9799: *3* @@button Dump nodes to ~/.leo/dump
""" Dump nodes to ~/.leo/dump git repository.

Before using this, you need to:
    mkdir ~/.leo/dump; cd ~/.leo/dump; git init

NOTE(review): Python 2 era -- print statement below; on Python 3,
hashlib.md5 would also require bytes, not str.
"""

import os, codecs, hashlib
flatroot = os.path.expanduser('~/.leo/dump')
assert os.path.isdir(flatroot)

# hl collects one HTML link line per dumped node.
hl = []

def dump_nodes():
    # Write each node body to a file named by its gnx and record a link.
    for p in c.all_unique_positions():
        name, date, num = p.v.fileIndex
        gnx = '%s%s%s' % (name, date, num)
        hl.append('<a href="%s">%s%s</a><br/>' % (gnx, '-' * p.level(), p.h))
        fname = gnx
        codecs.open(fname,'w', encoding='utf-8').write(p.b)
        print "wrote", fname

os.chdir(flatroot)

dump_nodes()
lis = "\n".join(hl)

html = "<body>\n<tt>\n" + lis + "\n</tt></body>"

#titlename = c.frame.getTitle() + '.html'
pth, bname = os.path.split(c.mFileName)

# NOTE(review): dbdirname is only assigned when both pth and bname are
# non-empty; an unsaved outline would raise NameError below -- confirm.
if pth and bname:
    dbdirname = bname + "_" + hashlib.md5(c.mFileName).hexdigest()    

titlename = dbdirname + '.html'
codecs.open(titlename,'w', encoding='utf-8').write(html)

g.es("committing to " + flatroot)

os.system('git add *')
out = os.popen('git commit -m "Leo autocommit"').read()
g.es("committed")
g.es(out)
g.es('Outline in ' + os.path.abspath(titlename))
#@+node:ekr.20060531085804: *3* @@button Show other clones
@
Ever have a clone that is difficult to understand outside the context of its
original parent? Here's some code to help. It displays the headline of the
current node plus the headlines of all the parents of all the clones of the
current node. Selecting a displayed parent headline moves the current node to
the corresponding clone in the outline.

The idea is to be able to quickly see the context of all the clones of the
current node and to be able to easily navigate from one clone instance to the
next.
@c

@others
c.cn = cloneNavigator(c)
c.cn.displayClones(c)
#@+node:ekr.20060531085804.1: *4* class cloneNavigator
class cloneNavigator:
    '''
       Displays the headline of the current node plus the headlines of
       all the parents of all the clones of the current node.  Selecting
       a displayed parent headline moves the current node to the
       corresponding clone in the outline.

       The idea is to be able to quickly see the context of all the clones
       of the current node and to be able to easily navigate from one clone
       instance to the next.
    '''
    # The methods live in child nodes and are inserted here by Leo.
    @others
#@+node:ekr.20060531085804.2: *5* init
def __init__ (self,c):
    # Build the UI in the log's 'Clones' tab (or a Toplevel if enabled).
    # NOTE(review): Python 2 only ('Tkinter' module name).
    self.c = c
    import Tkinter as Tk
    if 0:
        f = Tk.Toplevel()
    else:
        log = c.frame.log
        log.selectTab('Clones')
        f = log.tabFrame
        # Clear out any widgets left from a previous run.
        for w in f.winfo_children():
            w.destroy()

    # Create and pack empty label and listbox
    self.title = Tk.Label(f)
    self.title.pack(anchor="nw")
    self.lb = Tk.Listbox(f)
    self.lb.pack(expand=1,fill="both")
#@+node:ekr.20060531085804.3: *5* getAllClones
def getAllClones(self,p):
    """Return copies of every position that shares p's vnode (all clones of p)."""
    c = self.c
    result = []
    for p2 in c.all_positions():
        if p2.v == p.v:
            result.append(p2.copy())
    return result
#@+node:ekr.20060531085804.4: *5* displayClones
def displayClones(self,c):
    '''Displays the parent headline for all the clones of the current position'''
    cp = c.currentPosition()

    # "Title" is the headline of the current node
    self.title.configure(text=cp.h)

    # Initialize listbox and clone list
    clones = self.getAllClones(cp)
    self.lb.delete(0,self.lb.size()-1)

    # The two Leo section references below are expanded inline by Leo.
    <<Fill listbox with clone parent headlines>>    
    <<Goto selected position when listbox selection changes>>
#@+node:ekr.20060531085804.5: *6* <<Fill listbox with clone parent headlines>>
# This chunk runs inline inside displayClones (via a section reference),
# so self, cp and clones are in scope here.
# Add the headlines of all the clone parents to the listbox
for p in clones:
    if p.parent():
        text = p.parent().h
    else:
        text = "<root>"
    self.lb.insert(self.lb.size(),text)

    # Initial listbox selection corresponds to current position
    if p.v == cp.v:
        self.lb.selection_set(self.lb.size()-1)
#@+node:ekr.20060531085804.6: *6* <<Goto selected position when listbox selection changes>>
# This chunk runs inline inside displayClones (via a section reference).
# Callback for when a listbox entry is selected            
# Default arguments bind lb, c and the clone list at definition time.
def gotoSelectedPosition(event,lb=self.lb,c=c,positions=clones):
    idx = int(lb.curselection()[0])
    p = positions[idx]
    c.frame.tree.expandAllAncestors(p)
    c.selectPosition(p)
    return
# Presumably binds the <<ListboxSelect>> virtual event -- confirm angleBrackets.
self.lb.bind(g.angleBrackets("ListboxSelect"), gotoSelectedPosition)
#@+node:ekr.20130810093044.16935: *3* script: Add @script node
'''
Adds a @script node to your outline which reloads the other outlines
currently loaded when this outline is next loaded.
'''

tablist = g.findNodeAnywhere(c, '@script load tabs')
if not tablist:
    # Create the node at the top level of the hidden root.
    # NOTE(review): lowercase 'vnode' is the legacy class name.
    from leo.core.leoNodes import vnode
    v = vnode(c)
    v.h = '@script load tabs'
    v._linkAsNthChild(c.hiddenRootNode,
         len(c.hiddenRootNode.children))
tablist = g.findNodeAnywhere(c, '@script load tabs')
assert tablist
import time
# Build the generated script line by line.
b = ["# Generated %s\n"%time.strftime('%c')]
for oc in g.app.commanders():
    b.append("g.openWithFileName('%s', c)" % oc.fileName())
b.append("c.frame.bringToFront()")
b.append("c.setLog()")
tablist.b = '\n'.join(b)
#@+node:ekr.20201110014825.1: *3* script: add tags for issues
"""Add tags for all matches of '# #[0-9]+' (issue references) in body text."""
g.cls()
import re
d_all = {}    # issue number (int) -> set of headlines containing that issue.
d_added = {}  # issue number (int) -> set of headlines newly tagged here.
tc = c.theTagController
pat = re.compile(r'# #([0-9]+)')
for p in c.all_unique_positions():
    u = p.v.u
    tags = set(u.get(tc.TAG_LIST_KEY, set([])))
    for m in pat.finditer(p.b):
        tag = m.group(1)
        key = int(tag)
        # Bug fix: look up and store under the same (int) key.  The original
        # did d_all.get(tag, ...) (str key) but d_all[int(tag)] = ..., so every
        # lookup missed and each entry was overwritten with a one-element set.
        aSet = d_all.get(key, set())
        aSet.add(p.h)
        d_all [key] = aSet
        # Add the tag. tc.add_tag is way too slow.
        if tag not in tags:
            tags.add(tag)
            u[tc.TAG_LIST_KEY] = tags
            aSet = d_added.get(key, set())
            aSet.add(p.h)
            d_added [key] = aSet
if 1: # Print all added tags.
    print('Added tags...')
    for key in sorted(d_added):
        aList = d_added.get(key)
        for h in sorted(aList):
            print(f"{key:>8} {h}")
if 1: # Print all tags.
    print('All tags...')
    for key in sorted(d_all):
        aList = d_all.get(key)
        for h in sorted(aList):
            print(f"{key:>8} {h}")
print('done')
#@+node:tbrown.20141110113251.1: *3* script: Capture idea / ph. msg / to-do item
@g.command('x')
def add_note(args):
    """Create a global Alt-x command 'x' to quickly add a top level node
    to workbook.leo, if it's open, else the first open Leo outline, to
    quickly record a note / thought / to-do item.
    
    Marks the node as a priority 2 to-do item due today, titles the node
    with current date / time, and included date / time in body.
    """
    # search for workbook.leo
    for c in g.app.commanders():
        if c.fileName().endswith('workbook.leo'):
            break
    else:
        c = g.app.commanders()[0]  # or use first open outline
    
    # add a node a the top of the outline
    nd = c.rootPosition().insertAfter()
    nd.moveToRoot(c.rootPosition())
    
    # label the node and add to-do attributes
    import datetime, time
    nd.h = time.asctime()
    nd.b = "\n\n# %s\n\n" % nd.h 
    # The 'annotate' uA is what the cleo/todo plugin reads -- confirm.
    nd.v.u['annotate'] = {
        'duedate': datetime.date.today(),
        'created': datetime.datetime.now(),
        'nextworkdate': datetime.date.today(),
        'priority': 2,
    }

    # select the node and focus in body for immediate typing
    # NOTE(review): c.cleo requires the cleo/todo plugin to be enabled.
    c.selectPosition(nd)
    c.cleo.loadIcons(nd)
    c.redraw()
    c.bringToFront()
    c.bodyWantsFocusNow()
#@+node:ekr.20120602062004.12350: *3* script: Clean imported nodes
'''A script to clean Python imports.
'''

h = 'Coverage (live)'
    # The headline of the tree to be converted.

class CleanPython:
    # Methods live in child nodes and are inserted by Leo's @others.
    def __init__ (self):
        self.trace = True
    @others

p = g.findNodeAnywhere(c,h)
if p:
    CleanPython().run(c,p)
else:
    print('not found: %s' % (h))
#@+node:ekr.20120602062004.12351: *4* run
def run(self,c,p):
    """Clean every direct child of p, with a single undo bead for the tree."""

    if self.trace: g.cls()
    
    self.changed = 0
    p1 = p.copy()
    
    # Don't set any nodes dirty here.
    for p in p1.self_and_subtree():
        if p.isMarked():
            p.v.clearMarked()
            
    bunch = c.undoer.beforeChangeTree(p1)
   
    for p in p1.children():
        if self.trace: print('\n***** %s' % (p.h))
        self.clean(c,p)
        
    if self.changed:
        c.undoer.afterChangeTree(p1,'clean-python-code',bunch)
        c.redraw()

    g.trace('done: %s nodes changed' % (self.changed))
#@+node:ekr.20120602062004.12352: *4* clean & helpers
def clean(self,c,p):
    
    '''
    - Move a shebang line from the first child to the root.
    - Move a leading docstring in the first child to the root.
    - Use a section reference for declarations.
    - Remove leading and trailing blank lines from all nodes.
    - Merge a node containing nothing but comments with the next node.
    - Merge a node containing no class or def lines with the previous node.
    '''

    root = p.copy()
    # Only @file / @@file trees are expected here.
    assert p.h.startswith('@@file') or p.h.startswith('@file'),p.h
    
    self.move_shebang_line(c,root)
    self.move_doc_string(c,root)
    self.rename_decls(c,root)

    # Per-node passes over the whole tree.
    for p in root.self_and_subtree():
        self.clean_blank_lines(c,p)
    for p in root.subtree():
        self.merge_comment_nodes(c,p)
    for p in root.subtree():
        self.merge_extra_nodes(c,p)
#@+node:ekr.20120602062004.12353: *5* move_shebang_line
def move_shebang_line (self,c,root):
    
    '''Move a shebang line from the first child to the root.

    Prepends the first child's '#!' line to root.b (separated by a blank
    line if root.b is non-empty), marks both nodes dirty/marked, and
    bumps self.changed.
    '''
    
    p = root.firstChild()
    s = p and p.b or ''
    if not s.startswith('#!'):
        return
        
    lines = g.splitLines(s)
    nl = '\n\n' if root.b.strip() else ''
    root.b = lines[0] + nl + root.b
    # Bug fix: p.b must be a string; the original assigned the list lines[1:].
    p.b = ''.join(lines[1:])
    p.setDirty()
    p.setMarked()
    root.setDirty()
    root.setMarked()
    c.setChanged()
    self.changed += 1
    g.trace('%s --> %s' % (p.h,root.h))
#@+node:ekr.20120602062004.12354: *5* move_doc_string
def move_doc_string(self,c,root):

    '''Move a leading docstring in the first child to the root node.

    The docstring is inserted at the front of root.b, but after any
    shebang line.  Marks both nodes dirty/marked and bumps self.changed.
    '''
    
    p = root.firstChild()
    s = p and p.b or ''
    if not (s.startswith('"""') or s.startswith("'''")):
        return
        
    delim = '"""' if s.startswith('"""') else "'''"
    i = s.find(delim,3)
    if i == -1:
        # Unterminated docstring: leave the node alone.
        return
        
    doc = s[:i+3]
    p.b = s[i+3:].lstrip()
    
    # Move docstring to front of root.b, but after any shebang line.
    nl = '\n\n' if root.b.strip() else ''
    if root.b.startswith('#!'):
        lines = g.splitLines(root.b)
        # Bug fix: join the remaining lines; the original concatenated
        # a str with the list lines[1:], raising TypeError.
        root.b = lines[0] + doc + nl + ''.join(lines[1:])
    else:
        root.b = doc + nl + root.b
        
    p.setDirty()
    p.setMarked()
    root.setDirty()
    root.setMarked()
    c.setChanged()
    self.changed += 1
    g.trace('%s --> %s' % (p.h,root.h))
#@+node:ekr.20120602062004.12355: *5* rename_decls (test)
def rename_decls (self,c,root):
    
    '''Use a section reference for declarations.

    If the first child's headline ends with 'declarations', retitle it as a
    << declarations >> section and insert the reference before @others in
    root.b.
    '''
    
    p = root.firstChild()
    h = p and p.h or ''
    
    tag = 'declarations'
    if not h.endswith(tag):
        return

    name = h[:-len(tag)].strip()
    decls = g.angleBrackets(tag)
    p.h = '%s (%s)' % (decls,name)
    
    i = root.b.find('@others')
    if i == -1:
        # An @file root without @others is unexpected here.
        g.trace('can not happen')
        return
    else:
        nl = '' if i == 0 else '\n'
        root.b = root.b[:i] + nl + decls + '\n' + root.b[i:]

    p.setDirty()
    root.setDirty()
    root.setMarked()
    c.setChanged()
    self.changed += 1
    g.trace('%s --> %s' % (p.h,root.h))
#@+node:ekr.20120602062004.12356: *5* clean_blank_lines
def clean_blank_lines(self,c,p):
    
    '''Strip leading and trailing blank lines from p.b, ensuring the body
    ends with exactly one newline.  Marks the node dirty if it changed.'''
    
    if not p.b.strip():
        return

    lines = g.splitLines(p.b)
    # Drop blank lines at the front, then at the back.
    while lines and not lines[0].strip():
        del lines[0]
    while lines and not lines[-1].strip():
        del lines[-1]

    s = ''.join(lines)
    if not s.endswith('\n'):
        s = s + '\n'
    if s != p.b:
        p.b = s
        p.setDirty()
        # p.setMarked()
        c.setChanged()
        self.changed += 1
        # if self.trace: g.trace(p.h)
#@+node:ekr.20120602062004.12357: *5* merge_comment_nodes
def merge_comment_nodes(self,c,p):
    
    '''Merge a node containing nothing but comments with the next node.'''

    h = p.h
    
    # Only childless nodes whose headline starts with '#' qualify.
    if p.hasChildren() or not h.strip().startswith('#'):
        return
        
    p2 = p.next()
    if p2:
        b = p.b.lstrip()
        b = b + ('\n' if b.endswith('\n') else '\n\n')
        p2.b = b + p2.b
        # Presumably doDelete's argument is the node to select after
        # deleting p -- confirm against Leo's position API.
        p.doDelete(p2)
        p2.setDirty()
        # p2.setMarked()
        c.setChanged()
        self.changed += 1
        if self.trace: g.trace(h,' --> ',p2.h)
#@+node:ekr.20120602062004.12358: *5* merge_extra_nodes
def merge_extra_nodes(self,c,p):
    
    '''Merge a node containing no class or def lines with the previous node'''
    
    s = p.b
    # Skip nodes with children, section-definition nodes, and empty bodies.
    if p.hasChildren() or p.h.strip().startswith('<<') or not s.strip():
        return
        
    for s2 in g.splitLines(s):
        if s2.strip().startswith('class') or s2.strip().startswith('def'):
            return

    p2 = p.back()
    if p2:
        nl = '\n' if s.endswith('\n') else '\n\n'
        p2.b = p2.b + nl + s
        h = p.h
        # Presumably doDelete's argument is the node to select after
        # deleting p -- confirm against Leo's position API.
        p.doDelete(p2)
        p2.setDirty()
        # p2.setMarked()
        c.setChanged()
        self.changed += 1
        if self.trace: g.trace(h,' --> ',p2.h)
#@+node:ekr.20051110110853: *3* script: clear all uA's, tnodeLists, etc.
# Use these with caution.
#@+node:ekr.20040312021734.1: *4* Clean unused tnodeLists
# Delete stale v.tnodeList attributes on nodes that are not @file nodes.
# NOTE(review): Python 2 only (`v` backtick repr, print statement).
count = 0
for p in c.all_unique_positions():
    count += 1
    # Empty tnodeLists are not errors because they never get written to the .leo file.
    v = p.v
    if hasattr(v,"tnodeList") and len(v.tnodeList) > 0 and not v.isAnyAtFileNode():
        g.es("deleting tnodeList for " + `v`,color="blue")
        delattr(v,"tnodeList")
        c.setChanged()

s = "%d nodes" % count
print s ; g.es(s)
#@+node:ekr.20051110105027.102: *4* Clear all timestamps
# About the only time you should run this script is when:
# - changing the format of timestamps in nodeIndices.setTimestamp or
# - when making a retroactive change to leoID.txt.

if 0: # This is usually a very bad idea.

    # Clearing fileIndex forces Leo to regenerate every gnx.
    for p in c.all_positions():
        p.v.fileIndex = None

    g.es("all timestamps cleared")
#@+node:ekr.20040318091620: *4* Clear all uAs (unknown attributes)
# Delete all unknown attributes (v.u) from every node.
put = g.es_print
for p in c.all_positions():
    if p.v.u:
        put("deleting v.u:",p.h,
            g.listToString(p.v.u.keys()))
        # NOTE(review): sets u to None rather than {} -- confirm that
        # downstream code treats both as 'no uAs'.
        p.v.u = None

put('done') 
c.redraw()
#@+node:ekr.20071113150213: *3* script: Clone all nodes to child node
# Clone all nodes specified in the child node called 'to be cloned'
# Clone every node whose headline matches a line in the child node
# headed 'to be cloned'.  NOTE(review): Python 2 print statement.
root = p.copy()
child = p.firstChild()
assert(child.h.lower().strip() == 'to be cloned')
s = child.b
lines = g.splitLines(s)

for p in c.all_unique_positions():
    h = p.h.strip()
    for line in lines:
        if h == line.strip():
            print 'found',h
            clone = p.clone()
            clone.moveToLastChildOf(root)
c.redraw()
#@+node:ekr.20071113150213.1: *4* To be cloned
# To do
#@+node:ekr.20210118070851.1: *3* script: convert-LeoFind
"""
Convert defs in LeoFind to pep8 names.
- Don't change defs containing underscores.
- Check for existing target.
"""
# Contains a few hacks related specifically to leoFind.py.
g.cls()
import re
h = 'class LeoFind (LeoFind.py)'
root = g.findNodeAnywhere(c, h)
@others
if root:
    main(root)
else:
    print('not found:', root)
#@+node:ekr.20210118070851.2: *4* convert
def convert(old_func, new_func, root):
    """Rename old_func to new_func (whole-word match) in the headline and
    body of every node in root's subtree."""
    print(f"{old_func} => {new_func}\n")
    word_pat = rf"\b{old_func}\b"
    for node in root.subtree():
        node.h = re.sub(word_pat, new_func, node.h)
        node.b = re.sub(word_pat, new_func, node.b)
    print('')
#@+node:ekr.20210118070851.3: *4* main
def main(root):
    pattern = re.compile(r'^def\s+(\w+)', re.MULTILINE)
    for pass_n in (0, 1):
        n = 0
        for p in root.subtree():
            for m in re.finditer(pattern, p.b):
                target = m.group(0)
                old_func = m.group(1)
                if '_' in target:
                    continue
                if target.islower():
                    continue
                if old_func == 'finishCreate':  # Special case.
                    return
                new_func = new_name(old_func)
                if new_func == old_func:
                    continue
                if pass_n == 0:
                    if exists(new_func, root):
                        g.trace(f"already exists: {old_func} {new_func}")
                        g.trace('aborting')
                        return
                else:
                    n += 1
                    convert(old_func, new_func, root)
    g.trace(f"converted {n} function names")
    c.redraw()
            
#@+node:ekr.20210118070851.4: *4* new_name
def new_name(s):
    """Return the underscore-style (pep8) form of the camelCase name s.

    The first character is lowered without a separator; 'i_search' is
    collapsed to 'isearch' as a special case.
    """
    assert ' ' not in s
    converted = ''.join(
        ('_' + ch.lower()) if (i and ch.isupper()) else ch.lower()
        for i, ch in enumerate(s))
    return converted.replace('i_search', 'isearch')
#@+node:ekr.20210118070851.5: *4* exists
def exists(s, root):
    """Return True if s appears in the body of any of root's nodes."""
    return any(s in p.b for p in root.self_and_subtree())
#@+node:ekr.20080105135417: *3* script: Delete All Icons
# Same as delete-all-icons command (now removed)
# A script seems safer because it can not be executed by mistake.

# Clear the 'icons' uA (and its line offset) on every node that has one.
for p in c.all_positions():
    if hasattr(p.v,"unknownAttributes"):
        a = p.v.unknownAttributes
        iconsList = a.get("icons")
        if iconsList:
            a["icons"] = []
            a["lineYOffset"] = 0
            p.setDirty()
            c.setChanged()
c.redraw()
#@+node:ekr.20141105055521.13: *3* script: Dictionary to Leo outline
'''
Transform a dictionary into an outline, so you can navigate through it.
'''

def dictionary_to_outline(p,dictionary):
    '''Transform a dictionary into an outline as p's children.

    Keys become child headlines (in sorted order); non-dict values become
    the child's body text (via str); dict values recurse one level deeper.
    '''
    # g.app.gui.frameFactory.masterFrame.currentWidget().leo_c
    for key in sorted(dictionary):
        new_node = p.insertAsLastChild().copy()
        new_node.h = key
        value = dictionary[key]
        # Bug fix: this is a plain function, not a method; the original
        # called self.dictionary_to_outline, raising NameError on any
        # nested dict.  Also use isinstance instead of a str(type()) test.
        if isinstance(value, dict):
            dictionary_to_outline(new_node, value)
        else:
            new_node.b = str(value)
#@+node:ekr.20201110014834.1: *3* script: remove all tags
"""Remove all tags."""
g.cls()
tc = c.theTagController
for p in c.all_unique_positions():
    u = p.v.u
    # tags is a copy of the stored set, so it is safe to iterate while
    # tc.remove_tag mutates the underlying uA.
    tags = set(u.get(tc.TAG_LIST_KEY, set([])))
    for tag in tags:
        print(f"remove {tag:10} {p.h}")
        tc.remove_tag(p, tag)
print('done')
#@+node:ekr.20201030065548.4: *3* script: remove-icon-script
"""Remove icons for the node whose headline is h"""
h = 'my headline'
p = g.findNodeAnywhere(c, h)
if p:
    c.editCommands.deleteNodeIcons(p=p)
    c.redraw_now()
else:
    g.es_print('not found', h)
#@+node:ekr.20230421070847.1: *3* script: replace kwargs in mode files
"""Replace default kwargs in all leo/modes files"""

g.cls()
import glob
import os
import re

# Keyword arguments whose False defaults should be removed entirely.
bool_kwargs = (
    'at_line_start', 'at_whitespace_end', 'at_word_start',
    'exclude_match',
    'no_escape', 'no_line_break', 'no_word_break',
)

patterns = []
for bool_arg in bool_kwargs:
    # Remove kwargs inited to False.
    pattern1 = re.compile(fr"{bool_arg}=False,")
    patterns.append((pattern1, ''))
    pattern2 = re.compile(fr"{bool_arg}=False\)")
    patterns.append((pattern2, ')'))

delegate_patterns = (
    # Remove kwargs inited to "".
    (re.compile(r'delegate="",'), ''),
    (re.compile(r'delegate=""\)'), ')'),
)
cleanup_patterns = [
    # Remove blank lines between trailing comma and ')'.
    (re.compile(r',[ ]*(\n[ ]*)*\)', re.MULTILINE), ')'),
]
for bool_arg in bool_kwargs:
    # Remove blank lines between trailing comma and any bool kwarg.
    pattern3 = re.compile(fr",[ ]*(\n[ ]*)*{bool_arg}")
    cleanup_patterns.append((pattern3, fr",\n{' '*10}{bool_arg}"))   

for aTuple in delegate_patterns:
    patterns.append(aTuple)
for aTuple in cleanup_patterns:
    patterns.append(aTuple)
    
# Exclude these files: @file nodes define them.
exclude = (
    'batch.py', 'forth.py', 'html.py',
    'javascript.py', 'julia.py', 'python.py',
)
modes = os.path.normpath(os.path.join(g.app.loadDir, '..', 'modes'))
paths = glob.glob(f"{modes}{os.sep}*.py")
paths = [z for z in paths if not any(z2 in z for z2 in exclude)]
# g.printObj([g.shortFileName(z) for z in paths])

# NOTE(review): debug limiter left in -- only the first file is processed.
paths = paths[:1]  ###
for path in paths:  ###
    with open(path, 'r') as f:
        contents = f.read()
    for pattern, repl in patterns:
        contents =  re.sub(pattern, repl, contents)
    # Strip trailing whitespace from every line.
    lines = [z.rstrip() + '\n' for z in g.splitLines(contents)]
    contents = ''.join(lines)
    lines = g.splitLines(contents)
    # The three 'if 0' blocks below are debug dumps only.
    if 0:
        g.printObj(lines, tag=path)
    if 0:
        g.printObj(list(f"{i+660:3} {z}" for i, z in enumerate(lines[660:])))
    if 0:
        contents = contents.replace('  ', ' ')
        lines = [f"{i:3} {z}" for i, z in enumerate(g.splitLines(contents))
            if ('colorer.match' in z
                or ',\n\n' in z
                or any(z2 in z for z2 in bool_kwargs)
            )]
        g.printObj(lines, tag=path)
    with open(path, 'w') as f:
        f.write(contents)
g.es_print('Done!')
#@+node:tom.20210803133433.1: ** Mind Maps
#@+node:tom.20210803133500.1: *3* README
@language rest
=====================
Mind Mapping for Leo
=====================

The three command scripts in this directory work together
to bring a mind mapping capability to Leo.  For a selected
node, a mind map can be created by treating the contents
as an indented list.  Alternatively, a mind map can be created
for the subtree whose head is the selected node.

The mind maps can be displayed in the Viewrendered3 or Freewin
plugins, or in the system browser.  A setting specifies the
display choice.

#@+node:tom.20210803140756.1: *4* The Commands
The Commands
=============

The three scripts in this outline must be copied into an
@settings tree, either in myLeoSettings.leo to make them
available to all outlines, or in an outline of interest.
When Leo is started or restarted, the three commands will
be installed.

The three commands are:

    - mmap-list -- create map for indented list
    - mmap-tree -- create map for subtree
    - mmap-monkeypatch -- install map creation code.

A user does not need to invoke the *mmap-monkeypatch* command.
The other two will execute it as needed.  It adds a function 
for creating mind maps to the commander for the outline.
#@+node:tom.20210803140919.1: *4* The Settings
The Settings
=============

There are two settings that affect the actions of the commands:

    - @string mmap_render_dev  (default 'vr3')
    - @string mmap-target-node (default '@clean mmap.html')

*mmap_render_dev* specifies where the mind map will be displayed.
Three values are understood: *vr3* for the Viewrendered3 plugin;
*freewin* for the Freewin plugin; and *browser* for the system browser.

For *freewin*, the display pane will open in the editing mode.
Change to the rendering mode to see the rendered mind map.

*mmap-target-node* defines a node to contain the svg mind map so that
the plugins can display it.  The map creation commands create a node with
that headline if it does not exist, then select it. If the node needs to
be created, it will be created after the end of the visible outline,
outside of any existing subtree.

The default node headline contains *@clean*.  This causes a file to be
saved with the svg mind map when the outline is next saved.  It is not
necessary to make the output into a file.  That is, the "@clean" can be
omitted - the headline can be any normal headline string.
#@+node:tom.20210803141435.1: *4* Using the Commands
Using the Commands
===================

Indented Lists
---------------

To use the *mmap-list* command, select a node that holds an indented list.
Then invoke the command with ALT-x like any other minibuffer command.
A mind map representing the list will be displayed in the selected
(or default) display device.

The white space for the indents can be mixed tabs and spaces.  They will be
converted to all spaces at 4 spaces per tab.

Subtrees
--------

To use the *mmap-tree* command, select a node that is the parent of a subtree.
Invoke it with Alt-x. A mind map depicting the subtree will be displayed in
the selected (or default) display device.  The code will try to truncate
headlines that are too long to fit the space provided.

If there are more branches than can be displayed, one final branch will be
shown with a label saying that there are more undisplayed branches.

The Monkeypatch
----------------

The third command, *mmap-monkeypatch*, is normally only invoked by the other two
as needed.  It contains all the code to turn the input into svg markup for the
mind map diagram.  It also installs the code into the commander for the outline
so it can be called by the other two commands.
#@+node:tom.20210803145435.1: *4* Known Issues
Known Issues
============

- Branch labels may not always be truncated properly;

- Labels may not always be placed horizontally in the best position;

- On linux, the vertical location of the label of the central image may not be well-centered;

- Handling of "<<", ">>" strings and HTML entities is difficult and could be improved.
#@+node:tom.20210803142908.1: *4* Design
Design
=======

The *mmap-monkeypatch* Command
------------------------------

This command defines a function that creates svg markup to display a
mind map.  This function creates a parser for indented lists, lays out
the branches of the mind map to fit the display space, and copies the
svg output to the target node.  Then it selects the target node and
opens the rendering device, which in turn displays the diagram.

When the command is invoked, it monkeypatches the commander of the
outline by adding the map creation function as an attribute of the commander.

The only time one would invoke this command directly would be during
development. After each change, the command must be run again so that
the modified code is attached to the commander.

The *mmap-list* and *mmap-tree* Commands
-----------------------------------------

These commands collect the input data and feed it to the map creation function.
In the case of *mmap-tree* the selected subtree is converted to an indented
list and then fed to the map creation function.

These commands also save the setting for the display device as a new attribute
of the outline's commander.  When this setting has been changed, the commands
notice and run the monkey patch again so that the mind map will display on
the newly specified device.

The Parser
-----------

The parser class for indented lists is derived from an AbstractParser class.
It would be feasible to derive a different parser to turn some other
structure into a mind map.  In the case of Leo subtrees, it seemed easier
to turn the tree structure into an indented list than to create another parser.

Depth of Display
-----------------

The mind maps are intentionally limited to a depth of two to avoid too
much clutter and too small type sizes.
#@+node:tom.20210803134601.1: *3* @command mmap-list
@language python
# Build a mind map from the selected node's body, treated as an
# indented list.  '&' and '<' would break the generated svg/xml,
# so neutralize them first.
data = c.p.b
data = data.replace('&', '+').replace('<', ' ')

# Check if we have not been run before or if the rendering
# device has been changed
_render_dev = c.config.getString('mmap_render_dev')
if not hasattr(c, 'render_dev') or c.render_dev != _render_dev:
    # (Re)install c.build_mmap and remember the device we set it up for.
    c.executeMinibufferCommand('mmap-monkeypatch')
    c.render_dev = _render_dev

c.build_mmap(data)
#@+node:tom.20210803134612.1: *3* @command mmap-tree
@language python
"""Create an indented list for a subtree, then convert to svg mind map."""

p = c.p
tree = p.self_and_subtree()
base = p.level()
indented = ''
TABSIZE = ' ' * 4

def fix_label(label):
    """Clean a headline for display on the map.

    A leading Leo directive word (e.g. '@file') is dropped, and
    '<<'/'>>' section brackets are reduced to single angle brackets.

    ARGUMENT
    label -- the headline string to clean.

    RETURNS
    the cleaned label string.
    """
    if label.startswith('@'):
        parts = label.split()
        # Drop the directive word but keep the rest of the headline.
        # (The original computed parts[1:] and then discarded it by
        # re-joining *all* the parts.)
        if len(parts) > 1:
            label = ' '.join(parts[1:])

    fixed = label.replace('<<', '<')
    fixed = fixed.replace('>>', '>')
    return fixed

# Convert the subtree into an indented list: one line per node,
# indented four spaces per level below the selected node.
for x in tree:
    label = x.v.h
    if x.level() > 0:
        # Only descendants get their headlines cleaned; the root
        # headline is handled later by root_image().
        label = fix_label(x.v.h)
    indent = TABSIZE * (x.level() - base)
    indented += indent + label + '\n'

# '&' and '<' would break the generated svg/xml; neutralize them.
data = indented.replace('&', '+').replace('<', ' ')

# Check if we have not been run before or if the rendering
# device has been changed
_render_dev = c.config.getString('mmap_render_dev')
if not hasattr(c, 'render_dev') or c.render_dev != _render_dev:
    # (Re)install c.build_mmap and remember the device we set it up for.
    c.executeMinibufferCommand('mmap-monkeypatch')
    c.render_dev = _render_dev

c.build_mmap(data)
#@+node:tom.20210808144800.1: *3* @command mmap-monkeypatch
@tabwidth -4
@language python
"""A mindmap svg generator for trees and indented lists..

This command adds a function to the outline commander c that is
used by the top-level mind mapping commands.  This function
generates the svg output for a mindmap and renders it in
the specified rendering pane or program.  The rendering
"device" is specified by the setting "mmap_render_dev".

@string settings:
mmap_render_dev -- output "device". One of ('vr3', 'freewin', 'browser').
                   Default: "vr3"
mmap-target-node -- name of node where svg output will be copied to.
                    Default: "@clean mmap.html"

"""
@others
#@+node:tom.20210808144800.2: *4* Imports
@language python
from os.path import join, isdir, basename
from os import mkdir
import webbrowser
from leo.core.leoQt import QtGui

QFontMetrics = QtGui.QFontMetrics
QFont = QtGui.QFont
QFontInfo = QtGui.QFontInfo
#@+node:tom.20210808144800.3: *4* Declarations
@language python
# Font used for all map text; the size is in svg user units.
FONT_FAMILY = 'corbel'
FONT_SIZE = 2.4 # will be scaled by svg engine
# viewBox geometry: origin at the center of a 200 x 200 canvas.
VBX = VBY = -100
VBW = VBH = 200

# Boilerplate wrapping the generated map; the commented-out rect
# draws the viewBox frame for debugging.
SVG_HEADER = f'''<svg viewBox="{VBX} {VBY} {VBW} {VBH}"
    xmlns="http://www.w3.org/2000/svg">

<style>
text {{
    font-family:{FONT_FAMILY};
    font-size:{FONT_SIZE}pt;
    font-weight:bold;
    text-anchor:start;
}}

</style>

<!--rect id='frame' width='{VBW}' height='{VBH}'
    x='{VBX}' y='{VBY}'
    style="stroke:black; stroke-width:.4" fill='none'/-->

'''
SVG_TRAILER = '</svg>\n'

# Settings
DEFAULT_RENDERING_DEVICE = 'vr3'
RENDERING_SETTING_STR = 'mmap_render_dev'
# Possible values: freewin, vr3, browser
RENDERING_DEVICE_VALUES = ('freewin', 'vr3', 'browser')
# Fall back to the default when the setting is missing or invalid.
RENDERING_DEVICE = c.config.getString(RENDERING_SETTING_STR) or DEFAULT_RENDERING_DEVICE
if RENDERING_DEVICE not in RENDERING_DEVICE_VALUES:
    RENDERING_DEVICE = DEFAULT_RENDERING_DEVICE

# Headline of the node that receives the generated svg.
TARGET_SETTING_STR = 'mmap-target-node'
TARGET_NODE = c.config.getString(TARGET_SETTING_STR) or '@clean mmap.html'


#@+node:tom.20210808144800.4: *5* Font properties
@language python
# Need a qapp for font metrics to work
# even if we aren't using it for anything else
#qapp = QApplication

# Ask Qt for the real metrics of the chosen font.
qf = QFont(FONT_FAMILY, int(FONT_SIZE))
qfont = QFontInfo(qf) # Uses actual font if different
FM = QFontMetrics(qf)

# Metrics are measured at the truncated integer size; rescale them
# back to the fractional FONT_SIZE.
FONT_RESCALE = FONT_SIZE / int(FONT_SIZE)

Q_ADV = FONT_RESCALE * FM.horizontalAdvance('c')  # typical character advance
Q_LINESPACE = FONT_RESCALE * FM.lineSpacing()
Q_HEIGHT = FONT_RESCALE * FM.height()
X_HEIGHT = FONT_RESCALE * FM.xHeight()
Q_UNDER  = FONT_RESCALE * FM.underlinePos()

# Layout constants, in svg user units unless noted.
LABEL_OFFSET_X = 6 * Q_ADV   # horizontal gap between leader and label
LABEL_OFFSET_Y = -1          # lift labels slightly above their line
LABEL_MAX_CHRS = 26          # labels longer than this get truncated
LEADER_X_LEN = 13            # length of the horizontal leader segment
LEVEL_1_JUNCTION_X = 18      # x where level-1 leaders fan out
MARGIN_TOP = 20

PITCH = 1.2 * Q_LINESPACE        # vertical distance between items
V_PADDING = 2 * Q_LINESPACE      # padding added to each block's height
#@+node:tom.20210808144800.5: *4* normalize_indent
@language python
def normalize_indent(line):
    """Return the indent level of *line*, counting a tab as four spaces."""
    width = 0
    for ch in line:
        if ch == ' ':
            width += 1
        elif ch == '\t':
            width += 4
        else:
            break
    # Four columns of leading whitespace make one indent level.
    return width // 4

#@+node:tom.20210808144800.6: *4* get_target_node
def get_target_node(c, headline):
    """Return the last position in *c* whose headline equals *headline*.

    Returns None when no headline matches.
    """
    matches = [p for p in c.all_unique_positions() if p.h == headline]
    return matches[-1] if matches else None

#@+node:tom.20210808144800.7: *4* Class Node
@language python
class Node:
    """One mind-map tree node: a label, its depth, and its children."""

    def __init__(self, label, depth=0):
        self.label = label      # text shown on the map
        self.depth = depth      # 0 = root, 1 or 2 for branches
        self.id = ''            # optional identifier, unused by default
        self.children = []      # child Node objects

#@+node:tom.20210808144800.8: *4* Class AbstractParser
@language python
class AbstractParser:
    """Base class for parsers that produce a Node tree of depth <= 2."""

    def __init__(self):
        self.root = None        # root Node, set by parse()
        self.level_1_cnt = 0    # number of depth-1 children
        self.level_2_cnt = 0    # number of depth-2 children

    def parse(self):
        """Returns the root node with children to depth 2."""
        raise NotImplementedError()

    def count_levels(self, start):
        """Return (depth-1 child count, depth-2 child count) for *start*."""
        depth1 = len(start.children)
        depth2 = sum(len(child.children) for child in start.children)
        return depth1, depth2

#@+node:tom.20210808144800.9: *4* Class IndentedParser
@language python
class IndentedParser(AbstractParser):
    """Parse an indented list into a Node tree, at most two levels deep."""

    def __init__(self, text=''):
        super().__init__()
        self.text = text

    def parse(self):
        """Returns the root node with children to depth 2."""
        rows = [row for row in self.text.split('\n') if row.strip()]

        # The first row must be unindented: it becomes the root.
        if normalize_indent(rows[0]):
            print('First line should not be indented')
            return None

        rootnode = None
        seen_root = False
        for row in rows:
            depth = normalize_indent(row)
            # Truncate over-long labels (the indent counts toward the length).
            if depth > 0 and len(row) > LABEL_MAX_CHRS:
                row = row[:LABEL_MAX_CHRS] + '...'

            if depth == 0:
                if seen_root:
                    break  # only one root allowed
                seen_root = True
                rootnode = Node(row, depth)
            elif depth == 1:
                # Depth-1 rows hang directly off the root.
                rootnode.children.append(Node(row.lstrip(), 1))
            elif depth == 2:
                # Depth-2 rows attach to the most recent depth-1 node.
                rootnode.children[-1].children.append(Node(row.lstrip(), 2))
            # Rows deeper than 2 are ignored.

        self.root = rootnode
        self.level_1_cnt, self.level_2_cnt = self.count_levels(rootnode)
        return rootnode

#@+node:tom.20210808144800.10: *4* Class Item
@language python
class Item:
    """Holds descriptive data about a mmap item."""

    def __init__(self, label=''):
        self.label = label  # the text to display
        self.width = 0      # label width from the font metrics
        self.ypos = 0       # vertical position, set during layout

    def __repr__(self):
        return f'Item({self.label})'
#@+node:tom.20210808144800.11: *4* Class Block
@language python
class Block:
    """Describes a laid-out collection of related Items."""

    def __init__(self):
        self.items = []   # Item objects in this block
        self.width = 0    # widest label plus the label offset
        self.height = 0   # total height including padding
        self.axis = 0     # y position of this block's axis

    def set_height(self, pitch, padding):
        """Set total height: padding plus one pitch per item after the first."""
        self.height = padding + pitch * (len(self.items) - 1)

    def set_width(self, padding=0):
        """Set width from the widest item label plus the label offset."""
        widest = max((item.width for item in self.items), default=0)
        self.width = widest + LABEL_OFFSET_X

#@+node:tom.20210808144800.12: *4* count_nodes

def count_nodes(node, depth=1):
    """Count the number of entries in a node.

     A Node object contains a list of child nodes
     and a depth property.  We are only going to go
     to depth 2 at most.

    ARGUMENTS
    node -- a Node object;
    depth -- the number of levels to count (clamped to 2).

    RETURNS
    the count as an integer.
    """
    # The original also defined an inner count_children() helper
    # that was never called; it has been removed.
    cnt = 0
    depth = min(depth, 2)
    for child in node.children:
        cnt += 1
        for grandchild in child.children:
            if grandchild.depth <= depth:
                # Count the grandchild and anything hanging below it.
                cnt += 1 + len(grandchild.children)
    return cnt
#@+node:tom.20210808144800.13: *4* Root Image
@language python
def root_image(label='Main Node'):
    """Return svg markup for the map's central object.

       The object will be sized to fit its label.  A headline such as
       '@clean path/mmap.html' is reduced to the file's basename.
    """
    if label.startswith('@'):
        # Was `_, label = label.split()`, which raised ValueError for
        # any headline with more or fewer than exactly two words.
        parts = label.split(None, 1)
        if len(parts) > 1:
            label = basename(parts[1])
    label_width = FONT_RESCALE * FM.horizontalAdvance(label)
    height = 1.5 * Q_LINESPACE
    xpadding = 3 * Q_ADV
    width = label_width + xpadding
    # Center the text horizontally inside the padded rect.
    label_x = -.5 * (label_width - xpadding)

    x = -0.5 * width
    y = -0.5 * height

    return f'''
<rect id="central-image" width="{width}" height="{height}"
    x="{x}" y="{y}" fill="aliceblue"
    rx='2'
    style="stroke:black; stroke-width:.1"/>
<text x="{label_x}" y="{X_HEIGHT}">{label}</text>
'''

#@+node:tom.20210808144800.14: *4* get_longest_label
@language python
def get_longest_label(nodelist):
    """Return the width of the longest label.

    The width is the font metric's advance for the label
    plus three average character widths of padding.

    RETURNS
    a tuple (length, text)
    """
    xpadding = 3 * Q_ADV
    # First label of maximal character count (empty string for no nodes).
    longest_label = max((n.label for n in nodelist), key=len, default='')
    label_len = FM.horizontalAdvance(longest_label) + xpadding
    return label_len, longest_label
#@+node:tom.20210808144800.15: *4* Leader Path
@language python
def leader_path(x, y, width=LEADER_X_LEN):
    """Return svg for a leader line: from the origin to (x, y),
    then a horizontal segment of the given width.
    """
    path = f'''
<path d="M 0 0 l {x} {y} h {width}" 
    stroke="black"
    stroke-width='.2'
    fill='none'/>
'''
    return path
#@+node:tom.20210808144800.16: *4* Label SVG
@language python
def label_svg(label, x, y):
    """Return svg for a text label, offset from (x, y) by the
    standard label offsets.
    """
    svg = f'''<text x="{x + LABEL_OFFSET_X}" 
      y="{y + LABEL_OFFSET_Y:.3f}">{label}</text>

'''
    return svg
#@+node:tom.20210808144800.17: *4* Level 2 Group
@language python
def level2_group(x, y, nodelist):
    """Generate svg for the depth 2 children of a node.

       The labels are spaced 2 line heights apart.

       ARGUMENTS
       x, y -- the center-left coordinates for the layout.
       nodelist - a list of the nodes to lay out.

       RETURNS
       the generated svg.
    """
    # pylint: disable=too-many-locals
    if not nodelist:
        # The original read `return ''\` with a stray line continuation
        # onto the following blank line; same behavior, now explicit.
        return ''

    xpadding = 3 * Q_ADV
    indent = 5 * Q_ADV
    linespace = 1.2 * Q_LINESPACE
    # A singleton label is nudged off its junction line.
    extra_offset = 0.5 * Q_LINESPACE if len(nodelist) == 1 else 0
    _maxlen, longest_label = get_longest_label(nodelist)
    label_len = FM.horizontalAdvance(longest_label)

    # get label positions around 0
    block_height = 2 * (len(nodelist) - 1) * .5 * linespace
    block_top = y - .5 * block_height
    deltay = 2 * .5 * linespace
    positions = [] # [(yy, label), ...]

    for n, node in enumerate(nodelist):
        yy = block_top + n * deltay + extra_offset
        positions.append((yy, node.label))

    # Generate SVG
    svg = ''
    for y_, l in positions:
        ylt = y_ + LABEL_OFFSET_Y - 2 * extra_offset
        xr = label_len + xpadding
        label_x_start = x + indent + xpadding + 0.5 * label_len
        svg += f'''
<path d="M {x} {y} l {indent} {y - y_} h {xr}"
    stroke="black"
    stroke-width='.2'
    fill='none'/>
<text x="{label_x_start}" y="{ylt}">{l}</text>
<circle cx="{x}" cy="{y}" r="1" fill='blue'/>

'''
    return svg
#@+node:tom.20210808144800.18: *4* Create Mindmap
@language python
def create_map(parser, data):
    """Build a mind map from data.

    The following constants must be declared
    global to the parser:

        MARGIN_TOP -- top&bottom margins in px
        Q_ADV -- average horizontal advance per character
        LEVEL_1_JUNCTION_X -- Horiz. position of 1st level labels
        SVG_HEADER
        SVG_TRAILER

    ARGUMENTS
    parser -- a parser class, which must be a subclass
              of the AbstractParser class.

    data -- the data to be consumed by the parser.

    RETURNS
    the SVG markup for a mind map.
    """
    pars = parser(data)
    root = pars.parse()

    blocks = create_blocks(root)

    # Lay out right side of map
    # Blocks that do not fit on the right spill over to the left side.
    left = 1.2 * Q_ADV * LABEL_MAX_CHRS + LEVEL_1_JUNCTION_X
    axes, layout, remaining = layout_am_blocks(left, blocks)
    first_level = layout_am_1st_level(root, left, axes)

    # Lay out left side of map
    left = VBX + 5 * Q_ADV
    leftover_svg = ''
    if remaining:
        axes, pm_layout, leftout = layout_pm_blocks(left, remaining)
        layout += pm_layout
        first_level += layout_pm_1st_level(root, left, axes, bool(leftout))

        # Handle overflow blocks
        leftover_svg = handle_leftovers(leftout)

    # Central image is drawn last so it sits on top of the leaders.
    svg = (SVG_HEADER
          + layout
          + first_level 
          + leftover_svg
          + root_image(root.label) 
          + SVG_TRAILER)

    return svg
#@+node:tom.20210808144800.19: *4* Create Blocks
@language python
def create_blocks(root):
    """Return list of level-2 blocks.

    A block includes the lines for each label plus the
    leader lines that will lead from the 1st level labels
    plus any padding.

    The blocks are centered around a height of zero.  They
    will be moved to their level-1 position later.

    ARGUMENT
    root -- the root Node returned by a parser.

    RETURNS
    a list of Block objects, one per depth-1 child of root.
    """
    # The original ended with a loop that summed block heights and
    # widths, then discarded the results; that dead code is removed.
    blocks = []
    for child in root.children:
        items = []
        for grandchild in child.children:
            item = Item(grandchild.label)
            # Width from font metrics, rescaled to the fractional font size.
            item.width = FONT_RESCALE * FM.horizontalAdvance(grandchild.label)
            items.append(item)

        block = Block()
        block.items = items
        block.set_height(PITCH, V_PADDING)
        block.set_width()
        blocks.append(block)

    return blocks

#@+node:tom.20210808144800.20: *4* layout_am_first_level
@language python
def layout_am_1st_level(root, left, heights):
    """Build the first level nodes from a parse root.

    Builds the right (AM) side labels.

    The following constants must be declared
    global to the parser:

        MARGIN_TOP
        Q_ADV
        LEVEL_1_JUNCTION_X

    ARGUMENTS
    root -- the root returned by the parser.
    left -- x coordinate for the left edge of 2nd level blocks.
    heights -- a list of the axes of the 2nd-level blocks,
               in the same order as the first level nodes.

    RETURNS
    the SVG markup for a mind map.
    """

    # Layout parameters
    label_width, _ = get_longest_label(root.children)
    #leader_width = label_width + 4 * Q_ADV
    leader_width = left - LEVEL_1_JUNCTION_X
    leader_svg = level_1_labels = ''
    elements = len(heights)

    # Only as many children as have laid-out blocks get a label.
    for m, k in enumerate(root.children[:elements]):
        label = k.label
        x = LEVEL_1_JUNCTION_X
        y = heights[m]
        # Labels start three character widths left of the junction.
        x1 = x - 3 * Q_ADV
        leader_svg += leader_path(x, y, leader_width)
        level_1_labels += label_svg(label, x1, y)

    svg = leader_svg + level_1_labels

    return svg
#@+node:tom.20210808144800.21: *4* layout_pm_first_level
@language python
def layout_pm_1st_level(root, left, heights, overflow):
    """Build the first level nodes from a parse root.

    Builds the left (PM) side labels.

    The following constants must be declared
    global to the parser:

        MARGIN_TOP
        Q_ADV
        LEVEL_1_JUNCTION_X

    ARGUMENTS
    root -- the root returned by the parser.
    left -- x coordinate for the left edge of 2nd level blocks.
    heights -- a list of (y, x) junction coordinates of the
               2nd-level blocks, in the same order as the
               first level nodes.
    overflow -- True if there are blocks left out of the layout.

    RETURNS
    the SVG markup for a mind map.
    """

    # Layout parameters
    label_width, _ = get_longest_label(root.children)
    #leader_width = left - LEVEL_1_JUNCTION_X
    #leader_width = - LEVEL_1_JUNCTION_X
    leader_svg = level_1_labels = ''

    # 1st PM element starts at len(root.children) - len(heights)
    startat = len(root.children) - len(heights)
    if overflow:
        # The final slot is taken by the "more not shown" marker.
        startat -= 1
    elements = len(heights)

    for m, k in enumerate(root.children[startat:startat + elements]):
        label = k.label
        # PM heights are (junction_y, junction_x) pairs.
        x = heights[m][1]
        y = heights[m][0]
        x1 = x - 3 * Q_ADV
        leader_svg += \
                f'''<path d="M 0 0 L{-LEVEL_1_JUNCTION_X} {y} {x} {y}" 
    stroke="black"
    stroke-width='.2'
    fill='none'/>'''

        level_1_labels += label_svg(label, x1, y)

    svg = leader_svg + level_1_labels

    return svg
#@+node:tom.20210808144800.22: *4* layout_am_blocks
def layout_am_blocks(x, blocklist):
    """Return SVG for the level-2 blocks on the right.

    ARGUMENTS
    x -- the left hand edge positioning of the blocks.
    blocklist -- a list of the blocks to be laid out.

    RETURNS
    a list of the y value of the block axes,
    the svg markup for these blocks, and
    a list of the remaining blocks that didn't
    fit in the layout.
    """

    svg = ''
    level_2_heights = []
    # Start just inside the top of the viewBox.
    y = VBY + MARGIN_TOP/2

    remaining = []
    for n, bl in enumerate(blocklist):
        # Stop when the next block would spill past the bottom edge;
        # hand the rest back to the caller.
        if y + bl.height > -VBY:
            remaining = blocklist[n:]
            break
        ax, markup = layout_am_block(x, y, bl)
        svg += markup
        level_2_heights.append(ax)
        y += bl.height

    return level_2_heights, svg, remaining
#@+node:tom.20210808144800.23: *4* layout_pm_blocks
def layout_pm_blocks(x, blocklist):
    """Return SVG for the level-2 blocks on the left.

    ARGUMENTS
    x -- the left hand edge positioning of the blocks.
    blocklist -- a list of the blocks to be laid out.

    RETURNS
    a list of the (y, x) junction coordinates of the block axes,
    the svg markup for these blocks, and
    a list of the remaining blocks that didn't
    fit in the layout.
    """

    svg = ''
    level_2_heights = []
    # The left side fills upward from the bottom of the viewBox.
    y = -VBY - MARGIN_TOP/2

    remaining = []
    for n, bl in enumerate(blocklist):
        # Stop when the next block would spill past the top edge;
        # hand the rest back to the caller.
        if y - bl.height < VBY:
            remaining = blocklist[n:]
            break
        junction_y, junction_x, markup = layout_pm_block(x, y, bl)
        svg += markup
        level_2_heights.append((junction_y, junction_x))
        y -= bl.height

    return level_2_heights, svg, remaining
#@+node:tom.20210808144800.24: *4* layout_am_block
@language python
def layout_am_block(x, y, block):
    """Lay out one block with the top given by y.

    Emits one label and one fan-out path per item; the paths fan
    out from the block's vertical midpoint (its axis).

    RETURNS
    a tuple (axis y value, svg markup).
    """
    ht = block.height
    wd = block.width
    # The axis sits at the vertical center of the block.
    axis = y + (1. * ht) / 2
    label_markup = ''
    line_markup = ''
    for n, item in enumerate(block.items):
        label = item.label
        label_y = y + (n + 1) * PITCH
        line_y = y + (n + 1) * PITCH
        line_ystr = f'{line_y:.3f}'
        label_markup +=  label_svg(label, x + .5 * LABEL_OFFSET_X, label_y)
        line_markup += f'''<path d="M {x} {axis} L{ x + LABEL_OFFSET_X} {line_ystr} h {wd - LABEL_OFFSET_X}" stroke="black"
    stroke-width='.2'
    fill='none'/>
'''

    return axis, label_markup + line_markup

#@+node:tom.20210808144800.25: *4* layout_pm_block
@language python
def layout_pm_block(x, y, block):
    """Lay out one block with the top given by y.

    Mirror image of layout_am_block: items stack upward and the
    fan-out converges toward the right-hand junction point.

    RETURNS
    a tuple (axis y value, junction x value, svg markup).
    """
    ht = block.height
    wd = block.width
    # Fan-out geometry, measured from the block's right edge.
    fanout_left = x + wd - 1.8 * LABEL_OFFSET_X
    fanout_right = fanout_left + LABEL_OFFSET_X + Q_ADV
    axis_y = y - (1. * ht) / 2
    label_markup = ''
    line_markup = ''
    for n, item in enumerate(reversed(block.items)):
        label = item.label
        label_y = y - (n + 1) * PITCH
        line_y = y - (n + 1) * PITCH
        line_ystr = f'{line_y:.3f}'
        lable_x = x - LABEL_OFFSET_X - 3 * Q_ADV
        label_markup +=  label_svg(label, lable_x, label_y)
        line_markup += f'''<path d="M {fanout_right} {axis_y} L{ fanout_left} {line_ystr} h {- wd + LABEL_OFFSET_X}" stroke="black"
    stroke-width='.2'
    fill='none'/>
'''

    return axis_y, fanout_right, label_markup + line_markup

#@+node:tom.20210808144800.26: *4* handle_leftovers
def handle_leftovers(leftovers):
    """Return svg for a "(Plus N more not shown)" marker.

    Counts the items in the blocks that did not fit in the layout
    and, if any, draws a labeled box near the top of the map with
    a connecting line from the center.  Returns '' when nothing
    was left out.
    """
    svg = ''
    n = 0
    for b in leftovers:
        n += len(b.items)
    if n:
        msg = f'(Plus {n} more not shown)'
        label_width = FONT_RESCALE * FM.horizontalAdvance(msg)
        height = 1.5 * Q_LINESPACE 
        xpadding = 3 * Q_ADV
        width = label_width + xpadding
        label_x = -.5 * (label_width  - xpadding)

        # Center the box horizontally, just below the top margin.
        x = -0.5 * width
        y = -0.5 * height + VBY + .5 * MARGIN_TOP
        yt = y + 0.5 * height + X_HEIGHT

        # Vertical connector from the central image to the box.
        svg += f'''<line x1='0' y1='0' x2='0' y2='{y + .5 * height}' 
stroke='black' stroke-width='.2'/>'''

        svg += f'''
<rect width="{width}" height="{height}"
    x="{x}" y="{y}" fill="white"
    rx='2'
    style="stroke:none;"/>
<text x="{label_x}" y="{yt}">{msg}</text>
'''


    return svg

#@+node:tom.20210808144800.27: *4* render_with_device
@language python
def render_with_device(markup):
    """Display the svg *markup* on the configured rendering device.

    RENDERING_DEVICE selects the output: 'freewin' or 'vr3' re-render
    the target node's body; 'browser' writes the markup to a temp file
    under Leo's home directory and opens it in the system browser.
    """
    if RENDERING_DEVICE == 'freewin':
        # Touch the body so Freewin notices a change and re-renders.
        c.p.b += ' '
        c.executeMinibufferCommand('z-open-freewin')
    elif RENDERING_DEVICE == 'vr3':
        c.executeMinibufferCommand('vr3-update')
    elif RENDERING_DEVICE == 'browser':
        lm = g.app.loadManager
        leodir = lm.computeHomeLeoDir()
        tmp_path = join(leodir, 'temp')
        if not isdir(tmp_path):
            mkdir(tmp_path)
        path = join(tmp_path, 'mmap_out.html')
        with open(path, 'w') as f:
            f.write(markup)
        webbrowser.open_new_tab(path)
    else:
        g.es('No rendering device available', color='red')

#@+node:tom.20210808144800.28: *4* find_or_create_target
@language python
def find_or_create_target(c, headline, markup):
    """Find or create node with given headline.

    If the node has to be created, it is created after the
    last visible node. At exit, the target node is selected.
    The given markup text is inserted into the body of the
    target node.

    ARGUMENTS
    c -- the commander for the current outline
    headline -- the exact headline of the target node.
    markup -- the markup text to be inserted into the target node.

    RETURNS
    nothing
    """
    target = get_target_node(c, headline)
    if target:
        target.b = markup
        target.setDirty()
        c.selectPosition(target)
    else:
        # Create and select target node
        p_last = c.lastVisible()
        target = p_last.insertAfter()
        target.h = headline
        target.b = markup
        target.setDirty()
        c.selectPosition(target)
        # Promote the new node to top level so it sits outside
        # any existing subtree.
        while c.canMoveOutlineLeft():
            c.executeMinibufferCommand('move-outline-left')
        c.redraw()
#@+node:tom.20210808144800.29: *4* build_mmap
def build_mmap(data):
    """Parse data and build mindmap.

    ARGUMENT
    data -- an indented-list string to be turned into a mind map.
    """
    # '&' and '<' would break the generated xml; neutralize them.
    data = data.replace('&', '+').replace('<', ' ')
    svg = create_map(IndentedParser, data)

    # Copy the svg into the target node, then display it.
    find_or_create_target(c, TARGET_NODE, svg)
    render_with_device(svg)

    # Let others know body has changed.
    editor = c.frame.body.wrapper.widget
    doc = editor.document()
    doc.setModified(True)
#@+node:tom.20210808144800.30: *4* Main
@language python
c.build_mmap = build_mmap
#@+node:tom.20211108175236.1: ** Plots and Graphs
#@+node:tom.20211108175243.1: *3* Plot From Clipboard
@language rest
This script takes X-Y data from the clipboard and plots it using Matplotlib.

It is a slight modification of the script used by Viewrendered3 to plot data. It does not require Viewrendered3 to be installed or used.

The script requires the pyperclip package to access the clipboard. This could easily be changed to use some other clipboard handling package or Leo's clipboard handling. Pyperclip is the easiest to use and works across all the big three operating systems.  Matplotlib is required to do the actual plotting.

By X-Y data is meant data in two whitespace-separated columns; or if there is just one column, it is considered to be the Y-axis data, and the script constructs an X-axis data sequence.

Any line that contains non-numeric fields is ignored except for configuration sections, which work the same way as with Viewrendered3.  Consult the docstring for this script or the help for VR3's plot2d capability for more details.

Here is an example of X-Y data::

    1 1
    2 .5
    3 -6
    # comment
    ; comment

    4 -16
    5 -2
    6  5
    7  10

To install this script, add "@command " to the headline and copy it to myLeoSettings.leo.  Then reload settings or restart Leo.  There will be a command named "plot-2d-clipboard" available from the minibuffer.  It could also be bound to a menu, button, or keystroke.

First release: 11-8-2021 by Thomas B. Passin.
#@+node:tom.20211108175319.1: *4* plot-2d-clipboard
def plot_2d():
    """
    << docstring >>
    """
    import re, os, site
    from matplotlib import pyplot as plt
    import pyperclip

    # The data to plot comes from the system clipboard; the selected
    # node's body may hold config-file-like [labels]/[style] sections.
    data = pyperclip.paste()
    data_lines = data.split('\n')

    section_lines = c.p.b.split('\n')

    # Leo expands @others with the helper functions defined in the
    # child nodes (has_config_section, set_user_style, etc.).
    @others

    # Apply user styling if a [style] section exists, else the
    # custom default style.
    config_sections = has_config_section(section_lines)
    style_start = config_sections.get('style', -1)
    if style_start >= 0:
        if not set_user_style(section_lines[style_start:]):
            set_custom_style()
    else:
        set_custom_style()

    plt.ion()
    fig, ax = plt.subplots()
    fig.clear()

    # Optional figure title and axis labels from a [labels] section.
    label_start = config_sections.get('labels', -1)
    if label_start >= 0:
        << configure labels >>

    plot_plain_data(data_lines)
    # Restore default Matplotlib styling for subsequent plots.
    plt.rcdefaults()

plot_2d()
#@+node:tom.20211108175319.2: *5* << docstring >>
Show a plot of x-y data in the clipboard.

The data can be either a one-column or two-column list
of rows.  Columns are separated by whitespace.  

The selected node may optionally contain
a config file-like set of sections that define the labels
and plot styling.

The pyperclip package is required to read the clipboard. The
matplotlib package is required for plotting.

This command must be installed in Leo before it can be used.  See the section *Installing The Command* below about how to do this.

@others
#@+node:tom.20211108175319.3: *6* Data Format
Data Format
------------
Data must be in one or two columns separated by whitespace.  Here
is an example of two-column data::

    1 1
    2 2
    3 4
    # comment
    ; comment

    4 16
    5 32

Comment lines start with one of ";", "#". Comment, non-numeric, and 
blank lines are ignored.

Here is an example of one-column data - the missing first column will 
be assigned integers starting with 0::

    1
    .5
    6
    # comment
    ; comment

    16
    32

Whether the data contains one or two columns is determined
from the first non-comment, non-blank, all numeric row.
If one-column, an implicit first column is added starting
with zero.


#@+node:tom.20211108175319.4: *6* Graph And Data Labels
Graph And Data Labels
----------------------

A figure title and axis labels can optionally be added. These are
specified by adding a configuration section *[labels]*. The
section name must be left-justified. The section is ended by a
blank line or the end of the node. The section may be placed
anywhere in the node.

Here is an example::

    [labels]
    title = Plot Example
    xaxis = Days
    yaxis = Values

Any or all of the entries may be omitted.

#@+node:tom.20211108175319.5: *6* Plot Styling
Plot Styling
-------------

The appearance of the plot can optionally be changed in several
ways. By default, a certain Matplotlib style file will be used if
present (see below), or default Matplotlib styling will be
applied. If the data node has a section *[style]*, one of two
styling methods can be used:

1. A named style. Matplotlib has a number of named
   styles, such as *ggplot*. One of these built-in
   style names can be specified by the *stylename*
   key. The style *xkcd* can also be used even
   though it is not one of the named styles.

2. A Matplotlib style file. The name of this file
   is specified by the *stylefile* key. The file
   can be located Leo's home directory, typically
   *~/.leo* or its equivalent in Windows.

Here is an example *[style]* section, with explanatory comments added::

    [style]
    # For VR3 "Plot 2D", only one of these 
    # will be used. "stylename" has priority
    # over "stylefile".
    stylename = ggplot
    #stylefile = styles.mpl

The section may be placed anywhere in the node.

The Default Style File
........................

When no *[style]* section is present, a style file named
*local_mplstyle* will be used if found. It will first be looked
for in Leo's home directory, and then in the *site.userbase()*
directory. On Windows, this is usually the *%APPDATA%\\Python*
directory. On Linux, this is usually *~/.local*.

When no style file can be found, Matplotlib will use its default
styling, as modified by a *.matplotlib.rc* file if Matplotlib can
find one.
#@+node:tom.20211113134000.1: *6* Installing The Command
Installing The Command
-----------------------

This command should be installed by adding its node to the *myLeoSettings.leo* file.  Once added, it can also be bound to a keystroke or added to a menu in the same way as other Leo commands.

To install, open *myLeoSettings.leo*, expand the *@settings* tree, and at some convenient place in that tree copy the entire tree of this *plot-2d-clipboard* command.  Edit its headline by adding "@command " to the start of the headline.  Then close and restart Leo.  The command will now be available.  You can observe this by pressing ALT-X and typing "plot-" into the minibuffer.  Hit TAB to get tab completion, and you will see the command listed.  You would invoke it by completing the command, then pressing <ENTER>.  If there is plottable data in the clipboard, the plot will appear.

#@+node:tom.20211113135245.1: *7* Binding to a Key or Menu
Binding to a Key or Menu
-------------------------

Leo lets you connect a command to a button, shortcut key or a menu.

To a Button
............
Under *@settings/@buttons* add a new node with headline *@button Plot 2D Clipboard*. In the body, type the following::

    c.executeMinibufferCommand('plot-2d-clipboard')

To a Key
.........
In the *@settings* tree, find or create a node with headline "@shortcuts".  To its body, add a line::

    plot-2d-clipboard = <your key>

where <your key> is the abbreviation of the shortcut key you want to use.  For example, if you want to bind to the Alt F9 key, you would use::

    plot-2d-clipboard = ALT+F9

To A Menu
.........
You can bind to an existing menu or create a new one.  Here is how to create a new menu on Leo's menu bar, just before the *Help* menu. The new menu will be named *Local*. In the *@settings* tree, find or create a node *@menus*. Add a new node with headline reading "@menuat help before".  Under that node add a new one with headline *@menu &Local*.  Under that node add a new one with headline *@item plot-2d-clipboard*.

Type "Plot &2D Clipboard" into the body of the @item node.

Note that the accelerator key markings "&" in front of a name are optional.

Your outline tree should now look like this:

@settings
    @menus
        @menuat help before
            @menu &Local
                @item plot-2d-clipboard

When you restart Leo, there will be a new menu *Local* with a menu item labeled *Plot 2D Clipboard*.
#@+node:tom.20211108175319.6: *5* declarations
STYLEFILE = 'local_mplstyle' # Must be in site.getuserbase()
# Matches a left-justified config section header such as "[labels]" or "[style]".
SECTION_RE = re.compile(r'^\[([a-zA-Z0-9]+)\]')
#@+node:tom.20211108175319.7: *5* functions
#@+node:tom.20211108175319.8: *6* has_config_section()
def has_config_section(pagelines):
    """Find config-like sections in the data page.

    A section starts with a left-justified label in [brackets] and
    runs until a blank line or the end of the list of lines.

    ARGUMENT
    pagelines -- a list of text lines.

    RETURNS
    a dictionary keyed by section label: {label: line_num, ...}
    """
    found = {}
    for row, text in enumerate(pagelines):
        match = SECTION_RE.match(text)
        if match:
            found[match.group(1)] = row
    return found
#@+node:tom.20211108175319.9: *6* set custom_style()
@pagewidth 65
def set_custom_style():
    r"""Apply custom matplotlib styles from a file.

    The style file has the name given by STYLEFILE. Leo's home
    directory (usually ~/.leo) is checked first.  If the file is
    not there, the site.getuserbase() directory is checked next
    (usually %APPDATA%\Python on Windows, ~/.local on Linux).

    If neither location has the file, a message is shown and
    Matplotlib's default styling remains in effect.
    """
    lm = g.app.loadManager
    home_dir = lm.computeHomeLeoDir()
    if g.isWindows:
        home_dir = home_dir.replace('/', '\\')
    style_file = os.path.join(home_dir, STYLEFILE)
    if not os.path.exists(style_file):
        # Fall back to the Python user base directory.
        style_file = os.path.join(site.getuserbase(), STYLEFILE)
    if os.path.exists(style_file):
        plt.style.use(style_file)
        return
    g.es(f'Pyplot style file "{style_file}" not found, using default styles')
#@+node:tom.20211108175319.10: *6* plot_plain_data()
def plot_plain_data(datalines):
    """Plot 1- or 2- column data.  Ignore all non-numeric lines."""


    # from leo.plugins import viewrendered3 as vr3
    # from leo.plugins import viewrendered as vr

    # Helper functions
    << is_numeric >>
    << get_data >>

    # get_data returns (None, None) when no numeric rows exist.
    x, y = get_data(datalines)
    if not x:
        g.es('VR3 -- cannot find data')
        return

    plt.plot(x,y)
    plt.show()

#@+node:tom.20211108175319.11: *7* << is_numeric >>
def is_numeric(line):
    """Return True if the first (and, if present, second) whitespace-separated
    field of *line* parses as a float.

    Blank or empty lines are not numeric.
    """
    fields = line.split()
    # Guard: float(fields[0]) would raise IndexError on a blank/empty line.
    if not fields:
        return False
    numeric = False
    try:
        _ = float(fields[0])
        if len(fields) > 1:
            _ = float(fields[1])
        numeric = True
    except ValueError:
        pass

    return numeric
#@+node:tom.20211108175319.12: *7* << get_data >>
def get_data(pagelines):
    """Extract x and y value lists from text lines.

    Docstring markers and blank lines are removed, formatting commas
    stripped, and non-numeric rows skipped.  Column count (1 or 2) is
    fixed by the first numeric row; one-column data gets an implicit
    x column of 0, 1, 2, ...

    Returns (x, y) as lists of floats, or (None, None) if no data.
    """
    # Strip stray docstring markers.
    cleaned = [ln.replace('"""', '') for ln in pagelines]
    cleaned = [ln.replace("'''", '') for ln in cleaned]
    # Drop blank lines.
    cleaned = [ln for ln in cleaned if ln.strip()]

    rows = []
    num_cols = 0
    for ln in cleaned:
        ln = ln.replace(',', '')  # Remove formatting commas.
        if not is_numeric(ln):
            continue
        rows.append(ln.strip())
        # The first all-numeric row decides the column count (capped at 2).
        if not num_cols:
            num_cols = min(len(rows[0].split()), 2)
    if not rows:
        return None, None

    # Columns beyond the second are ignored.
    if num_cols == 1:
        x = list(range(len(rows)))
        y = [float(row.strip()) for row in rows]
    else:
        pairs = [row.split()[:2] for row in rows]
        first, second = zip(*pairs)
        x = [float(v) for v in first]
        y = [float(v) for v in second]

    return x, y
#@+node:tom.20211108175319.13: *6* set_user_style()
@pagewidth 65
def set_user_style(style_config_lines):
    """Set special plot styles.

    If the data node has a section [style], then if there is a
    key "stylename", apply that named style; otherwise if there
    is a key "stylefile", look for a file of that name in the
    user's Leo home directory (usually ~/.leo) and use those
    styles.

    The stylename must be one of the built-in style names, such
    as "ggplot". "xkcd" also works even though it is not actually
    one of the style names.

    ARGUMENT
    style_config_lines -- a sequence of lines starting at the
    [style] section of the data node.

    RETURNS
    True if a style was set.
    """
    set_style = False
    for line in style_config_lines:
        # BUG FIX: was "if not line.strip:" -- a bare method reference is
        # always truthy, so a blank line never terminated the section.
        if not line.strip():
            break
        # Split on the first '=' only, so values may themselves contain '='.
        fields = line.split('=', 1)
        if len(fields) < 2:
            continue
        kind, val = fields[0].strip(), fields[1].strip()
        if kind == 'stylename':
            if val == 'xkcd':
                plt.xkcd()  # xkcd mode is a function, not a named style.
            else:
                plt.style.use(val)
            set_style = True
            break
        elif kind == 'stylefile':
            lm = g.app.loadManager
            style_dir = lm.computeHomeLeoDir()
            if g.isWindows:
                style_dir = style_dir.replace('/', '\\')
            style_file = os.path.join(style_dir, val)
            if os.path.exists(style_file):
                plt.style.use(style_file)
                set_style = True
            break

    return set_style
#@+node:tom.20211108175319.14: *5* << configure labels >>
# Get lines for the labels section and apply title/axis labels.
for line in section_lines[label_start:]:
    # BUG FIX: was "if not line.strip:" -- the bare method reference is
    # always truthy, so a blank line never ended the section.
    if not line.strip():
        break
    fields = line.split('=', 1)  # Split once so titles may contain '='.
    if len(fields) < 2:
        continue
    kind, val = fields[0].strip(), fields[1].strip()
    if kind == 'title':
        plt.title(val)
    elif kind == 'xaxis':
        plt.xlabel(val)
    elif kind == 'yaxis':
        plt.ylabel(val)

#@+node:ekr.20211020082938.1: *3* script: display slides
"""
Display slides from a folder and its subfolders.

Prompts for a folder containing images, then displays the images.

Plain keys control the display of slides:
    
      space: show the next slide.
  backspace: show the previous slide.
     escape: end the slideshow
          =: zoom in
          -: zoom out
arrows keys: pan the slide
          d: prompt to move the slide to the trash
          h: show the help message
          m: move the file.

Default settings:

- background_color: black
- 60 seconds between slides.
- Start in full-screen mode.
- Display in random order.
- Allowed file extensions: ['.jpeg', '.jpg', '.png']

"""
import os
import pathlib
import random
import textwrap
from leo.core.leoQt import isQt5, QtCore, QtGui, QtWidgets

# Defaults
background_color = "black"
delay = 60  # seconds
extensions = ['.jpeg', '.jpg', '.png']  # Allowed file extensions.
full_screen = True
sort_kind = 'random'  # 'date', 'name', 'none', 'random', or 'size'
height = 900  # Window height (pixels) when not in full screen mode.
width = 1500  # Window width (pixels) when not un full screen mode.

@others

path = QtWidgets.QFileDialog().getExistingDirectory()
if path:
    files_list = get_files(path)
    if files_list:
        n = len(files_list)
        print(f"Found {n} picture{g.plural(n)} in {path}")
        w = Slides()
        w.run()
        print('done')
    else:
        print(f"No slides found in {path!r}")
#@+node:ekr.20211020082938.2: *4* get_files
def get_files(path, valid_extensions=None):
    """Return all files in path, including all subdirectories.

    ARGUMENTS
    path -- directory to scan recursively.
    valid_extensions -- optional list of allowed (lowercase) extensions;
        defaults to the module-level `extensions` list, so existing
        callers are unaffected.

    RETURNS
    a list of matching file paths as strings.
    """
    if valid_extensions is None:
        valid_extensions = extensions
    return [
        str(z) for z in pathlib.Path(path).rglob('*')
            if z.is_file()
            # Extensions compare case-insensitively ('.JPG' matches '.jpg').
            and os.path.splitext(str(z))[1].lower() in valid_extensions
    ]
#@+node:ekr.20211020082938.3: *4* class Slides
class Slides(QtWidgets.QWidget):

    # Zoom factor applied when scaling the pixmap in show_slide.
    scale = 1.0
    # Index into the global files_list; -1 means "before the first slide".
    slide_number = -1
    # Drives the automatic slide advance; see timerEvent/show_slide.
    timer = QtCore.QBasicTimer()
    
    @others
#@+node:ekr.20211020082938.4: *5* Slides.delete
# True once the missing-send2trash warning has been printed.
send_to_trash_warning_given = False

def delete(self):
    """Issue a prompt and delete the file if the user agrees."""
    try:
        from send2trash import send2trash
    except Exception:
        # Warn once per session, then silently ignore further deletes.
        if not self.send_to_trash_warning_given:
            self.send_to_trash_warning_given = True
            print("Deleting files requires send2trash")
            print("pip install Send2Trash")
        return
    current = files_list[self.slide_number]
    answer = g.app.gui.runAskYesNoDialog(c,
        title = "Delete File?",
        message = f"Delete file {g.shortFileName(current)}?"
    )
    if answer != 'yes':
        return
    # Move the file to the trash and drop it from the slideshow.
    send2trash(current)
    del files_list[self.slide_number]
    print(f"Deleted {current}")
    self.slide_number = max(0, self.slide_number - 1)
    self.next_slide()
#@+node:ekr.20211020082938.5: *5* Slides.keyPressEvent
def keyPressEvent (self, event):
    """Dispatch plain-key shortcuts to the matching slide action."""
    i = event.key()       # Qt key code (int).
    s = event.text()      # Typed character; empty for arrows and other non-character keys.
    mods = event.modifiers()
    if s == 'd':
        self.delete()
    elif s == 'f':
        self.toggle_full_screen()
    elif s == 'h':
        self.show_help()
    elif s == 'm':
        self.move_to()
    elif s == 'n' or i == 32:  # ' '
        self.next_slide()
    elif s == 'p' or s == '\b':
        self.prev_slide()
    elif s == 'q' or s == '\x1b':  # ESC.
        self.quit()
    # BUG FIX: was "s in '=+'", which is True for the empty string, so keys
    # with no text (including the arrow keys) all triggered zoom_in and the
    # arrow-key branches below were unreachable.
    elif s in ('=', '+'):
        self.zoom_in()
    elif s == '-':
        self.zoom_out()
    elif i == 16777235:  # Qt.Key_Up
        self.move_up()
    elif i == 16777237:  # Qt.Key_Down
        self.move_down()
    elif i == 16777234:  # Qt.Key_Left
        self.move_left()
    elif i == 16777236:  # Qt.Key_Right
        self.move_right()
    else:
        # Unhandled key: trace it for debugging.
        g.trace(repr(s), i, repr(mods))
    
#@+node:ekr.20211020082938.6: *5* Slides.move_up/down/left/right
# Pan the slide by 200 pixels per key press.
# NOTE(review): scrollContentsBy() is the protected handler Qt calls after a
# scroll, not a command to scroll -- confirm these actually pan the image.

def move_down(self):
    self.scroll_area.scrollContentsBy(0, -200)

def move_left(self):
    self.scroll_area.scrollContentsBy(-200, 0)

def move_right(self):
    self.scroll_area.scrollContentsBy(200, 0)

def move_up(self):
    self.scroll_area.scrollContentsBy(0, 200)
#@+node:ekr.20211020082938.7: *5* Slides.move_to
def move_to(self):
    """Issue a prompt and move the file if the user agrees."""
    source = files_list[self.slide_number]
    target_dir = QtWidgets.QFileDialog().getExistingDirectory()
    if not target_dir:
        return  # User cancelled the dialog.
    destination = os.path.join(target_dir, os.path.basename(source))
    if os.path.exists(destination):
        # Never overwrite an existing file.
        print("File exists:", destination)
        return
    pathlib.Path(source).rename(destination)
    del files_list[self.slide_number]
    self.slide_number = max(0, self.slide_number - 1)
    self.next_slide()
#@+node:ekr.20211020082938.8: *5* Slides.next_slide
def next_slide(self):
    """Advance to the next slide; stay on the last one at the end."""
    at_end = self.slide_number + 1 >= len(files_list)
    if not at_end:  # Don't wrap.
        self.slide_number += 1
    self.scale = 1.0
    self.show_slide()
#@+node:ekr.20211020082938.9: *5* Slides.prev_slide
def prev_slide(self):
    """Step back one slide, stopping at the first one."""
    at_start = self.slide_number <= 0
    if not at_start:  # Don't wrap.
        self.slide_number -= 1
    self.scale = 1.0
    self.show_slide()
#@+node:ekr.20211020082938.10: *5* Slides.quit
def quit(self):
    """Stop the slide timer and destroy the window, ending the slideshow."""
    self.timer.stop()
    self.destroy()
#@+node:ekr.20211020082938.11: *5* Slides.run & helper
def run(self):
    """Initialize the widget from the module defaults, center it, and start the show."""
    global background_color, delay, full_screen, sort_kind
    w = self
    # Init ivars
    self.delay = delay
    self.full_screen = not full_screen  # So toggling below works.
    # Init the widget.
    w.make_widgets()
    # Center the widget
    qtRectangle = w.frameGeometry()
    # NOTE(review): QDesktopWidget exists only in Qt5; under Qt6 this would
    # need QScreen.availableGeometry() -- confirm the intended Qt version.
    centerPoint = QtWidgets.QDesktopWidget().availableGeometry().center()
    qtRectangle.moveCenter(centerPoint)
    w.move(qtRectangle.topLeft())
    # Show the widget.
    w.showNormal()
    if full_screen:
        w.toggle_full_screen()
    # Show the next slide.
    self.sort(sort_kind)
    w.next_slide()  # show_slide resets the timer.
#@+node:ekr.20211020082938.12: *6* Slides.make_widgets
def make_widgets(self):
    """Create the picture label, its scroll area, and the window layout."""
    global width, height

    # Window attributes: background color and the non-full-screen size.
    self.setStyleSheet(f"background: {background_color}")
    self.setGeometry(0, 0, width, height)

    # The label that displays the current picture.
    self.picture = QtWidgets.QLabel('picture', self)

    # A scroll area wraps the picture, centered both ways.
    self.scroll_area = scroller = QtWidgets.QScrollArea()
    scroller.setWidget(self.picture)
    AlignmentFlag = QtCore.Qt if isQt5 else QtCore.Qt.AlignmentFlag
    scroller.setAlignment(AlignmentFlag.AlignHCenter | AlignmentFlag.AlignVCenter)

    # Scrollbars stay hidden; panning is done with the arrow keys.
    ScrollBarPolicy = QtCore.Qt if isQt5 else QtCore.Qt.ScrollBarPolicy
    scroller.setHorizontalScrollBarPolicy(ScrollBarPolicy.ScrollBarAlwaysOff)
    scroller.setVerticalScrollBarPolicy(ScrollBarPolicy.ScrollBarAlwaysOff)

    # One vertical layout holding the scroll area.
    vbox = QtWidgets.QVBoxLayout()
    vbox.addWidget(self.scroll_area)
    self.setLayout(vbox)
#@+node:ekr.20211020082938.13: *5* Slides.show_help
def show_help(self):
    """Show the help message."""
    # The literal's relative indentation is exactly what gets printed
    # (after dedent), so keep the alignment intact when editing.
    print(textwrap.dedent('''\
                    d delete slide
                    f toggle full screen
                    h show help
         n or <space> show next slide
     p or <backspace> show previous slide
           q or <esc> end slideshow
                    + zoom in
                    - zoom out
             up arrow scroll up
           down arrow scroll down
           left arrow scroll left
          right arrow scroll right
    '''))
#@+node:ekr.20211020082938.14: *5* Slides.show_slide
def show_slide(self):
    """Display the current slide and restart the auto-advance timer."""
    # Reset the timer.  QBasicTimer.start() expects an integer msec count;
    # passing a float (delay * 1000.0) raises TypeError in newer PyQt.
    self.timer.stop()
    self.timer.start(int(self.delay * 1000), self)
    # Get the file name.
    file_name = files_list[self.slide_number]
    # Change the title.
    self.setWindowTitle(file_name)
    # Display the picture.
    pixmap = QtGui.QPixmap(file_name)
    try:
        TransformationMode = QtCore.Qt if isQt5 else QtCore.Qt.TransformationMode
        image = pixmap.scaledToHeight(
            int(self.height() * self.scale),  # Qt requires an int height.
            TransformationMode.SmoothTransformation,
        )
        self.picture.setPixmap(image)
        self.picture.adjustSize()
    except Exception:
        # Unloadable image: skip ahead.  NOTE(review): if every remaining
        # file fails this recurses; acceptable for an interactive script.
        self.next_slide()
#@+node:ekr.20211020082938.15: *5* Slides.sort
def sort(self, sort_kind):
    """Order the global files_list according to sort_kind."""
    global files_list
    if sort_kind in (None, 'none'):
        return  # Leave the list untouched.
    if sort_kind == 'random':
        print('Randomizing...')
        random.shuffle(files_list)
    elif sort_kind == 'date':
        print('Sorting by date...')
        files_list.sort(key=os.path.getmtime)
    elif sort_kind == 'name':
        print('Sorting by name...')
        files_list.sort()
    elif sort_kind == 'size':
        print('Sorting by size...')
        files_list.sort(key=os.path.getsize)
    else:
        g.trace(f"unknown sort kind: {sort_kind!r}")
#@+node:ekr.20211020082938.16: *5* Slides.timerEvent
def timerEvent(self, e=None):
    """Qt timer callback: auto-advance after `delay` seconds."""
    self.next_slide()  # show_slide resets the timer.
#@+node:ekr.20211020082938.17: *5* Slides.toggle_full_screen
def toggle_full_screen(self):
    """Switch between full-screen and normal windowed display."""
    if not self.full_screen:
        # Enter full screen and stretch the picture to the window.
        self.full_screen = True
        WindowState = QtCore.Qt if isQt5 else QtCore.Qt.WindowState
        self.setWindowState(WindowState.WindowFullScreen)
        self.picture.setGeometry(0, 0, self.width(), self.height())
        self.picture.adjustSize()
    else:
        # Return to the normal window.
        self.full_screen = False
        self.picture.adjustSize()
        self.showNormal()
#@+node:ekr.20211020082938.18: *5* Slides.zoom_in & zoom_out
def zoom_in(self):
    """Grow the slide 5% per press; scale resets when the slide changes."""
    self.scale = self.scale * 1.05
    self.show_slide()
    
def zoom_out(self):
    """Shrink the slide by the inverse of zoom_in's factor."""
    self.scale = self.scale * (1.0 / 1.05)
    self.show_slide()
#@+node:ekr.20211009081233.1: ** Prototypes
#@+node:ville.20090508224531.9800: *3* script: Prototype: pos_to_archive, archive_to_pos
def pos_to_archive(p):
    """Archived version of a position in the format gnx1:gnx2:gnx3.

    This is more tolerant to tree modification than the children-index
    version given by p.archivedPosition().

    Counterpart of archive_to_pos.
    """
    gnx_chain = [node.gnx for node in p.self_and_parents()]
    return ":".join(reversed(gnx_chain))

def sibling_by_gnx(p, gnx):
    """Return the first sibling of p whose gnx matches; raise IndexError if none."""
    for sib in p.siblings():
        if sib.gnx == gnx:
            return sib
    raise IndexError("Pos %s has no sibling gnx = %s" % (p,gnx))            

def archive_to_pos(c, s):
    """Convert a gnx1:gnx2:gnx3 string representation back to a real position.

    Counterpart of pos_to_archive.
    """
    gnx_list = s.split(':')
    # Walk down from the root: each gnx selects a sibling, then descend.
    node = c.rootPosition()
    for gnx in gnx_list[:-1]:
        node = sibling_by_gnx(node, gnx).firstChild()
    return sibling_by_gnx(node, gnx_list[-1]).copy()

# Round-trip check: archiving the current position and restoring it
# must yield a position equal to the original.
arc= pos_to_archive(p)
po = archive_to_pos(c, arc)

assert po == p
#@+node:ekr.20041126055818.2: *3* script: Prototype: pyclbr module (parses python)
@ This is probably a better way of parsing Python text.
It does not import the module, so it is safe for untrusted code.
@c
import pyclbr # Python Command Line Browser support.
import sys
# Converted from Python 2 print statements to print() calls; logic unchanged.
print('*' * 40); print()
fileNames = ("leoCommands.py","leo.py","leoAtFile.py")
# NOTE(review): this second assignment deliberately overrides the first with a
# single hard-coded test file (the trailing comma makes it a 1-tuple).
fileNames = (r"c:\Python23\Lib\site-packages\Pmw\Pmw_1_1\lib\PmwPanedWidget.py"),

for fileName in fileNames:
    # Renamed locals: 'dir' and 'file' shadowed builtins.
    theDir, fileBase = g.os_path_split(fileName)
    moduleName, ext = g.os_path_splitext(fileBase)
    moduleDict = pyclbr.readmodule_ex(moduleName, [theDir] + sys.path)
    print("module", moduleName, '-' * 40)
    items = []
    for funcOrClass in moduleDict.keys():
        o = moduleDict.get(funcOrClass)  # o is a descriptor.
        try:
            mdict = o.methods  # Fails for functions.
            items.append((int(o.lineno), "*class", o.name))
            for method in mdict.keys():
                lineno = mdict.get(method)
                items.append((int(lineno), "method", method))
        except AttributeError:
            # funcOrClass is a function descriptor.
            items.append((int(o.lineno), "function", o.name))
    items.sort()
    for line, kind, name in items:
        print("%4d %8s %s" % (line, kind, name))
#@+node:ekr.20050707183613: *3* script: Prototype: Ipython Shell
# Prototype: embed an IPython shell inside Leo (ancient IPython 0.x API).
<< imports >>

@others

# Either embed a full IPShellEmbed (the "if 1" branch) or drive a
# LeoShell manually over the body text of the current node.
if 1:
    << use IPShellEmbed >>
else:
    shell = LeoShell('shell')
    # g.redirectStdout()
    g.es_print('-'*40)
    body = p.b
    for line in g.splitLines(body):
        g.es(str(shell.prefilter(line,None)))
    #g.restoreStdout()
#@+node:ekr.20050708110336: *4* << imports >>
import IPython
import IPython.genutils
from IPython.Struct import Struct

import __builtin__
import __main__
import os
import re
import sys

# we need the directory where IPython itself is installed
IPython_dir = os.path.dirname(IPython.__file__)
#@+node:ekr.20050708091220.78: *4* << use IPShellEmbed >>
# Create and immediately run an embedded IPython shell (IPython 0.x API).
shell = IPython.Shell.IPShellEmbed (
    argv=[],
    banner='Welcome to IPython in Leo',
    exit_msg='Bye',
    rc_override={
        'confirm_exit':0,
        #'readline':0, # Crashes the interactive interp.
    },
)
shell()
#@+node:ekr.20050708095104: *4* class dummyCache
class dummyCache:
    """Minimal stand-in for IPython's prompt/output cache (prototype)."""

    @others
#@+node:ekr.20050708142137: *5* ctor
def __init__ (self,user_ns):
    # Minimal prompt-cache state.  The live "if 1" branch substitutes
    # dummy prompts for IPython's real Prompt objects; the else branch
    # preserves the original IPython.Prompts setup for reference.
    self.last_prompt = None
    self.prompt_count = 0
    self.user_ns = user_ns

    if 1:
        self.prompt1 = dummyPrompt('name=prompt1')
        self.prompt2 = dummyPrompt('name=prompt2')
        self.prompt_out = dummyPrompt('name=prompt_out')
    else:
        input_sep='\n'
        self.ps1_str = '>>> '   ### self._set_prompt_str(ps1,'In [\\#]: ','>>> ')
        self.ps2_str = '... '   ### self._set_prompt_str(ps2,'   .\\D.: ','... ')
        self.ps_out_str = ','   ### self._set_prompt_str(ps_out,'Out[\\#]: ','')
        pad_left=True

        self.prompt1 = IPython.Prompts.Prompt1(self,
            sep=input_sep,prompt=self.ps1_str,pad_left=pad_left)
        self.prompt2 = IPython.Prompts.Prompt2(self,
            prompt=self.ps2_str,pad_left=pad_left)
        self.prompt_out = IPython.Prompts.PromptOut(self,
            sep='',prompt=self.ps_out_str,pad_left=pad_left)

    self.last_prompt = self.prompt1 # Total kludge.
#@+node:ekr.20050708142137.2: *5* All others
def __len__ (self):
    # The cache is always empty.
    return 0

def insert(self,n,line):
    # Ignore inserts; nothing is cached.
    pass

def pop(self):
    # Nothing to pop; return an empty line.
    return ''
#@+node:ekr.20050708143008: *4* class dummyPrompt
class dummyPrompt (IPython.Prompts.BasePrompt):

    """Interactive prompt similar to Mathematica's (no-op prototype stub)."""

	@others
#@+node:ekr.20050708143008.2: *5* __init__
def __init__(self,cache=None,sep=None,prompt=None,pad_left=False,name='prompt'):
    # Only the name is kept; the other parameters exist to match the
    # BasePrompt constructor signature and are deliberately ignored.
    self.name = name
#@+node:ekr.20050708143008.3: *5* set_p_str
def set_p_str(self):
    """ Set the interpolating prompt strings.

    This must be called every time the color settings change, because the
    prompt_specials global may have changed."""

    return ###  Disabled: the legacy IPython code below never runs.

    import os,time  # needed in locals for prompt string handling
    loc = locals()
    self.p_str = ItplNS('%s%s%s' %
                        ('${self.sep}${self.col_p}',
                         multiple_replace(prompt_specials, self.p_template),
                         '${self.col_norm}'),self.cache.user_ns,loc)

    self.p_str_nocolor = ItplNS(multiple_replace(prompt_specials_nocolor,
                                                 self.p_template),
                                self.cache.user_ns,loc)
#@+node:ekr.20050708143008.4: *5* write
def write(self,msg):  # dbg
    # Swallow all prompt output; the code below is deliberately unreachable.
    return '' ###

    sys.stdout.write(msg)
    return ''
#@+node:ekr.20050708143008.5: *5* __str__
def __str__(self):
    """Return a string form of the prompt.

    This for is useful for continuation and output prompts, since it is
    left-padded to match lengths with the primary one (if the
    self.pad_left attribute is set)."""

    return self.name ###  Short-circuit: the padding logic below never runs.

    out_str = str_safe(self.p_str)
    if self.pad_left:
        # We must find the amount of padding required to match lengths,
        # taking the color escapes (which are invisible on-screen) into
        # account.
        esc_pad = len(out_str) - len(str_safe(self.p_str_nocolor))
        format = '%%%ss' % (len(str(self.cache.last_prompt))+esc_pad)
        return format % out_str
    else:
        return out_str
#@+node:ekr.20050708144144.1: *5* Unchanged
# Retained for reference only: "if 0" means the children below never execute.
if 0:
    @others
#@+node:ekr.20050708143008.6: *6* cwd_filt
# these path filters are put in as methods so that we can control the
# namespace where the prompt strings get evaluated.

def cwd_filt(self, depth):
    """Return the last depth elements of the current working directory.

    $HOME is always replaced with '~'.
    If depth==0, the full path is returned."""
    cwd = os.getcwd().replace(HOME, "~")
    tail = os.sep.join(cwd.split(os.sep)[-depth:])
    # An empty join result means we are at the filesystem root.
    return tail or os.sep
#@+node:ekr.20050708143008.7: *6* cwd_filt2
def cwd_filt2(self, depth):
    """Return the last depth elements of the current working directory.

    $HOME is always replaced with '~'.
    If depth==0, the full path is returned."""
    parts = os.getcwd().replace(HOME, "~").split(os.sep)
    # Count the '~' component as part of the requested depth.
    if '~' in parts and len(parts) == depth + 1:
        depth += 1
    tail = os.sep.join(parts[-depth:])
    # An empty join result means we are at the filesystem root.
    return tail or os.sep
#@+node:ekr.20050708144144.2: *5* auto_rewrite
def auto_rewrite(self,*args,**keys):
    # Auto-rewrite display is disabled in this prototype; always empty.
    return ''
#@+node:ekr.20050708091220.76: *4* class LeoShell
class LeoShell (IPython.iplib.InteractiveShell):
    """A stripped-down InteractiveShell intended to run inside Leo (prototype)."""

    @others

    # Set the default prefilter() function (this can be user-overridden)
    prefilter = _prefilter
#@+node:ekr.20050708091220.77: *5* ctor
def __init__ (self,name):
    # Deliberately does NOT call the base-class ctor: the sections below
    # replicate just enough of IPython's make_IPython() setup for Leo.
    self.shell = self
    self.name = name

    << directory stuff >>
    << set sensible command line defaults for self.rc >>
    << define regexp's >>
    << define escape stuff >>
    << define namespaces >>
    << create alias table >>
    << define inpsector >>

    self.inputcache = dummyCache(self.user_ns)
    self.outputcache = dummyCache(self.user_ns)
    self.CACHELENGTH = 0
#@+node:ekr.20050708110239: *6* << directory stuff >>
# EKR: take from make_IPython.

# Platform-dependent suffix and directory names
if os.name == 'posix':
    rc_suffix = ''
    ipdir_def = '.ipython'
else:
    rc_suffix = '.ini'
    ipdir_def = '_ipython'

# default directory for configuration
if 1: ### Leo
    # Leo override: keep IPython's config inside Leo's load directory.
    ipythondir = g.app.loadDir
else:
    ipythondir = os.path.abspath(
        os.environ.get('IPYTHONDIR',
        os.path.join(IP.home_dir,ipdir_def)))
#@+node:ekr.20050708105742: *6* << set sensible command line defaults for self.rc >>
# EKR: take from make_IPython

# This should have everything from  cmdline_opts and cmdline_only
# Note: colors='NoColor' and readline=1 are the settings that matter
# most when embedding in Leo; the rest mirror IPython's defaults.
self.rc = Struct(
    autocall = 1,
    autoindent=0,
    automagic = 1,
    banner = 1,
    cache_size = 1000,
    c = '',
    classic = 0,
    colors = 'NoColor',
    color_info = 0,
    confirm_exit = 1,
    debug = 0,
    deep_reload = 0,
    editor = '0',
    help = 0,
    ignore = 0,
    ipythondir = ipythondir,
    log = 0,
    logfile = '',
    logplay = '',
    multi_line_specials = 1,
    messages = 1,
    nosep = 0,
    pdb = 0,
    pprint = 0,
    profile = '',
    prompt_in1 = 'In [\\#]:',
    prompt_in2 = '   .\\D.:',
    prompt_out = 'Out[\\#]:',
    prompts_pad_left = 1,
    quick = 0,
    readline = 1,
    readline_merge_completions = 1,
    readline_omit__names = 0,
    rcfile = 'ipythonrc' + rc_suffix,
    screen_length = 0,
    separate_in = '\n',
    separate_out = '\n',
    separate_out2 = '',
    system_verbose = 0,
    gthread = 0,
    qthread = 0,
    wthread = 0,
    pylab = 0,
    tk = 0,
    upgrade = 0,
    Version = 0,
    xmode = 'Verbose',
    magic_docstrings = 0,  # undocumented, for doc generation
)
#@+node:ekr.20050708093114: *6* << define regexp's >>
# Don't get carried away with trying to make the autocalling catch too
# much:  it's better to be conservative rather than to trigger hidden
# evals() somewhere and end up causing side effects.

self.line_split = re.compile(
    r'^([\s*,;/])'
    r'([\?\w\.]+\w*\s*)'
    r'(\(?.*$)'
)

# RegExp to identify potential function names
self.re_fun_name = re.compile(r'[a-zA-Z_]([a-zA-Z0-9_.]*) *$')

# RegExp to exclude strings with this start from autocalling.
# BUG FIX: use a raw string -- '\*' and '\+' are invalid escape sequences
# in an ordinary string (SyntaxWarning, eventually an error, in modern
# Python).  The compiled pattern is unchanged.
self.re_exclude_auto = re.compile(r'^[!=()<>,\*/\+-]|^is ')

# try to catch also methods for stuff in lists/tuples/dicts: off
# (experimental). For this to work, the line_split regexp would need
# to be modified so it wouldn't break things at '['. That line is
# nasty enough that I shouldn't change it until I can test it _well_.
#self.re_fun_name = re.compile (r'[a-zA-Z_]([a-zA-Z0-9_.\[\]]*) ?$')
#@+node:ekr.20050708093224: *6* << define escape stuff >>
# escapes for automatic behavior on the command line
self.ESC_SHELL = '!'    # run the line in the system shell
self.ESC_HELP  = '?'    # request help for the object
self.ESC_MAGIC = '%'    # invoke a magic function
self.ESC_QUOTE = ','    # auto-quote args, splitting on whitespace
self.ESC_QUOTE2 = ';'   # auto-quote the whole remainder as one string
self.ESC_PAREN = '/'    # auto-parenthesize (auto-call)

# And their associated handlers
self.esc_handlers = {
    self.ESC_PAREN: self.handle_auto,
    self.ESC_QUOTE: self.handle_auto,
    self.ESC_QUOTE2:self.handle_auto,
    self.ESC_MAGIC: self.handle_magic,
    self.ESC_HELP:  self.handle_help,
    self.ESC_SHELL: self.handle_shell_escape,
}
#@+node:ekr.20050708093433: *6* << define namespaces >>
# Set __name__ to __main__ to better match the behavior of the normal interpreter.
# __builtin__ is the Python 2 builtins module.

self.user_ns = {
    '__name__'     :'__main__',
    '__builtins__' : __builtin__,
}

# Internal namespace, seeded from a copy of the real __main__ dict.
self.internal_ns = __main__.__dict__.copy()
#@+node:ekr.20050708094606.1: *6* << create alias table >>
# dict of names to be treated as system aliases.  Each entry in the
# alias table must be a 2-tuple of the form (N,name), where N is the
# number of positional arguments of the alias.
# Starts empty; aliases are registered elsewhere.
self.alias_table = {}
#@+node:ekr.20050708150223: *6* << define inpsector >>
# Build the object inspector; 'NoColor' disables ANSI color output.
ins_colors = IPython.OInspect.InspectColors
code_colors = IPython.PyColorize.ANSICodeColors

self.inspector = IPython.OInspect.Inspector(ins_colors,code_colors,'NoColor')
#@+node:ekr.20050708105323.2: *5* usage
def usage(self):
    """Return a placeholder usage string for this embedded shell."""
    return 'A usage message'
#@+node:ekr.20050708095104.1: *5* log
def log(self, line, continuation=None):
    """Logger callback (called by the logger -- exact protocol unclear).

    Tracing is currently disabled; returns None.
    """
    if 0:  # flip to 1 to trace logged lines
        g.trace(line)
#@+node:ekr.20050708113006.2: *5* system
def system(self, s):
    """Trace the shell command *s*; actual execution is not implemented."""
    # Removed a dead 'pass' statement that followed the trace call.
    g.trace(s)
#@+node:ekr.20050708152111: *5* _prefilter
def _prefilter(self, line, continue_prompt):
    """Calls different preprocessors, depending on the form of line."""

    # NOTE: the << ... >> chunks below are Leo section references,
    # expanded from sibling nodes when the outline is tangled; this
    # node is not importable Python on its own.
    << about this function >>

    # save the line away in case we crash, so the post-mortem handler can record it
    self._last_input_line = line

    #print '***line: <%s>' % line # dbg
    if not line.strip():
        << handle empty line >>

    # print '***cont',continue_prompt  # dbg
    # special handlers are only allowed for single line statements
    if continue_prompt and not self.rc.multi_line_specials:
        return self.handle_normal(line,continue_prompt)

    # Get the structure of the input
    pre,iFun,theRest = self.split_user_input(line)
    #print 'pre <%s> iFun <%s> rest <%s>' % (pre,iFun,theRest)  # dbg

    << First check for explicit escapes in the last/first character >>
    << Next, check if we can automatically execute this thing >>
    << Let's try to find if the input line is a magic fn >>
    << execute comparisons, assignsments or function calls >>

    # If we get here, we have a normal Python line. Log and return.
    return self.handle_normal(line,continue_prompt)
#@+node:ekr.20050708152111.1: *6* << about this function >>
# All handlers *must* return a value, even if it's blank ('').

# Lines are NOT logged here. Handlers should process the line as
# needed, update the cache AND log it (so that the input cache array
# stays synced).

# This function is _very_ delicate, and since it's also the one which
# determines IPython's response to user input, it must be as efficient
# as possible.  For this reason it has _many_ returns in it, trying
# always to exit as quickly as it can figure out what it needs to do.

# This function is the main responsible for maintaining IPython's
# behavior respectful of Python's semantics.  So be _very_ careful if
# making changes to anything here.
#@+node:ekr.20050708152111.2: *6* << handle empty line >>
# The input history needs to track even empty lines, so route blank
# input through handle_normal; first undo the prompt-count bump.

if not continue_prompt:
    self.outputcache.prompt_count -= 1

return self.handle_normal('',continue_prompt)
#@+node:ekr.20050708152111.3: *6* << First check for explicit escapes in the last/first character >>
# Dispatch on explicit escape characters (?, !, %, etc.).
handler = None
if line[-1] == self.ESC_HELP:
    handler = self.esc_handlers.get(line[-1])  # the ? can be at the end

if handler is None:
    # look at the first character of iFun, NOT of line, so we skip
    # leading whitespace in multiline input
    handler = self.esc_handlers.get(iFun[0:1])

if handler is not None:
    return handler(line,continue_prompt,pre,iFun,theRest)

# Emacs ipython-mode tags certain input lines
if line.endswith('# PYTHON-MODE'):
    return self.handle_emacs(line,continue_prompt)
#@+node:ekr.20050708152111.4: *6* << Next, check if we can automatically execute this thing >>
# Allow ! (shell escape) inside multi-line statements when the
# multi_line_specials option is on:
if (
    continue_prompt and self.rc.multi_line_specials and iFun.startswith(self.ESC_SHELL)
):
    return self.handle_shell_escape(line,continue_prompt,
            pre=pre,iFun=iFun,theRest=theRest)
#@+node:ekr.20050708152111.5: *6* << Let's try to find if the input line is a magic fn >>
# Determine whether the line invokes a magic function.
oinfo = None

if hasattr(self,'magic_'+iFun):
    oinfo = self._ofind(iFun) # FIXME - _ofind is part of Magic
    if oinfo['ismagic']:
        # Be careful not to call magics when a variable assignment is
        # being made (ls='hi', for example)
        if (
            self.rc.automagic and
            (len(theRest)==0 or theRest[0] not in '!=()<>,') and 
            (self.rc.multi_line_specials or not continue_prompt)
        ):
            return self.handle_magic(line,continue_prompt,pre,iFun,theRest)
        else:
            return self.handle_normal(line,continue_prompt)
#@+node:ekr.20050708152111.6: *6* << execute comparisons, assignsments or function calls >>
# If the rest of the line begins with an (in)equality, assignment or
# function call, we should not call _ofind but simply execute it.
# This avoids spurious getattr() accesses on objects upon assignment.
#
# It also allows users to assign to either alias or magic names true
# python variables (the magic/alias systems always take second seat to
# true python code).
if theRest and theRest[0] in '!=()':
    return self.handle_normal(line,continue_prompt)

if oinfo is None:
    oinfo = self._ofind(iFun) # FIXME - _ofind is part of Magic

if not oinfo['found']:
    return self.handle_normal(line,continue_prompt)
else:
    #print 'iFun <%s> rest <%s>' % (iFun,theRest) # dbg
    if oinfo['isalias']:
        return self.handle_alias(line,continue_prompt,
                                     pre,iFun,theRest)

    # Autocall only when theRest isn't an operator/comparison and iFun
    # looks like a function name bound to something callable.
    if self.rc.autocall and \
           not self.re_exclude_auto.match(theRest) and \
           self.re_fun_name.match(iFun) and \
           callable(oinfo['obj']) :
        #print 'going auto'  # dbg
        return self.handle_auto(line,continue_prompt,pre,iFun,theRest)
    else:
        #print 'was callable?', callable(oinfo['obj'])  # dbg
        return self.handle_normal(line,continue_prompt)
#@+node:ekr.20050708152111.7: *5* prefilter & helpers
#@+node:ekr.20050708152111.9: *6* _prefilter
def _prefilter(self, line, continue_prompt):
    """Calls different preprocessors, depending on the form of line."""

    # NOTE: the << ... >> chunks are Leo section references.  This
    # variant returns the (possibly rewritten) line directly instead of
    # routing everything through handle_normal.
    << about this function >>

    #if line.startswith('%crash'): raise RuntimeError,'Crash now!'  # dbg

    # save the line away in case we crash, so the post-mortem handler can record it
    self._last_input_line = line

    if not line.strip():
        return ''

    # special handlers are only allowed for single line statements
    if continue_prompt and not self.rc.multi_line_specials:
        return line

    # Get the structure of the input
    pre,iFun,theRest = self.split_user_input(line)
    #print 'pre <%s> iFun <%s> rest <%s>' % (pre,iFun,theRest)  # dbg

    << First check for explicit escapes in the last/first character >>
    << Next, check if we can automatically execute this thing >>
    << Let's try to find if the input line is a magic fn >>
    << execute comparisons, assignsments or function calls >>

    # A normal Python line.
    return line
#@+node:ekr.20050708152111.10: *7* << about this function >>
# All handlers *must* return a value, even if it's blank ('').

# Lines are NOT logged here. Handlers should process the line as
# needed, update the cache AND log it (so that the input cache array
# stays synced).

# This function is the main responsible for maintaining IPython's
# behavior respectful of Python's semantics.  So be _very_ careful if
# making changes to anything here.
#@+node:ekr.20050708152111.12: *7* << First check for explicit escapes in the last/first character >>
# Dispatch on explicit escape characters (?, !, %, etc.).
handler = None
if line[-1] == self.ESC_HELP:
    handler = self.esc_handlers.get(line[-1])  # the ? can be at the end

if handler is None:
    # look at the first character of iFun, NOT of line, so we skip
    # leading whitespace in multiline input
    handler = self.esc_handlers.get(iFun[0:1])

if handler is not None:
    return handler(line,continue_prompt,pre,iFun,theRest)

# Emacs ipython-mode tags certain input lines
if line.endswith('# PYTHON-MODE'):
    return self.handle_emacs(line,continue_prompt)
#@+node:ekr.20050708152111.13: *7* << Next, check if we can automatically execute this thing >>
# Allow ! (shell escape) inside multi-line statements when the
# multi_line_specials option is on:
if (
    continue_prompt and self.rc.multi_line_specials and iFun.startswith(self.ESC_SHELL)
):
    return self.handle_shell_escape(line,continue_prompt,pre=pre,iFun=iFun,theRest=theRest)
#@+node:ekr.20050708152111.14: *7* << Let's try to find if the input line is a magic fn >>
# Determine whether the line invokes a magic function.
oinfo = None

if hasattr(self,'magic_'+iFun):
    oinfo = self._ofind(iFun) # FIXME - _ofind is part of Magic
    if oinfo['ismagic']:
        # Be careful not to call magics when a variable assignment is
        # being made (ls='hi', for example)
        if (
            self.rc.automagic and
            (len(theRest)==0 or theRest[0] not in '!=()<>,') and 
            (self.rc.multi_line_specials or not continue_prompt)
        ):
            return self.handle_magic(line,continue_prompt,pre,iFun,theRest)
        else:
            return line
#@+node:ekr.20050708152111.15: *7* << execute comparisons, assignsments or function calls >>
# If the rest of the line begins with an (in)equality, assignment or
# function call, we should not call _ofind but simply execute it.
# This avoids spurious getattr() accesses on objects upon assignment.
#
# It also allows users to assign to either alias or magic names true
# python variables (the magic/alias systems always take second seat to
# true python code).
if theRest and theRest[0] in '!=()':
    return line

if oinfo is None:
    oinfo = self._ofind(iFun) # FIXME - _ofind is part of Magic

if not oinfo['found']:
    return line

if oinfo['isalias']:
    return self.handle_alias(line,continue_prompt,pre,iFun,theRest)

# Autocall only when theRest isn't an operator/comparison and iFun
# looks like a function name bound to something callable.
if (self.rc.autocall and not self.re_exclude_auto.match(theRest) and 
    self.re_fun_name.match(iFun) and callable(oinfo['obj'])
):
    return self.handle_auto(line,continue_prompt,pre,iFun,theRest)
else:
    return line
#@+node:ekr.20050708165401.1: *6* handle_normal ( no longer used)
def handle_normal(self,line,continue_prompt=None,pre=None,iFun=None,theRest=None):
    """Handle normal input lines; serves as a template for the other handlers."""
    g.trace(line)
    if 0:  # logging and input-cache updates are currently disabled
        self.log(line, continue_prompt)
        self.update_cache(line)
    return line
#@+node:ekr.20050708152111.17: *6* handle_alias (done)
def handle_alias(self,line,continue_prompt=None,pre=None,iFun=None,theRest=None):
    """Rewrite an alias line into a call_alias() invocation string."""
    # Escape quotes in the arguments before embedding them in the call.
    escaped = esc_quotes(theRest)
    return "%s%s.call_alias('%s','%s')" % (pre,self.name,iFun,escaped)
#@+node:ekr.20050708152111.18: *6* handle_shell_escape (needs work)
def handle_shell_escape(self, line, continue_prompt=None,pre=None,iFun=None,theRest=None):

    """Execute the line in a shell, empty return value"""

    # Example of a special handler. Others follow a similar pattern.
    if continue_prompt:  # multi-line statements
        if iFun.startswith('!!'):
            # '!!' (capture output) is only legal as a single-line statement.
            print 'SyntaxError: !! is not allowed in multiline statements'
            return pre
        else:
            # Rewrite '!cmd args' into a self.system("cmd args") call.
            cmd = ("%s %s" % (iFun[1:],theRest)).replace('"','\\"')
            line_out = '%s%s.system("%s")' % (pre,self.name,cmd)
    else: # single-line input
        if line.startswith('!!'):
            # rewrite iFun/theRest to properly hold the call to %sx and
            # the actual command to be executed, so handle_magic can work
            # correctly
            theRest = '%s %s' % (iFun[2:],theRest)
            iFun = 'sx'
            return self.handle_magic('%ssx %s' % (
                self.ESC_MAGIC,line[2:]),continue_prompt,pre,iFun,theRest)
        else:
            cmd = esc_quotes(line[1:])
            line_out = '%s.system("%s")' % (self.name,cmd)

    return line_out
#@+node:ekr.20050708152111.19: *6* handle_magic (done)
def handle_magic(self, line, continue_prompt=None,pre=None,iFun=None,theRest=None):
    """Rewrite a magic invocation into an ipmagic() call string.

    Also log them with a prepended # so the log is clean Python.
    """
    quoted = esc_quotes('%s %s' % (iFun,theRest))
    return '%sipmagic("%s")' % (pre,quoted)
#@+node:ekr.20050708152111.20: *6* handle_auto (may need work)
def handle_auto(self, line, continue_prompt=None,pre=None,iFun=None,theRest=None):
    """Handle lines which can be auto-executed, quoting if requested."""
    # Auto-execution applies only to single-line input.
    if continue_prompt:
        return line
    if pre == self.ESC_QUOTE:
        # Auto-quote: split the arguments on whitespace and quote each.
        args = '", "'.join(theRest.split())
        return '%s("%s")\n' % (iFun,args)
    if pre == self.ESC_QUOTE2:
        # Auto-quote the entire remainder as one string.
        return '%s("%s")\n' % (iFun,theRest)
    # Auto-paren.
    if theRest[0:1] in ('=','['):
        # Could be a rebinding of an existing callable's name, or item
        # access on an object that is BOTH callable and subscriptable:
        # don't autocall in these cases.
        return '%s %s\n' % (iFun,theRest)
    if theRest.endswith(';'):
        return '%s(%s);\n' % (iFun.rstrip(),theRest[:-1])
    return '%s(%s)\n' % (iFun.rstrip(),theRest)
#@+node:ekr.20050708152111.21: *6* handle_help (may need work) (and it's stupid anyway)
def handle_help(self, line, continue_prompt=None,pre=None,iFun=None,theRest=None):
    """Try to get some help for the object.

    obj? or ?obj   -> basic information.
    obj?? or ??obj -> more details.
    """

    # Don't process lines which would be otherwise valid python, such as "x=1 # what?"
    try:
        code.compile_command(line)
    except SyntaxError: # Only handle stuff which is NOT valid syntax
        # Strip the leading or trailing help escape before lookup.
        if line[0]==self.ESC_HELP:
            line = line[1:]
        elif line[-1]==self.ESC_HELP:
            line = line[:-1]
        if line:
            self.magic_pinfo(line)
        else:
            page(self.usage,screen_lines=self.rc.screen_length)
        return '' # Empty string is needed here!
    # NOTE(review): bare except silently swallows every other error from
    # compile_command -- presumably deliberate best-effort; confirm.
    except: pass

    return line
#@+node:ekr.20050708152111.22: *6* handle_emacs
def handle_emacs(self,line,continue_prompt=None,pre=None,iFun=None,theRest=None):
    """Handle input lines tagged by Emacs python-mode.

    Currently a pass-through: nothing is done and the input cache
    shouldn't be updated.
    """
    return line
#@+node:ekr.20050708152111.23: *6* safe_execfile (not used)
#@+node:ekr.20050708152111.24: *6* split_user_input
if 0: # ref
    self.line_split = re.compile(
        r'^([\s*,;/])'          # Groups[0]: s, followed by special chars: , ; or /
        r'([\?\w\.]+\w*\s*)'    # Groups[1]: one char,
        r'(\(?.*$)' )           # Groups[2]: arg list

def split_user_input(self,line):

    """Split user input into pre-char, function part and rest."""

    lsplit = self.line_split.match(line)

    if lsplit is None:  # no regexp match returns None
        # Fall back to a plain whitespace split.
        try:
            iFun,theRest = line.split(None,1)
        except ValueError:
            iFun,theRest = line,''
        # NOTE(review): pattern should be a raw string (r'^(\s*)(.*)')
        # to avoid invalid-escape warnings on modern Pythons.
        pre = re.match('^(\s*)(.*)',line).groups()[0]
    else:
        pre,iFun,theRest = lsplit.groups()

    print 'line:<%s>' % line # dbg
    print 'pre <%s> iFun <%s> rest <%s>' % (pre,iFun.strip(),theRest) # dbg

    return pre,iFun.strip(),theRest
#@+node:ekr.20050708152111.25: *6* update_cache
def update_cache(self, line):
    """Record *line* in the input cache (placeholder: does nothing yet)."""
    return None
#@+node:ekr.20050708105323.3: *4* ipmagic & palias
def ipmagic(s):
    """Forward *s* to IPython's ipmagic, tracing the call."""
    g.trace()
    return IPython.iplib.ipmagic(s)

def ipalias(s):
    """Forward *s* to IPython's ipalias, tracing the call."""
    g.trace()
    return IPython.iplib.ipalias(s)
#@+node:ekr.20050708165531.1: *4* esc_quotes
def esc_quotes(s):
    """Return *s* with quotes escaped, via IPython's helper."""

    return IPython.iplib.esc_quotes(s)
#@+node:ekr.20110301092139.14873: ** Terry Brown
@language python
#@+node:ekr.20110301092139.14874: *3* docs
@language rest

Two use cases spring to mind:

Leo is already running (somewhere on my desktop) with 2-3 leo files loaded in
tabs. I'm working in a console, and I want to edit a file in the current
directory: `led foo.py` causes the running leo to create '@edit foo.py' and pop
to the front with it loaded. Vs. finding the running version of leo, then
navigating through the open file dialog to the directory I'm already in, in the
shell window.

And of course with the new sticknotes plugin, set up a window manager hotkey to
pop open a new stickynote from the running leo to jot something down.

======

Since I'm making it a two-way protocol (you can execute scripts in leo, and get
results back), any kind of command line application will be possible. Something
like having leo operations in shell pipeline or shell scripts will make sense.

e.g. you could do

make | leopaste "compilation results for tuesday"

Or

leocat | grep "frobbo"

which could allow you to navigate around leo outline and press some
kind of "paste" button to dump the current node to stdout.

These are somewhat psychedelic for now, but lots of Leo is about
imagination ;-).


#@+node:ekr.20110301092139.14875: *4* sn
You can type(*), either in a shell window or the window manager's
command line entry area, "sn lisa from accounting", and a node called
"lisa from accounting" with an initial contents recording the current
time is created at the top of the leo commander which started the
server.  A stickynote window for editing that node pops up and becomes
active.  If you just type "sn" a default node name of the current time
is used.  Also a read-only attribute is set so the created date is
shown if you're running the edit_attribs.py plugin ;-)

(*) assuming you call the script 'sn' and put it on your path.
#@+node:ekr.20110301092139.14876: *4* led
Here is a quick hack at a script to edit (or create and edit) a file in leo
from the command line.

 - note the ugly sys.path.append, you don't need that line if you have
  leo installed, but I only run it from bzr

 - it attempts to find a previous editing of the file in leo, but it
  could easily miss it leading you to have multiple edits of the same
  file in leo, take care, or delete the @edit node when you're done
  editing

I've wanted to be able to pop up a file in leo like this for ages.  Now
this script, called 'led' and made executable and placed on the path,
will replace 'e', my emacs invocation script.

...which is interesting I guess, that what was keeping me from
replacing emacs almost completely with leo is not leo itself, but how
leo's invoked to edit a simple file from the command line.

Apart from nxml-mode, emacs's validating xml editing mode, I can't
think what I'll use emacs for now.
#@+node:ekr.20110301092139.14877: *4* led 2
It's probably of little benefit to people who work with mouse, menus,
and icons all the time.

But if you do everything from the command line (i.e. your OS's shell),
then it makes moving things into leo much smoother.

Suppose I've run leo and checked my todo items for the day, and now
leo's buried under some other window and I'm working in the shell in
some directory I've just created where I've just unzipped something and
now I want to edit a file that was in the .zip.

I can either
 - find the leo window
 - insert a node
 - activate the open file dialog
 - navigate to the directory containing this file
 - select the file
 - and finally do the editing I want to do
or, with Ville's communication to the running leo
 - enter on the command line `led foo.txt`
 - and do the editing I want to do

where led is a script which causes the running leo to create an @edit
node containing foo.txt and pop to the front with the node selected.

Previously I was much more likely to use emacs, just because it was
easier to invoke that way from the command line.

So, opening files, creating sticky notes, invoking leo to handle output
from grep or diff or whatever - all these things are better now.

=====

The corresponding point and click process for this scenario is

a) select > r-click > Edit with Leo
b) or drag'n'drop from folder to Leo icon on task bar (or window if visible)

In short, I see this being a productivity boost for all users.
#@+node:ekr.20110301092139.14878: *3* led
#!/usr/bin/python

'''Edit a file in leo from the command line, adds an @edit node at the top of
the outline of the first commander in the leo instance.'''

import sys
sys.path.append("/home/tbrown/Desktop/Package/leo/bzr/leo.repo/trunk")
from leo.external import lproto
import os

# Connect to the running Leo's lproto server via its unix-socket address.
addr = open(os.path.expanduser('~/.leo/leoserv_sockname')).read()
pc  = lproto.LProtoClient(addr)
# The template below runs inside the remote Leo process.
# NOTE(review): it uses the Python 2 file() builtin; it will fail on a
# Python 3 Leo -- confirm the target Leo version.
pc.send("""
import os
fn = %s
c = g.app.commanders()[0]
h = "@edit "+fn
n = g.findNodeAnywhere(c, h)
if not n:
 n = c.rootPosition().insertAfter()
 n.moveToRoot(c.rootPosition())
 n.h = h
 if os.path.isfile(fn):
     n.b = file(fn).read()
c.selectPosition(n)
c.redraw()
c.bringToFront()
""" % repr(os.path.join(os.getcwd(), sys.argv[1])) )
#@+node:ekr.20110301092139.14879: *3* lo
#!/usr/bin/python

'''Load a .leo file from the command line into a running leo instance.'''

import sys
sys.path.append("/home/tbrown/Desktop/Package/leo/bzr/leo.repo/trunk")
from leo.external import lproto
import os

# Connect to the running Leo's lproto server via its unix-socket address,
# then ask it to open the file named on the command line.
addr = open(os.path.expanduser('~/.leo/leoserv_sockname')).read()
pc  = lproto.LProtoClient(addr)
pc.send("""
c = g.app.commanders()[0]
g.openWithFileName(%s,old_c=c)
""" % repr(os.path.join(os.getcwd(), sys.argv[1])) )
#@+node:ekr.20110301092139.14880: *3* sn
#!/usr/bin/python

'''Sticky note tie-in from command line/toolbar icon to take quick notes using
a running leo.'''

import sys
sys.path.append("/home/tbrown/Desktop/Package/leo/bzr/leo.repo/trunk")
from leo.external import lproto
import os
import time
import sys

# Connect to the running Leo's lproto server via its unix-socket address.
addr = open(os.path.expanduser('~/.leo/leoserv_sockname')).read()
pc  = lproto.LProtoClient(addr)
# Remote script: create a note node headed by the current time, bodied
# with the command-line words, and open it in a stickynote window.
cmd="""
c = g.app.commanders()[0]
n = c.rootPosition().insertAfter()
n.moveToRoot(c.rootPosition())
n.h = "{timestamp}"
n.b = '''{content}\n\n# {timestamp}'''
c.selectPosition(n)
c.redraw()
c.k.simulateCommand('stickynote')
""".format(timestamp=time.asctime(), content=' '.join(sys.argv[1:]))
pc.send(cmd)
# print cmd
#@+node:ekr.20141105055521.18: *3* Tool for diffing Leo files
'''
From: Terry Brown

The script below is a tool for diffing two Leo files. The attached
screenshot illustrates the output for two different revisions of
LeoPyRef.leo.

``- nodename``
    indicates a node which disappeared
``+ nodename``
    a node which is new,
``!v nodename`` followed by ``!^ nodename``
    a node with an unchanged heading but changed content, the first
    linking to the old version, the second linking to the new version

If you have the bookmarks.py plugin active, you can double click nodes
to jump to the original(s).

'''
from leo.core.leoNodes import vnode
if not hasattr(vnode, 'insertAsLastChild'):
    # add insertAsLastChild method to vnodes (older Leo versions lack it)
    def ialc(self):
        # Create a fresh vnode and link it as this node's last child.
        vnode(self.context)._linkAsNthChild(self, len(self.children))
        return self.children[-1]
    vnode.insertAsLastChild = ialc

# Ask the user for the two .leo files to compare.
from_filename = g.app.gui.runOpenFileDialog('From (old) file', [('Leo', '*.leo')])
to_filename = g.app.gui.runOpenFileDialog('To (new) file', [('Leo', '*.leo')])

# from_filename = "/mnt/shuttle/bkup/usr1/2012-07-13/home/tbrown/.leo/.todo.leo"
# to_filename = "/mnt/shuttle/bkup/usr1/2012-07-15/home/tbrown/.leo/.todo.leo"

from_c = g.openWithFileName(from_filename, c)
to_c = g.openWithFileName(to_filename, c)

vf = from_c.hiddenRootNode
vt = to_c.hiddenRootNode

# The two files must be distinct from each other and from this outline.
assert from_c != c
assert to_c != c
assert from_c != to_c

# Node that will receive the diff results.
nd = c.rootPosition().insertAfter()
# NOTE(review): copy().back().moveAfter(nd) appears to reposition nd via
# a sibling shuffle -- confirm intent before simplifying.
nd.copy().back().moveAfter(nd)
nd.h = 'diff @bookmarks'

def text_match(a, b):
    """Return (heads_equal, heads_and_bodies_equal) for nodes a and b."""
    heads = a.h == b.h
    return (heads, heads and a.b == b.b)
def gnx_match(a, b):
    """Like text_match, but the nodes must also share the same gnx."""
    same_id = a.h == b.h and a.gnx == b.gnx
    return (same_id, same_id and a.b == b.b)

def diff_trees(vf, vt, path):
    """Recursively diff the vnode trees vf (old) and vt (new).

    Returns a new vnode tree describing the differences, or None when
    the subtrees match.  Headline markers on result nodes:
      '- '  node only in the old tree
      '+ '  node only in the new tree
      '!v ' / '!^ '  same headline, changed body (link to old / new)
    Uses from_filename, to_filename and nd from the enclosing script.
    """

    fonly = []  # nodes only in from tree
    tonly = []  # nodes only in to tree
    diffs = []  # nodes which occur in both but have different descendants

    # count number of times each headline occurs as a child of
    # each node being compared
    count_f = {}
    for cf in vf.children:
        count_f[cf.h] = count_f.get(cf.h, 0) + 1
    count_t = {}
    for ct in vt.children:
        count_t[ct.h] = count_t.get(ct.h, 0) + 1

    for cf in vf.children:

        for ct in vt.children:

            # Unique headlines match by text; duplicates must also match by gnx.
            if count_f[cf.h] == 1 and count_t[ct.h] == 1:
                equal = text_match
            else:
                equal = gnx_match

            head_eq, body_eq = equal(cf, ct)

            if body_eq:
                diffs.append(diff_trees(cf, ct, path+[vf.h]))

                break
            elif head_eq:
                # Same headline, different body: emit a '!v' node linking
                # to the old version and a '!^' node linking to the new.
                d = diff_trees(cf, ct, path+[vf.h])
                if d:
                    d.h = '!v '+d.h
                else:
                    d = vnode(nd.v.context)
                    d.h = '!v '+cf.h
                d.b = "file://%s/#%s\\n\\n%s" % (
                    from_filename, 
                    '-->'.join((path+[vf.h]+[cf.h])[1:]),
                    cf.b
                )
                diffs.append(d)
                d = vnode(nd.v.context)
                d.h = '!^ '+cf.h
                d.b = "file://%s/#%s\\n\\n%s" % (
                    to_filename, 
                    '-->'.join((path+[vt.h]+[ct.h])[1:]),
                    ct.b
                )
                diffs.append(d)
                break
        else:
            fonly.append(cf)

    for ct in vt.children:

        for cf in vf.children:

            if count_f[cf.h] == 1 and count_t[ct.h] == 1:
                equal = text_match
            else:
                equal = gnx_match

            head_eq, body_eq = equal(cf, ct)
            if head_eq or body_eq:
                # no need to recurse matches again
                break

        else:
            tonly.append(ct)

    if not any(diffs) and not fonly and not tonly:
        return None

    vd = vnode(nd.v.context)
    vd.h = vf.h
    for d in diffs:
        if d:
            vd.children.append(d)
    for f in fonly:
        n = vd.insertAsLastChild()
        n.h = '- '+f.h
        n.b = "file://%s/#%s" % (from_filename, '-->'.join((path+[vf.h]+[f.h])[1:]))
    for t in tonly:
        n = vd.insertAsLastChild()
        n.h = '+ '+t.h
        n.b = "file://%s/#%s" % (to_filename, '-->'.join((path+[vf.h]+[t.h])[1:]))

    return vd

# Run the diff and attach the results (minus their hidden root) to nd.
v = diff_trees(vf, vt, [])
if v:
    nd.v.children.extend(v.children)  # snip off <hidden root node>

c.bringToFront()
c.redraw()
#@+node:ekr.20150416061426.1: ** Testing & profiling
#@+node:ekr.20150416171508.1: *3* @ignore testing buttons
#@+node:ekr.20190117144022.1: *4* @button goto-test (Terry Brown)
<< docstring >>
import re
tests_path = c.config.getString("pytest-path") or "tests"
info = {}

# climb up node tree collecting info.
for nd in p.self_and_parents_iter():
    # NOTE(review): this matches against p.b (the start node) on every
    # iteration; nd.b looks intended since the loop walks the ancestors
    # -- confirm before changing.
    definition = re.match(r'def ([^( ]*)\(', p.b)
    if definition and not info.get('func'):
        info['func'] = definition.group(1)
    if nd.h.endswith('.py') and not info.get('file'):
        info['file'] = nd.h.split()[-1].split('/')[-1]
    if nd.h.strip('/') == tests_path.strip('/'):
        info['test'] = True

nd = p.copy()

if info.get('test'):  # we started in the tests folder
    while nd.h.strip('/') != tests_path.strip('/'):
        nd = nd.parent()  # climb up to code folder
    if info.get('file'):  # find or create code file
        target = info['file'][5:]  # strip the 'test_' prefix
        for sib in nd.self_and_siblings():
            if sib.h.endswith(target):
                nd = sib
                break
        else:
            nd = nd.insertAfter()
            nd.h = '@auto ' + target
    if info.get('func'):  # find or create code function
        target = info['func'][5:]  # strip the 'test_' prefix
        for child in nd.children():
            if child.h == target:
                nd = child
                break
        else:
            nd = nd.insertAsLastChild()
            nd.h = target
            nd.b = 'def'  # let abbreviation build the rest
else:  # we started in the code folder
    if info.get('func'):  # get up to file level (weak, could be deeper)
        nd.moveToParent()
    for sib in nd.self_and_siblings():  # find or create tests folder
        if sib.h.strip('/') == tests_path.strip('/'):
            nd = sib
            break
    else:
        nd = nd.insertBefore()
        nd.h = "/%s/" % tests_path.strip('/')
        nd.b = "@path %s" % tests_path
    if info.get('file'):  # find or create test file
        target = 'test_' + info['file']
        for child in nd.children():
            if child.h.endswith(target):
                nd = child
                break
        else:
            nd = nd.insertAsLastChild()
            nd.h = "@auto %s" % target
            nd.b = "import %s\n\n@others\n" % info['file'].replace('.py', '')
    if info.get('func'):  # find or create test function
        target = 'test_' + info['func']
        for child in nd.children():
            if child.h == target:
                nd = child
                break
        else:
            nd = nd.insertAsLastChild()
            nd.h = target
            nd.b = "def %s():\n    assert %s. == 'otters'\n" % (
                target, info['file'].replace('.py', ''))

c.selectPosition(nd)
c.redraw()
#@+node:ekr.20190117144022.2: *5* << docstring >>
"""
switch between code and test

If you're using `pytest`[1] and use the layout described below, this @button code
will jump you between a function and its test, creating the (test)function
if it doesn't already exist.  Also the tests folder and `test_foo.py` file.
It assumes use of the `active_path` plugin which headlines folders as
`/myFolder/` with body text `@path myFolder`.

This code is very heavy on assumptions, but a lot of those are driven
by pytest default behavior.

To run tests, use `python -m pytest`, as anything involving py.test is
deprecated, and for some reason `pytest` finds files but runs no tests.
Tested with pytest 3.x, note Ubuntu 16.04 seems to still be on 2.x

Assumed layout:

/tests/
    test_utils.py
        def test_add_one()...
        def test_sub_one()...
    test_gui.py
        def test_load_buttons()...
utils.py
    def add_one()...
    def sub_one()...
gui.py
    def load_buttons()...

So running this code from a button will jump you from
test_sub_one() back to sub_one() and vice versa, creating any
missing parts of the hierarchy in the process.

[1] https://docs.pytest.org/en/latest/
"""
#@+node:ekr.20040721113934: *4* @button Run the profiler on script in c.p.b
import profile
import pstats

# Profile the body text of the current node and print the statistics.
# Note: the profiled code should do all needed imports.
path = g.os_path_abspath(g.os_path_join(g.app.loadDir,'..','test','leoProfile.txt'))
path = str(path)

if p.b.rstrip():
    s = p.b.rstrip() + '\n'
    profile.run(s,path)  # raw stats are written to path
    print '-' * 40
    print "Profiling info sent to %s" % path
    stats = pstats.Stats(path)
    stats.strip_dirs()
    stats.sort_stats('cum','file','name')
    stats.print_stats()
#@+node:ekr.20040901065642.2: *4* @button Run timit on script in c.p.b
@ Improved timeit script after an idea by 'e'.

Comments of the form #@count nnn set the repeat count.
Comments of the form #@setup comment delimits the end of setup code.
@c

try:
    import timeit # Exists only in Python 2.3 and above.
except ImportError: 
    timeit = None
    print "Can not import timeit"

if timeit and p.b.strip():
    s = p.b.rstrip() + '\n'
    << scan for #@count >>
    << put setup code in s1 and everything else in s2 >>
    t = timeit.Timer(stmt=s2,setup=s1)
    try:
        if 1: # faster.
            result = t.timeit(count)
        else: # better results.
            result = min(t.repeat(3, count))  
        print "count: %d : %f %s" % (
            count, result, p.h.strip())
    except:
        t.print_exc()
#@+node:ekr.20040901072339: *5* << scan for #@count >>
lines = s.split('\n')

count = 1000000 # default count
tag = "#@count"

for line in lines:
    i = g.skip_ws(line,0)
    if g.match(line,0,tag):
        i += len(tag)
        i = g.skip_ws(line,i)
        junk,val = g.skip_long(line,i)
        if val is not None:
            count = abs(val)
            # print "Setting count to",count
            break
#@+node:ekr.20040901071028: *5* << put setup code in s1 and everything else in s2 >>
lines = s.split('\n')

# Find the "#@setup" marker; code before it becomes the timeit setup code.
# Bug fix: the old test `if i < len(lines)` was always true when the loop
# ran to completion, so a missing #@setup wrongly split at the last line
# (and raised NameError on an empty body).  Track the match explicitly.
found = False
for i, line in enumerate(lines):
    if line.strip() == "#@setup":
        found = True
        break

if found:
    # Split at the #@setup line (the marker itself stays at the top of s2
    # as a harmless comment).
    s1 = '\n'.join(lines[:i])
    s2 = '\n'.join(lines[i:])
else:
    # There is no setup.
    s1 = None
    s2 = s

# timeit.Timer requires non-empty stmt/setup strings.
if not s1: s1 = 'pass'
if not s2: s2 = 'pass'
#@+node:ekr.20080329083943.1: *4* @button Write cleaned files to 'clean' directory
# Writes all derived files in the outline to 'clean' directories.
# For example, writes test/foo.py to test/clean/foo.py.

@others

for p in c.all_positions():
    if p.isAnyAtFileNode():
        clean_file(p)
#@+node:ekr.20080329083943.2: *5* clean_file
def clean_file(p):
    '''Write the sentinel-free output of @&lt;file&gt; node p to a parallel
    'clean' directory, e.g. test/foo.py -> test/clean/foo.py.

    Does nothing (with a message) if the 'clean' directory does not exist.
    '''
    at = c.atFileCommands

    # at.write may clobber p.v.tnodeList; save a copy and restore it after.
    if hasattr(p.v, 'tnodeList'):
        has_list = True
        old_list = p.v.tnodeList[:]
    else:
        has_list = False

    at.write(
        root=p, nosentinels=True,
        thinFile=False,
        scriptWrite=True,
        toString=True,
        write_strips_blank_lines=False)

    if has_list:
        p.v.tnodeList = old_list

    fileName = g.os_path_normpath(g.os_path_join(
        at.default_directory,
        'clean',
        g.os_path_basename(p.anyAtFileNodeName())))

    # Adapted from at.openFileForWritingHelper.
    path = g.os_path_dirname(fileName)
    if not g.os_path_exists(path):
        g.es('clean directory does not exist', path)
        return

    try:
        # 'file' was the Python 2 builtin; use open, closed via with.
        with open(fileName, 'w') as f:
            f.write(at.stringOutput)
        g.es_print('wrote', fileName)
    except IOError:
        g.es_print('can not write', fileName, color='red')
        g.es_exception()
#@+node:ekr.20161017063308.1: *3* script: Find problems in pylint-leo-rc.txt
g.cls()
print_all_lines = True
# The two rc files to scan for lines that fail to encode.
paths = [
    r'c:\leo.repo\leo-editor\leo\test\pylint-leo-rc.txt',
    r'c:\leo.repo\leo-editor\leo\test\pylint-leo-rc-ref.txt',
]
for path in paths:
    with open(path) as f:
        s = f.read()
    errors = []
    n = 0  # Running character offset of the current line.
    for i, line in enumerate(g.splitLines(s)):
        try:
            if print_all_lines:
                print('  %3s %4s %s' % (i + 1, n, line.rstrip()))
            else:
                g.toUnicode(line)
        except UnicodeEncodeError:
            # Report the line number and length instead of the bad text.
            print('**%3s %4s %s' % (i + 1, n, len(line.rstrip())))
            errors.append(i + 1)
        n += len(line)
    print('%s error lines: %s' % (g.shortFileName(path), errors))
#@+node:ekr.20181020061600.1: *3* script: test to_key (curses gui0
# Tests curses gui.
g.cls()
import imp
import curses.ascii as a
import leo.plugins.cursesGui2 as cursesGui2
imp.reload(cursesGui2)
h = cursesGui2.CursesKeyHandler()
for i in range(255):
    char, shortcut = h.to_key(i)
    s = a.controlnames[i] if i <= 32 else a.unctrl(i)
    print('%3s %-8s %-7r %r' % (i, s, char, shortcut))
for i in range(352):
    if i > 255:
        char, shortcut = h.to_key(i)
        # s = a.controlnames[i] if i <= 32 else a.unctrl(i)
        print('%3s %-7r %r' % (i, char, shortcut))
#@+node:ekr.20181020061502.1: *3* script: verify links (to check curses gui)
g.cls()
# (label, object) pairs; a None object prints with an empty class name.
checks = (
    ('c.frame', c.frame),
    ('c.frame.tree', c.frame.tree),
    ('c.frame.tree.canvas', c.frame.tree.canvas),
    ('c.frame.tree.treeWidget', c.frame.tree.treeWidget), # Qt only.
    ('-----', None),
    ('c.frame.body', c.frame.body),
    ('c.frame.body.widget', c.frame.body.widget),
    ('c.frame.body.wrapper', c.frame.body.wrapper),
)
for label, item in checks:
    name = item.__class__.__name__ if item else ''
    print('%30s %s' % (label, name))
#@+node:ekr.20051204180404: ** Text
#@+node:ekr.20190911232205.1: *3* @@button next-trailing-comment @key=ctrl-1
g.cls()
import re
pattern = re.compile(r'\w+\s*=\s\w+')

def do_node(p):
    prev_assign = False
    old_lws = 0
    lines = g.splitLines(p.b)
    for i, line in enumerate(lines):
        lws = g.computeLeadingWhitespaceWidth(line, tab_width=-4)
        if line.strip().startswith('@nobeautify'):
            return False
        if line.strip().startswith('#'):
            if prev_assign and lws > old_lws:
                # Found a likely trailing comment.
                print(f"\n{i} {p.h}")
                print(g.objToString(
                    [f"{i+i2:3} {z}" for i2, z in enumerate(lines[i-1:i+1])],
                    tag=f"line {i} {p.h}"),
                )
                return True
            prev_assign = False
        else:
            old_lws = lws
            prev_assign = pattern.search(line)
    return False
    
p = c.p
if do_node(p):
    p.moveToThreadNext()
while p:
    if do_node(p):
        c.selectPosition(p)
        break
    p.moveToThreadNext()
#@+node:ekr.20181231155727.1: *3* @@command make-vim-node
'''
Select from the cursor to a line starting with "endfunction",
then do 'extract' command.
'''
s = c.p.b
w = c.frame.body.wrapper
i = w.getInsertPoint()
target = '\nendfunction'
j = s.find(target, i)
if j > -1:
    j += len(target)
    j = g.skip_line(s,j)
    w.setSelectionRange(i, j, insert=i)
    c.frame.body.wrapper.setSelectionRange(i, j, insert=i)
    c.k.simulateCommand('extract')
#@+node:ekr.20150416171056.1: *3* @ignore text editing buttons
#@+node:ekr.20051213093427: *4* @button Add tab after each :
@color

s = p.b

result = []
lines = g.splitLines(s)
for line in lines:
    i = line.find(':')
    if i > -1:
        result.append(line[:i+1] + ' '*4 + line[i+1:])
    else:
        result.append(line)

s = ''.join(result)
g.trace(s)
c.setBodyString(p,s)
#@+node:ekr.20051213092601: *4* @button Add tab before each =
@color

s = p.b
result = []
lines = g.splitLines(s)
for line in lines:
    i = line.find('=')
    if i > -1:
        result.append(line[:i] + ' '*4 + line[i:])
    else:
        result.append(line)
s = ''.join(result)
g.trace(s)
# c.setBodyString(p,s)
#@+node:ekr.20040723065021: *4* @button Call g.stripBlankLines on a subtree
# Tag the start of the command.
u = c.undoer
u.beforeChangeGroup(c.p,"Change All")
n = 0 ; total = 0
for p in c.p.self_and_subtree():
    total += 1
    body = p.bodyString() # Don't use p.b
    s = g.stripBlankLines(body)
    if s != body:
        n += 1
        c.setBodyString(p,s)
        u.setUndoTypingParams(p,'Change',
            oldText=body,newText=s,oldSel=None, newSel=None)

# Tag the end of the command.
u.afterChangeGroup(c.p,"Change All",reportFlag=False,dirtyVnodeList=[])
print("%d nodes changed (%d total)" % (n,total))
#@+node:ekr.20040723065047: *5* Test
@ignore
a
  b

c  

last
@language python
#@+node:ekr.20080519162425.1: *4* @button Change leo imports
change = False  # True: actually make the changes.
trace = True

# Python 3: print is a function, not a statement.
print('-' * 40)

tag = 'import leo'
n = len('import ')

for p in p.self_and_subtree():
    result = []
    for s in g.splitlines(p.b):
        i = s.find(tag, 0)
        if i > -1:
            # Do nothing if we have already done the translation.
            i2 = s.find('<<')
            i3 = s.find('import leo.core')
            if i2 == -1 and i3 == -1:
                i += n
                j = g.skip_c_id(s, i)
                word = s[i:j]
                rest = s[j:]
                # Rewrite "import leo<X>" as "import leo.core.<X> as <X>",
                # preserving any existing "as" clause.
                if rest.strip().startswith('as'):
                    s = s[:i] + 'leo.core.' + word + rest
                else:
                    s = s[:i] + 'leo.core.' + word + ' as ' + word + rest
                if trace:
                    print(p.h)
                    j, k = g.getLine(s, i)
                    print(s[j:k])
        result.append(s)
    result = ''.join(result)

    if change and result != p.b:
        c.setBodyString(p, result)

#@+node:ekr.20051110105027.149: *4* @button Change OnX to x in headline
# Change OnXxx to xxx in all headlines & body text.
for p in p.self_and_subtree():
    # Headlines.
    h = p.h
    if g.match(h, 0, "On") and len(h) > 2:
        h = h[2].lower() + h[3:]
        print(h)
        p.h = h
    # Body text: rewrite "def OnXxx" as "def xxx".
    s = p.b
    if s:
        i = s.find("def ")
        if i > -1:
            # Bug fix: the old code assigned the character to `c`,
            # shadowing Leo's commander.  Use `ch` instead.
            ch = s[i + 6].lower()  # First char after "def On".
            s = s[:i] + "def " + ch + s[i + 7:]
            print(p.h)
            p.b = s
#@+node:ekr.20071213062051: *4* @button Create headlines from body
# Create child nodes whose headline are all lines in the body.
s = p.b
lines = g.splitLines(s)
lines = [z.strip() for z in lines if z.strip()]

for line in lines:
    p2 = p.insertAsLastChild()
    while line.find('  ') > -1:
        line = line.replace('  ',' ')
    i = line.find('#')
    if i == -1:
        p2.initHeadString(line)
    else:
        # Put comments in the body.
        p2.initHeadString(line[:i].strip())
        c.setBodyString(p2,line[i:].strip())
c.redraw()
#@+node:ekr.20051204180404.1: *4* @button Delete from ':' to end (script)
@color

s = p.b
result = []
lines = g.splitLines(s)
for line in lines:
    i = line.find(':')
    if i > -1:
        result.append(line[:i])
        if line and line[-1] == '\n':
            result.append('\n')
    else:
        result.append(line)

s = ''.join(result)
c.setBodyString(p,s)
#@+node:ekr.20060303080421: *4* @button Delete from first blank
@color

s = p.b
result = []
lines = g.splitLines(s)
for line in lines:
    i = line.find(' ')
    if i > -1:
        result.append(line[:i])
        if line and line[-1] == '\n':
            result.append('\n')
    else:
        result.append(line)

s = ''.join(result)
c.setBodyString(p,s)
#@+node:ekr.20060813102424: *4* @button Remove blank trailing lines
for p in p.self_and_subtree():
    s = p.b
    target = s.rstrip()
    if s != target:
        c.setBodyString(p,target)
        g.es(p.h)
#@+node:ekr.20051218212007: *4* @button Replace =.* with = None
@color

s = p.b
result = []
lines = g.splitLines(s)
for line in lines:
    i = line.find('=')
    if i > -1:
        result.append(line[:i] + '= None')
        if line and line[-1] == '\n':
            result.append('\n')
    else:
        result.append(line)

s = ''.join(result)
c.setBodyString(p,s)
#@+node:ekr.20140426052603.18106: *4* @button Replace g.choose
'''Replace "g.choose(test,a,b)" by "a if test else b".'''

replace = False # True: make the actual replacements

class Controller:
    @others

if 0: # Testing convenience...
    g.cls()
    c.save()
    p = g.findNodeAnywhere(c,'Code')
    assert(p)
Controller().run(c,p,replace)
#@+node:ekr.20140426052603.18107: *5* run
def run(self,c,p,replace):
    '''Main line for undoable replace g.choose with ternary operator.

    Scans p and its subtree for g.choose(...) calls.  When replace is True,
    rewrites each call as "a if cond else b" inside a single undo group;
    otherwise only counts the calls found.
    '''
    # changed: nodes rewritten; n: nodes scanned; found: calls seen.
    changed,n,found,p1 = 0,0,0,p.copy()
    dirtyVnodeList,tag,u = [],'replace g.choose',c.undoer
    u.beforeChangeGroup(p1,tag)
    for p in p.self_and_subtree():
        s,i = p.b,0
        n += 1
        while i < len(s):
            progress = i
            i,j = self.find(s,i)
            if i == -1: break
            found += 1
            # Replace s[i:j] and resume scanning just after the replacement.
            s,i = self.replace(s,i,j)
            assert progress < i
        if replace and p.b != s:
            changed += 1 
            # Make the per-node change part of the undo group.
            b = c.undoer.beforeChangeNodeContents(p)
            dirtyVnodeList2 = p.setDirty()
            dirtyVnodeList.extend(dirtyVnodeList2)
            p.b = s
            c.undoer.afterChangeNodeContents(p,tag,b)
    u.afterChangeGroup(p1,tag,dirtyVnodeList=dirtyVnodeList)
    print('scanned %s nodes found %s changed: %s' % (n,found,changed))
#@+node:ekr.20140426052603.18108: *5* find
def find(self,s,i):
    '''
    Return (n1,n2) such that s[n1:n2] is the next g.choose(cond,a,b) call in s[i:].
    Return -1,-1 if no more are found.
    '''
    while i < len(s):
        progress = i
        ch = s[i]
        if ch in '"\'':
            # Skip string literals so a quoted "g.choose(" is ignored.
            i = g.skip_python_string(s,i,verbose=False)
        elif ch == '#':
            i = g.skip_line(s,i) # This eats the comment!
        # elif ch == '(':
            # i = g.skip_matching_python_delims(s,i,'(',')')
            # if i > -1: assert s[i] == ')'
        else:
            for tag in ('g.choose(',): # 'choose('):
                if s[i:].startswith(tag):
                    # g.trace(s[i:i+20].strip())
                    i1 = i
                    # Position on the '(' and skip to its matching ')'.
                    i += len(tag)-1
                    i = g.skip_matching_python_delims(s,i,'(',')')
                    if i > -1:
                        assert s[i] == ')',repr(s[i:i+20])
                        return i1,i+1
            else:
                i += 1
        # Skip helpers return -1 on failure; make minimal progress instead.
        if i == -1: i = progress + 1
        assert progress < i,(i,s,ch)
    return -1,-1
#@+node:ekr.20140426052603.18109: *5* get_arg
def get_arg(self,s,i):
    '''Return (arg, j) where arg is the normalized text of the argument
    that starts after the '(' or ',' at s[i], and s[j] is the ',' or ')'
    that terminates it.  Whitespace is collapsed to single spaces.'''
    assert s[i] in '(,',repr(s[i:i+10])
    i += 1
    result = []
    while i < len(s):
        progress = i
        ch = s[i]
        if ch in ',)':
            break
        if ch in ' \t\n':
            # Don't put leading ws, and don't duplicate ws.
            if len(result) > 0 and result[-1] != ' ':
                result.append(' ')
            i += 1
        elif ch in '"\'':
            # Copy string literals verbatim: commas inside them do not
            # terminate the argument.
            j = g.skip_python_string(s,i,verbose=False)
            if j > -1:
                result.append(s[i:j])
                i = j
            else:
                i += 1
        elif ch == '#':
            i = g.skip_line(s,i) # This eats the comment.
        elif ch == '(':
            # Copy nested calls verbatim up to the matching ')'.
            j = g.skip_matching_python_delims(s,i,'(',')')
            assert s[j] == ')',s[j:j+10]
            result.append(s[i:j+1])
            i = j+1
        else:
            result.append(ch)
            i += 1
        assert progress < i
    assert s[i] in ',)'
    s = ''.join(result)
    return s,i
#@+node:ekr.20140426052603.18110: *5* munge
def munge(self,s):
    '''Compute the ternary operator corresponding to s.

    s must be the full text of a g.choose(cond,a,b) call; the result is
    "a if cond else b".
    '''
    g.trace('1:',s)
    for tag in ('g.choose',): # 'choose'):
        if s.startswith(tag):
            i = len(tag)
            break
    else: assert False
    assert s[i] == '('
    args = []
    # g.choose takes exactly three arguments: cond, a, b.
    for n in (0,1,2):
        arg,i = self.get_arg(s,i)
        assert arg
        args.append(arg)
    arg1,arg2,arg3 = args
    result = '%s if %s else %s' % (arg2,arg1,arg3)
    g.trace('2:',result)
    return result
#@+node:ekr.20140426052603.18111: *5* replace
def replace(self,s,i,j):
    '''Substitute the ternary form of the g.choose call at s[i:j] back into
    s.  Return (new_s, new_i) where new_i is just past the substitution.'''
    original = s[i:j]
    converted = self.munge(original)
    if original == converted:
        return s,j
    new_s = s[:i] + converted + s[j:]
    return new_s, i + len(converted)
#@+node:ekr.20060808103945: *4* @button Set trailing ws
'''
This script quickly ends all nodes in the selected tree with exactly one
newline and marks all @thin/@file nodes dirty if any of their descendents have
been changed.
'''

@others

p = c.currentPosition()
pass1(p) # Make the changes and do p.v.setDirty for all changed nodes p.
pass2() # Quickly set all @thin/@file nodes dirty if any of their descendents are dirty.
g.es_print('done')
c.redraw()
#@+node:ekr.20060808103945.1: *5* pass1
def pass1(root):

    '''Remove trailing newlines from all nodes.'''

    count = 0 ; seen = {}  # seen: vnodes already handled (skips clones).
    for p in root.self_and_subtree():
        if seen.get(p.v): continue
        s = p.b
        if s:
            # Normalize the body to end with exactly one newline.
            s2 = s.rstrip() + '\n'
            if s2 != s:
                s2 = g.toUnicode(s2,g.app.tkEncoding,reportErrors=True)
                # Poke the vnode directly: avoids a redraw per node.
                p.v._bodyString = s2
                seen [p.v] = True
                p.v.setDirty() # Just set the bit: do **not** redraw!
                count += 1

    g.es_print("pass 1: %d nodes converted" % count)
#@+node:ekr.20060808103945.2: *5* pass2
def pass2():
    '''Quickly mark all changed @file nodes dirty.'''
    count = 0
    for p in c.all_unique_positions():
        if not p.isAnyAtFileNode():
            continue
        root = p.copy()
        # Mark the @file node dirty if it or any descendant is dirty.
        if any(p2.v.isDirty() for p2 in root.self_and_subtree()):
            root.setDirty()
            count += 1
    g.es_print("pass 2: %d @file/@thin nodes set dirty" % count)
#@+node:ekr.20071001114854: *4* @button Split defs in body text to child nodes
# The parse-body command now does this.
# script to split node containing multiple def's into child nodes

def createChild (parent,body,line):
    '''Create a child of parent whose headline is the def name parsed from
    line ("def name(...)") and whose body is the accumulated lines.'''
    p = parent.insertAsLastChild()
    h = line [3:].strip()
    i = h.find('(')
    # Bug fix: func was unbound (NameError at `func or h`) when the def
    # line had no '('.  Default to '' so the full headline is used.
    func = h[:i].strip() if i > -1 else ''
    h2 = func or h
    p.setHeadString(h2)
    p.setTnodeText(''.join(body))

s = p.b
if s.strip():
    b = c.undoer.beforeChangeTree(p)
    lines = g.splitLines(s) ; body = [] ; changed = False ; lastDefLine = ''
    for line in lines:
        if g.match_word(line,0,'def'):
            if body and lastDefLine:
                createChild(p,body,lastDefLine)
                body = [] ; changed = True
            lastDefLine = line
        body.append(line)
    if body and lastDefLine:
        createChild(p,body,lastDefLine)
        changed = True
    if changed:
        c.setChanged(True) ; c.setBodyString(p,'')
    c.undoer.afterChangeTree(p,'split-defs',b)
    c.redraw()
#@+node:ekr.20150416171133.1: *3* @ignore text-processing buttons
#@+node:ekr.20041228135008: *4* @button display c.p.b in the browser
@language python
@tabwidth -4

import webbrowser

if 0:
    << alternate code doesn't work well for me >>

def showHtml(html):
    '''Write html to a temp file and display it in a web browser.'''
    fileName = g.os_path_join(g.app.loadDir,'..','test','leoTemp.html')
    # 'file' was the Python 2 builtin; use open, closed via with.
    with open(fileName, 'w') as f:
        f.write(html)
    webbrowser.open(fileName)

# Display the selected node in the browser.
showHtml('<pre>%s</pre>' % c.p.b)

# To do: use a stylesheet to colorize the code.
#@+node:ekr.20041228140714: *5* << alternate code doesn't work well for me >>
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/347810
import http.server  # Python 3 name for the old BaseHTTPServer module.

def showHtml(html):
    """Display html in the default web browser without creating a temp file.

    Instantiates a trivial http server and calls webbrowser.open with a URL
    to retrieve html from that server.
    """

    class RequestHandler(http.server.BaseHTTPRequestHandler):
        def do_GET(self):
            g.trace(html)
            bufferSize = 1024 * 1024
            # Python 3: wfile.write requires bytes, and range replaces xrange.
            for i in range(0, len(html), bufferSize):
                self.wfile.write(html[i:i + bufferSize].encode('utf-8'))

    # Port 0 lets the OS pick a free port.
    server = http.server.HTTPServer(('127.0.0.1', 0), RequestHandler)
    webbrowser.open('http://127.0.0.1:%s' % server.server_port)
    server.handle_request()
#@+node:ekr.20060821105606: *4* @button LeoToHtml
# LeoToHTML by Dan Rahmel

<< docstring >>

flagIgnoreFiles = True
flagJustHeadlines = True
filePath = "c:\\"

myFileName = c.frame.shortFileName()    # Get current outline filename
myFileName = myFileName[:-4]            # Remove .leo suffix
# Open file for output
f=open(filePath + myFileName + ".htm", 'w')

# Write HTML header information
f.write("<HTML>")
f.write("<BODY>")

for p in c.all_positions():
    myLevel = str(p.level() + 1)
    myHeadline = p.h
    # Check for node with LeoToHTML and ignore it
    if not myHeadline.upper() == "LEOTOHTML":
        if myHeadline[:5] != "@file" and not flagIgnoreFiles:
            # Write headline at current style level. indent level: 3
            f.write("<H" + myLevel + ">" + myHeadline + "</H" + myLevel + ">")
            # If including body text, convert it to HTML usable format
            if not flagJustHeadlines:
                myBody = p.b.encode( "utf-8" )
                f.write("<P>" + myBody)

# Write closing HTML info
f.write("</BODY>")  
f.write("</HTML>")

# Close file
f.close()
g.es(" Leo -> HTML completed.",color="turquoise4")
#@+node:ekr.20060821105606.1: *5* << docstring >>
'''
This script takes an outline stored in LEO and outputs it to HTML code.
The HTML can be stuck into a web page, loaded into the Microsoft Word
outline view for printing, or a million other uses.

--- Instructions ---
In LEO, open your outline, insert a node, and set the headline to LEOTOHTML.
Paste this code into the text body. To generate a .htm file of the outline,
select the Execute Script option under the Edit menu while the node is selected.

The switches below allow you to customize some of the settings. Set the flag
properties to True or False.
'''
#@+node:ekr.20060822112840: *4* @button LeoToRtf
# LeoToRTF by Dan Rahmel

<< docstring >>

# The switches below allow you to customize some of the settings.
flagIgnoreFiles = True
flagJustHeadlines = False
filePath = "c:\\"

myFileName = c.frame.shortFileName() # Get current outline filename
myFileName = myFileName[:-4] # Remove .leo suffix

g.es(" Leo -> RTF started...",color="turquoise4")

# Open file for output
f=open(filePath + myFileName + ".rtf", 'w')

# Write RTF header information
f.write("{\\rtf1\\ansi\\ansicpg1252\\deff0\\deflang1033{\\fonttbl{\\f0\\fswiss\\
fcharset0 Arial;}}")
f.write("{\\*\\generator LEOtoRTF;}\\viewkind4\\uc1\\pard\\f0\\fs20")

for p in c.all_positions():
    myLevel = str(p.level() + 1)
    myHeadline = p.h
    # Check for node with LeoToHTML and ignore it
    if not myHeadline.upper() == "LEOTORTF":
        if not (myHeadline[:5] == "@file" and flagIgnoreFiles):
            # Write headline with correct # of tabs for indentation
            myOutput = ("\\tab"*int(myLevel)) + " " + myHeadline +"\\par"
            myOutput = myOutput.encode( "utf-8" )
            f.write(myOutput)
            # If including outline body text, convert it to RTF usable format
            if not flagJustHeadlines:
                myBody = p.b.encode( "utf-8" ) 
                f.write("\\tab"*int(myLevel) + " " + myBody + "\\par")

# Write RTF close
f.write("}")  

# Close file
f.close()
g.es(" Leo -> RTF completed.",color="turquoise4")
#@+node:ekr.20060822112840.1: *5* << docstring >>
'''
This script takes an outline stored in LEO and outputs it to an RTF.
The tabbed RTF file can be loaded into Microsoft Word and formatted as a
proper outline with the Bullets and Numbering formatting options

--- Instructions ---
In LEO, open your outline, insert a node, and set the headline to LEOTORTF.
Paste this code into the text body. To generate a .rtf file of the outline,
select the Execute Script option under the Edit menu while the node is
selected.
'''
#@+node:ekr.20041229163210: *4* @button Send colorized text to web browser
'''A script to send the colorized text of a script to the default web browser.

Based on a dynascript by 'e'.'''

@language python
@tabwidth -4

<< imports >>

hopts = {
  'stripcomments':  False,
  'stripsentinals': True,
  'stripnodesents': False, # False: leave node sentinels.
  'stripdirectives':False,
  'noNUMBER':       False,
  'noOP':           False,
  'noNAME':         True,  
  'timestring':     '' # time.strftime('%m/%d/%Y %H:%M.%S'),
}

<< init globals >>

filename = g.os_path_join(g.app.loadDir,'..','test','leoTemp.html')

@others

htmlize(c,p)
#@+node:ekr.20041229164609: *5* << imports >>
import cgi
import cStringIO
import keyword
import os
import re
import sys
import time
import token
import tokenize
import webbrowser
#@+node:ekr.20041229163210.2: *5* << init globals >>
_KEYWORD = token.NT_OFFSET + 1
_TEXT    = token.NT_OFFSET + 2

_colors = {
    token.NUMBER:     '#483D8B', #black/darkslateblue
    token.OP:         '#000080', #black/navy
    token.STRING:     '#00AA00', #green 00cc66
    tokenize.COMMENT: '#DD0000', #red cc0033
    token.NAME:       '#4B0082', #black/indigo
    token.ERRORTOKEN: '#FF8080', #redred bare null does it
    _KEYWORD:         '#0066ff', #blue
    _TEXT:            '#000000', #black /is text fg color too
    '_LeoDir':        '#228B22', #directive, forest comment
    '_LeoSen':        '#BC8F8F', #sentinal, tan fade comment
    'bg':             '#FFFAFA', #snow
}

if hopts['noNUMBER']: del _colors[token.NUMBER]
if hopts['noOP']:     del _colors[token.OP]
if hopts['noNAME']:   del _colors[token.NAME]
#@+node:ekr.20041229170824: *5* stripSentinels
def stripSentinels(s):
    '''Return s with Leo sentinel lines removed, honoring the hopts
    switches for directives and node sentinels.'''
    tag_open = '#@+node:'
    tag_close = '#@-node:'
    taglen = len(tag_open)
    keep = []
    verbatim = False
    for line in s.splitlines():
        stripped = line.strip()
        if verbatim:
            # The line following #@verbatim is always kept as-is.
            keep.append(line)
            verbatim = False
        elif stripped.startswith('#@verbatim'):
            verbatim = True
        elif stripped.startswith('#@@'):
            if not hopts['stripdirectives']:
                keep.append(line)
        elif stripped.startswith(tag_open):
            if not hopts['stripnodesents']:
                # Shorten the sentinel: drop the gnx after the tag.
                k = line.find(tag_open)
                keep.append(line[:k] + '#@+' + line[k + taglen:].strip())
        elif stripped.startswith(tag_close):
            if not hopts['stripnodesents']:
                k = line.find(tag_close)
                keep.append(line[:k] + '#@-' + line[k + taglen:].strip())
        elif not stripped.startswith('#@'):
            # Ordinary line: keep it; all other #@ sentinels are dropped.
            keep.append(line)
    return '\n'.join(keep)
#@+node:ekr.20041229165956: *5* sanitize
def sanitize(s):
    """Build a safe filename fragment from s: strip it, replace punctuation
    and whitespace with underscores, collapse doubled underscores, and cap
    the length at 128.  (Leo's sanitize_filename is too aggressive and too
    lax.)  Returns None for a falsy s."""
    if not s:
        return
    bad_chars = re.compile(
        r"""[|\\ /!@=\#\$%,\x5E&\x3F:;.\x22\x27<>`~\*\+\t\n\f\r\b\a]""",
        re.IGNORECASE | re.VERBOSE)
    # should test for unicode before str()
    cleaned = bad_chars.sub('_', str(s.strip()))
    return cleaned.replace('__', '_')[:128]
#@+node:ekr.20041229163210.3: *5* class Parser
class Parser(object):
    """ prep the source for any language
        parse and Send colored python source.
    """
    @others
#@+node:ekr.20041229163210.4: *6* __init__
def __init__(self,lang,raw,title):
    """ Store the source text."""

    self.title = title
    self.raw = raw.strip().expandtabs(4)

    if lang == 'python':
        cmtdelim = '#'
    else:
        d = c.scanAllDirectives(p) 
        cmtdelim = d.get('delims',['#'])
        cmtdelim = cmtdelim[0] or cmtdelim[1]

    self.fnd = re.compile(r"%s@\s*@+."%(cmtdelim,) )

    if hopts['stripsentinals']: 
        # have to add option to strip all comments as well
        self.raw = stripSentinels(self.raw)
#@+node:ekr.20041229163210.5: *6* format
def format(self,formatter,form):
    """Tokenize self.raw and write it, colorized as HTML, to sys.stdout."""

    # Store line offsets in self.lines: self.lines[row] is the offset of
    # the start of that (1-based) row.
    self.lines = [0,0]
    pos = 0
    while 1:
        pos = self.raw.find('\n',pos) + 1
        if not pos: break
        self.lines.append(pos)
    self.lines.append(len(self.raw))
    self.pos = 0
    # Python 3: io.StringIO replaces the old cStringIO module.
    import io
    text = io.StringIO(self.raw)
    sys.stdout.write('<html><head><title>')
    sys.stdout.write('%s </title>\n'%(sanitize(self.title), ))

    #here would be a good spot for @noindent directive but skip a line
    s = """<STYLE TYPE="text/css"><!--
pre, H1 {color:%s; FONT-SIZE: 80%%; FONT-WEIGHT: bold; }
Text {background:%s;}
--></STYLE>
<SCRIPT LANGUAGE="JavaScript">
<!-- //
//-->
</SCRIPT>""" % (_colors[_TEXT],_colors['bg'])

    sys.stdout.write(s)
    sys.stdout.write('</head><body text="%s" bgColor="%s">' % (_colors[_TEXT],_colors['bg']))
    sys.stdout.write('<H3># %s</H3>\n'%self.title)
    sys.stdout.write('<pre>')  # style
    sys.stdout.write('<font face="Lucida,Courier New">')
    # Parse the source and write it; self is the per-token callback.
    try:
        # Python 3: the tokeneater-callback form of tokenize.tokenize is
        # gone; generate_tokens yields tuples from a str readline.
        for tok in tokenize.generate_tokens(text.readline):
            self(*tok)
    except tokenize.TokenError as ex:
        # Python 3 'except ... as' syntax; exceptions are not indexable.
        msg = ex.args[0]
        line = ex.args[1][0]
        print("<h3>ERROR: %s</h3>%s" % (msg, self.raw[self.lines[line]:]))
    sys.stdout.write('</font></pre>')
    # Bug fix: the closing tag contained a stray quote ('</body">').
    sys.stdout.write('</body></html>')
#@+node:ekr.20041229163210.6: *6* __call__
def __call__(self, toktype, toktext, start, end, line):
    """Token handler: write one colorized token to sys.stdout.

    start and end are (row, col) tuples, as supplied by tokenize.
    """
    # Python 3 removed tuple parameters; unpack explicitly.
    srow, scol = start
    erow, ecol = end

    if 0:
        print("type", toktype, token.tok_name[toktype], "text",
              toktext, "start", srow, scol, "end", erow, ecol, "<br>")

    # calculate new positions
    oldpos = self.pos
    newpos = self.lines[srow] + scol
    self.pos = newpos + len(toktext)

    # handle newlines
    if toktype in [token.NEWLINE, tokenize.NL]:
        print()
        return

    style = ''
    if toktype == tokenize.COMMENT:
        if toktext.lstrip().startswith('#@'):
            # Classify Leo comments: directives vs. sentinels.
            if self.fnd.findall(toktext):
                toktype = '_LeoDir'
            else:
                toktype = '_LeoSen'

    # Send the original whitespace.
    if newpos > oldpos:
        sys.stdout.write(self.raw[oldpos:newpos])

    # Skip indenting tokens.
    if toktype in [token.INDENT, token.DEDENT]:
        self.pos = newpos
        return

    # Map token type to a color group
    if token.LPAR <= toktype and toktype <= token.OP:
        toktype = token.OP
    elif toktype == token.NAME and keyword.iskeyword(toktext):
        toktype = _KEYWORD
    if toktype == token.ERRORTOKEN:
        style = ' style="border: solid 1.5pt #FF0000;"'

    dofont = True
    try:
        color = _colors[toktype]
    except Exception:
        dofont = False

    if dofont:
        sys.stdout.write('<font color="%s"%s>' % (color, style))
    # html.escape replaces cgi.escape (removed in Python 3.8);
    # quote=False matches cgi.escape's default behavior.
    import html
    sys.stdout.write(html.escape(toktext, quote=False))
    if dofont:
        sys.stdout.write('</font>')
#@+node:ekr.20041229164609.2: *5* htmlize
def htmlize(c,p):
    lang = g.scanForAtLanguage(c,p)
    lang = str(lang).lower()
    source = g.getScript(c,p)
    timestring = hopts['timestring']
    # title = "%s Leo %s script %s" % (p.h[:75],lang,timestring)
    title = "%s %s" % (p.h[:75],timestring)
    try:
        if not source: raise ValueError
        g.es('output', lang, p.h)
        theParser = Parser(lang,source,title)
        sys.stdout = open(filename,'wb') 
        if lang == 'python':
            theParser.format(None,None)
        else:
            << colorize with silvercity >>
        sys.stdout.close()
        sys.stdout = sys.__stdout__
        webbrowser.open(filename, new= 1)
    except ValueError:
        g.es('no @path set, unsupported lang or empty script',color='tomato')
        g.es(lang, p.h)
    except Exception:
        g.es('htmlize malfunction?', color='tomato')
        g.es_exception()
#@+node:ekr.20041229163210.7: *6* << colorize with silvercity >>
if lang in [ # Leo may not have all of these yet
    'csharp', 'c', 'c++', 'cpp', # (C and C++)
    'css', # (Cascading Style Sheets)
    'htm', 'html', # HTML/PHP w/ JavaScript, VBScript, Python
    'plain', #null (No styling)
    'perlpod', 'perl', # (Perl)
    #'python', # (Python)
    'ruby', # (Ruby)
    'smart_python', # (Python with styled strings)
    'sql', # (SQL)
    'xml', # (XML)
    'xslt', # (XSLT)
    'yaml', # (YAML)
    # basic & java? missing. might send java as c?
    'elisp', 'php', 'java', 'rapidq', 'actionscript', 'css',
]:
    # Map related languages onto the generators silvercity provides.
    if lang in ('htm','html','php','java','rapidq','actionscript', 'css'):
        lang = 'html'
    elif lang in ['c','c++','cpp']: lang = 'cpp'
    elif lang in ['perlpod','perl']: lang = 'perl'
    elif lang in ['elisp',]: lang = 'perl'
    if lang in ('plain',None): lang = 'null'

    g.es('writing tmpname', tmpfile )
    # 'file' was the Python 2 builtin; use open, closed via with.
    with open(tmpfile, 'w') as fo:
        fo.writelines(pars.raw + "\n")

    cmd = g.os_path_join(pypath, 'Scripts', 'source2html.py')

    # Send the output to stdout
    #" --view %N  %N.html"
    # --css=file copy silver_city.css where the filename will be
    # source2html.py --list-generators
    # NOTE(review): the original called sanitize_(title); no sanitize_ is
    # defined anywhere visible, and sanitize (defined above) looks intended.
    params = ' --generator=%s --title=%s --css=silver_city.css %s'%(
       lang, sanitize(title), tmpfile,)

    if not g.os_path_exists(cmd):
        g.es('cant find source2html install silvercity')
        print('cant find source2html from silvercity')
    else:
        g.es('running silvercity \n', py + cmd + params )
        out, err = runcmd(py + cmd + params )
        for x in (out + err).splitlines():
            print(x)
else:
    print('<i>not a known htmlize supported language</i>')
    #might have to do a sequential dump of nodes, or @rst?
    #is title and first headline set the same for all options?
    print('<Pre>')
    print(pars.raw)
    print('</Pre>')
#@+node:ekr.20160302141032.1: *3* Commands that support Microsoft outlook format
# support Windows 'outlook:' protocol in URLs
# By Brian Theado and Jon N.

https://groups.google.com/forum/#!searchin/leo-editor/ability$20to$20support$20Windows$20$27outlook$3A$27$20protocol$20in$20URLs$3F/leo-editor/vIaqh0DXbek/Y1AINopXDAAJ
#@+node:ekr.20160302141032.2: *4* @@command insert-outlook-email-link
'''
Copies outlook link of current selected email into the body at the current
insertion point.
'''

import win32com.client

ol = win32com.client.Dispatch("Outlook.Application")
m = ol.ActiveExplorer().Selection.Item(1)
u = "outlook:%s <MESSAGE: %s>" % (m.EntryID, m.Subject)

w = c.frame.body.wrapper
i = w.getInsertPoint()
w.insert(i, u)

@language python



#@+node:ekr.20160302141032.3: *4* @@command open-selected-outlook-link
'''Launches the outlook url (the url text must be selected).'''
import os
w = c.frame.body.wrapper
r = w.getSelectionRange()
url = w.get(r[0], r[1])
os.startfile(url, 'open')
#@+node:ekr.20111004090723.15496: *3* Create pdf file from LaTeX file
<< docstring >>

@language python
@tabwidth -4

import os
import subprocess

@others

log = g.es_print

exe = r'C:\apps\MiKTeX 2.9\miktex\bin\pdflatex.exe'
    # The full path to the executable.

d = get_options(exe)

makePDF(d,open_out=True,trace=True)
#@+node:ekr.20111004185540.15535: *4* << docstring >>
@language rest

'''
A script for creating a pdf from a latex file.

**Important**: you must run Leo from a console when using this script, at least
until the script can drive pdflatex in some kind of unattended mode. As it is
now, pdflatex is likely to hang, waiting for console input. The typical (only?)
response is to hit carriage returns until pdflatex finishes.

There are two modes of operation, static and dynamic, determined by the contents
of **d**, the **option dictionary**, returned by get_options.

**Important**: change get_options as described below to meet your needs.

If d.get('in_fn') exists, the script operates in static mode. Otherwise, the
script operates in dynamic mode.

Static mode
===========

d.get('in_fn') must be the full path to the input file.

If d.get('out_dir') exists, it must be the full path to the output directory.
The out_dir setting is optional. If omitted, output goes to the directory
containing the input file.

Dynamic mode
============

Rather than use d.get('in_fn'), the script finds the nearest @<file> node in
c.p.self_and_parents(). This node, *whatever it is*, is assumed to contain LaTeX
text.

**Important**: if in_fn is a relative path, it is converted to an absolute path
by taking account of any ancestor @path directive. If the resulting path is
still relative, it is taken to be a path relative to the directory containing
the .leo file. Here is the actual code::
    
    c_dir = g.os_path_dirname(c.fileName())
    fn = g.os_path_join(c_dir,c.getNodePath(p),fn)
    
If d.get('out_dir') exists, it may be either an absolute or relative path. If it
is a relative path, it is taken to be a path relative to the path given by the
@<file> node. That is::
    
    out_dir = g.os_path_join(g.os_path_dirname(fn),out_dir)

Other settings
==============

- exe:  The full path to the pdf creation program.
        'pdflatex' is part of MikTex.

- open_out: True: open the output file automatically after it is created.

- trace: True: output log messages using g.es_print.

The exe setting is a global variable.  The open_out and trace settings
are arguments to the makePDF function.

To do
=====

- Add an argument to the command that would have pdflatex run without user input.

- determine if the node held a rst file and first turn this into Latex.

- open the log file or read it into a node if there is a compiling error.

- It might be useful to have some settings in the file node that let the script
  automatically add basic LaTeX packages, items and definitions for a particular
  type of document (Title page, TOC, set date, use the beamer package to create
  slides, etc.)

- It would be really cool to be able to render TEX to PDF in real time like
  viewrendered handles HTML and ReST.
  
Acknowledgements
================

The original script by M.D.Boldin.  Rewritten by EKR.
'''

#@+node:ekr.20111004185540.15537: *4* get_options
def get_options(exe):
    """Return the user options for makePDF as a dict.

    Flip `use_static` to switch between a hard-coded input file
    (static mode) and the nearest @<file> node (dynamic mode).
    """
    use_static = False
    if use_static:
        # Static mode: absolute paths to the input file and output directory.
        return {
            'exe': exe,
            'in_fn': r'C:\Users\edreamleo\Latex\sample.tex',
                # The full path to the input file.
            'out_dir': r'C:\Users\edreamleo\Latex\out',
                # The full path to the output directory.
        }
    # Dynamic mode: the nearest @<file> node gives the input file.
    # If all paths are relative, the result is relative to the
    # directory containing this .leo file.
    return {
        'exe': exe,
        'out_dir': 'out',
            # The output directory, relative to the directory
            # of the input file.  None is valid.
    }
#@+node:ekr.20111004185540.15540: *4* log
def log(s):
    """Print *s* to Leo's log pane, highlighted in red."""
    g.es_print(s, color='red')
#@+node:ekr.20111004090723.15502: *4* makePDF
def makePDF (d,open_out,trace):
    
    '''Run pdflatex on the file described by option dict *d*.

    d:        option dict from get_options.
    open_out: True: open the generated .pdf after creating it.
    trace:    True: write progress messages via log().

    Does nothing if setup(d) cannot produce a command.
    '''
    command,out_fn = setup(d)
    if not command: return
    
    # Save the outline to save the file.
    c.save()

    if trace:
        log('\nMakePDF: command...')
        for arg in command:
            log('  %s' % arg)
        log('')

    # Execute the command and wait for it to finish.
    # Note: pdflatex may block waiting for console input.
    subprocess.Popen(command).wait()

    if open_out:
        if trace: log('Opening: %s' % out_fn)
        os.startfile(out_fn)  # os.startfile is Windows-only.
    
    if trace: log('Done')
#@+node:ekr.20111004182631.15511: *4* setup & helpers
def setup(d):
    """Return (command, out_fn).

    command: the argv list for pdflatex, or None on error.
    out_fn:  the full path of the .pdf the command will create, or None.
    """
    in_fn, out_dir = compute_file_names(d)
    if not (in_fn and out_dir):
        return None, None
    # Strip '.tex'; pdflatex appends '.pdf' to the base name itself.
    base = g.shortFileName(in_fn[:-4])
    out_fn = g.os_path_join(out_dir, base) + '.pdf'
    command = [
        d.get('exe'),
        '-output-directory=%s' % out_dir,
        in_fn,
    ]
    return command, out_fn
#@+node:ekr.20111004185540.15538: *5* compute_file_names
def compute_file_names (d):
    
    '''Return the absolute paths to the input file and the output directory.

    Static mode:  d contains 'in_fn'; use it (and optional 'out_dir') as given.
    Dynamic mode: derive the input file from the nearest @<file> ancestor of c.p.
    Returns (None,None), after logging a message, if either path does not exist.
    '''
    
    fn = d.get('in_fn')
    if fn:
        # Static, absolute paths.
        out_dir = d.get('out_dir') or g.os_path_dirname(fn)
        if g.os_path_exists(fn) and g.os_path_exists(out_dir):
            return fn,out_dir
        elif not g.os_path_exists(fn):
            log('File not found: %s' % fn)
            return None,None
        else:
            log('Directory not found: %s' % out_dir)
            return None,None
    else:
        # Dynamic file name from the nearest @<file> node.
        for p in c.p.self_and_parents():
            if p.isAnyAtFileNode():
                fn = p.anyAtFileNodeName()
                break
        else:
            # for-else: no ancestor was an @<file> node.
            log('no <@file> node found at: "%s"' % c.p.h)
            return None,None
        
        # Compute the absolute path to the filename,
        # taking into account any relevant @path directives.
        c_dir = g.os_path_dirname(c.fileName())
        fn = g.os_path_join(c_dir,c.getNodePath(p),fn)
        # A relative out_dir is taken relative to the input file's directory.
        out_dir = d.get('out_dir') or ''
        out_dir = g.os_path_join(g.os_path_dirname(fn),out_dir)
        
        if g.os_path_exists(fn) and g.os_path_exists(out_dir):
            return fn,out_dir
        else:
            log('not found: %s' % fn)
            return None,None
#@+node:ekr.20231008041100.1: *3* script: to-github
"""
Convert a tree of nodes to a GitHub-formatted string.

This script assumes only the top-level node contains @others.
"""

# Note: There is an @others in the docstring! That's why we must use guide lines!

g.cls()
import re

if 1:  # Convert *this* script: @button to-github.
    source_h = '@button to-github'
    target_h = 'Flat to-github'
else:  # Convert the original script.
    source_h = '@button create-rst'
    target_h = 'Flat create-rst'

# Find/create nodes.
source_p = g.findNodeAnywhere(c, source_h)
assert source_p
target_p = g.findTopLevelNode(c, target_h)
if not target_p:
    target_p = c.lastTopLevel().insertAfter()
    target_p.h = target_h
    
@others  # Define functions.
    
put_all_nodes(source_p, target_p)
c.redraw()
print('Done')

@language python
@tabwidth -4
#@+node:ekr.20231008041100.2: *4* put_all_nodes
def put_all_nodes(source_p, target_p):
    """Write source_p's body into target_p.b as a GitHub fenced python
    block, expanding @others (top-level node only) with the subtree bodies.
    """
    # Guide lines have comments/strings/docstrings removed, so an
    # '@others' inside a docstring cannot trigger a false expansion.
    body_lines = g.splitLines(source_p.b)
    guide_lines = delete_comments_and_strings(body_lines)
    assert len(body_lines) == len(guide_lines)
    out = [
        '```python\n\n',
        f"# {source_p.h}\n\n",
    ]
    for body_line, guide_line in zip(body_lines, guide_lines):
        out.append(body_line if body_line.strip() else '\n')
        if '@others' not in guide_line:
            continue
        # Expand @others: dump every descendant's body in outline order.
        out.append('\n')
        for descendant in source_p.subtree():
            if descendant.h.strip().startswith('<<'):
                out.append(f"# {descendant.h}\n")
            out.extend(g.splitLines(descendant.b + '\n'))
    # Close the fence.
    out.append('\n```\n')
    target_p.b = ''.join(out)
#@+node:ekr.20231008041100.3: *4* python_i.delete_comments_and_strings
# Copied from leo.plugins.importers.python.
string_pat1 = re.compile(r'([fFrR]*)("""|")')
string_pat2 = re.compile(r"([fFrR]*)('''|')")

def delete_comments_and_strings(lines: list[str]) -> list[str]:
    """Return *lines* with comments and string contents removed.

    Guarantees exactly one output line per input line, and handles
    f-string/raw prefixes and multi-line (triple-quoted) strings.
    """

    def consume_string(delim: str, pos: int, line: str) -> tuple[str, int]:
        """Scan past the remainder of an open string.

        Returns ('', index-after-close) when the string closes on this
        line, or (delim, len(line)) when it continues to the next line.
        """
        if delim not in line:
            return delim, len(line)
        closer = re.compile(delim)
        while pos < len(line):
            if line[pos] == '\\':
                pos += 2  # Skip the escaped character.
                continue
            if closer.match(line, pos):
                return '', pos + len(delim)
            pos += 1
        return delim, pos

    open_delim: str = ''  # Non-empty while inside a string.
    out: list[str] = []
    for line in lines:
        pos, kept = 0, []
        while pos < len(line):
            if open_delim:
                open_delim, pos = consume_string(open_delim, pos, line)
                continue
            ch = line[pos]
            if ch in '#\n':
                break  # Comment or end of line: drop the rest.
            m = string_pat1.match(line, pos) or string_pat2.match(line, pos)
            if m:
                # Start of a string: skip prefix + delimiter, then its body.
                prefix, open_delim = m.group(1), m.group(2)
                pos += len(prefix) + len(open_delim)
                if pos < len(line):
                    open_delim, pos = consume_string(open_delim, pos, line)
            else:
                kept.append(ch)
                pos += 1

        # Terminate the line and append it to the result.
        if line.endswith('\n'):
            kept.append('\n')
        out.append(''.join(kept))
    assert len(out) == len(lines)  # A crucial invariant.
    return out
#@+node:ekr.20050108110751.1: *3* Function: convert font tags to span tags
@ 
From Bill P.
Here is a function to convert the font tags to equivalent span tags. Note use
of font-size:x-large to show how styling can be used for emphasis.

Uses a regular expression to insert style CSS classes before </STYLE> closing
tag ...so <STYLE></STYLE> is expected to exist in input html.
@c

def font2span(colorizedusingFONT):
    """Convert legacy <font> tags to equivalent <span> tags driven by
    CSS classes, and inject the class definitions immediately before the
    closing </STYLE> tag (the input is expected to contain <STYLE></STYLE>).
    """
    styleClasses="""    .token_STRING    {color:#00AA00;} 
    .token_NUMBER    {color:#483D8B;}
    .token_OP        {color:#000080;} 
    .tokenize_COMMENT{color:#DD0000;} 
    .token_NAME      {color:#4B0082;} 
    .token_ERRORTOKEN{color:#FF8080;} 
    .KEYWORD        {color:#0066ff;} 
    .TEXT           {color:#000000;} 
    .LeoDir       {color:#228B22;} 
    .LeoSen       {color:#BC8F8F;font-size:x-large;} 
    .bg            {color:#FFFAFA;} 
    """
    html = colorizedusingFONT
    # Insert the class definitions into the existing <STYLE> element.
    # All patterns are literal text, so str.replace matches re.sub here.
    html = html.replace("</STYLE>", styleClasses + "\n</STYLE>")
    # Map each hard-coded font color to its CSS class.
    color_to_class = {
        "#00AA00": "token_STRING",
        "#483D8B": "token_NUMBER",
        "#000080": "token_OP",
        "#DD0000": "tokenize_COMMENT",
        "#4B0082": "token_NAME",
        "#FF8080": "token_ERRORTOKEN",
        "#0066ff": "KEYWORD",
        "#000000": "TEXT",
        "#228B22": "LeoDir",
        "#BC8F8F": "LeoSen",
        "#FFFAFA": "bg",
    }
    for color, css_class in color_to_class.items():
        html = html.replace(
            '<font color="%s">' % color,
            '<span class="%s">' % css_class)
    html = html.replace('</font>', '</span>')

    basefontSize = "12pt"
    basefontFamily = "Lucida,Courier New"
    html = html.replace(
        '<font face="Lucida,Courier New">',
        '<span style="font:' + basefontSize + ' ' + basefontFamily + ';">')

    return html
#@+node:ekr.20050108110751.2: *4* NewHeadline
@ To adjust this script internally to use span tags and CSS style classes make
the following changes to  three nodes as follows:
@c

# I. in "init globals" node change the _colors dictionary to use lists 
# (with 3 values representing color, CSS class, extra styling) as follows:

_colors = {
    token.NUMBER:     ['#483D8B','token_NUMBER',' '], #black/darkslateblue
    token.OP:         ['#000080','token_OP',' '], #black/navy
    token.STRING:     ['#00AA00','token_STRING',' '], #green 00cc66
    tokenize.COMMENT: ['#DD0000','tokenize_COMMENT',' '], #red cc0033
    token.NAME:       ['#4B0082','token_NAME',' '], #black/indigo
    token.ERRORTOKEN: ['#FF8080','token_ERRORTOKEN',' '], #redred bare null does it
    _KEYWORD:         ['#0066ff','KEYWORD',' '], #blue
    _TEXT:            ['#000000','TEXT',' '], #black /is text fg color too
    '_LeoDir':        ['#228B22','LeoDir',' '], #directive, forest comment
    '_LeoSen':        ['#BC8F8F','LeoSen','font-size:x-large;'], #sentinel, tan fade comment
    'bg':             ['#FFFAFA','bg',' '], #snow
}
#@+node:ekr.20160923133728.1: *3* Remove tabs from leo/modes/*.py
@language python

import glob
path = g.os_path_join(g.app.loadDir, '..', 'modes', '*.py')
# print(g.os_path_exists(path))
aList = glob.glob(path)
# print('\n'.join(sorted(aList)))
for fn in aList:
    f = open(fn, 'r')
    s1 = f.read()
    f.close()
    s2 = s1.replace('\t', '    ')
    if s1 != s2:
        print('changed: %s' % fn)
        f = open(fn, 'w')
        f.write(s2)
        f.close()
#@+node:ekr.20141105055521.16: *3* Replace body.x with body.wrapper.x
@language python

'''
A script to replace body.x with body.wrapper.x for all x in the WrapperAPI.

It is *not undoable* to save massive amounts of memory.
Please run on an already-saved .leo file, and take all other
reasonable precautions.

If replace is False, it will just report the changes to be made.
'''
import leo.core.leoFrame as leoFrame
replace = False
aList = sorted([z for z in dir(leoFrame.WrapperAPI) if not z.startswith('__')])
nodes = 0
for p in c.all_unique_positions():
    s = p.b
    nodes += 1
    found = False
    for target in aList:
        i = 0
        pattern = 'body.' + target
        while True:
            i = s.find(pattern,i)
            if i == -1:
                break
            if g.match_word(s,i,pattern):
                if not found:
                    print('In node: %s' % p.h)
                    found = True
                i1,i2 = g.getLine(s,i)
                if replace:
                    j = i + len('body.')
                    s = s[:j] + 'wrapper.' + s[j:]
                    print(s[i1:i2+len('wrapper.')].rstrip())
                    i += len('wrapper.') + len(pattern)
                else:
                    print(s[i1:i2].rstrip())
                    i += len(pattern)
            else:
                i += len(pattern)
    if found and replace:
        p.b = s
#@+node:ekr.20130425050120.12671: *3* Replace in directory
'''Replace a search pattern with a replacement pattern in a directory.'''
# Adapted from 2to3's crlf fixer.

g.cls()
write = False
path = g.os_path_finalize_join(g.app.loadDir,'..','extensions','docutils_modernize_bad')
find = 'import six'
change = 'from docutils.utils.u import u,text_type'
import os

def fix(filename):
    """Replace the module-level `find` pattern with `change` in one file.

    Skips directories and binary files (those containing a NUL byte).
    Writes the file back only when `write` is True and the contents changed.
    """
    if g.os_path_isdir(filename):
        return
    # Bug fix: the file is opened in binary mode, so s is bytes in
    # Python 3.  The original compared/replaced with str values
    # ('\0' in s, s.replace(find, change)), which raises TypeError.
    with open(filename, "rb") as f:
        s = f.read()
    if b'\0' in s:
        print('binary: %s' % filename)
        return
    # Encode the str search/replace globals to match the bytes data.
    new_s = s.replace(find.encode('utf-8'), change.encode('utf-8'))
    if new_s != s:
        print('changed: %s' % g.shortFileName(filename,n=2))
        if write:
            with open(filename, "wb") as f:
                f.write(new_s)

for root, dirs, files in os.walk(path):
    for fn in files:
        if fn.endswith('.py'):
            fn = g.os_path_join(root,fn)
            fix(fn)
print('done')
#@+node:ekr.20200505051752.1: *3* script: clone all files defining cmd
g.cls()

import re

pat = re.compile(r'^\s*def cmd\(', re.MULTILINE)

def predicate(root):
    return (
        root.isAnyAtFileNode() and
        any(pat.search(p.b) for p in root.self_and_subtree()))
    
c.cloneFindByPredicate(c.all_unique_positions, predicate)
#@+node:ekr.20200505051957.1: *3* script: convert @cmd decorators
"""
Script to convert @cmd to @g.command in one file, creating
top-level nodes for each new commands.

This script was an essential part of the abandoned #325:
https://github.com/leo-editor/leo-editor/issues/325

I am including it in scripts.leo as a model for future scripts.
"""
g.cls()
import re
<< define patterns >>
# Data...
change_file = True
require_c = True
fn = 'xxx.py'
instance = 'xxx'
root = g.findNodeAnywhere(c, f"@file {fn}")
assert root
if 1: # When a file contains only one kind of @cmd decorator.
    convert_root = root
else:
    convert_root = g.findNodeAnywhere(c, "class KeyHandlerClass")
    assert convert_root
@others
command_root = make_command_root(fn, root) if change_file else None
for p in convert_root.self_and_subtree():
    any_changed, parts, result = False, find_parts(p), []
    for i, part in enumerate(parts):
        new_body, command_lines, headline = do_part(p, part)
        if command_lines:
            any_changed = True
            make_command_node(command_root, command_lines, headline)
        result.extend(new_body)
    if any_changed:
        if change_file:
            print('-----', p.h)
            p.b = ''.join(result)
        else:
            g.printObj(result, tag=f"new body for {p.h}")
c.redraw()
print('')
print('Remember: delete cmd and update g.cmd_instance_dict')
#@+node:ekr.20200505051957.2: *4* << define patterns >>
# Invariant patterns...
cmd_pat = re.compile(r'^@cmd\(\'(.+)\'\)')
def_pat = re.compile(r'^def (\w+)')
doc_pat = re.compile(r'(""".*?""")', (re.MULTILINE | re.DOTALL))
#@+node:ekr.20200505051957.3: *4* dump_parts
def dump_parts(p, parts):
    """Trace every part of node p: its head lines, command matches, and tail."""
    total = len(parts)
    for num, part in enumerate(parts, start=1):
        g.trace(f"part {num} of {total}: {p.h}")
        g.printObj(part.head, tag='head')
        g.printObj(part.commands, tag='commands')
        g.printObj(part.tail, tag='tail')
#@+node:ekr.20200505051957.4: *4* find_parts
def find_parts(p):
    """
    Return a list of parts g.Bunch(commands, head, tail).
    """
    parts = []
    commands, head, tail = [], [], []
    for line in g.splitLines(p.b):
        m = cmd_pat.match(line)
        if m and tail:
            # End of previous part.
            parts.append(g.Bunch(commands=commands, head=head, tail=tail))
            commands, head, tail = [m], [], []
        elif m:
            # Add the command to *this* part.
            commands.append(m)
        elif commands:
            # Append to tail.
            tail.append(line)
        else:
            # Append to head.
            head.append(line)
    # End the previous part.
    if head or commands or tail:
        parts.append(g.Bunch(commands=commands, head=head, tail=tail))
    return parts
#@+node:ekr.20200505051957.5: *4* make_command_node
def make_command_node(command_root, command_lines, headline):
    # Create a child of command_root holding command_lines, or
    # (when change_file is False) just trace what would be created.

    if change_file:
        p = command_root.insertAsLastChild()
        p.b += ''.join(command_lines)
        p.h = headline
    else:
        # Dry run: show the node that *would* be created.
        g.trace('-----', headline)
        g.printObj(command_lines)
#@+node:ekr.20200505051957.6: *4* make_command_root
def make_command_root(fn, root):
    """Return the 'commands: <fn>' node, creating it as root's last child
    if it does not already exist anywhere in the outline."""
    headline = f"commands: {fn}"
    node = g.findNodeAnywhere(c, headline)
    if node:
        return node
    node = root.insertAsLastChild()
    node.h = headline
    return node
#@+node:ekr.20200505051957.7: *4* do_part
def do_part(p, part):
    """
    Handle the next part of node p.
    Return (new_body, command, headline)
    """
    commands, head, tail = part.commands, part.head, part.tail
    if not commands:
        return [], [], []
    #
    # Let block.
    def_line = tail.pop(0) if tail else ''
    m = def_pat.match(def_line)
    def_name = m.group(1) if m else '<no name>'
    call = f"{instance}.{m.group(1)}(event)" if m else 'pass'
    headline = f"'{commands[0].group(1)}'"
    #
    # Define the body lines first, so we can copy the docstring later.
    body_lines = head + [def_line] + tail
    body_s = ''.join(body_lines)
    #
    # Create the command_lines
    command_lines = [
        f"@g.command('{z.group(1)}')" + '\n' for z in commands
    ]
    command_lines += [
        f"def {def_name}(event):" + '\n',
    ]
    # Copy the doc part from body_s, *not* p.b!
    doc = doc_pat.search(body_s)
    if doc:
        doc_lines = g.splitLines(doc.group(1))
        command_lines += ['    ' + doc_lines[0]]
        command_lines += doc_lines[1:]
        command_lines += ['\n']
    if require_c:
        command_lines += [
            f"    c = event.get('c')" + '\n',
            f"    if not c:" + '\n',
            f"        return" + '\n',
        ]
    command_lines += [
        f"    {call}" + '\n',
        '\n',
    ]
    return body_lines, command_lines, headline
#@+node:ekr.20221105063835.1: *3* script: insert-macros
"""
script: Insert text-based macros in python code.

- Add traces at start of each def.
- Add traces at end of each def.
- Add traces before each return.
"""
# This is prototype code. I have no plans to improve it.
g.cls()
import re
from typing import List
@others

target = g.findNodeInChildren(c, p, 'macro-test')
for p in target.self_and_subtree():
    add_macros(p)
print('Done!')
#@+node:ekr.20221105063835.2: *4* add_macros
def_pat = re.compile(r'^def\s+(\w+)\(.*\).*:')
ret_pat = re.compile(r'^\s*return\b(.*)')

def add_macros(p):
    if not p.b.strip():
        return
    result: List[str] = []  # List of lines, with added traces.
    indents: List[int] = []  # Indentation of def statements.
    names: List[str] = []  # Names of defs.
    returns: List[bool] = []  # True if def has any minimally indented returns.
    indent, last_indent = 0, 0
    for i, line in enumerate(g.splitLines(p.b.rstrip() + '\n')):
        s = line.lstrip()
        indent = len(line) - len(s)
        indent_s = indent * ' '
        if not s:
            result.append(line)
            continue  # Blank lines don't affect indentation.
        # End any previous def at this level.
        while indents and indent < last_indent:
            last_indent = indents.pop()
            last_name = names.pop()
            last_return = returns.pop()
            if not last_return:
                indent_s = last_indent * ' '
                result.append(f"{indent_s}g.trace('end {last_name}')\n")
        def_m = def_pat.match(s)
        if def_m:
            # End any previous entry at this level.
            if indents and indents[-1] == indent:
                indents.pop()
                names.pop()
                returns.pop()
            # Enter this def.
            indents.append(indent)
            name = def_m.group(1)
            names.append(name)
            returns.append(False)
            last_indent = indent
            indent_s = (indent + 4) * ' '
            result.append(line)
            result.append(f"{indent_s}g.trace('enter: {name}')\n")
            continue
        ret_m = ret_pat.match(s)
        if ret_m:
            if indents:
                if indents[-1] + 4 == indent:
                    returns[-1] = True
                    # g.trace(f"Suppress end trace: {line!r}")
                val = ret_m.group(1).strip()
                result.append(f"{indent_s}g.trace('return {val}')\n")
            else:
                result.append(f"### Line: {i} Return outside of def. {p.h}\n")
        result.append(line)
    if indents:
        indent_s = (indents[-1] + 4) * ' '
        name = names[-1]
        if not returns[-1]:
            result.append(f"{indent_s}g.trace('end {name}')\n")
    g.printObj(result, tag=f"{p.h}:")
#@+node:ekr.20221105063835.3: *4* macro-test
#@+node:ekr.20221105063835.4: *5* multiple defs
def test_1():
    return 'a'
    
def test_2():
    return 'b'
    
def test_3():
    if 1:
        return 1
    else:
        return 2
#@+node:ekr.20221105063835.5: *5* spam
def spam():
    print("spam")
#@+node:ekr.20221105063835.6: *5* eggs
def eggs(a=None):
    print('eggs')
#@+node:ekr.20201030065548.2: *3* script: make-annonymous (Dangerous!)
"""Remove all sensitive material in this outline, except for this node."""
# Warning: This script is dangerous! Use on a copy of your outline.
n = 1
v0 = c.p.v
for p in c.all_unique_positions():
    if p.v != v0:
        p.h = f"node {n}"
        p.b = ''
        n += 1
c.redraw()
#@+node:ekr.20190307044803.1: *3* script: recursive-clean-lines
'''
Recursively clean all lines in a folder.

Do this before importing to @clean.
'''
g.cls()

directory = r'C:\apps\pyzo\source\pyzo'

def clean_dir(directory):
    for path in g.get_files_in_directory(directory, kinds=['.py']):
        clean_file(path)
        
def clean_file(path):
    with open(path, 'r') as f:
        file_s = f.read()
    lines = []
    for line in g.splitLines(file_s):
        if line.rstrip():
            lines.append(line.rstrip())
        if line.endswith('\n'):
            lines.append('\n')
    new_s = ''.join(lines)
    if file_s != new_s:
        with open(path, 'w') as f:
            f.write(new_s)
            print('wrote: %s' % path)
    
clean_dir(directory)
#@+node:ekr.20201030065548.26: *3* script: write @clean node without writing .leo file
at = c.atFileCommands
p = g.findNodeAnywhere(c, '@clean at_clean_test.py')
assert p
at.write(p, kind='@clean', nosentinels=True, toString=False)
for p2 in p.self_and_subtree():
    p2.v.clearDirty()
c.redraw()
#@+node:ekr.20201030065548.27: *4* @@clean at_clean_test.py
@others # changed.
#@+node:ekr.20201030065548.28: *5* spam
def spam():
    pass # changed 3
#@+node:ekr.20201030065548.29: *5* eggs
def eggs():
    pass

#@+node:ekr.20150416060855.1: ** Translation
#@+node:ekr.20071104221525.92: *3* @@button elisp test
@first # -*- coding: utf-8 -*-

import leoTest
import leoImport

@others

u = leoTest.testUtils(c) 
input_node  = u.findNodeAnywhere('new-elisp2py-input-')
output_node = u.findNodeAnywhere('new-elisp2py-output-')
assert input_node and output_node

for p in input_node.children():

    if 0: # No need for child node yet.
        root = output_node.insertAsLastChild()
        root.initHeadString(p.h)

    s = p.b
    scanner = elispScanner()
    aList = scanner.parse(s)
    if 1:
        print 'parse tree...'
        print scanner.dumpList(aList)
        print 'end parse tree'
        print
    result = scanner.gen(aList,indent=0,init=True)
    print scanner.dumpList(result)
#@+node:ekr.20071104221525.93: *4* class elispScanner
class elispScanner:

    @others
#@+node:ekr.20071104221525.94: *5*  __init__
def __init__ (self):

    # Debugging.
    self.debug = True
    self.trace = False

    # Semantic info.
    self.def_keywords = ('defun','defvar',)
    self.indent_keywords = ('if','prog','prog1','progn','set',)
    self.expr_keywords = ('and','not','or',)

    self.indent = 0 # Indentation of production output.

    # Dispatch dictionary.
    self.dispatchDict = {}

#@+node:ekr.20071104221525.95: *5* class token
class token:

    '''One elisp syntactic entity, together with the list of comments
    that immediately preceded it in the source.'''

    def __init__(self, comments, kind, val):
        '''For 'block:' tokens, val is a list of tokens;
        for all other kinds, val is the token's spelling.'''
        self.comments = list(comments)  # Defensive copy, as comments[:] did.
        self.kind = kind
        self.val = val

    def __repr__(self):
        return '<token kind: %s, val: %s>' % (self.kind, self.val)

    def __str__(self):
        kind = self.kind
        if kind == 'block:':
            return 'block: [snip]'
        # Strings are truncated to 20 characters; everything else in full.
        val = self.val[:20] if kind == 'string:' else self.val
        return '%s%s' % (kind, val)
#@+node:ekr.20071104221525.96: *5* choose
def choose(self, cond, a, b):
    '''Return a if cond is true, else b.
    Warning: both a and b are already evaluated by the caller.'''
    return a if cond else b
#@+node:ekr.20071104221525.97: *5* dumpList
def dumpList(self,aList):

    if type(aList) == type([]):
        result = self.dumpListHelper(aList,indent=0)
        return '\n'.join(result)
    else:
        return repr(aList)

def dumpListHelper(self,aList,indent):

    result = []
    leading = ' ' * (4 * indent)

    for z in aList:
        if z is None:
            result.append('%s%s' % (leading,'None'))
        elif z == []:
            result.append('%s%s' % (leading,'[]'))
        elif type(z) == type([]):
            result.append('%s%s' % (leading,'['))
            result.extend(self.dumpListHelper(z,indent+1))
            result.append('%s%s' % (leading,']'))
        elif isinstance(z,self.token):
            if z.kind=='block:':
                result.append('%s%s' % (leading,'block:'))
                result.extend(self.dumpListHelper(z.val,indent+1))
                # result.append('%s%s' % (leading,'block:]'))
            else:
                result.append('%s%s' % (leading,str(z)))
        else:
            result.append('%s%s' % (leading,str(z)))

    return result

listToString = dumpList
#@+node:ekr.20071104221525.98: *5* Parsing...
#@+node:ekr.20071104221525.99: *6* parse
def parse(self,s):

    # Generate the nodes, including directive and section references.
    return self.scanForest(s)
#@+node:ekr.20071104221525.100: *6* scan & helpers
def scan(self,s,i):

    '''Scan one parenthesized level of an elisp expression starting at s[i].

    Returns (i,result) where result is a list of self.token objects.
    Comments are not tokens themselves: each run of comments is attached
    to the next token created.
    '''

    start = i ; end = len(s) ; result = []
    comments = [] ; token = self.token
    # A hack. ignore initial @language lisp
    tag = '@language lisp'
    if i == 0 and s[i:i+len(tag)]==tag:
        i += len(tag)
    while i < end:
        progress = i
        ch = s[i]
        if ch == ';':
            j = self.skipComment(s,i)
            # Comments are not tokens, they are attached to tokens.
            comments.append(s[i:j]) 
            start = i = j
        elif ch == '"':
            j = self.skipString(s,i)
            result.append(token(comments,'string:',s[i:j]))
            comments = []
            start = i = j
        elif ch.isalnum() or ch == u'_':
            j = self.skipId(s,i)
            result.append(token(comments,'id:',s[i:j]))
            comments = []
            start = i = j
        elif ch =='(':
            # Recurse to scan the nested list; the matching ')' ends it.
            i += 1
            j,aList = self.scan(s,i)
            result.append(token(comments,'block:',aList))
            start = i = j
        elif ch == ')':
            i += 1
            return i,result
        else:
            if ch == "'": ch = 'quote'
            if ch not in (' ','\t','\n','\r'):
                result.append(token(comments,'op:',ch))
            i += 1
        assert progress < i,'i: %d, ch: %s' % (i,repr(s[i]))

    # End of input: emit any trailing text, then any dangling comments.
    if start < end:
        tail = s[start:end].strip()
        if tail:
            result.append(token(comments,'tail:',tail))
            comments = []
                # Bug fix: was 'comment = []' (typo), which never cleared
                # the list, so comments attached to the 'tail:' token were
                # duplicated in the 'trailing-comment:' token below.
    if comments:
        result.append(token(comments,'trailing-comment:',''))

    return i,result
#@+node:ekr.20071104221525.101: *7* skipComment
def skipComment (self,s,i):

    '''Skip a comment: return the index of the terminating newline (or len(s)).'''

    j = s.find('\n',i)
    return len(s) if j == -1 else j
#@+node:ekr.20071104221525.102: *7* skipId
def skipId (self,s,i):

    '''Return the index just past the identifier starting at s[i].'''

    n = len(s)
    # Elisp ids may contain hyphens as well as underscores.
    while i < n and (s[i].isalnum() or s[i] in '_-'):
        i += 1
    return i
#@+node:ekr.20071104221525.103: *7* skipString
def skipString(self,s,i):

    """Skip the string literal beginning at s[i]; return the index past it."""

    assert s[i] == '"'
    i += 1
    n = len(s)
    while i < n:
        c = s[i]
        if c == '\\':
            i += 2          # skip the escaped character too
        elif c == '"':
            return i + 1    # include the closing quote
        else:
            i += 1
    return i                # unterminated string
#@+node:ekr.20071104221525.104: *6* scanForest
def scanForest (self,s):

    '''Scan all top-level elisp expressions in s; return the combined token list.'''

    result = []
    i = 0
    n = len(s)
    while i < n:
        last = i
        i,tokens = self.scan(s,i)
        if tokens:
            result.extend(tokens)
        assert i > last
    return result


#@+node:ekr.20071104221525.105: *5* Code generators...
@ From Richard Diebenkorn:

1. Attempt what is not certain. Certainty may or may not come later. It may then
be a valuable delusion.

2. The pretty, initial position which falls short of completeness is not to be
valued--except as stimulus for further moves.

3. Do search.  But in order to find other than what is looked for.

4. Use and respond to the initial fresh qualities but consider them absolutely
expendable.
#@+node:ekr.20071104221525.106: *6* gen
def gen(self,tokens,indent,init=False):

    '''Generate output lines for a list of tokens at the given indent level.

    When init is True, the output is bracketed with separator rules.
    '''

    result = ['='*40] if init else []
    for tok in tokens:
        result.extend(self.gen_token(tok,indent))
    if init:
        result.append('-'*40)
    return result

#@+node:ekr.20071104221525.107: *6* gen_token
def gen_token(self,token,indent):

    '''Generate output for a single token, dispatching on its kind.'''

    result = []
    is_block = token.kind == 'block:'
    if self.debug:
        # Debug mode accumulates text lines into result.
        if is_block:
            result.extend(self.gen_block(token,indent))
        else:
            self.put_token(token,indent,result)
    else:
        # Code mode writes output directly; result stays empty.
        if is_block:
            self.gen_block(token,indent)
        else:
            self.put_code_token(token)
    return result
#@+node:ekr.20071104221525.108: *6* gen_block & helper
def gen_block (self,token,indent):

    '''Generate output for a 'block:' token (a parenthesized elisp form).'''

    if not (token and token.val):
        return []
    inner = token.val
    head = inner[0]
    result = []
    if head.kind.startswith('id'):
        # Blocks led by an id get keyword-specific handling.
        lines = self.gen_block_id(head.val,inner,indent)
    else:
        if self.debug:
            self.put('block...',[],indent,result)
        lines = self.gen(inner,indent+1)
    result.extend(lines)
    return result
#@+node:ekr.20071104221525.109: *7* gen_block_id
def gen_block_id (self,theId,tokens,indent):

    '''Generate output for a parenthesized form whose first token is id theId.'''

    result = []
    rest = tokens[1:]
    # Eventually there will be a lookup of the dispatch dict here.
    if theId == 'let':
        lines = self.gen_let(tokens,indent)
    elif theId == 'if':
        lines = self.gen_if(tokens,indent)
    elif theId in self.def_keywords:
        lines = self.gen_def(theId,tokens,indent)
    elif theId in self.indent_keywords:
        # Indent keywords emit a header line, then the body one level deeper.
        self.put('%s...' % (theId),[],indent,result)
        lines = self.gen(rest,indent+1)
    elif theId in self.expr_keywords:
        lines = self.gen_expr(theId,rest,indent+1)
    else:
        lines = self.gen_call(theId,rest,indent)
    result.extend(lines)
    return result
#@+node:ekr.20071104221525.110: *6* gen_call & helper
def gen_call (self,funcId,tokens,indent):

    '''Generate a function call: funcId followed by its argument tokens.'''

    result = []
    if self.debug:
        self.put('call: %s' % (funcId),[],indent,result)
        for tok in tokens:
            result.extend(self.gen_arg(tok,indent+1))
    else:
        self.put_code_line('%s(' % (funcId))
        for tok in tokens:
            self.gen_arg(tok,indent+1)
        self.put_code(')')
    return result

#@+node:ekr.20071104221525.111: *7* gen_arg
def gen_arg(self,token,indent):

    '''Generate output for one argument of a call.'''

    result = []
    is_block = token.kind == 'block:'
    if self.debug:
        if is_block:
            self.put('arg block:...',[],indent,result)
            result.extend(self.gen_block(token,indent))
        else:
            self.put_token(token,indent,result)
    elif is_block:
        self.put_code(''.join(self.gen_block(token,indent)))
    else:
        self.put_code_token(token)
    return result
#@+node:ekr.20071104221525.112: *6* gen_def
def gen_def(self,theId,tokens,indent):

    '''Generate a def for theId (defun etc.).

    tokens = [def-keyword, name, params, body...].
    '''

    result = []
    if not tokens or len(tokens) < 3:
        result.append('*** bad def tokens')
        return result
    idToken = tokens[1]
    if idToken.kind != 'id:':
        result.append('*** bad def id')
        return result
    if self.debug:
        self.put(theId,idToken.val,indent,result)
        result.extend(self.gen(tokens[2:],indent+1))
    else:
        self.put_code('def %s (' % idToken.val)
        self.gen_token(tokens[2],indent)    # the parameter list
        self.put_code('): # end def\n')
        self.indent += 1
        self.gen(tokens[3:],indent+1)
        self.indent -= 1
    return result
#@+node:ekr.20071104221525.113: *6* gen_if & helpers
@ if condition then-form else-forms.

If the evaluated condition is non-nil, then-form is evaluated and the result
returned. Otherwise, the else-forms are evaluated in textual order, and the
value of the last one is returned. If condition has the value nil, and no
else-forms are given, if returns nil.
@c

def gen_if (self,tokens,indent):

    '''Generate output for an elisp if form.

    tokens[0] is the id:if token, tokens[1] is the condition; any remaining
    tokens are the then-form and else-forms.
    '''

    # tokens[0]: id:if
    # tokens[1] cond

    # NOTE(review): leftover debug tracing runs unconditionally,
    # and xrange is Python 2 only.
    for i in xrange(len(tokens)):
        g.trace('tokens[%d]: %s' % (i,self.dumpList(tokens[i])))

    # g.trace(self.dumpList(tokens))

    result = []

    if self.debug:
        self.put('if...',[],indent,result)
        aList = self.gen(tokens[1:],indent+1)
        result.extend(aList)
    else:
        self.put_code('if ')
        self.gen(tokens[1:],indent+1)
        self.put_code(': # end if\n')

    return result
#@+node:ekr.20071104221525.114: *7* gen_then
def gen_then (self,token):

    # Placeholder: generating a then-form is not implemented yet.
    pass
#@+node:ekr.20071104221525.115: *6* gen_expr
def gen_expr (self,theId,aList,indent):

    '''Generate output for an expression keyword (e.g. and/or): a header
    line for theId followed by its operands one level deeper.'''

    # Removed the unused local 'binops' (dead code).
    result = []
    self.put(theId,[],indent,result)
    result.extend(self.gen(aList,indent+1))
    return result
#@+node:ekr.20071104221525.116: *6* gen_let & helper
@
(let ((variable value)
      (variable value)
      ...)
  body...)
@c

def gen_let (self,tokens,indent):

    '''Generate output for a let form: (let (bindings...) body).

    Expects exactly [let-token, bindings-block, body-block]; any other
    shape is handled generically.
    '''

    if not tokens: return []
    if len(tokens) != 3:
        g.trace('unusual let')
        return self.gen(tokens,indent+1)

    # Removed a dead 'if 0:' debug block that used Python-2-only xrange.
    letToken,bindingToken,bodyToken = tokens
    result = []
    self.put('let...',[],indent,result)
    self.put('let-bindings...',[],indent+1,result)
    result.extend(self.gen_let_bindings(bindingToken,indent+2))
    self.put('let-block...',[],indent+1,result)
    result.extend(self.gen_block(bodyToken,indent+2))
    return result
#@+node:ekr.20071104221525.117: *7* gen_let_bindings
def gen_let_bindings (self,token,indent):

    '''Generate output for the bindings block of a let form.'''

    result = []
    if token.kind != 'block:':
        g.trace('unexpected let')
        return result
    for pair in token.val:
        if pair.kind != 'block:':
            continue
        # Each binding is a block holding one (id, value) pair.
        if pair.val and len(pair.val) == 2:
            idTok,valTok = pair.val
            self.put('let-id',idTok,indent,result)
            self.put('let-val...',[],indent,result)
            if valTok.kind == 'block:':
                result.extend(self.gen_block(valTok,indent+1))
            else:
                #g.trace('no let list')
                self.put_token(valTok,indent+1,result)
        else:
            g.trace('unexpected let 2')
    return result
#@+node:ekr.20071104221525.118: *6* put...
#@+node:ekr.20071104221525.119: *7* put
def put (self,kind,val,indent,result):

    '''Append one formatted line of output to result.'''

    # Prefix each line with its indent level and that much leading space.
    leading = '%2d: %s' % (indent,' ' * indent)
    if kind == 'string:':
        # Truncate long string values for readability.
        val = self.choose(len(val)>20,val[:20]+'..."',val)
    if val:
        line = '%s%s %s' % (leading,str(kind),str(val))
    else:
        line = '%s%s' % (leading,str(kind))
    result.append(line)
#@+node:ekr.20071104221525.120: *7* put_token
def put_token (self,token,indent,result):

    '''Append a token's attached comments, then the token itself, to result.'''

    for comment in token.comments:
        self.put('comment:',comment,indent,result)
    self.put(token.kind,token.val,indent,result)
#@+node:ekr.20071104221525.121: *7* put_code & put_code_line
def put_code_line (self,s):

    # Write s indented to self.indent, without a trailing newline.
    # Python 2 print with a trailing comma suppresses the newline
    # (softspace may still insert a separating space).
    s2 = '%s%s' % (' '*self.indent,s)
    print s2,

def put_code (self,s):

    # Write s with no indentation and no trailing newline (Python 2 print).
    print s,
#@+node:ekr.20071104221525.122: *7* put_code_token
def put_code_token (self,token):

    '''Write a token's code text; nested blocks print as a placeholder.'''

    text = '<block>' if token.kind == 'block:' else token.val
    self.put_code(text)

#@+node:ekr.20040714055306: *3* script: elispToPy
#@+node:ekr.20071104221525.1: *4* The project is doomed
@language lisp
@nocolor
@

This project is doomed.  It would take AI (lots of special cases) to
translate elisp to readable Python.

Furthermore, how are we to simulate the semantics of lisp constructs
such as interactive, fboundp, setcdr and nconc?

For example, the following is going to cause all kinds of semantic problems.
@c

@color

(defun dired-do-igrep (program expression &optional options arg)
  "*Run `grep` on the marked (or next prefix ARG) files.
See `\\[igrep]'."

  (interactive
   (let ((igrep-args
	  (let ((current-prefix-arg nil))
	    (igrep-read-args t))))
     ;; Delete FILES:
     (setcdr (nthcdr 1 igrep-args) (nthcdr 3 igrep-args))
     ;; Append ARG:
     (nconc igrep-args (list current-prefix-arg))))

  (igrep program
	 expression
	 (funcall (cond ((fboundp 'dired-get-marked-files) ; GNU Emacs
			 'dired-get-marked-files)
			((fboundp 'dired-mark-get-files) ; XEmacs
			 'dired-mark-get-files))
		  t arg)
	 options))

; The following is not going to be easy to translate!

(defun spam (prompt)
    (if cond
        then-part
        else-part1
        else-part2
    )
)

@nocolor
# There is no *simple* translation of the above to Python!

def spam (prompt):
    return  # need ternary operator!

# Need complex code generators!
def spam (prompt):
    if cond:
        return then-part
    else:
        else-part1
        return else-part2


#@+node:ekr.20040713123617.1: *4* older e2pyScript
@language python
@tabwidth -4

# Module-level configuration for the elisp-to-python translator script.
import string

tabWidth = 4 # how many blanks in a tab.
printFlag = False
# A readable pair of named flags for enabling/disabling Leo translations.
doLeoTranslations,dontDoLeoTranslations = True,False

gClassName = "" # The class name for the present function.  Used to modify ivars.
gIvars = [] # List of ivars to be converted to self.ivar

@others

# Entry point: c is Leo's commander, provided by the scripting environment.
run(c)
#@+node:ekr.20050220085042.1: *5* run
def run (c):

    '''Translate every child of the -elisp2py-input- node and print the results.'''

    import leoTest
    u = leoTest.testUtils()
    input  = u.findNodeAnywhere(c,'-elisp2py-input-')
    output = u.findNodeAnywhere(c,'-elisp2py-output-')
    # NOTE(review): output is located but never written to here.
    assert input and output

    print ; print '*' * 60
    e = e2py(dumping=False)
    for p in input.children():
        # Show the source, then its translation.
        print ; print '-' * 10, p.h
        print p.b
        result = e.doOuterBlock(p.b)
        print '-' * 20
        print result
#@+node:ekr.20050220091046: *5* class e2py
class e2py:

    '''A text-based (not token-based) approach to parsing elisp.

    Methods are inserted below by Leo's @others directive.
    '''

    @others
#@+node:ekr.20050220091046.1: *6* ctor
def __init__ (self,dumping=False):

    '''Ctor for the e2py class.  dumping enables per-method result dumps.'''

    self.dumping = dumping
#@+node:ekr.20050220111049: *6* doBlock
def doBlock (self,s,strip=True):

    '''Handle an outer block or a formerly parenthesized block.

    Splits s into plain text and parenthesized sub-blocks, translating each
    sub-block via doParenBlock.  Returns a list of lines.
    NOTE(review): the strip parameter is unused here.
    '''

    i = 0 ; result = []
    while i < len(s):
        j = s.find('(',i)
        if j == -1:
            # No more parens: emit the rest of s as-is.
            tail = s[i:]
            if tail:
                # g.trace(repr(tail))
                result.extend(g.splitLines(tail))
            break
        else:
            # Emit any text before the paren unchanged.
            prev = s[i:j]
            if prev: result.extend(g.splitLines(prev))
            i = j
            n,ok = self.findMatchingBracket(s[i:])
            if ok:
                block = s[i:i+n]
                block = self.stripLws(block)
                block_result = self.doParenBlock(block)
                if block_result:
                    result.extend(block_result)
            # On an unmatched paren, n covers the rest of s, which is dropped.
            i += n

    result = self.removeBlankLines(result)
    self.dump(result)
    return result
#@+node:ekr.20050220091046.3: *6* doParenBlock
def doParenBlock (self,s):

    ''' - Strip outer parens.
        - Call doBlock recursively for all inner parens.
        - Add one level of indentation to each line.

    s must be exactly one balanced parenthesized block.
    '''
    n,ok = self.findMatchingBracket(s)
    assert n == len(s) and ok
    s = s[1:-1] # Strip the brackets
    i = 0
    i = g.skip_ws(s,i)
    j,id = self.skip_id(s,i)
    if id:
        # Dispatch on the leading keyword.
        s = s[j:].lstrip()
        if id == 'defun': result = self.doDef(s,id)
        elif id == 'let': result = self.doLet(s)
        elif id == 'if': result = self.doIf(s)
        elif id in ('prog1','progn'): result = self.doProg(s,id)
        else: result = self.doId(s,id)
    elif s[i] == '(':
        # NOTE(review): s[i] would raise IndexError on an empty '()' block.
        s = s[i:].lstrip()
        result = self.doDoubleParen(s)
    else: result = self.doBlock(s)

    self.dump(result)
    return result
#@+node:ekr.20050220105058: *6* doDef
def doDef(self,s,id):

    '''Translate an elisp defun into a Python-style def line plus indented body.'''

    # g.trace(id)
    header = None
    body = s
    if id == 'defun':
        kind,i,j,name = self.getToken(s,0)
        if kind == 'id':
            kind2,i2,j2,params = self.getToken(s,j)
            if kind2 == '()':
                header = 'def %s %s:' % (name,params)
                body = s[j2:]
    if header is None:
        # Fall through if error.
        header = id
        body = s
    result = [header]
    result.extend(self.indent(self.doBlock(body)))
    self.dump(result)
    return result
#@+node:ekr.20050220124658: *6* doDoubleParen
def doDoubleParen (self,s):

    '''Translate a block that opens directly with another parenthesized block.'''

    n,ok = self.findMatchingBracket(s)
    if not ok:
        result = self.doBlock(s)
    else:
        inner,rest = s[:n],s[n:]
        result = ['(:']
        result.extend(self.indent(self.doParenBlock(inner)))
        result.append(':)')
        result.extend(self.doBlock(rest))
    self.dump(result)
    return result
#@+node:ekr.20050220111114: *6* doId
def doId(self,s,id):

    '''Translate a block led by an unrecognized id: bracket its translated
    body with '(id:' and ':id)' markers.'''

    # g.trace(id)
    # Removed the dead 'if 1:' wrapper and its unreachable else branch.
    result = ['(%s:' % id]
    result.extend(self.indent(self.doBlock(s)))
    result.append(':%s)' % id)
    self.dump(result)
    return result
#@+node:ekr.20050220105058.1: *6* doIf
def doIf(self,s):

    '''Translate an elisp if form.  For now the whole form is bracketed
    between 'if:' and ':if' markers rather than becoming a real Python if.'''

    # g.trace()

    if 1:
        result = ['if:']
        result.extend(self.indent(self.doBlock(s)))
        result.extend([':if'])

    else: # not yet.  Don't suck everything into the 'if' statement!
        block = self.doBlock(s)
        result = ['if (%s):' % ' '.join(block)]

    self.dump(result)
    return result
#@+node:ekr.20050220105058.2: *6* doLet
def doLet(self,s):

    '''Translate an elisp let form, bracketing its body with let markers.'''

    # g.trace()
    body = self.indent(self.doBlock(s))
    result = ['let:'] + body + [':let']
    self.dump(result)
    return result
#@+node:ekr.20050220091046.2: *6* doOuterBlock
def doOuterBlock (self,s):

    '''Handle outermost code.  Return a string, not a list.'''

    lines = self.doBlock(self.stripLws(s))
    return '\n'.join(self.removeBlankLines(lines))
#@+node:ekr.20050220105058.3: *6* doProg
def doProg(self,s,id):

    '''Translate prog1/progn: the keyword, then its indented body.'''

    # g.trace(id)
    result = [id] + self.indent(self.doBlock(s))
    self.dump(result)
    return result
#@+node:ekr.20050220111923: *6* dump
def dump(self,lines):

    '''If dumping is enabled, print the caller's name and its result lines.'''

    if self.dumping:
        print '%s returns...' % g.callerName(2)
        lines = [str(line) for line in lines]
        print g.listToString(lines)
#@+node:ekr.20050220092732: *6* findMatchingBracket
def findMatchingBracket(self,s,i=0):

    '''Find the bracket matching the open bracket at s[i].

    Returns (n,ok): n is the index just past the matching close bracket
    (or len(s) when unmatched) and ok says whether a match was found.
    '''

    ch1 = s[i]
    assert ch1 in "({["
    delim = self.matchingBracket(ch1)
    level = 1
    for ch in s[i+1:]:
        i += 1
        # g.trace(level,ch)
        if ch == ch1:
            level += 1
        elif ch == delim:
            level -= 1
            if level == 0: return i+1,True
    print "%s not matched by %s in %s" % (ch1,delim,s)
    return len(s),False
#@+node:ekr.20050220114616: *6* getToken
def getToken (self,s,i=0):

    '''Return (kind,i,j,value) for the next token at or after s[i].

    kind is one of: '"' (string), 'id', '()' (balanced block), '(' (unmatched
    paren), the character itself for punctuation, or None at end of string.
    '''

    i = g.skip_ws(s,i)
    if i >= len(s):
        return None,i,i,None
    ch = s[i]
    if ch == '"':
        j = self.skipString(s,i)
        return '"',i,j,s[i:j]
    if ch in string.ascii_letters or ch in string.digits or ch in '-_':
        j,name = self.skip_id(s,i)
        return 'id',i,j,name
    if ch == '(':
        j,ok = self.findMatchingBracket(s,i)
        if ok:
            return '()',i,j,s[i:j]
        return '(',i,i,'('
    return ch,i,i,ch
#@+node:ekr.20050220105726: *6* indent
def indent (self,lines,strip=True):

    '''Prefix each line with four spaces; by default drop blank lines.'''

    result = []
    for line in lines:
        if strip and not line.strip():
            continue
        result.append('    ' + line)
    return result
#@+node:ekr.20050220093752: *6* matchingBracket
def matchingBracket (self,ch):

    '''Return the close bracket matching the open bracket ch.'''

    assert ch in "({["
    return {'(': ')', '{': '}', '[': ']'}[ch]
#@+node:ekr.20050220103808: *6* skip_id
def skip_id(self,s,i=0):

    '''Skip an identifier (letters, digits, '-') at s[i]; return (j, word).'''

    j = g.skip_id(s,i,chars='-')
    word = s[i:j]  # renamed from 'id' to avoid shadowing the builtin
    return j,word
#@+node:ekr.20050220105058.4: *6* skipString
def skipString(self,s,i):

    '''Skip the elisp string literal at s[i]; return the index past it.'''

    # Skip the opening double quote.
    i1 = i
    ch = s[i]
    i += 1
    assert(ch == '"')

    while i < len(s):
        ch = s[i]
        i += 1
        if ch == '"': return i
        elif ch == '\\': i += 1

    # No closing quote: warn and return the end position.
    print "run-on elisp string: %s" % g.get_line(s[i1:])
    return i
#@+node:ekr.20050220122447: *6* removeBlankLines
def removeBlankLines (self,lines):

    '''Return lines with all blank (whitespace-only) lines removed.'''

    return [z for z in lines if z.strip()]
#@+node:ekr.20050220100049: *6* stripLws
def stripLws(self,s):

    '''Remove leading whitespace from every line of s.'''

    return ''.join(line.lstrip() for line in g.splitLines(s))
#@+node:ekr.20071104221525: *4* newer elispToPy
#@+node:ekr.20071104221525.89: *5* new-elisp2py-input-
@language lisp
#@+node:ekr.20071104221525.90: *6* if-test
@language lisp

; What would the translation of this be???

; def (spam):
;   '''docstring'''
;   return ?? # A python if statement can't be used here!

(defun spam (prompt)
    "docstring"
    (if igrep-insert-default-key
        (define-key minibuffer)
        (do-else2)
        'do-else1
    )
)

#@+node:ekr.20071104221525.91: *5* new-elisp2py-output-
#@-all
#@@language python
#@@tabwidth -4

#@-leo
