# generates markdown tables from CSV files, after sorting the rows
# by different columns (cost, priority, page)
4 from copy
import deepcopy
def write_mdwn_row(f, row):
    # Presumably writes `row` (a list of pre-padded column strings) to the
    # open file `f` as one markdown table row — TODO confirm: the function
    # body (original lines 8-11) is not visible in this chunk.
def underlines(header):
    # Builds the markdown header-separator row: one run of dashes sized to
    # each column of `header` (used right after the header row is written).
    # NOTE(review): original lines 13-14 are missing from this view —
    # presumably `row = []` plus a `for col in header:` loop enclosing the
    # append below, and a trailing `return row`; confirm against full file.
        row.append("-" * len(col))
def is_svp64_page(page):
    """Return True when `page` is one of the SVP64 setup/looping wiki pages.

    Consulted by the page comparator (sort_by_page) when ordering rows.
    """
    # set literal instead of a list: O(1) membership and makes the
    # "fixed collection of names" intent explicit
    return page in {'sv/setvl', 'sv/svstep', 'sv/remap'}
def sort_by_page(p1, p2):
    # cmp-style comparator (used via functools.cmp_to_key further down).
    # XOR: the condition is true when both or neither argument is an
    # SVP64 page, i.e. when SVP64-membership does not distinguish them.
    if not (is_svp64_page(p1) ^ is_svp64_page(p2)):
        # NOTE(review): the comparator's branch bodies (original lines
        # 26-34) are missing from this view — the actual tie-breaking /
        # ordering logic cannot be documented here; confirm against the
        # full file.
# ranking order for the 'priority' column: sort_by_priority compares rows
# by their position (list.index) in this list
priorities = ['high', 'med', 'low', 'TBD']
def sort_by_priority(p1, p2):
    # cmp-style comparator: map each row's 'priority' field to its rank in
    # the module-level `priorities` list (note: rebinds the parameters).
    p1 = priorities.index(p1['priority'])
    p2 = priorities.index(p2['priority'])
    # NOTE(review): the comparison/return (original lines 40-43) is missing
    # from this view — direction of the ordering cannot be confirmed here.
def sort_by_cost(p1, p2):
    # cmp-style comparator on the cost column (stored as strings).
    # NOTE(review): original lines 45-46 are missing from this view —
    # presumably they extract the cost field into p1/p2 before the
    # digit checks below; confirm against the full file.
    if not p1.isdigit(): p1 = 0  # non-numeric cost treated as zero
    if not p2.isdigit(): p2 = 0
    # NOTE(review): the numeric conversion and comparison/return
    # (original lines 49-54) are missing from this view.
def sort_by_cost_priority_page(p1, p2):
    # combined cmp-style comparator: cost first, then priority, then page.
    v = sort_by_cost(p1, p2)
    # NOTE(review): original line 57 missing — presumably an early-out
    # such as `if v != 0: return v`; confirm against the full file.
    v = sort_by_priority(p1, p2)
    # NOTE(review): original line 59 missing — presumably the same early-out.
    v = sort_by_page(p1, p2)
    # NOTE(review): the final return (original lines 61+) is not visible
    # in this chunk.
def by_cost_then_priority_then_page(areas):
    # Flatten the per-area dict of row-lists, sort all rows with the
    # combined comparator, then re-bucket the sorted rows into a new dict.
    # first blat all columns together (drop area-dict)
    for row in areas.values():
        # NOTE(review): loop body (original lines 68-69) missing —
        # presumably accumulates the rows into `res`.
    res = sorted(res, key=functools.cmp_to_key(sort_by_cost_priority_page))
    # now split out into a dict again this time by cost-priority
    # NOTE(review): original lines 72-76 missing — `costs`, `cost` and the
    # enclosing loop over `res` rebinding `row` are set up there.
            costs[cost].append(row)
    # NOTE(review): the return statement is not visible in this chunk.
def print_table(title, header, areas, sortby):
    # Write one markdown table file, "ls012/<fname>.mdwn", from `areas`.
    #   title  - page title; also lower-cased/underscored into the filename
    #   header - list of column names
    #   areas  - dict mapping area name -> list of row-dicts
    #   sortby - optional callable applied to `areas` before writing
    # NOTE(review): many original lines of this function are missing from
    # this view; the gaps are marked inline below — confirm all of them
    # against the full file.
    fname = title.lower().replace(" ", "_")
    with open("ls012/%s.mdwn" % fname, "w") as f:
        # write out the page header
        f.write("\\newpage{}\n")
        # NOTE(review): original line 86 missing.
        f.write("# %s\n" % title)
        # sort everything if required
        if sortby is not None:
            # NOTE(review): body missing (original lines 91-92) —
            # presumably `areas = sortby(areas)`.
        # start writing out areas
        for title, rows in areas.items():  # note: shadows the `title` parameter
            # start new page (if not first newpage)
            if linecount is not None:  # NOTE(review): `linecount` is bound in a missing line
                # allow 60 rows per page
                linecount += len(rows)
                # NOTE(review): original lines 99-101 missing.
                f.write("\\newpage{}\n")
            # NOTE(review): original line 103 missing.
            if linecount is None:  # skipped first newpage
                # NOTE(review): body missing (original lines 105-106).
            f.write("## %s\n" % title)
            # NOTE(review): original lines 108-109 missing.
            # work out maximum length of items, and adjust header
            hdr = deepcopy(header)
            # NOTE(review): original lines 112-115 missing — `cols` and the
            # enclosing per-row loop binding `row` are set up there.
            for hd, value in row.items():
                cols[hd] = max(cols[hd], len(value))
            # adjust header (add spaces)
            for i, hd in enumerate(hdr):
                n_spaces = cols[hd] - len(hd)
                hdr[i] = hdr[i] + " " * n_spaces
            # NOTE(review): original line 122 missing.
            write_mdwn_row(f, hdr)
            write_mdwn_row(f, underlines(hdr))
            # NOTE(review): original line 125 missing.
            # adjust row (add same spaces as header width)
            # NOTE(review): original line 127 missing — presumably the
            # per-row loop this column loop nests inside.
            for key in row.keys():
                col_len, value = cols[key], row[key]
                # NOTE(review): original line 130 missing — presumably the
                # `if` arm (e.g. `if key == 'page' ...:`) that the `elif`
                # below belongs to.
                    prefix = 'https://libre-soc.org/openpower/'
                    v = value.replace("_", "\_")  # latex, duh
                    url = '[%s](%s%s)' % (value, prefix, v)
                elif key == 'rfc' and value.startswith('ls'):
                    prefix = 'https://libre-soc.org/openpower/sv/rfc/'
                    url = '[%s](%s%s)' % (value, prefix, value)
                # NOTE(review): original lines 138-139 missing (else arm /
                # use of `url`?).
                value = value.replace("_", "\_")  # latex, duh
                n_spaces = col_len - len(value)
                r.append(value + " " * n_spaces)  # NOTE(review): `r` bound in a missing line
        # NOTE(review): original lines 143-146 missing.
        # approx 8 lines per header
if __name__ == '__main__':
    # Read the CSV op-table and emit the markdown tables.
    # NOTE(review): several original lines are missing from this view;
    # gaps are marked inline — confirm against the full file.
    with open("ls012/optable.csv") as f:
        l = map(str.strip, f.readlines())
    # NOTE(review): original lines 153-155 missing — `header`, `areas` and
    # the per-line loop binding `line` are presumably set up there.
        if line.startswith("#"):
            # a "#"-prefixed line names a new area
            area = line[1:].strip()
            # NOTE(review): original lines 158-159 missing.
        # split line by commas, whitespace-strip it
        line = list(map(str.strip, line.split(',')))
        # NOTE(review): original lines 162-165 missing.
        # create a dictionary by tuple of header+line
        linedict = dict(zip(header, line))
        # NOTE(review): original lines 168-171 missing.
        areas[area].append(linedict)

    # excellent - now have a dictionary of list of dictionaries:
    # area - list-of-instructions - dictionary-by-heading
    print_table("Areas", header, areas, None)

    # now sort by cost and then by page
    print_table("XO cost", header, areas, by_cost_then_priority_then_page)