-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlike_handler.py
More file actions
126 lines (96 loc) · 4.51 KB
/
like_handler.py
File metadata and controls
126 lines (96 loc) · 4.51 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import re
def like_processing_content_preprocess(content):
    """Rewrite each source line, replacing standalone 'like' keywords with '='.

    Only the code portion of a line (the part before any '//' comment) is
    processed; the comment text is passed through untouched.  Whenever a
    replacement happens, a '// LIKEPROCESSING_INFO: ...' marker line is
    inserted *before* the rewritten line so that
    like_processing_content_restore can undo the substitution later.

    :param content: iterable of source lines (typically from readlines()).
    :return: new list of lines with markers interleaved.
    """
    result = []
    for raw_line in content:
        # NOTE(review): a '//' inside a string literal would be mistaken for
        # a comment start here — confirm the input language rules this out.
        code, sep, trailing = raw_line.partition("//")
        comment = sep + trailing  # empty string when the line has no comment
        converted, marker = process_like_statements(code)
        if marker:
            # The restore pass expects the marker to precede its target line.
            result.append(marker)
        result.append(converted + comment)
    return result
def like_processing_content_restore(content_modified):
    """Undo the preprocessing done by like_processing_content_preprocess.

    Walks the modified lines; every '// LIKEPROCESSING_INFO:' marker line is
    consumed (not emitted) and its recorded indices are used to turn the
    matching '=' characters on the *following* line back into 'like'.

    :param content_modified: lines produced by the preprocess step.
    :return: list of lines with markers removed and 'like' keywords restored.
    """
    marker_prefix = "// LIKEPROCESSING_INFO:"
    restored = []
    pending_info = None  # parsed marker indices awaiting the next line
    for line in content_modified:
        stripped = line.strip()
        if stripped.startswith(marker_prefix):
            # Marker line: remember its payload and drop the line itself.
            pending_info = stripped[len(marker_prefix):].split(",")
            continue
        if pending_info is not None:
            restored.append(revert_like_statements(line, pending_info))
            pending_info = None
        else:
            restored.append(line)
    return restored
def revert_like_statements(line, processing_info):
    """Replace selected '=' characters in *line* with the keyword 'like'.

    :param line: the processed line (where 'like' was turned into '=').
    :param processing_info: iterable of numeric strings; each one is the
        index (0-based, counting '=' signs left to right) of an '=' that
        should become 'like' again.  Out-of-range indices are ignored.
    :return: the line with the recorded substitutions undone.

    NOTE(review): the original keyword's case is not recorded by the marker
    format, so 'LIKE'/'Like' come back as lowercase 'like' — confirm that is
    acceptable for the consumers of the restored text.
    """
    # Positions of every '=' in the untouched input; computed once so the
    # indices in processing_info stay meaningful as the string grows.
    eq_positions = [idx for idx, ch in enumerate(line) if ch == '=']
    result = line
    shift = 0  # each '=' -> 'like' swap lengthens the string by 3 chars
    for token in processing_info:
        target = int(token)
        if not (0 <= target < len(eq_positions)):
            continue
        at = eq_positions[target] + shift
        result = result[:at] + 'like' + result[at + 1:]
        shift += 3
    return result
def process_like_statements(part):
    """Replace every standalone 'like' (case-insensitive) in *part* with '='.

    Also builds the marker string that lets revert_like_statements undo the
    substitution: for each replaced 'like', the marker records the 0-based
    index of the corresponding '=' in the processed text, counted left to
    right.  Earlier replacements contribute one extra '=' each, hence the
    `+ n` term below.

    :param part: the code portion of a source line (no trailing comment).
    :return: tuple of (processed text, marker line).  The marker is
        '// LIKEPROCESSING_INFO: i,j,...\\n' when at least one replacement
        happened, otherwise the empty string.
    """
    pattern = r'\blike\b'
    # Single scan for the match positions; the original ran re.search,
    # re.finditer AND re.sub over the same text.
    matches = list(re.finditer(pattern, part, flags=re.IGNORECASE))
    processed_part = re.sub(pattern, '=', part, flags=re.IGNORECASE)
    if not matches:
        return processed_part, ""
    indices = [
        # '=' signs already present before this match, plus one per earlier
        # 'like' that has itself been turned into '='.
        part.count('=', 0, m.start()) + n
        for n, m in enumerate(matches)
    ]
    reverse_info = (
        "// LIKEPROCESSING_INFO: "
        + ",".join(str(i) for i in indices)
        + "\n"
    )
    return processed_part, reverse_info
if __name__ == '__main__':
    import chardet

    def detect_encoding(file_path):
        """Guess the file's text encoding from its raw bytes using chardet."""
        with open(file_path, 'rb') as raw:
            return chardet.detect(raw.read())['encoding']

    filename = "tests/test_like_handling.pas"
    # Detect the encoding first so the text read below decodes correctly.
    encoding = detect_encoding(filename)
    with open(filename, 'r', encoding=encoding, errors='ignore') as original_file:
        content = original_file.readlines()

    # Dump each stage with repr() so whitespace/newlines stay visible.
    for line in content:
        print(repr(line))

    content_modified = like_processing_content_preprocess(content)
    for line in content_modified:
        print(repr(line))

    restored_content = like_processing_content_restore(content_modified)
    for line in restored_content:
        print(repr(line))