Skip to content
This repository was archived by the owner on Jun 5, 2025. It is now read-only.

Commit 995921a

Browse files
committed
Fixed linting issues
1 parent 806c176 commit 995921a

4 files changed

Lines changed: 58 additions & 37 deletions

File tree

scripts/import_packages.py

Lines changed: 30 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,9 @@
66

77

88
json_files = [
9-
'data/archived.jsonl',
10-
'data/deprecated.jsonl',
11-
'data/malicious.jsonl',
9+
"data/archived.jsonl",
10+
"data/deprecated.jsonl",
11+
"data/malicious.jsonl",
1212
]
1313

1414

@@ -21,7 +21,7 @@ def setup_schema(client):
2121
Property(name="type", data_type=DataType.TEXT),
2222
Property(name="status", data_type=DataType.TEXT),
2323
Property(name="description", data_type=DataType.TEXT),
24-
]
24+
],
2525
)
2626

2727

@@ -47,11 +47,14 @@ def generate_vector_string(package):
4747

4848
# add extra status
4949
if package["status"] == "archived":
50-
vector_str += f". However, this package is found to be archived and no longer maintained. For additional information refer to {package_url}"
50+
vector_str += f". However, this package is found to be archived and no longer \
51+
maintained. For additional information refer to {package_url}"
5152
elif package["status"] == "deprecated":
52-
vector_str += f". However, this package is found to be deprecated and no longer recommended for use. For additional information refer to {package_url}"
53+
vector_str += f". However, this package is found to be deprecated and no \
54+
longer recommended for use. For additional information refer to {package_url}"
5355
elif package["status"] == "malicious":
54-
vector_str += f". However, this package is found to be malicious. For additional information refer to {package_url}"
56+
vector_str += f". However, this package is found to be malicious. For \
57+
additional information refer to {package_url}"
5558
return vector_str
5659

5760

@@ -62,34 +65,38 @@ def add_data(client):
6265
existing_packages = list(collection.iterator())
6366
packages_dict = {}
6467
for package in existing_packages:
65-
key = package.properties['name']+"/"+package.properties['type']
68+
key = package.properties["name"] + "/" + package.properties["type"]
6669
value = {
67-
'status': package.properties['status'],
68-
'description': package.properties['description'],
70+
"status": package.properties["status"],
71+
"description": package.properties["description"],
6972
}
7073
packages_dict[key] = value
7174

7275
for json_file in json_files:
73-
with open(json_file, 'r') as f:
76+
with open(json_file, "r") as f:
7477
print("Adding data from", json_file)
7578
with collection.batch.dynamic() as batch:
7679
for line in f:
7780
package = json.loads(line)
7881

7982
# now add the status column
80-
if 'archived' in json_file:
81-
package['status'] = 'archived'
82-
elif 'deprecated' in json_file:
83-
package['status'] = 'deprecated'
84-
elif 'malicious' in json_file:
85-
package['status'] = 'malicious'
83+
if "archived" in json_file:
84+
package["status"] = "archived"
85+
elif "deprecated" in json_file:
86+
package["status"] = "deprecated"
87+
elif "malicious" in json_file:
88+
package["status"] = "malicious"
8689
else:
87-
package['status'] = 'unknown'
90+
package["status"] = "unknown"
8891

8992
# check for the existing package and only add if different
90-
key = package['name']+"/"+package['type']
93+
key = package["name"] + "/" + package["type"]
9194
if key in packages_dict:
92-
if packages_dict[key]['status'] == package['status'] and packages_dict[key]['description'] == package['description']:
95+
if (
96+
packages_dict[key]["status"] == package["status"]
97+
and packages_dict[key]["description"]
98+
== package["description"]
99+
):
93100
print("Package already exists", key)
94101
continue
95102

@@ -104,17 +111,16 @@ def add_data(client):
104111
def run_import():
105112
client = weaviate.WeaviateClient(
106113
embedded_options=EmbeddedOptions(
107-
persistence_data_path="./weaviate_data",
108-
grpc_port=50052
114+
persistence_data_path="./weaviate_data", grpc_port=50052
109115
),
110116
)
111117
with client:
112118
client.connect()
113-
print('is_ready:', client.is_ready())
119+
print("is_ready:", client.is_ready())
114120

115121
setup_schema(client)
116122
add_data(client)
117123

118124

119-
if __name__ == '__main__':
125+
if __name__ == "__main__":
120126
run_import()

src/codegate/config.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -137,8 +137,12 @@ def from_file(cls, config_path: Union[str, Path]) -> "Config":
137137
log_level=config_data.get("log_level", cls.log_level.value),
138138
log_format=config_data.get("log_format", cls.log_format.value),
139139
chat_model_path=config_data.get("chat_model_path", cls.chat_model_path),
140-
chat_model_n_ctx=config_data.get("chat_model_n_ctx", cls.chat_model_n_ctx),
141-
chat_model_n_gpu_layers=config_data.get("chat_model_n_gpu_layers", cls.chat_model_n_gpu_layers),
140+
chat_model_n_ctx=config_data.get(
141+
"chat_model_n_ctx", cls.chat_model_n_ctx
142+
),
143+
chat_model_n_gpu_layers=config_data.get(
144+
"chat_model_n_gpu_layers", cls.chat_model_n_gpu_layers
145+
),
142146
prompts=prompts_config,
143147
)
144148
except yaml.YAMLError as e:

src/codegate/inference/inference_engine.py

Lines changed: 20 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from llama_cpp import Llama
22

33

4-
class LlamaCppInferenceEngine():
4+
class LlamaCppInferenceEngine:
55
_inference_engine = None
66

77
def __new__(cls):
@@ -10,26 +10,37 @@ def __new__(cls):
1010
return cls._inference_engine
1111

1212
def __init__(self):
13-
if not hasattr(self, 'models'):
13+
if not hasattr(self, "models"):
1414
self.__models = {}
1515

1616
async def get_model(self, model_path, embedding=False, n_ctx=512, n_gpu_layers=0):
1717
if model_path not in self.__models:
1818
self.__models[model_path] = Llama(
19-
model_path=model_path, n_gpu_layers=n_gpu_layers, verbose=False, n_ctx=n_ctx,
20-
embedding=embedding)
19+
model_path=model_path,
20+
n_gpu_layers=n_gpu_layers,
21+
verbose=False,
22+
n_ctx=n_ctx,
23+
embedding=embedding,
24+
)
2125

2226
return self.__models[model_path]
2327

24-
async def generate(self, model_path, prompt, n_ctx=512, n_gpu_layers=0, stream=True):
25-
model = await self.get_model(model_path=model_path, n_ctx=n_ctx, n_gpu_layers=n_gpu_layers)
28+
async def generate(
29+
self, model_path, prompt, n_ctx=512, n_gpu_layers=0, stream=True
30+
):
31+
model = await self.get_model(
32+
model_path=model_path, n_ctx=n_ctx, n_gpu_layers=n_gpu_layers
33+
)
2634

2735
for chunk in model.create_completion(prompt=prompt, stream=stream):
2836
yield chunk
2937

30-
async def chat(self, model_path, n_ctx=512, n_gpu_layers=0, **chat_completion_request):
31-
model = await self.get_model(model_path=model_path, n_ctx=n_ctx,
32-
n_gpu_layers=n_gpu_layers)
38+
async def chat(
39+
self, model_path, n_ctx=512, n_gpu_layers=0, **chat_completion_request
40+
):
41+
model = await self.get_model(
42+
model_path=model_path, n_ctx=n_ctx, n_gpu_layers=n_gpu_layers
43+
)
3344
return model.create_completion(**chat_completion_request)
3445

3546
async def embed(self, model_path, content):

utils/embedding_util.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
from transformers import AutoTokenizer, AutoModel
22
import torch
3-
import torch.nn.functional as F
3+
import torch.nn.functional as f
44
from torch import Tensor
55
import os
66
import warnings
@@ -35,6 +35,6 @@ def generate_embeddings(text):
3535
embeddings = average_pool(outputs.last_hidden_state, attention_mask)
3636

3737
# (Optionally) normalize embeddings
38-
embeddings = F.normalize(embeddings, p=2, dim=1)
38+
embeddings = f.normalize(embeddings, p=2, dim=1)
3939

4040
return embeddings.numpy().tolist()[0]

0 commit comments

Comments (0)