|
5 | 5 | from typing import Any |
6 | 6 |
|
7 | 7 |
|
8 | | -try: |
9 | | - from pydantic import BaseModel |
10 | | - from pydantic.version import VERSION as PYDANTIC_VERSION |
11 | | - |
12 | | - IS_V2 = not PYDANTIC_VERSION.startswith('1') |
13 | | -except ImportError: |
14 | | - from pydantic.v1 import BaseModel |
15 | | - |
16 | | - IS_V2 = False |
17 | | - |
18 | | -if IS_V2: |
19 | | - from pydantic import ConfigDict |
20 | | - |
21 | | - from python_anvil.api_resources.base import underscore_to_camel |
22 | | - |
23 | | - class FileCompatibleBaseModel(BaseModel): |
24 | | - """ |
25 | | - Patched model_dump to extract file objects from SerializationIterator in V2 |
26 | | - and return as BufferedReader or base64 encoded dict as needed. |
27 | | - """ |
28 | | - |
29 | | - # Allow extra fields even if it is not defined. This will allow models |
30 | | - # to be more flexible if features are added in the Anvil API, but |
31 | | - # explicit support hasn't been added yet to this library. |
32 | | - model_config = ConfigDict( |
33 | | - alias_generator=underscore_to_camel, populate_by_name=True, extra="allow" |
| 8 | +from pydantic import BaseModel, ConfigDict |
# NOTE: the pydantic v1/v2 compatibility shim was removed; this module now
# requires pydantic v2.
| 20 | +from python_anvil.api_resources.base import underscore_to_camel |
| 21 | + |
class FileCompatibleBaseModel(BaseModel):
    """
    Patched model_dump to extract file objects from SerializationIterator in V2
    and return as BufferedReader or base64 encoded dict as needed.
    """

    # Allow extra fields even if it is not defined. This will allow models
    # to be more flexible if features are added in the Anvil API, but
    # explicit support hasn't been added yet to this library.
    model_config = ConfigDict(
        alias_generator=underscore_to_camel, populate_by_name=True, extra="allow"
    )

    def _iterator_to_buffered_reader(self, value):
        """Drain ``value`` (an iterator of byte chunks) into a ``BufferedReader``.

        The returned reader carries a ``len`` attribute so that
        requests_toolbelt can determine the payload size without seeking.
        """
        content = bytearray()
        try:
            while True:
                content.extend(next(value))
        except StopIteration:
            # Iterator exhausted: wrap the accumulated bytes in a reader.
            reader = BufferedReader(BytesIO(bytes(content)))  # type: ignore[arg-type]
            # We already know the total size; no need to seek to the end
            # and back. requests_toolbelt reads this attribute for length.
            reader.len = len(content)  # type: ignore[attr-defined]
            return reader

    def _check_if_serialization_iterator(self, value):
        """Return True if ``value`` looks like pydantic's SerializationIterator.

        Checked by type name since the type is not part of pydantic's
        public API; the ``__next__`` check guards against unrelated types
        that happen to share the name.
        """
        # __name__ is already a str; no str() wrapper needed.
        return type(value).__name__ == 'SerializationIterator' and hasattr(
            value, '__next__'
        )

    def _process_file_data(self, file_obj):
        """Process file object into base64 encoded dict format."""
        # Read the file data and encode it as base64
        file_content = file_obj.read()

        # Get filename - handle both regular files and BytesIO objects
        # (BytesIO has no ``name``, so fall back to a generic default).
        filename = getattr(file_obj, 'name', "document.pdf")

        if isinstance(filename, (bytes, bytearray)):
            filename = filename.decode('utf-8')

        # manage mimetype based on file extension
        mimetype = guess_type(filename)[0] or 'application/pdf'

        return {
            'data': base64.b64encode(file_content).decode('utf-8'),
            'mimetype': mimetype,
            'filename': os.path.basename(filename),
        }

    def model_dump(self, **kwargs):
        """Dump the model, converting any SerializationIterator file values
        into base64-encoded file dicts.

        Handles two layouts: a top-level ``file`` key holding the iterator
        directly, and a top-level ``files`` list whose dict items each hold
        a ``file`` entry.
        """
        data = super().model_dump(**kwargs)
        for key, value in data.items():
            if key == 'file' and self._check_if_serialization_iterator(value):
                # Direct file case
                file_obj = self._iterator_to_buffered_reader(value)
                data[key] = self._process_file_data(file_obj)
            elif key == 'files' and isinstance(value, list):
                # List of objects case
                for index, item in enumerate(value):
                    if isinstance(item, dict) and 'file' in item:
                        if self._check_if_serialization_iterator(item['file']):
                            file_obj = self._iterator_to_buffered_reader(
                                item['file']
                            )
                            data[key][index]['file'] = self._process_file_data(
                                file_obj
                            )
        return data
0 commit comments