Mercurial > hg
comparison contrib/python-zstandard/c-ext/decompressor.c @ 30435:b86a448a2965
zstd: vendor python-zstandard 0.5.0
As the commit message for the previous changeset says, we wish
for zstd to be a 1st class citizen in Mercurial. To make that
happen, we need to enable Python to talk to the zstd C API. And
that requires bindings.
This commit vendors a copy of existing Python bindings. Why do we
need to vendor? As the commit message of the previous commit says,
relying on systems in the wild to have the bindings or zstd present
is a losing proposition. By distributing the zstd and bindings with
Mercurial, we significantly increase our chances that zstd will
work. Since zstd will deliver a better end-user experience by
achieving better performance, this benefits our users. Another
reason is that the Python bindings still aren't stable and the
API is somewhat fluid. While Mercurial could be coded to target
multiple versions of the Python bindings, it is safer to bundle
an explicit, known working version.
The added Python bindings are mostly a fully-featured interface
to the zstd C API. They allow one-shot operations, streaming,
reading and writing from objects implementing the file object
protocol, dictionary compression, control over low-level compression
parameters, and more. The Python bindings work on Python 2.6,
2.7, and 3.3+ and have been tested on Linux and Windows. There are
CFFI bindings, but they are lacking compared to the C extension.
Upstream work will be needed before we can support zstd with PyPy.
But it will be possible.
The files added in this commit come from Git commit
e637c1b214d5f869cf8116c550dcae23ec13b677 from
https://github.com/indygreg/python-zstandard and are added without
modifications. Some files from the upstream repository have been
omitted, namely files related to continuous integration.
In the spirit of full disclosure, I'm the maintainer of the
"python-zstandard" project and have authored 100% of the code
added in this commit. Unfortunately, the Python bindings have
not been formally code reviewed by anyone. While I've tested
much of the code thoroughly (I even have tests that fuzz APIs),
there's a good chance there are bugs, memory leaks, not well
thought out APIs, etc. If someone wants to review the code and
send feedback to the GitHub project, it would be greatly
appreciated.
Despite my involvement with both projects, my opinions of code
style differ from Mercurial's. The code in this commit introduces
numerous code style violations in Mercurial's linters. So, the code
is excluded from most lints. However, some violations I agree with.
These have been added to the known violations ignore list for now.
author | Gregory Szorc <gregory.szorc@gmail.com> |
---|---|
date | Thu, 10 Nov 2016 22:15:58 -0800 |
parents | |
children | 08fa3a76a080 |
comparison
equal
deleted
inserted
replaced
30434:2e484bdea8c4 | 30435:b86a448a2965 |
---|---|
1 /** | |
2 * Copyright (c) 2016-present, Gregory Szorc | |
3 * All rights reserved. | |
4 * | |
5 * This software may be modified and distributed under the terms | |
6 * of the BSD license. See the LICENSE file for details. | |
7 */ | |
8 | |
9 #include "python-zstandard.h" | |
10 | |
11 extern PyObject* ZstdError; | |
12 | |
13 ZSTD_DStream* DStream_from_ZstdDecompressor(ZstdDecompressor* decompressor) { | |
14 ZSTD_DStream* dstream; | |
15 void* dictData = NULL; | |
16 size_t dictSize = 0; | |
17 size_t zresult; | |
18 | |
19 dstream = ZSTD_createDStream(); | |
20 if (!dstream) { | |
21 PyErr_SetString(ZstdError, "could not create DStream"); | |
22 return NULL; | |
23 } | |
24 | |
25 if (decompressor->dict) { | |
26 dictData = decompressor->dict->dictData; | |
27 dictSize = decompressor->dict->dictSize; | |
28 } | |
29 | |
30 if (dictData) { | |
31 zresult = ZSTD_initDStream_usingDict(dstream, dictData, dictSize); | |
32 } | |
33 else { | |
34 zresult = ZSTD_initDStream(dstream); | |
35 } | |
36 | |
37 if (ZSTD_isError(zresult)) { | |
38 PyErr_Format(ZstdError, "could not initialize DStream: %s", | |
39 ZSTD_getErrorName(zresult)); | |
40 return NULL; | |
41 } | |
42 | |
43 return dstream; | |
44 } | |
45 | |
PyDoc_STRVAR(Decompressor__doc__,
"ZstdDecompressor(dict_data=None)\n"
"\n"
"Create an object used to perform Zstandard decompression.\n"
"\n"
"An instance can perform multiple decompression operations."
);

/* Initializer for ZstdDecompressor instances.
 *
 * Accepts an optional ZstdCompressionDict via the "dict_data" keyword.
 * Returns 0 on success, -1 (with a Python exception set) on failure. */
static int Decompressor_init(ZstdDecompressor* self, PyObject* args, PyObject* kwargs) {
	static char* kwlist[] = {
		"dict_data",
		NULL
	};

	ZstdCompressionDict* dict = NULL;

	/* Reset all fields up front so Decompressor_dealloc is safe to run on
	   any failure path. */
	self->refdctx = NULL;
	self->dict = NULL;
	self->ddict = NULL;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!", kwlist,
		&ZstdCompressionDictType, &dict)) {
		return -1;
	}

	/* Instead of creating a ZSTD_DCtx for every decompression operation,
	   we create an instance at object creation time and recycle it via
	   ZSTD_copyDCtx() on each use. This means each use is a malloc+memcpy
	   instead of a malloc+init. */
	/* TODO lazily initialize the reference ZSTD_DCtx on first use since
	   not all instances of ZstdDecompressor will use a ZSTD_DCtx. */
	self->refdctx = ZSTD_createDCtx();
	if (!self->refdctx) {
		PyErr_NoMemory();
		goto except;
	}

	if (dict) {
		/* Hold a reference to the dictionary for this object's lifetime. */
		self->dict = dict;
		Py_INCREF(dict);
	}

	return 0;

except:
	if (self->refdctx) {
		ZSTD_freeDCtx(self->refdctx);
		self->refdctx = NULL;
	}

	return -1;
}
98 | |
99 static void Decompressor_dealloc(ZstdDecompressor* self) { | |
100 if (self->refdctx) { | |
101 ZSTD_freeDCtx(self->refdctx); | |
102 } | |
103 | |
104 Py_XDECREF(self->dict); | |
105 | |
106 if (self->ddict) { | |
107 ZSTD_freeDDict(self->ddict); | |
108 self->ddict = NULL; | |
109 } | |
110 | |
111 PyObject_Del(self); | |
112 } | |
113 | |
114 PyDoc_STRVAR(Decompressor_copy_stream__doc__, | |
115 "copy_stream(ifh, ofh[, read_size=default, write_size=default]) -- decompress data between streams\n" | |
116 "\n" | |
117 "Compressed data will be read from ``ifh``, decompressed, and written to\n" | |
118 "``ofh``. ``ifh`` must have a ``read(size)`` method. ``ofh`` must have a\n" | |
119 "``write(data)`` method.\n" | |
120 "\n" | |
121 "The optional ``read_size`` and ``write_size`` arguments control the chunk\n" | |
122 "size of data that is ``read()`` and ``write()`` between streams. They default\n" | |
123 "to the default input and output sizes of zstd decompressor streams.\n" | |
124 ); | |
125 | |
126 static PyObject* Decompressor_copy_stream(ZstdDecompressor* self, PyObject* args, PyObject* kwargs) { | |
127 static char* kwlist[] = { | |
128 "ifh", | |
129 "ofh", | |
130 "read_size", | |
131 "write_size", | |
132 NULL | |
133 }; | |
134 | |
135 PyObject* source; | |
136 PyObject* dest; | |
137 size_t inSize = ZSTD_DStreamInSize(); | |
138 size_t outSize = ZSTD_DStreamOutSize(); | |
139 ZSTD_DStream* dstream; | |
140 ZSTD_inBuffer input; | |
141 ZSTD_outBuffer output; | |
142 Py_ssize_t totalRead = 0; | |
143 Py_ssize_t totalWrite = 0; | |
144 char* readBuffer; | |
145 Py_ssize_t readSize; | |
146 PyObject* readResult; | |
147 PyObject* res = NULL; | |
148 size_t zresult = 0; | |
149 PyObject* writeResult; | |
150 PyObject* totalReadPy; | |
151 PyObject* totalWritePy; | |
152 | |
153 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|kk", kwlist, &source, | |
154 &dest, &inSize, &outSize)) { | |
155 return NULL; | |
156 } | |
157 | |
158 if (!PyObject_HasAttrString(source, "read")) { | |
159 PyErr_SetString(PyExc_ValueError, "first argument must have a read() method"); | |
160 return NULL; | |
161 } | |
162 | |
163 if (!PyObject_HasAttrString(dest, "write")) { | |
164 PyErr_SetString(PyExc_ValueError, "second argument must have a write() method"); | |
165 return NULL; | |
166 } | |
167 | |
168 dstream = DStream_from_ZstdDecompressor(self); | |
169 if (!dstream) { | |
170 res = NULL; | |
171 goto finally; | |
172 } | |
173 | |
174 output.dst = PyMem_Malloc(outSize); | |
175 if (!output.dst) { | |
176 PyErr_NoMemory(); | |
177 res = NULL; | |
178 goto finally; | |
179 } | |
180 output.size = outSize; | |
181 output.pos = 0; | |
182 | |
183 /* Read source stream until EOF */ | |
184 while (1) { | |
185 readResult = PyObject_CallMethod(source, "read", "n", inSize); | |
186 if (!readResult) { | |
187 PyErr_SetString(ZstdError, "could not read() from source"); | |
188 goto finally; | |
189 } | |
190 | |
191 PyBytes_AsStringAndSize(readResult, &readBuffer, &readSize); | |
192 | |
193 /* If no data was read, we're at EOF. */ | |
194 if (0 == readSize) { | |
195 break; | |
196 } | |
197 | |
198 totalRead += readSize; | |
199 | |
200 /* Send data to decompressor */ | |
201 input.src = readBuffer; | |
202 input.size = readSize; | |
203 input.pos = 0; | |
204 | |
205 while (input.pos < input.size) { | |
206 Py_BEGIN_ALLOW_THREADS | |
207 zresult = ZSTD_decompressStream(dstream, &output, &input); | |
208 Py_END_ALLOW_THREADS | |
209 | |
210 if (ZSTD_isError(zresult)) { | |
211 PyErr_Format(ZstdError, "zstd decompressor error: %s", | |
212 ZSTD_getErrorName(zresult)); | |
213 res = NULL; | |
214 goto finally; | |
215 } | |
216 | |
217 if (output.pos) { | |
218 #if PY_MAJOR_VERSION >= 3 | |
219 writeResult = PyObject_CallMethod(dest, "write", "y#", | |
220 #else | |
221 writeResult = PyObject_CallMethod(dest, "write", "s#", | |
222 #endif | |
223 output.dst, output.pos); | |
224 | |
225 Py_XDECREF(writeResult); | |
226 totalWrite += output.pos; | |
227 output.pos = 0; | |
228 } | |
229 } | |
230 } | |
231 | |
232 /* Source stream is exhausted. Finish up. */ | |
233 | |
234 ZSTD_freeDStream(dstream); | |
235 dstream = NULL; | |
236 | |
237 totalReadPy = PyLong_FromSsize_t(totalRead); | |
238 totalWritePy = PyLong_FromSsize_t(totalWrite); | |
239 res = PyTuple_Pack(2, totalReadPy, totalWritePy); | |
240 Py_DecRef(totalReadPy); | |
241 Py_DecRef(totalWritePy); | |
242 | |
243 finally: | |
244 if (output.dst) { | |
245 PyMem_Free(output.dst); | |
246 } | |
247 | |
248 if (dstream) { | |
249 ZSTD_freeDStream(dstream); | |
250 } | |
251 | |
252 return res; | |
253 } | |
254 | |
PyDoc_STRVAR(Decompressor_decompress__doc__,
"decompress(data[, max_output_size=None]) -- Decompress data in its entirety\n"
"\n"
"This method will decompress the entirety of the argument and return the\n"
"result.\n"
"\n"
"The input bytes are expected to contain a full Zstandard frame (something\n"
"compressed with ``ZstdCompressor.compress()`` or similar). If the input does\n"
"not contain a full frame, an exception will be raised.\n"
"\n"
"If the frame header of the compressed data does not contain the content size\n"
"``max_output_size`` must be specified or ``ZstdError`` will be raised. An\n"
"allocation of size ``max_output_size`` will be performed and an attempt will\n"
"be made to perform decompression into that buffer. If the buffer is too\n"
"small or cannot be allocated, ``ZstdError`` will be raised. The buffer will\n"
"be resized if it is too large.\n"
"\n"
"Uncompressed data could be much larger than compressed data. As a result,\n"
"calling this function could result in a very large memory allocation being\n"
"performed to hold the uncompressed data. Therefore it is **highly**\n"
"recommended to use a streaming decompression method instead of this one.\n"
);

/* One-shot decompression of a complete zstd frame.
 *
 * Returns a new bytes object with the decompressed payload, or NULL with an
 * exception set. The output buffer is sized from the frame header's content
 * size when present, otherwise from the caller-supplied max_output_size. */
PyObject* Decompressor_decompress(ZstdDecompressor* self, PyObject* args, PyObject* kwargs) {
	static char* kwlist[] = {
		"data",
		"max_output_size",
		NULL
	};

	const char* source;
	Py_ssize_t sourceSize;
	Py_ssize_t maxOutputSize = 0;
	unsigned long long decompressedSize;
	size_t destCapacity;
	PyObject* result = NULL;
	ZSTD_DCtx* dctx = NULL;
	void* dictData = NULL;
	size_t dictSize = 0;
	size_t zresult;

#if PY_MAJOR_VERSION >= 3
	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|n", kwlist,
#else
	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|n", kwlist,
#endif
		&source, &sourceSize, &maxOutputSize)) {
		return NULL;
	}

	/* Clone the reference DCtx (malloc + memcpy) rather than paying for a
	   fresh ZSTD_createDCtx() per call. */
	dctx = PyMem_Malloc(ZSTD_sizeof_DCtx(self->refdctx));
	if (!dctx) {
		PyErr_NoMemory();
		return NULL;
	}

	ZSTD_copyDCtx(dctx, self->refdctx);

	if (self->dict) {
		dictData = self->dict->dictData;
		dictSize = self->dict->dictSize;
	}

	/* Lazily build the digested dictionary on first use and cache it on the
	   decompressor for subsequent calls. */
	if (dictData && !self->ddict) {
		Py_BEGIN_ALLOW_THREADS
		self->ddict = ZSTD_createDDict(dictData, dictSize);
		Py_END_ALLOW_THREADS

		if (!self->ddict) {
			PyErr_SetString(ZstdError, "could not create decompression dict");
			goto except;
		}
	}

	decompressedSize = ZSTD_getDecompressedSize(source, sourceSize);
	/* 0 returned if content size not in the zstd frame header */
	if (0 == decompressedSize) {
		if (0 == maxOutputSize) {
			PyErr_SetString(ZstdError, "input data invalid or missing content size "
				"in frame header");
			goto except;
		}
		else {
			result = PyBytes_FromStringAndSize(NULL, maxOutputSize);
			destCapacity = maxOutputSize;
		}
	}
	else {
		result = PyBytes_FromStringAndSize(NULL, decompressedSize);
		destCapacity = decompressedSize;
	}

	if (!result) {
		goto except;
	}

	/* Decompress directly into the bytes object's internal buffer. */
	Py_BEGIN_ALLOW_THREADS
	if (self->ddict) {
		zresult = ZSTD_decompress_usingDDict(dctx, PyBytes_AsString(result), destCapacity,
			source, sourceSize, self->ddict);
	}
	else {
		zresult = ZSTD_decompressDCtx(dctx, PyBytes_AsString(result), destCapacity, source, sourceSize);
	}
	Py_END_ALLOW_THREADS

	if (ZSTD_isError(zresult)) {
		PyErr_Format(ZstdError, "decompression error: %s", ZSTD_getErrorName(zresult));
		goto except;
	}
	/* If the frame header declared a size, the output must match exactly. */
	else if (decompressedSize && zresult != decompressedSize) {
		PyErr_Format(ZstdError, "decompression error: decompressed %zu bytes; expected %llu",
			zresult, decompressedSize);
		goto except;
	}
	/* Shrink an over-allocated buffer (the max_output_size case). */
	else if (zresult < destCapacity) {
		if (_PyBytes_Resize(&result, zresult)) {
			goto except;
		}
	}

	goto finally;

except:
	/* Py_DecRef is NULL-safe, so this is fine even before allocation. */
	Py_DecRef(result);
	result = NULL;

finally:
	if (dctx) {
		PyMem_FREE(dctx);
	}

	return result;
}
389 | |
390 PyDoc_STRVAR(Decompressor_decompressobj__doc__, | |
391 "decompressobj()\n" | |
392 "\n" | |
393 "Incrementally feed data into a decompressor.\n" | |
394 "\n" | |
395 "The returned object exposes a ``decompress(data)`` method. This makes it\n" | |
396 "compatible with ``zlib.decompressobj`` and ``bz2.BZ2Decompressor`` so that\n" | |
397 "callers can swap in the zstd decompressor while using the same API.\n" | |
398 ); | |
399 | |
400 static ZstdDecompressionObj* Decompressor_decompressobj(ZstdDecompressor* self) { | |
401 ZstdDecompressionObj* result = PyObject_New(ZstdDecompressionObj, &ZstdDecompressionObjType); | |
402 if (!result) { | |
403 return NULL; | |
404 } | |
405 | |
406 result->dstream = DStream_from_ZstdDecompressor(self); | |
407 if (!result->dstream) { | |
408 Py_DecRef((PyObject*)result); | |
409 return NULL; | |
410 } | |
411 | |
412 result->decompressor = self; | |
413 Py_INCREF(result->decompressor); | |
414 | |
415 result->finished = 0; | |
416 | |
417 return result; | |
418 } | |
419 | |
420 PyDoc_STRVAR(Decompressor_read_from__doc__, | |
421 "read_from(reader[, read_size=default, write_size=default, skip_bytes=0])\n" | |
422 "Read compressed data and return an iterator\n" | |
423 "\n" | |
424 "Returns an iterator of decompressed data chunks produced from reading from\n" | |
425 "the ``reader``.\n" | |
426 "\n" | |
427 "Compressed data will be obtained from ``reader`` by calling the\n" | |
428 "``read(size)`` method of it. The source data will be streamed into a\n" | |
429 "decompressor. As decompressed data is available, it will be exposed to the\n" | |
430 "returned iterator.\n" | |
431 "\n" | |
432 "Data is ``read()`` in chunks of size ``read_size`` and exposed to the\n" | |
433 "iterator in chunks of size ``write_size``. The default values are the input\n" | |
434 "and output sizes for a zstd streaming decompressor.\n" | |
435 "\n" | |
436 "There is also support for skipping the first ``skip_bytes`` of data from\n" | |
437 "the source.\n" | |
438 ); | |
439 | |
440 static ZstdDecompressorIterator* Decompressor_read_from(ZstdDecompressor* self, PyObject* args, PyObject* kwargs) { | |
441 static char* kwlist[] = { | |
442 "reader", | |
443 "read_size", | |
444 "write_size", | |
445 "skip_bytes", | |
446 NULL | |
447 }; | |
448 | |
449 PyObject* reader; | |
450 size_t inSize = ZSTD_DStreamInSize(); | |
451 size_t outSize = ZSTD_DStreamOutSize(); | |
452 ZstdDecompressorIterator* result; | |
453 size_t skipBytes = 0; | |
454 | |
455 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|kkk", kwlist, &reader, | |
456 &inSize, &outSize, &skipBytes)) { | |
457 return NULL; | |
458 } | |
459 | |
460 if (skipBytes >= inSize) { | |
461 PyErr_SetString(PyExc_ValueError, | |
462 "skip_bytes must be smaller than read_size"); | |
463 return NULL; | |
464 } | |
465 | |
466 result = PyObject_New(ZstdDecompressorIterator, &ZstdDecompressorIteratorType); | |
467 if (!result) { | |
468 return NULL; | |
469 } | |
470 | |
471 result->decompressor = NULL; | |
472 result->reader = NULL; | |
473 result->buffer = NULL; | |
474 result->dstream = NULL; | |
475 result->input.src = NULL; | |
476 result->output.dst = NULL; | |
477 | |
478 if (PyObject_HasAttrString(reader, "read")) { | |
479 result->reader = reader; | |
480 Py_INCREF(result->reader); | |
481 } | |
482 else if (1 == PyObject_CheckBuffer(reader)) { | |
483 /* Object claims it is a buffer. Try to get a handle to it. */ | |
484 result->buffer = PyMem_Malloc(sizeof(Py_buffer)); | |
485 if (!result->buffer) { | |
486 goto except; | |
487 } | |
488 | |
489 memset(result->buffer, 0, sizeof(Py_buffer)); | |
490 | |
491 if (0 != PyObject_GetBuffer(reader, result->buffer, PyBUF_CONTIG_RO)) { | |
492 goto except; | |
493 } | |
494 | |
495 result->bufferOffset = 0; | |
496 } | |
497 else { | |
498 PyErr_SetString(PyExc_ValueError, | |
499 "must pass an object with a read() method or conforms to buffer protocol"); | |
500 goto except; | |
501 } | |
502 | |
503 result->decompressor = self; | |
504 Py_INCREF(result->decompressor); | |
505 | |
506 result->inSize = inSize; | |
507 result->outSize = outSize; | |
508 result->skipBytes = skipBytes; | |
509 | |
510 result->dstream = DStream_from_ZstdDecompressor(self); | |
511 if (!result->dstream) { | |
512 goto except; | |
513 } | |
514 | |
515 result->input.src = PyMem_Malloc(inSize); | |
516 if (!result->input.src) { | |
517 PyErr_NoMemory(); | |
518 goto except; | |
519 } | |
520 result->input.size = 0; | |
521 result->input.pos = 0; | |
522 | |
523 result->output.dst = NULL; | |
524 result->output.size = 0; | |
525 result->output.pos = 0; | |
526 | |
527 result->readCount = 0; | |
528 result->finishedInput = 0; | |
529 result->finishedOutput = 0; | |
530 | |
531 goto finally; | |
532 | |
533 except: | |
534 if (result->reader) { | |
535 Py_DECREF(result->reader); | |
536 result->reader = NULL; | |
537 } | |
538 | |
539 if (result->buffer) { | |
540 PyBuffer_Release(result->buffer); | |
541 Py_DECREF(result->buffer); | |
542 result->buffer = NULL; | |
543 } | |
544 | |
545 Py_DECREF(result); | |
546 result = NULL; | |
547 | |
548 finally: | |
549 | |
550 return result; | |
551 } | |
552 | |
553 PyDoc_STRVAR(Decompressor_write_to__doc__, | |
554 "Create a context manager to write decompressed data to an object.\n" | |
555 "\n" | |
556 "The passed object must have a ``write()`` method.\n" | |
557 "\n" | |
558 "The caller feeds intput data to the object by calling ``write(data)``.\n" | |
559 "Decompressed data is written to the argument given as it is decompressed.\n" | |
560 "\n" | |
561 "An optional ``write_size`` argument defines the size of chunks to\n" | |
562 "``write()`` to the writer. It defaults to the default output size for a zstd\n" | |
563 "streaming decompressor.\n" | |
564 ); | |
565 | |
566 static ZstdDecompressionWriter* Decompressor_write_to(ZstdDecompressor* self, PyObject* args, PyObject* kwargs) { | |
567 static char* kwlist[] = { | |
568 "writer", | |
569 "write_size", | |
570 NULL | |
571 }; | |
572 | |
573 PyObject* writer; | |
574 size_t outSize = ZSTD_DStreamOutSize(); | |
575 ZstdDecompressionWriter* result; | |
576 | |
577 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|k", kwlist, &writer, &outSize)) { | |
578 return NULL; | |
579 } | |
580 | |
581 if (!PyObject_HasAttrString(writer, "write")) { | |
582 PyErr_SetString(PyExc_ValueError, "must pass an object with a write() method"); | |
583 return NULL; | |
584 } | |
585 | |
586 result = PyObject_New(ZstdDecompressionWriter, &ZstdDecompressionWriterType); | |
587 if (!result) { | |
588 return NULL; | |
589 } | |
590 | |
591 result->decompressor = self; | |
592 Py_INCREF(result->decompressor); | |
593 | |
594 result->writer = writer; | |
595 Py_INCREF(result->writer); | |
596 | |
597 result->outSize = outSize; | |
598 | |
599 result->entered = 0; | |
600 result->dstream = NULL; | |
601 | |
602 return result; | |
603 } | |
604 | |
/* Method table for ZstdDecompressor instances. */
static PyMethodDef Decompressor_methods[] = {
	{ "copy_stream", (PyCFunction)Decompressor_copy_stream, METH_VARARGS | METH_KEYWORDS,
	Decompressor_copy_stream__doc__ },
	{ "decompress", (PyCFunction)Decompressor_decompress, METH_VARARGS | METH_KEYWORDS,
	Decompressor_decompress__doc__ },
	{ "decompressobj", (PyCFunction)Decompressor_decompressobj, METH_NOARGS,
	Decompressor_decompressobj__doc__ },
	{ "read_from", (PyCFunction)Decompressor_read_from, METH_VARARGS | METH_KEYWORDS,
	Decompressor_read_from__doc__ },
	{ "write_to", (PyCFunction)Decompressor_write_to, METH_VARARGS | METH_KEYWORDS,
	Decompressor_write_to__doc__ },
	{ NULL, NULL }
};
618 | |
/* Type object for zstd.ZstdDecompressor. */
PyTypeObject ZstdDecompressorType = {
	PyVarObject_HEAD_INIT(NULL, 0)
	"zstd.ZstdDecompressor",        /* tp_name */
	sizeof(ZstdDecompressor),       /* tp_basicsize */
	0,                              /* tp_itemsize */
	(destructor)Decompressor_dealloc, /* tp_dealloc */
	0,                              /* tp_print */
	0,                              /* tp_getattr */
	0,                              /* tp_setattr */
	0,                              /* tp_compare */
	0,                              /* tp_repr */
	0,                              /* tp_as_number */
	0,                              /* tp_as_sequence */
	0,                              /* tp_as_mapping */
	0,                              /* tp_hash */
	0,                              /* tp_call */
	0,                              /* tp_str */
	0,                              /* tp_getattro */
	0,                              /* tp_setattro */
	0,                              /* tp_as_buffer */
	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
	Decompressor__doc__,            /* tp_doc */
	0,                              /* tp_traverse */
	0,                              /* tp_clear */
	0,                              /* tp_richcompare */
	0,                              /* tp_weaklistoffset */
	0,                              /* tp_iter */
	0,                              /* tp_iternext */
	Decompressor_methods,           /* tp_methods */
	0,                              /* tp_members */
	0,                              /* tp_getset */
	0,                              /* tp_base */
	0,                              /* tp_dict */
	0,                              /* tp_descr_get */
	0,                              /* tp_descr_set */
	0,                              /* tp_dictoffset */
	(initproc)Decompressor_init,    /* tp_init */
	0,                              /* tp_alloc */
	PyType_GenericNew,              /* tp_new */
};
659 | |
660 void decompressor_module_init(PyObject* mod) { | |
661 Py_TYPE(&ZstdDecompressorType) = &PyType_Type; | |
662 if (PyType_Ready(&ZstdDecompressorType) < 0) { | |
663 return; | |
664 } | |
665 | |
666 Py_INCREF((PyObject*)&ZstdDecompressorType); | |
667 PyModule_AddObject(mod, "ZstdDecompressor", | |
668 (PyObject*)&ZstdDecompressorType); | |
669 } |