Compare commits


195 Commits

Author SHA1 Message Date
Davide Borgonovo 0581b34525 Removed a useless check on the name ending with .pdf 2022-03-18 14:11:44 +01:00
Vinayak Mehta 644bbe7c6d
Bump version and update HISTORY.md 2021-07-11 23:37:18 +05:30
Vinayak Mehta 1b4aa114e3
Change setup extra from cv to base 2021-07-11 23:35:13 +05:30
Vinayak Mehta 552846ca2a
Merge pull request #254 from camelot-dev/change-get-backend
Change get backend method for Lattice and run tests
2021-07-11 23:22:17 +05:30
Vinayak Mehta 1afe429034
Change get backend method for Lattice and run tests 2021-07-11 23:12:20 +05:30
Vinayak Mehta 8abd69fc24
Update HISTORY.md 2021-07-11 23:09:38 +05:30
Vinayak Mehta 4058311831
Fix status badge 2021-07-11 23:07:06 +05:30
Vinayak Mehta cc820b9e5d
Add test for page list generation and fix backend kwarg 2021-07-11 22:47:01 +05:30
Vinayak Mehta 8650f25331
Format code using black 2021-07-11 19:33:02 +05:30
Vinayak Mehta 760d9eda47
Update docs 2021-07-11 19:31:51 +05:30
Vinayak Mehta 3c04842d97
Merge pull request #253 from camelot-dev/make-gs-default-backend
Make ghostscript default backend and add support for string keyword arguments
2021-07-11 19:31:03 +05:30
Vinayak Mehta 02f53e7654
Warn instead of raise, fix imports, and use skipif instead of skip 2021-07-11 19:15:36 +05:30
Vinayak Mehta f160c1d44d
Raise ghostscript deprecation warning and skip ghostscript tests on windows 2021-07-11 18:55:56 +05:30
Vinayak Mehta 793ddaf42f
Update HISTORY.md 2021-07-11 17:28:24 +05:30
Vinayak Mehta 8abe02528b
Make ghostscript default backend and add support for string keywords 2021-07-11 17:25:56 +05:30
Vinayak Mehta f43235934b
Bump version and update docs 2021-07-07 04:29:23 +05:30
Vinayak Mehta d39ca4502b
Merge pull request #246 from camelot-dev/use-explicit-backends
Use PopplerBackend and GhostscriptBackend in test_plotting.py
2021-07-06 03:06:50 +05:30
Vinayak Mehta 1dc8f4e241
Use PopplerBackend and GhostscriptBackend in test_plotting.py 2021-07-06 02:58:41 +05:30
Vinayak Mehta 2aa982b51d
Merge pull request #198 from camelot-dev/add-pdftopng
Add pdftopng and use ghostscript as fallback
2021-07-05 05:22:21 +05:30
Vinayak Mehta e9c0f55690
Test image_conversion.py 2021-07-05 05:12:43 +05:30
Vinayak Mehta 65b4ea623c
Update pdftopng version 2021-07-05 04:35:14 +05:30
Vinayak Mehta 458181cd1d
Don't use fallback for image conversion backend tests 2021-07-04 20:22:32 +05:30
Vinayak Mehta c3b0fa30dc
Remove logger.info and use shutil.which to find pdftopng executable 2021-07-04 20:02:47 +05:30
Vinayak Mehta 4628be9251
Skip ghostscript tests on windows 2021-07-04 19:51:01 +05:30
Vinayak Mehta 4c78dadd55
Skip ghostscript tests on windows 2021-07-04 19:40:53 +05:30
Vinayak Mehta e76a7a7c26
Delay ghostscript import 2021-07-04 19:27:29 +05:30
Vinayak Mehta 0c20cb0be8
Update gh workflow and remove logging 2021-07-04 19:21:50 +05:30
Vinayak Mehta ff7260a228
Add separate tests for poppler and ghostscript 2021-07-04 19:02:26 +05:30
Vinayak Mehta 4dd1e7fb15
Call pdftopng in subprocess 2021-07-04 18:52:38 +05:30
Vinayak Mehta 4c32c45534
Reorder tests 2021-07-04 05:33:23 +05:30
Vinayak Mehta 36dcfe99d8
Split tests for lattice and stream, and fix test_common reprs 2021-06-28 05:32:48 +05:30
Vinayak Mehta 3ddc02b2f2
Fix fallback key 2021-06-28 03:24:15 +05:30
Vinayak Mehta 57034b88b0
Update .gitignore 2021-06-28 03:20:06 +05:30
Vinayak Mehta 0c5aff60b4
Remove newline
Delete cache
2021-06-28 03:19:35 +05:30
Vinayak Mehta 3e4e848a09
Add fallbacks to image conversion 2021-06-28 03:16:54 +05:30
Vinayak Mehta a96702987f
Raise error if ghostscript not installed 2021-06-28 02:20:44 +05:30
Vinayak Mehta 4cebd684ba
Remove ext.ghostscript 2021-06-28 02:06:16 +05:30
Vinayak Mehta 8563a09544
Add image conversion backends 2021-06-28 01:58:45 +05:30
Vinayak Mehta fdade4502e
Fix pdftopng usage
Delete cache
2021-06-28 01:10:16 +05:30
Vinayak Mehta 286b2d6a1c
Bump pdftopng version 2021-06-28 00:47:38 +05:30
Vinayak Mehta 57a26fabcb
Add pdftopng 2021-06-28 00:47:05 +05:30
Vinayak Mehta 715e8a9769
Update index.rst 2021-06-28 00:45:14 +05:30
Vinayak Mehta 4eba7b6486
Update docs 2021-06-28 00:42:05 +05:30
Vinayak Mehta 56efcaa925
Merge pull request #222 from Lucas-C/format-markdown
New export format: markdown
2021-06-28 00:35:56 +05:30
Vinayak Mehta acb8f005c2
Merge branch 'master' into format-markdown 2021-06-28 00:32:00 +05:30
Vinayak Mehta 216ec3c90b
Add faq 2021-06-28 00:28:35 +05:30
Vinayak Mehta 3d1c16ca3f
Update README and HISTORY 2021-06-28 00:26:09 +05:30
Vinayak Mehta 335a86bbb2
Merge pull request #216 from anakin87/master
Add faq section
2021-06-28 00:19:39 +05:30
Vinayak Mehta 1f54108f11
Update dev deps 2021-06-28 00:17:04 +05:30
Vinayak Mehta 2aaa913c40
Update faq 2021-06-28 00:15:43 +05:30
Vinayak Mehta cbda72ed54
Fix #229: Update installs-deps.rst 2021-06-27 23:49:25 +05:30
Vinayak Mehta 14e5569a67
Update bug report template 2021-06-27 23:16:21 +05:30
Vinayak Mehta c647f573d8
Bump version 2021-06-15 03:58:30 +05:30
Vinayak Mehta 9a3865c716
Update HISTORY.md 2021-06-15 03:55:46 +05:30
Vinayak Mehta ec21904595
Merge pull request #219 from Arnie97/master
[MRG] Add line_overlap and boxes_flow to LAParams
2021-06-15 03:53:40 +05:30
Vinayak Mehta f53be3c73e
Update HISTORY.md 2021-06-15 03:53:23 +05:30
Vinayak Mehta 38370cf3f4
Merge pull request #231 from tiagosamaha/master
Fix use resolution argument to generate image with GS
2021-06-15 03:51:00 +05:30
Tiago Samaha Cordeiro 3a8f988740
use resolution argument to generate image with GS 2021-06-15 03:46:07 +05:30
Vinayak Mehta 021be79bf7
Fix README 2021-06-15 03:30:34 +05:30
Vinayak Mehta 2c59e7b0f7
Blacken code 2021-06-15 03:29:35 +05:30
Vinayak Mehta f7c14bf1d4
Update HISTORY.md 2021-06-15 03:28:23 +05:30
Vinayak Mehta a2bb555292
Merge pull request #241 from camelot-dev/add-github-workflow
Add github test workflow and remove travis
2021-06-15 03:06:18 +05:30
Vinayak Mehta cf954a7f6d
Rename file and fix badge 2021-06-15 02:41:20 +05:30
Vinayak Mehta 0c9504e1bc
Fix workflow and job names 2021-06-15 02:36:18 +05:30
Vinayak Mehta b5cf8a235d
Add github test workflow and remove travis 2021-06-15 02:29:25 +05:30
Lucas Cimon 955e4b62d0
New export format: markdown 2021-01-13 06:31:30 +01:00
Arnie97 0dee385578 Add line_overlap and boxes_flow to LAParams 2020-12-17 22:12:24 +08:00
anakin87 ba5be43005
Merge pull request #3 from anakin87/anakin87-patch-2
Introduce Faq
2020-12-08 18:58:21 +01:00
anakin87 5c3a686ebe
Introduce Faq
Introduced the FAQ, starting with reducing memory usage.
2020-12-08 18:57:41 +01:00
anakin87 644e17edec
Merge pull request #2 from camelot-dev/master
Update fork
2020-12-08 18:37:55 +01:00
Vinayak Mehta 7709e58d64
Merge pull request #206 from edugonza/fix-15
[MRG] Fix #15 extraction of cell data discarding overlapping text boxes
2020-10-28 14:44:35 +05:30
Eduardo Gonzalez Lopez de Murillas 7695d35449 Fix #15 extraction of cell data discarding overlapping text boxes 2020-10-27 18:06:57 +01:00
Vinayak Mehta 8ca30f3a3c
Merge pull request #202 from tchx84/close-streams-explicitly
[MRG] handlers: Close file streams explicitly
2020-10-25 05:34:38 +05:30
Martin Abente Lahaye 13a50e2ba2 handlers: Close file streams explicitly
No harm in closing these streams explicitly. Best case
scenario, this prevents descriptor leaks; worst case
scenario, it reduces the number of messages like the
following during tests:

ResourceWarning: unclosed file
2020-10-22 11:43:01 -03:00
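A minimal sketch of the idea behind this change (the functions below are illustrative, not the actual handlers patch): close every stream you open, or let a context manager do it, so descriptors are released deterministically and test runs stop printing `ResourceWarning: unclosed file`.

```python
# Illustrative only: explicit close vs. context manager for file streams.
def read_head(filepath, n=1024):
    f = open(filepath, "rb")
    try:
        return f.read(n)
    finally:
        f.close()  # explicit close releases the descriptor immediately


def read_head_ctx(filepath, n=1024):
    # Equivalent behaviour; the with-block closes the stream for us.
    with open(filepath, "rb") as f:
        return f.read(n)
```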
Vinayak Mehta d17dc43ab2
Merge pull request #196 from jimhall/gs-install-deps
Language added to confirm proper installation of ghostscript libraries
2020-10-18 03:14:24 +05:30
Vinayak Mehta de6faa7af1
Add new checks 2020-10-18 03:13:21 +05:30
Jim Hall 468512a8cd Language added to confirm proper installation of ghostscript libraries 2020-10-08 08:21:34 -04:00
Vinayak Mehta 4edca28c53
Merge branch 'master' of github.com:camelot-dev/camelot 2020-09-08 00:35:57 +05:30
Vinayak Mehta 2a7a4f5b34
Update README and index.rst 2020-09-08 00:35:32 +05:30
Vinayak Mehta 0a3944e54d Add bug report template 2020-09-07 23:39:49 +05:30
Vinayak Mehta 6b42094db5
Update year 2020-08-28 17:52:21 +05:30
Vinayak Mehta 937185412a
Merge pull request #189 from camelot-dev/fix-179
[MRG] Prevent taking max of an empty set
2020-08-25 23:03:20 +05:30
Vinayak Mehta 5d20d56e48
Prevent taking max of an empty set 2020-08-25 22:50:31 +05:30
Vinayak Mehta 9087429501
Merge pull request #188 from anakin87/master
[MRG] Add encoding kwarg to camelot.core.Table.to_html method
2020-08-25 19:16:50 +05:30
Vinayak Mehta cc905ff2d9
Merge pull request #186 from pevisscher/patch-1
Use correct re.sub signature
2020-08-25 19:14:58 +05:30
anakin87 eadc54ad25
Merge pull request #1 from anakin87/anakin87-patch-1
Update core.py
2020-08-25 15:28:48 +02:00
anakin87 579bc16be5
Update core.py
Correct method camelot.core.Table.to_html
2020-08-25 15:27:17 +02:00
pevisscher aae2c6b3d4
use correct re.sub signature
`text_strip` currently passes the regex flags as the `count` parameter, which therefore gets hardcoded to `re.UNICODE` (value 32) and thus only replaces the first 32 occurrences.

see https://docs.python.org/3/library/re.html#re.sub for the signature
2020-08-24 16:51:06 +02:00
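The mistake described above is easy to reproduce, since `re.sub`'s positional signature is `(pattern, repl, string, count=0, flags=0)`; the strings below are made up for illustration:

```python
import re

text = "a" * 40

# Buggy: re.UNICODE (value 32) lands in the count slot, so only 32 matches are replaced.
buggy = re.sub("a", "b", text, re.UNICODE)

# Correct: pass it as a keyword so it is treated as a flag.
fixed = re.sub("a", "b", text, flags=re.UNICODE)

assert buggy.count("b") == 32
assert fixed.count("b") == 40
```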
Vinayak Mehta 705473198f
Merge pull request #121 from jedie/patch-2
[MRG] Save plot when filename is specified
2020-08-14 02:36:28 +05:30
Vinayak Mehta b741c0a9e9
Check for none and return none 2020-08-14 02:35:50 +05:30
Vinayak Mehta a6bee88053
Merge pull request #119 from jedie/patch-1
Update advanced.rst
2020-08-14 02:27:49 +05:30
Vinayak Mehta 1e050e1960
Remove plt.show() usage 2020-08-14 02:27:07 +05:30
Vinayak Mehta 28371817db
Fix doc link 2020-08-14 02:09:56 +05:30
Vinayak Mehta 7ab5db39d0
Update .readthedocs.yml and remove requirements.txt 2020-08-04 04:37:37 +05:30
Vinayak Mehta 9a5c4b6865
Merge pull request #175 from camelot-dev/revert-0-8-1
[MRG] Revert the changes in v0.8.1
2020-07-27 17:56:48 +05:30
Vinayak Mehta fbe576ffcb
Revert the changes in v0.8.1 2020-07-27 17:38:14 +05:30
Vinayak Mehta fcad5067b9
Fix failing test 2020-07-23 00:54:41 +05:30
Vinayak Mehta 1b8ce1d560
Bump requirement versions 2020-07-23 00:40:26 +05:30
Vinayak Mehta 16beb15c43
Bump version and update HISTORY.md 2020-07-21 21:48:29 +05:30
Vinayak Mehta be25e6dbdb
Merge pull request #171 from camelot-dev/fix-169
Change error name and update pdfminer.six version
2020-07-21 21:30:14 +05:30
Vinayak Mehta a13e2f6f1f
Change error name and update pdfminer.six version 2020-07-21 21:21:01 +05:30
Vinayak Mehta 4b08165328
Merge pull request #166 from stevestock/patch-1
Update install.rst
2020-07-20 16:00:43 +05:30
Vinayak Mehta e5b143d9a8
Update install instructions 2020-07-20 15:59:42 +05:30
Steven Stockhamer 8e5a8e6712
Update install.rst
macOS now uses zsh by default. Square brackets must be escaped in zsh
2020-07-19 20:44:16 -04:00
Vinayak Mehta 5efbcdcebb
Update requirements.txt 2020-05-24 19:04:50 +05:30
Vinayak Mehta 189fe58bf2
Update requirements.txt 2020-05-24 19:01:03 +05:30
Vinayak Mehta 1575ec1bf0
Add .readthedocs.yml 2020-05-24 18:56:33 +05:30
Vinayak Mehta d5d6a5962b
Bump version and update HISTORY.md 2020-05-24 18:36:13 +05:30
Vinayak Mehta 420d5aa624
Merge pull request #146 from camelot-dev/add-python38-travis
[MRG] Fix test data and drop python2 support
2020-05-24 18:31:27 +05:30
Vinayak Mehta a22fa63c4e
Fix syntax errors 2020-05-24 18:19:48 +05:30
Vinayak Mehta 52b2a595b4
Add f-strings and remove python3.5 test job 2020-05-24 18:14:43 +05:30
Vinayak Mehta afa1ba7c1f
Fix test indent 2020-05-24 17:38:48 +05:30
Vinayak Mehta f725f04223
Remove future imports 2020-05-24 17:33:13 +05:30
Vinayak Mehta 3afb72b872
Fix read_pdf(url) and test data 2020-05-24 17:26:52 +05:30
Vinayak Mehta 6dd9b6ce01
Create FUNDING.yml 2020-05-24 16:14:43 +05:30
Vinayak Mehta fc1b6f6227
Add python38 test job for travis 2020-05-24 15:27:48 +05:30
Vinayak Mehta 7d4c9e53c6 Update README 2020-03-21 18:07:29 +05:30
Jens Diemer f8b6181988
Fix #120 - Save plot 2020-03-15 13:20:27 +01:00
Jens Diemer dbdbc5f19e
Update advanced.rst
Bugfix plot example.
2020-03-15 13:12:17 +01:00
Vinayak Mehta 44193e0d26
Add deepsource badge to docs 2019-12-24 13:08:46 +05:30
Vinayak Mehta a9918a78cf
Add deepsource badge 2019-12-24 13:07:11 +05:30
Vinayak Mehta 47bb839d7a
Create .deepsource.toml 2019-12-24 13:03:45 +05:30
Vinayak Mehta 1b30f8ecf9
Merge pull request #94 from miltonArango/improving-coverage
[MRG] Unit tests for the version generation
2019-11-15 10:50:42 +05:30
Milton Arango 8e28a0cac0 Moved the version tests to test_common PR #94
Applied black formatting
2019-11-14 20:26:20 -05:00
Vinayak Mehta eb2badbbd0
Merge pull request #91 from vasantvohra/patch-1
[MRG] Update how-it-works.rst
2019-11-15 03:36:02 +05:30
Milton Arango 0d1db4b09e Unit Tests for the Version Generation
Unit tests for the __version__.py generate_version method.
2019-10-26 15:41:41 -05:00
Vasant Vohra 167ee9ac69
Update how-it-works.rst
minor typo
2019-10-17 14:04:18 +05:30
Vinayak Mehta 83f816f104
Merge pull request #48 from jnothman/assert_frame_equal
[MRG] Use assert_frame_equal for more informative errors in tests
2019-10-15 15:51:26 +05:30
Vinayak Mehta f1879726d9
Merge pull request #86 from pravarag/add-opencollective
[MRG] Update python-tk link
2019-10-15 12:19:07 +05:30
Pravar Agrawal 56f3b54f62 [PyConIndia] Update python-tk link in README 2019-10-15 11:25:57 +05:30
Vinayak Mehta 11fadb16fd
Merge pull request #65 from pravarag/add-opencollective
[MRG] Add opencollective url
2019-10-14 23:58:12 +05:30
Vinayak Mehta 2340833bb8
Merge branch 'master' into add-opencollective 2019-10-14 23:57:40 +05:30
Vinayak Mehta 7ce4cb5050
Merge pull request #57 from dcorriveau/master
Update README.md
2019-10-14 23:56:40 +05:30
Vinayak Mehta 78e5dd1f4e
Fix #56 2019-10-14 23:55:51 +05:30
Vinayak Mehta 857f68ef6b
Update README.md 2019-10-14 23:52:38 +05:30
Vinayak Mehta 14c3cb49b9
Merge pull request #70 from kishvanchee/fix67
[MRG] Add 3.7 version to installation docs
2019-10-14 23:50:44 +05:30
Kishore Vancheeshwaran a6d32ecddb added 3.7 version to installation docs 2019-10-14 12:52:51 +05:30
Pravar Agrawal e5e02401da [DevSprint] convert camelot to uppercase in README 2019-10-14 12:51:09 +05:30
Pravar Agrawal ea3eac3c40 [DevSprint] push new branch, fix typo in index.rst change 2019-10-14 12:33:13 +05:30
Pravar Agrawal 28a8112c6d [DevSprint] Fix typo in index.rst for opencollective link 2019-10-14 12:25:17 +05:30
Pravar Agrawal 45384106c8 [DevSprint] Add opencollective link to README and Docs 2019-10-14 12:21:09 +05:30
Dylan Corriveau 81729f57cc
Update README.md 2019-10-13 16:00:04 -04:00
Joel Nothman 9eb15c09dc Use assert_frame_equal for more informative errors in tests 2019-08-06 11:38:44 +10:00
Vinayak Mehta 7ecfcad239 Update HISTORY.md 2019-07-28 21:46:55 +10:00
Dimiter Naydenov b2929a9e92
Merge pull request #34 from KOLANICH/win_ghostscript_callback_fix
Fixed calling convention of callback functions
2019-07-24 13:39:18 +03:00
Dimiter Naydenov 6d33c7ff1e
Merge pull request #32 from KOLANICH/ghostscript_discovery_win
Fixed library discovery on Windows
2019-07-24 13:38:51 +03:00
KOLANICH 5687fbc8b2 Fixed calling convention of callback functions 2019-07-16 21:08:34 +03:00
KOLANICH 9e356b1b0a Fixed library discovery on Windows 2019-07-16 21:07:23 +03:00
Vinayak Mehta f7b94b3e57 Add black badge to index.rst 2019-07-07 16:15:04 +05:30
Vinayak Mehta 0efb3ca1b0 Update HISTORY.md and bump version 2019-07-07 16:07:28 +05:30
Vinayak Mehta 098f7c6727
Merge pull request #26 from camelot-dev/fix-25
[MRG] Update flavor kwargs
2019-07-06 23:42:09 +05:30
Vinayak Mehta a97b50ef21 Update flavor kwargs 2019-07-06 22:59:51 +05:30
Vinayak Mehta e0e4eeb6d4 Rebuild 2019-07-06 04:36:16 +05:30
Vinayak Mehta b8c55383ea Update docs 2019-07-06 04:28:32 +05:30
Dimiter Naydenov 0f8cda4793
Merge pull request #5 from camelot-dev/fix-cli-group-name
[MRG] No need to monkey-patch Click.HelpFormatter
2019-07-04 18:26:35 +03:00
Dimiter Naydenov e81e818b0e
Merge pull request #4 from camelot-dev/fix-strip-text-arg
[MRG] Fixed strip_text argument getting ignored
2019-07-04 18:26:11 +03:00
Dimiter Naydenov 13616c2fb4 No need to monkey-patch Click.HelpFormatter 2019-07-04 13:13:32 +03:00
Dimiter Naydenov 240ea6c411 Fixed strip_text argument getting ignored 2019-07-04 12:12:52 +03:00
Vinayak Mehta d5df93635e
Merge pull request #3 from camelot-dev/code-style-black-badge
[MRG] Add code style: black badge to README.md
2019-07-04 01:10:51 +05:30
Dimiter Naydenov 245731345c
Add code style: black badge to README.md
Now that PR #1 is merged, we can wear this proudly :)
2019-07-03 21:58:22 +03:00
Vinayak Mehta 9137df2f6c
Merge pull request #1 from camelot-dev/blacken-code
[MRG] Blacken code
2019-07-04 00:20:57 +05:30
Vinayak Mehta 16ddd10644
Update image_processing.py 2019-07-04 00:06:46 +05:30
Vinayak Mehta 2115a0e177 Blacken code 2019-07-03 23:47:42 +05:30
Vinayak Mehta 27d55d056c
Merge pull request #2 from camelot-dev/fix-pytest-dep-warning
[MRG] Fix pytest deprecation warning
2019-07-03 23:20:43 +05:30
Vinayak Mehta 8866eaa3b6 Fix pytest deprecation warning 2019-07-03 22:07:10 +05:30
Vinayak Mehta 78d80555d8 Update LICENSE and fix travis 2019-07-03 20:46:18 +05:30
Himanshu Sikaria 69767beb7b
Update README.md 2019-06-30 21:57:10 +05:30
Vinayak Mehta de97be23a4
Merge pull request #332 from socialcopsdev/fix-312
[MRG] Fix #312
2019-05-27 22:49:04 +05:30
Vinayak Mehta 8d9fdb740e Update HISTORY.md 2019-05-27 22:48:45 +05:30
Vinayak Mehta 477568dea7 Fix test 2019-05-27 22:29:50 +05:30
Vinayak Mehta de3281c1b6 Add test 2019-05-27 22:18:23 +05:30
Vinayak Mehta b2a8348f13 Fix #312 2019-05-26 17:13:59 +05:30
Vinayak Mehta 857edcd86e
Merge pull request #250 from davidkong0987/patch-1
[MRG] Update advanced.rst
2019-05-26 16:57:09 +05:30
Vinayak Mehta 8c11608078
Update advanced.rst 2019-05-26 16:44:26 +05:30
Vinayak Mehta a1b85d2c91
Merge pull request #319 from Suyash458/fix-#298
[MRG] add -strip to cli docs
2019-05-08 12:14:18 -04:00
Vinayak Mehta 4355bc98ab
Update cli.rst 2019-05-08 12:13:47 -04:00
Suyash458 3ac9318300 add `-strip` to cli docs 2019-04-27 14:46:47 +05:30
Vinayak Mehta 934065ada6
Merge pull request #294 from socialcopsdev/fix-split-bug
[MRG] Fix split text bug
2019-04-20 21:30:21 +05:30
Vinayak Mehta 355ae818a0
Merge branch 'master' into fix-split-bug 2019-04-20 21:06:47 +05:30
Vinayak Mehta 7ff8b5b89c Update HISTORY.md 2019-04-20 21:05:05 +05:30
Vinayak Mehta 3071548898 Update HISTORY.md 2019-04-20 21:04:09 +05:30
Vinayak Mehta ce727d9558 Fix split text bug 2019-03-22 02:28:29 +05:30
Vinayak Mehta ecf6febaa7
Update HISTORY.md 2019-03-08 21:20:43 +05:30
Vinayak Mehta a5343dcc25
Merge pull request #283 from symroe/277_table_sorting
[MRG] Sort TableList by order of tables in PDF
2019-03-08 21:18:51 +05:30
Vinayak Mehta 88466b8c4e
Rename _mk_table to _make_table 2019-03-08 21:04:34 +05:30
Sym Roe 8446271aa4
Always sort TableList after reading PDF 2019-02-25 09:48:47 +00:00
Sym Roe c019e582bf
Add __lt__ to Table to allow sorting
Refs #277
2019-02-25 09:20:09 +00:00
Yatin Taluja 8ea4ec3de8
Merge pull request #257 from yatintaluja/fix-245
[MRG] Fix AttributeError for encrypted files
2019-01-16 16:55:31 +05:30
yatintaluja 6c4b468800 Fix #245 2019-01-16 16:33:17 +05:30
yatintaluja 5330620ea2 Bump version 2019-01-16 16:30:05 +05:30
davidkong0987 24e58d0759
Update advanced.rst
added .. note:: in front
2019-01-10 00:49:42 -05:00
davidkong0987 88a6d0b761
Update advanced.rst
added .. note:: in front
2019-01-10 00:48:34 -05:00
davidkong0987 ddec47964c
Update advanced.rst 2019-01-09 10:05:51 -05:00
Vinayak Mehta 45ae980988 Bump version 2019-01-06 13:00:08 +05:30
Vinayak Mehta 215e5ea2a5 Move ghostscript import 2019-01-06 01:50:54 +05:30
74 changed files with 5578 additions and 2666 deletions


@@ -1,3 +1,2 @@
[run]
branch = True
omit = camelot/ext/*


@@ -1,10 +0,0 @@
root = true
[*]
end_of_line = lf
insert_final_newline = true
[*.py]
charset = utf-8
indent_style = space
indent_size = 4

.github/FUNDING.yml vendored 100644

@@ -0,0 +1 @@
open_collective: camelot


@@ -0,0 +1,57 @@
---
name: Bug report
about: Please follow this template to submit bug reports.
title: ''
labels: bug
assignees: ''
---
<!-- Please read the filing issues section of the contributor's guide first: https://camelot-py.readthedocs.io/en/master/dev/contributing.html -->
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
**Steps to reproduce the bug**
<!-- Steps used to install `camelot`:
1. Add step here (you can add more steps too) -->
<!-- Steps to be used to reproduce behavior:
1. Add step here (you can add more steps too) -->
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Code**
<!-- Add the Camelot code snippet that you used. -->
```
import camelot
# add your code here
```
**PDF**
<!-- Add the PDF file that you want to extract tables from. -->
**Screenshots**
<!-- If applicable, add screenshots to help explain your problem. -->
**Environment**
- OS: [e.g. macOS]
- Python version:
- Numpy version:
- OpenCV version:
- Ghostscript version:
- Camelot version:
**Additional context**
<!-- Add any other context about the problem here. -->

.github/workflows/tests.yml vendored 100644

@@ -0,0 +1,44 @@
name: tests

on: [pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: [3.6, 3.7, 3.8]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install camelot with dependencies
        run: |
          make install
      - name: Test with pytest
        run: |
          make test

  test_latest:
    name: Test on ${{ matrix.os }} with Python 3.9
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
        python-version: [3.9]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install camelot with dependencies
        run: |
          make install
      - name: Test with pytest
        run: |
          make test

.gitignore vendored

@@ -1,3 +1,4 @@
fontconfig/
__pycache__/
*.py[cod]
*.so
@@ -12,5 +13,8 @@ coverage.xml
.pytest_cache/
_build/
.venv/
htmlcov/
# vscode
.vscode

.readthedocs.yml 100644

@@ -0,0 +1,27 @@
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: docs/conf.py

# Build documentation with MkDocs
#mkdocs:
#  configuration: mkdocs.yml

# Optionally build your docs in additional formats such as PDF
formats:
  - pdf

# Optionally set the version of Python and requirements required to build your docs
python:
  version: 3.8
  install:
    - method: pip
      path: .
      extra_requirements:
        - dev


@@ -1,32 +0,0 @@
sudo: true
language: python
cache: pip
addons:
  apt:
    update: true
install:
  - make install
jobs:
  include:
    - stage: test
      script:
        - make test
      python: '2.7'
    - stage: test
      script:
        - make test
      python: '3.5'
    - stage: test
      script:
        - make test
      python: '3.6'
    - stage: test
      script:
        - make test
      python: '3.7'
      dist: xenial
    - stage: coverage
      python: '3.6'
      script:
        - make test
        - codecov --verbose


@@ -16,14 +16,14 @@ As the [Requests Code Of Conduct](http://docs.python-requests.org/en/master/dev/
## Your first contribution
A great way to start contributing to Camelot is to pick an issue tagged with the [help wanted](https://github.com/socialcopsdev/camelot/labels/help%20wanted) tag or the [good first issue](https://github.com/socialcopsdev/camelot/labels/good%20first%20issue) tag. If you're unable to find a good first issue, feel free to contact the maintainer.
A great way to start contributing to Camelot is to pick an issue tagged with the [help wanted](https://github.com/camelot-dev/camelot/labels/help%20wanted) tag or the [good first issue](https://github.com/camelot-dev/camelot/labels/good%20first%20issue) tag. If you're unable to find a good first issue, feel free to contact the maintainer.
## Setting up a development environment
To install the dependencies needed for development, you can use pip:
<pre>
$ pip install camelot-py[dev]
$ pip install "camelot-py[dev]"
</pre>
Alternatively, you can clone the project repository, and install using pip:
@@ -36,7 +36,7 @@ $ pip install ".[dev]"
### Submit a pull request
The preferred workflow for contributing to Camelot is to fork the [project repository](https://github.com/socialcopsdev/camelot) on GitHub, clone, develop on a branch and then finally submit a pull request. Here are the steps:
The preferred workflow for contributing to Camelot is to fork the [project repository](https://github.com/camelot-dev/camelot) on GitHub, clone, develop on a branch and then finally submit a pull request. Here are the steps:
1. Fork the project repository. Click on the Fork button near the top of the page. This creates a copy of the code under your account on the GitHub.
@@ -106,7 +106,7 @@ The function docstrings are written using the [numpydoc](https://numpydoc.readth
## Filing Issues
We use [GitHub issues](https://github.com/socialcopsdev/camelot/issues) to keep track of all issues and pull requests. Before opening an issue (which asks a question or reports a bug), please use GitHub search to look for existing issues (both open and closed) that may be similar.
We use [GitHub issues](https://github.com/camelot-dev/camelot/issues) to keep track of all issues and pull requests. Before opening an issue (which asks a question or reports a bug), please use GitHub search to look for existing issues (both open and closed) that may be similar.
### Questions


@@ -4,6 +4,109 @@ Release History
master
------
0.10.1 (2021-07-11)
------------------
- Change extra requirements from `cv` to `base`. You can use `pip install "camelot-py[base]"` to install everything required to run camelot.
0.10.0 (2021-07-11)
------------------
**Improvements**
- Add support for multiple image conversion backends. [#198](https://github.com/camelot-dev/camelot/pull/198) and [#253](https://github.com/camelot-dev/camelot/pull/253) by Vinayak Mehta.
- Add markdown export format. [#222](https://github.com/camelot-dev/camelot/pull/222/) by [Lucas Cimon](https://github.com/Lucas-C).
**Documentation**
- Add faq section. [#216](https://github.com/camelot-dev/camelot/pull/216) by [Stefano Fiorucci](https://github.com/anakin87).
0.9.0 (2021-06-15)
------------------
**Bugfixes**
- Fix use of resolution argument to generate image with ghostscript. [#231](https://github.com/camelot-dev/camelot/pull/231) by [Tiago Samaha Cordeiro](https://github.com/tiagosamaha).
- [#15](https://github.com/camelot-dev/camelot/issues/15) Fix duplicate strings being assigned to the same cell. [#206](https://github.com/camelot-dev/camelot/pull/206) by [Eduardo Gonzalez Lopez de Murillas](https://github.com/edugonza).
- Save plot when filename is specified. [#121](https://github.com/camelot-dev/camelot/pull/121) by [Jens Diemer](https://github.com/jedie).
- Close file streams explicitly. [#202](https://github.com/camelot-dev/camelot/pull/202) by [Martin Abente Lahaye](https://github.com/tchx84).
- Use correct re.sub signature. [#186](https://github.com/camelot-dev/camelot/pull/186) by [pevisscher](https://github.com/pevisscher).
- [#183](https://github.com/camelot-dev/camelot/issues/183) Fix UnicodeEncodeError when using Stream flavor by adding encoding kwarg to `to_html`. [#188](https://github.com/camelot-dev/camelot/pull/188) by [Stefano Fiorucci](https://github.com/anakin87).
- [#179](https://github.com/camelot-dev/camelot/issues/179) Fix `max() arg is an empty sequence` error on PDFs with blank pages. [#189](https://github.com/camelot-dev/camelot/pull/189) by Vinayak Mehta.
**Improvements**
- Add `line_overlap` and `boxes_flow` to `LAParams`. [#219](https://github.com/camelot-dev/camelot/pull/219) by [Arnie97](https://github.com/Arnie97).
- [Add bug report template.](https://github.com/camelot-dev/camelot/commit/0a3944e54d133b701edfe9c7546ff11289301ba8)
- Move from [Travis to GitHub Actions](https://github.com/camelot-dev/camelot/pull/241).
- Update `.readthedocs.yml` and [remove requirements.txt](https://github.com/camelot-dev/camelot/commit/7ab5db39d07baa4063f975e9e00f6073340e04c1#diff-cde814ef2f549dc093f5b8fc533b7e8f47e7b32a8081e0760e57d5c25a1139d9)
**Documentation**
- [#193](https://github.com/camelot-dev/camelot/issues/193) Add better checks to confirm proper installation of ghostscript. [#196](https://github.com/camelot-dev/camelot/pull/196) by [jimhall](https://github.com/jimhall).
- Update `advanced.rst` plotting examples. [#119](https://github.com/camelot-dev/camelot/pull/119) by [Jens Diemer](https://github.com/jedie).
0.8.2 (2020-07-27)
------------------
* Revert the changes in `0.8.1`.
0.8.1 (2020-07-21)
------------------
**Bugfixes**
* [#169](https://github.com/camelot-dev/camelot/issues/169) Fix import error caused by `pdfminer.six==20200720`. [#171](https://github.com/camelot-dev/camelot/pull/171) by Vinayak Mehta.
0.8.0 (2020-05-24)
------------------
**Improvements**
* Drop Python 2 support!
* Remove Python 2.7 and 3.5 support.
* Replace all instances of `.format` with f-strings.
* Remove all `__future__` imports.
* Fix HTTP 403 forbidden exception in read_pdf(url) and remove Python 2 urllib support.
* Fix test data.
**Bugfixes**
* Fix library discovery on Windows. [#32](https://github.com/camelot-dev/camelot/pull/32) by [KOLANICH](https://github.com/KOLANICH).
* Fix calling convention of callback functions. [#34](https://github.com/camelot-dev/camelot/pull/34) by [KOLANICH](https://github.com/KOLANICH).
0.7.3 (2019-07-07)
------------------
**Improvements**
* Camelot now follows the Black code style! [#1](https://github.com/camelot-dev/camelot/pull/1) and [#3](https://github.com/camelot-dev/camelot/pull/3).
**Bugfixes**
* Fix Click.HelpFormatter monkey-patch. [#5](https://github.com/camelot-dev/camelot/pull/5) by [Dimiter Naydenov](https://github.com/dimitern).
* Fix strip_text argument getting ignored. [#4](https://github.com/camelot-dev/camelot/pull/4) by [Dimiter Naydenov](https://github.com/dimitern).
* [#25](https://github.com/camelot-dev/camelot/issues/25) edge_tol skipped in read_pdf. [#26](https://github.com/camelot-dev/camelot/pull/26) by Vinayak Mehta.
* Fix pytest deprecation warning. [#2](https://github.com/camelot-dev/camelot/pull/2) by Vinayak Mehta.
* [#293](https://github.com/socialcopsdev/camelot/issues/293) Split text ignores all text to the right of last cut. [#294](https://github.com/socialcopsdev/camelot/pull/294) by Vinayak Mehta.
* [#277](https://github.com/socialcopsdev/camelot/issues/277) Sort TableList by order of tables in PDF. [#283](https://github.com/socialcopsdev/camelot/pull/283) by [Sym Roe](https://github.com/symroe).
* [#312](https://github.com/socialcopsdev/camelot/issues/312) `table_regions` throws `ValueError` when `flavor='stream'`. [#332](https://github.com/socialcopsdev/camelot/pull/332) by Vinayak Mehta.
0.7.2 (2019-01-10)
------------------
**Bugfixes**
* [#245](https://github.com/socialcopsdev/camelot/issues/245) Fix AttributeError for encrypted files. [#251](https://github.com/socialcopsdev/camelot/pull/251) by Yatin Taluja.
0.7.1 (2019-01-06)
------------------
**Bugfixes**
* Move ghostscript import to inside the function so Anaconda builds don't fail.
0.7.0 (2019-01-05)
------------------


@@ -1,6 +1,7 @@
MIT License
Copyright (c) 2018 Peeply Private Ltd (Singapore)
Copyright (c) 2019-2021 Camelot Developers
Copyright (c) 2018-2019 Peeply Private Ltd (Singapore)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal


@@ -1,27 +1,28 @@
<p align="center">
<img src="https://raw.githubusercontent.com/socialcopsdev/camelot/master/docs/_static/camelot.png" width="200">
<img src="https://raw.githubusercontent.com/camelot-dev/camelot/master/docs/_static/camelot.png" width="200">
</p>
# Camelot: PDF Table Extraction for Humans
[![Build Status](https://travis-ci.org/socialcopsdev/camelot.svg?branch=master)](https://travis-ci.org/socialcopsdev/camelot) [![Documentation Status](https://readthedocs.org/projects/camelot-py/badge/?version=master)](https://camelot-py.readthedocs.io/en/master/)
[![codecov.io](https://codecov.io/github/socialcopsdev/camelot/badge.svg?branch=master&service=github)](https://codecov.io/github/socialcopsdev/camelot?branch=master)
[![tests](https://github.com/camelot-dev/camelot/actions/workflows/tests.yml/badge.svg)](https://github.com/camelot-dev/camelot/actions/workflows/tests.yml) [![Documentation Status](https://readthedocs.org/projects/camelot-py/badge/?version=master)](https://camelot-py.readthedocs.io/en/master/)
[![codecov.io](https://codecov.io/github/camelot-dev/camelot/badge.svg?branch=master&service=github)](https://codecov.io/github/camelot-dev/camelot?branch=master)
[![image](https://img.shields.io/pypi/v/camelot-py.svg)](https://pypi.org/project/camelot-py/) [![image](https://img.shields.io/pypi/l/camelot-py.svg)](https://pypi.org/project/camelot-py/) [![image](https://img.shields.io/pypi/pyversions/camelot-py.svg)](https://pypi.org/project/camelot-py/) [![Gitter chat](https://badges.gitter.im/camelot-dev/Lobby.png)](https://gitter.im/camelot-dev/Lobby)
[![image](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black)
**Camelot** is a Python library that makes it easy for *anyone* to extract tables from PDF files!
**Camelot** is a Python library that can help you extract tables from PDFs!
**Note:** You can also check out [Excalibur](https://github.com/camelot-dev/excalibur), which is a web interface for Camelot!
**Note:** You can also check out [Excalibur](https://github.com/camelot-dev/excalibur), the web interface to Camelot!
---
**Here's how you can extract tables from PDF files.** Check out the PDF used in this example [here](https://github.com/socialcopsdev/camelot/blob/master/docs/_static/pdf/foo.pdf).
**Here's how you can extract tables from PDFs.** You can check out the PDF used in this example [here](https://github.com/camelot-dev/camelot/blob/master/docs/_static/pdf/foo.pdf).
<pre>
>>> import camelot
>>> tables = camelot.read_pdf('foo.pdf')
>>> tables
&lt;TableList n=1&gt;
>>> tables.export('foo.csv', f='csv', compress=True) # json, excel, html, sqlite
>>> tables.export('foo.csv', f='csv', compress=True) # json, excel, html, markdown, sqlite
>>> tables[0]
&lt;Table shape=(7, 7)&gt;
>>> tables[0].parsing_report
@@ -31,7 +32,7 @@
'order': 1,
'page': 1
}
>>> tables[0].to_csv('foo.csv') # to_json, to_excel, to_html, to_sqlite
>>> tables[0].to_csv('foo.csv') # to_json, to_excel, to_html, to_markdown, to_sqlite
>>> tables[0].df # get a pandas DataFrame!
</pre>
@@ -44,24 +45,29 @@
| 2032_2 | 0.17 | 57.8 | 21.7% | 0.3% | 2.7% | 1.2% |
| 4171_1 | 0.07 | 173.9 | 58.1% | 1.6% | 2.1% | 0.5% |
There's a [command-line interface](https://camelot-py.readthedocs.io/en/master/user/cli.html) too!
Camelot also comes packaged with a [command-line interface](https://camelot-py.readthedocs.io/en/master/user/cli.html)!
**Note:** Camelot only works with text-based PDFs and not scanned documents. (As Tabula [explains](https://github.com/tabulapdf/tabula#why-tabula), "If you can click and drag to select text in your table in a PDF viewer, then your PDF is text-based".)
You can check out some frequently asked questions [here](https://camelot-py.readthedocs.io/en/master/user/faq.html).
## Why Camelot?
- **You are in control.**: Unlike other libraries and tools which either give a nice output or fail miserably (with no in-between), Camelot gives you the power to tweak table extraction. (This is important since everything in the real world, including PDF table extraction, is fuzzy.)
- *Bad* tables can be discarded based on **metrics** like accuracy and whitespace, without ever having to manually look at each table.
- Each table is a **pandas DataFrame**, which seamlessly integrates into [ETL and data analysis workflows](https://gist.github.com/vinayak-mehta/e5949f7c2410a0e12f25d3682dc9e873).
- **Export** to multiple formats, including JSON, Excel, HTML and Sqlite.
- **Configurability**: Camelot gives you control over the table extraction process with [tweakable settings](https://camelot-py.readthedocs.io/en/master/user/advanced.html).
- **Metrics**: You can discard bad tables based on metrics like accuracy and whitespace, without having to manually look at each table.
- **Output**: Each table is extracted into a **pandas DataFrame**, which seamlessly integrates into [ETL and data analysis workflows](https://gist.github.com/vinayak-mehta/e5949f7c2410a0e12f25d3682dc9e873). You can also export tables to multiple formats, which include CSV, JSON, Excel, HTML, Markdown, and Sqlite.
See [comparison with other PDF table extraction libraries and tools](https://github.com/socialcopsdev/camelot/wiki/Comparison-with-other-PDF-Table-Extraction-libraries-and-tools).
See [comparison with similar libraries and tools](https://github.com/camelot-dev/camelot/wiki/Comparison-with-other-PDF-Table-Extraction-libraries-and-tools).
## Support the development
If Camelot has helped you, please consider supporting its development with a one-time or monthly donation [on OpenCollective](https://opencollective.com/camelot).
## Installation
### Using conda
The easiest way to install Camelot is to install it with [conda](https://conda.io/docs/), which is a package manager and environment management system for the [Anaconda](http://docs.continuum.io/anaconda/) distribution.
The easiest way to install Camelot is with [conda](https://conda.io/docs/), which is a package manager and environment management system for the [Anaconda](http://docs.continuum.io/anaconda/) distribution.
<pre>
$ conda install -c conda-forge camelot-py
@@ -69,10 +75,10 @@ $ conda install -c conda-forge camelot-py
### Using pip
After [installing the dependencies](https://camelot-py.readthedocs.io/en/master/user/install-deps.html) ([tk](https://packages.ubuntu.com/trusty/python-tk) and [ghostscript](https://www.ghostscript.com/)), you can simply use pip to install Camelot:
After [installing the dependencies](https://camelot-py.readthedocs.io/en/master/user/install-deps.html) ([tk](https://packages.ubuntu.com/bionic/python/python-tk) and [ghostscript](https://www.ghostscript.com/)), you can also just use pip to install Camelot:
<pre>
$ pip install camelot-py[cv]
$ pip install "camelot-py[base]"
</pre>
### From the source code
@@ -80,52 +86,32 @@ $ pip install camelot-py[cv]
After [installing the dependencies](https://camelot-py.readthedocs.io/en/master/user/install.html#using-pip), clone the repo using:
<pre>
$ git clone https://www.github.com/socialcopsdev/camelot
$ git clone https://www.github.com/camelot-dev/camelot
</pre>
and install Camelot using pip:
<pre>
$ cd camelot
$ pip install ".[cv]"
$ pip install ".[base]"
</pre>
## Documentation
Great documentation is available at [http://camelot-py.readthedocs.io/](http://camelot-py.readthedocs.io/).
The documentation is available at [http://camelot-py.readthedocs.io/](http://camelot-py.readthedocs.io/).
## Development
## Wrappers
The [Contributor's Guide](https://camelot-py.readthedocs.io/en/master/dev/contributing.html) has detailed information about contributing code, documentation, tests and more. We've included some basic information in this README.
- [camelot-php](https://github.com/randomstate/camelot-php) provides a [PHP](https://www.php.net/) wrapper on Camelot.
### Source code
## Contributing
You can check the latest sources with:
<pre>
$ git clone https://www.github.com/socialcopsdev/camelot
</pre>
### Setting up a development environment
You can install the development dependencies easily, using pip:
<pre>
$ pip install camelot-py[dev]
</pre>
### Testing
After installation, you can run tests using:
<pre>
$ python setup.py test
</pre>
The [Contributor's Guide](https://camelot-py.readthedocs.io/en/master/dev/contributing.html) has detailed information about contributing issues, documentation, code, and tests.
## Versioning
Camelot uses [Semantic Versioning](https://semver.org/). For the available versions, see the tags on this repository. For the changelog, you can check out [HISTORY.md](https://github.com/socialcopsdev/camelot/blob/master/HISTORY.md).
Camelot uses [Semantic Versioning](https://semver.org/). For the available versions, see the tags on this repository. For the changelog, you can check out [HISTORY.md](https://github.com/camelot-dev/camelot/blob/master/HISTORY.md).
## License
This project is licensed under the MIT License, see the [LICENSE](https://github.com/socialcopsdev/camelot/blob/master/LICENSE) file for details.
This project is licensed under the MIT License, see the [LICENSE](https://github.com/camelot-dev/camelot/blob/master/LICENSE) file for details.


@@ -2,26 +2,16 @@
import logging
from click import HelpFormatter
from .__version__ import __version__
from .io import read_pdf
from .plotting import PlotMethods
def _write_usage(self, prog, args='', prefix='Usage: '):
return self._write_usage('camelot', args, prefix=prefix)
# monkey patch click.HelpFormatter
HelpFormatter._write_usage = HelpFormatter.write_usage
HelpFormatter.write_usage = _write_usage
# set up logging
logger = logging.getLogger('camelot')
logger = logging.getLogger("camelot")
format_string = '%(asctime)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%dT%H:%M:%S')
format_string = "%(asctime)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(format_string, datefmt="%Y-%m-%dT%H:%M:%S")
handler = logging.StreamHandler()
handler.setFormatter(formatter)


@@ -1,9 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
__all__ = ('main',)
__all__ = ("main",)
def main():


@@ -1,23 +1,23 @@
# -*- coding: utf-8 -*-
VERSION = (0, 7, 0)
VERSION = (0, 10, 1)
PRERELEASE = None # alpha, beta or rc
REVISION = None
def generate_version(version, prerelease=None, revision=None):
version_parts = ['.'.join(map(str, version))]
version_parts = [".".join(map(str, version))]
if prerelease is not None:
version_parts.append('-{}'.format(prerelease))
version_parts.append(f"-{prerelease}")
if revision is not None:
version_parts.append('.{}'.format(revision))
return ''.join(version_parts)
version_parts.append(f".{revision}")
return "".join(version_parts)
__title__ = 'camelot-py'
__description__ = 'PDF Table Extraction for Humans.'
__url__ = 'http://camelot-py.readthedocs.io/'
__title__ = "camelot-py"
__description__ = "PDF Table Extraction for Humans."
__url__ = "http://camelot-py.readthedocs.io/"
__version__ = generate_version(VERSION, prerelease=PRERELEASE, revision=REVISION)
__author__ = 'Vinayak Mehta'
__author_email__ = 'vmehta94@gmail.com'
__license__ = 'MIT License'
__author__ = "Vinayak Mehta"
__author_email__ = "vmehta94@gmail.com"
__license__ = "MIT License"


@@ -0,0 +1,3 @@
# -*- coding: utf-8 -*-

from .image_conversion import ImageConversionBackend


@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-

import sys
import ctypes
from ctypes.util import find_library


def installed_posix():
    library = find_library("gs")
    return library is not None


def installed_windows():
    library = find_library(
        "".join(("gsdll", str(ctypes.sizeof(ctypes.c_voidp) * 8), ".dll"))
    )
    return library is not None


class GhostscriptBackend(object):
    def installed(self):
        if sys.platform in ["linux", "darwin"]:
            return installed_posix()
        elif sys.platform == "win32":
            return installed_windows()
        else:
            return installed_posix()

    def convert(self, pdf_path, png_path, resolution=300):
        if not self.installed():
            raise OSError(
                "Ghostscript is not installed. You can install it using the instructions"
                " here: https://camelot-py.readthedocs.io/en/master/user/install-deps.html"
            )

        import ghostscript

        gs_command = [
            "gs",
            "-q",
            "-sDEVICE=png16m",
            "-o",
            png_path,
            f"-r{resolution}",
            pdf_path,
        ]
        ghostscript.Ghostscript(*gs_command)
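A brief usage sketch for the backend above; the module path is an assumption based on the relative imports in the neighbouring files, and the file names are placeholders:

```python
from camelot.backends.ghostscript_backend import GhostscriptBackend  # assumed path

backend = GhostscriptBackend()
if backend.installed():
    # Render page.pdf to page.png at the default 300 dpi via the ghostscript bindings.
    backend.convert("page.pdf", "page.png")
else:
    print("Ghostscript shared library not found")
```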


@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-

from .poppler_backend import PopplerBackend
from .ghostscript_backend import GhostscriptBackend

BACKENDS = {"poppler": PopplerBackend, "ghostscript": GhostscriptBackend}


class ImageConversionBackend(object):
    def __init__(self, backend="poppler", use_fallback=True):
        if backend not in BACKENDS.keys():
            raise ValueError(f"Image conversion backend '{backend}' not supported")

        self.backend = backend
        self.use_fallback = use_fallback
        self.fallbacks = list(filter(lambda x: x != backend, BACKENDS.keys()))

    def convert(self, pdf_path, png_path):
        try:
            converter = BACKENDS[self.backend]()
            converter.convert(pdf_path, png_path)
        except Exception as e:
            import sys

            if self.use_fallback:
                for fallback in self.fallbacks:
                    try:
                        converter = BACKENDS[fallback]()
                        converter.convert(pdf_path, png_path)
                    except Exception as e:
                        raise type(e)(
                            str(e) + f" with image conversion backend '{fallback}'"
                        ).with_traceback(sys.exc_info()[2])
                        continue
                    else:
                        break
            else:
                raise type(e)(
                    str(e) + f" with image conversion backend '{self.backend}'"
                ).with_traceback(sys.exc_info()[2])
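A short usage sketch for `ImageConversionBackend` (module path assumed, file names are placeholders): it instantiates the configured backend and, when `use_fallback=True`, retries the conversion with the other backend; note that a failure in the fallback re-raises immediately because the `raise` precedes the `continue`. The commit messages above also mention a `backend` keyword for `read_pdf`, but that call is shown only as a hypothetical here.

```python
from camelot.backends.image_conversion import ImageConversionBackend  # assumed path

# Convert with poppler's pdftopng, falling back to ghostscript if it fails.
converter = ImageConversionBackend(backend="poppler", use_fallback=True)
converter.convert("page.pdf", "page.png")

# Hypothetical, hinted at by the commits above:
# tables = camelot.read_pdf("foo.pdf", backend="ghostscript")
```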


@@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-

import shutil
import subprocess


class PopplerBackend(object):
    def convert(self, pdf_path, png_path):
        pdftopng_executable = shutil.which("pdftopng")
        if pdftopng_executable is None:
            raise OSError(
                "pdftopng is not installed. You can install it using the 'pip install pdftopng' command."
            )

        pdftopng_command = [pdftopng_executable, pdf_path, png_path]
        try:
            subprocess.check_output(
                " ".join(pdftopng_command), stderr=subprocess.STDOUT, shell=True
            )
        except subprocess.CalledProcessError as e:
            raise ValueError(e.output)
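The poppler backend simply locates the `pdftopng` executable with `shutil.which` and runs it in a subprocess, so it works wherever `pip install pdftopng` has put the binary on the PATH. A one-line usage sketch with placeholder paths and an assumed module path:

```python
from camelot.backends.poppler_backend import PopplerBackend  # assumed path

PopplerBackend().convert("page.pdf", "page.png")  # raises OSError if pdftopng is missing
```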


@@ -3,6 +3,7 @@
import logging
import click
try:
import matplotlib.pyplot as plt
except ImportError:
@@ -13,7 +14,7 @@ else:
from . import __version__, read_pdf, plot
logger = logging.getLogger('camelot')
logger = logging.getLogger("camelot")
logger.setLevel(logging.INFO)
@@ -28,25 +29,49 @@ class Config(object):
pass_config = click.make_pass_decorator(Config)
@click.group()
@click.group(name="camelot")
@click.version_option(version=__version__)
@click.option('-q', '--quiet', is_flag=False, help='Suppress logs and warnings.')
@click.option('-p', '--pages', default='1', help='Comma-separated page numbers.'
' Example: 1,3,4 or 1,4-end or all.')
@click.option('-pw', '--password', help='Password for decryption.')
@click.option('-o', '--output', help='Output file path.')
@click.option('-f', '--format',
type=click.Choice(['csv', 'json', 'excel', 'html', 'sqlite']),
help='Output file format.')
@click.option('-z', '--zip', is_flag=True, help='Create ZIP archive.')
@click.option('-split', '--split_text', is_flag=True,
help='Split text that spans across multiple cells.')
@click.option('-flag', '--flag_size', is_flag=True, help='Flag text based on'
' font size. Useful to detect super/subscripts.')
@click.option('-strip', '--strip_text', help='Characters that should be stripped from a string before'
' assigning it to a cell.')
@click.option('-M', '--margins', nargs=3, default=(1.0, 0.5, 0.1),
help='PDFMiner char_margin, line_margin and word_margin.')
@click.option("-q", "--quiet", is_flag=False, help="Suppress logs and warnings.")
@click.option(
"-p",
"--pages",
default="1",
help="Comma-separated page numbers." " Example: 1,3,4 or 1,4-end or all.",
)
@click.option("-pw", "--password", help="Password for decryption.")
@click.option("-o", "--output", help="Output file path.")
@click.option(
"-f",
"--format",
type=click.Choice(["csv", "excel", "html", "json", "markdown", "sqlite"]),
help="Output file format.",
)
@click.option("-z", "--zip", is_flag=True, help="Create ZIP archive.")
@click.option(
"-split",
"--split_text",
is_flag=True,
help="Split text that spans across multiple cells.",
)
@click.option(
"-flag",
"--flag_size",
is_flag=True,
help="Flag text based on" " font size. Useful to detect super/subscripts.",
)
@click.option(
"-strip",
"--strip_text",
help="Characters that should be stripped from a string before"
" assigning it to a cell.",
)
@click.option(
"-M",
"--margins",
nargs=3,
default=(1.0, 0.5, 0.1),
help="PDFMiner char_margin, line_margin and word_margin.",
)
@click.pass_context
def cli(ctx, *args, **kwargs):
"""Camelot: PDF Table Extraction for Humans"""
@@ -55,79 +80,131 @@ def cli(ctx, *args, **kwargs):
ctx.obj.set_config(key, value)
@cli.command('lattice')
@click.option('-R', '--table_regions', default=[], multiple=True,
help='Page regions to analyze. Example: x1,y1,x2,y2'
' where x1, y1 -> left-top and x2, y2 -> right-bottom.')
@click.option('-T', '--table_areas', default=[], multiple=True,
help='Table areas to process. Example: x1,y1,x2,y2'
' where x1, y1 -> left-top and x2, y2 -> right-bottom.')
@click.option('-back', '--process_background', is_flag=True,
help='Process background lines.')
@click.option('-scale', '--line_scale', default=15,
help='Line size scaling factor. The larger the value,'
' the smaller the detected lines.')
@click.option('-copy', '--copy_text', default=[], type=click.Choice(['h', 'v']),
multiple=True, help='Direction in which text in a spanning cell'
' will be copied over.')
@click.option('-shift', '--shift_text', default=['l', 't'],
type=click.Choice(['', 'l', 'r', 't', 'b']), multiple=True,
help='Direction in which text in a spanning cell will flow.')
@click.option('-l', '--line_tol', default=2,
help='Tolerance parameter used to merge close vertical'
' and horizontal lines.')
@click.option('-j', '--joint_tol', default=2,
help='Tolerance parameter used to decide whether'
' the detected lines and points lie close to each other.')
@click.option('-block', '--threshold_blocksize', default=15,
help='For adaptive thresholding, size of a pixel'
' neighborhood that is used to calculate a threshold value for'
' the pixel. Example: 3, 5, 7, and so on.')
@click.option('-const', '--threshold_constant', default=-2,
help='For adaptive thresholding, constant subtracted'
' from the mean or weighted mean. Normally, it is positive but'
' may be zero or negative as well.')
@click.option('-I', '--iterations', default=0,
help='Number of times for erosion/dilation will be applied.')
@click.option('-res', '--resolution', default=300,
help='Resolution used for PDF to PNG conversion.')
@click.option('-plot', '--plot_type',
type=click.Choice(['text', 'grid', 'contour', 'joint', 'line']),
help='Plot elements found on PDF page for visual debugging.')
@click.argument('filepath', type=click.Path(exists=True))
@cli.command("lattice")
@click.option(
"-R",
"--table_regions",
default=[],
multiple=True,
help="Page regions to analyze. Example: x1,y1,x2,y2"
" where x1, y1 -> left-top and x2, y2 -> right-bottom.",
)
@click.option(
"-T",
"--table_areas",
default=[],
multiple=True,
help="Table areas to process. Example: x1,y1,x2,y2"
" where x1, y1 -> left-top and x2, y2 -> right-bottom.",
)
@click.option(
"-back", "--process_background", is_flag=True, help="Process background lines."
)
@click.option(
"-scale",
"--line_scale",
default=15,
help="Line size scaling factor. The larger the value,"
" the smaller the detected lines.",
)
@click.option(
"-copy",
"--copy_text",
default=[],
type=click.Choice(["h", "v"]),
multiple=True,
help="Direction in which text in a spanning cell" " will be copied over.",
)
@click.option(
"-shift",
"--shift_text",
default=["l", "t"],
type=click.Choice(["", "l", "r", "t", "b"]),
multiple=True,
help="Direction in which text in a spanning cell will flow.",
)
@click.option(
"-l",
"--line_tol",
default=2,
help="Tolerance parameter used to merge close vertical" " and horizontal lines.",
)
@click.option(
"-j",
"--joint_tol",
default=2,
help="Tolerance parameter used to decide whether"
" the detected lines and points lie close to each other.",
)
@click.option(
"-block",
"--threshold_blocksize",
default=15,
help="For adaptive thresholding, size of a pixel"
" neighborhood that is used to calculate a threshold value for"
" the pixel. Example: 3, 5, 7, and so on.",
)
@click.option(
"-const",
"--threshold_constant",
default=-2,
help="For adaptive thresholding, constant subtracted"
" from the mean or weighted mean. Normally, it is positive but"
" may be zero or negative as well.",
)
@click.option(
"-I",
"--iterations",
default=0,
help="Number of times for erosion/dilation will be applied.",
)
@click.option(
"-res",
"--resolution",
default=300,
help="Resolution used for PDF to PNG conversion.",
)
@click.option(
"-plot",
"--plot_type",
type=click.Choice(["text", "grid", "contour", "joint", "line"]),
help="Plot elements found on PDF page for visual debugging.",
)
@click.argument("filepath", type=click.Path(exists=True))
@pass_config
def lattice(c, *args, **kwargs):
"""Use lines between text to parse the table."""
conf = c.config
pages = conf.pop('pages')
output = conf.pop('output')
f = conf.pop('format')
compress = conf.pop('zip')
quiet = conf.pop('quiet')
plot_type = kwargs.pop('plot_type')
filepath = kwargs.pop('filepath')
pages = conf.pop("pages")
output = conf.pop("output")
f = conf.pop("format")
compress = conf.pop("zip")
quiet = conf.pop("quiet")
plot_type = kwargs.pop("plot_type")
filepath = kwargs.pop("filepath")
kwargs.update(conf)
table_regions = list(kwargs['table_regions'])
kwargs['table_regions'] = None if not table_regions else table_regions
table_areas = list(kwargs['table_areas'])
kwargs['table_areas'] = None if not table_areas else table_areas
copy_text = list(kwargs['copy_text'])
kwargs['copy_text'] = None if not copy_text else copy_text
kwargs['shift_text'] = list(kwargs['shift_text'])
table_regions = list(kwargs["table_regions"])
kwargs["table_regions"] = None if not table_regions else table_regions
table_areas = list(kwargs["table_areas"])
kwargs["table_areas"] = None if not table_areas else table_areas
copy_text = list(kwargs["copy_text"])
kwargs["copy_text"] = None if not copy_text else copy_text
kwargs["shift_text"] = list(kwargs["shift_text"])
if plot_type is not None:
if not _HAS_MPL:
raise ImportError('matplotlib is required for plotting.')
raise ImportError("matplotlib is required for plotting.")
else:
if output is None:
raise click.UsageError('Please specify output file path using --output')
raise click.UsageError("Please specify output file path using --output")
if f is None:
raise click.UsageError('Please specify output file format using --format')
raise click.UsageError("Please specify output file format using --format")
tables = read_pdf(filepath, pages=pages, flavor='lattice',
suppress_stdout=quiet, **kwargs)
click.echo('Found {} tables'.format(tables.n))
tables = read_pdf(
filepath, pages=pages, flavor="lattice", suppress_stdout=quiet, **kwargs
)
click.echo(f"Found {tables.n} tables")
if plot_type is not None:
for table in tables:
plot(table, kind=plot_type)
@@ -136,57 +213,89 @@ def lattice(c, *args, **kwargs):
tables.export(output, f=f, compress=compress)
@cli.command('stream')
@click.option('-R', '--table_regions', default=[], multiple=True,
help='Page regions to analyze. Example: x1,y1,x2,y2'
' where x1, y1 -> left-top and x2, y2 -> right-bottom.')
@click.option('-T', '--table_areas', default=[], multiple=True,
help='Table areas to process. Example: x1,y1,x2,y2'
' where x1, y1 -> left-top and x2, y2 -> right-bottom.')
@click.option('-C', '--columns', default=[], multiple=True,
help='X coordinates of column separators.')
@click.option('-e', '--edge_tol', default=50, help='Tolerance parameter'
' for extending textedges vertically.')
@click.option('-r', '--row_tol', default=2, help='Tolerance parameter'
' used to combine text vertically, to generate rows.')
@click.option('-c', '--column_tol', default=0, help='Tolerance parameter'
' used to combine text horizontally, to generate columns.')
@click.option('-plot', '--plot_type',
type=click.Choice(['text', 'grid', 'contour', 'textedge']),
help='Plot elements found on PDF page for visual debugging.')
@click.argument('filepath', type=click.Path(exists=True))
@cli.command("stream")
@click.option(
"-R",
"--table_regions",
default=[],
multiple=True,
help="Page regions to analyze. Example: x1,y1,x2,y2"
" where x1, y1 -> left-top and x2, y2 -> right-bottom.",
)
@click.option(
"-T",
"--table_areas",
default=[],
multiple=True,
help="Table areas to process. Example: x1,y1,x2,y2"
" where x1, y1 -> left-top and x2, y2 -> right-bottom.",
)
@click.option(
"-C",
"--columns",
default=[],
multiple=True,
help="X coordinates of column separators.",
)
@click.option(
"-e",
"--edge_tol",
default=50,
help="Tolerance parameter" " for extending textedges vertically.",
)
@click.option(
"-r",
"--row_tol",
default=2,
help="Tolerance parameter" " used to combine text vertically, to generate rows.",
)
@click.option(
"-c",
"--column_tol",
default=0,
help="Tolerance parameter"
" used to combine text horizontally, to generate columns.",
)
@click.option(
"-plot",
"--plot_type",
type=click.Choice(["text", "grid", "contour", "textedge"]),
help="Plot elements found on PDF page for visual debugging.",
)
@click.argument("filepath", type=click.Path(exists=True))
@pass_config
def stream(c, *args, **kwargs):
"""Use spaces between text to parse the table."""
conf = c.config
pages = conf.pop('pages')
output = conf.pop('output')
f = conf.pop('format')
compress = conf.pop('zip')
quiet = conf.pop('quiet')
plot_type = kwargs.pop('plot_type')
filepath = kwargs.pop('filepath')
pages = conf.pop("pages")
output = conf.pop("output")
f = conf.pop("format")
compress = conf.pop("zip")
quiet = conf.pop("quiet")
plot_type = kwargs.pop("plot_type")
filepath = kwargs.pop("filepath")
kwargs.update(conf)
table_regions = list(kwargs['table_regions'])
kwargs['table_regions'] = None if not table_regions else table_regions
table_areas = list(kwargs['table_areas'])
kwargs['table_areas'] = None if not table_areas else table_areas
columns = list(kwargs['columns'])
kwargs['columns'] = None if not columns else columns
table_regions = list(kwargs["table_regions"])
kwargs["table_regions"] = None if not table_regions else table_regions
table_areas = list(kwargs["table_areas"])
kwargs["table_areas"] = None if not table_areas else table_areas
columns = list(kwargs["columns"])
kwargs["columns"] = None if not columns else columns
if plot_type is not None:
if not _HAS_MPL:
raise ImportError('matplotlib is required for plotting.')
raise ImportError("matplotlib is required for plotting.")
else:
if output is None:
raise click.UsageError('Please specify output file path using --output')
raise click.UsageError("Please specify output file path using --output")
if f is None:
raise click.UsageError('Please specify output file format using --format')
raise click.UsageError("Please specify output file format using --format")
tables = read_pdf(filepath, pages=pages, flavor='stream',
suppress_stdout=quiet, **kwargs)
click.echo('Found {} tables'.format(tables.n))
tables = read_pdf(
filepath, pages=pages, flavor="stream", suppress_stdout=quiet, **kwargs
)
click.echo(f"Found {tables.n} tables")
if plot_type is not None:
for table in tables:
plot(table, kind=plot_type)
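
As a point of reference for the refactored `stream` command above, here is a minimal, illustrative sketch of the equivalent library call; the file path and coordinate strings are placeholders, and the keyword arguments simply mirror the CLI flags (`--table_areas`, `--columns`, `--edge_tol`, `--row_tol`, `--plot_type`):

```python
# Illustrative sketch only: "example.pdf" and the coordinate strings are
# placeholders, not values taken from this changeset.
import camelot

tables = camelot.read_pdf(
    "example.pdf",
    pages="1",
    flavor="stream",
    table_areas=["316,499,566,337"],    # x1,y1,x2,y2 -> left-top and right-bottom
    columns=["72,95,209,327,442,529"],  # x coordinates of column separators
    edge_tol=50,                        # tolerance for extending textedges vertically
    row_tol=2,                          # tolerance for combining text vertically into rows
)
print(f"Found {tables.n} tables")

# Visual debugging, matching the --plot_type choices (requires matplotlib).
camelot.plot(tables[0], kind="textedge").show()
```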

View File

@ -42,7 +42,8 @@ class TextEdge(object):
TEXTEDGE_REQUIRED_ELEMENTS horizontal text rows.
"""
def __init__(self, x, y0, y1, align='left'):
def __init__(self, x, y0, y1, align="left"):
self.x = x
self.y0 = y0
self.y1 = y1
@ -51,8 +52,12 @@ class TextEdge(object):
self.is_valid = False
def __repr__(self):
return '<TextEdge x={} y0={} y1={} align={} valid={}>'.format(
round(self.x, 2), round(self.y0, 2), round(self.y1, 2), self.align, self.is_valid)
x = round(self.x, 2)
y0 = round(self.y0, 2)
y1 = round(self.y1, 2)
return (
f"<TextEdge x={x} y0={y0} y1={y1} align={self.align} valid={self.is_valid}>"
)
def update_coords(self, x, y0, edge_tol=50):
"""Updates the text edge's x and bottom y coordinates and sets
@ -73,9 +78,10 @@ class TextEdges(object):
the PDF page. The dict has three keys based on the alignments,
and each key's value is a list of camelot.core.TextEdge objects.
"""
def __init__(self, edge_tol=50):
self.edge_tol = edge_tol
self._textedges = {'left': [], 'right': [], 'middle': []}
self._textedges = {"left": [], "right": [], "middle": []}
@staticmethod
def get_x_coord(textline, align):
@ -85,7 +91,7 @@ class TextEdges(object):
x_left = textline.x0
x_right = textline.x1
x_middle = x_left + (x_right - x_left) / 2.0
x_coord = {'left': x_left, 'middle': x_middle, 'right': x_right}
x_coord = {"left": x_left, "middle": x_middle, "right": x_right}
return x_coord[align]
def find(self, x_coord, align):
@ -98,8 +104,7 @@ class TextEdges(object):
return None
def add(self, textline, align):
"""Adds a new text edge to the current dict.
"""
"""Adds a new text edge to the current dict."""
x = self.get_x_coord(textline, align)
y0 = textline.y0
y1 = textline.y1
@ -107,16 +112,16 @@ class TextEdges(object):
self._textedges[align].append(te)
def update(self, textline):
"""Updates an existing text edge in the current dict.
"""
for align in ['left', 'right', 'middle']:
"""Updates an existing text edge in the current dict."""
for align in ["left", "right", "middle"]:
x_coord = self.get_x_coord(textline, align)
idx = self.find(x_coord, align)
if idx is None:
self.add(textline, align)
else:
self._textedges[align][idx].update_coords(
x_coord, textline.y0, edge_tol=self.edge_tol)
x_coord, textline.y0, edge_tol=self.edge_tol
)
def generate(self, textlines):
"""Generates the text edges dict based on horizontal text
@ -132,9 +137,15 @@ class TextEdges(object):
the most.
"""
intersections_sum = {
'left': sum(te.intersections for te in self._textedges['left'] if te.is_valid),
'right': sum(te.intersections for te in self._textedges['right'] if te.is_valid),
'middle': sum(te.intersections for te in self._textedges['middle'] if te.is_valid)
"left": sum(
te.intersections for te in self._textedges["left"] if te.is_valid
),
"right": sum(
te.intersections for te in self._textedges["right"] if te.is_valid
),
"middle": sum(
te.intersections for te in self._textedges["middle"] if te.is_valid
),
}
# TODO: naive
@ -147,6 +158,7 @@ class TextEdges(object):
"""Returns a dict of interesting table areas on the PDF page
calculated using relevant text edges.
"""
def pad(area, average_row_height):
x0 = area[0] - TABLE_AREA_PADDING
y0 = area[1] - TABLE_AREA_PADDING
@ -175,7 +187,11 @@ class TextEdges(object):
else:
table_areas.pop(found)
updated_area = (
found[0], min(te.y0, found[1]), max(found[2], te.x), max(found[3], te.y1))
found[0],
min(te.y0, found[1]),
max(found[2], te.x),
max(found[3], te.y1),
)
table_areas[updated_area] = None
# extend table areas based on textlines that overlap
@ -196,7 +212,11 @@ class TextEdges(object):
if found is not None:
table_areas.pop(found)
updated_area = (
min(tl.x0, found[0]), min(tl.y0, found[1]), max(found[2], tl.x1), max(found[3], tl.y1))
min(tl.x0, found[0]),
min(tl.y0, found[1]),
max(found[2], tl.x1),
max(found[3], tl.y1),
)
table_areas[updated_area] = None
average_textline_height = sum_textline_height / float(len(textlines))
@ -265,11 +285,14 @@ class Cell(object):
self.bottom = False
self.hspan = False
self.vspan = False
self._text = ''
self._text = ""
def __repr__(self):
return '<Cell x1={} y1={} x2={} y2={}>'.format(
round(self.x1, 2), round(self.y1, 2), round(self.x2, 2), round(self.y2, 2))
x1 = round(self.x1, 2)
y1 = round(self.y1, 2)
x2 = round(self.x2, 2)
y2 = round(self.y2, 2)
return f"<Cell x1={x1} y1={y1} x2={x2} y2={y2}>"
@property
def text(self):
@ -277,12 +300,11 @@ class Cell(object):
@text.setter
def text(self, t):
self._text = ''.join([self._text, t])
self._text = "".join([self._text, t])
@property
def bound(self):
"""The number of sides on which the cell is bounded.
"""
"""The number of sides on which the cell is bounded."""
return self.top + self.bottom + self.left + self.right
@ -314,11 +336,11 @@ class Table(object):
PDF page number.
"""
def __init__(self, cols, rows):
self.cols = cols
self.rows = rows
self.cells = [[Cell(c[0], r[1], c[1], r[0])
for c in cols] for r in rows]
self.cells = [[Cell(c[0], r[1], c[1], r[0]) for c in cols] for r in rows]
self.df = None
self.shape = (0, 0)
self.accuracy = 0
@ -327,12 +349,18 @@ class Table(object):
self.page = None
def __repr__(self):
return '<{} shape={}>'.format(self.__class__.__name__, self.shape)
return f"<{self.__class__.__name__} shape={self.shape}>"
def __lt__(self, other):
if self.page == other.page:
if self.order < other.order:
return True
if self.page < other.page:
return True
@property
def data(self):
"""Returns two-dimensional list of strings in table.
"""
"""Returns two-dimensional list of strings in table."""
d = []
for row in self.cells:
d.append([cell.text.strip() for cell in row])
@ -345,16 +373,15 @@ class Table(object):
"""
# pretty?
report = {
'accuracy': round(self.accuracy, 2),
'whitespace': round(self.whitespace, 2),
'order': self.order,
'page': self.page
"accuracy": round(self.accuracy, 2),
"whitespace": round(self.whitespace, 2),
"order": self.order,
"page": self.page,
}
return report
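
The dict assembled here backs a table's parsing report; a rough, hedged example of what it looks like at runtime (the numbers below are made up):

```python
# Assuming `table` is a parsed camelot Table; the values are illustrative.
print(table.parsing_report)
# {'accuracy': 99.02, 'whitespace': 12.24, 'order': 1, 'page': 1}
```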
def set_all_edges(self):
"""Sets all table edges to True.
"""
"""Sets all table edges to True."""
for row in self.cells:
for cell in row:
cell.left = cell.right = cell.top = cell.bottom = True
@ -376,12 +403,21 @@ class Table(object):
for v in vertical:
# find closest x coord
# iterate over y coords and find closest start and end points
i = [i for i, t in enumerate(self.cols)
if np.isclose(v[0], t[0], atol=joint_tol)]
j = [j for j, t in enumerate(self.rows)
if np.isclose(v[3], t[0], atol=joint_tol)]
k = [k for k, t in enumerate(self.rows)
if np.isclose(v[1], t[0], atol=joint_tol)]
i = [
i
for i, t in enumerate(self.cols)
if np.isclose(v[0], t[0], atol=joint_tol)
]
j = [
j
for j, t in enumerate(self.rows)
if np.isclose(v[3], t[0], atol=joint_tol)
]
k = [
k
for k, t in enumerate(self.rows)
if np.isclose(v[1], t[0], atol=joint_tol)
]
if not j:
continue
J = j[0]
@ -427,12 +463,21 @@ class Table(object):
for h in horizontal:
# find closest y coord
# iterate over x coords and find closest start and end points
i = [i for i, t in enumerate(self.rows)
if np.isclose(h[1], t[0], atol=joint_tol)]
j = [j for j, t in enumerate(self.cols)
if np.isclose(h[0], t[0], atol=joint_tol)]
k = [k for k, t in enumerate(self.cols)
if np.isclose(h[2], t[0], atol=joint_tol)]
i = [
i
for i, t in enumerate(self.rows)
if np.isclose(h[1], t[0], atol=joint_tol)
]
j = [
j
for j, t in enumerate(self.cols)
if np.isclose(h[0], t[0], atol=joint_tol)
]
k = [
k
for k, t in enumerate(self.cols)
if np.isclose(h[2], t[0], atol=joint_tol)
]
if not j:
continue
J = j[0]
@ -478,8 +523,7 @@ class Table(object):
return self
def set_border(self):
"""Sets table border edges to True.
"""
"""Sets table border edges to True."""
for r in range(len(self.rows)):
self.cells[r][0].left = True
self.cells[r][len(self.cols) - 1].right = True
@ -530,12 +574,7 @@ class Table(object):
Output filepath.
"""
kw = {
'encoding': 'utf-8',
'index': False,
'header': False,
'quoting': 1
}
kw = {"encoding": "utf-8", "index": False, "header": False, "quoting": 1}
kw.update(kwargs)
self.df.to_csv(path, **kw)
@ -550,12 +589,10 @@ class Table(object):
Output filepath.
"""
kw = {
'orient': 'records'
}
kw = {"orient": "records"}
kw.update(kwargs)
json_string = self.df.to_json(**kw)
with open(path, 'w') as f:
with open(path, "w") as f:
f.write(json_string)
def to_excel(self, path, **kwargs):
@ -570,8 +607,8 @@ class Table(object):
"""
kw = {
'sheet_name': 'page-{}-table-{}'.format(self.page, self.order),
'encoding': 'utf-8'
"sheet_name": f"page-{self.page}-table-{self.order}",
"encoding": "utf-8",
}
kw.update(kwargs)
writer = pd.ExcelWriter(path)
@ -590,9 +627,24 @@ class Table(object):
"""
html_string = self.df.to_html(**kwargs)
with open(path, 'w') as f:
with open(path, "w", encoding="utf-8") as f:
f.write(html_string)
def to_markdown(self, path, **kwargs):
"""Writes Table to a Markdown file.
For kwargs, check :meth:`pandas.DataFrame.to_markdown`.
Parameters
----------
path : str
Output filepath.
"""
md_string = self.df.to_markdown(**kwargs)
with open(path, "w", encoding="utf-8") as f:
f.write(md_string)
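
A brief usage sketch for the new `to_markdown` writer; the input PDF is a placeholder, and note that `pandas.DataFrame.to_markdown` requires the optional `tabulate` package:

```python
# Illustrative only: "foo.pdf" is a placeholder file name.
import camelot

tables = camelot.read_pdf("foo.pdf", flavor="lattice")
tables[0].to_markdown("table.md")  # delegates to pandas.DataFrame.to_markdown
```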
def to_sqlite(self, path, **kwargs):
"""Writes Table to sqlite database.
@ -604,13 +656,10 @@ class Table(object):
Output filepath.
"""
kw = {
'if_exists': 'replace',
'index': False
}
kw = {"if_exists": "replace", "index": False}
kw.update(kwargs)
conn = sqlite3.connect(path)
table_name = 'page-{}-table-{}'.format(self.page, self.order)
table_name = f"page-{self.page}-table-{self.order}"
self.df.to_sql(table_name, conn, **kw)
conn.commit()
conn.close()
@ -626,12 +675,12 @@ class TableList(object):
Number of tables in the list.
"""
def __init__(self, tables):
self._tables = tables
def __repr__(self):
return '<{} n={}>'.format(
self.__class__.__name__, self.n)
return f"<{self.__class__.__name__} n={self.n}>"
def __len__(self):
return len(self._tables)
@ -641,37 +690,35 @@ class TableList(object):
@staticmethod
def _format_func(table, f):
return getattr(table, 'to_{}'.format(f))
return getattr(table, f"to_{f}")
@property
def n(self):
return len(self)
def _write_file(self, f=None, **kwargs):
dirname = kwargs.get('dirname')
root = kwargs.get('root')
ext = kwargs.get('ext')
dirname = kwargs.get("dirname")
root = kwargs.get("root")
ext = kwargs.get("ext")
for table in self._tables:
filename = os.path.join('{}-page-{}-table-{}{}'.format(
root, table.page, table.order, ext))
filename = f"{root}-page-{table.page}-table-{table.order}{ext}"
filepath = os.path.join(dirname, filename)
to_format = self._format_func(table, f)
to_format(filepath)
def _compress_dir(self, **kwargs):
path = kwargs.get('path')
dirname = kwargs.get('dirname')
root = kwargs.get('root')
ext = kwargs.get('ext')
zipname = os.path.join(os.path.dirname(path), root) + '.zip'
with zipfile.ZipFile(zipname, 'w', allowZip64=True) as z:
path = kwargs.get("path")
dirname = kwargs.get("dirname")
root = kwargs.get("root")
ext = kwargs.get("ext")
zipname = os.path.join(os.path.dirname(path), root) + ".zip"
with zipfile.ZipFile(zipname, "w", allowZip64=True) as z:
for table in self._tables:
filename = os.path.join('{}-page-{}-table-{}{}'.format(
root, table.page, table.order, ext))
filename = f"{root}-page-{table.page}-table-{table.order}{ext}"
filepath = os.path.join(dirname, filename)
z.write(filepath, os.path.basename(filepath))
def export(self, path, f='csv', compress=False):
def export(self, path, f="csv", compress=False):
"""Exports the list of tables to specified file format.
Parameters
@ -679,7 +726,7 @@ class TableList(object):
path : str
Output filepath.
f : str
File format. Can be csv, json, excel, html and sqlite.
File format. Can be csv, excel, html, json, markdown or sqlite.
compress : bool
Whether or not to add files to a ZIP archive.
@ -690,33 +737,28 @@ class TableList(object):
if compress:
dirname = tempfile.mkdtemp()
kwargs = {
'path': path,
'dirname': dirname,
'root': root,
'ext': ext
}
kwargs = {"path": path, "dirname": dirname, "root": root, "ext": ext}
if f in ['csv', 'json', 'html']:
if f in ["csv", "html", "json", "markdown"]:
self._write_file(f=f, **kwargs)
if compress:
self._compress_dir(**kwargs)
elif f == 'excel':
elif f == "excel":
filepath = os.path.join(dirname, basename)
writer = pd.ExcelWriter(filepath)
for table in self._tables:
sheet_name = 'page-{}-table-{}'.format(table.page, table.order)
table.df.to_excel(writer, sheet_name=sheet_name, encoding='utf-8')
sheet_name = f"page-{table.page}-table-{table.order}"
table.df.to_excel(writer, sheet_name=sheet_name, encoding="utf-8")
writer.save()
if compress:
zipname = os.path.join(os.path.dirname(path), root) + '.zip'
with zipfile.ZipFile(zipname, 'w', allowZip64=True) as z:
zipname = os.path.join(os.path.dirname(path), root) + ".zip"
with zipfile.ZipFile(zipname, "w", allowZip64=True) as z:
z.write(filepath, os.path.basename(filepath))
elif f == 'sqlite':
elif f == "sqlite":
filepath = os.path.join(dirname, basename)
for table in self._tables:
table.to_sqlite(filepath)
if compress:
zipname = os.path.join(os.path.dirname(path), root) + '.zip'
with zipfile.ZipFile(zipname, 'w', allowZip64=True) as z:
zipname = os.path.join(os.path.dirname(path), root) + ".zip"
with zipfile.ZipFile(zipname, "w", allowZip64=True) as z:
z.write(filepath, os.path.basename(filepath))
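
Putting the export path together, a small hedged sketch; `tables` is assumed to be the `TableList` returned by `read_pdf`, and the output paths are placeholders. `f="markdown"` works here because it was added to the `_write_file` branch above:

```python
# Sketch under the assumption that `tables` is a camelot.core.TableList;
# the paths are placeholders.
tables.export("tables.csv", f="csv", compress=True)  # one CSV per table, zipped next to the given path
tables.export("tables.md", f="markdown")             # routed through the new to_markdown writer
```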

View File

@ -1,674 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

View File

@ -1,96 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ghostscript - A Python interface for the Ghostscript interpreter C-API
"""
#
# Modifications 2018 by Vinayak Mehta <vmehta94@gmail.com>
# Copyright 2010-2018 by Hartmut Goebel <h.goebel@crazy-compilers.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from . import _gsprint as gs
__author__ = 'Hartmut Goebel <h.goebel@crazy-compilers.com>'
__copyright__ = 'Copyright 2010-2018 by Hartmut Goebel <h.goebel@crazy-compilers.com>'
__license__ = 'GNU General Public License version 3 (GPL v3)'
__version__ = '0.6'
class __Ghostscript(object):
def __init__(self, instance, args, stdin=None, stdout=None, stderr=None):
self._initialized = False
self._callbacks = None
if stdin or stdout or stderr:
self.set_stdio(stdin, stdout, stderr)
rc = gs.init_with_args(instance, args)
self._initialized = True
if rc == gs.e_Quit:
self.exit()
def __enter__(self):
return self
def __exit__(self, *args):
self.exit()
def set_stdio(self, stdin=None, stdout=None, stderr=None):
"""Set stdin, stdout and stderr of the ghostscript interpreter.
The ``stdin`` stream has to support the ``readline()``
interface. The ``stdout`` and ``stderr`` streams have to
support the ``write()`` and ``flush()`` interface.
Please note that this does not affect the input- and output-
streams of the devices. Esp. setting stdout does not allow
catching the devise-output even when using ``-sOutputFile=-``.
"""
global __instance__
self._callbacks = (
stdin and gs._wrap_stdin(stdin) or None,
stdout and gs._wrap_stdout(stdout) or None,
stderr and gs._wrap_stderr(stderr) or None,
)
gs.set_stdio(__instance__, *self._callbacks)
def __del__(self):
self.exit()
def exit(self):
global __instance__
if self._initialized:
if __instance__ is not None:
gs.exit(__instance__)
gs.delete_instance(__instance__)
__instance__ = None
self._initialized = False
def Ghostscript(*args, **kwargs):
"""Factory function for setting up a Ghostscript instance
"""
global __instance__
# Ghostscript only supports a single instance
if __instance__ is None:
__instance__ = gs.new_instance()
return __Ghostscript(__instance__, args,
stdin=kwargs.get('stdin', None),
stdout=kwargs.get('stdout', None),
stderr=kwargs.get('stderr', None))
__instance__ = None

View File

@ -1,243 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ghostscript._gsprint - A low-level interface to the Ghostscript C-API using ctypes
"""
#
# Modifications 2018 by Vinayak Mehta <vmehta94@gmail.com>
# Copyright 2010-2018 by Hartmut Goebel <h.goebel@crazy-compilers.com>
#
# Display_callback Structure by Lasse Fister <commander@graphicore.de> in 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
from ctypes import *
# base/gserrors.h
#
# Internal code for a normal exit when usage info is displayed.
# This allows Window versions of Ghostscript to pause until
# the message can be read.
#
e_Info = -110
#
# Internal code for the .quit operator.
# The real quit code is an integer on the operand stack.
# gs_interpret returns this only for a .quit with a zero exit code.
#
e_Quit = -101
__author__ = 'Hartmut Goebel <h.goebel@crazy-compilers.com>'
__copyright__ = 'Copyright 2010-2018 by Hartmut Goebel <h.goebel@crazy-compilers.com>'
__license__ = 'GNU General Public License version 3 (GPL v3)'
__version__ = '0.6'
gs_main_instance = c_void_p
display_callback = c_void_p
# https://www.ghostscript.com/doc/current/API.htm
class GhostscriptError(Exception):
def __init__(self, ecode):
self.code = ecode
def new_instance():
"""Create a new instance of Ghostscript
This instance is passed to most other API functions.
"""
# :todo: The caller_handle will be provided to callback functions.
display_callback = None
instance = gs_main_instance()
rc = libgs.gsapi_new_instance(pointer(instance), display_callback)
if rc != 0:
raise GhostscriptError(rc)
return instance
def delete_instance(instance):
"""Destroy an instance of Ghostscript
Before you call this, Ghostscript must have finished.
If Ghostscript has been initialised, you must call exit()
before delete_instance()
"""
return libgs.gsapi_delete_instance(instance)
c_stdstream_call_t = CFUNCTYPE(c_int, gs_main_instance, POINTER(c_char), c_int)
def _wrap_stdin(infp):
"""Wrap a filehandle into a C function to be used as `stdin` callback
for ``set_stdio``. The filehandle has to support the readline() method.
"""
def _wrap(instance, dest, count):
try:
data = infp.readline(count)
except:
count = -1
else:
if not data:
count = 0
else:
count = len(data)
memmove(dest, c_char_p(data), count)
return count
return c_stdstream_call_t(_wrap)
def _wrap_stdout(outfp):
"""Wrap a filehandle into a C function to be used as `stdout` or
`stderr` callback for ``set_stdio``. The filehandle has to support the
write() and flush() methods.
"""
def _wrap(instance, str, count):
outfp.write(str[:count])
outfp.flush()
return count
return c_stdstream_call_t(_wrap)
_wrap_stderr = _wrap_stdout
def set_stdio(instance, stdin, stdout, stderr):
"""Set the callback functions for stdio.
``stdin``, ``stdout`` and ``stderr`` have to be ``ctypes``
callback functions matching the ``_gsprint.c_stdstream_call_t``
prototype. You may want to use _wrap_* to wrap file handles.
Note 1: This function only changes stdio of the Postscript
interpreter, not that of the devices.
Note 2: Make sure you keep references to C function objects
as long as they are used from C code. Otherwise they may be
garbage collected, crashing your program when a callback is made.
The ``stdin`` callback function should return the number of
characters read, `0` for EOF, or `-1` for error. The `stdout` and
`stderr` callback functions should return the number of characters
written.
You may pass ``None`` for any of stdin, stdout or stderr , in which
case the system stdin, stdout resp. stderr will be used.
"""
rc = libgs.gsapi_set_stdio(instance, stdin, stdout, stderr)
if rc not in (0, e_Quit, e_Info):
raise GhostscriptError(rc)
return rc
def init_with_args(instance, argv):
"""Initialise the interpreter
1. If quit or EOF occur during init_with_args(), the return value
will be e_Quit. This is not an error. You must call exit() and
must not call any other functions.
2. If usage info should be displayed, the return value will be
e_Info which is not an error. Do not call exit().
3. Under normal conditions this returns 0. You would then call one
or more run_*() functions and then finish with exit().
"""
ArgArray = c_char_p * len(argv)
c_argv = ArgArray(*argv)
rc = libgs.gsapi_init_with_args(instance, len(argv), c_argv)
if rc not in (0, e_Quit, e_Info):
raise GhostscriptError(rc)
return rc
def exit(instance):
"""Exit the interpreter
This must be called on shutdown if init_with_args() has been
called, and just before delete_instance()
"""
rc = libgs.gsapi_exit(instance)
if rc != 0:
raise GhostscriptError(rc)
return rc
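Taken together, the docstrings above describe a fixed call order. A minimal sketch, assuming Ghostscript is installed; the paths are placeholders and the flags simply mirror a plain gs invocation:
def rasterize_pdf(pdf_path, png_path, dpi=300):
    # argv[0] is a dummy program name; the remaining flags are illustrative.
    args = [
        b"gs",
        b"-q",
        b"-sDEVICE=png16m",
        f"-r{dpi}".encode("ascii"),
        b"-o",
        png_path.encode("utf-8"),
        pdf_path.encode("utf-8"),
    ]
    instance = new_instance()
    rc = init_with_args(instance, args)
    if rc != e_Info:
        # per init_with_args(): do not call exit() after e_Info; after 0 or
        # e_Quit, exit() must be called before delete_instance().
        exit(instance)   # the module-level exit() above, not the builtin
    delete_instance(instance)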
def __win32_finddll():
try:
import winreg
except ImportError:
# assume Python 2
from _winreg import OpenKey, CloseKey, EnumKey, QueryValueEx, \
QueryInfoKey, HKEY_LOCAL_MACHINE
else:
from winreg import OpenKey, CloseKey, EnumKey, QueryValueEx, \
QueryInfoKey, HKEY_LOCAL_MACHINE
from distutils.version import LooseVersion
import os
dlls = []
# Look up different variants of Ghostscript and take the highest
# version for which the DLL can be found in the filesystem.
for key_name in ('AFPL Ghostscript', 'Aladdin Ghostscript',
'GNU Ghostscript', 'GPL Ghostscript'):
try:
k1 = OpenKey(HKEY_LOCAL_MACHINE, "Software\\%s" % key_name)
for num in range(0, QueryInfoKey(k1)[0]):
version = EnumKey(k1, num)
try:
k2 = OpenKey(k1, version)
dll_path = QueryValueEx(k2, 'GS_DLL')[0]
CloseKey(k2)
if os.path.exists(dll_path):
dlls.append((LooseVersion(version), dll_path))
except WindowsError:
pass
CloseKey(k1)
except WindowsError:
pass
if dlls:
dlls.sort()
return dlls[-1][-1]
else:
return None
if sys.platform == 'win32':
libgs = __win32_finddll()
if not libgs:
raise RuntimeError('Please make sure that Ghostscript is installed')
libgs = windll.LoadLibrary(libgs)
else:
try:
libgs = cdll.LoadLibrary('libgs.so')
except OSError:
# shared object file not found
import ctypes.util
libgs = ctypes.util.find_library('gs')
if not libgs:
raise RuntimeError('Please make sure that Ghostscript is installed')
libgs = cdll.LoadLibrary(libgs)
del __win32_finddll

View File

@ -7,8 +7,14 @@ from PyPDF2 import PdfFileReader, PdfFileWriter
from .core import TableList
from .parsers import Stream, Lattice
from .utils import (TemporaryDirectory, get_page_layout, get_text_objects,
get_rotation, is_url, download_url)
from .utils import (
TemporaryDirectory,
get_page_layout,
get_text_objects,
get_rotation,
is_url,
download_url,
)
class PDFHandler(object):
@ -27,21 +33,23 @@ class PDFHandler(object):
Password for decryption.
"""
def __init__(self, filepath, pages='1', password=None):
def __init__(self, filepath, pages="1", password=None):
if is_url(filepath):
filepath = download_url(filepath)
self.filepath = filepath
if not filepath.lower().endswith('.pdf'):
raise NotImplementedError("File format not supported")
self.pages = self._get_pages(self.filepath, pages)
# if not filepath.lower().endswith(".pdf"):
# raise NotImplementedError("File format not supported")
if password is None:
self.password = ''
self.password = ""
else:
self.password = password
if sys.version_info[0] < 3:
self.password = self.password.encode('ascii')
self.password = self.password.encode("ascii")
self.pages = self._get_pages(pages)
def _get_pages(self, filepath, pages):
def _get_pages(self, pages):
"""Converts pages string to list of ints.
Parameters
@ -59,26 +67,31 @@ class PDFHandler(object):
"""
page_numbers = []
if pages == '1':
page_numbers.append({'start': 1, 'end': 1})
if pages == "1":
page_numbers.append({"start": 1, "end": 1})
else:
infile = PdfFileReader(open(filepath, 'rb'), strict=False)
with open(self.filepath, "rb") as f:
infile = PdfFileReader(f, strict=False)
if infile.isEncrypted:
infile.decrypt(self.password)
if pages == 'all':
page_numbers.append({'start': 1, 'end': infile.getNumPages()})
if pages == "all":
page_numbers.append({"start": 1, "end": infile.getNumPages()})
else:
for r in pages.split(','):
if '-' in r:
a, b = r.split('-')
if b == 'end':
for r in pages.split(","):
if "-" in r:
a, b = r.split("-")
if b == "end":
b = infile.getNumPages()
page_numbers.append({'start': int(a), 'end': int(b)})
page_numbers.append({"start": int(a), "end": int(b)})
else:
page_numbers.append({'start': int(r), 'end': int(r)})
page_numbers.append({"start": int(r), "end": int(r)})
P = []
for p in page_numbers:
P.extend(range(p['start'], p['end'] + 1))
P.extend(range(p["start"], p["end"] + 1))
return sorted(set(P))
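The pages grammar handled above accepts "1", "all", single pages, and comma-separated ranges with an optional "end". A hedged illustration; the path is a placeholder and a 10-page file is assumed:
# For a hypothetical 10-page /tmp/sample.pdf:
#   pages="1"            -> [1]
#   pages="all"          -> [1, 2, ..., 10]
#   pages="1,3-5,8-end"  -> [1, 3, 4, 5, 8, 9, 10]
handler = PDFHandler("/tmp/sample.pdf", pages="1,3-5,8-end")
print(handler.pages)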
def _save_page(self, filepath, page, temp):
@ -94,16 +107,16 @@ class PDFHandler(object):
Tmp directory.
"""
with open(filepath, 'rb') as fileobj:
with open(filepath, "rb") as fileobj:
infile = PdfFileReader(fileobj, strict=False)
if infile.isEncrypted:
infile.decrypt(self.password)
fpath = os.path.join(temp, 'page-{0}.pdf'.format(page))
fpath = os.path.join(temp, f"page-{page}.pdf")
froot, fext = os.path.splitext(fpath)
p = infile.getPage(page - 1)
outfile = PdfFileWriter()
outfile.addPage(p)
with open(fpath, 'wb') as f:
with open(fpath, "wb") as f:
outfile.write(f)
layout, dim = get_page_layout(fpath)
# fix rotated PDF
@ -111,23 +124,27 @@ class PDFHandler(object):
horizontal_text = get_text_objects(layout, ltype="horizontal_text")
vertical_text = get_text_objects(layout, ltype="vertical_text")
rotation = get_rotation(chars, horizontal_text, vertical_text)
if rotation != '':
fpath_new = ''.join([froot.replace('page', 'p'), '_rotated', fext])
if rotation != "":
fpath_new = "".join([froot.replace("page", "p"), "_rotated", fext])
os.rename(fpath, fpath_new)
infile = PdfFileReader(open(fpath_new, 'rb'), strict=False)
instream = open(fpath_new, "rb")
infile = PdfFileReader(instream, strict=False)
if infile.isEncrypted:
infile.decrypt(self.password)
outfile = PdfFileWriter()
p = infile.getPage(0)
if rotation == 'anticlockwise':
if rotation == "anticlockwise":
p.rotateClockwise(90)
elif rotation == 'clockwise':
elif rotation == "clockwise":
p.rotateCounterClockwise(90)
outfile.addPage(p)
with open(fpath, 'wb') as f:
with open(fpath, "wb") as f:
outfile.write(f)
instream.close()
def parse(self, flavor='lattice', suppress_stdout=False, layout_kwargs={}, **kwargs):
def parse(
self, flavor="lattice", suppress_stdout=False, layout_kwargs={}, **kwargs
):
"""Extracts tables by calling parser.get_tables on all single
page PDFs.
@ -153,11 +170,11 @@ class PDFHandler(object):
with TemporaryDirectory() as tempdir:
for p in self.pages:
self._save_page(self.filepath, p, tempdir)
pages = [os.path.join(tempdir, 'page-{0}.pdf'.format(p))
for p in self.pages]
parser = Lattice(**kwargs) if flavor == 'lattice' else Stream(**kwargs)
pages = [os.path.join(tempdir, f"page-{p}.pdf") for p in self.pages]
parser = Lattice(**kwargs) if flavor == "lattice" else Stream(**kwargs)
for p in pages:
t = parser.extract_tables(p, suppress_stdout=suppress_stdout,
layout_kwargs=layout_kwargs)
t = parser.extract_tables(
p, suppress_stdout=suppress_stdout, layout_kwargs=layout_kwargs
)
tables.extend(t)
return TableList(tables)
return TableList(sorted(tables))

View File

@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import division
import cv2
import numpy as np
@ -39,17 +37,23 @@ def adaptive_threshold(imagename, process_background=False, blocksize=15, c=-2):
if process_background:
threshold = cv2.adaptiveThreshold(
gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, blocksize, c)
gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blocksize, c
)
else:
threshold = cv2.adaptiveThreshold(
np.invert(gray), 255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blocksize, c)
np.invert(gray),
255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY,
blocksize,
c,
)
return img, threshold
def find_lines(threshold, regions=None, direction='horizontal',
line_scale=15, iterations=0):
def find_lines(
threshold, regions=None, direction="horizontal", line_scale=15, iterations=0
):
"""Finds horizontal and vertical lines by applying morphological
transformations on an image.
@ -87,15 +91,14 @@ def find_lines(threshold, regions=None, direction='horizontal',
"""
lines = []
if direction == 'vertical':
if direction == "vertical":
size = threshold.shape[0] // line_scale
el = cv2.getStructuringElement(cv2.MORPH_RECT, (1, size))
elif direction == 'horizontal':
elif direction == "horizontal":
size = threshold.shape[1] // line_scale
el = cv2.getStructuringElement(cv2.MORPH_RECT, (size, 1))
elif direction is None:
raise ValueError("Specify direction as either 'vertical' or"
" 'horizontal'")
raise ValueError("Specify direction as either 'vertical' or 'horizontal'")
if regions is not None:
region_mask = np.zeros(threshold.shape)
@ -110,19 +113,21 @@ def find_lines(threshold, regions=None, direction='horizontal',
try:
_, contours, _ = cv2.findContours(
threshold.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
threshold.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
except ValueError:
# for opencv backward compatibility
contours, _ = cv2.findContours(
threshold.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
threshold.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
for c in contours:
x, y, w, h = cv2.boundingRect(c)
x1, x2 = x, x + w
y1, y2 = y, y + h
if direction == 'vertical':
if direction == "vertical":
lines.append(((x1 + x2) // 2, y2, (x1 + x2) // 2, y1))
elif direction == 'horizontal':
elif direction == "horizontal":
lines.append((x1, (y1 + y2) // 2, x2, (y1 + y2) // 2))
return dmask, lines
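A hedged, synthetic example of find_lines(): a 90x90 binary image with one horizontal and one vertical stroke, so each direction should yield a single segment.
import numpy as np

threshold = np.zeros((90, 90), dtype=np.uint8)
threshold[45, 5:85] = 255   # horizontal stroke along row 45
threshold[5:85, 30] = 255   # vertical stroke along column 30

_, h_segments = find_lines(threshold, direction="horizontal", line_scale=15)
_, v_segments = find_lines(threshold, direction="vertical", line_scale=15)
print(h_segments)   # one (x1, y, x2, y) segment near y = 45
print(v_segments)   # one (x, y2, x, y1) segment near x = 30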
@ -150,11 +155,13 @@ def find_contours(vertical, horizontal):
try:
__, contours, __ = cv2.findContours(
mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
except ValueError:
# for opencv backward compatibility
contours, __ = cv2.findContours(
mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
# sort in reverse based on contour area and use first 10 contours
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
@ -196,11 +203,13 @@ def find_joints(contours, vertical, horizontal):
roi = joints[y : y + h, x : x + w]
try:
__, jc, __ = cv2.findContours(
roi.astype(np.uint8), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
roi.astype(np.uint8), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE
)
except ValueError:
# for opencv backward compatibility
jc, __ = cv2.findContours(
roi.astype(np.uint8), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
roi.astype(np.uint8), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE
)
if len(jc) <= 4: # skip contours with four or fewer joints
continue
joint_coords = []

View File

@ -6,8 +6,15 @@ from .handlers import PDFHandler
from .utils import validate_input, remove_extra
def read_pdf(filepath, pages='1', password=None, flavor='lattice',
suppress_stdout=False, layout_kwargs={}, **kwargs):
def read_pdf(
filepath,
pages="1",
password=None,
flavor="lattice",
suppress_stdout=False,
layout_kwargs={},
**kwargs
):
"""Read PDF and return extracted tables.
Note: kwargs annotated with ^ can only be used with flavor='stream'
@ -91,9 +98,10 @@ def read_pdf(filepath, pages='1', password=None, flavor='lattice',
tables : camelot.core.TableList
"""
if flavor not in ['lattice', 'stream']:
raise NotImplementedError("Unknown flavor specified."
" Use either 'lattice' or 'stream'")
if flavor not in ["lattice", "stream"]:
raise NotImplementedError(
"Unknown flavor specified." " Use either 'lattice' or 'stream'"
)
with warnings.catch_warnings():
if suppress_stdout:
@ -102,6 +110,10 @@ def read_pdf(filepath, pages='1', password=None, flavor='lattice',
validate_input(kwargs, flavor=flavor)
p = PDFHandler(filepath, pages=pages, password=password)
kwargs = remove_extra(kwargs, flavor=flavor)
tables = p.parse(flavor=flavor, suppress_stdout=suppress_stdout,
layout_kwargs=layout_kwargs, **kwargs)
tables = p.parse(
flavor=flavor,
suppress_stdout=suppress_stdout,
layout_kwargs=layout_kwargs,
**kwargs
)
return tables
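A hedged usage sketch of the reformatted read_pdf() signature, assuming camelot-py is installed; the path is a placeholder, and line_scale is one of the lattice-only kwargs checked by validate_input():
import camelot

tables = camelot.read_pdf("foo.pdf", pages="1-2", flavor="lattice", line_scale=40)
print(tables)                       # <TableList n=...>
print(tables[0].df)                 # pandas DataFrame for the first detected table
tables.export("foo.csv", f="csv")   # writes one CSV per table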

View File

@ -6,15 +6,15 @@ from ..utils import get_page_layout, get_text_objects
class BaseParser(object):
"""Defines a base parser.
"""
"""Defines a base parser."""
def _generate_layout(self, filename, layout_kwargs):
self.filename = filename
self.layout_kwargs = layout_kwargs
self.layout, self.dimensions = get_page_layout(
filename, **layout_kwargs)
self.images = get_text_objects(self.layout, ltype='image')
self.horizontal_text = get_text_objects(self.layout, ltype='horizontal_text')
self.vertical_text = get_text_objects(self.layout, ltype='vertical_text')
self.layout, self.dimensions = get_page_layout(filename, **layout_kwargs)
self.images = get_text_objects(self.layout, ltype="image")
self.horizontal_text = get_text_objects(self.layout, ltype="horizontal_text")
self.vertical_text = get_text_objects(self.layout, ltype="vertical_text")
self.pdf_width, self.pdf_height = self.dimensions
self.rootname, __ = os.path.splitext(self.filename)
self.imagename = "".join([self.rootname, ".png"])

View File

@ -1,28 +1,37 @@
# -*- coding: utf-8 -*-
from __future__ import division
import os
import sys
import copy
import locale
import logging
import warnings
import subprocess
import numpy as np
import pandas as pd
from .base import BaseParser
from ..core import Table
from ..ext.ghostscript import Ghostscript
from ..utils import (scale_image, scale_pdf, segments_in_bbox, text_in_bbox,
merge_close_lines, get_table_index, compute_accuracy,
compute_whitespace)
from ..image_processing import (adaptive_threshold, find_lines,
find_contours, find_joints)
from ..utils import (
scale_image,
scale_pdf,
segments_in_bbox,
text_in_bbox,
merge_close_lines,
get_table_index,
compute_accuracy,
compute_whitespace,
)
from ..image_processing import (
adaptive_threshold,
find_lines,
find_contours,
find_joints,
)
from ..backends.image_conversion import BACKENDS
logger = logging.getLogger('camelot')
logger = logging.getLogger("camelot")
class Lattice(BaseParser):
@ -84,11 +93,27 @@ class Lattice(BaseParser):
Resolution used for PDF to PNG conversion.
"""
def __init__(self, table_regions=None, table_areas=None, process_background=False,
line_scale=15, copy_text=None, shift_text=['l', 't'],
split_text=False, flag_size=False, strip_text='', line_tol=2,
joint_tol=2, threshold_blocksize=15, threshold_constant=-2,
iterations=0, resolution=300, **kwargs):
def __init__(
self,
table_regions=None,
table_areas=None,
process_background=False,
line_scale=15,
copy_text=None,
shift_text=["l", "t"],
split_text=False,
flag_size=False,
strip_text="",
line_tol=2,
joint_tol=2,
threshold_blocksize=15,
threshold_constant=-2,
iterations=0,
resolution=300,
backend="ghostscript",
**kwargs,
):
self.table_regions = table_regions
self.table_areas = table_areas
self.process_background = process_background
@ -104,6 +129,37 @@ class Lattice(BaseParser):
self.threshold_constant = threshold_constant
self.iterations = iterations
self.resolution = resolution
self.backend = Lattice._get_backend(backend)
@staticmethod
def _get_backend(backend):
def implements_convert():
methods = [
method for method in dir(backend) if method.startswith("__") is False
]
return "convert" in methods
if isinstance(backend, str):
if backend not in BACKENDS.keys():
raise NotImplementedError(
f"Unknown backend '{backend}' specified. Please use either 'poppler' or 'ghostscript'."
)
if backend == "ghostscript":
warnings.warn(
"'ghostscript' will be replaced by 'poppler' as the default image conversion"
" backend in v0.12.0. You can try out 'poppler' with backend='poppler'.",
DeprecationWarning,
)
return BACKENDS[backend]()
else:
if not implements_convert():
raise NotImplementedError(
f"'{backend}' must implement a 'convert' method"
)
return backend
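_get_backend() above accepts either a registered backend name or any object that exposes a convert() method. A hedged sketch of the duck-typed variant; the conversion body is left as a stub:
class CustomBackend:
    """Any object with a convert(pdf_path, png_path) method is accepted."""

    def convert(self, pdf_path, png_path):
        # Plug in any PDF -> PNG conversion here; Lattice only calls
        # convert(self.filename, self.imagename).
        raise NotImplementedError

parser = Lattice(backend="ghostscript")     # registered name (raises the deprecation warning)
parser = Lattice(backend=CustomBackend())   # duck-typed backend object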
@staticmethod
def _reduce_index(t, idx, shift_text):
@ -131,19 +187,19 @@ class Lattice(BaseParser):
indices = []
for r_idx, c_idx, text in idx:
for d in shift_text:
if d == 'l':
if d == "l":
if t.cells[r_idx][c_idx].hspan:
while not t.cells[r_idx][c_idx].left:
c_idx -= 1
if d == 'r':
if d == "r":
if t.cells[r_idx][c_idx].hspan:
while not t.cells[r_idx][c_idx].right:
c_idx += 1
if d == 't':
if d == "t":
if t.cells[r_idx][c_idx].vspan:
while not t.cells[r_idx][c_idx].top:
r_idx -= 1
if d == 'b':
if d == "b":
if t.cells[r_idx][c_idx].vspan:
while not t.cells[r_idx][c_idx].bottom:
r_idx += 1
@ -172,27 +228,17 @@ class Lattice(BaseParser):
if f == "h":
for i in range(len(t.cells)):
for j in range(len(t.cells[i])):
if t.cells[i][j].text.strip() == '':
if t.cells[i][j].text.strip() == "":
if t.cells[i][j].hspan and not t.cells[i][j].left:
t.cells[i][j].text = t.cells[i][j - 1].text
elif f == "v":
for i in range(len(t.cells)):
for j in range(len(t.cells[i])):
if t.cells[i][j].text.strip() == '':
if t.cells[i][j].text.strip() == "":
if t.cells[i][j].vspan and not t.cells[i][j].top:
t.cells[i][j].text = t.cells[i - 1][j].text
return t
def _generate_image(self):
self.imagename = ''.join([self.rootname, '.png'])
gs_call = '-q -sDEVICE=png16m -o {} -r300 {}'.format(
self.imagename, self.filename)
gs_call = gs_call.encode().split()
null = open(os.devnull, 'wb')
with Ghostscript(*gs_call, stdout=null) as gs:
pass
null.close()
def _generate_table_bbox(self):
def scale_areas(areas):
scaled_areas = []
@ -207,8 +253,11 @@ class Lattice(BaseParser):
return scaled_areas
self.image, self.threshold = adaptive_threshold(
self.imagename, process_background=self.process_background,
blocksize=self.threshold_blocksize, c=self.threshold_constant)
self.imagename,
process_background=self.process_background,
blocksize=self.threshold_blocksize,
c=self.threshold_constant,
)
image_width = self.image.shape[1]
image_height = self.image.shape[0]
@ -225,21 +274,35 @@ class Lattice(BaseParser):
regions = scale_areas(self.table_regions)
vertical_mask, vertical_segments = find_lines(
self.threshold, regions=regions, direction='vertical',
line_scale=self.line_scale, iterations=self.iterations)
self.threshold,
regions=regions,
direction="vertical",
line_scale=self.line_scale,
iterations=self.iterations,
)
horizontal_mask, horizontal_segments = find_lines(
self.threshold, regions=regions, direction='horizontal',
line_scale=self.line_scale, iterations=self.iterations)
self.threshold,
regions=regions,
direction="horizontal",
line_scale=self.line_scale,
iterations=self.iterations,
)
contours = find_contours(vertical_mask, horizontal_mask)
table_bbox = find_joints(contours, vertical_mask, horizontal_mask)
else:
vertical_mask, vertical_segments = find_lines(
self.threshold, direction='vertical', line_scale=self.line_scale,
iterations=self.iterations)
self.threshold,
direction="vertical",
line_scale=self.line_scale,
iterations=self.iterations,
)
horizontal_mask, horizontal_segments = find_lines(
self.threshold, direction='horizontal', line_scale=self.line_scale,
iterations=self.iterations)
self.threshold,
direction="horizontal",
line_scale=self.line_scale,
iterations=self.iterations,
)
areas = scale_areas(self.table_areas)
table_bbox = find_joints(areas, vertical_mask, horizontal_mask)
@ -247,18 +310,20 @@ class Lattice(BaseParser):
self.table_bbox_unscaled = copy.deepcopy(table_bbox)
self.table_bbox, self.vertical_segments, self.horizontal_segments = scale_image(
table_bbox, vertical_segments, horizontal_segments, pdf_scalers)
table_bbox, vertical_segments, horizontal_segments, pdf_scalers
)
def _generate_columns_and_rows(self, table_idx, tk):
# select elements which lie within table_bbox
t_bbox = {}
v_s, h_s = segments_in_bbox(
tk, self.vertical_segments, self.horizontal_segments)
t_bbox['horizontal'] = text_in_bbox(tk, self.horizontal_text)
t_bbox['vertical'] = text_in_bbox(tk, self.vertical_text)
tk, self.vertical_segments, self.horizontal_segments
)
t_bbox["horizontal"] = text_in_bbox(tk, self.horizontal_text)
t_bbox["vertical"] = text_in_bbox(tk, self.vertical_text)
t_bbox['horizontal'].sort(key=lambda x: (-x.y0, x.x0))
t_bbox['vertical'].sort(key=lambda x: (x.x0, -x.y0))
t_bbox["horizontal"].sort(key=lambda x: (-x.y0, x.x0))
t_bbox["vertical"].sort(key=lambda x: (x.x0, -x.y0))
self.t_bbox = t_bbox
@ -267,23 +332,19 @@ class Lattice(BaseParser):
cols.extend([tk[0], tk[2]])
rows.extend([tk[1], tk[3]])
# sort horizontal and vertical segments
cols = merge_close_lines(
sorted(cols), line_tol=self.line_tol)
rows = merge_close_lines(
sorted(rows, reverse=True), line_tol=self.line_tol)
cols = merge_close_lines(sorted(cols), line_tol=self.line_tol)
rows = merge_close_lines(sorted(rows, reverse=True), line_tol=self.line_tol)
# make grid using x and y coord of shortlisted rows and cols
cols = [(cols[i], cols[i + 1])
for i in range(0, len(cols) - 1)]
rows = [(rows[i], rows[i + 1])
for i in range(0, len(rows) - 1)]
cols = [(cols[i], cols[i + 1]) for i in range(0, len(cols) - 1)]
rows = [(rows[i], rows[i + 1]) for i in range(0, len(rows) - 1)]
return cols, rows, v_s, h_s
def _generate_table(self, table_idx, cols, rows, **kwargs):
v_s = kwargs.get('v_s')
h_s = kwargs.get('h_s')
v_s = kwargs.get("v_s")
h_s = kwargs.get("h_s")
if v_s is None or h_s is None:
raise ValueError('No segments found on {}'.format(self.rootname))
raise ValueError("No segments found on {}".format(self.rootname))
table = Table(cols, rows)
# set table edges to True using ver+hor lines
@ -296,14 +357,21 @@ class Lattice(BaseParser):
pos_errors = []
# TODO: have a single list in place of two directional ones?
# sorted on x-coordinate based on reading order i.e. LTR or RTL
for direction in ['vertical', 'horizontal']:
for direction in ["vertical", "horizontal"]:
for t in self.t_bbox[direction]:
indices, error = get_table_index(
table, t, direction, split_text=self.split_text,
flag_size=self.flag_size, strip_text=self.strip_text)
table,
t,
direction,
split_text=self.split_text,
flag_size=self.flag_size,
strip_text=self.strip_text,
)
if indices[:2] != (-1, -1):
pos_errors.append(error)
indices = Lattice._reduce_index(table, indices, shift_text=self.shift_text)
indices = Lattice._reduce_index(
table, indices, shift_text=self.shift_text
)
for r_idx, c_idx, text in indices:
table.cells[r_idx][c_idx].text = text
accuracy = compute_accuracy([[100, pos_errors]])
@ -316,11 +384,11 @@ class Lattice(BaseParser):
table.shape = table.df.shape
whitespace = compute_whitespace(data)
table.flavor = 'lattice'
table.flavor = "lattice"
table.accuracy = accuracy
table.whitespace = whitespace
table.order = table_idx + 1
table.page = int(os.path.basename(self.rootname).replace('page-', ''))
table.page = int(os.path.basename(self.rootname).replace("page-", ""))
# for plotting
_text = []
@ -336,24 +404,29 @@ class Lattice(BaseParser):
def extract_tables(self, filename, suppress_stdout=False, layout_kwargs={}):
self._generate_layout(filename, layout_kwargs)
if not suppress_stdout:
logger.info('Processing {}'.format(os.path.basename(self.rootname)))
logger.info("Processing {}".format(os.path.basename(self.rootname)))
if not self.horizontal_text:
if self.images:
warnings.warn('{} is image-based, camelot only works on'
' text-based pages.'.format(os.path.basename(self.rootname)))
warnings.warn(
"{} is image-based, camelot only works on"
" text-based pages.".format(os.path.basename(self.rootname))
)
else:
warnings.warn('No tables found on {}'.format(
os.path.basename(self.rootname)))
warnings.warn(
"No tables found on {}".format(os.path.basename(self.rootname))
)
return []
self._generate_image()
self.backend.convert(self.filename, self.imagename)
self._generate_table_bbox()
_tables = []
# sort tables based on y-coord
for table_idx, tk in enumerate(sorted(
self.table_bbox.keys(), key=lambda x: x[1], reverse=True)):
for table_idx, tk in enumerate(
sorted(self.table_bbox.keys(), key=lambda x: x[1], reverse=True)
):
cols, rows, v_s, h_s = self._generate_columns_and_rows(table_idx, tk)
table = self._generate_table(table_idx, cols, rows, v_s=v_s, h_s=h_s)
table._bbox = tk

View File

@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import division
import os
import logging
import warnings
@ -10,11 +9,10 @@ import pandas as pd
from .base import BaseParser
from ..core import TextEdges, Table
from ..utils import (text_in_bbox, get_table_index, compute_accuracy,
compute_whitespace)
from ..utils import text_in_bbox, get_table_index, compute_accuracy, compute_whitespace
logger = logging.getLogger('camelot')
logger = logging.getLogger("camelot")
class Stream(BaseParser):
@ -55,9 +53,20 @@ class Stream(BaseParser):
to generate columns.
"""
def __init__(self, table_regions=None, table_areas=None, columns=None, split_text=False,
flag_size=False, strip_text='', edge_tol=50, row_tol=2,
column_tol=0, **kwargs):
def __init__(
self,
table_regions=None,
table_areas=None,
columns=None,
split_text=False,
flag_size=False,
strip_text="",
edge_tol=50,
row_tol=2,
column_tol=0,
**kwargs,
):
self.table_regions = table_regions
self.table_areas = table_areas
self.columns = columns
@ -112,6 +121,7 @@ class Stream(BaseParser):
row_y = 0
rows = []
temp = []
for t in text:
# is checking for upright necessary?
# if t.get_text().strip() and all([obj.upright for obj in t._objs if
@ -122,7 +132,9 @@ class Stream(BaseParser):
temp = []
row_y = t.y0
temp.append(t)
rows.append(sorted(temp, key=lambda t: t.x0))
if len(rows) > 1:
__ = rows.pop(0) # TODO: hacky
return rows
@ -150,8 +162,9 @@ class Stream(BaseParser):
else:
lower = merged[-1]
if column_tol >= 0:
if (higher[0] <= lower[1] or
np.isclose(higher[0], lower[1], atol=column_tol)):
if higher[0] <= lower[1] or np.isclose(
higher[0], lower[1], atol=column_tol
):
upper_bound = max(lower[1], higher[1])
lower_bound = min(lower[0], higher[0])
merged[-1] = (lower_bound, upper_bound)
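A hedged illustration of the merging rule shown above; the column spans are assumed to be pre-sorted by their left edge, as they are at the call sites:
# Overlapping spans collapse; column_tol also merges spans that nearly touch.
print(Stream._merge_columns([(10, 25), (24, 40), (45, 60)], column_tol=0))
# -> [(10, 40), (45, 60)]
print(Stream._merge_columns([(10, 25), (27, 40)], column_tol=3))
# -> [(10, 40)]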
@ -186,13 +199,14 @@ class Stream(BaseParser):
List of continuous row y-coordinate tuples.
"""
row_mids = [sum([(t.y0 + t.y1) / 2 for t in r]) / len(r)
if len(r) > 0 else 0 for r in rows_grouped]
row_mids = [
sum([(t.y0 + t.y1) / 2 for t in r]) / len(r) if len(r) > 0 else 0
for r in rows_grouped
]
rows = [(row_mids[i] + row_mids[i - 1]) / 2 for i in range(1, len(row_mids))]
rows.insert(0, text_y_max)
rows.append(text_y_min)
rows = [(rows[i], rows[i + 1])
for i in range(0, len(rows) - 1)]
rows = [(rows[i], rows[i + 1]) for i in range(0, len(rows) - 1)]
return rows
@staticmethod
@ -217,8 +231,9 @@ class Stream(BaseParser):
if text:
text = Stream._group_rows(text, row_tol=row_tol)
elements = [len(r) for r in text]
new_cols = [(t.x0, t.x1)
for r in text if len(r) == max(elements) for t in r]
new_cols = [
(t.x0, t.x1) for r in text if len(r) == max(elements) for t in r
]
cols.extend(Stream._merge_columns(sorted(new_cols)))
return cols
@ -243,15 +258,13 @@ class Stream(BaseParser):
cols = [(cols[i][0] + cols[i - 1][1]) / 2 for i in range(1, len(cols))]
cols.insert(0, text_x_min)
cols.append(text_x_max)
cols = [(cols[i], cols[i + 1])
for i in range(0, len(cols) - 1)]
cols = [(cols[i], cols[i + 1]) for i in range(0, len(cols) - 1)]
return cols
def _validate_columns(self):
if self.table_areas is not None and self.columns is not None:
if len(self.table_areas) != len(self.columns):
raise ValueError("Length of table_areas and columns"
" should be equal")
raise ValueError("Length of table_areas and columns" " should be equal")
def _nurminen_table_detection(self, textlines):
"""A general implementation of the table detection algorithm
@ -286,7 +299,11 @@ class Stream(BaseParser):
# filter horizontal text
hor_text = []
for region in self.table_regions:
x1, y1, x2, y2 = region
x1, y1, x2, y2 = region.split(",")
x1 = float(x1)
y1 = float(y1)
x2 = float(x2)
y2 = float(y2)
region_text = text_in_bbox((x1, y2, x2, y1), self.horizontal_text)
hor_text.extend(region_text)
# find tables based on nurminen's detection algorithm
@ -305,16 +322,16 @@ class Stream(BaseParser):
def _generate_columns_and_rows(self, table_idx, tk):
# select elements which lie within table_bbox
t_bbox = {}
t_bbox['horizontal'] = text_in_bbox(tk, self.horizontal_text)
t_bbox['vertical'] = text_in_bbox(tk, self.vertical_text)
t_bbox["horizontal"] = text_in_bbox(tk, self.horizontal_text)
t_bbox["vertical"] = text_in_bbox(tk, self.vertical_text)
t_bbox['horizontal'].sort(key=lambda x: (-x.y0, x.x0))
t_bbox['vertical'].sort(key=lambda x: (x.x0, -x.y0))
t_bbox["horizontal"].sort(key=lambda x: (-x.y0, x.x0))
t_bbox["vertical"].sort(key=lambda x: (x.x0, -x.y0))
self.t_bbox = t_bbox
text_x_min, text_y_min, text_x_max, text_y_max = self._text_bbox(self.t_bbox)
rows_grouped = self._group_rows(self.t_bbox['horizontal'], row_tol=self.row_tol)
rows_grouped = self._group_rows(self.t_bbox["horizontal"], row_tol=self.row_tol)
rows = self._join_rows(rows_grouped, text_y_max, text_y_min)
elements = [len(r) for r in rows_grouped]
@ -323,7 +340,7 @@ class Stream(BaseParser):
# take (0, pdf_width) by default
# similar to else condition
# len can't be 1
cols = self.columns[table_idx].split(',')
cols = self.columns[table_idx].split(",")
cols = [float(c) for c in cols]
cols.insert(0, text_x_min)
cols.append(text_x_max)
@ -331,6 +348,9 @@ class Stream(BaseParser):
else:
# calculate mode of the list of number of elements in
# each row to guess the number of columns
if not len(elements):
cols = [(text_x_min, text_x_max)]
else:
ncols = max(set(elements), key=elements.count)
if ncols == 1:
# if mode is 1, the page usually contains no tables
@ -342,20 +362,29 @@ class Stream(BaseParser):
if len(elements):
ncols = max(set(elements), key=elements.count)
else:
warnings.warn("No tables found in table area {}".format(
table_idx + 1))
cols = [(t.x0, t.x1) for r in rows_grouped if len(r) == ncols for t in r]
warnings.warn(f"No tables found in table area {table_idx + 1}")
cols = [
(t.x0, t.x1) for r in rows_grouped if len(r) == ncols for t in r
]
cols = self._merge_columns(sorted(cols), column_tol=self.column_tol)
inner_text = []
for i in range(1, len(cols)):
left = cols[i - 1][1]
right = cols[i][0]
inner_text.extend([t for direction in self.t_bbox
inner_text.extend(
[
t
for direction in self.t_bbox
for t in self.t_bbox[direction]
if t.x0 > left and t.x1 < right])
outer_text = [t for direction in self.t_bbox
if t.x0 > left and t.x1 < right
]
)
outer_text = [
t
for direction in self.t_bbox
for t in self.t_bbox[direction]
if t.x0 > cols[-1][1] or t.x1 < cols[0][0]]
if t.x0 > cols[-1][1] or t.x1 < cols[0][0]
]
inner_text.extend(outer_text)
cols = self._add_columns(cols, inner_text, self.row_tol)
cols = self._join_columns(cols, text_x_min, text_x_max)
@ -369,11 +398,16 @@ class Stream(BaseParser):
pos_errors = []
# TODO: have a single list in place of two directional ones?
# sorted on x-coordinate based on reading order i.e. LTR or RTL
for direction in ['vertical', 'horizontal']:
for direction in ["vertical", "horizontal"]:
for t in self.t_bbox[direction]:
indices, error = get_table_index(
table, t, direction, split_text=self.split_text,
flag_size=self.flag_size, strip_text=self.strip_text)
table,
t,
direction,
split_text=self.split_text,
flag_size=self.flag_size,
strip_text=self.strip_text,
)
if indices[:2] != (-1, -1):
pos_errors.append(error)
for r_idx, c_idx, text in indices:
@ -385,11 +419,11 @@ class Stream(BaseParser):
table.shape = table.df.shape
whitespace = compute_whitespace(data)
table.flavor = 'stream'
table.flavor = "stream"
table.accuracy = accuracy
table.whitespace = whitespace
table.order = table_idx + 1
table.page = int(os.path.basename(self.rootname).replace('page-', ''))
table.page = int(os.path.basename(self.rootname).replace("page-", ""))
# for plotting
_text = []
@ -404,24 +438,28 @@ class Stream(BaseParser):
def extract_tables(self, filename, suppress_stdout=False, layout_kwargs={}):
self._generate_layout(filename, layout_kwargs)
base_filename = os.path.basename(self.rootname)
if not suppress_stdout:
logger.info('Processing {}'.format(os.path.basename(self.rootname)))
logger.info(f"Processing {base_filename}")
if not self.horizontal_text:
if self.images:
warnings.warn('{} is image-based, camelot only works on'
' text-based pages.'.format(os.path.basename(self.rootname)))
warnings.warn(
f"{base_filename} is image-based, camelot only works on"
" text-based pages."
)
else:
warnings.warn('No tables found on {}'.format(
os.path.basename(self.rootname)))
warnings.warn(f"No tables found on {base_filename}")
return []
self._generate_table_bbox()
_tables = []
# sort tables based on y-coord
for table_idx, tk in enumerate(sorted(
self.table_bbox.keys(), key=lambda x: x[1], reverse=True)):
for table_idx, tk in enumerate(
sorted(self.table_bbox.keys(), key=lambda x: x[1], reverse=True)
):
cols, rows = self._generate_columns_and_rows(table_idx, tk)
table = self._generate_table(table_idx, cols, rows)
table._bbox = tk

View File

@ -10,7 +10,7 @@ else:
class PlotMethods(object):
def __call__(self, table, kind='text', filename=None):
def __call__(self, table, kind="text", filename=None):
"""Plot elements found on PDF page based on kind
specified, useful for debugging and playing with different
parameters to get the best output.
@ -31,17 +31,21 @@ class PlotMethods(object):
"""
if not _HAS_MPL:
raise ImportError('matplotlib is required for plotting.')
raise ImportError("matplotlib is required for plotting.")
if table.flavor == 'lattice' and kind in ['textedge']:
raise NotImplementedError("Lattice flavor does not support kind='{}'".format(
kind))
elif table.flavor == 'stream' and kind in ['joint', 'line']:
raise NotImplementedError("Stream flavor does not support kind='{}'".format(
kind))
if table.flavor == "lattice" and kind in ["textedge"]:
raise NotImplementedError(f"Lattice flavor does not support kind='{kind}'")
elif table.flavor == "stream" and kind in ["joint", "line"]:
raise NotImplementedError(f"Stream flavor does not support kind='{kind}'")
plot_method = getattr(self, kind)
return plot_method(table)
fig = plot_method(table)
if filename is not None:
fig.savefig(filename)
return None
return fig
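With the change above, camelot.plot() saves the figure when a filename is given and otherwise returns it for further use. A hedged usage sketch; the paths are placeholders:
import camelot

tables = camelot.read_pdf("foo.pdf", flavor="lattice")
camelot.plot(tables[0], kind="grid", filename="grid.png")   # saved to disk, returns None
fig = camelot.plot(tables[0], kind="text")                  # matplotlib Figure for further tweaking
fig.savefig("text.png", dpi=150)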
def text(self, table):
"""Generates a plot for all text elements present
@ -57,18 +61,12 @@ class PlotMethods(object):
"""
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax = fig.add_subplot(111, aspect="equal")
xs, ys = [], []
for t in table._text:
xs.extend([t[0], t[2]])
ys.extend([t[1], t[3]])
ax.add_patch(
patches.Rectangle(
(t[0], t[1]),
t[2] - t[0],
t[3] - t[1]
)
)
ax.add_patch(patches.Rectangle((t[0], t[1]), t[2] - t[0], t[3] - t[1]))
ax.set_xlim(min(xs) - 10, max(xs) + 10)
ax.set_ylim(min(ys) - 10, max(ys) + 10)
return fig
@ -87,21 +85,17 @@ class PlotMethods(object):
"""
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax = fig.add_subplot(111, aspect="equal")
for row in table.cells:
for cell in row:
if cell.left:
ax.plot([cell.lb[0], cell.lt[0]],
[cell.lb[1], cell.lt[1]])
ax.plot([cell.lb[0], cell.lt[0]], [cell.lb[1], cell.lt[1]])
if cell.right:
ax.plot([cell.rb[0], cell.rt[0]],
[cell.rb[1], cell.rt[1]])
ax.plot([cell.rb[0], cell.rt[0]], [cell.rb[1], cell.rt[1]])
if cell.top:
ax.plot([cell.lt[0], cell.rt[0]],
[cell.lt[1], cell.rt[1]])
ax.plot([cell.lt[0], cell.rt[0]], [cell.lt[1], cell.rt[1]])
if cell.bottom:
ax.plot([cell.lb[0], cell.rb[0]],
[cell.lb[1], cell.rb[1]])
ax.plot([cell.lb[0], cell.rb[0]], [cell.lb[1], cell.rb[1]])
return fig
def contour(self, table):
@ -124,7 +118,7 @@ class PlotMethods(object):
img, table_bbox = (None, {table._bbox: None})
_FOR_LATTICE = False
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax = fig.add_subplot(111, aspect="equal")
xs, ys = [], []
if not _FOR_LATTICE:
@ -133,21 +127,14 @@ class PlotMethods(object):
ys.extend([t[1], t[3]])
ax.add_patch(
patches.Rectangle(
(t[0], t[1]),
t[2] - t[0],
t[3] - t[1],
color='blue'
(t[0], t[1]), t[2] - t[0], t[3] - t[1], color="blue"
)
)
for t in table_bbox.keys():
ax.add_patch(
patches.Rectangle(
(t[0], t[1]),
t[2] - t[0],
t[3] - t[1],
fill=False,
color='red'
(t[0], t[1]), t[2] - t[0], t[3] - t[1], fill=False, color="red"
)
)
if not _FOR_LATTICE:
@ -173,25 +160,19 @@ class PlotMethods(object):
"""
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax = fig.add_subplot(111, aspect="equal")
xs, ys = [], []
for t in table._text:
xs.extend([t[0], t[2]])
ys.extend([t[1], t[3]])
ax.add_patch(
patches.Rectangle(
(t[0], t[1]),
t[2] - t[0],
t[3] - t[1],
color='blue'
)
patches.Rectangle((t[0], t[1]), t[2] - t[0], t[3] - t[1], color="blue")
)
ax.set_xlim(min(xs) - 10, max(xs) + 10)
ax.set_ylim(min(ys) - 10, max(ys) + 10)
for te in table._textedges:
ax.plot([te.x, te.x],
[te.y0, te.y1])
ax.plot([te.x, te.x], [te.y0, te.y1])
return fig
@ -210,14 +191,14 @@ class PlotMethods(object):
"""
img, table_bbox = table._image
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax = fig.add_subplot(111, aspect="equal")
x_coord = []
y_coord = []
for k in table_bbox.keys():
for coord in table_bbox[k]:
x_coord.append(coord[0])
y_coord.append(coord[1])
ax.plot(x_coord, y_coord, 'ro')
ax.plot(x_coord, y_coord, "ro")
ax.imshow(img)
return fig
@ -235,7 +216,7 @@ class PlotMethods(object):
"""
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax = fig.add_subplot(111, aspect="equal")
vertical, horizontal = table._segments
for v in vertical:
ax.plot([v[0], v[2]], [v[1], v[3]])

View File

@ -1,8 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import division
import os
import sys
import re
import random
import shutil
import string
@ -19,23 +18,22 @@ from pdfminer.pdfpage import PDFTextExtractionNotAllowed
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import (LAParams, LTAnno, LTChar, LTTextLineHorizontal,
LTTextLineVertical, LTImage)
from pdfminer.layout import (
LAParams,
LTAnno,
LTChar,
LTTextLineHorizontal,
LTTextLineVertical,
LTImage,
)
PY3 = sys.version_info[0] >= 3
if PY3:
from urllib.request import urlopen
from urllib.parse import urlparse as parse_url
from urllib.parse import uses_relative, uses_netloc, uses_params
else:
from urllib2 import urlopen
from urlparse import urlparse as parse_url
from urlparse import uses_relative, uses_netloc, uses_params
from urllib.request import Request, urlopen
from urllib.parse import urlparse as parse_url
from urllib.parse import uses_relative, uses_netloc, uses_params
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
_VALID_URLS.discard("")
# https://github.com/pandas-dev/pandas/blob/master/pandas/io/common.py
@ -59,9 +57,11 @@ def is_url(url):
def random_string(length):
ret = ''
ret = ""
while length:
ret += random.choice(string.digits + string.ascii_lowercase + string.ascii_uppercase)
ret += random.choice(
string.digits + string.ascii_lowercase + string.ascii_uppercase
)
length -= 1
return ret
@ -79,14 +79,13 @@ def download_url(url):
Temporary filepath.
"""
filename = '{}.pdf'.format(random_string(6))
with tempfile.NamedTemporaryFile('wb', delete=False) as f:
obj = urlopen(url)
if PY3:
filename = f"{random_string(6)}.pdf"
with tempfile.NamedTemporaryFile("wb", delete=False) as f:
headers = {"User-Agent": "Mozilla/5.0"}
request = Request(url, None, headers)
obj = urlopen(request)
content_type = obj.info().get_content_type()
else:
content_type = obj.info().getheader('Content-Type')
if content_type != 'application/pdf':
if content_type != "application/pdf":
raise NotImplementedError("File format not supported")
f.write(obj.read())
filepath = os.path.join(os.path.dirname(f.name), filename)
@ -94,39 +93,37 @@ def download_url(url):
return filepath
stream_kwargs = [
'columns',
'row_tol',
'column_tol'
]
stream_kwargs = ["columns", "edge_tol", "row_tol", "column_tol"]
lattice_kwargs = [
'process_background',
'line_scale',
'copy_text',
'shift_text',
'line_tol',
'joint_tol',
'threshold_blocksize',
'threshold_constant',
'iterations'
"process_background",
"line_scale",
"copy_text",
"shift_text",
"line_tol",
"joint_tol",
"threshold_blocksize",
"threshold_constant",
"iterations",
"resolution",
]
def validate_input(kwargs, flavor='lattice'):
def validate_input(kwargs, flavor="lattice"):
def check_intersection(parser_kwargs, input_kwargs):
isec = set(parser_kwargs).intersection(set(input_kwargs.keys()))
if isec:
raise ValueError("{} cannot be used with flavor='{}'".format(
",".join(sorted(isec)), flavor))
raise ValueError(
f"{','.join(sorted(isec))} cannot be used with flavor='{flavor}'"
)
if flavor == 'lattice':
if flavor == "lattice":
check_intersection(stream_kwargs, kwargs)
else:
check_intersection(lattice_kwargs, kwargs)
def remove_extra(kwargs, flavor='lattice'):
if flavor == 'lattice':
def remove_extra(kwargs, flavor="lattice"):
if flavor == "lattice":
for key in kwargs.keys():
if key in stream_kwargs:
kwargs.pop(key)
@ -256,15 +253,19 @@ def scale_image(tables, v_segments, h_segments, factors):
v_segments_new = []
for v in v_segments:
x1, x2 = scale(v[0], scaling_factor_x), scale(v[2], scaling_factor_x)
y1, y2 = scale(abs(translate(-img_y, v[1])), scaling_factor_y), scale(
abs(translate(-img_y, v[3])), scaling_factor_y)
y1, y2 = (
scale(abs(translate(-img_y, v[1])), scaling_factor_y),
scale(abs(translate(-img_y, v[3])), scaling_factor_y),
)
v_segments_new.append((x1, y1, x2, y2))
h_segments_new = []
for h in h_segments:
x1, x2 = scale(h[0], scaling_factor_x), scale(h[2], scaling_factor_x)
y1, y2 = scale(abs(translate(-img_y, h[1])), scaling_factor_y), scale(
abs(translate(-img_y, h[3])), scaling_factor_y)
y1, y2 = (
scale(abs(translate(-img_y, h[1])), scaling_factor_y),
scale(abs(translate(-img_y, h[3])), scaling_factor_y),
)
h_segments_new.append((x1, y1, x2, y2))
return tables_new, v_segments_new, h_segments_new
@ -291,13 +292,13 @@ def get_rotation(chars, horizontal_text, vertical_text):
rotated 90 degree clockwise.
"""
rotation = ''
rotation = ""
hlen = len([t for t in horizontal_text if t.get_text().strip()])
vlen = len([t for t in vertical_text if t.get_text().strip()])
if hlen < vlen:
clockwise = sum(t.matrix[1] < 0 and t.matrix[2] > 0 for t in chars)
anticlockwise = sum(t.matrix[1] > 0 and t.matrix[2] < 0 for t in chars)
rotation = 'anticlockwise' if clockwise < anticlockwise else 'clockwise'
rotation = "anticlockwise" if clockwise < anticlockwise else "clockwise"
return rotation
@ -325,10 +326,16 @@ def segments_in_bbox(bbox, v_segments, h_segments):
"""
lb = (bbox[0], bbox[1])
rt = (bbox[2], bbox[3])
v_s = [v for v in v_segments if v[1] > lb[1] - 2 and
v[3] < rt[1] + 2 and lb[0] - 2 <= v[0] <= rt[0] + 2]
h_s = [h for h in h_segments if h[0] > lb[0] - 2 and
h[2] < rt[0] + 2 and lb[1] - 2 <= h[1] <= rt[1] + 2]
v_s = [
v
for v in v_segments
if v[1] > lb[1] - 2 and v[3] < rt[1] + 2 and lb[0] - 2 <= v[0] <= rt[0] + 2
]
h_s = [
h
for h in h_segments
if h[0] > lb[0] - 2 and h[2] < rt[0] + 2 and lb[1] - 2 <= h[1] <= rt[1] + 2
]
return v_s, h_s
@ -346,15 +353,108 @@ def text_in_bbox(bbox, text):
Returns
-------
t_bbox : list
List of PDFMiner text objects that lie inside table.
List of PDFMiner text objects that lie inside the table, discarding overlapping ones.
"""
lb = (bbox[0], bbox[1])
rt = (bbox[2], bbox[3])
t_bbox = [t for t in text if lb[0] - 2 <= (t.x0 + t.x1) / 2.0
<= rt[0] + 2 and lb[1] - 2 <= (t.y0 + t.y1) / 2.0
<= rt[1] + 2]
return t_bbox
t_bbox = [
t
for t in text
if lb[0] - 2 <= (t.x0 + t.x1) / 2.0 <= rt[0] + 2
and lb[1] - 2 <= (t.y0 + t.y1) / 2.0 <= rt[1] + 2
]
# Avoid duplicate text by discarding overlapping boxes
rest = {t for t in t_bbox}
for ba in t_bbox:
for bb in rest.copy():
if ba == bb:
continue
if bbox_intersect(ba, bb):
# if the intersection covers more than 80% of ba's area, keep only the longer box
if (bbox_intersection_area(ba, bb) / bbox_area(ba)) > 0.8:
if bbox_longer(bb, ba):
rest.discard(ba)
unique_boxes = list(rest)
return unique_boxes
def bbox_intersection_area(ba, bb) -> float:
"""Returns area of the intersection of the bounding boxes of two PDFMiner objects.
Parameters
----------
ba : PDFMiner text object
bb : PDFMiner text object
Returns
-------
intersection_area : float
Area of the intersection of the bounding boxes of both objects
"""
x_left = max(ba.x0, bb.x0)
y_top = min(ba.y1, bb.y1)
x_right = min(ba.x1, bb.x1)
y_bottom = max(ba.y0, bb.y0)
if x_right < x_left or y_bottom > y_top:
return 0.0
intersection_area = (x_right - x_left) * (y_top - y_bottom)
return intersection_area
def bbox_area(bb) -> float:
"""Returns area of the bounding box of a PDFMiner object.
Parameters
----------
bb : PDFMiner text object
Returns
-------
area : float
Area of the bounding box of the object
"""
return (bb.x1 - bb.x0) * (bb.y1 - bb.y0)
def bbox_intersect(ba, bb) -> bool:
"""Returns True if the bounding boxes of two PDFMiner objects intersect.
Parameters
----------
ba : PDFMiner text object
bb : PDFMiner text object
Returns
-------
overlaps : bool
True if the bounding boxes intersect
"""
return ba.x1 >= bb.x0 and bb.x1 >= ba.x0 and ba.y1 >= bb.y0 and bb.y1 >= ba.y0
def bbox_longer(ba, bb) -> bool:
"""Returns True if the bounding box of the first PDFMiner object is longer or equal to the second.
Parameters
----------
ba : PDFMiner text object
bb : PDFMiner text object
Returns
-------
longer : bool
True if the bounding box of the first object is longer or equal
"""
return (ba.x1 - ba.x0) >= (bb.x1 - bb.x0)
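A small worked example of the overlap rule used by text_in_bbox() above; a lightweight stand-in object is enough, since only the bbox attributes are read:
from collections import namedtuple

Box = namedtuple("Box", "x0 y0 x1 y1")   # stand-in for a PDFMiner text object

ba = Box(0, 0, 10, 2)    # area 20
bb = Box(1, 0, 12, 2)    # overlaps ba on [1, 10] x [0, 2] -> area 18

assert bbox_intersect(ba, bb)
assert bbox_intersection_area(ba, bb) == 18.0
assert bbox_intersection_area(ba, bb) / bbox_area(ba) > 0.8   # ba would be dropped
assert bbox_longer(bb, ba)                                    # in favour of the longer bb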
def merge_close_lines(ar, line_tol=2):
@ -385,12 +485,33 @@ def merge_close_lines(ar, line_tol=2):
return ret
def text_strip(text, strip=""):
"""Strips any characters in `strip` that are present in `text`.
Parameters
----------
text : str
Text to process and strip.
strip : str, optional (default: '')
Characters that should be stripped from `text`.
Returns
-------
stripped : str
"""
if not strip:
return text
stripped = re.sub(
fr"[{''.join(map(re.escape, strip))}]", "", text, flags=re.UNICODE
)
return stripped
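Unlike str.strip(), text_strip() removes the given characters anywhere in the string, not just at the ends:
assert "1,000.5".strip(",") == "1,000.5"       # str.strip only trims the ends
assert text_strip("1,000.5", ",") == "1000.5"  # text_strip removes them everywhere
assert text_strip("a.b.c", "") == "a.b.c"      # empty strip string is a no-op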
# TODO: combine the following functions into a TextProcessor class which
# applies corresponding transformations sequentially
# (inspired from sklearn.pipeline.Pipeline)
def flag_font_size(textline, direction, strip_text=''):
def flag_font_size(textline, direction, strip_text=""):
"""Flags super/subscripts in text by enclosing them with <s></s>.
May give false positives.
@ -409,10 +530,18 @@ def flag_font_size(textline, direction, strip_text=''):
fstring : string
"""
if direction == 'horizontal':
d = [(t.get_text(), np.round(t.height, decimals=6)) for t in textline if not isinstance(t, LTAnno)]
elif direction == 'vertical':
d = [(t.get_text(), np.round(t.width, decimals=6)) for t in textline if not isinstance(t, LTAnno)]
if direction == "horizontal":
d = [
(t.get_text(), np.round(t.height, decimals=6))
for t in textline
if not isinstance(t, LTAnno)
]
elif direction == "vertical":
d = [
(t.get_text(), np.round(t.width, decimals=6))
for t in textline
if not isinstance(t, LTAnno)
]
l = [np.round(size, decimals=6) for text, size in d]
if len(set(l)) > 1:
flist = []
@ -420,21 +549,21 @@ def flag_font_size(textline, direction, strip_text=''):
for key, chars in groupby(d, itemgetter(1)):
if key == min_size:
fchars = [t[0] for t in chars]
if ''.join(fchars).strip():
fchars.insert(0, '<s>')
fchars.append('</s>')
flist.append(''.join(fchars))
if "".join(fchars).strip():
fchars.insert(0, "<s>")
fchars.append("</s>")
flist.append("".join(fchars))
else:
fchars = [t[0] for t in chars]
if ''.join(fchars).strip():
flist.append(''.join(fchars))
fstring = ''.join(flist).strip(strip_text)
if "".join(fchars).strip():
flist.append("".join(fchars))
fstring = "".join(flist)
else:
fstring = ''.join([t.get_text() for t in textline]).strip(strip_text)
return fstring
fstring = "".join([t.get_text() for t in textline])
return text_strip(fstring, strip_text)
def split_textline(table, textline, direction, flag_size=False, strip_text=''):
def split_textline(table, textline, direction, flag_size=False, strip_text=""):
"""Splits PDFMiner LTTextLine into substrings if it spans across
multiple rows/columns.
@ -464,38 +593,70 @@ def split_textline(table, textline, direction, flag_size=False, strip_text=''):
cut_text = []
bbox = textline.bbox
try:
if direction == 'horizontal' and not textline.is_empty():
x_overlap = [i for i, x in enumerate(table.cols) if x[0] <= bbox[2] and bbox[0] <= x[1]]
r_idx = [j for j, r in enumerate(table.rows) if r[1] <= (bbox[1] + bbox[3]) / 2 <= r[0]]
if direction == "horizontal" and not textline.is_empty():
x_overlap = [
i
for i, x in enumerate(table.cols)
if x[0] <= bbox[2] and bbox[0] <= x[1]
]
r_idx = [
j
for j, r in enumerate(table.rows)
if r[1] <= (bbox[1] + bbox[3]) / 2 <= r[0]
]
r = r_idx[0]
x_cuts = [(c, table.cells[r][c].x2) for c in x_overlap if table.cells[r][c].right]
x_cuts = [
(c, table.cells[r][c].x2) for c in x_overlap if table.cells[r][c].right
]
if not x_cuts:
x_cuts = [(x_overlap[0], table.cells[r][-1].x2)]
for obj in textline._objs:
row = table.rows[r]
for cut in x_cuts:
if isinstance(obj, LTChar):
if (row[1] <= (obj.y0 + obj.y1) / 2 <= row[0] and
(obj.x0 + obj.x1) / 2 <= cut[1]):
if (
row[1] <= (obj.y0 + obj.y1) / 2 <= row[0]
and (obj.x0 + obj.x1) / 2 <= cut[1]
):
cut_text.append((r, cut[0], obj))
break
else:
# TODO: add test
if cut == x_cuts[-1]:
cut_text.append((r, cut[0] + 1, obj))
elif isinstance(obj, LTAnno):
cut_text.append((r, cut[0], obj))
elif direction == 'vertical' and not textline.is_empty():
y_overlap = [j for j, y in enumerate(table.rows) if y[1] <= bbox[3] and bbox[1] <= y[0]]
c_idx = [i for i, c in enumerate(table.cols) if c[0] <= (bbox[0] + bbox[2]) / 2 <= c[1]]
elif direction == "vertical" and not textline.is_empty():
y_overlap = [
j
for j, y in enumerate(table.rows)
if y[1] <= bbox[3] and bbox[1] <= y[0]
]
c_idx = [
i
for i, c in enumerate(table.cols)
if c[0] <= (bbox[0] + bbox[2]) / 2 <= c[1]
]
c = c_idx[0]
y_cuts = [(r, table.cells[r][c].y1) for r in y_overlap if table.cells[r][c].bottom]
y_cuts = [
(r, table.cells[r][c].y1) for r in y_overlap if table.cells[r][c].bottom
]
if not y_cuts:
y_cuts = [(y_overlap[0], table.cells[-1][c].y1)]
for obj in textline._objs:
col = table.cols[c]
for cut in y_cuts:
if isinstance(obj, LTChar):
if (col[0] <= (obj.x0 + obj.x1) / 2 <= col[1] and
(obj.y0 + obj.y1) / 2 >= cut[1]):
if (
col[0] <= (obj.x0 + obj.x1) / 2 <= col[1]
and (obj.y0 + obj.y1) / 2 >= cut[1]
):
cut_text.append((cut[0], c, obj))
break
else:
# TODO: add test
if cut == y_cuts[-1]:
cut_text.append((cut[0] - 1, c, obj))
elif isinstance(obj, LTAnno):
cut_text.append((cut[0], c, obj))
except IndexError:
@ -503,15 +664,26 @@ def split_textline(table, textline, direction, flag_size=False, strip_text=''):
grouped_chars = []
for key, chars in groupby(cut_text, itemgetter(0, 1)):
if flag_size:
grouped_chars.append((key[0], key[1],
flag_font_size([t[2] for t in chars], direction, strip_text=strip_text)))
grouped_chars.append(
(
key[0],
key[1],
flag_font_size(
[t[2] for t in chars], direction, strip_text=strip_text
),
)
)
else:
gchars = [t[2].get_text() for t in chars]
grouped_chars.append((key[0], key[1], ''.join(gchars).strip(strip_text)))
grouped_chars.append(
(key[0], key[1], text_strip("".join(gchars), strip_text))
)
return grouped_chars
def get_table_index(table, t, direction, split_text=False, flag_size=False, strip_text='',):
def get_table_index(
table, t, direction, split_text=False, flag_size=False, strip_text=""
):
"""Gets indices of the table cell where given text object lies by
comparing their y and x-coordinates.
@ -550,8 +722,9 @@ def get_table_index(table, t, direction, split_text=False, flag_size=False, stri
"""
r_idx, c_idx = [-1] * 2
for r in range(len(table.rows)):
if ((t.y0 + t.y1) / 2.0 < table.rows[r][0] and
(t.y0 + t.y1) / 2.0 > table.rows[r][1]):
if (t.y0 + t.y1) / 2.0 < table.rows[r][0] and (t.y0 + t.y1) / 2.0 > table.rows[
r
][1]:
lt_col_overlap = []
for c in table.cols:
if c[0] <= t.x1 and c[1] >= t.x0:
@ -561,11 +734,12 @@ def get_table_index(table, t, direction, split_text=False, flag_size=False, stri
else:
lt_col_overlap.append(-1)
if len(list(filter(lambda x: x != -1, lt_col_overlap))) == 0:
text = t.get_text().strip('\n')
text = t.get_text().strip("\n")
text_range = (t.x0, t.x1)
col_range = (table.cols[0][0], table.cols[-1][1])
warnings.warn("{} {} does not lie in column range {}".format(
text, text_range, col_range))
warnings.warn(
f"{text} {text_range} does not lie in column range {col_range}"
)
r_idx = r
c_idx = lt_col_overlap.index(max(lt_col_overlap))
break
@ -586,12 +760,26 @@ def get_table_index(table, t, direction, split_text=False, flag_size=False, stri
error = ((X * (y0_offset + y1_offset)) + (Y * (x0_offset + x1_offset))) / charea
if split_text:
return split_textline(table, t, direction, flag_size=flag_size, strip_text=strip_text), error
return (
split_textline(
table, t, direction, flag_size=flag_size, strip_text=strip_text
),
error,
)
else:
if flag_size:
return [(r_idx, c_idx, flag_font_size(t._objs, direction, strip_text=strip_text))], error
return (
[
(
r_idx,
c_idx,
flag_font_size(t._objs, direction, strip_text=strip_text),
)
],
error,
)
else:
return [(r_idx, c_idx, t.get_text().strip(strip_text))], error
return [(r_idx, c_idx, text_strip(t.get_text(), strip_text))], error
def compute_accuracy(error_weights):
@ -642,25 +830,35 @@ def compute_whitespace(d):
r_nempty_cells, c_nempty_cells = [], []
for i in d:
for j in i:
if j.strip() == '':
if j.strip() == "":
whitespace += 1
whitespace = 100 * (whitespace / float(len(d) * len(d[0])))
return whitespace
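compute_whitespace() reports the share of empty cells as a percentage; a quick hedged check:
data = [["a", "", "c"], ["d", "e", ""]]
print(compute_whitespace(data))   # 2 empty cells out of 6 -> 33.33...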
def get_page_layout(filename, char_margin=1.0, line_margin=0.5, word_margin=0.1,
detect_vertical=True, all_texts=True):
def get_page_layout(
filename,
line_overlap=0.5,
char_margin=1.0,
line_margin=0.5,
word_margin=0.1,
boxes_flow=0.5,
detect_vertical=True,
all_texts=True,
):
"""Returns a PDFMiner LTPage object and page dimension of a single
page pdf. See https://euske.github.io/pdfminer/ to get definitions
of kwargs.
page pdf. To get the definitions of kwargs, see
https://pdfminersix.rtfd.io/en/latest/reference/composable.html.
Parameters
----------
filename : string
Path to pdf file.
line_overlap : float
char_margin : float
line_margin : float
word_margin : float
boxes_flow : float
detect_vertical : bool
all_texts : bool
@ -672,16 +870,22 @@ def get_page_layout(filename, char_margin=1.0, line_margin=0.5, word_margin=0.1,
Dimension of pdf page in the form (width, height).
"""
with open(filename, 'rb') as f:
with open(filename, "rb") as f:
parser = PDFParser(f)
document = PDFDocument(parser)
if not document.is_extractable:
raise PDFTextExtractionNotAllowed
laparams = LAParams(char_margin=char_margin,
raise PDFTextExtractionNotAllowed(
f"Text extraction is not allowed: {filename}"
)
laparams = LAParams(
line_overlap=line_overlap,
char_margin=char_margin,
line_margin=line_margin,
word_margin=word_margin,
boxes_flow=boxes_flow,
detect_vertical=detect_vertical,
all_texts=all_texts)
all_texts=all_texts,
)
rsrcmgr = PDFResourceManager()
device = PDFPageAggregator(rsrcmgr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
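The extra LAParams keywords added above (line_overlap, boxes_flow) can be reached from the public API through layout_kwargs, which read_pdf() forwards down to get_page_layout(). A hedged sketch with a placeholder path:
import camelot

tables = camelot.read_pdf(
    "foo.pdf",
    flavor="stream",
    layout_kwargs={"line_overlap": 0.5, "char_margin": 1.0, "boxes_flow": 0.5},
)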
@ -713,13 +917,13 @@ def get_text_objects(layout, ltype="char", t=None):
List of PDFMiner text objects.
"""
if ltype == 'char':
if ltype == "char":
LTObject = LTChar
elif ltype == 'image':
elif ltype == "image":
LTObject = LTImage
elif ltype == 'horizontal_text':
elif ltype == "horizontal_text":
LTObject = LTTextLineHorizontal
elif ltype == 'vertical_text':
elif ltype == "vertical_text":
LTObject = LTTextLineVertical
if t is None:
t = []

View File

@ -4,13 +4,13 @@
</a>
</p>
<p>
<iframe src="https://ghbtns.com/github-btn.html?user=socialcopsdev&repo=camelot&type=watch&count=true&size=large"
<iframe src="https://ghbtns.com/github-btn.html?user=camelot-dev&repo=camelot&type=watch&count=true&size=large"
allowtransparency="true" frameborder="0" scrolling="0" width="200px" height="35px"></iframe>
</p>
<h3>Useful Links</h3>
<ul>
<li><a href="https://github.com/socialcopsdev/camelot">Camelot @ GitHub</a></li>
<li><a href="https://github.com/camelot-dev/camelot">Camelot @ GitHub</a></li>
<li><a href="https://pypi.org/project/camelot-py/">Camelot @ PyPI</a></li>
<li><a href="https://github.com/socialcopsdev/camelot/issues">Issue Tracker</a></li>
<li><a href="https://github.com/camelot-dev/camelot/issues">Issue Tracker</a></li>
</ul>

View File

@ -4,6 +4,6 @@
</a>
</p>
<p>
<iframe src="https://ghbtns.com/github-btn.html?user=socialcopsdev&repo=camelot&type=watch&count=true&size=large"
<iframe src="https://ghbtns.com/github-btn.html?user=camelot-dev&repo=camelot&type=watch&count=true&size=large"
allowtransparency="true" frameborder="0" scrolling="0" width="200px" height="35px"></iframe>
</p>

View File

@ -1,7 +1,19 @@
# flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
from pygments.token import (
Keyword,
Name,
Comment,
String,
Error,
Number,
Operator,
Generic,
Whitespace,
Punctuation,
Other,
Literal,
)
class FlaskyStyle(Style):
@ -14,10 +26,8 @@ class FlaskyStyle(Style):
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
@ -25,12 +35,9 @@ class FlaskyStyle(Style):
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
@ -53,12 +60,9 @@ class FlaskyStyle(Style):
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
@ -71,7 +75,6 @@ class FlaskyStyle(Style):
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'

View File

@ -22,8 +22,8 @@ import sys
# sys.path.insert(0, os.path.abspath('..'))
# Insert Camelot's path into the system.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('_themes'))
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("_themes"))
import camelot
@ -38,33 +38,33 @@ import camelot
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
source_suffix = ".rst"
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
master_doc = "index"
# General information about the project.
project = u'Camelot'
copyright = u'2018, <a href="https://socialcops.com" target="_blank">SocialCops</a>'
author = u'Vinayak Mehta'
project = u"Camelot"
copyright = u"2021, Camelot Developers"
author = u"Vinayak Mehta"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@ -94,7 +94,7 @@ language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build']
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
@ -114,7 +114,7 @@ add_module_names = True
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'flask_theme_support.FlaskyStyle'
pygments_style = "flask_theme_support.FlaskyStyle"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
@ -130,18 +130,18 @@ todo_include_todos = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'show_powered_by': False,
'github_user': 'socialcopsdev',
'github_repo': 'camelot',
'github_banner': True,
'show_related': False,
'note_bg': '#FFF59C'
"show_powered_by": False,
"github_user": "camelot-dev",
"github_repo": "camelot",
"github_banner": True,
"show_related": False,
"note_bg": "#FFF59C",
}
# Add any paths that contain custom themes here, relative to this directory.
@ -164,12 +164,12 @@ html_theme_options = {
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
@ -189,10 +189,21 @@ html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebarintro.html', 'relations.html', 'sourcelink.html',
'searchbox.html', 'hacks.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html', 'hacks.html']
"index": [
"sidebarintro.html",
"relations.html",
"sourcelink.html",
"searchbox.html",
"hacks.html",
],
"**": [
"sidebarlogo.html",
"localtoc.html",
"relations.html",
"sourcelink.html",
"searchbox.html",
"hacks.html",
],
}
# Additional templates that should be rendered to pages, maps page names to
@ -249,7 +260,7 @@ html_show_copyright = True
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Camelotdoc'
htmlhelp_basename = "Camelotdoc"
# -- Options for LaTeX output ---------------------------------------------
@ -257,15 +268,12 @@ latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
@ -275,8 +283,7 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Camelot.tex', u'Camelot Documentation',
u'Vinayak Mehta', 'manual'),
(master_doc, "Camelot.tex", u"Camelot Documentation", u"Vinayak Mehta", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
@ -316,10 +323,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Camelot', u'Camelot Documentation',
[author], 1)
]
man_pages = [(master_doc, "Camelot", u"Camelot Documentation", [author], 1)]
# If true, show URL addresses after external links.
#
@ -332,9 +336,15 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Camelot', u'Camelot Documentation',
author, 'Camelot', 'One line description of project.',
'Miscellaneous'),
(
master_doc,
"Camelot",
u"Camelot Documentation",
author,
"Camelot",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
@ -356,6 +366,6 @@ texinfo_documents = [
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'https://docs.python.org/2': None,
'http://pandas.pydata.org/pandas-docs/stable': None
"https://docs.python.org/2": None,
"http://pandas.pydata.org/pandas-docs/stable": None,
}

View File

@ -29,15 +29,15 @@ Your first contribution
A great way to start contributing to Camelot is to pick an issue tagged with the `help wanted`_ or the `good first issue`_ tags. If you're unable to find a good first issue, feel free to contact the maintainer.
.. _help wanted: https://github.com/socialcopsdev/camelot/labels/help%20wanted
.. _good first issue: https://github.com/socialcopsdev/camelot/labels/good%20first%20issue
.. _help wanted: https://github.com/camelot-dev/camelot/labels/help%20wanted
.. _good first issue: https://github.com/camelot-dev/camelot/labels/good%20first%20issue
Setting up a development environment
------------------------------------
To install the dependencies needed for development, you can use pip::
$ pip install camelot-py[dev]
$ pip install "camelot-py[dev]"
Alternatively, you can clone the project repository, and install using pip::
@ -51,7 +51,7 @@ Submit a pull request
The preferred workflow for contributing to Camelot is to fork the `project repository`_ on GitHub, clone, develop on a branch and then finally submit a pull request. Here are the steps:
.. _project repository: https://github.com/socialcopsdev/camelot
.. _project repository: https://github.com/camelot-dev/camelot
1. Fork the project repository. Click on the Fork button near the top of the page. This creates a copy of the code under your account on the GitHub.
@ -134,7 +134,7 @@ Filing Issues
We use `GitHub issues`_ to keep track of all issues and pull requests. Before opening an issue (which asks a question or reports a bug), please use GitHub search to look for existing issues (both open and closed) that may be similar.
.. _GitHub issues: https://github.com/socialcopsdev/camelot/issues
.. _GitHub issues: https://github.com/camelot-dev/camelot/issues
Questions
^^^^^^^^^

View File

@ -8,15 +8,15 @@ Camelot: PDF Table Extraction for Humans
Release v\ |version|. (:ref:`Installation <install>`)
.. image:: https://travis-ci.org/socialcopsdev/camelot.svg?branch=master
:target: https://travis-ci.org/socialcopsdev/camelot
.. image:: https://travis-ci.org/camelot-dev/camelot.svg?branch=master
:target: https://travis-ci.org/camelot-dev/camelot
.. image:: https://readthedocs.org/projects/camelot-py/badge/?version=master
:target: https://camelot-py.readthedocs.io/en/master/
:alt: Documentation Status
.. image:: https://codecov.io/github/socialcopsdev/camelot/badge.svg?branch=master&service=github
:target: https://codecov.io/github/socialcopsdev/camelot?branch=master
.. image:: https://codecov.io/github/camelot-dev/camelot/badge.svg?branch=master&service=github
:target: https://codecov.io/github/camelot-dev/camelot?branch=master
.. image:: https://img.shields.io/pypi/v/camelot-py.svg
:target: https://pypi.org/project/camelot-py/
@ -30,15 +30,21 @@ Release v\ |version|. (:ref:`Installation <install>`)
.. image:: https://badges.gitter.im/camelot-dev/Lobby.png
:target: https://gitter.im/camelot-dev/Lobby
**Camelot** is a Python library that makes it easy for *anyone* to extract tables from PDF files!
.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
:target: https://github.com/ambv/black
.. note:: You can also check out `Excalibur`_, which is a web interface for Camelot!
.. image:: https://img.shields.io/badge/continous%20quality-deepsource-lightgrey
:target: https://deepsource.io/gh/camelot-dev/camelot/?ref=repository-badge
**Camelot** is a Python library that can help you extract tables from PDFs!
.. note:: You can also check out `Excalibur`_, the web interface to Camelot!
.. _Excalibur: https://github.com/camelot-dev/excalibur
----
**Here's how you can extract tables from PDF files.** Check out the PDF used in this example `here`_.
**Here's how you can extract tables from PDFs.** You can check out the PDF used in this example `here`_.
.. _here: _static/pdf/foo.pdf
@ -48,7 +54,7 @@ Release v\ |version|. (:ref:`Installation <install>`)
>>> tables = camelot.read_pdf('foo.pdf')
>>> tables
<TableList n=1>
>>> tables.export('foo.csv', f='csv', compress=True) # json, excel, html
>>> tables.export('foo.csv', f='csv', compress=True) # json, excel, html, markdown, sqlite
>>> tables[0]
<Table shape=(7, 7)>
>>> tables[0].parsing_report
@ -58,35 +64,44 @@ Release v\ |version|. (:ref:`Installation <install>`)
'order': 1,
'page': 1
}
>>> tables[0].to_csv('foo.csv') # to_json, to_excel, to_html
>>> tables[0].to_csv('foo.csv') # to_json, to_excel, to_html, to_markdown, to_sqlite
>>> tables[0].df # get a pandas DataFrame!
.. csv-table::
:file: _static/csv/foo.csv
There's a :ref:`command-line interface <cli>` too!
Camelot also comes packaged with a :ref:`command-line interface <cli>`!
.. note:: Camelot only works with text-based PDFs and not scanned documents. (As Tabula `explains`_, "If you can click and drag to select text in your table in a PDF viewer, then your PDF is text-based".)
You can check out some frequently asked questions :ref:`here <faq>`.
.. _explains: https://github.com/tabulapdf/tabula#why-tabula
Why Camelot?
------------
- **You are in control.** Unlike other libraries and tools which either give a nice output or fail miserably (with no in-between), Camelot gives you the power to tweak table extraction. (This is important since everything in the real world, including PDF table extraction, is fuzzy.)
- *Bad* tables can be discarded based on **metrics** like accuracy and whitespace, without ever having to manually look at each table.
- Each table is a **pandas DataFrame**, which seamlessly integrates into `ETL and data analysis workflows`_.
- **Export** to multiple formats, including JSON, Excel and HTML.
See `comparison with other PDF table extraction libraries and tools`_.
- **Configurability**: Camelot gives you control over the table extraction process with :ref:`tweakable settings <advanced>`.
- **Metrics**: You can discard bad tables based on metrics like accuracy and whitespace, without having to manually look at each table.
- **Output**: Each table is extracted into a **pandas DataFrame**, which seamlessly integrates into `ETL and data analysis workflows`_. You can also export tables to multiple formats, which include CSV, JSON, Excel, HTML, Markdown, and Sqlite.
.. _ETL and data analysis workflows: https://gist.github.com/vinayak-mehta/e5949f7c2410a0e12f25d3682dc9e873
.. _comparison with other PDF table extraction libraries and tools: https://github.com/socialcopsdev/camelot/wiki/Comparison-with-other-PDF-Table-Extraction-libraries-and-tools
See `comparison with similar libraries and tools`_.
.. _comparison with similar libraries and tools: https://github.com/camelot-dev/camelot/wiki/Comparison-with-other-PDF-Table-Extraction-libraries-and-tools
Support the development
-----------------------
If Camelot has helped you, please consider supporting its development with a one-time or monthly donation `on OpenCollective`_!
.. _on OpenCollective: https://opencollective.com/camelot
The User Guide
--------------
This part of the documentation begins with some background information about why Camelot was created, takes a small dip into the implementation details and then focuses on step-by-step instructions for getting the most out of Camelot.
This part of the documentation begins with some background information about why Camelot was created, takes you through some implementation details, and then focuses on step-by-step instructions for getting the most out of Camelot.
.. toctree::
:maxdepth: 2
@ -97,13 +112,13 @@ This part of the documentation begins with some background information about why
user/how-it-works
user/quickstart
user/advanced
user/faq
user/cli
The API Documentation/Guide
---------------------------
If you are looking for information on a specific function, class, or method,
this part of the documentation is for you.
If you are looking for information on a specific function, class, or method, this part of the documentation is for you.
.. toctree::
:maxdepth: 2
@ -113,8 +128,7 @@ this part of the documentation is for you.
The Contributor Guide
---------------------
If you want to contribute to the project, this part of the documentation is for
you.
If you want to contribute to the project, this part of the documentation is for you.
.. toctree::
:maxdepth: 2

View File

@ -66,8 +66,7 @@ Let's plot all the text present on the table's PDF page.
::
>>> camelot.plot(tables[0], kind='text')
>>> plt.show()
>>> camelot.plot(tables[0], kind='text').show()
.. tip::
Here's how you can do the same with the :ref:`command-line interface <cli>`.
@ -93,8 +92,7 @@ Let's plot the table (to see if it was detected correctly or not). This plot typ
::
>>> camelot.plot(tables[0], kind='grid')
>>> plt.show()
>>> camelot.plot(tables[0], kind='grid').show()
.. tip::
Here's how you can do the same with the :ref:`command-line interface <cli>`.
@ -118,8 +116,7 @@ Now, let's plot all table boundaries present on the table's PDF page.
::
>>> camelot.plot(tables[0], kind='contour')
>>> plt.show()
>>> camelot.plot(tables[0], kind='contour').show()
.. tip::
Here's how you can do the same with the :ref:`command-line interface <cli>`.
@ -141,8 +138,7 @@ Cool, let's plot all line segments present on the table's PDF page.
::
>>> camelot.plot(tables[0], kind='line')
>>> plt.show()
>>> camelot.plot(tables[0], kind='line').show()
.. tip::
Here's how you can do the same with the :ref:`command-line interface <cli>`.
@ -164,8 +160,7 @@ Finally, let's plot all line intersections present on the table's PDF page.
::
>>> camelot.plot(tables[0], kind='joint')
>>> plt.show()
>>> camelot.plot(tables[0], kind='joint').show()
.. tip::
Here's how you can do the same with the :ref:`command-line interface <cli>`.
@ -187,8 +182,7 @@ You can also visualize the textedges found on a page by specifying ``kind='texte
::
>>> camelot.plot(tables[0], kind='textedge')
>>> plt.show()
>>> camelot.plot(tables[0], kind='textedge').show()
.. tip::
Here's how you can do the same with the :ref:`command-line interface <cli>`.
@ -224,10 +218,12 @@ Table areas that you want Camelot to analyze can be passed as a list of comma-se
.. csv-table::
:file: ../_static/csv/table_areas.csv
.. note:: ``table_areas`` accepts strings of the form x1,y1,x2,y2 where (x1, y1) -> top-left and (x2, y2) -> bottom-right in PDF coordinate space. In PDF coordinate space, the bottom-left corner of the page is the origin, with coordinates (0, 0).
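As a minimal sketch of this coordinate convention (the file name and the area string are illustrative placeholders)::

>>> import camelot
>>> # (x1, y1) is the top-left and (x2, y2) the bottom-right corner,
>>> # measured from the page's bottom-left origin
>>> tables = camelot.read_pdf('foo.pdf', flavor='stream',
...                           table_areas=['316,499,566,337'])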
Specify table regions
---------------------
However there may be cases like `[1] <../_static/pdf/table_regions.pdf>`__ and `[2] <https://github.com/socialcopsdev/camelot/blob/master/tests/files/tableception.pdf>`__, where the table might not lie at the exact coordinates every time but in an approximate region.
However there may be cases like `[1] <../_static/pdf/table_regions.pdf>`__ and `[2] <https://github.com/camelot-dev/camelot/blob/master/tests/files/tableception.pdf>`__, where the table might not lie at the exact coordinates every time but in an approximate region.
You can use the ``table_regions`` keyword argument to :meth:`read_pdf() <camelot.read_pdf>` to solve for such cases. When ``table_regions`` is specified, Camelot will only analyze the specified regions to look for tables.
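A minimal sketch of passing such a region (the region string here is illustrative)::

>>> tables = camelot.read_pdf('table_regions.pdf', table_regions=['170,370,560,270'])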
@ -314,7 +310,7 @@ In this case, the text that `other tools`_ return, will be ``24.912``. This is r
You can solve this by passing ``flag_size=True``, which will enclose the superscripts and subscripts with ``<s></s>``, based on font size, as shown below.
.. _other tools: https://github.com/socialcopsdev/camelot/wiki/Comparison-with-other-PDF-Table-Extraction-libraries-and-tools
.. _other tools: https://github.com/camelot-dev/camelot/wiki/Comparison-with-other-PDF-Table-Extraction-libraries-and-tools
::
@ -338,7 +334,7 @@ You can solve this by passing ``flag_size=True``, which will enclose the supersc
Strip characters from text
--------------------------
You can strip unwanted characters like spaces, dots and newlines from a string using the ``strip_text`` keyword argument. Take a look at `this PDF <https://github.com/socialcopsdev/camelot/blob/master/tests/files/tabula/12s0324.pdf>`_ as an example, the text at the start of each row contains a lot of unwanted spaces, dots and newlines.
You can strip unwanted characters like spaces, dots and newlines from a string using the ``strip_text`` keyword argument. Take a look at `this PDF <https://github.com/camelot-dev/camelot/blob/master/tests/files/tabula/12s0324.pdf>`_ as an example, the text at the start of each row contains a lot of unwanted spaces, dots and newlines.
::
@ -364,7 +360,7 @@ You can strip unwanted characters like spaces, dots and newlines from a string u
Improve guessed table areas
---------------------------
While using :ref:`Stream <stream>`, automatic table detection can fail for PDFs like `this one <https://github.com/socialcopsdev/camelot/blob/master/tests/files/edge_tol.pdf>`_. That's because the text is relatively far apart vertically, which can lead to shorter textedges being calculated.
While using :ref:`Stream <stream>`, automatic table detection can fail for PDFs like `this one <https://github.com/camelot-dev/camelot/blob/master/tests/files/edge_tol.pdf>`_. That's because the text is relatively far apart vertically, which can lead to shorter textedges being calculated.
.. note:: To know more about how textedges are calculated to guess table areas, you can see pages 20, 35 and 40 of `Anssi Nurminen's master's thesis <http://dspace.cc.tut.fi/dpub/bitstream/handle/123456789/21520/Nurminen.pdf?sequence=3>`_.
@ -373,8 +369,7 @@ Let's see the table area that is detected by default.
::
>>> tables = camelot.read_pdf('edge_tol.pdf', flavor='stream')
>>> camelot.plot(tables[0], kind='contour')
>>> plt.show()
>>> camelot.plot(tables[0], kind='contour').show()
.. tip::
Here's how you can do the same with the :ref:`command-line interface <cli>`.
@ -394,8 +389,7 @@ To improve the detected area, you can increase the ``edge_tol`` (default: 50) va
::
>>> tables = camelot.read_pdf('edge_tol.pdf', flavor='stream', edge_tol=500)
>>> camelot.plot(tables[0], kind='contour')
>>> plt.show()
>>> camelot.plot(tables[0], kind='contour').show()
.. tip::
Here's how you can do the same with the :ref:`command-line interface <cli>`.
@ -470,8 +464,7 @@ Let's plot the table for this PDF.
::
>>> tables = camelot.read_pdf('short_lines.pdf')
>>> camelot.plot(tables[0], kind='grid')
>>> plt.show()
>>> camelot.plot(tables[0], kind='grid').show()
.. figure:: ../_static/png/short_lines_1.png
:alt: A plot of the PDF table with short lines
@ -482,8 +475,7 @@ Clearly, the smaller lines separating the headers, couldn't be detected. Let's t
::
>>> tables = camelot.read_pdf('short_lines.pdf', line_scale=40)
>>> camelot.plot(tables[0], kind='grid')
>>> plt.show()
>>> camelot.plot(tables[0], kind='grid').show()
.. tip::
Here's how you can do the same with the :ref:`command-line interface <cli>`.
@ -624,10 +616,33 @@ We don't need anything else. Now, let's pass ``copy_text=['v']`` to copy text in
Tweak layout generation
-----------------------
Camelot is built on top of PDFMiner's functionality of grouping characters on a page into words and sentences. In some cases (such as `#170 <https://github.com/socialcopsdev/camelot/issues/170>`_ and `#215 <https://github.com/socialcopsdev/camelot/issues/215>`_), PDFMiner can group characters that should belong to the same sentence into separate sentences.
Camelot is built on top of PDFMiner's functionality of grouping characters on a page into words and sentences. In some cases (such as `#170 <https://github.com/camelot-dev/camelot/issues/170>`_ and `#215 <https://github.com/camelot-dev/camelot/issues/215>`_), PDFMiner can group characters that should belong to the same sentence into separate sentences.
To deal with such cases, you can tweak PDFMiner's `LAParams kwargs <https://github.com/euske/pdfminer/blob/master/pdfminer/layout.py#L33>`_ to improve layout generation, by passing the keyword arguments as a dict using ``layout_kwargs`` in :meth:`read_pdf() <camelot.read_pdf>`. To know more about the parameters you can tweak, you can check out `PDFMiner docs <https://euske.github.io/pdfminer/>`_.
To deal with such cases, you can tweak PDFMiner's `LAParams kwargs <https://github.com/euske/pdfminer/blob/master/pdfminer/layout.py#L33>`_ to improve layout generation, by passing the keyword arguments as a dict using ``layout_kwargs`` in :meth:`read_pdf() <camelot.read_pdf>`. To know more about the parameters you can tweak, you can check out `PDFMiner docs <https://pdfminersix.rtfd.io/en/latest/reference/composable.html>`_.
::
>>> tables = camelot.read_pdf('foo.pdf', layout_kwargs={'detect_vertical': False})
.. _image-conversion-backend:
Use alternate image conversion backends
---------------------------------------
When using the :ref:`Lattice <lattice>` flavor, Camelot uses ``ghostscript`` to convert PDF pages to images for line recognition. If you face installation issues with ``ghostscript``, you can use an alternate image conversion backend called ``poppler``. You can specify which image conversion backend you want to use with::
>>> tables = camelot.read_pdf(filename, backend="ghostscript") # default
>>> tables = camelot.read_pdf(filename, backend="poppler")
.. note:: ``ghostscript`` will be replaced by ``poppler`` as the default image conversion backend in ``v0.12.0``.
If you face issues with both ``ghostscript`` and ``poppler``, you can supply your own image conversion backend::
>>> class ConversionBackend(object):
>>> def convert(self, pdf_path, png_path):
>>> # read pdf page from pdf_path
>>> # convert pdf page to image
>>> # write image to png_path
>>> pass
>>>
>>> tables = camelot.read_pdf(filename, backend=ConversionBackend())
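For reference, here is a minimal, hedged sketch of such a backend that shells out to poppler's ``pdftoppm`` binary (assumed to be on ``PATH``); the class name is illustrative and not part of Camelot's API::

    import subprocess

    import camelot

    class PdftoppmBackend(object):
        def convert(self, pdf_path, png_path):
            # Render the (single-page) PDF at 300 dpi; pdftoppm appends
            # ".png" itself, so pass the output path without its extension.
            subprocess.check_call(
                ["pdftoppm", "-png", "-singlefile", "-r", "300",
                 pdf_path, png_path[:-len(".png")]]
            )

    tables = camelot.read_pdf("foo.pdf", backend=PdftoppmBackend())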

View File

@ -26,6 +26,8 @@ You can print the help for the interface by typing ``camelot --help`` in your fa
-split, --split_text Split text that spans across multiple cells.
-flag, --flag_size Flag text based on font size. Useful to
detect super/subscripts.
-strip, --strip_text Characters that should be stripped from a
string before assigning it to a cell.
-M, --margins <FLOAT FLOAT FLOAT>...
PDFMiner char_margin, line_margin and
word_margin.

docs/user/faq.rst (new file, 70 lines added, mode 100644)
View File

@ -0,0 +1,70 @@
.. _faq:
Frequently Asked Questions
==========================
This part of the documentation answers some common questions. To add questions, please open an issue `here <https://github.com/camelot-dev/camelot/issues/new>`_.
Does Camelot work with image-based PDFs?
----------------------------------------
**No**, Camelot only works with text-based PDFs and not scanned documents. (As Tabula `explains <https://github.com/tabulapdf/tabula#why-tabula>`_, "If you can click and drag to select text in your table in a PDF viewer, then your PDF is text-based".)
How to reduce memory usage for long PDFs?
-----------------------------------------
During table extraction from long PDF documents, RAM usage can grow significantly.
A simple workaround is to divide the extraction into chunks, and save extracted data to disk at the end of every chunk.
For more details, check out this code snippet from `@anakin87 <https://github.com/anakin87>`_:
::
import camelot
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i : i + n]
def extract_tables(filepath, pages, chunk_size=50, export_path=".", params={}):
"""
Divide the extraction work into n chunks. At the end of every chunk,
save data on disk and free RAM.
filepath : str
Filepath or URL of the PDF file.
pages : str, optional (default: '1')
Comma-separated page numbers.
Example: '1,3,4' or '1,4-end' or 'all'.
"""
# get list of pages from camelot.handlers.PDFHandler
handler = camelot.handlers.PDFHandler(filepath)
page_list = handler._get_pages(filepath, pages=pages)
# chunk pages list
page_chunks = list(chunks(page_list, chunk_size))
# extraction and export
for chunk in page_chunks:
pages_string = str(chunk).replace("[", "").replace("]", "")
tables = camelot.read_pdf(filepath, pages=pages_string, **params)
tables.export(f"{export_path}/tables.csv")
How can I supply my own image conversion backend to Lattice?
------------------------------------------------------------
When using the :ref:`Lattice <lattice>` flavor, you can supply your own :ref:`image conversion backend <image-conversion-backend>` by creating a class with a ``convert`` method as follows::
>>> class ConversionBackend(object):
>>> def convert(self, pdf_path, png_path):
>>> # read pdf page from pdf_path
>>> # convert pdf page to image
>>> # write image to png_path
>>> pass
>>>
>>> tables = camelot.read_pdf(filename, backend=ConversionBackend())

View File

@ -16,11 +16,11 @@ Stream can be used to parse tables that have whitespaces between cells to simula
1. Words on the PDF page are grouped into text rows based on their *y* axis overlaps.
2. Textedges are calculated and then used to guess interesting table areas on the PDF page. You can read `Anssi Nurminen's master's thesis <http://dspace.cc.tut.fi/dpub/bitstream/handle/123456789/21520/Nurminen.pdf?sequence=3>`_ to know more about this table detection technique. [See pages 20, 35 and 40]
2. Textedges are calculated and then used to guess interesting table areas on the PDF page. You can read `Anssi Nurminen's master's thesis <https://pdfs.semanticscholar.org/a9b1/67a86fb189bfcd366c3839f33f0404db9c10.pdf>`_ to know more about this table detection technique. [See pages 20, 35 and 40]
3. The number of columns inside each table area is then guessed. This is done by calculating the mode of the number of words in each text row (see the toy sketch after this list). Based on this mode, words in each text row are chosen to calculate a list of column *x* ranges.
4. Words that lie inside/outside the current column *x* ranges are then used to extend extend the current list of columns.
4. Words that lie inside/outside the current column *x* ranges are then used to extend the current list of columns.
5. Finally, a table is formed using the text rows' *y* ranges and column *x* ranges and words found on the page are assigned to the table's cells based on their *x* and *y* coordinates.
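To make step 3 above concrete, here is a toy sketch of picking the column count as the mode of per-row word counts; it is only an illustration, not Camelot's actual implementation::

    from collections import Counter

    # each inner list is one text row; its length is that row's word count
    rows = [
        ["Name", "Age", "City"],
        ["Alice", "34", "Berlin"],
        ["Bob", "29"],                # a row with a missing value
        ["Carol", "41", "Lima"],
    ]

    word_counts = [len(row) for row in rows]
    ncols = Counter(word_counts).most_common(1)[0][0]  # -> 3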

View File

@ -3,72 +3,60 @@
Installation of dependencies
============================
The dependencies `Tkinter`_ and `ghostscript`_ can be installed using your system's package manager. You can run one of the following, based on your OS.
.. _Tkinter: https://wiki.python.org/moin/TkInter
.. _ghostscript: https://www.ghostscript.com
The dependencies `Ghostscript <https://www.ghostscript.com>`_ and `Tkinter <https://wiki.python.org/moin/TkInter>`_ can be installed using your system's package manager or by running their installer.
OS-specific instructions
------------------------
For Ubuntu
^^^^^^^^^^
Ubuntu
^^^^^^
::
$ apt install python-tk ghostscript
$ apt install ghostscript python3-tk
Or for Python 3::
$ apt install python3-tk ghostscript
For macOS
^^^^^^^^^
MacOS
^^^^^
::
$ brew install tcl-tk ghostscript
$ brew install ghostscript tcl-tk
For Windows
^^^^^^^^^^^
Windows
^^^^^^^
For Tkinter, you can download the `ActiveTcl Community Edition`_ from ActiveState. For ghostscript, you can get the installer at the `ghostscript downloads page`_.
For Ghostscript, you can get the installer at their `downloads page <https://www.ghostscript.com/download/gsdnld.html>`_. And for Tkinter, you can download the `ActiveTcl Community Edition <https://www.activestate.com/activetcl/downloads>`_ from ActiveState.
.. _ActiveTcl Community Edition: https://www.activestate.com/activetcl/downloads
.. _ghostscript downloads page: https://www.ghostscript.com/download/gsdnld.html
.. _as shown here: https://java.com/en/download/help/path.xml
Checks to see if dependencies are installed correctly
-----------------------------------------------------
Checks to see if dependencies were installed correctly
------------------------------------------------------
You can run the following checks to see if the dependencies were installed correctly.
You can do the following checks to see if the dependencies were installed correctly.
For Ghostscript
^^^^^^^^^^^^^^^
Open the Python REPL and run the following:
For Ubuntu/MacOS::
>>> from ctypes.util import find_library
>>> find_library("gs")
"libgs.so.9"
For Windows::
>>> import ctypes
>>> from ctypes.util import find_library
>>> find_library("".join(("gsdll", str(ctypes.sizeof(ctypes.c_voidp) * 8), ".dll")))
<name-of-ghostscript-library-on-windows>
**Check:** The output of the ``find_library`` function should not be empty.
If the output is empty, then it's possible that the Ghostscript library is not available in one of the ``LD_LIBRARY_PATH``/``DYLD_LIBRARY_PATH``/``PATH`` variables, depending on your operating system. In this case, you may have to modify one of those path variables.
For Tkinter
^^^^^^^^^^^
Launch Python, and then at the prompt, type::
>>> import Tkinter
Or in Python 3::
Launch Python and then import Tkinter::
>>> import tkinter
If you have Tkinter, Python will not print an error message, and if not, you will see an ``ImportError``.
For ghostscript
^^^^^^^^^^^^^^^
Run the following to check the ghostscript version.
For Ubuntu/macOS::
$ gs -version
For Windows::
C:\> gswin64c.exe -version
Or for Windows 32-bit::
C:\> gswin32c.exe -version
If you have ghostscript, you should see the ghostscript version and copyright information.
**Check:** Importing ``tkinter`` should not raise an import error.

View File

@ -5,43 +5,36 @@ Installation of Camelot
This part of the documentation covers the steps to install Camelot.
Using conda
-----------
After :ref:`installing the dependencies <install_deps>`, which include `Ghostscript <https://www.ghostscript.com>`_ and `Tkinter <https://wiki.python.org/moin/TkInter>`_, you can use one of the following methods to install Camelot:
The easiest way to install Camelot is to install it with `conda`_, which is a package manager and environment management system for the `Anaconda`_ distribution.
::
.. warning:: The ``lattice`` flavor will fail to run if Ghostscript is not installed. You may run into errors as shown in `issue #193 <https://github.com/camelot-dev/camelot/issues/193>`_.
pip
---
To install Camelot from PyPI using ``pip``, please include the extra ``base`` requirement as shown::
$ pip install "camelot-py[base]"
conda
-----
`conda`_ is a package manager and environment management system for the `Anaconda <https://anaconda.org>`_ distribution. It can be used to install Camelot from the ``conda-forge`` channel::
$ conda install -c conda-forge camelot-py
.. note:: Camelot is available for Python 2.7, 3.5 and 3.6 on Linux, macOS and Windows. For Windows, you will need to install ghostscript which you can get from their `downloads page`_.
.. _conda: https://conda.io/docs/
.. _Anaconda: http://docs.continuum.io/anaconda/
.. _downloads page: https://www.ghostscript.com/download/gsdnld.html
.. _conda-forge: https://conda-forge.org/
Using pip
---------
After :ref:`installing the dependencies <install_deps>`, which include `Tkinter`_ and `ghostscript`_, you can simply use pip to install Camelot::
$ pip install camelot-py[cv]
.. _Tkinter: https://wiki.python.org/moin/TkInter
.. _ghostscript: https://www.ghostscript.com
From the source code
--------------------
After :ref:`installing the dependencies <install_deps>`, you can install from the source by:
After :ref:`installing the dependencies <install_deps>`, you can install Camelot from source by:
1. Cloning the GitHub repository.
::
$ git clone https://www.github.com/socialcopsdev/camelot
$ git clone https://www.github.com/camelot-dev/camelot
2. Then simply using pip again.
2. And then simply using pip again.
::
$ cd camelot
$ pip install ".[cv]"
$ pip install ".[base]"

View File

@ -27,7 +27,7 @@ Here is a `comparison`_ of Camelot's output with outputs from other open-source
.. _pdf-table-extract: https://github.com/ashima/pdf-table-extract
.. _PDFTables: https://pdftables.com/
.. _Smallpdf: https://smallpdf.com
.. _comparison: https://github.com/socialcopsdev/camelot/wiki/Comparison-with-other-PDF-Table-Extraction-libraries-and-tools
.. _comparison: https://github.com/camelot-dev/camelot/wiki/Comparison-with-other-PDF-Table-Extraction-libraries-and-tools
What's in a name?
-----------------

View File

@ -56,7 +56,7 @@ Woah! The accuracy is top-notch and there is less whitespace, which means the ta
.. csv-table::
:file: ../_static/csv/foo.csv
Looks good! You can now export the table as a CSV file using its :meth:`to_csv() <camelot.core.Table.to_csv>` method. Alternatively you can use :meth:`to_json() <camelot.core.Table.to_json>`, :meth:`to_excel() <camelot.core.Table.to_excel>` :meth:`to_html() <camelot.core.Table.to_html>` or :meth:`to_sqlite() <camelot.core.Table.to_sqlite>` methods to export the table as JSON, Excel, HTML files or a sqlite database respectively.
Looks good! You can now export the table as a CSV file using its :meth:`to_csv() <camelot.core.Table.to_csv>` method. Alternatively, you can use the :meth:`to_json() <camelot.core.Table.to_json>`, :meth:`to_excel() <camelot.core.Table.to_excel>`, :meth:`to_html() <camelot.core.Table.to_html>`, :meth:`to_markdown() <camelot.core.Table.to_markdown>` or :meth:`to_sqlite() <camelot.core.Table.to_sqlite>` methods to export the table as JSON, Excel, HTML or Markdown files, or a sqlite database, respectively.
::
@ -76,7 +76,7 @@ You can also export all tables at once, using the :class:`tables <camelot.core.T
$ camelot --format csv --output foo.csv lattice foo.pdf
This will export all tables as CSV files at the path specified. Alternatively, you can use ``f='json'``, ``f='excel'``, ``f='html'`` or ``f='sqlite'``.
This will export all tables as CSV files at the path specified. Alternatively, you can use ``f='json'``, ``f='excel'``, ``f='html'``, ``f='markdown'`` or ``f='sqlite'``.
.. note:: The :meth:`export() <camelot.core.TableList.export>` method exports files with a ``page-*-table-*`` suffix. In the example above, the single table in the list will be exported to ``foo-page-1-table-1.csv``. If the list contains multiple tables, multiple CSV files will be created. To avoid filling up your path with multiple files, you can use ``compress=True``, which will create a single ZIP file at your path with all the CSV files.

View File

@ -1,8 +0,0 @@
click>=6.7
matplotlib>=2.2.3
numpy>=1.13.3
opencv-python>=3.4.2.17
openpyxl>=2.5.8
pandas>=0.23.4
pdfminer.six>=20170720
PyPDF2>=1.26.0

View File

@ -6,76 +6,78 @@ from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, 'camelot', '__version__.py'), 'r') as f:
with open(os.path.join(here, "camelot", "__version__.py"), "r") as f:
exec(f.read(), about)
with open('README.md', 'r') as f:
with open("README.md", "r") as f:
readme = f.read()
requires = [
'chardet>=3.0.4',
'click>=6.7',
'numpy>=1.13.3',
'openpyxl>=2.5.8',
'pandas>=0.23.4',
'pdfminer.six>=20170720',
'PyPDF2>=1.26.0'
"chardet>=3.0.4",
"click>=6.7",
"numpy>=1.13.3",
"openpyxl>=2.5.8",
"pandas>=0.23.4",
"pdfminer.six>=20200726",
"PyPDF2>=1.26.0",
"tabulate>=0.8.9",
]
cv_requires = [
'opencv-python>=3.4.2.17'
]
base_requires = ["ghostscript>=0.7", "opencv-python>=3.4.2.17", "pdftopng>=0.2.3"]
plot_requires = [
'matplotlib>=2.2.3',
"matplotlib>=2.2.3",
]
dev_requires = [
'codecov>=2.0.15',
'pytest>=3.8.0',
'pytest-cov>=2.6.0',
'pytest-mpl>=0.10',
'pytest-runner>=4.2',
'Sphinx>=1.7.9'
"codecov>=2.0.15",
"pytest>=5.4.3",
"pytest-cov>=2.10.0",
"pytest-mpl>=0.11",
"pytest-runner>=5.2",
"Sphinx>=3.1.2",
"sphinx-autobuild>=2021.3.14",
]
all_requires = cv_requires + plot_requires
all_requires = base_requires + plot_requires
dev_requires = dev_requires + all_requires
def setup_package():
metadata = dict(name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
metadata = dict(
name=about["__title__"],
version=about["__version__"],
description=about["__description__"],
long_description=readme,
long_description_content_type="text/markdown",
url=about['__url__'],
author=about['__author__'],
author_email=about['__author_email__'],
license=about['__license__'],
packages=find_packages(exclude=('tests',)),
url=about["__url__"],
author=about["__author__"],
author_email=about["__author_email__"],
license=about["__license__"],
packages=find_packages(exclude=("tests",)),
install_requires=requires,
extras_require={
'all': all_requires,
'cv': cv_requires,
'dev': dev_requires,
'plot': plot_requires
"all": all_requires,
"base": base_requires,
"cv": base_requires, # deprecate
"dev": dev_requires,
"plot": plot_requires,
},
entry_points={
'console_scripts': [
'camelot = camelot.cli:cli',
"console_scripts": [
"camelot = camelot.cli:cli",
],
},
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
])
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
try:
from setuptools import setup
@ -85,5 +87,5 @@ def setup_package():
setup(**metadata)
if __name__ == '__main__':
if __name__ == "__main__":
setup_package()

View File

@ -1,2 +1,3 @@
import matplotlib
matplotlib.use('agg')
matplotlib.use("agg")

File diff suppressed because it is too large

View File

[Binary image files changed in this range; before/after previews are not shown.]

View File

@ -1,7 +1,9 @@
# -*- coding: utf-8 -*-
import os
import sys
import pytest
from click.testing import CliRunner
from camelot.cli import cli
@ -9,109 +11,181 @@ from camelot.utils import TemporaryDirectory
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, 'files')
testdir = os.path.join(testdir, "files")
skip_on_windows = pytest.mark.skipif(
sys.platform.startswith("win"),
reason="Ghostscript not installed in Windows test environment",
)
def test_help_output():
runner = CliRunner()
prog_name = runner.get_default_prog_name(cli)
result = runner.invoke(cli, ["--help"])
output = result.output
assert prog_name == "camelot"
assert result.output.startswith("Usage: %(prog_name)s [OPTIONS] COMMAND" % locals())
assert all(
v in result.output
for v in ["Options:", "--version", "--help", "Commands:", "lattice", "stream"]
)
@skip_on_windows
def test_cli_lattice():
with TemporaryDirectory() as tempdir:
infile = os.path.join(testdir, 'foo.pdf')
outfile = os.path.join(tempdir, 'foo.csv')
infile = os.path.join(testdir, "foo.pdf")
outfile = os.path.join(tempdir, "foo.csv")
runner = CliRunner()
result = runner.invoke(cli, ['--format', 'csv', '--output', outfile,
'lattice', infile])
result = runner.invoke(
cli, ["--format", "csv", "--output", outfile, "lattice", infile]
)
assert result.exit_code == 0
assert result.output == 'Found 1 tables\n'
assert "Found 1 tables" in result.output
result = runner.invoke(cli, ['--format', 'csv',
'lattice', infile])
output_error = 'Error: Please specify output file path using --output'
result = runner.invoke(cli, ["--format", "csv", "lattice", infile])
output_error = "Error: Please specify output file path using --output"
assert output_error in result.output
result = runner.invoke(cli, ['--output', outfile,
'lattice', infile])
format_error = 'Please specify output file format using --format'
result = runner.invoke(cli, ["--output", outfile, "lattice", infile])
format_error = "Please specify output file format using --format"
assert format_error in result.output
def test_cli_stream():
with TemporaryDirectory() as tempdir:
infile = os.path.join(testdir, 'budget.pdf')
outfile = os.path.join(tempdir, 'budget.csv')
infile = os.path.join(testdir, "budget.pdf")
outfile = os.path.join(tempdir, "budget.csv")
runner = CliRunner()
result = runner.invoke(cli, ['--format', 'csv', '--output', outfile,
'stream', infile])
result = runner.invoke(
cli, ["--format", "csv", "--output", outfile, "stream", infile]
)
assert result.exit_code == 0
assert result.output == 'Found 1 tables\n'
assert result.output == "Found 1 tables\n"
result = runner.invoke(cli, ['--format', 'csv', 'stream', infile])
output_error = 'Error: Please specify output file path using --output'
result = runner.invoke(cli, ["--format", "csv", "stream", infile])
output_error = "Error: Please specify output file path using --output"
assert output_error in result.output
result = runner.invoke(cli, ['--output', outfile, 'stream', infile])
format_error = 'Please specify output file format using --format'
result = runner.invoke(cli, ["--output", outfile, "stream", infile])
format_error = "Please specify output file format using --format"
assert format_error in result.output
def test_cli_password():
with TemporaryDirectory() as tempdir:
infile = os.path.join(testdir, 'health_protected.pdf')
outfile = os.path.join(tempdir, 'health_protected.csv')
infile = os.path.join(testdir, "health_protected.pdf")
outfile = os.path.join(tempdir, "health_protected.csv")
runner = CliRunner()
result = runner.invoke(cli, ['--password', 'userpass',
'--format', 'csv', '--output', outfile,
'stream', infile])
result = runner.invoke(
cli,
[
"--password",
"userpass",
"--format",
"csv",
"--output",
outfile,
"stream",
infile,
],
)
assert result.exit_code == 0
assert result.output == 'Found 1 tables\n'
assert result.output == "Found 1 tables\n"
output_error = 'file has not been decrypted'
output_error = "file has not been decrypted"
# no password
result = runner.invoke(cli, ['--format', 'csv', '--output', outfile,
'stream', infile])
result = runner.invoke(
cli, ["--format", "csv", "--output", outfile, "stream", infile]
)
assert output_error in str(result.exception)
# bad password
result = runner.invoke(cli, ['--password', 'wrongpass',
'--format', 'csv', '--output', outfile,
'stream', infile])
result = runner.invoke(
cli,
[
"--password",
"wrongpass",
"--format",
"csv",
"--output",
outfile,
"stream",
infile,
],
)
assert output_error in str(result.exception)
def test_cli_output_format():
with TemporaryDirectory() as tempdir:
infile = os.path.join(testdir, 'health.pdf')
outfile = os.path.join(tempdir, 'health.{}')
infile = os.path.join(testdir, "health.pdf")
runner = CliRunner()
# json
result = runner.invoke(cli, ['--format', 'json', '--output', outfile.format('json'),
'stream', infile])
assert result.exit_code == 0
outfile = os.path.join(tempdir, "health.json")
result = runner.invoke(
cli,
["--format", "json", "--output", outfile, "stream", infile],
)
assert result.exit_code == 0, f"Output: {result.output}"
# excel
result = runner.invoke(cli, ['--format', 'excel', '--output', outfile.format('xlsx'),
'stream', infile])
assert result.exit_code == 0
outfile = os.path.join(tempdir, "health.xlsx")
result = runner.invoke(
cli,
["--format", "excel", "--output", outfile, "stream", infile],
)
assert result.exit_code == 0, f"Output: {result.output}"
# html
result = runner.invoke(cli, ['--format', 'html', '--output', outfile.format('html'),
'stream', infile])
assert result.exit_code == 0
outfile = os.path.join(tempdir, "health.html")
result = runner.invoke(
cli,
["--format", "html", "--output", outfile, "stream", infile],
)
assert result.exit_code == 0, f"Output: {result.output}"
# markdown
outfile = os.path.join(tempdir, "health.md")
result = runner.invoke(
cli,
["--format", "markdown", "--output", outfile, "stream", infile],
)
assert result.exit_code == 0, f"Output: {result.output}"
# zip
result = runner.invoke(cli, ['--zip', '--format', 'csv', '--output', outfile.format('csv'),
'stream', infile])
assert result.exit_code == 0
outfile = os.path.join(tempdir, "health.csv")
result = runner.invoke(
cli,
[
"--zip",
"--format",
"csv",
"--output",
outfile,
"stream",
infile,
],
)
assert result.exit_code == 0, f"Output: {result.output}"
def test_cli_quiet():
with TemporaryDirectory() as tempdir:
infile = os.path.join(testdir, 'blank.pdf')
outfile = os.path.join(tempdir, 'blank.csv')
infile = os.path.join(testdir, "empty.pdf")
outfile = os.path.join(tempdir, "empty.csv")
runner = CliRunner()
result = runner.invoke(cli, ['--format', 'csv', '--output', outfile,
'stream', infile])
assert 'No tables found on page-1' in result.output
result = runner.invoke(
cli, ["--format", "csv", "--output", outfile, "stream", infile]
)
assert "No tables found on page-1" in result.output
result = runner.invoke(cli, ['--quiet', '--format', 'csv',
'--output', outfile, 'stream', infile])
assert 'No tables found on page-1' not in result.output
result = runner.invoke(
cli, ["--quiet", "--format", "csv", "--output", outfile, "stream", infile]
)
assert "No tables found on page-1" not in result.output

View File

@ -1,24 +1,47 @@
# -*- coding: utf-8 -*-
import os
import sys
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal
import camelot
from camelot.io import PDFHandler
from camelot.core import Table, TableList
from camelot.__version__ import generate_version
from camelot.backends import ImageConversionBackend
from .data import *
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
skip_on_windows = pytest.mark.skipif(
sys.platform.startswith("win"),
reason="Ghostscript not installed in Windows test environment",
)
def test_version_generation():
version = (0, 7, 3)
assert generate_version(version, prerelease=None, revision=None) == "0.7.3"
def test_version_generation_with_prerelease_revision():
version = (0, 7, 3)
prerelease = "alpha"
revision = 2
assert (
generate_version(version, prerelease=prerelease, revision=revision)
== "0.7.3-alpha.2"
)
@skip_on_windows
def test_parsing_report():
parsing_report = {
'accuracy': 99.02,
'whitespace': 12.24,
'order': 1,
'page': 1
}
parsing_report = {"accuracy": 99.02, "whitespace": 12.24, "order": 1, "page": 1}
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename)
@ -30,220 +53,122 @@ def test_password():
filename = os.path.join(testdir, "health_protected.pdf")
tables = camelot.read_pdf(filename, password="ownerpass", flavor="stream")
assert df.equals(tables[0].df)
assert_frame_equal(df, tables[0].df)
tables = camelot.read_pdf(filename, password="userpass", flavor="stream")
assert df.equals(tables[0].df)
assert_frame_equal(df, tables[0].df)
def test_stream():
df = pd.DataFrame(data_stream)
filename = os.path.join(testdir, "health.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert df.equals(tables[0].df)
def test_stream_table_rotated():
df = pd.DataFrame(data_stream_table_rotated)
filename = os.path.join(testdir, "clockwise_table_2.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert df.equals(tables[0].df)
filename = os.path.join(testdir, "anticlockwise_table_2.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert df.equals(tables[0].df)
def test_stream_two_tables():
df1 = pd.DataFrame(data_stream_two_tables_1)
df2 = pd.DataFrame(data_stream_two_tables_2)
filename = os.path.join(testdir, "tabula/12s0324.pdf")
tables = camelot.read_pdf(filename, flavor='stream')
assert len(tables) == 2
assert df1.equals(tables[0].df)
assert df2.equals(tables[1].df)
def test_stream_table_areas():
df = pd.DataFrame(data_stream_table_areas)
filename = os.path.join(testdir, "tabula/us-007.pdf")
tables = camelot.read_pdf(filename, flavor="stream", table_areas=["320,500,573,335"])
assert df.equals(tables[0].df)
def test_stream_columns():
df = pd.DataFrame(data_stream_columns)
filename = os.path.join(testdir, "mexican_towns.pdf")
tables = camelot.read_pdf(
filename, flavor="stream", columns=["67,180,230,425,475"], row_tol=10)
assert df.equals(tables[0].df)
def test_stream_split_text():
df = pd.DataFrame(data_stream_split_text)
filename = os.path.join(testdir, "tabula/m27.pdf")
tables = camelot.read_pdf(
filename, flavor="stream", columns=["72,95,209,327,442,529,566,606,683"], split_text=True)
assert df.equals(tables[0].df)
def test_stream_flag_size():
df = pd.DataFrame(data_stream_flag_size)
filename = os.path.join(testdir, "superscript.pdf")
tables = camelot.read_pdf(filename, flavor="stream", flag_size=True)
assert df.equals(tables[0].df)
def test_stream_strip_text():
df = pd.DataFrame(data_stream_strip_text)
filename = os.path.join(testdir, "detect_vertical_false.pdf")
tables = camelot.read_pdf(filename, flavor="stream", strip_text="\n")
assert df.equals(tables[0].df)
def test_stream_edge_tol():
df = pd.DataFrame(data_stream_edge_tol)
filename = os.path.join(testdir, "edge_tol.pdf")
tables = camelot.read_pdf(filename, flavor="stream", edge_tol=500)
assert df.equals(tables[0].df)
def test_stream_layout_kwargs():
df = pd.DataFrame(data_stream_layout_kwargs)
filename = os.path.join(testdir, "detect_vertical_false.pdf")
tables = camelot.read_pdf(
filename, flavor="stream", layout_kwargs={"detect_vertical": False})
assert df.equals(tables[0].df)
def test_lattice():
df = pd.DataFrame(data_lattice)
filename = os.path.join(
testdir, "tabula/icdar2013-dataset/competition-dataset-us/us-030.pdf")
tables = camelot.read_pdf(filename, pages="2")
assert df.equals(tables[0].df)
def test_lattice_table_rotated():
df = pd.DataFrame(data_lattice_table_rotated)
filename = os.path.join(testdir, "clockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
assert df.equals(tables[0].df)
filename = os.path.join(testdir, "anticlockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
assert df.equals(tables[0].df)
def test_lattice_two_tables():
df1 = pd.DataFrame(data_lattice_two_tables_1)
df2 = pd.DataFrame(data_lattice_two_tables_2)
filename = os.path.join(testdir, "twotables_2.pdf")
tables = camelot.read_pdf(filename)
assert len(tables) == 2
assert df1.equals(tables[0].df)
assert df2.equals(tables[1].df)
def test_lattice_table_regions():
df = pd.DataFrame(data_lattice_table_regions)
filename = os.path.join(testdir, "table_region.pdf")
tables = camelot.read_pdf(filename, table_regions=["170,370,560,270"])
assert df.equals(tables[0].df)
def test_lattice_table_areas():
df = pd.DataFrame(data_lattice_table_areas)
filename = os.path.join(testdir, "twotables_2.pdf")
tables = camelot.read_pdf(filename, table_areas=["80,693,535,448"])
assert df.equals(tables[0].df)
def test_lattice_process_background():
df = pd.DataFrame(data_lattice_process_background)
filename = os.path.join(testdir, "background_lines_1.pdf")
tables = camelot.read_pdf(filename, process_background=True)
assert df.equals(tables[1].df)
def test_lattice_copy_text():
df = pd.DataFrame(data_lattice_copy_text)
filename = os.path.join(testdir, "row_span_1.pdf")
tables = camelot.read_pdf(filename, line_scale=60, copy_text="v")
assert df.equals(tables[0].df)
def test_lattice_shift_text():
df_lt = pd.DataFrame(data_lattice_shift_text_left_top)
df_disable = pd.DataFrame(data_lattice_shift_text_disable)
df_rb = pd.DataFrame(data_lattice_shift_text_right_bottom)
filename = os.path.join(testdir, "column_span_2.pdf")
tables = camelot.read_pdf(filename, line_scale=40)
assert df_lt.equals(tables[0].df)
tables = camelot.read_pdf(filename, line_scale=40, shift_text=[''])
assert df_disable.equals(tables[0].df)
tables = camelot.read_pdf(filename, line_scale=40, shift_text=['r', 'b'])
assert df_rb.equals(tables[0].df)
def test_repr():
def test_repr_poppler():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename)
tables = camelot.read_pdf(filename, backend="poppler")
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120.48 y1=218.43 x2=164.64 y2=233.77>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120 y1=219 x2=165 y2=234>"
def test_pages():
@skip_on_windows
def test_repr_ghostscript():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename, backend="ghostscript")
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120 y1=218 x2=165 y2=234>"
def test_url_poppler():
url = "https://camelot-py.readthedocs.io/en/master/_static/pdf/foo.pdf"
tables = camelot.read_pdf(url)
tables = camelot.read_pdf(url, backend="poppler")
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120.48 y1=218.43 x2=164.64 y2=233.77>"
tables = camelot.read_pdf(url, pages='1-end')
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120.48 y1=218.43 x2=164.64 y2=233.77>"
tables = camelot.read_pdf(url, pages='all')
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120.48 y1=218.43 x2=164.64 y2=233.77>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120 y1=219 x2=165 y2=234>"
def test_url():
@skip_on_windows
def test_url_ghostscript():
url = "https://camelot-py.readthedocs.io/en/master/_static/pdf/foo.pdf"
tables = camelot.read_pdf(url)
tables = camelot.read_pdf(url, backend="ghostscript")
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120.48 y1=218.43 x2=164.64 y2=233.77>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120 y1=218 x2=165 y2=234>"
def test_arabic():
df = pd.DataFrame(data_arabic)
filename = os.path.join(testdir, "tabula/arabic.pdf")
tables = camelot.read_pdf(filename)
assert df.equals(tables[0].df)
def test_pages_poppler():
url = "https://camelot-py.readthedocs.io/en/master/_static/pdf/foo.pdf"
tables = camelot.read_pdf(url, backend="poppler")
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120 y1=219 x2=165 y2=234>"
tables = camelot.read_pdf(url, pages="1-end", backend="poppler")
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120 y1=219 x2=165 y2=234>"
tables = camelot.read_pdf(url, pages="all", backend="poppler")
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120 y1=219 x2=165 y2=234>"
@skip_on_windows
def test_pages_ghostscript():
url = "https://camelot-py.readthedocs.io/en/master/_static/pdf/foo.pdf"
tables = camelot.read_pdf(url, backend="ghostscript")
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120 y1=218 x2=165 y2=234>"
tables = camelot.read_pdf(url, pages="1-end", backend="ghostscript")
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120 y1=218 x2=165 y2=234>"
tables = camelot.read_pdf(url, pages="all", backend="ghostscript")
assert repr(tables) == "<TableList n=1>"
assert repr(tables[0]) == "<Table shape=(7, 7)>"
assert repr(tables[0].cells[0][0]) == "<Cell x1=120 y1=218 x2=165 y2=234>"
def test_table_order():
def _make_table(page, order):
t = Table([], [])
t.page = page
t.order = order
return t
table_list = TableList(
[_make_table(2, 1), _make_table(1, 1), _make_table(3, 4), _make_table(1, 2)]
)
assert [(t.page, t.order) for t in sorted(table_list)] == [
(1, 1),
(1, 2),
(2, 1),
(3, 4),
]
assert [(t.page, t.order) for t in sorted(table_list, reverse=True)] == [
(3, 4),
(2, 1),
(1, 2),
(1, 1),
]
def test_handler_pages_generator():
filename = os.path.join(testdir, "foo.pdf")
handler = PDFHandler(filename)
assert handler._get_pages("1") == [1]
handler = PDFHandler(filename)
assert handler._get_pages("all") == [1]
handler = PDFHandler(filename)
assert handler._get_pages("1-end") == [1]
handler = PDFHandler(filename)
assert handler._get_pages("1,2,3,4") == [1, 2, 3, 4]
handler = PDFHandler(filename)
assert handler._get_pages("1,2,5-10") == [1, 2, 5, 6, 7, 8, 9, 10]


@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import os
import sys
import warnings
import pytest
@@ -10,88 +11,145 @@ import camelot
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
filename = os.path.join(testdir, 'foo.pdf')
filename = os.path.join(testdir, "foo.pdf")
skip_on_windows = pytest.mark.skipif(
sys.platform.startswith("win"),
reason="Ghostscript not installed in Windows test environment",
)
def test_unknown_flavor():
message = ("Unknown flavor specified."
" Use either 'lattice' or 'stream'")
with pytest.raises(NotImplementedError, message=message):
tables = camelot.read_pdf(filename, flavor='chocolate')
message = "Unknown flavor specified." " Use either 'lattice' or 'stream'"
with pytest.raises(NotImplementedError, match=message):
tables = camelot.read_pdf(filename, flavor="chocolate")
def test_input_kwargs():
message = "columns cannot be used with flavor='lattice'"
with pytest.raises(ValueError, message=message):
tables = camelot.read_pdf(filename, columns=['10,20,30,40'])
with pytest.raises(ValueError, match=message):
tables = camelot.read_pdf(filename, columns=["10,20,30,40"])
def test_unsupported_format():
message = 'File format not supported'
filename = os.path.join(testdir, 'foo.csv')
with pytest.raises(NotImplementedError, message=message):
message = "File format not supported"
filename = os.path.join(testdir, "foo.csv")
with pytest.raises(NotImplementedError, match=message):
tables = camelot.read_pdf(filename)
def test_stream_equal_length():
message = ("Length of table_areas and columns"
" should be equal")
with pytest.raises(ValueError, message=message):
tables = camelot.read_pdf(filename, flavor='stream',
table_areas=['10,20,30,40'], columns=['10,20,30,40', '10,20,30,40'])
def test_image_warning():
filename = os.path.join(testdir, 'image.pdf')
with warnings.catch_warnings():
warnings.simplefilter('error')
with pytest.raises(UserWarning) as e:
tables = camelot.read_pdf(filename)
assert str(e.value) == 'page-1 is image-based, camelot only works on text-based pages.'
def test_no_tables_found():
filename = os.path.join(testdir, 'blank.pdf')
with warnings.catch_warnings():
warnings.simplefilter('error')
with pytest.raises(UserWarning) as e:
tables = camelot.read_pdf(filename)
assert str(e.value) == 'No tables found on page-1'
@skip_on_windows
def test_no_tables_found_logs_suppressed():
filename = os.path.join(testdir, 'foo.pdf')
filename = os.path.join(testdir, "foo.pdf")
with warnings.catch_warnings():
# the test should fail if any warning is thrown
warnings.simplefilter('error')
warnings.simplefilter("error")
try:
tables = camelot.read_pdf(filename, suppress_stdout=True)
except Warning as e:
warning_text = str(e)
pytest.fail('Unexpected warning: {}'.format(warning_text))
pytest.fail(f"Unexpected warning: {warning_text}")
def test_no_tables_found_warnings_suppressed():
filename = os.path.join(testdir, 'blank.pdf')
filename = os.path.join(testdir, "empty.pdf")
with warnings.catch_warnings():
# the test should fail if any warning is thrown
warnings.simplefilter('error')
warnings.simplefilter("error")
try:
tables = camelot.read_pdf(filename, suppress_stdout=True)
except Warning as e:
warning_text = str(e)
pytest.fail('Unexpected warning: {}'.format(warning_text))
pytest.fail(f"Unexpected warning: {warning_text}")
def test_no_password():
filename = os.path.join(testdir, 'health_protected.pdf')
message = 'file has not been decrypted'
with pytest.raises(Exception, message=message):
filename = os.path.join(testdir, "health_protected.pdf")
message = "file has not been decrypted"
with pytest.raises(Exception, match=message):
tables = camelot.read_pdf(filename)
def test_bad_password():
filename = os.path.join(testdir, 'health_protected.pdf')
message = 'file has not been decrypted'
with pytest.raises(Exception, message=message):
tables = camelot.read_pdf(filename, password='wrongpass')
filename = os.path.join(testdir, "health_protected.pdf")
message = "file has not been decrypted"
with pytest.raises(Exception, match=message):
tables = camelot.read_pdf(filename, password="wrongpass")
def test_stream_equal_length():
message = "Length of table_areas and columns" " should be equal"
with pytest.raises(ValueError, match=message):
tables = camelot.read_pdf(
filename,
flavor="stream",
table_areas=["10,20,30,40"],
columns=["10,20,30,40", "10,20,30,40"],
)
def test_image_warning():
filename = os.path.join(testdir, "image.pdf")
with warnings.catch_warnings():
warnings.simplefilter("error", category=UserWarning)
with pytest.raises(UserWarning) as e:
tables = camelot.read_pdf(filename)
assert (
str(e.value)
== "page-1 is image-based, camelot only works on text-based pages."
)
def test_stream_no_tables_on_page():
filename = os.path.join(testdir, "empty.pdf")
with warnings.catch_warnings():
warnings.simplefilter("error")
with pytest.raises(UserWarning) as e:
tables = camelot.read_pdf(filename, flavor="stream")
assert str(e.value) == "No tables found on page-1"
def test_stream_no_tables_in_area():
filename = os.path.join(testdir, "only_page_number.pdf")
with warnings.catch_warnings():
warnings.simplefilter("error")
with pytest.raises(UserWarning) as e:
tables = camelot.read_pdf(filename, flavor="stream")
assert str(e.value) == "No tables found in table area 1"
def test_lattice_no_tables_on_page():
filename = os.path.join(testdir, "empty.pdf")
with warnings.catch_warnings():
warnings.simplefilter("error", category=UserWarning)
with pytest.raises(UserWarning) as e:
tables = camelot.read_pdf(filename, flavor="lattice")
assert str(e.value) == "No tables found on page-1"
def test_lattice_unknown_backend():
message = "Unknown backend 'mupdf' specified. Please use either 'poppler' or 'ghostscript'."
with pytest.raises(NotImplementedError, match=message):
tables = camelot.read_pdf(filename, backend="mupdf")
def test_lattice_no_convert_method():
class ConversionBackend(object):
pass
message = "must implement a 'convert' method"
with pytest.raises(NotImplementedError, match=message):
tables = camelot.read_pdf(filename, backend=ConversionBackend())
def test_lattice_ghostscript_deprecation_warning():
ghostscript_deprecation_warning = (
"'ghostscript' will be replaced by 'poppler' as the default image conversion"
" backend in v0.12.0. You can try out 'poppler' with backend='poppler'."
)
with warnings.catch_warnings():
warnings.simplefilter("error")
with pytest.raises(DeprecationWarning) as e:
tables = camelot.read_pdf(filename)
assert str(e.value) == ghostscript_deprecation_warning


@@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
import pytest
import camelot.backends.image_conversion
from camelot.backends import ImageConversionBackend
class PopplerBackendError(object):
def convert(self, pdf_path, png_path):
raise ValueError("Image conversion failed")
class GhostscriptBackendError(object):
def convert(self, pdf_path, png_path):
raise ValueError("Image conversion failed")
class GhostscriptBackendNoError(object):
def convert(self, pdf_path, png_path):
pass
def test_poppler_backend_error_when_no_use_fallback(monkeypatch):
BACKENDS = {
"poppler": PopplerBackendError,
"ghostscript": GhostscriptBackendNoError,
}
monkeypatch.setattr(
"camelot.backends.image_conversion.BACKENDS", BACKENDS, raising=True
)
backend = ImageConversionBackend(use_fallback=False)
message = "Image conversion failed with image conversion backend 'poppler'"
with pytest.raises(ValueError, match=message):
backend.convert("foo", "bar")
def test_ghostscript_backend_when_use_fallback(monkeypatch):
BACKENDS = {
"poppler": PopplerBackendError,
"ghostscript": GhostscriptBackendNoError,
}
monkeypatch.setattr(
"camelot.backends.image_conversion.BACKENDS", BACKENDS, raising=True
)
backend = ImageConversionBackend()
backend.convert("foo", "bar")
def test_ghostscript_backend_error_when_use_fallback(monkeypatch):
BACKENDS = {"poppler": PopplerBackendError, "ghostscript": GhostscriptBackendError}
monkeypatch.setattr(
"camelot.backends.image_conversion.BACKENDS", BACKENDS, raising=True
)
backend = ImageConversionBackend()
message = "Image conversion failed with image conversion backend 'ghostscript'"
with pytest.raises(ValueError, match=message):
backend.convert("foo", "bar")


@@ -0,0 +1,120 @@
# -*- coding: utf-8 -*-
import os
import sys
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal
import camelot
from camelot.core import Table, TableList
from camelot.__version__ import generate_version
from .data import *
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
skip_on_windows = pytest.mark.skipif(
sys.platform.startswith("win"),
reason="Ghostscript not installed in Windows test environment",
)
@skip_on_windows
def test_lattice():
df = pd.DataFrame(data_lattice)
filename = os.path.join(
testdir, "tabula/icdar2013-dataset/competition-dataset-us/us-030.pdf"
)
tables = camelot.read_pdf(filename, pages="2")
assert_frame_equal(df, tables[0].df)
@skip_on_windows
def test_lattice_table_rotated():
df = pd.DataFrame(data_lattice_table_rotated)
filename = os.path.join(testdir, "clockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
assert_frame_equal(df, tables[0].df)
filename = os.path.join(testdir, "anticlockwise_table_1.pdf")
tables = camelot.read_pdf(filename)
assert_frame_equal(df, tables[0].df)
@skip_on_windows
def test_lattice_two_tables():
df1 = pd.DataFrame(data_lattice_two_tables_1)
df2 = pd.DataFrame(data_lattice_two_tables_2)
filename = os.path.join(testdir, "twotables_2.pdf")
tables = camelot.read_pdf(filename)
assert len(tables) == 2
assert df1.equals(tables[0].df)
assert df2.equals(tables[1].df)
@skip_on_windows
def test_lattice_table_regions():
df = pd.DataFrame(data_lattice_table_regions)
filename = os.path.join(testdir, "table_region.pdf")
tables = camelot.read_pdf(filename, table_regions=["170,370,560,270"])
assert_frame_equal(df, tables[0].df)
@skip_on_windows
def test_lattice_table_areas():
df = pd.DataFrame(data_lattice_table_areas)
filename = os.path.join(testdir, "twotables_2.pdf")
tables = camelot.read_pdf(filename, table_areas=["80,693,535,448"])
assert_frame_equal(df, tables[0].df)
@skip_on_windows
def test_lattice_process_background():
df = pd.DataFrame(data_lattice_process_background)
filename = os.path.join(testdir, "background_lines_1.pdf")
tables = camelot.read_pdf(filename, process_background=True)
assert_frame_equal(df, tables[1].df)
@skip_on_windows
def test_lattice_copy_text():
df = pd.DataFrame(data_lattice_copy_text)
filename = os.path.join(testdir, "row_span_1.pdf")
tables = camelot.read_pdf(filename, line_scale=60, copy_text="v")
assert_frame_equal(df, tables[0].df)
@skip_on_windows
def test_lattice_shift_text():
df_lt = pd.DataFrame(data_lattice_shift_text_left_top)
df_disable = pd.DataFrame(data_lattice_shift_text_disable)
df_rb = pd.DataFrame(data_lattice_shift_text_right_bottom)
filename = os.path.join(testdir, "column_span_2.pdf")
tables = camelot.read_pdf(filename, line_scale=40)
assert df_lt.equals(tables[0].df)
tables = camelot.read_pdf(filename, line_scale=40, shift_text=[""])
assert df_disable.equals(tables[0].df)
tables = camelot.read_pdf(filename, line_scale=40, shift_text=["r", "b"])
assert df_rb.equals(tables[0].df)
@skip_on_windows
def test_lattice_arabic():
df = pd.DataFrame(data_arabic)
filename = os.path.join(testdir, "tabula/arabic.pdf")
tables = camelot.read_pdf(filename)
assert_frame_equal(df, tables[0].df)
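One note on the switch from df.equals(...) to pandas.testing.assert_frame_equal in these new modules: the pandas helper raises an AssertionError that pinpoints the mismatching cells, whereas df.equals only yields True or False. For example:

import pandas as pd
from pandas.testing import assert_frame_equal

expected = pd.DataFrame({"a": [1, 2]})
actual = pd.DataFrame({"a": [1, 3]})
try:
    assert_frame_equal(expected, actual)
except AssertionError as err:
    print(err)  # names the column and row that differ, instead of a bare False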


@@ -1,67 +1,98 @@
# -*- coding: utf-8 -*-
import os
import sys
import pytest
import camelot
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
skip_on_windows = pytest.mark.skipif(
sys.platform.startswith("win"),
reason="Ghostscript not installed in Windows test environment",
)
@pytest.mark.mpl_image_compare(
baseline_dir="files/baseline_plots", remove_text=True)
@skip_on_windows
@pytest.mark.mpl_image_compare(baseline_dir="files/baseline_plots", remove_text=True)
def test_text_plot():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename)
return camelot.plot(tables[0], kind='text')
return camelot.plot(tables[0], kind="text")
@pytest.mark.mpl_image_compare(
baseline_dir="files/baseline_plots", remove_text=True)
def test_grid_plot():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename)
return camelot.plot(tables[0], kind='grid')
@pytest.mark.mpl_image_compare(
baseline_dir="files/baseline_plots", remove_text=True)
def test_lattice_contour_plot():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename)
return camelot.plot(tables[0], kind='contour')
@pytest.mark.mpl_image_compare(
baseline_dir="files/baseline_plots", remove_text=True)
def test_stream_contour_plot():
filename = os.path.join(testdir, "tabula/12s0324.pdf")
tables = camelot.read_pdf(filename, flavor='stream')
return camelot.plot(tables[0], kind='contour')
@pytest.mark.mpl_image_compare(
baseline_dir="files/baseline_plots", remove_text=True)
def test_line_plot():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename)
return camelot.plot(tables[0], kind='line')
@pytest.mark.mpl_image_compare(
baseline_dir="files/baseline_plots", remove_text=True)
def test_joint_plot():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename)
return camelot.plot(tables[0], kind='joint')
@pytest.mark.mpl_image_compare(
baseline_dir="files/baseline_plots", remove_text=True)
@pytest.mark.mpl_image_compare(baseline_dir="files/baseline_plots", remove_text=True)
def test_textedge_plot():
filename = os.path.join(testdir, "tabula/12s0324.pdf")
tables = camelot.read_pdf(filename, flavor='stream')
return camelot.plot(tables[0], kind='textedge')
tables = camelot.read_pdf(filename, flavor="stream")
return camelot.plot(tables[0], kind="textedge")
@pytest.mark.mpl_image_compare(baseline_dir="files/baseline_plots", remove_text=True)
def test_lattice_contour_plot_poppler():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename, backend="poppler")
return camelot.plot(tables[0], kind="contour")
@skip_on_windows
@pytest.mark.mpl_image_compare(baseline_dir="files/baseline_plots", remove_text=True)
def test_lattice_contour_plot_ghostscript():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename, backend="ghostscript")
return camelot.plot(tables[0], kind="contour")
@pytest.mark.mpl_image_compare(baseline_dir="files/baseline_plots", remove_text=True)
def test_stream_contour_plot():
filename = os.path.join(testdir, "tabula/12s0324.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
return camelot.plot(tables[0], kind="contour")
@pytest.mark.mpl_image_compare(baseline_dir="files/baseline_plots", remove_text=True)
def test_line_plot_poppler():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename, backend="poppler")
return camelot.plot(tables[0], kind="line")
@skip_on_windows
@pytest.mark.mpl_image_compare(baseline_dir="files/baseline_plots", remove_text=True)
def test_line_plot_ghostscript():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename, backend="ghostscript")
return camelot.plot(tables[0], kind="line")
@pytest.mark.mpl_image_compare(baseline_dir="files/baseline_plots", remove_text=True)
def test_joint_plot_poppler():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename, backend="poppler")
return camelot.plot(tables[0], kind="joint")
@skip_on_windows
@pytest.mark.mpl_image_compare(baseline_dir="files/baseline_plots", remove_text=True)
def test_joint_plot_ghostscript():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename, backend="ghostscript")
return camelot.plot(tables[0], kind="joint")
@pytest.mark.mpl_image_compare(baseline_dir="files/baseline_plots", remove_text=True)
def test_grid_plot_poppler():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename, backend="poppler")
return camelot.plot(tables[0], kind="grid")
@skip_on_windows
@pytest.mark.mpl_image_compare(baseline_dir="files/baseline_plots", remove_text=True)
def test_grid_plot_ghostscript():
filename = os.path.join(testdir, "foo.pdf")
tables = camelot.read_pdf(filename, backend="ghostscript")
return camelot.plot(tables[0], kind="grid")


@@ -0,0 +1,133 @@
# -*- coding: utf-8 -*-
import os
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal
import camelot
from camelot.core import Table, TableList
from camelot.__version__ import generate_version
from .data import *
testdir = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(testdir, "files")
def test_stream():
df = pd.DataFrame(data_stream)
filename = os.path.join(testdir, "health.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert_frame_equal(df, tables[0].df)
def test_stream_table_rotated():
df = pd.DataFrame(data_stream_table_rotated)
filename = os.path.join(testdir, "clockwise_table_2.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert_frame_equal(df, tables[0].df)
filename = os.path.join(testdir, "anticlockwise_table_2.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert_frame_equal(df, tables[0].df)
def test_stream_two_tables():
df1 = pd.DataFrame(data_stream_two_tables_1)
df2 = pd.DataFrame(data_stream_two_tables_2)
filename = os.path.join(testdir, "tabula/12s0324.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert len(tables) == 2
assert df1.equals(tables[0].df)
assert df2.equals(tables[1].df)
def test_stream_table_regions():
df = pd.DataFrame(data_stream_table_areas)
filename = os.path.join(testdir, "tabula/us-007.pdf")
tables = camelot.read_pdf(
filename, flavor="stream", table_regions=["320,460,573,335"]
)
assert_frame_equal(df, tables[0].df)
def test_stream_table_areas():
df = pd.DataFrame(data_stream_table_areas)
filename = os.path.join(testdir, "tabula/us-007.pdf")
tables = camelot.read_pdf(
filename, flavor="stream", table_areas=["320,500,573,335"]
)
assert_frame_equal(df, tables[0].df)
def test_stream_columns():
df = pd.DataFrame(data_stream_columns)
filename = os.path.join(testdir, "mexican_towns.pdf")
tables = camelot.read_pdf(
filename, flavor="stream", columns=["67,180,230,425,475"], row_tol=10
)
assert_frame_equal(df, tables[0].df)
def test_stream_split_text():
df = pd.DataFrame(data_stream_split_text)
filename = os.path.join(testdir, "tabula/m27.pdf")
tables = camelot.read_pdf(
filename,
flavor="stream",
columns=["72,95,209,327,442,529,566,606,683"],
split_text=True,
)
assert_frame_equal(df, tables[0].df)
def test_stream_flag_size():
df = pd.DataFrame(data_stream_flag_size)
filename = os.path.join(testdir, "superscript.pdf")
tables = camelot.read_pdf(filename, flavor="stream", flag_size=True)
assert_frame_equal(df, tables[0].df)
def test_stream_strip_text():
df = pd.DataFrame(data_stream_strip_text)
filename = os.path.join(testdir, "detect_vertical_false.pdf")
tables = camelot.read_pdf(filename, flavor="stream", strip_text=" ,\n")
assert_frame_equal(df, tables[0].df)
def test_stream_edge_tol():
df = pd.DataFrame(data_stream_edge_tol)
filename = os.path.join(testdir, "edge_tol.pdf")
tables = camelot.read_pdf(filename, flavor="stream", edge_tol=500)
assert_frame_equal(df, tables[0].df)
def test_stream_layout_kwargs():
df = pd.DataFrame(data_stream_layout_kwargs)
filename = os.path.join(testdir, "detect_vertical_false.pdf")
tables = camelot.read_pdf(
filename, flavor="stream", layout_kwargs={"detect_vertical": False}
)
assert_frame_equal(df, tables[0].df)
def test_stream_duplicated_text():
df = pd.DataFrame(data_stream_duplicated_text)
filename = os.path.join(testdir, "birdisland.pdf")
tables = camelot.read_pdf(filename, flavor="stream")
assert_frame_equal(df, tables[0].df)