author     shumkovnd <shumkovnd@yandex-team.com>  2023-11-10 14:39:34 +0300
committer  shumkovnd <shumkovnd@yandex-team.com>  2023-11-10 16:42:24 +0300
commit     77eb2d3fdcec5c978c64e025ced2764c57c00285 (patch)
tree       c51edb0748ca8d4a08d7c7323312c27ba1a8b79a /contrib/python/matplotlib/py2
parent     dd6d20cadb65582270ac23f4b3b14ae189704b9d (diff)
download   ydb-77eb2d3fdcec5c978c64e025ced2764c57c00285.tar.gz
KIKIMR-19287: add task_stats_drawing script
Diffstat (limited to 'contrib/python/matplotlib/py2')
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/LICENSE  99
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/LICENSE.PIL  12
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/LICENSE_AMSFONTS  240
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/LICENSE_BAKOMA  40
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/LICENSE_COLORBREWER  38
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/LICENSE_CONDA  51
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/LICENSE_JQUERY  61
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/LICENSE_QT4_EDITOR  30
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/LICENSE_STIX  71
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/LICENSE_YORICK  49
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/LICENSE_enthought.txt  29
-rw-r--r--  contrib/python/matplotlib/py2/LICENSE/Solarized.txt  20
-rw-r--r--  contrib/python/matplotlib/py2/README.rst  83
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_alpha_mask_u8.h  499
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_array.h  1119
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_basics.h  560
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_bezier_arc.h  159
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_clip_liang_barsky.h  333
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_color_gray.h  1047
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_color_rgba.h  1353
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_config.h  44
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_adaptor_vcgen.h  157
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_adaptor_vpgen.h  159
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_contour.h  65
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_curve.h  201
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_dash.h  68
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_segmentator.h  48
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_stroke.h  73
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_transform.h  68
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_curves.h  693
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_dda_line.h  290
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_gamma_functions.h  132
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_gamma_lut.h  305
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_image_accessors.h  481
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_image_filters.h  448
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_math.h  437
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_math_stroke.h  527
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_path_storage.h  1545
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_amask_adaptor.h  240
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_base.h  97
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_gray.h  737
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_rgb.h  994
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_rgba.h  2801
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_cells_aa.h  743
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_scanline_aa.h  481
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_scanline_aa_nogamma.h  482
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_sl_clip.h  351
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_renderer_base.h  731
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_renderer_scanline.h  852
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rendering_buffer.h  300
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_bin.h  264
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_p.h  329
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_storage_aa.h  815
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_storage_bin.h  586
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_u.h  499
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_shorten_path.h  66
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_allocator.h  54
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_converter.h  56
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_gouraud.h  172
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_gouraud_rgba.h  277
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter.h  246
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter_gray.h  723
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter_rgba.h  890
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_interpolator_adaptor.h  77
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_interpolator_linear.h  232
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_pattern_rgba.h  94
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_trans_affine.h  518
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_contour.h  94
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_dash.h  93
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_stroke.h  102
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vertex_sequence.h  172
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vpgen_segmentator.h  61
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/util/agg_color_conv.h  128
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/include/util/agg_color_conv_rgb8.h  476
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/src/ChangeLog  0
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_bezier_arc.cpp  258
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_curves.cpp  613
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_image_filters.cpp  103
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_trans_affine.cpp  194
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_contour.cpp  165
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_dash.cpp  235
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_stroke.cpp  213
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vpgen_segmentator.cpp  67
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/src/authors  0
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/src/copying  11
-rw-r--r--  contrib/python/matplotlib/py2/extern/agg24-svn/ya.make  22
-rw-r--r--  contrib/python/matplotlib/py2/extern/ttconv/pprdrv.h  113
-rw-r--r--  contrib/python/matplotlib/py2/extern/ttconv/pprdrv_tt.cpp  1484
-rw-r--r--  contrib/python/matplotlib/py2/extern/ttconv/pprdrv_tt2.cpp  736
-rw-r--r--  contrib/python/matplotlib/py2/extern/ttconv/truetype.h  129
-rw-r--r--  contrib/python/matplotlib/py2/extern/ttconv/ttutil.cpp  82
-rw-r--r--  contrib/python/matplotlib/py2/extern/ttconv/ya.make  13
-rw-r--r--  contrib/python/matplotlib/py2/extern/ya.make  4
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/__init__.py  1925
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/_animation_data.py  210
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/_cm.py  1445
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/_cm_listed.py  1298
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/_color_data.py  1147
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/_constrained_layout.py  666
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/_layoutbox.py  739
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/_mathtext_data.py  2548
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/_pylab_helpers.py  138
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/_version.py  460
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/afm.py  547
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/animation.py  1778
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/artist.py  1482
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/axes/__init__.py  5
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/axes/_axes.py  8153
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/axes/_base.py  4297
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/axes/_subplots.py  267
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/axis.py  2509
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backend_bases.py  3383
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backend_managers.py  436
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backend_tools.py  1081
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/__init__.py  97
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/_backend_tk.py  1075
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/_gtk3_compat.py  41
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_agg.py  606
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_cairo.py  520
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_gdk.py  438
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk.py  1037
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3.py  920
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3agg.py  102
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3cairo.py  55
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_gtkagg.py  96
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_gtkcairo.py  74
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_macosx.py  210
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_mixed.py  155
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_nbagg.py  270
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_pdf.py  2604
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_pgf.py  990
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_ps.py  1762
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4.py  15
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4agg.py  15
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4cairo.py  6
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5.py  1118
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5agg.py  105
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5cairo.py  49
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_svg.py  1261
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_template.py  278
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_tkagg.py  34
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_tkcairo.py  37
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_webagg.py  350
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_webagg_core.py  543
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_wx.py  2002
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_wxagg.py  147
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/backend_wxcairo.py  53
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/qt_compat.py  176
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/__init__.py  2
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/figureoptions.py  262
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/formlayout.py  544
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/formsubplottool.py  56
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/tkagg.py  44
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/windowing.py  31
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/backends/wx_compat.py  177
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/bezier.py  495
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/blocking_input.py  375
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/category.py  211
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/cbook/__init__.py  2852
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/cbook/_backports.py  147
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/cbook/deprecation.py  222
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/cm.py  392
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/collections.py  1994
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/colorbar.py  1405
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/colors.py  2027
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/compat/__init__.py  0
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/compat/subprocess.py  51
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/container.py  194
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/contour.py  1836
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/dates.py  1839
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/docstring.py  128
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/dviread.py  1083
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/figure.py  2532
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/font_manager.py  1479
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/fontconfig_pattern.py  196
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/gridspec.py  498
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/hatch.py  220
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/image.py  1534
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/legend.py  1401
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/legend_handler.py  730
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/lines.py  1507
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/markers.py  896
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/mathtext.py  3445
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/mlab.py  4041
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/offsetbox.py  1811
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/patches.py  4720
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/path.py  1028
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/patheffects.py  393
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/projections/__init__.py  110
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/projections/geo.py  547
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/projections/polar.py  1537
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/pylab.py  268
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/pyplot.py  4099
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/quiver.py  1197
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/rcsetup.py  1450
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/sankey.py  833
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/scale.py  607
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/sphinxext/__init__.py  2
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/sphinxext/mathmpl.py  126
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/sphinxext/only_directives.py  75
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/sphinxext/plot_directive.py  868
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/README.md  3
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/_static/README.txt  7
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/index.rst  21
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/some_plots.rst  129
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/spines.py  542
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/stackplot.py  126
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/streamplot.py  674
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/style/__init__.py  3
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/style/core.py  234
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/table.py  702
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/__init__.py  59
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/_nose/__init__.py  78
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/_nose/decorators.py  33
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/_nose/exceptions.py  10
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/__init__.py  0
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/knownfailure.py  49
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/performgc.py  26
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/compare.py  489
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/conftest.py  100
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/decorators.py  589
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/determinism.py  145
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/disable_internet.py  150
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/exceptions.py  4
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/Duration.py  211
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/Epoch.py  238
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/EpochConverter.py  165
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/StrConverter.py  164
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDbl.py  297
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDblConverter.py  159
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDblFormatter.py  47
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/__init__.py  88
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/testing/noseclasses.py  26
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/texmanager.py  505
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/text.py  2336
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/textpath.py  536
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/ticker.py  2628
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tight_bbox.py  87
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tight_layout.py  381
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/transforms.py  3025
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/__init__.py  16
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/_tri.cpp  1999
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/_tri.h  815
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/_tri_wrapper.cpp  550
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/triangulation.py  218
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/tricontour.py  283
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/trifinder.py  96
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/triinterpolate.py  1637
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/tripcolor.py  154
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/triplot.py  88
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/trirefine.py  323
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/tritools.py  304
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/tri/ya.make  41
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/type1font.py  334
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/units.py  200
-rw-r--r--  contrib/python/matplotlib/py2/matplotlib/widgets.py  2818
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/__init__.py  4
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/ChangeLog  13
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/__init__.py  15
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/anchored_artists.py  9
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/angle_helper.py  4
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_divider.py  8
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_grid.py  30
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_rgb.py  11
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_size.py  4
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axis_artist.py  4
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axisline_style.py  4
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axislines.py  4
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/clip_path.py  4
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/colorbar.py  5
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/floating_axes.py  4
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/grid_finder.py  4
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/grid_helper_curvelinear.py  4
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/inset_locator.py  7
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/parasite_axes.py  18
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/__init__.py  12
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/anchored_artists.py  376
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_divider.py  975
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_grid.py  771
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_rgb.py  228
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_size.py  323
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/colorbar.py  836
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/inset_locator.py  659
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/mpl_axes.py  154
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/parasite_axes.py  486
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/__init__.py  26
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/angle_helper.py  416
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_divider.py  9
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_grid.py  30
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_rgb.py  11
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axis_artist.py  1527
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axisline_style.py  168
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axislines.py  828
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/clip_path.py  135
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/floating_axes.py  544
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/grid_finder.py  340
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/grid_helper_curvelinear.py  475
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/axisartist/parasite_axes.py  18
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/__init__.py  6
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/art3d.py  774
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/axes3d.py  2958
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/axis3d.py  484
-rw-r--r--  contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/proj3d.py  203
-rw-r--r--  contrib/python/matplotlib/py2/pylab.py  3
-rw-r--r--  contrib/python/matplotlib/py2/src/_backend_agg.cpp  234
-rw-r--r--  contrib/python/matplotlib/py2/src/_backend_agg.h  1294
-rw-r--r--  contrib/python/matplotlib/py2/src/_backend_agg_basic_types.h  127
-rw-r--r--  contrib/python/matplotlib/py2/src/_backend_agg_wrapper.cpp  777
-rw-r--r--  contrib/python/matplotlib/py2/src/_contour.cpp  1790
-rw-r--r--  contrib/python/matplotlib/py2/src/_contour.h  530
-rw-r--r--  contrib/python/matplotlib/py2/src/_contour_wrapper.cpp  203
-rw-r--r--  contrib/python/matplotlib/py2/src/_image.cpp  175
-rw-r--r--  contrib/python/matplotlib/py2/src/_image.h  200
-rw-r--r--  contrib/python/matplotlib/py2/src/_image_resample.h  1013
-rw-r--r--  contrib/python/matplotlib/py2/src/_image_wrapper.cpp  510
-rw-r--r--  contrib/python/matplotlib/py2/src/_path.h  1316
-rw-r--r--  contrib/python/matplotlib/py2/src/_path_wrapper.cpp  900
-rw-r--r--  contrib/python/matplotlib/py2/src/_png.cpp  793
-rw-r--r--  contrib/python/matplotlib/py2/src/_tkagg.cpp  475
-rw-r--r--  contrib/python/matplotlib/py2/src/_tkmini.h  128
-rw-r--r--  contrib/python/matplotlib/py2/src/_ttconv.cpp  307
-rw-r--r--  contrib/python/matplotlib/py2/src/agg_workaround.h  85
-rw-r--r--  contrib/python/matplotlib/py2/src/array.h  80
-rw-r--r--  contrib/python/matplotlib/py2/src/file_compat.h  240
-rw-r--r--  contrib/python/matplotlib/py2/src/ft2font.cpp  808
-rw-r--r--  contrib/python/matplotlib/py2/src/ft2font.h  139
-rw-r--r--  contrib/python/matplotlib/py2/src/ft2font_wrapper.cpp  1805
-rw-r--r--  contrib/python/matplotlib/py2/src/mplutils.cpp  21
-rw-r--r--  contrib/python/matplotlib/py2/src/mplutils.h  72
-rw-r--r--  contrib/python/matplotlib/py2/src/numpy_cpp.h  569
-rw-r--r--  contrib/python/matplotlib/py2/src/path_converters.h  1011
-rw-r--r--  contrib/python/matplotlib/py2/src/py_adaptors.h  251
-rw-r--r--  contrib/python/matplotlib/py2/src/py_converters.cpp  619
-rw-r--r--  contrib/python/matplotlib/py2/src/py_converters.h  49
-rw-r--r--  contrib/python/matplotlib/py2/src/py_exceptions.h  72
-rw-r--r--  contrib/python/matplotlib/py2/src/qhull_wrap.c  377
-rw-r--r--  contrib/python/matplotlib/py2/src/ya.make  66
-rw-r--r--  contrib/python/matplotlib/py2/ya.make  243
338 files changed, 194333 insertions, 0 deletions
diff --git a/contrib/python/matplotlib/py2/LICENSE/LICENSE b/contrib/python/matplotlib/py2/LICENSE/LICENSE
new file mode 100644
index 00000000000..ec51537db27
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/LICENSE
@@ -0,0 +1,99 @@
+License agreement for matplotlib versions 1.3.0 and later
+=========================================================
+
+1. This LICENSE AGREEMENT is between the Matplotlib Development Team
+("MDT"), and the Individual or Organization ("Licensee") accessing and
+otherwise using matplotlib software in source or binary form and its
+associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, MDT
+hereby grants Licensee a nonexclusive, royalty-free, world-wide license
+to reproduce, analyze, test, perform and/or display publicly, prepare
+derivative works, distribute, and otherwise use matplotlib
+alone or in any derivative version, provided, however, that MDT's
+License Agreement and MDT's notice of copyright, i.e., "Copyright (c)
+2012- Matplotlib Development Team; All Rights Reserved" are retained in
+matplotlib alone or in any derivative version prepared by
+Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on or
+incorporates matplotlib or any part thereof, and wants to
+make the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to matplotlib .
+
+4. MDT is making matplotlib available to Licensee on an "AS
+IS" basis. MDT MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, MDT MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB
+WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. MDT SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
+ FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
+LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
+MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
+THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between MDT and
+Licensee. This License Agreement does not grant permission to use MDT
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using matplotlib ,
+Licensee agrees to be bound by the terms and conditions of this License
+Agreement.
+
+License agreement for matplotlib versions prior to 1.3.0
+========================================================
+
+1. This LICENSE AGREEMENT is between John D. Hunter ("JDH"), and the
+Individual or Organization ("Licensee") accessing and otherwise using
+matplotlib software in source or binary form and its associated
+documentation.
+
+2. Subject to the terms and conditions of this License Agreement, JDH
+hereby grants Licensee a nonexclusive, royalty-free, world-wide license
+to reproduce, analyze, test, perform and/or display publicly, prepare
+derivative works, distribute, and otherwise use matplotlib
+alone or in any derivative version, provided, however, that JDH's
+License Agreement and JDH's notice of copyright, i.e., "Copyright (c)
+2002-2011 John D. Hunter; All Rights Reserved" are retained in
+matplotlib alone or in any derivative version prepared by
+Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on or
+incorporates matplotlib or any part thereof, and wants to
+make the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to matplotlib.
+
+4. JDH is making matplotlib available to Licensee on an "AS
+IS" basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB
+WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
+ FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
+LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
+MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
+THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between JDH and
+Licensee. This License Agreement does not grant permission to use JDH
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using matplotlib,
+Licensee agrees to be bound by the terms and conditions of this License
+Agreement.
\ No newline at end of file
diff --git a/contrib/python/matplotlib/py2/LICENSE/LICENSE.PIL b/contrib/python/matplotlib/py2/LICENSE/LICENSE.PIL
new file mode 100644
index 00000000000..3f77350b923
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/LICENSE.PIL
@@ -0,0 +1,12 @@
+Software License
+
+The Python Imaging Library (PIL) is
+
+ Copyright © 1997-2011 by Secret Labs AB
+ Copyright © 1995-2011 by Fredrik Lundh
+
+By obtaining, using, and/or copying this software and/or its associated documentation, you agree that you have read, understood, and will comply with the following terms and conditions:
+
+Permission to use, copy, modify, and distribute this software and its associated documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appears in all copies, and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of Secret Labs AB or the author not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission.
+
+SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
\ No newline at end of file
diff --git a/contrib/python/matplotlib/py2/LICENSE/LICENSE_AMSFONTS b/contrib/python/matplotlib/py2/LICENSE/LICENSE_AMSFONTS
new file mode 100644
index 00000000000..3627bb9bb61
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/LICENSE_AMSFONTS
@@ -0,0 +1,240 @@
+The cmr10.pfb file is a Type-1 version of one of Knuth's Computer Modern fonts.
+It is included here as test data only, but the following license applies.
+
+Copyright (c) 1997, 2009, American Mathematical Society (http://www.ams.org).
+All Rights Reserved.
+
+"cmb10" is a Reserved Font Name for this Font Software.
+"cmbsy10" is a Reserved Font Name for this Font Software.
+"cmbsy5" is a Reserved Font Name for this Font Software.
+"cmbsy6" is a Reserved Font Name for this Font Software.
+"cmbsy7" is a Reserved Font Name for this Font Software.
+"cmbsy8" is a Reserved Font Name for this Font Software.
+"cmbsy9" is a Reserved Font Name for this Font Software.
+"cmbx10" is a Reserved Font Name for this Font Software.
+"cmbx12" is a Reserved Font Name for this Font Software.
+"cmbx5" is a Reserved Font Name for this Font Software.
+"cmbx6" is a Reserved Font Name for this Font Software.
+"cmbx7" is a Reserved Font Name for this Font Software.
+"cmbx8" is a Reserved Font Name for this Font Software.
+"cmbx9" is a Reserved Font Name for this Font Software.
+"cmbxsl10" is a Reserved Font Name for this Font Software.
+"cmbxti10" is a Reserved Font Name for this Font Software.
+"cmcsc10" is a Reserved Font Name for this Font Software.
+"cmcsc8" is a Reserved Font Name for this Font Software.
+"cmcsc9" is a Reserved Font Name for this Font Software.
+"cmdunh10" is a Reserved Font Name for this Font Software.
+"cmex10" is a Reserved Font Name for this Font Software.
+"cmex7" is a Reserved Font Name for this Font Software.
+"cmex8" is a Reserved Font Name for this Font Software.
+"cmex9" is a Reserved Font Name for this Font Software.
+"cmff10" is a Reserved Font Name for this Font Software.
+"cmfi10" is a Reserved Font Name for this Font Software.
+"cmfib8" is a Reserved Font Name for this Font Software.
+"cminch" is a Reserved Font Name for this Font Software.
+"cmitt10" is a Reserved Font Name for this Font Software.
+"cmmi10" is a Reserved Font Name for this Font Software.
+"cmmi12" is a Reserved Font Name for this Font Software.
+"cmmi5" is a Reserved Font Name for this Font Software.
+"cmmi6" is a Reserved Font Name for this Font Software.
+"cmmi7" is a Reserved Font Name for this Font Software.
+"cmmi8" is a Reserved Font Name for this Font Software.
+"cmmi9" is a Reserved Font Name for this Font Software.
+"cmmib10" is a Reserved Font Name for this Font Software.
+"cmmib5" is a Reserved Font Name for this Font Software.
+"cmmib6" is a Reserved Font Name for this Font Software.
+"cmmib7" is a Reserved Font Name for this Font Software.
+"cmmib8" is a Reserved Font Name for this Font Software.
+"cmmib9" is a Reserved Font Name for this Font Software.
+"cmr10" is a Reserved Font Name for this Font Software.
+"cmr12" is a Reserved Font Name for this Font Software.
+"cmr17" is a Reserved Font Name for this Font Software.
+"cmr5" is a Reserved Font Name for this Font Software.
+"cmr6" is a Reserved Font Name for this Font Software.
+"cmr7" is a Reserved Font Name for this Font Software.
+"cmr8" is a Reserved Font Name for this Font Software.
+"cmr9" is a Reserved Font Name for this Font Software.
+"cmsl10" is a Reserved Font Name for this Font Software.
+"cmsl12" is a Reserved Font Name for this Font Software.
+"cmsl8" is a Reserved Font Name for this Font Software.
+"cmsl9" is a Reserved Font Name for this Font Software.
+"cmsltt10" is a Reserved Font Name for this Font Software.
+"cmss10" is a Reserved Font Name for this Font Software.
+"cmss12" is a Reserved Font Name for this Font Software.
+"cmss17" is a Reserved Font Name for this Font Software.
+"cmss8" is a Reserved Font Name for this Font Software.
+"cmss9" is a Reserved Font Name for this Font Software.
+"cmssbx10" is a Reserved Font Name for this Font Software.
+"cmssdc10" is a Reserved Font Name for this Font Software.
+"cmssi10" is a Reserved Font Name for this Font Software.
+"cmssi12" is a Reserved Font Name for this Font Software.
+"cmssi17" is a Reserved Font Name for this Font Software.
+"cmssi8" is a Reserved Font Name for this Font Software.
+"cmssi9" is a Reserved Font Name for this Font Software.
+"cmssq8" is a Reserved Font Name for this Font Software.
+"cmssqi8" is a Reserved Font Name for this Font Software.
+"cmsy10" is a Reserved Font Name for this Font Software.
+"cmsy5" is a Reserved Font Name for this Font Software.
+"cmsy6" is a Reserved Font Name for this Font Software.
+"cmsy7" is a Reserved Font Name for this Font Software.
+"cmsy8" is a Reserved Font Name for this Font Software.
+"cmsy9" is a Reserved Font Name for this Font Software.
+"cmtcsc10" is a Reserved Font Name for this Font Software.
+"cmtex10" is a Reserved Font Name for this Font Software.
+"cmtex8" is a Reserved Font Name for this Font Software.
+"cmtex9" is a Reserved Font Name for this Font Software.
+"cmti10" is a Reserved Font Name for this Font Software.
+"cmti12" is a Reserved Font Name for this Font Software.
+"cmti7" is a Reserved Font Name for this Font Software.
+"cmti8" is a Reserved Font Name for this Font Software.
+"cmti9" is a Reserved Font Name for this Font Software.
+"cmtt10" is a Reserved Font Name for this Font Software.
+"cmtt12" is a Reserved Font Name for this Font Software.
+"cmtt8" is a Reserved Font Name for this Font Software.
+"cmtt9" is a Reserved Font Name for this Font Software.
+"cmu10" is a Reserved Font Name for this Font Software.
+"cmvtt10" is a Reserved Font Name for this Font Software.
+"euex10" is a Reserved Font Name for this Font Software.
+"euex7" is a Reserved Font Name for this Font Software.
+"euex8" is a Reserved Font Name for this Font Software.
+"euex9" is a Reserved Font Name for this Font Software.
+"eufb10" is a Reserved Font Name for this Font Software.
+"eufb5" is a Reserved Font Name for this Font Software.
+"eufb7" is a Reserved Font Name for this Font Software.
+"eufm10" is a Reserved Font Name for this Font Software.
+"eufm5" is a Reserved Font Name for this Font Software.
+"eufm7" is a Reserved Font Name for this Font Software.
+"eurb10" is a Reserved Font Name for this Font Software.
+"eurb5" is a Reserved Font Name for this Font Software.
+"eurb7" is a Reserved Font Name for this Font Software.
+"eurm10" is a Reserved Font Name for this Font Software.
+"eurm5" is a Reserved Font Name for this Font Software.
+"eurm7" is a Reserved Font Name for this Font Software.
+"eusb10" is a Reserved Font Name for this Font Software.
+"eusb5" is a Reserved Font Name for this Font Software.
+"eusb7" is a Reserved Font Name for this Font Software.
+"eusm10" is a Reserved Font Name for this Font Software.
+"eusm5" is a Reserved Font Name for this Font Software.
+"eusm7" is a Reserved Font Name for this Font Software.
+"lasy10" is a Reserved Font Name for this Font Software.
+"lasy5" is a Reserved Font Name for this Font Software.
+"lasy6" is a Reserved Font Name for this Font Software.
+"lasy7" is a Reserved Font Name for this Font Software.
+"lasy8" is a Reserved Font Name for this Font Software.
+"lasy9" is a Reserved Font Name for this Font Software.
+"lasyb10" is a Reserved Font Name for this Font Software.
+"lcircle1" is a Reserved Font Name for this Font Software.
+"lcirclew" is a Reserved Font Name for this Font Software.
+"lcmss8" is a Reserved Font Name for this Font Software.
+"lcmssb8" is a Reserved Font Name for this Font Software.
+"lcmssi8" is a Reserved Font Name for this Font Software.
+"line10" is a Reserved Font Name for this Font Software.
+"linew10" is a Reserved Font Name for this Font Software.
+"msam10" is a Reserved Font Name for this Font Software.
+"msam5" is a Reserved Font Name for this Font Software.
+"msam6" is a Reserved Font Name for this Font Software.
+"msam7" is a Reserved Font Name for this Font Software.
+"msam8" is a Reserved Font Name for this Font Software.
+"msam9" is a Reserved Font Name for this Font Software.
+"msbm10" is a Reserved Font Name for this Font Software.
+"msbm5" is a Reserved Font Name for this Font Software.
+"msbm6" is a Reserved Font Name for this Font Software.
+"msbm7" is a Reserved Font Name for this Font Software.
+"msbm8" is a Reserved Font Name for this Font Software.
+"msbm9" is a Reserved Font Name for this Font Software.
+"wncyb10" is a Reserved Font Name for this Font Software.
+"wncyi10" is a Reserved Font Name for this Font Software.
+"wncyr10" is a Reserved Font Name for this Font Software.
+"wncysc10" is a Reserved Font Name for this Font Software.
+"wncyss10" is a Reserved Font Name for this Font Software.
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+http://scripts.sil.org/OFL
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/contrib/python/matplotlib/py2/LICENSE/LICENSE_BAKOMA b/contrib/python/matplotlib/py2/LICENSE/LICENSE_BAKOMA
new file mode 100644
index 00000000000..801e20cd736
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/LICENSE_BAKOMA
@@ -0,0 +1,40 @@
+
+ BaKoMa Fonts Licence
+ --------------------
+
+ This licence covers two font packs (known as BaKoMa Fonts Colelction,
+ which is available at `CTAN:fonts/cm/ps-type1/bakoma/'):
+
+ 1) BaKoMa-CM (1.1/12-Nov-94)
+ Computer Modern Fonts in PostScript Type 1 and TrueType font formats.
+
+ 2) BaKoMa-AMS (1.2/19-Jan-95)
+ AMS TeX fonts in PostScript Type 1 and TrueType font formats.
+
+ Copyright (C) 1994, 1995, Basil K. Malyshev. All Rights Reserved.
+
+ Permission to copy and distribute these fonts for any purpose is
+ hereby granted without fee, provided that the above copyright notice,
+ author statement and this permission notice appear in all copies of
+ these fonts and related documentation.
+
+ Permission to modify and distribute modified fonts for any purpose is
+ hereby granted without fee, provided that the copyright notice,
+ author statement, this permission notice and location of original
+ fonts (http://www.ctan.org/tex-archive/fonts/cm/ps-type1/bakoma)
+ appear in all copies of modified fonts and related documentation.
+
+ Permission to use these fonts (embedding into PostScript, PDF, SVG
+ and printing by using any software) is hereby granted without fee.
+ It is not required to provide any notices about using these fonts.
+
+ Basil K. Malyshev
+ INSTITUTE FOR HIGH ENERGY PHYSICS
+ IHEP, OMVT
+ Moscow Region
+ 142281 PROTVINO
+ RUSSIA
+
+ E-Mail: bakoma@mail.ru
+ or malyshev@mail.ihep.ru
+
diff --git a/contrib/python/matplotlib/py2/LICENSE/LICENSE_COLORBREWER b/contrib/python/matplotlib/py2/LICENSE/LICENSE_COLORBREWER
new file mode 100644
index 00000000000..568afe883ec
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/LICENSE_COLORBREWER
@@ -0,0 +1,38 @@
+Apache-Style Software License for ColorBrewer Color Schemes
+
+Version 1.1
+
+Copyright (c) 2002 Cynthia Brewer, Mark Harrower, and The Pennsylvania
+State University. All rights reserved. Redistribution and use in source
+and binary forms, with or without modification, are permitted provided
+that the following conditions are met:
+
+1. Redistributions as source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. The end-user documentation included with the redistribution, if any,
+must include the following acknowledgment: "This product includes color
+specifications and designs developed by Cynthia Brewer
+(http://colorbrewer.org/)." Alternately, this acknowledgment may appear in
+the software itself, if and wherever such third-party acknowledgments
+normally appear.
+
+3. The name "ColorBrewer" must not be used to endorse or promote products
+derived from this software without prior written permission. For written
+permission, please contact Cynthia Brewer at cbrewer@psu.edu.
+
+4. Products derived from this software may not be called "ColorBrewer",
+nor may "ColorBrewer" appear in their name, without prior written
+permission of Cynthia Brewer.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+CYNTHIA BREWER, MARK HARROWER, OR THE PENNSYLVANIA STATE UNIVERSITY BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/python/matplotlib/py2/LICENSE/LICENSE_CONDA b/contrib/python/matplotlib/py2/LICENSE/LICENSE_CONDA
new file mode 100644
index 00000000000..8794a6d484a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/LICENSE_CONDA
@@ -0,0 +1,51 @@
+Except where noted below, conda is released under the following terms:
+
+(c) 2012 Continuum Analytics, Inc. / http://continuum.io
+All Rights Reserved
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Continuum Analytics, Inc. nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL CONTINUUM ANALYTICS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Exceptions
+==========
+
+versioneer.py is Public Domain
+
+The ProgressBar package is released under the following terms:
+
+# progressbar - Text progress bar library for Python.
+# Copyright (c) 2005 Nilton Volpato
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
\ No newline at end of file
diff --git a/contrib/python/matplotlib/py2/LICENSE/LICENSE_JQUERY b/contrib/python/matplotlib/py2/LICENSE/LICENSE_JQUERY
new file mode 100644
index 00000000000..f35387a3ab4
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/LICENSE_JQUERY
@@ -0,0 +1,61 @@
+Comment found in jQuery source code:
+
+/*!
+ * jQuery JavaScript Library v1.11.3
+ * http://jquery.com/
+ *
+ * Includes Sizzle.js
+ * http://sizzlejs.com/
+ *
+ * Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
+ * Released under the MIT license
+ * http://jquery.org/license
+ *
+ * Date: 2015-04-28T16:19Z
+ */
+
+Comment found in jQuery UI source code:
+
+/*! jQuery UI - v1.11.4 - 2015-03-11
+* http://jqueryui.com
+* Includes: core.js, widget.js, mouse.js, position.js, accordion.js, autocomplete.js, button.js, datepicker.js, dialog.js, draggable.js, droppable.js, effect.js, effect-blind.js, effect-bounce.js, effect-clip.js, effect-drop.js, effect-explode.js, effect-fade.js, effect-fold.js, effect-highlight.js, effect-puff.js, effect-pulsate.js, effect-scale.js, effect-shake.js, effect-size.js, effect-slide.js, effect-transfer.js, menu.js, progressbar.js, resizable.js, selectable.js, selectmenu.js, slider.js, sortable.js, spinner.js, tabs.js, tooltip.js
+* Copyright 2015 jQuery Foundation and other contributors; Licensed MIT */
+
+Text found at http://jquery.org/license:
+
+ jQuery Foundation projects are released under the terms of the license
+ specified in the project's repository or if not specified, under the
+ MIT license.
+
+ The MIT License is simple and easy to understand and it places almost
+ no restrictions on what you can do with a jQuery Foundation project.
+
+ You are free to use any jQuery Foundation project in any other project
+ (even commercial projects) as long as the copyright header is left
+ intact.
+
+The text links to https://tldrlegal.com/license/mit-license
+which includes the following as the "Full License Text":
+
+ The MIT License (MIT)
+
+ Copyright (c) <year> <copyright holders>
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/contrib/python/matplotlib/py2/LICENSE/LICENSE_QT4_EDITOR b/contrib/python/matplotlib/py2/LICENSE/LICENSE_QT4_EDITOR
new file mode 100644
index 00000000000..1c9d941973c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/LICENSE_QT4_EDITOR
@@ -0,0 +1,30 @@
+
+Module creating PyQt4 form dialogs/layouts to edit various type of parameters
+
+
+formlayout License Agreement (MIT License)
+------------------------------------------
+
+Copyright (c) 2009 Pierre Raybaut
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+"""
diff --git a/contrib/python/matplotlib/py2/LICENSE/LICENSE_STIX b/contrib/python/matplotlib/py2/LICENSE/LICENSE_STIX
new file mode 100644
index 00000000000..2f7aeea331c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/LICENSE_STIX
@@ -0,0 +1,71 @@
+TERMS AND CONDITIONS
+
+ 1. Permission is hereby granted, free of charge, to any person
+obtaining a copy of the STIX Fonts-TM set accompanying this license
+(collectively, the "Fonts") and the associated documentation files
+(collectively with the Fonts, the "Font Software"), to reproduce and
+distribute the Font Software, including the rights to use, copy, merge
+and publish copies of the Font Software, and to permit persons to whom
+the Font Software is furnished to do so same, subject to the following
+terms and conditions (the "License").
+
+ 2. The following copyright and trademark notice and these Terms and
+Conditions shall be included in all copies of one or more of the Font
+typefaces and any derivative work created as permitted under this
+License:
+
+ Copyright (c) 2001-2005 by the STI Pub Companies, consisting of
+the American Institute of Physics, the American Chemical Society, the
+American Mathematical Society, the American Physical Society, Elsevier,
+Inc., and The Institute of Electrical and Electronic Engineers, Inc.
+Portions copyright (c) 1998-2003 by MicroPress, Inc. Portions copyright
+(c) 1990 by Elsevier, Inc. All rights reserved. STIX Fonts-TM is a
+trademark of The Institute of Electrical and Electronics Engineers, Inc.
+
+ 3. You may (a) convert the Fonts from one format to another (e.g.,
+from TrueType to PostScript), in which case the normal and reasonable
+distortion that occurs during such conversion shall be permitted and (b)
+embed or include a subset of the Fonts in a document for the purposes of
+allowing users to read text in the document that utilizes the Fonts. In
+each case, you may use the STIX Fonts-TM mark to designate the resulting
+Fonts or subset of the Fonts.
+
+ 4. You may also (a) add glyphs or characters to the Fonts, or modify
+the shape of existing glyphs, so long as the base set of glyphs is not
+removed and (b) delete glyphs or characters from the Fonts, provided
+that the resulting font set is distributed with the following
+disclaimer: "This [name] font does not include all the Unicode points
+covered in the STIX Fonts-TM set but may include others." In each case,
+the name used to denote the resulting font set shall not include the
+term "STIX" or any similar term.
+
+ 5. You may charge a fee in connection with the distribution of the
+Font Software, provided that no copy of one or more of the individual
+Font typefaces that form the STIX Fonts-TM set may be sold by itself.
+
+ 6. THE FONT SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY
+KIND, EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK OR OTHER RIGHT. IN NO EVENT SHALL
+MICROPRESS OR ANY OF THE STI PUB COMPANIES BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, INCLUDING, BUT NOT LIMITED TO, ANY GENERAL,
+SPECIAL, INDIRECT, INCIDENTAL OR CONSEQUENTIAL DAMAGES, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM OR OUT OF THE USE OR
+INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT
+SOFTWARE.
+
+ 7. Except as contained in the notice set forth in Section 2, the
+names MicroPress Inc. and STI Pub Companies, as well as the names of the
+companies/organizations that compose the STI Pub Companies, shall not be
+used in advertising or otherwise to promote the sale, use or other
+dealings in the Font Software without the prior written consent of the
+respective company or organization.
+
+ 8. This License shall become null and void in the event of any
+material breach of the Terms and Conditions herein by licensee.
+
+ 9. A substantial portion of the STIX Fonts set was developed by
+MicroPress Inc. for the STI Pub Companies. To obtain additional
+mathematical fonts, please contact MicroPress, Inc., 68-30 Harrow
+Street, Forest Hills, NY 11375, USA - Phone: (718) 575-1816.
+
diff --git a/contrib/python/matplotlib/py2/LICENSE/LICENSE_YORICK b/contrib/python/matplotlib/py2/LICENSE/LICENSE_YORICK
new file mode 100644
index 00000000000..8c908509a73
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/LICENSE_YORICK
@@ -0,0 +1,49 @@
+BSD-style license for gist/yorick colormaps.
+
+Copyright:
+
+ Copyright (c) 1996. The Regents of the University of California.
+ All rights reserved.
+
+Permission to use, copy, modify, and distribute this software for any
+purpose without fee is hereby granted, provided that this entire
+notice is included in all copies of any software which is or includes
+a copy or modification of this software and in all copies of the
+supporting documentation for such software.
+
+This work was produced at the University of California, Lawrence
+Livermore National Laboratory under contract no. W-7405-ENG-48 between
+the U.S. Department of Energy and The Regents of the University of
+California for the operation of UC LLNL.
+
+
+ DISCLAIMER
+
+This software was prepared as an account of work sponsored by an
+agency of the United States Government. Neither the United States
+Government nor the University of California nor any of their
+employees, makes any warranty, express or implied, or assumes any
+liability or responsibility for the accuracy, completeness, or
+usefulness of any information, apparatus, product, or process
+disclosed, or represents that its use would not infringe
+privately-owned rights. Reference herein to any specific commercial
+products, process, or service by trade name, trademark, manufacturer,
+or otherwise, does not necessarily constitute or imply its
+endorsement, recommendation, or favoring by the United States
+Government or the University of California. The views and opinions of
+authors expressed herein do not necessarily state or reflect those of
+the United States Government or the University of California, and
+shall not be used for advertising or product endorsement purposes.
+
+
+ AUTHOR
+
+David H. Munro wrote Yorick and Gist. Berkeley Yacc (byacc) generated
+the Yorick parser. The routines in Math are from LAPACK and FFTPACK;
+MathC contains C translations by David H. Munro. The algorithms for
+Yorick's random number generator and several special functions in
+Yorick/include were taken from Numerical Recipes by Press, et. al.,
+although the Yorick implementations are unrelated to those in
+Numerical Recipes. A small amount of code in Gist was adapted from
+the X11R4 release, copyright M.I.T. -- the complete copyright notice
+may be found in the (unused) file Gist/host.c.
diff --git a/contrib/python/matplotlib/py2/LICENSE/LICENSE_enthought.txt b/contrib/python/matplotlib/py2/LICENSE/LICENSE_enthought.txt
new file mode 100644
index 00000000000..27727c5eae9
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/LICENSE_enthought.txt
@@ -0,0 +1,29 @@
+Copyright (c) 2001, 2002 Enthought, Inc.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ a. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ b. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ c. Neither the name of the Enthought nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
diff --git a/contrib/python/matplotlib/py2/LICENSE/Solarized.txt b/contrib/python/matplotlib/py2/LICENSE/Solarized.txt
new file mode 100644
index 00000000000..6e5a0475dd2
--- /dev/null
+++ b/contrib/python/matplotlib/py2/LICENSE/Solarized.txt
@@ -0,0 +1,20 @@
+https://github.com/altercation/solarized/blob/master/LICENSE
+Copyright (c) 2011 Ethan Schoonover
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/contrib/python/matplotlib/py2/README.rst b/contrib/python/matplotlib/py2/README.rst
new file mode 100644
index 00000000000..b13658c300f
--- /dev/null
+++ b/contrib/python/matplotlib/py2/README.rst
@@ -0,0 +1,83 @@
+|Travis|_ |AppVeyor|_ |Codecov|_ |PyPi|_ |Gitter|_ |NUMFocus|_
+
+
+.. |Travis| image:: https://travis-ci.org/matplotlib/matplotlib.svg?branch=master
+.. _Travis: https://travis-ci.org/matplotlib/matplotlib
+
+.. |AppVeyor| image:: https://ci.appveyor.com/api/projects/status/github/matplotlib/matplotlib?branch=master&svg=true
+.. _AppVeyor: https://ci.appveyor.com/project/matplotlib/matplotlib
+
+.. |Codecov| image:: https://codecov.io/github/matplotlib/matplotlib/badge.svg?branch=master&service=github
+.. _Codecov: https://codecov.io/github/matplotlib/matplotlib?branch=master
+
+.. |PyPi| image:: https://badge.fury.io/py/matplotlib.svg
+.. _PyPi: https://badge.fury.io/py/matplotlib
+
+.. |Gitter| image:: https://badges.gitter.im/matplotlib/matplotlib.png
+.. _Gitter: https://gitter.im/matplotlib/matplotlib
+
+.. |NUMFocus| image:: https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A
+.. _NUMFocus: http://www.numfocus.org
+
+
+##########
+Matplotlib
+##########
+
+Matplotlib is a Python 2D plotting library which produces publication-quality
+figures in a variety of hardcopy formats and interactive environments across
+platforms. Matplotlib can be used in Python scripts, the Python and IPython
+shell (à la MATLAB or Mathematica), web application servers, and various
+graphical user interface toolkits.
+
+`Home page <http://matplotlib.org/>`_
+
+Installation
+============
+
+For installation instructions and requirements, see the INSTALL.rst file or the
+`install <http://matplotlib.org/users/installing.html>`_ documentation. If you
+think you may want to contribute to matplotlib, check out the `guide to
+working with the source code
+<http://matplotlib.org/devel/gitwash/index.html>`_.
+
+Testing
+=======
+
+After installation, you can launch the test suite::
+
+ py.test
+
+Or from the Python interpreter::
+
+ import matplotlib
+ matplotlib.test()
+
+Consider reading http://matplotlib.org/devel/coding_guide.html#testing for
+more information. Note that the test suite requires pytest and, on Python 2.7,
+mock. Please install with pip or your package manager of choice.
+
+Contact
+=======
+matplotlib's communication channels include active mailing lists:
+
+* `Users <https://mail.python.org/mailman/listinfo/matplotlib-users>`_ mailing list: matplotlib-users@python.org
+* `Announcement <https://mail.python.org/mailman/listinfo/matplotlib-announce>`_ mailing list: matplotlib-announce@python.org
+* `Development <https://mail.python.org/mailman/listinfo/matplotlib-devel>`_ mailing list: matplotlib-devel@python.org
+
+The first is a good starting point for general questions and discussions.
+
+Gitter_ is for coordinating development and asking questions directly related
+to contributing to matplotlib.
+
+Contribute
+==========
+You've discovered a bug or something else you want to change - excellent!
+
+You've worked out a way to fix it – even better!
+
+You want to tell us about it – best of all!
+
+Start at the `contributing guide <http://matplotlib.org/devdocs/devel/contributing.html>`_!
+
+Developer notes are now at `Developer Discussions <https://github.com/orgs/matplotlib/teams/developers/discussions>`_
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_alpha_mask_u8.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_alpha_mask_u8.h
new file mode 100644
index 00000000000..e301c100880
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_alpha_mask_u8.h
@@ -0,0 +1,499 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// scanline_u8 class
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_ALPHA_MASK_U8_INCLUDED
+#define AGG_ALPHA_MASK_U8_INCLUDED
+
+#include <string.h>
+#include "agg_basics.h"
+#include "agg_rendering_buffer.h"
+
+namespace agg
+{
+ //===================================================one_component_mask_u8
+ struct one_component_mask_u8
+ {
+ static unsigned calculate(const int8u* p) { return *p; }
+ };
+
+
+ //=====================================================rgb_to_gray_mask_u8
+ template<unsigned R, unsigned G, unsigned B>
+ struct rgb_to_gray_mask_u8
+ {
+ static unsigned calculate(const int8u* p)
+ {
+ return (p[R]*77 + p[G]*150 + p[B]*29) >> 8;
+ }
+ };
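// Note on the weights above (annotation, not part of the upstream AGG
// sources): 77, 150 and 29 are the classic luminance coefficients 0.299,
// 0.587 and 0.114 rounded to integers that sum to 256, so the final ">> 8"
// keeps the grayscale result in the 0..255 range without a division.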
+
+ //==========================================================alpha_mask_u8
+ template<unsigned Step=1, unsigned Offset=0, class MaskF=one_component_mask_u8>
+ class alpha_mask_u8
+ {
+ public:
+ typedef int8u cover_type;
+ typedef alpha_mask_u8<Step, Offset, MaskF> self_type;
+ enum cover_scale_e
+ {
+ cover_shift = 8,
+ cover_none = 0,
+ cover_full = 255
+ };
+
+ alpha_mask_u8() : m_rbuf(0) {}
+ explicit alpha_mask_u8(rendering_buffer& rbuf) : m_rbuf(&rbuf) {}
+
+ void attach(rendering_buffer& rbuf) { m_rbuf = &rbuf; }
+
+ MaskF& mask_function() { return m_mask_function; }
+ const MaskF& mask_function() const { return m_mask_function; }
+
+
+ //--------------------------------------------------------------------
+ cover_type pixel(int x, int y) const
+ {
+ if(x >= 0 && y >= 0 &&
+ x < (int)m_rbuf->width() &&
+ y < (int)m_rbuf->height())
+ {
+ return (cover_type)m_mask_function.calculate(
+ m_rbuf->row_ptr(y) + x * Step + Offset);
+ }
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ cover_type combine_pixel(int x, int y, cover_type val) const
+ {
+ if(x >= 0 && y >= 0 &&
+ x < (int)m_rbuf->width() &&
+ y < (int)m_rbuf->height())
+ {
+ return (cover_type)((cover_full + val *
+ m_mask_function.calculate(
+ m_rbuf->row_ptr(y) + x * Step + Offset)) >>
+ cover_shift);
+ }
+ return 0;
+ }
+
+
+ //--------------------------------------------------------------------
+ void fill_hspan(int x, int y, cover_type* dst, int num_pix) const
+ {
+ int xmax = m_rbuf->width() - 1;
+ int ymax = m_rbuf->height() - 1;
+
+ int count = num_pix;
+ cover_type* covers = dst;
+
+ if(y < 0 || y > ymax)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+
+ if(x < 0)
+ {
+ count += x;
+ if(count <= 0)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+ memset(covers, 0, -x * sizeof(cover_type));
+ covers -= x;
+ x = 0;
+ }
+
+ if(x + count > xmax)
+ {
+ int rest = x + count - xmax - 1;
+ count -= rest;
+ if(count <= 0)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+ memset(covers + count, 0, rest * sizeof(cover_type));
+ }
+
+ const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
+ do
+ {
+ *covers++ = (cover_type)m_mask_function.calculate(mask);
+ mask += Step;
+ }
+ while(--count);
+ }
+
+
+ //--------------------------------------------------------------------
+ void combine_hspan(int x, int y, cover_type* dst, int num_pix) const
+ {
+ int xmax = m_rbuf->width() - 1;
+ int ymax = m_rbuf->height() - 1;
+
+ int count = num_pix;
+ cover_type* covers = dst;
+
+ if(y < 0 || y > ymax)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+
+ if(x < 0)
+ {
+ count += x;
+ if(count <= 0)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+ memset(covers, 0, -x * sizeof(cover_type));
+ covers -= x;
+ x = 0;
+ }
+
+ if(x + count > xmax)
+ {
+ int rest = x + count - xmax - 1;
+ count -= rest;
+ if(count <= 0)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+ memset(covers + count, 0, rest * sizeof(cover_type));
+ }
+
+ const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
+ do
+ {
+ *covers = (cover_type)((cover_full + (*covers) *
+ m_mask_function.calculate(mask)) >>
+ cover_shift);
+ ++covers;
+ mask += Step;
+ }
+ while(--count);
+ }
+
+ //--------------------------------------------------------------------
+ void fill_vspan(int x, int y, cover_type* dst, int num_pix) const
+ {
+ int xmax = m_rbuf->width() - 1;
+ int ymax = m_rbuf->height() - 1;
+
+ int count = num_pix;
+ cover_type* covers = dst;
+
+ if(x < 0 || x > xmax)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+
+ if(y < 0)
+ {
+ count += y;
+ if(count <= 0)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+ memset(covers, 0, -y * sizeof(cover_type));
+ covers -= y;
+ y = 0;
+ }
+
+ if(y + count > ymax)
+ {
+ int rest = y + count - ymax - 1;
+ count -= rest;
+ if(count <= 0)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+ memset(covers + count, 0, rest * sizeof(cover_type));
+ }
+
+ const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
+ do
+ {
+ *covers++ = (cover_type)m_mask_function.calculate(mask);
+ mask += m_rbuf->stride();
+ }
+ while(--count);
+ }
+
+ //--------------------------------------------------------------------
+ void combine_vspan(int x, int y, cover_type* dst, int num_pix) const
+ {
+ int xmax = m_rbuf->width() - 1;
+ int ymax = m_rbuf->height() - 1;
+
+ int count = num_pix;
+ cover_type* covers = dst;
+
+ if(x < 0 || x > xmax)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+
+ if(y < 0)
+ {
+ count += y;
+ if(count <= 0)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+ memset(covers, 0, -y * sizeof(cover_type));
+ covers -= y;
+ y = 0;
+ }
+
+ if(y + count > ymax)
+ {
+ int rest = y + count - ymax - 1;
+ count -= rest;
+ if(count <= 0)
+ {
+ memset(dst, 0, num_pix * sizeof(cover_type));
+ return;
+ }
+ memset(covers + count, 0, rest * sizeof(cover_type));
+ }
+
+ const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
+ do
+ {
+ *covers = (cover_type)((cover_full + (*covers) *
+ m_mask_function.calculate(mask)) >>
+ cover_shift);
+ ++covers;
+ mask += m_rbuf->stride();
+ }
+ while(--count);
+ }
+
+
+ private:
+ alpha_mask_u8(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ rendering_buffer* m_rbuf;
+ MaskF m_mask_function;
+ };
+
+
+ typedef alpha_mask_u8<1, 0> alpha_mask_gray8; //----alpha_mask_gray8
+
+ typedef alpha_mask_u8<3, 0> alpha_mask_rgb24r; //----alpha_mask_rgb24r
+ typedef alpha_mask_u8<3, 1> alpha_mask_rgb24g; //----alpha_mask_rgb24g
+ typedef alpha_mask_u8<3, 2> alpha_mask_rgb24b; //----alpha_mask_rgb24b
+
+ typedef alpha_mask_u8<3, 2> alpha_mask_bgr24r; //----alpha_mask_bgr24r
+ typedef alpha_mask_u8<3, 1> alpha_mask_bgr24g; //----alpha_mask_bgr24g
+ typedef alpha_mask_u8<3, 0> alpha_mask_bgr24b; //----alpha_mask_bgr24b
+
+ typedef alpha_mask_u8<4, 0> alpha_mask_rgba32r; //----alpha_mask_rgba32r
+ typedef alpha_mask_u8<4, 1> alpha_mask_rgba32g; //----alpha_mask_rgba32g
+ typedef alpha_mask_u8<4, 2> alpha_mask_rgba32b; //----alpha_mask_rgba32b
+ typedef alpha_mask_u8<4, 3> alpha_mask_rgba32a; //----alpha_mask_rgba32a
+
+ typedef alpha_mask_u8<4, 1> alpha_mask_argb32r; //----alpha_mask_argb32r
+ typedef alpha_mask_u8<4, 2> alpha_mask_argb32g; //----alpha_mask_argb32g
+ typedef alpha_mask_u8<4, 3> alpha_mask_argb32b; //----alpha_mask_argb32b
+ typedef alpha_mask_u8<4, 0> alpha_mask_argb32a; //----alpha_mask_argb32a
+
+ typedef alpha_mask_u8<4, 2> alpha_mask_bgra32r; //----alpha_mask_bgra32r
+ typedef alpha_mask_u8<4, 1> alpha_mask_bgra32g; //----alpha_mask_bgra32g
+ typedef alpha_mask_u8<4, 0> alpha_mask_bgra32b; //----alpha_mask_bgra32b
+ typedef alpha_mask_u8<4, 3> alpha_mask_bgra32a; //----alpha_mask_bgra32a
+
+ typedef alpha_mask_u8<4, 3> alpha_mask_abgr32r; //----alpha_mask_abgr32r
+ typedef alpha_mask_u8<4, 2> alpha_mask_abgr32g; //----alpha_mask_abgr32g
+ typedef alpha_mask_u8<4, 1> alpha_mask_abgr32b; //----alpha_mask_abgr32b
+ typedef alpha_mask_u8<4, 0> alpha_mask_abgr32a; //----alpha_mask_abgr32a
+
+ typedef alpha_mask_u8<3, 0, rgb_to_gray_mask_u8<0, 1, 2> > alpha_mask_rgb24gray; //----alpha_mask_rgb24gray
+ typedef alpha_mask_u8<3, 0, rgb_to_gray_mask_u8<2, 1, 0> > alpha_mask_bgr24gray; //----alpha_mask_bgr24gray
+ typedef alpha_mask_u8<4, 0, rgb_to_gray_mask_u8<0, 1, 2> > alpha_mask_rgba32gray; //----alpha_mask_rgba32gray
+ typedef alpha_mask_u8<4, 1, rgb_to_gray_mask_u8<0, 1, 2> > alpha_mask_argb32gray; //----alpha_mask_argb32gray
+ typedef alpha_mask_u8<4, 0, rgb_to_gray_mask_u8<2, 1, 0> > alpha_mask_bgra32gray; //----alpha_mask_bgra32gray
+ typedef alpha_mask_u8<4, 1, rgb_to_gray_mask_u8<2, 1, 0> > alpha_mask_abgr32gray; //----alpha_mask_abgr32gray
+
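// A minimal usage sketch for the typedefs above (an illustration, not part
// of the upstream sources). It assumes a caller-owned byte buffer `buf` of
// width*height 8-bit coverage values and the usual (ptr, width, height,
// stride) constructor of rendering_buffer from agg_rendering_buffer.h:
//
//     agg::rendering_buffer rbuf(buf, width, height, width);
//     agg::alpha_mask_gray8 mask(rbuf);
//     agg::alpha_mask_gray8::cover_type c = mask.pixel(x, y);  // 0..255, 0 outside
//     c = mask.combine_pixel(x, y, c);  // scales c by the mask coverage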
+
+
+ //==========================================================amask_no_clip_u8
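// Annotation (not part of the upstream sources): this variant has the same
// interface as alpha_mask_u8 above but performs no bounds checking, so the
// caller must guarantee that every requested pixel and span lies entirely
// inside the attached rendering_buffer.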
+ template<unsigned Step=1, unsigned Offset=0, class MaskF=one_component_mask_u8>
+ class amask_no_clip_u8
+ {
+ public:
+ typedef int8u cover_type;
+ typedef amask_no_clip_u8<Step, Offset, MaskF> self_type;
+ enum cover_scale_e
+ {
+ cover_shift = 8,
+ cover_none = 0,
+ cover_full = 255
+ };
+
+ amask_no_clip_u8() : m_rbuf(0) {}
+ explicit amask_no_clip_u8(rendering_buffer& rbuf) : m_rbuf(&rbuf) {}
+
+ void attach(rendering_buffer& rbuf) { m_rbuf = &rbuf; }
+
+ MaskF& mask_function() { return m_mask_function; }
+ const MaskF& mask_function() const { return m_mask_function; }
+
+
+ //--------------------------------------------------------------------
+ cover_type pixel(int x, int y) const
+ {
+ return (cover_type)m_mask_function.calculate(
+ m_rbuf->row_ptr(y) + x * Step + Offset);
+ }
+
+
+ //--------------------------------------------------------------------
+ cover_type combine_pixel(int x, int y, cover_type val) const
+ {
+ return (cover_type)((cover_full + val *
+ m_mask_function.calculate(
+ m_rbuf->row_ptr(y) + x * Step + Offset)) >>
+ cover_shift);
+ }
+
+
+ //--------------------------------------------------------------------
+ void fill_hspan(int x, int y, cover_type* dst, int num_pix) const
+ {
+ const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
+ do
+ {
+ *dst++ = (cover_type)m_mask_function.calculate(mask);
+ mask += Step;
+ }
+ while(--num_pix);
+ }
+
+
+
+ //--------------------------------------------------------------------
+ void combine_hspan(int x, int y, cover_type* dst, int num_pix) const
+ {
+ const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
+ do
+ {
+ *dst = (cover_type)((cover_full + (*dst) *
+ m_mask_function.calculate(mask)) >>
+ cover_shift);
+ ++dst;
+ mask += Step;
+ }
+ while(--num_pix);
+ }
+
+
+ //--------------------------------------------------------------------
+ void fill_vspan(int x, int y, cover_type* dst, int num_pix) const
+ {
+ const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
+ do
+ {
+ *dst++ = (cover_type)m_mask_function.calculate(mask);
+ mask += m_rbuf->stride();
+ }
+ while(--num_pix);
+ }
+
+
+ //--------------------------------------------------------------------
+ void combine_vspan(int x, int y, cover_type* dst, int num_pix) const
+ {
+ const int8u* mask = m_rbuf->row_ptr(y) + x * Step + Offset;
+ do
+ {
+ *dst = (cover_type)((cover_full + (*dst) *
+ m_mask_function.calculate(mask)) >>
+ cover_shift);
+ ++dst;
+ mask += m_rbuf->stride();
+ }
+ while(--num_pix);
+ }
+
+ private:
+ amask_no_clip_u8(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ rendering_buffer* m_rbuf;
+ MaskF m_mask_function;
+ };
+
+
+ typedef amask_no_clip_u8<1, 0> amask_no_clip_gray8; //----amask_no_clip_gray8
+
+ typedef amask_no_clip_u8<3, 0> amask_no_clip_rgb24r; //----amask_no_clip_rgb24r
+ typedef amask_no_clip_u8<3, 1> amask_no_clip_rgb24g; //----amask_no_clip_rgb24g
+ typedef amask_no_clip_u8<3, 2> amask_no_clip_rgb24b; //----amask_no_clip_rgb24b
+
+ typedef amask_no_clip_u8<3, 2> amask_no_clip_bgr24r; //----amask_no_clip_bgr24r
+ typedef amask_no_clip_u8<3, 1> amask_no_clip_bgr24g; //----amask_no_clip_bgr24g
+ typedef amask_no_clip_u8<3, 0> amask_no_clip_bgr24b; //----amask_no_clip_bgr24b
+
+ typedef amask_no_clip_u8<4, 0> amask_no_clip_rgba32r; //----amask_no_clip_rgba32r
+ typedef amask_no_clip_u8<4, 1> amask_no_clip_rgba32g; //----amask_no_clip_rgba32g
+ typedef amask_no_clip_u8<4, 2> amask_no_clip_rgba32b; //----amask_no_clip_rgba32b
+ typedef amask_no_clip_u8<4, 3> amask_no_clip_rgba32a; //----amask_no_clip_rgba32a
+
+ typedef amask_no_clip_u8<4, 1> amask_no_clip_argb32r; //----amask_no_clip_argb32r
+ typedef amask_no_clip_u8<4, 2> amask_no_clip_argb32g; //----amask_no_clip_argb32g
+ typedef amask_no_clip_u8<4, 3> amask_no_clip_argb32b; //----amask_no_clip_argb32b
+ typedef amask_no_clip_u8<4, 0> amask_no_clip_argb32a; //----amask_no_clip_argb32a
+
+ typedef amask_no_clip_u8<4, 2> amask_no_clip_bgra32r; //----amask_no_clip_bgra32r
+ typedef amask_no_clip_u8<4, 1> amask_no_clip_bgra32g; //----amask_no_clip_bgra32g
+ typedef amask_no_clip_u8<4, 0> amask_no_clip_bgra32b; //----amask_no_clip_bgra32b
+ typedef amask_no_clip_u8<4, 3> amask_no_clip_bgra32a; //----amask_no_clip_bgra32a
+
+ typedef amask_no_clip_u8<4, 3> amask_no_clip_abgr32r; //----amask_no_clip_abgr32r
+ typedef amask_no_clip_u8<4, 2> amask_no_clip_abgr32g; //----amask_no_clip_abgr32g
+ typedef amask_no_clip_u8<4, 1> amask_no_clip_abgr32b; //----amask_no_clip_abgr32b
+ typedef amask_no_clip_u8<4, 0> amask_no_clip_abgr32a; //----amask_no_clip_abgr32a
+
+ typedef amask_no_clip_u8<3, 0, rgb_to_gray_mask_u8<0, 1, 2> > amask_no_clip_rgb24gray; //----amask_no_clip_rgb24gray
+ typedef amask_no_clip_u8<3, 0, rgb_to_gray_mask_u8<2, 1, 0> > amask_no_clip_bgr24gray; //----amask_no_clip_bgr24gray
+ typedef amask_no_clip_u8<4, 0, rgb_to_gray_mask_u8<0, 1, 2> > amask_no_clip_rgba32gray; //----amask_no_clip_rgba32gray
+ typedef amask_no_clip_u8<4, 1, rgb_to_gray_mask_u8<0, 1, 2> > amask_no_clip_argb32gray; //----amask_no_clip_argb32gray
+ typedef amask_no_clip_u8<4, 0, rgb_to_gray_mask_u8<2, 1, 0> > amask_no_clip_bgra32gray; //----amask_no_clip_bgra32gray
+ typedef amask_no_clip_u8<4, 1, rgb_to_gray_mask_u8<2, 1, 0> > amask_no_clip_abgr32gray; //----amask_no_clip_abgr32gray
+
+
+}
+
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_array.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_array.h
new file mode 100644
index 00000000000..8d56683840d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_array.h
@@ -0,0 +1,1119 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+#ifndef AGG_ARRAY_INCLUDED
+#define AGG_ARRAY_INCLUDED
+
+#include <stddef.h>
+#include <string.h>
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //-------------------------------------------------------pod_array_adaptor
+ template<class T> class pod_array_adaptor
+ {
+ public:
+ typedef T value_type;
+ pod_array_adaptor(T* array, unsigned size) :
+ m_array(array), m_size(size) {}
+
+ unsigned size() const { return m_size; }
+ const T& operator [] (unsigned i) const { return m_array[i]; }
+ T& operator [] (unsigned i) { return m_array[i]; }
+ const T& at(unsigned i) const { return m_array[i]; }
+ T& at(unsigned i) { return m_array[i]; }
+ T value_at(unsigned i) const { return m_array[i]; }
+
+ private:
+ T* m_array;
+ unsigned m_size;
+ };
+
+
+ //---------------------------------------------------------pod_auto_array
+ template<class T, unsigned Size> class pod_auto_array
+ {
+ public:
+ typedef T value_type;
+ typedef pod_auto_array<T, Size> self_type;
+
+ pod_auto_array() {}
+ explicit pod_auto_array(const T* c)
+ {
+ memcpy(m_array, c, sizeof(T) * Size);
+ }
+
+ const self_type& operator = (const T* c)
+ {
+ memcpy(m_array, c, sizeof(T) * Size);
+ return *this;
+ }
+
+ static unsigned size() { return Size; }
+ const T& operator [] (unsigned i) const { return m_array[i]; }
+ T& operator [] (unsigned i) { return m_array[i]; }
+ const T& at(unsigned i) const { return m_array[i]; }
+ T& at(unsigned i) { return m_array[i]; }
+ T value_at(unsigned i) const { return m_array[i]; }
+
+ private:
+ T m_array[Size];
+ };
+
+
+ //--------------------------------------------------------pod_auto_vector
+ template<class T, unsigned Size> class pod_auto_vector
+ {
+ public:
+ typedef T value_type;
+ typedef pod_auto_vector<T, Size> self_type;
+
+ pod_auto_vector() : m_size(0) {}
+
+ void remove_all() { m_size = 0; }
+ void clear() { m_size = 0; }
+ void add(const T& v) { m_array[m_size++] = v; }
+ void push_back(const T& v) { m_array[m_size++] = v; }
+ void inc_size(unsigned size) { m_size += size; }
+
+ unsigned size() const { return m_size; }
+ const T& operator [] (unsigned i) const { return m_array[i]; }
+ T& operator [] (unsigned i) { return m_array[i]; }
+ const T& at(unsigned i) const { return m_array[i]; }
+ T& at(unsigned i) { return m_array[i]; }
+ T value_at(unsigned i) const { return m_array[i]; }
+
+ private:
+ T m_array[Size];
+ unsigned m_size;
+ };
+
+
+ //---------------------------------------------------------------pod_array
+ template<class T> class pod_array
+ {
+ public:
+ typedef T value_type;
+ typedef pod_array<T> self_type;
+
+ ~pod_array() { pod_allocator<T>::deallocate(m_array, m_size); }
+ pod_array() : m_array(0), m_size(0) {}
+
+ pod_array(unsigned size) :
+ m_array(pod_allocator<T>::allocate(size)),
+ m_size(size)
+ {}
+
+ pod_array(const self_type& v) :
+ m_array(pod_allocator<T>::allocate(v.m_size)),
+ m_size(v.m_size)
+ {
+ memcpy(m_array, v.m_array, sizeof(T) * m_size);
+ }
+
+ void resize(unsigned size)
+ {
+ if(size != m_size)
+ {
+ pod_allocator<T>::deallocate(m_array, m_size);
+ m_array = pod_allocator<T>::allocate(m_size = size);
+ }
+ }
+ const self_type& operator = (const self_type& v)
+ {
+ resize(v.size());
+ memcpy(m_array, v.m_array, sizeof(T) * m_size);
+ return *this;
+ }
+
+ unsigned size() const { return m_size; }
+ const T& operator [] (unsigned i) const { return m_array[i]; }
+ T& operator [] (unsigned i) { return m_array[i]; }
+ const T& at(unsigned i) const { return m_array[i]; }
+ T& at(unsigned i) { return m_array[i]; }
+ T value_at(unsigned i) const { return m_array[i]; }
+
+ const T* data() const { return m_array; }
+ T* data() { return m_array; }
+ private:
+ T* m_array;
+ unsigned m_size;
+ };
+
+
+
+ //--------------------------------------------------------------pod_vector
+ // A simple class template to store Plain Old Data, a vector
+ // of a fixed size. The data is continuous in memory
+ //------------------------------------------------------------------------
+ template<class T> class pod_vector
+ {
+ public:
+ typedef T value_type;
+
+ ~pod_vector() { pod_allocator<T>::deallocate(m_array, m_capacity); }
+ pod_vector() : m_size(0), m_capacity(0), m_array(0) {}
+ pod_vector(unsigned cap, unsigned extra_tail=0);
+
+ // Copying
+ pod_vector(const pod_vector<T>&);
+ const pod_vector<T>& operator = (const pod_vector<T>&);
+
+ // Set new capacity. All data is lost, size is set to zero.
+ void capacity(unsigned cap, unsigned extra_tail=0);
+ unsigned capacity() const { return m_capacity; }
+
+ // Allocate n elements. All data is lost,
+ // but elements can be accessed in range 0...size-1.
+ void allocate(unsigned size, unsigned extra_tail=0);
+
+ // Resize keeping the content.
+ void resize(unsigned new_size);
+
+ void zero()
+ {
+ memset(m_array, 0, sizeof(T) * m_size);
+ }
+
+ void add(const T& v) { m_array[m_size++] = v; }
+ void push_back(const T& v) { m_array[m_size++] = v; }
+ void insert_at(unsigned pos, const T& val);
+ void inc_size(unsigned size) { m_size += size; }
+ unsigned size() const { return m_size; }
+ unsigned byte_size() const { return m_size * sizeof(T); }
+ void serialize(int8u* ptr) const;
+ void deserialize(const int8u* data, unsigned byte_size);
+ const T& operator [] (unsigned i) const { return m_array[i]; }
+ T& operator [] (unsigned i) { return m_array[i]; }
+ const T& at(unsigned i) const { return m_array[i]; }
+ T& at(unsigned i) { return m_array[i]; }
+ T value_at(unsigned i) const { return m_array[i]; }
+
+ const T* data() const { return m_array; }
+ T* data() { return m_array; }
+
+ void remove_all() { m_size = 0; }
+ void clear() { m_size = 0; }
+ void cut_at(unsigned num) { if(num < m_size) m_size = num; }
+
+ private:
+ unsigned m_size;
+ unsigned m_capacity;
+ T* m_array;
+ };
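// Usage note (annotation, not part of the upstream sources): add() and
// push_back() above simply write to m_array[m_size++] and never grow the
// storage, so capacity has to be reserved up front, e.g.:
//
//     agg::pod_vector<double> v(64);                        // capacity 64, size 0
//     for(unsigned i = 0; i < 64; ++i) v.push_back(0.5 * i);
//     double sum = 0;
//     for(unsigned i = 0; i < v.size(); ++i) sum += v[i];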
+
+ //------------------------------------------------------------------------
+ template<class T>
+ void pod_vector<T>::capacity(unsigned cap, unsigned extra_tail)
+ {
+ m_size = 0;
+ if(cap > m_capacity)
+ {
+ pod_allocator<T>::deallocate(m_array, m_capacity);
+ m_capacity = cap + extra_tail;
+ m_array = m_capacity ? pod_allocator<T>::allocate(m_capacity) : 0;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class T>
+ void pod_vector<T>::allocate(unsigned size, unsigned extra_tail)
+ {
+ capacity(size, extra_tail);
+ m_size = size;
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T>
+ void pod_vector<T>::resize(unsigned new_size)
+ {
+ if(new_size > m_size)
+ {
+ if(new_size > m_capacity)
+ {
+ T* data = pod_allocator<T>::allocate(new_size);
+ memcpy(data, m_array, m_size * sizeof(T));
+ pod_allocator<T>::deallocate(m_array, m_capacity);
+ m_array = data;
+ }
+ }
+ else
+ {
+ m_size = new_size;
+ }
+ }
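// Annotation (not part of the upstream sources): as written, when new_size
// exceeds the current capacity, resize() moves the old contents into a
// larger buffer but leaves the m_size and m_capacity fields unchanged;
// shrinking only adjusts m_size. Callers that need more usable elements
// typically reserve space with capacity() or allocate() instead.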
+
+ //------------------------------------------------------------------------
+ template<class T> pod_vector<T>::pod_vector(unsigned cap, unsigned extra_tail) :
+ m_size(0),
+ m_capacity(cap + extra_tail),
+ m_array(pod_allocator<T>::allocate(m_capacity)) {}
+
+ //------------------------------------------------------------------------
+ template<class T> pod_vector<T>::pod_vector(const pod_vector<T>& v) :
+ m_size(v.m_size),
+ m_capacity(v.m_capacity),
+ m_array(v.m_capacity ? pod_allocator<T>::allocate(v.m_capacity) : 0)
+ {
+ memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
+ }
+
+ //------------------------------------------------------------------------
+ template<class T> const pod_vector<T>&
+ pod_vector<T>::operator = (const pod_vector<T>&v)
+ {
+ allocate(v.m_size);
+ if(v.m_size) memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T> void pod_vector<T>::serialize(int8u* ptr) const
+ {
+ if(m_size) memcpy(ptr, m_array, m_size * sizeof(T));
+ }
+
+ //------------------------------------------------------------------------
+ template<class T>
+ void pod_vector<T>::deserialize(const int8u* data, unsigned byte_size)
+ {
+ byte_size /= sizeof(T);
+ allocate(byte_size);
+ if(byte_size) memcpy(m_array, data, byte_size * sizeof(T));
+ }
+
+ //------------------------------------------------------------------------
+ template<class T>
+ void pod_vector<T>::insert_at(unsigned pos, const T& val)
+ {
+ if(pos >= m_size)
+ {
+ m_array[m_size] = val;
+ }
+ else
+ {
+ memmove(m_array + pos + 1, m_array + pos, (m_size - pos) * sizeof(T));
+ m_array[pos] = val;
+ }
+ ++m_size;
+ }
+
+ //---------------------------------------------------------------pod_bvector
+ // A simple class template to store Plain Old Data, similar to std::deque
+ // It doesn't reallocate memory but instead uses blocks of data of size
+ // (1 << S), that is, a power of two. The data is NOT contiguous in memory,
+ // so the only valid access methods are operator [] or curr(), prev(), next().
+ //
+ // Reallocations occur only when the pool of pointers to blocks needs
+ // to be extended (this happens very rarely). You can control the
+ // increment used to reallocate the pointer buffer; see the second constructor.
+ // By default, the increment value equals (1 << S), i.e., the block size.
+ //------------------------------------------------------------------------
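// Worked example for the scheme above (annotation, not part of the upstream
// sources): with the default S=6 each block holds 1 << 6 = 64 elements, so
// element 130 is addressed as m_blocks[130 >> 6][130 & 63], i.e. block 2,
// offset 2.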
+ template<class T, unsigned S=6> class pod_bvector
+ {
+ public:
+ enum block_scale_e
+ {
+ block_shift = S,
+ block_size = 1 << block_shift,
+ block_mask = block_size - 1
+ };
+
+ typedef T value_type;
+
+ ~pod_bvector();
+ pod_bvector();
+ pod_bvector(unsigned block_ptr_inc);
+
+ // Copying
+ pod_bvector(const pod_bvector<T, S>& v);
+ const pod_bvector<T, S>& operator = (const pod_bvector<T, S>& v);
+
+ void remove_all() { m_size = 0; }
+ void clear() { m_size = 0; }
+ void free_all() { free_tail(0); }
+ void free_tail(unsigned size);
+ void add(const T& val);
+ void push_back(const T& val) { add(val); }
+ void modify_last(const T& val);
+ void remove_last();
+
+ int allocate_continuous_block(unsigned num_elements);
+
+ void add_array(const T* ptr, unsigned num_elem)
+ {
+ while(num_elem--)
+ {
+ add(*ptr++);
+ }
+ }
+
+ template<class DataAccessor> void add_data(DataAccessor& data)
+ {
+ while(data.size())
+ {
+ add(*data);
+ ++data;
+ }
+ }
+
+ void cut_at(unsigned size)
+ {
+ if(size < m_size) m_size = size;
+ }
+
+ unsigned size() const { return m_size; }
+
+ const T& operator [] (unsigned i) const
+ {
+ return m_blocks[i >> block_shift][i & block_mask];
+ }
+
+ T& operator [] (unsigned i)
+ {
+ return m_blocks[i >> block_shift][i & block_mask];
+ }
+
+ const T& at(unsigned i) const
+ {
+ return m_blocks[i >> block_shift][i & block_mask];
+ }
+
+ T& at(unsigned i)
+ {
+ return m_blocks[i >> block_shift][i & block_mask];
+ }
+
+ T value_at(unsigned i) const
+ {
+ return m_blocks[i >> block_shift][i & block_mask];
+ }
+
+ const T& curr(unsigned idx) const
+ {
+ return (*this)[idx];
+ }
+
+ T& curr(unsigned idx)
+ {
+ return (*this)[idx];
+ }
+
+ const T& prev(unsigned idx) const
+ {
+ return (*this)[(idx + m_size - 1) % m_size];
+ }
+
+ T& prev(unsigned idx)
+ {
+ return (*this)[(idx + m_size - 1) % m_size];
+ }
+
+ const T& next(unsigned idx) const
+ {
+ return (*this)[(idx + 1) % m_size];
+ }
+
+ T& next(unsigned idx)
+ {
+ return (*this)[(idx + 1) % m_size];
+ }
+
+ const T& last() const
+ {
+ return (*this)[m_size - 1];
+ }
+
+ T& last()
+ {
+ return (*this)[m_size - 1];
+ }
+
+ unsigned byte_size() const;
+ void serialize(int8u* ptr) const;
+ void deserialize(const int8u* data, unsigned byte_size);
+ void deserialize(unsigned start, const T& empty_val,
+ const int8u* data, unsigned byte_size);
+
+ template<class ByteAccessor>
+ void deserialize(ByteAccessor data)
+ {
+ remove_all();
+ unsigned elem_size = data.size() / sizeof(T);
+
+ for(unsigned i = 0; i < elem_size; ++i)
+ {
+ int8u* ptr = (int8u*)data_ptr();
+ for(unsigned j = 0; j < sizeof(T); ++j)
+ {
+ *ptr++ = *data;
+ ++data;
+ }
+ ++m_size;
+ }
+ }
+
+ template<class ByteAccessor>
+ void deserialize(unsigned start, const T& empty_val, ByteAccessor data)
+ {
+ while(m_size < start)
+ {
+ add(empty_val);
+ }
+
+ unsigned elem_size = data.size() / sizeof(T);
+ for(unsigned i = 0; i < elem_size; ++i)
+ {
+ int8u* ptr;
+ if(start + i < m_size)
+ {
+ ptr = (int8u*)(&((*this)[start + i]));
+ }
+ else
+ {
+ ptr = (int8u*)data_ptr();
+ ++m_size;
+ }
+ for(unsigned j = 0; j < sizeof(T); ++j)
+ {
+ *ptr++ = *data;
+ ++data;
+ }
+ }
+ }
+
+ const T* block(unsigned nb) const { return m_blocks[nb]; }
+
+ private:
+ void allocate_block(unsigned nb);
+ T* data_ptr();
+
+ unsigned m_size;
+ unsigned m_num_blocks;
+ unsigned m_max_blocks;
+ T** m_blocks;
+ unsigned m_block_ptr_inc;
+ };
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S> pod_bvector<T, S>::~pod_bvector()
+ {
+ if(m_num_blocks)
+ {
+ T** blk = m_blocks + m_num_blocks - 1;
+ while(m_num_blocks--)
+ {
+ pod_allocator<T>::deallocate(*blk, block_size);
+ --blk;
+ }
+ }
+ pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::free_tail(unsigned size)
+ {
+ if(size < m_size)
+ {
+ unsigned nb = (size + block_mask) >> block_shift;
+ while(m_num_blocks > nb)
+ {
+ pod_allocator<T>::deallocate(m_blocks[--m_num_blocks], block_size);
+ }
+ if(m_num_blocks == 0)
+ {
+ pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
+ m_blocks = 0;
+ m_max_blocks = 0;
+ }
+ m_size = size;
+ }
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S> pod_bvector<T, S>::pod_bvector() :
+ m_size(0),
+ m_num_blocks(0),
+ m_max_blocks(0),
+ m_blocks(0),
+ m_block_ptr_inc(block_size)
+ {
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ pod_bvector<T, S>::pod_bvector(unsigned block_ptr_inc) :
+ m_size(0),
+ m_num_blocks(0),
+ m_max_blocks(0),
+ m_blocks(0),
+ m_block_ptr_inc(block_ptr_inc)
+ {
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ pod_bvector<T, S>::pod_bvector(const pod_bvector<T, S>& v) :
+ m_size(v.m_size),
+ m_num_blocks(v.m_num_blocks),
+ m_max_blocks(v.m_max_blocks),
+ m_blocks(v.m_max_blocks ?
+ pod_allocator<T*>::allocate(v.m_max_blocks) :
+ 0),
+ m_block_ptr_inc(v.m_block_ptr_inc)
+ {
+ unsigned i;
+ for(i = 0; i < v.m_num_blocks; ++i)
+ {
+ m_blocks[i] = pod_allocator<T>::allocate(block_size);
+ memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
+ }
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ const pod_bvector<T, S>&
+ pod_bvector<T, S>::operator = (const pod_bvector<T, S>& v)
+ {
+ unsigned i;
+ for(i = m_num_blocks; i < v.m_num_blocks; ++i)
+ {
+ allocate_block(i);
+ }
+ for(i = 0; i < v.m_num_blocks; ++i)
+ {
+ memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
+ }
+ m_size = v.m_size;
+ return *this;
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::allocate_block(unsigned nb)
+ {
+ if(nb >= m_max_blocks)
+ {
+ T** new_blocks = pod_allocator<T*>::allocate(m_max_blocks + m_block_ptr_inc);
+
+ if(m_blocks)
+ {
+ memcpy(new_blocks,
+ m_blocks,
+ m_num_blocks * sizeof(T*));
+
+ pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
+ }
+ m_blocks = new_blocks;
+ m_max_blocks += m_block_ptr_inc;
+ }
+ m_blocks[nb] = pod_allocator<T>::allocate(block_size);
+ m_num_blocks++;
+ }
+
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ inline T* pod_bvector<T, S>::data_ptr()
+ {
+ unsigned nb = m_size >> block_shift;
+ if(nb >= m_num_blocks)
+ {
+ allocate_block(nb);
+ }
+ return m_blocks[nb] + (m_size & block_mask);
+ }
+
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ inline void pod_bvector<T, S>::add(const T& val)
+ {
+ *data_ptr() = val;
+ ++m_size;
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ inline void pod_bvector<T, S>::remove_last()
+ {
+ if(m_size) --m_size;
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::modify_last(const T& val)
+ {
+ remove_last();
+ add(val);
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ int pod_bvector<T, S>::allocate_continuous_block(unsigned num_elements)
+ {
+ if(num_elements < block_size)
+ {
+ data_ptr(); // Allocate initial block if necessary
+ unsigned rest = block_size - (m_size & block_mask);
+ unsigned index;
+ if(num_elements <= rest)
+ {
+ // The rest of the block is good, we can use it
+ //-----------------
+ index = m_size;
+ m_size += num_elements;
+ return index;
+ }
+
+ // New block
+ //---------------
+ m_size += rest;
+ data_ptr();
+ index = m_size;
+ m_size += num_elements;
+ return index;
+ }
+ return -1; // Impossible to allocate
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ unsigned pod_bvector<T, S>::byte_size() const
+ {
+ return m_size * sizeof(T);
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::serialize(int8u* ptr) const
+ {
+ unsigned i;
+ for(i = 0; i < m_size; i++)
+ {
+ memcpy(ptr, &(*this)[i], sizeof(T));
+ ptr += sizeof(T);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::deserialize(const int8u* data, unsigned byte_size)
+ {
+ remove_all();
+ byte_size /= sizeof(T);
+ for(unsigned i = 0; i < byte_size; ++i)
+ {
+ T* ptr = data_ptr();
+ memcpy(ptr, data, sizeof(T));
+ ++m_size;
+ data += sizeof(T);
+ }
+ }
+
+
+ // Replace or add a number of elements starting from "start" position
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ void pod_bvector<T, S>::deserialize(unsigned start, const T& empty_val,
+ const int8u* data, unsigned byte_size)
+ {
+ while(m_size < start)
+ {
+ add(empty_val);
+ }
+
+ byte_size /= sizeof(T);
+ for(unsigned i = 0; i < byte_size; ++i)
+ {
+ if(start + i < m_size)
+ {
+ memcpy(&((*this)[start + i]), data, sizeof(T));
+ }
+ else
+ {
+ T* ptr = data_ptr();
+ memcpy(ptr, data, sizeof(T));
+ ++m_size;
+ }
+ data += sizeof(T);
+ }
+ }
+
+
+ //---------------------------------------------------------block_allocator
+ // Allocator for arbitrary POD data. Most useful in various caching
+ // systems for efficient memory allocation.
+ // Memory is allocated in blocks of fixed size ("block_size" in
+ // the constructor). If the required size exceeds the block size, the
+ // allocator creates a new block of the required size. However, it is most
+ // efficient when the average required size is much less than the block size.
+ //------------------------------------------------------------------------
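// A minimal usage sketch (an illustration, not part of the upstream
// sources); the 16-byte alignment of the second request is an arbitrary
// choice for the example:
//
//     agg::block_allocator pool(4096);           // 4 KiB blocks
//     agg::int8u* a = pool.allocate(100);        // served from the current block
//     agg::int8u* b = pool.allocate(256, 16);    // aligned to 16 bytes
//     pool.remove_all();                         // releases every block at once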
+ class block_allocator
+ {
+ struct block_type
+ {
+ int8u* data;
+ unsigned size;
+ };
+
+ public:
+ void remove_all()
+ {
+ if(m_num_blocks)
+ {
+ block_type* blk = m_blocks + m_num_blocks - 1;
+ while(m_num_blocks--)
+ {
+ pod_allocator<int8u>::deallocate(blk->data, blk->size);
+ --blk;
+ }
+ pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
+ }
+ m_num_blocks = 0;
+ m_max_blocks = 0;
+ m_blocks = 0;
+ m_buf_ptr = 0;
+ m_rest = 0;
+ }
+
+ ~block_allocator()
+ {
+ remove_all();
+ }
+
+ block_allocator(unsigned block_size, unsigned block_ptr_inc=256-8) :
+ m_block_size(block_size),
+ m_block_ptr_inc(block_ptr_inc),
+ m_num_blocks(0),
+ m_max_blocks(0),
+ m_blocks(0),
+ m_buf_ptr(0),
+ m_rest(0)
+ {
+ }
+
+
+ int8u* allocate(unsigned size, unsigned alignment=1)
+ {
+ if(size == 0) return 0;
+ if(size <= m_rest)
+ {
+ int8u* ptr = m_buf_ptr;
+ if(alignment > 1)
+ {
+ unsigned align =
+ (alignment - unsigned((size_t)ptr) % alignment) % alignment;
+
+ size += align;
+ ptr += align;
+ if(size <= m_rest)
+ {
+ m_rest -= size;
+ m_buf_ptr += size;
+ return ptr;
+ }
+ allocate_block(size);
+ return allocate(size - align, alignment);
+ }
+ m_rest -= size;
+ m_buf_ptr += size;
+ return ptr;
+ }
+ allocate_block(size + alignment - 1);
+ return allocate(size, alignment);
+ }
+
+
+ private:
+ void allocate_block(unsigned size)
+ {
+ if(size < m_block_size) size = m_block_size;
+ if(m_num_blocks >= m_max_blocks)
+ {
+ block_type* new_blocks =
+ pod_allocator<block_type>::allocate(m_max_blocks + m_block_ptr_inc);
+
+ if(m_blocks)
+ {
+ memcpy(new_blocks,
+ m_blocks,
+ m_num_blocks * sizeof(block_type));
+ pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
+ }
+ m_blocks = new_blocks;
+ m_max_blocks += m_block_ptr_inc;
+ }
+
+ m_blocks[m_num_blocks].size = size;
+ m_blocks[m_num_blocks].data =
+ m_buf_ptr =
+ pod_allocator<int8u>::allocate(size);
+
+ m_num_blocks++;
+ m_rest = size;
+ }
+
+ unsigned m_block_size;
+ unsigned m_block_ptr_inc;
+ unsigned m_num_blocks;
+ unsigned m_max_blocks;
+ block_type* m_blocks;
+ int8u* m_buf_ptr;
+ unsigned m_rest;
+ };
+
+
+
+
+
+
+
+
+ //------------------------------------------------------------------------
+ enum quick_sort_threshold_e
+ {
+ quick_sort_threshold = 9
+ };
+
+
+ //-----------------------------------------------------------swap_elements
+ template<class T> inline void swap_elements(T& a, T& b)
+ {
+ T temp = a;
+ a = b;
+ b = temp;
+ }
+
+
+ //--------------------------------------------------------------quick_sort
+ template<class Array, class Less>
+ void quick_sort(Array& arr, Less less)
+ {
+ if(arr.size() < 2) return;
+
+ typename Array::value_type* e1;
+ typename Array::value_type* e2;
+
+ int stack[80];
+ int* top = stack;
+ int limit = arr.size();
+ int base = 0;
+
+ for(;;)
+ {
+ int len = limit - base;
+
+ int i;
+ int j;
+ int pivot;
+
+ if(len > quick_sort_threshold)
+ {
+ // we use base + len/2 as the pivot
+ pivot = base + len / 2;
+ swap_elements(arr[base], arr[pivot]);
+
+ i = base + 1;
+ j = limit - 1;
+
+ // now ensure that *i <= *base <= *j
+ e1 = &(arr[j]);
+ e2 = &(arr[i]);
+ if(less(*e1, *e2)) swap_elements(*e1, *e2);
+
+ e1 = &(arr[base]);
+ e2 = &(arr[i]);
+ if(less(*e1, *e2)) swap_elements(*e1, *e2);
+
+ e1 = &(arr[j]);
+ e2 = &(arr[base]);
+ if(less(*e1, *e2)) swap_elements(*e1, *e2);
+
+ for(;;)
+ {
+ do i++; while( less(arr[i], arr[base]) );
+ do j--; while( less(arr[base], arr[j]) );
+
+ if( i > j )
+ {
+ break;
+ }
+
+ swap_elements(arr[i], arr[j]);
+ }
+
+ swap_elements(arr[base], arr[j]);
+
+ // now, push the largest sub-array
+ if(j - base > limit - i)
+ {
+ top[0] = base;
+ top[1] = j;
+ base = i;
+ }
+ else
+ {
+ top[0] = i;
+ top[1] = limit;
+ limit = j;
+ }
+ top += 2;
+ }
+ else
+ {
+ // the sub-array is small, perform insertion sort
+ j = base;
+ i = j + 1;
+
+ for(; i < limit; j = i, i++)
+ {
+ for(; less(*(e1 = &(arr[j + 1])), *(e2 = &(arr[j]))); j--)
+ {
+ swap_elements(*e1, *e2);
+ if(j == base)
+ {
+ break;
+ }
+ }
+ }
+ if(top > stack)
+ {
+ top -= 2;
+ base = top[0];
+ limit = top[1];
+ }
+ else
+ {
+ break;
+ }
+ }
+ }
+ }
+
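// A minimal usage sketch (an illustration, not part of the upstream
// sources), using the int_less predicate defined near the end of this
// header and the pod_vector container from above:
//
//     agg::pod_vector<int> v(8);
//     v.push_back(3); v.push_back(1); v.push_back(2);
//     agg::quick_sort(v, agg::int_less);          // v is now {1, 2, 3}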
+
+
+
+ //------------------------------------------------------remove_duplicates
+ // Remove duplicates from a sorted array. It doesn't cut the
+ // tail of the array, it just returns the number of remaining elements.
+ //-----------------------------------------------------------------------
+ template<class Array, class Equal>
+ unsigned remove_duplicates(Array& arr, Equal equal)
+ {
+ if(arr.size() < 2) return arr.size();
+
+ unsigned i, j;
+ for(i = 1, j = 1; i < arr.size(); i++)
+ {
+ typename Array::value_type& e = arr[i];
+ if(!equal(e, arr[i - 1]))
+ {
+ arr[j++] = e;
+ }
+ }
+ return j;
+ }
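// A typical pattern with the helper above (an illustration, not part of the
// upstream sources): sort first, then trim the tail explicitly, since
// remove_duplicates() only reports the new logical size. int_equal is a
// user-supplied predicate here, not something this header provides:
//
//     inline bool int_equal(int a, int b) { return a == b; }
//
//     agg::quick_sort(v, agg::int_less);
//     unsigned n = agg::remove_duplicates(v, int_equal);
//     v.cut_at(n);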
+
+ //--------------------------------------------------------invert_container
+ template<class Array> void invert_container(Array& arr)
+ {
+ int i = 0;
+ int j = arr.size() - 1;
+ while(i < j)
+ {
+ swap_elements(arr[i++], arr[j--]);
+ }
+ }
+
+ //------------------------------------------------------binary_search_pos
+ template<class Array, class Value, class Less>
+ unsigned binary_search_pos(const Array& arr, const Value& val, Less less)
+ {
+ if(arr.size() == 0) return 0;
+
+ unsigned beg = 0;
+ unsigned end = arr.size() - 1;
+
+ if(less(val, arr[0])) return 0;
+ if(less(arr[end], val)) return end + 1;
+
+ while(end - beg > 1)
+ {
+ unsigned mid = (end + beg) >> 1;
+ if(less(val, arr[mid])) end = mid;
+ else beg = mid;
+ }
+
+ //if(beg <= 0 && less(val, arr[0])) return 0;
+ //if(end >= arr.size() - 1 && less(arr[end], val)) ++end;
+
+ return end;
+ }
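// Annotation (not part of the upstream sources): for an array sorted with
// the same `less` predicate, the function above returns the position at
// which `val` could be inserted while keeping the array sorted; it returns
// 0 when val is less than the first element and arr.size() when val is
// greater than the last.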
+
+ //----------------------------------------------------------range_adaptor
+ template<class Array> class range_adaptor
+ {
+ public:
+ typedef typename Array::value_type value_type;
+
+ range_adaptor(Array& array, unsigned start, unsigned size) :
+ m_array(array), m_start(start), m_size(size)
+ {}
+
+ unsigned size() const { return m_size; }
+ const value_type& operator [] (unsigned i) const { return m_array[m_start + i]; }
+ value_type& operator [] (unsigned i) { return m_array[m_start + i]; }
+ const value_type& at(unsigned i) const { return m_array[m_start + i]; }
+ value_type& at(unsigned i) { return m_array[m_start + i]; }
+ value_type value_at(unsigned i) const { return m_array[m_start + i]; }
+
+ private:
+ Array& m_array;
+ unsigned m_start;
+ unsigned m_size;
+ };
+
+ //---------------------------------------------------------------int_less
+ inline bool int_less(int a, int b) { return a < b; }
+
+ //------------------------------------------------------------int_greater
+ inline bool int_greater(int a, int b) { return a > b; }
+
+ //----------------------------------------------------------unsigned_less
+ inline bool unsigned_less(unsigned a, unsigned b) { return a < b; }
+
+ //-------------------------------------------------------unsigned_greater
+ inline bool unsigned_greater(unsigned a, unsigned b) { return a > b; }
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_basics.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_basics.h
new file mode 100644
index 00000000000..309713002b3
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_basics.h
@@ -0,0 +1,560 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_BASICS_INCLUDED
+#define AGG_BASICS_INCLUDED
+
+#include <math.h>
+#include "agg_config.h"
+
+//---------------------------------------------------------AGG_CUSTOM_ALLOCATOR
+#ifdef AGG_CUSTOM_ALLOCATOR
+#include "agg_allocator.h"
+#else
+namespace agg
+{
+    // The policy of all AGG containers and the memory allocation strategy
+    // in general is that no allocated data requires explicit construction.
+    // This means that the allocator can be really simple; you can even
+    // replace new/delete with malloc/free. The constructors and destructors
+    // won't be called in that case, but everything will keep working.
+    // The second argument of deallocate() is the size of the allocated
+    // block. You can use this information if you wish.
+ //------------------------------------------------------------pod_allocator
+ template<class T> struct pod_allocator
+ {
+ static T* allocate(unsigned num) { return new T [num]; }
+ static void deallocate(T* ptr, unsigned) { delete [] ptr; }
+ };
+
+    // Single object allocator. It can also be replaced with your custom
+    // allocator. The difference is that it only allocates a single
+    // object, and the constructor and destructor must be called.
+    // In AGG there is no need to allocate an array of objects and call
+    // their constructors (only single objects are allocated this way). So,
+    // if you replace new/delete with malloc/free, make sure that placement
+    // new is called and take care of calling the destructor too.
+ //------------------------------------------------------------obj_allocator
+ template<class T> struct obj_allocator
+ {
+ static T* allocate() { return new T; }
+ static void deallocate(T* ptr) { delete ptr; }
+ };
+}
+#endif
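
A malloc/free-based agg_allocator.h consistent with the comments above would look roughly like this (illustrative sketch, not part of the patch; it is picked up when AGG_CUSTOM_ALLOCATOR is defined):

    // agg_allocator.h (hypothetical)
    #include <cstdlib>
    #include <new>

    namespace agg
    {
        // POD-style storage: constructors/destructors are never required here,
        // so raw malloc/free is enough.
        template<class T> struct pod_allocator
        {
            static T*   allocate(unsigned num)       { return static_cast<T*>(std::malloc(num * sizeof(T))); }
            static void deallocate(T* ptr, unsigned) { std::free(ptr); }
        };

        // Single objects do need construction/destruction, hence placement new
        // and an explicit destructor call (error handling omitted).
        template<class T> struct obj_allocator
        {
            static T*   allocate()         { return new(std::malloc(sizeof(T))) T; }
            static void deallocate(T* ptr) { ptr->~T(); std::free(ptr); }
        };
    }
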
+
+
+//-------------------------------------------------------- Default basic types
+//
+// If your compiler uses different sizes for the basic types, you can redefine
+// them on the compiler command line or in agg_config.h, which is empty by
+// default.
+//
+#ifndef AGG_INT8
+#define AGG_INT8 signed char
+#endif
+
+#ifndef AGG_INT8U
+#define AGG_INT8U unsigned char
+#endif
+
+#ifndef AGG_INT16
+#define AGG_INT16 short
+#endif
+
+#ifndef AGG_INT16U
+#define AGG_INT16U unsigned short
+#endif
+
+#ifndef AGG_INT32
+#define AGG_INT32 int
+#endif
+
+#ifndef AGG_INT32U
+#define AGG_INT32U unsigned
+#endif
+
+#ifndef AGG_INT64
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+#define AGG_INT64 signed __int64
+#else
+#define AGG_INT64 signed long long
+#endif
+#endif
+
+#ifndef AGG_INT64U
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+#define AGG_INT64U unsigned __int64
+#else
+#define AGG_INT64U unsigned long long
+#endif
+#endif
+
+//------------------------------------------------ Some fixes for MS Visual C++
+#if defined(_MSC_VER)
+#pragma warning(disable:4786) // Identifier was truncated...
+#endif
+
+#if defined(_MSC_VER)
+#define AGG_INLINE __forceinline
+#else
+#define AGG_INLINE inline
+#endif
+
+namespace agg
+{
+ //-------------------------------------------------------------------------
+ typedef AGG_INT8 int8; //----int8
+ typedef AGG_INT8U int8u; //----int8u
+ typedef AGG_INT16 int16; //----int16
+ typedef AGG_INT16U int16u; //----int16u
+ typedef AGG_INT32 int32; //----int32
+ typedef AGG_INT32U int32u; //----int32u
+ typedef AGG_INT64 int64; //----int64
+ typedef AGG_INT64U int64u; //----int64u
+
+#if defined(AGG_FISTP)
+#pragma warning(push)
+#pragma warning(disable : 4035) //Disable warning "no return value"
+ AGG_INLINE int iround(double v) //-------iround
+ {
+ int t;
+ __asm fld qword ptr [v]
+ __asm fistp dword ptr [t]
+ __asm mov eax, dword ptr [t]
+ }
+ AGG_INLINE unsigned uround(double v) //-------uround
+ {
+ unsigned t;
+ __asm fld qword ptr [v]
+ __asm fistp dword ptr [t]
+ __asm mov eax, dword ptr [t]
+ }
+#pragma warning(pop)
+ AGG_INLINE int ifloor(double v)
+ {
+ return int(floor(v));
+ }
+ AGG_INLINE unsigned ufloor(double v) //-------ufloor
+ {
+ return unsigned(floor(v));
+ }
+ AGG_INLINE int iceil(double v)
+ {
+ return int(ceil(v));
+ }
+ AGG_INLINE unsigned uceil(double v) //--------uceil
+ {
+ return unsigned(ceil(v));
+ }
+#elif defined(AGG_QIFIST)
+ AGG_INLINE int iround(double v)
+ {
+ return int(v);
+ }
+    AGG_INLINE unsigned uround(double v)
+ {
+ return unsigned(v);
+ }
+ AGG_INLINE int ifloor(double v)
+ {
+ return int(floor(v));
+ }
+ AGG_INLINE unsigned ufloor(double v)
+ {
+ return unsigned(floor(v));
+ }
+ AGG_INLINE int iceil(double v)
+ {
+ return int(ceil(v));
+ }
+ AGG_INLINE unsigned uceil(double v)
+ {
+ return unsigned(ceil(v));
+ }
+#else
+ AGG_INLINE int iround(double v)
+ {
+ return int((v < 0.0) ? v - 0.5 : v + 0.5);
+ }
+    AGG_INLINE unsigned uround(double v)
+ {
+ return unsigned(v + 0.5);
+ }
+ AGG_INLINE int ifloor(double v)
+ {
+ int i = int(v);
+ return i - (i > v);
+ }
+ AGG_INLINE unsigned ufloor(double v)
+ {
+ return unsigned(v);
+ }
+ AGG_INLINE int iceil(double v)
+ {
+ return int(ceil(v));
+ }
+ AGG_INLINE unsigned uceil(double v)
+ {
+ return unsigned(ceil(v));
+ }
+#endif
+
+ //---------------------------------------------------------------saturation
+ template<int Limit> struct saturation
+ {
+ AGG_INLINE static int iround(double v)
+ {
+ if(v < double(-Limit)) return -Limit;
+ if(v > double( Limit)) return Limit;
+ return agg::iround(v);
+ }
+ };
+
+ //------------------------------------------------------------------mul_one
+ template<unsigned Shift> struct mul_one
+ {
+ AGG_INLINE static unsigned mul(unsigned a, unsigned b)
+ {
+ unsigned q = a * b + (1 << (Shift-1));
+ return (q + (q >> Shift)) >> Shift;
+ }
+ };
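
A few concrete values for the portable branch above (neither AGG_FISTP nor AGG_QIFIST defined) and for the saturation/mul_one helpers; illustrative only:

    #include <cassert>
    #include "agg_basics.h"

    int main()
    {
        // iround() rounds half away from zero; ifloor() is a true floor.
        assert(agg::iround( 2.5) ==  3);
        assert(agg::iround(-2.5) == -3);
        assert(agg::ifloor(-0.25) == -1);

        // saturation<> clamps before rounding.
        assert(agg::saturation<255>::iround( 300.7) ==  255);
        assert(agg::saturation<255>::iround(-300.7) == -255);

        // mul_one<8> is a fixed-point multiply in which 255 acts as 1.0.
        assert(agg::mul_one<8>::mul(255, 255) == 255);
        assert(agg::mul_one<8>::mul(128, 255) == 128);
        return 0;
    }
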
+
+ //-------------------------------------------------------------------------
+ typedef unsigned char cover_type; //----cover_type
+ enum cover_scale_e
+ {
+ cover_shift = 8, //----cover_shift
+ cover_size = 1 << cover_shift, //----cover_size
+ cover_mask = cover_size - 1, //----cover_mask
+ cover_none = 0, //----cover_none
+ cover_full = cover_mask //----cover_full
+ };
+
+ //----------------------------------------------------poly_subpixel_scale_e
+    // These constants determine the subpixel accuracy, or more precisely,
+    // the number of bits of the fractional part of the coordinates.
+    // The available coordinate capacity in bits can be calculated by the
+    // formula: sizeof(int) * 8 - poly_subpixel_shift, i.e., for 32-bit
+    // integers and an 8-bit fractional part the capacity is 24 bits.
+ enum poly_subpixel_scale_e
+ {
+ poly_subpixel_shift = 8, //----poly_subpixel_shift
+ poly_subpixel_scale = 1<<poly_subpixel_shift, //----poly_subpixel_scale
+ poly_subpixel_mask = poly_subpixel_scale-1 //----poly_subpixel_mask
+ };
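
A small sketch of the fixed-point coordinate encoding described above (8 fractional bits; with a 32-bit int that leaves 24 bits for the integer part); illustrative only:

    #include <cassert>
    #include "agg_basics.h"

    int main()
    {
        double x = 12.625;
        int fx = agg::iround(x * agg::poly_subpixel_scale);   // 12.625 * 256 = 3232
        assert(fx == 3232);

        int whole = fx >> agg::poly_subpixel_shift;           // 12
        int frac  = fx &  agg::poly_subpixel_mask;            // 160 == 0.625 * 256
        assert(whole == 12 && frac == 160);
        return 0;
    }
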
+
+ //----------------------------------------------------------filling_rule_e
+ enum filling_rule_e
+ {
+ fill_non_zero,
+ fill_even_odd
+ };
+
+ //-----------------------------------------------------------------------pi
+ const double pi = 3.14159265358979323846;
+
+ //------------------------------------------------------------------deg2rad
+ inline double deg2rad(double deg)
+ {
+ return deg * pi / 180.0;
+ }
+
+ //------------------------------------------------------------------rad2deg
+ inline double rad2deg(double rad)
+ {
+ return rad * 180.0 / pi;
+ }
+
+ //----------------------------------------------------------------rect_base
+ template<class T> struct rect_base
+ {
+ typedef T value_type;
+ typedef rect_base<T> self_type;
+ T x1, y1, x2, y2;
+
+ rect_base() {}
+ rect_base(T x1_, T y1_, T x2_, T y2_) :
+ x1(x1_), y1(y1_), x2(x2_), y2(y2_) {}
+
+ void init(T x1_, T y1_, T x2_, T y2_)
+ {
+ x1 = x1_; y1 = y1_; x2 = x2_; y2 = y2_;
+ }
+
+ const self_type& normalize()
+ {
+ T t;
+ if(x1 > x2) { t = x1; x1 = x2; x2 = t; }
+ if(y1 > y2) { t = y1; y1 = y2; y2 = t; }
+ return *this;
+ }
+
+ bool clip(const self_type& r)
+ {
+ if(x2 > r.x2) x2 = r.x2;
+ if(y2 > r.y2) y2 = r.y2;
+ if(x1 < r.x1) x1 = r.x1;
+ if(y1 < r.y1) y1 = r.y1;
+ return x1 <= x2 && y1 <= y2;
+ }
+
+ bool is_valid() const
+ {
+ return x1 <= x2 && y1 <= y2;
+ }
+
+ bool hit_test(T x, T y) const
+ {
+ return (x >= x1 && x <= x2 && y >= y1 && y <= y2);
+ }
+
+ bool overlaps(const self_type& r) const
+ {
+ return !(r.x1 > x2 || r.x2 < x1
+ || r.y1 > y2 || r.y2 < y1);
+ }
+ };
+
+ //-----------------------------------------------------intersect_rectangles
+ template<class Rect>
+ inline Rect intersect_rectangles(const Rect& r1, const Rect& r2)
+ {
+ Rect r = r1;
+
+ // First process x2,y2 because the other order
+ // results in Internal Compiler Error under
+ // Microsoft Visual C++ .NET 2003 69462-335-0000007-18038 in
+ // case of "Maximize Speed" optimization option.
+ //-----------------
+ if(r.x2 > r2.x2) r.x2 = r2.x2;
+ if(r.y2 > r2.y2) r.y2 = r2.y2;
+ if(r.x1 < r2.x1) r.x1 = r2.x1;
+ if(r.y1 < r2.y1) r.y1 = r2.y1;
+ return r;
+ }
+
+
+ //---------------------------------------------------------unite_rectangles
+ template<class Rect>
+ inline Rect unite_rectangles(const Rect& r1, const Rect& r2)
+ {
+ Rect r = r1;
+ if(r.x2 < r2.x2) r.x2 = r2.x2;
+ if(r.y2 < r2.y2) r.y2 = r2.y2;
+ if(r.x1 > r2.x1) r.x1 = r2.x1;
+ if(r.y1 > r2.y1) r.y1 = r2.y1;
+ return r;
+ }
+
+ typedef rect_base<int> rect_i; //----rect_i
+ typedef rect_base<float> rect_f; //----rect_f
+ typedef rect_base<double> rect_d; //----rect_d
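
A short sketch of rect_base and the rectangle helpers above (normalize, hit_test, intersect_rectangles); illustrative only:

    #include <cassert>
    #include "agg_basics.h"

    int main()
    {
        agg::rect_i r(30, 40, 10, 20);        // deliberately reversed corners
        r.normalize();                        // -> (10, 20, 30, 40)
        assert(r.is_valid());
        assert(r.hit_test(15, 25));

        agg::rect_i clip_box(0, 0, 25, 25);
        agg::rect_i c = agg::intersect_rectangles(r, clip_box);
        assert(c.x1 == 10 && c.y1 == 20 && c.x2 == 25 && c.y2 == 25);
        return 0;
    }
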
+
+ //---------------------------------------------------------path_commands_e
+ enum path_commands_e
+ {
+ path_cmd_stop = 0, //----path_cmd_stop
+ path_cmd_move_to = 1, //----path_cmd_move_to
+ path_cmd_line_to = 2, //----path_cmd_line_to
+ path_cmd_curve3 = 3, //----path_cmd_curve3
+ path_cmd_curve4 = 4, //----path_cmd_curve4
+ path_cmd_curveN = 5, //----path_cmd_curveN
+ path_cmd_catrom = 6, //----path_cmd_catrom
+ path_cmd_ubspline = 7, //----path_cmd_ubspline
+ path_cmd_end_poly = 0x0F, //----path_cmd_end_poly
+ path_cmd_mask = 0x0F //----path_cmd_mask
+ };
+
+ //------------------------------------------------------------path_flags_e
+ enum path_flags_e
+ {
+ path_flags_none = 0, //----path_flags_none
+ path_flags_ccw = 0x10, //----path_flags_ccw
+ path_flags_cw = 0x20, //----path_flags_cw
+ path_flags_close = 0x40, //----path_flags_close
+ path_flags_mask = 0xF0 //----path_flags_mask
+ };
+
+ //---------------------------------------------------------------is_vertex
+ inline bool is_vertex(unsigned c)
+ {
+ return c >= path_cmd_move_to && c < path_cmd_end_poly;
+ }
+
+ //--------------------------------------------------------------is_drawing
+ inline bool is_drawing(unsigned c)
+ {
+ return c >= path_cmd_line_to && c < path_cmd_end_poly;
+ }
+
+ //-----------------------------------------------------------------is_stop
+ inline bool is_stop(unsigned c)
+ {
+ return c == path_cmd_stop;
+ }
+
+ //--------------------------------------------------------------is_move_to
+ inline bool is_move_to(unsigned c)
+ {
+ return c == path_cmd_move_to;
+ }
+
+ //--------------------------------------------------------------is_line_to
+ inline bool is_line_to(unsigned c)
+ {
+ return c == path_cmd_line_to;
+ }
+
+ //----------------------------------------------------------------is_curve
+ inline bool is_curve(unsigned c)
+ {
+ return c == path_cmd_curve3 || c == path_cmd_curve4;
+ }
+
+ //---------------------------------------------------------------is_curve3
+ inline bool is_curve3(unsigned c)
+ {
+ return c == path_cmd_curve3;
+ }
+
+ //---------------------------------------------------------------is_curve4
+ inline bool is_curve4(unsigned c)
+ {
+ return c == path_cmd_curve4;
+ }
+
+ //-------------------------------------------------------------is_end_poly
+ inline bool is_end_poly(unsigned c)
+ {
+ return (c & path_cmd_mask) == path_cmd_end_poly;
+ }
+
+ //----------------------------------------------------------------is_close
+ inline bool is_close(unsigned c)
+ {
+ return (c & ~(path_flags_cw | path_flags_ccw)) ==
+ (path_cmd_end_poly | path_flags_close);
+ }
+
+ //------------------------------------------------------------is_next_poly
+ inline bool is_next_poly(unsigned c)
+ {
+ return is_stop(c) || is_move_to(c) || is_end_poly(c);
+ }
+
+ //-------------------------------------------------------------------is_cw
+ inline bool is_cw(unsigned c)
+ {
+ return (c & path_flags_cw) != 0;
+ }
+
+ //------------------------------------------------------------------is_ccw
+ inline bool is_ccw(unsigned c)
+ {
+ return (c & path_flags_ccw) != 0;
+ }
+
+ //-------------------------------------------------------------is_oriented
+ inline bool is_oriented(unsigned c)
+ {
+ return (c & (path_flags_cw | path_flags_ccw)) != 0;
+ }
+
+ //---------------------------------------------------------------is_closed
+ inline bool is_closed(unsigned c)
+ {
+ return (c & path_flags_close) != 0;
+ }
+
+ //----------------------------------------------------------get_close_flag
+ inline unsigned get_close_flag(unsigned c)
+ {
+ return c & path_flags_close;
+ }
+
+ //-------------------------------------------------------clear_orientation
+ inline unsigned clear_orientation(unsigned c)
+ {
+ return c & ~(path_flags_cw | path_flags_ccw);
+ }
+
+ //---------------------------------------------------------get_orientation
+ inline unsigned get_orientation(unsigned c)
+ {
+ return c & (path_flags_cw | path_flags_ccw);
+ }
+
+ //---------------------------------------------------------set_orientation
+ inline unsigned set_orientation(unsigned c, unsigned o)
+ {
+ return clear_orientation(c) | o;
+ }
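
The command and flag helpers above all operate on a single packed unsigned: the low nibble carries the path command, the high nibble the flags. A small sketch, illustrative only:

    #include <cassert>
    #include "agg_basics.h"

    int main()
    {
        unsigned cmd = agg::path_cmd_end_poly
                     | agg::path_flags_close
                     | agg::path_flags_ccw;             // 0x0F | 0x40 | 0x10

        assert(agg::is_end_poly(cmd));
        assert(agg::is_close(cmd));
        assert(agg::is_ccw(cmd) && !agg::is_cw(cmd));
        assert(agg::get_close_flag(cmd) == agg::path_flags_close);

        unsigned flipped = agg::set_orientation(cmd, agg::path_flags_cw);
        assert(agg::is_cw(flipped) && !agg::is_ccw(flipped));
        return 0;
    }
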
+
+ //--------------------------------------------------------------point_base
+ template<class T> struct point_base
+ {
+ typedef T value_type;
+ T x,y;
+ point_base() {}
+ point_base(T x_, T y_) : x(x_), y(y_) {}
+ };
+ typedef point_base<int> point_i; //-----point_i
+ typedef point_base<float> point_f; //-----point_f
+ typedef point_base<double> point_d; //-----point_d
+
+ //-------------------------------------------------------------vertex_base
+ template<class T> struct vertex_base
+ {
+ typedef T value_type;
+ T x,y;
+ unsigned cmd;
+ vertex_base() {}
+ vertex_base(T x_, T y_, unsigned cmd_) : x(x_), y(y_), cmd(cmd_) {}
+ };
+ typedef vertex_base<int> vertex_i; //-----vertex_i
+ typedef vertex_base<float> vertex_f; //-----vertex_f
+ typedef vertex_base<double> vertex_d; //-----vertex_d
+
+ //----------------------------------------------------------------row_info
+ template<class T> struct row_info
+ {
+ int x1, x2;
+ T* ptr;
+ row_info() {}
+ row_info(int x1_, int x2_, T* ptr_) : x1(x1_), x2(x2_), ptr(ptr_) {}
+ };
+
+ //----------------------------------------------------------const_row_info
+ template<class T> struct const_row_info
+ {
+ int x1, x2;
+ const T* ptr;
+ const_row_info() {}
+ const_row_info(int x1_, int x2_, const T* ptr_) :
+ x1(x1_), x2(x2_), ptr(ptr_) {}
+ };
+
+ //------------------------------------------------------------is_equal_eps
+ template<class T> inline bool is_equal_eps(T v1, T v2, T epsilon)
+ {
+ return fabs(v1 - v2) <= double(epsilon);
+ }
+}
+
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_bezier_arc.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_bezier_arc.h
new file mode 100644
index 00000000000..6d98d1a9f0d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_bezier_arc.h
@@ -0,0 +1,159 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Arc generator. Produces at most 4 consecutive cubic bezier curves, i.e.,
+// 4, 7, 10, or 13 vertices.
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_BEZIER_ARC_INCLUDED
+#define AGG_BEZIER_ARC_INCLUDED
+
+#include "agg_conv_transform.h"
+
+namespace agg
+{
+
+ //-----------------------------------------------------------------------
+ void arc_to_bezier(double cx, double cy, double rx, double ry,
+ double start_angle, double sweep_angle,
+ double* curve);
+
+
+ //==============================================================bezier_arc
+ //
+    // See the implementation in agg_bezier_arc.cpp
+ //
+ class bezier_arc
+ {
+ public:
+ //--------------------------------------------------------------------
+ bezier_arc() : m_vertex(26), m_num_vertices(0), m_cmd(path_cmd_line_to) {}
+ bezier_arc(double x, double y,
+ double rx, double ry,
+ double start_angle,
+ double sweep_angle)
+ {
+ init(x, y, rx, ry, start_angle, sweep_angle);
+ }
+
+ //--------------------------------------------------------------------
+ void init(double x, double y,
+ double rx, double ry,
+ double start_angle,
+ double sweep_angle);
+
+ //--------------------------------------------------------------------
+ void rewind(unsigned)
+ {
+ m_vertex = 0;
+ }
+
+ //--------------------------------------------------------------------
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_vertex >= m_num_vertices) return path_cmd_stop;
+ *x = m_vertices[m_vertex];
+ *y = m_vertices[m_vertex + 1];
+ m_vertex += 2;
+ return (m_vertex == 2) ? path_cmd_move_to : m_cmd;
+ }
+
+        // Supplementary functions. num_vertices() actually returns the number
+        // of vertices doubled; that is, for 1 vertex it returns 2.
+ //--------------------------------------------------------------------
+ unsigned num_vertices() const { return m_num_vertices; }
+ const double* vertices() const { return m_vertices; }
+ double* vertices() { return m_vertices; }
+
+ private:
+ unsigned m_vertex;
+ unsigned m_num_vertices;
+ double m_vertices[26];
+ unsigned m_cmd;
+ };
+
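
A minimal consumer of bezier_arc, assuming the program is linked against agg_bezier_arc.cpp (which defines init() and arc_to_bezier()); per the header comment above, a quarter circle needs a single cubic, so vertex() yields one move_to followed by three curve control points. Illustrative only:

    #include <cstdio>
    #include "agg_basics.h"
    #include "agg_bezier_arc.h"

    int main()
    {
        // Quarter circle of radius 100 centred at the origin.
        agg::bezier_arc arc(0.0, 0.0, 100.0, 100.0, 0.0, agg::pi / 2.0);
        arc.rewind(0);

        double x, y;
        unsigned cmd;
        while (!agg::is_stop(cmd = arc.vertex(&x, &y)))
        {
            std::printf("cmd=%u  (%8.3f, %8.3f)\n", cmd, x, y);
        }
        return 0;
    }
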
+
+
+ //==========================================================bezier_arc_svg
+ // Compute an SVG-style bezier arc.
+ //
+ // Computes an elliptical arc from (x1, y1) to (x2, y2). The size and
+ // orientation of the ellipse are defined by two radii (rx, ry)
+ // and an x-axis-rotation, which indicates how the ellipse as a whole
+ // is rotated relative to the current coordinate system. The center
+ // (cx, cy) of the ellipse is calculated automatically to satisfy the
+ // constraints imposed by the other parameters.
+ // large-arc-flag and sweep-flag contribute to the automatic calculations
+ // and help determine how the arc is drawn.
+ class bezier_arc_svg
+ {
+ public:
+ //--------------------------------------------------------------------
+ bezier_arc_svg() : m_arc(), m_radii_ok(false) {}
+
+ bezier_arc_svg(double x1, double y1,
+ double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double x2, double y2) :
+ m_arc(), m_radii_ok(false)
+ {
+ init(x1, y1, rx, ry, angle, large_arc_flag, sweep_flag, x2, y2);
+ }
+
+ //--------------------------------------------------------------------
+ void init(double x1, double y1,
+ double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double x2, double y2);
+
+ //--------------------------------------------------------------------
+ bool radii_ok() const { return m_radii_ok; }
+
+ //--------------------------------------------------------------------
+ void rewind(unsigned)
+ {
+ m_arc.rewind(0);
+ }
+
+ //--------------------------------------------------------------------
+ unsigned vertex(double* x, double* y)
+ {
+ return m_arc.vertex(x, y);
+ }
+
+        // Supplementary functions. num_vertices() actually returns the number
+        // of vertices doubled; that is, for 1 vertex it returns 2.
+ //--------------------------------------------------------------------
+ unsigned num_vertices() const { return m_arc.num_vertices(); }
+ const double* vertices() const { return m_arc.vertices(); }
+ double* vertices() { return m_arc.vertices(); }
+
+ private:
+ bezier_arc m_arc;
+ bool m_radii_ok;
+ };
+
+
+
+
+}
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_clip_liang_barsky.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_clip_liang_barsky.h
new file mode 100644
index 00000000000..4b5fedbab5f
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_clip_liang_barsky.h
@@ -0,0 +1,333 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Liang-Barsky clipping
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_CLIP_LIANG_BARSKY_INCLUDED
+#define AGG_CLIP_LIANG_BARSKY_INCLUDED
+
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //------------------------------------------------------------------------
+ enum clipping_flags_e
+ {
+ clipping_flags_x1_clipped = 4,
+ clipping_flags_x2_clipped = 1,
+ clipping_flags_y1_clipped = 8,
+ clipping_flags_y2_clipped = 2,
+ clipping_flags_x_clipped = clipping_flags_x1_clipped | clipping_flags_x2_clipped,
+ clipping_flags_y_clipped = clipping_flags_y1_clipped | clipping_flags_y2_clipped
+ };
+
+ //----------------------------------------------------------clipping_flags
+ // Determine the clipping code of the vertex according to the
+ // Cyrus-Beck line clipping algorithm
+ //
+ // | |
+ // 0110 | 0010 | 0011
+ // | |
+ // -------+--------+-------- clip_box.y2
+ // | |
+ // 0100 | 0000 | 0001
+ // | |
+ // -------+--------+-------- clip_box.y1
+ // | |
+ // 1100 | 1000 | 1001
+ // | |
+ // clip_box.x1 clip_box.x2
+ //
+ //
+ template<class T>
+ inline unsigned clipping_flags(T x, T y, const rect_base<T>& clip_box)
+ {
+ return (x > clip_box.x2) |
+ ((y > clip_box.y2) << 1) |
+ ((x < clip_box.x1) << 2) |
+ ((y < clip_box.y1) << 3);
+ }
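
The clipping codes above match the diagram: one bit per violated side, so a point left of clip_box.x1 and below clip_box.y1 gets 1100 (decimal 12). A quick sketch, illustrative only:

    #include <cassert>
    #include "agg_clip_liang_barsky.h"

    int main()
    {
        agg::rect_base<int> box(0, 0, 100, 100);

        assert(agg::clipping_flags( 50,  50, box) == 0);    // inside:        0000
        assert(agg::clipping_flags(150,  50, box) == 1);    // right of box:  0001
        assert(agg::clipping_flags(-10, -10, box) == 12);   // left & below:  1100
        return 0;
    }
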
+
+ //--------------------------------------------------------clipping_flags_x
+ template<class T>
+ inline unsigned clipping_flags_x(T x, const rect_base<T>& clip_box)
+ {
+ return (x > clip_box.x2) | ((x < clip_box.x1) << 2);
+ }
+
+
+ //--------------------------------------------------------clipping_flags_y
+ template<class T>
+ inline unsigned clipping_flags_y(T y, const rect_base<T>& clip_box)
+ {
+ return ((y > clip_box.y2) << 1) | ((y < clip_box.y1) << 3);
+ }
+
+
+ //-------------------------------------------------------clip_liang_barsky
+ template<class T>
+ inline unsigned clip_liang_barsky(T x1, T y1, T x2, T y2,
+ const rect_base<T>& clip_box,
+ T* x, T* y)
+ {
+ const double nearzero = 1e-30;
+
+ double deltax = x2 - x1;
+ double deltay = y2 - y1;
+ double xin;
+ double xout;
+ double yin;
+ double yout;
+ double tinx;
+ double tiny;
+ double toutx;
+ double touty;
+ double tin1;
+ double tin2;
+ double tout1;
+ unsigned np = 0;
+
+ if(deltax == 0.0)
+ {
+ // bump off of the vertical
+ deltax = (x1 > clip_box.x1) ? -nearzero : nearzero;
+ }
+
+ if(deltay == 0.0)
+ {
+ // bump off of the horizontal
+ deltay = (y1 > clip_box.y1) ? -nearzero : nearzero;
+ }
+
+ if(deltax > 0.0)
+ {
+ // points to right
+ xin = clip_box.x1;
+ xout = clip_box.x2;
+ }
+ else
+ {
+ xin = clip_box.x2;
+ xout = clip_box.x1;
+ }
+
+ if(deltay > 0.0)
+ {
+ // points up
+ yin = clip_box.y1;
+ yout = clip_box.y2;
+ }
+ else
+ {
+ yin = clip_box.y2;
+ yout = clip_box.y1;
+ }
+
+ tinx = (xin - x1) / deltax;
+ tiny = (yin - y1) / deltay;
+
+ if (tinx < tiny)
+ {
+ // hits x first
+ tin1 = tinx;
+ tin2 = tiny;
+ }
+ else
+ {
+ // hits y first
+ tin1 = tiny;
+ tin2 = tinx;
+ }
+
+ if(tin1 <= 1.0)
+ {
+ if(0.0 < tin1)
+ {
+ *x++ = (T)xin;
+ *y++ = (T)yin;
+ ++np;
+ }
+
+ if(tin2 <= 1.0)
+ {
+ toutx = (xout - x1) / deltax;
+ touty = (yout - y1) / deltay;
+
+ tout1 = (toutx < touty) ? toutx : touty;
+
+ if(tin2 > 0.0 || tout1 > 0.0)
+ {
+ if(tin2 <= tout1)
+ {
+ if(tin2 > 0.0)
+ {
+ if(tinx > tiny)
+ {
+ *x++ = (T)xin;
+ *y++ = (T)(y1 + tinx * deltay);
+ }
+ else
+ {
+ *x++ = (T)(x1 + tiny * deltax);
+ *y++ = (T)yin;
+ }
+ ++np;
+ }
+
+ if(tout1 < 1.0)
+ {
+ if(toutx < touty)
+ {
+ *x++ = (T)xout;
+ *y++ = (T)(y1 + toutx * deltay);
+ }
+ else
+ {
+ *x++ = (T)(x1 + touty * deltax);
+ *y++ = (T)yout;
+ }
+ }
+ else
+ {
+ *x++ = x2;
+ *y++ = y2;
+ }
+ ++np;
+ }
+ else
+ {
+ if(tinx > tiny)
+ {
+ *x++ = (T)xin;
+ *y++ = (T)yout;
+ }
+ else
+ {
+ *x++ = (T)xout;
+ *y++ = (T)yin;
+ }
+ ++np;
+ }
+ }
+ }
+ }
+ return np;
+ }
+
+
+ //----------------------------------------------------------------------------
+ template<class T>
+ bool clip_move_point(T x1, T y1, T x2, T y2,
+ const rect_base<T>& clip_box,
+ T* x, T* y, unsigned flags)
+ {
+ T bound;
+
+ if(flags & clipping_flags_x_clipped)
+ {
+ if(x1 == x2)
+ {
+ return false;
+ }
+ bound = (flags & clipping_flags_x1_clipped) ? clip_box.x1 : clip_box.x2;
+ *y = (T)(double(bound - x1) * (y2 - y1) / (x2 - x1) + y1);
+ *x = bound;
+ }
+
+ flags = clipping_flags_y(*y, clip_box);
+ if(flags & clipping_flags_y_clipped)
+ {
+ if(y1 == y2)
+ {
+ return false;
+ }
+ bound = (flags & clipping_flags_y1_clipped) ? clip_box.y1 : clip_box.y2;
+ *x = (T)(double(bound - y1) * (x2 - x1) / (y2 - y1) + x1);
+ *y = bound;
+ }
+ return true;
+ }
+
+ //-------------------------------------------------------clip_line_segment
+ // Returns: ret >= 4 - Fully clipped
+ // (ret & 1) != 0 - First point has been moved
+ // (ret & 2) != 0 - Second point has been moved
+ //
+ template<class T>
+ unsigned clip_line_segment(T* x1, T* y1, T* x2, T* y2,
+ const rect_base<T>& clip_box)
+ {
+ unsigned f1 = clipping_flags(*x1, *y1, clip_box);
+ unsigned f2 = clipping_flags(*x2, *y2, clip_box);
+ unsigned ret = 0;
+
+ if((f2 | f1) == 0)
+ {
+ // Fully visible
+ return 0;
+ }
+
+ if((f1 & clipping_flags_x_clipped) != 0 &&
+ (f1 & clipping_flags_x_clipped) == (f2 & clipping_flags_x_clipped))
+ {
+ // Fully clipped
+ return 4;
+ }
+
+ if((f1 & clipping_flags_y_clipped) != 0 &&
+ (f1 & clipping_flags_y_clipped) == (f2 & clipping_flags_y_clipped))
+ {
+ // Fully clipped
+ return 4;
+ }
+
+ T tx1 = *x1;
+ T ty1 = *y1;
+ T tx2 = *x2;
+ T ty2 = *y2;
+ if(f1)
+ {
+ if(!clip_move_point(tx1, ty1, tx2, ty2, clip_box, x1, y1, f1))
+ {
+ return 4;
+ }
+ if(*x1 == *x2 && *y1 == *y2)
+ {
+ return 4;
+ }
+ ret |= 1;
+ }
+ if(f2)
+ {
+ if(!clip_move_point(tx1, ty1, tx2, ty2, clip_box, x2, y2, f2))
+ {
+ return 4;
+ }
+ if(*x1 == *x2 && *y1 == *y2)
+ {
+ return 4;
+ }
+ ret |= 2;
+ }
+ return ret;
+ }
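
A small sketch of clip_line_segment clipping a horizontal segment that crosses the box; both endpoints get moved, so the return value is 1 | 2 == 3. Illustrative only:

    #include <cassert>
    #include "agg_clip_liang_barsky.h"

    int main()
    {
        agg::rect_base<double> box(0.0, 0.0, 100.0, 100.0);
        double x1 = -50.0, y1 = 50.0, x2 = 150.0, y2 = 50.0;

        unsigned ret = agg::clip_line_segment(&x1, &y1, &x2, &y2, box);

        assert(ret == 3);                    // both endpoints moved
        assert(x1 == 0.0 && x2 == 100.0);    // clamped to the box edges
        assert(y1 == 50.0 && y2 == 50.0);
        return 0;
    }
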
+
+
+}
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_color_gray.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_color_gray.h
new file mode 100644
index 00000000000..8d5f2ed8d04
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_color_gray.h
@@ -0,0 +1,1047 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+//
+// color types gray8, gray16, gray32
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_COLOR_GRAY_INCLUDED
+#define AGG_COLOR_GRAY_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_color_rgba.h"
+
+namespace agg
+{
+
+ //===================================================================gray8
+ template<class Colorspace>
+ struct gray8T
+ {
+ typedef int8u value_type;
+ typedef int32u calc_type;
+ typedef int32 long_type;
+ enum base_scale_e
+ {
+ base_shift = 8,
+ base_scale = 1 << base_shift,
+ base_mask = base_scale - 1,
+ base_MSB = 1 << (base_shift - 1)
+ };
+ typedef gray8T self_type;
+
+ value_type v;
+ value_type a;
+
+ static value_type luminance(const rgba& c)
+ {
+ // Calculate grayscale value as per ITU-R BT.709.
+ return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b) * base_mask));
+ }
+
+ static value_type luminance(const rgba8& c)
+ {
+ // Calculate grayscale value as per ITU-R BT.709.
+ return value_type((55u * c.r + 184u * c.g + 18u * c.b) >> 8);
+ }
+
+ static void convert(gray8T<linear>& dst, const gray8T<sRGB>& src)
+ {
+ dst.v = sRGB_conv<value_type>::rgb_from_sRGB(src.v);
+ dst.a = src.a;
+ }
+
+ static void convert(gray8T<sRGB>& dst, const gray8T<linear>& src)
+ {
+ dst.v = sRGB_conv<value_type>::rgb_to_sRGB(src.v);
+ dst.a = src.a;
+ }
+
+ static void convert(gray8T<linear>& dst, const rgba8& src)
+ {
+ dst.v = luminance(src);
+ dst.a = src.a;
+ }
+
+ static void convert(gray8T<linear>& dst, const srgba8& src)
+ {
+ // The RGB weights are only valid for linear values.
+ convert(dst, rgba8(src));
+ }
+
+ static void convert(gray8T<sRGB>& dst, const rgba8& src)
+ {
+ dst.v = sRGB_conv<value_type>::rgb_to_sRGB(luminance(src));
+ dst.a = src.a;
+ }
+
+ static void convert(gray8T<sRGB>& dst, const srgba8& src)
+ {
+ // The RGB weights are only valid for linear values.
+ convert(dst, rgba8(src));
+ }
+
+ //--------------------------------------------------------------------
+ gray8T() {}
+
+ //--------------------------------------------------------------------
+ explicit gray8T(unsigned v_, unsigned a_ = base_mask) :
+ v(int8u(v_)), a(int8u(a_)) {}
+
+ //--------------------------------------------------------------------
+ gray8T(const self_type& c, unsigned a_) :
+ v(c.v), a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ gray8T(const rgba& c) :
+ v(luminance(c)),
+ a(value_type(uround(c.a * base_mask))) {}
+
+ //--------------------------------------------------------------------
+ template<class T>
+ gray8T(const gray8T<T>& c)
+ {
+ convert(*this, c);
+ }
+
+ //--------------------------------------------------------------------
+ template<class T>
+ gray8T(const rgba8T<T>& c)
+ {
+ convert(*this, c);
+ }
+
+ //--------------------------------------------------------------------
+ template<class T>
+ T convert_from_sRGB() const
+ {
+ typename T::value_type y = sRGB_conv<typename T::value_type>::rgb_from_sRGB(v);
+ return T(y, y, y, sRGB_conv<typename T::value_type>::alpha_from_sRGB(a));
+ }
+
+ template<class T>
+ T convert_to_sRGB() const
+ {
+ typename T::value_type y = sRGB_conv<typename T::value_type>::rgb_to_sRGB(v);
+ return T(y, y, y, sRGB_conv<typename T::value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ rgba8 make_rgba8(const linear&) const
+ {
+ return rgba8(v, v, v, a);
+ }
+
+ rgba8 make_rgba8(const sRGB&) const
+ {
+ return convert_from_sRGB<srgba8>();
+ }
+
+ operator rgba8() const
+ {
+ return make_rgba8(Colorspace());
+ }
+
+ //--------------------------------------------------------------------
+ srgba8 make_srgba8(const linear&) const
+ {
+ return convert_to_sRGB<rgba8>();
+ }
+
+ srgba8 make_srgba8(const sRGB&) const
+ {
+ return srgba8(v, v, v, a);
+ }
+
+ operator srgba8() const
+ {
+ return make_rgba8(Colorspace());
+ }
+
+ //--------------------------------------------------------------------
+ rgba16 make_rgba16(const linear&) const
+ {
+ rgba16::value_type rgb = (v << 8) | v;
+ return rgba16(rgb, rgb, rgb, (a << 8) | a);
+ }
+
+ rgba16 make_rgba16(const sRGB&) const
+ {
+ return convert_from_sRGB<rgba16>();
+ }
+
+ operator rgba16() const
+ {
+ return make_rgba16(Colorspace());
+ }
+
+ //--------------------------------------------------------------------
+ rgba32 make_rgba32(const linear&) const
+ {
+ rgba32::value_type v32 = v / 255.0f;
+ return rgba32(v32, v32, v32, a / 255.0f);
+ }
+
+ rgba32 make_rgba32(const sRGB&) const
+ {
+ return convert_from_sRGB<rgba32>();
+ }
+
+ operator rgba32() const
+ {
+ return make_rgba32(Colorspace());
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return double(a) / base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(uround(a * base_mask));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a == 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a == base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int8u.
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ calc_type t = a * b + base_MSB;
+ return value_type(((t >> base_shift) + t) >> base_shift);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ if (a * b == 0)
+ {
+ return 0;
+ }
+ else if (a >= b)
+ {
+ return base_mask;
+ }
+ else return value_type((a * base_mask + (b >> 1)) / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a >> base_shift;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return a >> n;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int8u.
+ // Specifically for multiplying a color component by a cover.
+ static AGG_INLINE value_type mult_cover(value_type a, value_type b)
+ {
+ return multiply(a, b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return multiply(b, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return p + q - multiply(p, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ int t = (q - p) * a + base_MSB - (p > q);
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ v = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = (value_type)uround(a_ * double(base_mask));
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return double(a) / double(base_mask);
+ }
+
+ //--------------------------------------------------------------------
+ self_type& premultiply()
+ {
+ if (a < base_mask)
+ {
+ if (a == 0) v = 0;
+ else v = multiply(v, a);
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& demultiply()
+ {
+ if (a < base_mask)
+ {
+ if (a == 0)
+ {
+ v = 0;
+ }
+ else
+ {
+ calc_type v_ = (calc_type(v) * base_mask) / a;
+ v = value_type((v_ > base_mask) ? (value_type)base_mask : v_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type gradient(self_type c, double k) const
+ {
+ self_type ret;
+ calc_type ik = uround(k * base_scale);
+ ret.v = lerp(v, c.v, ik);
+ ret.a = lerp(a, c.a, ik);
+ return ret;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void add(const self_type& c, unsigned cover)
+ {
+ calc_type cv, ca;
+ if (cover == cover_mask)
+ {
+ if (c.a == base_mask)
+ {
+ *this = c;
+ return;
+ }
+ else
+ {
+ cv = v + c.v;
+ ca = a + c.a;
+ }
+ }
+ else
+ {
+ cv = v + mult_cover(c.v, cover);
+ ca = a + mult_cover(c.a, cover);
+ }
+ v = (value_type)((cv > calc_type(base_mask)) ? calc_type(base_mask) : cv);
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0); }
+ };
+
+ typedef gray8T<linear> gray8;
+ typedef gray8T<sRGB> sgray8;
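
A few concrete values for gray8 (illustrative only): the BT.709 float path keeps full white at full scale, the integer fast path above uses weights (55, 184, 18) that sum to 257, and multiply() is the usual fixed-point product with 255 acting as 1.0.

    #include <cassert>
    #include "agg_color_gray.h"

    int main()
    {
        agg::gray8 white(agg::rgba(1.0, 1.0, 1.0, 1.0));
        assert(white.v == 255 && white.a == 255);

        // Integer approximation of the BT.709 weights: pure green -> 183.
        assert(agg::gray8::luminance(agg::rgba8(0, 255, 0, 255)) == 183);

        // Fixed-point multiply, exact at the extremes.
        assert(agg::gray8::multiply(255, 255) == 255);
        assert(agg::gray8::multiply(128, 255) == 128);
        return 0;
    }
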
+
+
+ //==================================================================gray16
+ struct gray16
+ {
+ typedef int16u value_type;
+ typedef int32u calc_type;
+ typedef int64 long_type;
+ enum base_scale_e
+ {
+ base_shift = 16,
+ base_scale = 1 << base_shift,
+ base_mask = base_scale - 1,
+ base_MSB = 1 << (base_shift - 1)
+ };
+ typedef gray16 self_type;
+
+ value_type v;
+ value_type a;
+
+ static value_type luminance(const rgba& c)
+ {
+ // Calculate grayscale value as per ITU-R BT.709.
+ return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b) * base_mask));
+ }
+
+ static value_type luminance(const rgba16& c)
+ {
+ // Calculate grayscale value as per ITU-R BT.709.
+ return value_type((13933u * c.r + 46872u * c.g + 4732u * c.b) >> 16);
+ }
+
+ static value_type luminance(const rgba8& c)
+ {
+ return luminance(rgba16(c));
+ }
+
+ static value_type luminance(const srgba8& c)
+ {
+ return luminance(rgba16(c));
+ }
+
+ static value_type luminance(const rgba32& c)
+ {
+ return luminance(rgba(c));
+ }
+
+ //--------------------------------------------------------------------
+ gray16() {}
+
+ //--------------------------------------------------------------------
+ explicit gray16(unsigned v_, unsigned a_ = base_mask) :
+ v(int16u(v_)), a(int16u(a_)) {}
+
+ //--------------------------------------------------------------------
+ gray16(const self_type& c, unsigned a_) :
+ v(c.v), a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ gray16(const rgba& c) :
+ v(luminance(c)),
+ a((value_type)uround(c.a * double(base_mask))) {}
+
+ //--------------------------------------------------------------------
+ gray16(const rgba8& c) :
+ v(luminance(c)),
+ a((value_type(c.a) << 8) | c.a) {}
+
+ //--------------------------------------------------------------------
+ gray16(const srgba8& c) :
+ v(luminance(c)),
+ a((value_type(c.a) << 8) | c.a) {}
+
+ //--------------------------------------------------------------------
+ gray16(const rgba16& c) :
+ v(luminance(c)),
+ a(c.a) {}
+
+ //--------------------------------------------------------------------
+ gray16(const gray8& c) :
+ v((value_type(c.v) << 8) | c.v),
+ a((value_type(c.a) << 8) | c.a) {}
+
+ //--------------------------------------------------------------------
+ gray16(const sgray8& c) :
+ v(sRGB_conv<value_type>::rgb_from_sRGB(c.v)),
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
+
+ //--------------------------------------------------------------------
+ operator rgba8() const
+ {
+ return rgba8(v >> 8, v >> 8, v >> 8, a >> 8);
+ }
+
+ //--------------------------------------------------------------------
+ operator srgba8() const
+ {
+ value_type y = sRGB_conv<value_type>::rgb_to_sRGB(v);
+ return srgba8(y, y, y, sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba16() const
+ {
+ return rgba16(v, v, v, a);
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba32() const
+ {
+ rgba32::value_type v32 = v / 65535.0f;
+ return rgba32(v32, v32, v32, a / 65535.0f);
+ }
+
+ //--------------------------------------------------------------------
+ operator gray8() const
+ {
+ return gray8(v >> 8, a >> 8);
+ }
+
+ //--------------------------------------------------------------------
+ operator sgray8() const
+ {
+ return sgray8(
+ sRGB_conv<value_type>::rgb_to_sRGB(v),
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return double(a) / base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(uround(a * base_mask));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a == 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a == base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int16u.
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ calc_type t = a * b + base_MSB;
+ return value_type(((t >> base_shift) + t) >> base_shift);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ if (a * b == 0)
+ {
+ return 0;
+ }
+ else if (a >= b)
+ {
+ return base_mask;
+ }
+ else return value_type((a * base_mask + (b >> 1)) / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a >> base_shift;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return a >> n;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, almost exact over int16u.
+ // Specifically for multiplying a color component by a cover.
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return multiply(a, b << 8 | b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return mult_cover(b, a) >> 8;
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return p + q - multiply(p, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ int t = (q - p) * a + base_MSB - (p > q);
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ v = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if(a_ > 1) a = 1;
+ else a = (value_type)uround(a_ * double(base_mask));
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return double(a) / double(base_mask);
+ }
+
+
+ //--------------------------------------------------------------------
+ self_type& premultiply()
+ {
+ if (a < base_mask)
+ {
+ if(a == 0) v = 0;
+ else v = multiply(v, a);
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& demultiply()
+ {
+ if (a < base_mask)
+ {
+ if (a == 0)
+ {
+ v = 0;
+ }
+ else
+ {
+ calc_type v_ = (calc_type(v) * base_mask) / a;
+ v = value_type((v_ > base_mask) ? base_mask : v_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type gradient(self_type c, double k) const
+ {
+ self_type ret;
+ calc_type ik = uround(k * base_scale);
+ ret.v = lerp(v, c.v, ik);
+ ret.a = lerp(a, c.a, ik);
+ return ret;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void add(const self_type& c, unsigned cover)
+ {
+ calc_type cv, ca;
+ if (cover == cover_mask)
+ {
+ if (c.a == base_mask)
+ {
+ *this = c;
+ return;
+ }
+ else
+ {
+ cv = v + c.v;
+ ca = a + c.a;
+ }
+ }
+ else
+ {
+ cv = v + mult_cover(c.v, cover);
+ ca = a + mult_cover(c.a, cover);
+ }
+ v = (value_type)((cv > calc_type(base_mask)) ? calc_type(base_mask) : cv);
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0); }
+ };
+
+
+ //===================================================================gray32
+ struct gray32
+ {
+ typedef float value_type;
+ typedef double calc_type;
+ typedef double long_type;
+ typedef gray32 self_type;
+
+ value_type v;
+ value_type a;
+
+ // Calculate grayscale value as per ITU-R BT.709.
+ static value_type luminance(double r, double g, double b)
+ {
+ return value_type(0.2126 * r + 0.7152 * g + 0.0722 * b);
+ }
+
+ static value_type luminance(const rgba& c)
+ {
+ return luminance(c.r, c.g, c.b);
+ }
+
+ static value_type luminance(const rgba32& c)
+ {
+ return luminance(c.r, c.g, c.b);
+ }
+
+ static value_type luminance(const rgba8& c)
+ {
+            return luminance(c.r / 255.0, c.g / 255.0, c.b / 255.0);
+ }
+
+ static value_type luminance(const rgba16& c)
+ {
+            return luminance(c.r / 65535.0, c.g / 65535.0, c.b / 65535.0);
+ }
+
+ //--------------------------------------------------------------------
+ gray32() {}
+
+ //--------------------------------------------------------------------
+ explicit gray32(value_type v_, value_type a_ = 1) :
+ v(v_), a(a_) {}
+
+ //--------------------------------------------------------------------
+ gray32(const self_type& c, value_type a_) :
+ v(c.v), a(a_) {}
+
+ //--------------------------------------------------------------------
+ gray32(const rgba& c) :
+ v(luminance(c)),
+ a(value_type(c.a)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const rgba8& c) :
+ v(luminance(c)),
+ a(value_type(c.a / 255.0)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const srgba8& c) :
+ v(luminance(rgba32(c))),
+ a(value_type(c.a / 255.0)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const rgba16& c) :
+ v(luminance(c)),
+ a(value_type(c.a / 65535.0)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const rgba32& c) :
+ v(luminance(c)),
+ a(value_type(c.a)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const gray8& c) :
+ v(value_type(c.v / 255.0)),
+ a(value_type(c.a / 255.0)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const sgray8& c) :
+ v(sRGB_conv<value_type>::rgb_from_sRGB(c.v)),
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
+
+ //--------------------------------------------------------------------
+ gray32(const gray16& c) :
+ v(value_type(c.v / 65535.0)),
+ a(value_type(c.a / 65535.0)) {}
+
+ //--------------------------------------------------------------------
+ operator rgba() const
+ {
+ return rgba(v, v, v, a);
+ }
+
+ //--------------------------------------------------------------------
+ operator gray8() const
+ {
+ return gray8(uround(v * 255.0), uround(a * 255.0));
+ }
+
+ //--------------------------------------------------------------------
+ operator sgray8() const
+ {
+ // Return (non-premultiplied) sRGB values.
+ return sgray8(
+ sRGB_conv<value_type>::rgb_to_sRGB(v),
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ operator gray16() const
+ {
+ return gray16(uround(v * 65535.0), uround(a * 65535.0));
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba8() const
+ {
+ rgba8::value_type y = uround(v * 255.0);
+ return rgba8(y, y, y, uround(a * 255.0));
+ }
+
+ //--------------------------------------------------------------------
+ operator srgba8() const
+ {
+ srgba8::value_type y = sRGB_conv<value_type>::rgb_to_sRGB(v);
+ return srgba8(y, y, y, sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba16() const
+ {
+ rgba16::value_type y = uround(v * 65535.0);
+ return rgba16(y, y, y, uround(a * 65535.0));
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba32() const
+ {
+ return rgba32(v, v, v, a);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(a);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return 1;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a <= 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a >= 1;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type invert(value_type x)
+ {
+ return 1 - x;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ return value_type(a * b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ return (b == 0) ? 0 : value_type(a / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return n > 0 ? a / (1 << n) : a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return value_type(a * b / cover_mask);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return cover_type(uround(a * b));
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return (1 - a) * p + q; // more accurate than "p + q - p * a"
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ // The form "p + a * (q - p)" avoids a multiplication, but may produce an
+ // inaccurate result. For example, "p + (q - p)" may not be exactly equal
+ // to q. Therefore, stick to the basic expression, which at least produces
+ // the correct result at either extreme.
+ return (1 - a) * p + a * q;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ v = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = value_type(a_);
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return a;
+ }
+
+
+ //--------------------------------------------------------------------
+ self_type& premultiply()
+ {
+ if (a < 0) v = 0;
+ else if(a < 1) v *= a;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& demultiply()
+ {
+ if (a < 0) v = 0;
+ else if (a < 1) v /= a;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type gradient(self_type c, double k) const
+ {
+ return self_type(
+ value_type(v + (c.v - v) * k),
+ value_type(a + (c.a - a) * k));
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0); }
+ };
+}
+
+
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_color_rgba.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_color_rgba.h
new file mode 100644
index 00000000000..74f871be17b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_color_rgba.h
@@ -0,0 +1,1353 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_COLOR_RGBA_INCLUDED
+#define AGG_COLOR_RGBA_INCLUDED
+
+#include <math.h>
+#include "agg_basics.h"
+#include "agg_gamma_lut.h"
+
+namespace agg
+{
+ // Supported component orders for RGB and RGBA pixel formats
+ //=======================================================================
+ struct order_rgb { enum rgb_e { R=0, G=1, B=2, N=3 }; };
+ struct order_bgr { enum bgr_e { B=0, G=1, R=2, N=3 }; };
+ struct order_rgba { enum rgba_e { R=0, G=1, B=2, A=3, N=4 }; };
+ struct order_argb { enum argb_e { A=0, R=1, G=2, B=3, N=4 }; };
+ struct order_abgr { enum abgr_e { A=0, B=1, G=2, R=3, N=4 }; };
+ struct order_bgra { enum bgra_e { B=0, G=1, R=2, A=3, N=4 }; };
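+ // (For example, with order_bgra a pixel stored at byte pointer p is laid
+ // out as p[0] = B, p[1] = G, p[2] = R, p[3] = A; N is the component count.)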
+
+ // Colorspace tag types.
+ struct linear {};
+ struct sRGB {};
+
+ //====================================================================rgba
+ struct rgba
+ {
+ typedef double value_type;
+
+ double r;
+ double g;
+ double b;
+ double a;
+
+ //--------------------------------------------------------------------
+ rgba() {}
+
+ //--------------------------------------------------------------------
+ rgba(double r_, double g_, double b_, double a_=1.0) :
+ r(r_), g(g_), b(b_), a(a_) {}
+
+ //--------------------------------------------------------------------
+ rgba(const rgba& c, double a_) : r(c.r), g(c.g), b(c.b), a(a_) {}
+
+ //--------------------------------------------------------------------
+ rgba& clear()
+ {
+ r = g = b = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ rgba& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ rgba& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = a_;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ rgba& premultiply()
+ {
+ r *= a;
+ g *= a;
+ b *= a;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ rgba& premultiply(double a_)
+ {
+ if (a <= 0 || a_ <= 0)
+ {
+ r = g = b = a = 0;
+ }
+ else
+ {
+ a_ /= a;
+ r *= a_;
+ g *= a_;
+ b *= a_;
+ a = a_;
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ rgba& demultiply()
+ {
+ if (a == 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ double a_ = 1.0 / a;
+ r *= a_;
+ g *= a_;
+ b *= a_;
+ }
+ return *this;
+ }
+
+
+ //--------------------------------------------------------------------
+ rgba gradient(rgba c, double k) const
+ {
+ rgba ret;
+ ret.r = r + (c.r - r) * k;
+ ret.g = g + (c.g - g) * k;
+ ret.b = b + (c.b - b) * k;
+ ret.a = a + (c.a - a) * k;
+ return ret;
+ }
+
+ rgba& operator+=(const rgba& c)
+ {
+ r += c.r;
+ g += c.g;
+ b += c.b;
+ a += c.a;
+ return *this;
+ }
+
+ rgba& operator*=(double k)
+ {
+ r *= k;
+ g *= k;
+ b *= k;
+ a *= k;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ static rgba no_color() { return rgba(0,0,0,0); }
+
+ //--------------------------------------------------------------------
+ static rgba from_wavelength(double wl, double gamma = 1.0);
+
+ //--------------------------------------------------------------------
+ explicit rgba(double wavelen, double gamma=1.0)
+ {
+ *this = from_wavelength(wavelen, gamma);
+ }
+
+ };
+
+ inline rgba operator+(const rgba& a, const rgba& b)
+ {
+ return rgba(a) += b;
+ }
+
+ inline rgba operator*(const rgba& a, double b)
+ {
+ return rgba(a) *= b;
+ }
+
+ //------------------------------------------------------------------------
+ inline rgba rgba::from_wavelength(double wl, double gamma)
+ {
+ rgba t(0.0, 0.0, 0.0);
+
+ if (wl >= 380.0 && wl <= 440.0)
+ {
+ t.r = -1.0 * (wl - 440.0) / (440.0 - 380.0);
+ t.b = 1.0;
+ }
+ else if (wl >= 440.0 && wl <= 490.0)
+ {
+ t.g = (wl - 440.0) / (490.0 - 440.0);
+ t.b = 1.0;
+ }
+ else if (wl >= 490.0 && wl <= 510.0)
+ {
+ t.g = 1.0;
+ t.b = -1.0 * (wl - 510.0) / (510.0 - 490.0);
+ }
+ else if (wl >= 510.0 && wl <= 580.0)
+ {
+ t.r = (wl - 510.0) / (580.0 - 510.0);
+ t.g = 1.0;
+ }
+ else if (wl >= 580.0 && wl <= 645.0)
+ {
+ t.r = 1.0;
+ t.g = -1.0 * (wl - 645.0) / (645.0 - 580.0);
+ }
+ else if (wl >= 645.0 && wl <= 780.0)
+ {
+ t.r = 1.0;
+ }
+
+ double s = 1.0;
+ if (wl > 700.0) s = 0.3 + 0.7 * (780.0 - wl) / (780.0 - 700.0);
+ else if (wl < 420.0) s = 0.3 + 0.7 * (wl - 380.0) / (420.0 - 380.0);
+
+ t.r = pow(t.r * s, gamma);
+ t.g = pow(t.g * s, gamma);
+ t.b = pow(t.b * s, gamma);
+ return t;
+ }
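+ // For example, from_wavelength(510.0) yields pure green and
+ // from_wavelength(645.0) yields pure red with the default gamma.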
+
+ inline rgba rgba_pre(double r, double g, double b, double a)
+ {
+ return rgba(r, g, b, a).premultiply();
+ }
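+ // For example, rgba_pre(1.0, 0.5, 0.0, 0.5) stores (0.5, 0.25, 0.0, 0.5):
+ // the color components are already multiplied by the alpha value.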
+
+
+ //===================================================================rgba8
+ template<class Colorspace>
+ struct rgba8T
+ {
+ typedef int8u value_type;
+ typedef int32u calc_type;
+ typedef int32 long_type;
+ enum base_scale_e
+ {
+ base_shift = 8,
+ base_scale = 1 << base_shift,
+ base_mask = base_scale - 1,
+ base_MSB = 1 << (base_shift - 1)
+ };
+ typedef rgba8T self_type;
+
+
+ value_type r;
+ value_type g;
+ value_type b;
+ value_type a;
+
+ static void convert(rgba8T<linear>& dst, const rgba8T<sRGB>& src)
+ {
+ dst.r = sRGB_conv<value_type>::rgb_from_sRGB(src.r);
+ dst.g = sRGB_conv<value_type>::rgb_from_sRGB(src.g);
+ dst.b = sRGB_conv<value_type>::rgb_from_sRGB(src.b);
+ dst.a = src.a;
+ }
+
+ static void convert(rgba8T<sRGB>& dst, const rgba8T<linear>& src)
+ {
+ dst.r = sRGB_conv<value_type>::rgb_to_sRGB(src.r);
+ dst.g = sRGB_conv<value_type>::rgb_to_sRGB(src.g);
+ dst.b = sRGB_conv<value_type>::rgb_to_sRGB(src.b);
+ dst.a = src.a;
+ }
+
+ static void convert(rgba8T<linear>& dst, const rgba& src)
+ {
+ dst.r = value_type(uround(src.r * base_mask));
+ dst.g = value_type(uround(src.g * base_mask));
+ dst.b = value_type(uround(src.b * base_mask));
+ dst.a = value_type(uround(src.a * base_mask));
+ }
+
+ static void convert(rgba8T<sRGB>& dst, const rgba& src)
+ {
+ // Use the "float" table.
+ dst.r = sRGB_conv<float>::rgb_to_sRGB(float(src.r));
+ dst.g = sRGB_conv<float>::rgb_to_sRGB(float(src.g));
+ dst.b = sRGB_conv<float>::rgb_to_sRGB(float(src.b));
+ dst.a = sRGB_conv<float>::alpha_to_sRGB(float(src.a));
+ }
+
+ static void convert(rgba& dst, const rgba8T<linear>& src)
+ {
+ dst.r = src.r / 255.0;
+ dst.g = src.g / 255.0;
+ dst.b = src.b / 255.0;
+ dst.a = src.a / 255.0;
+ }
+
+ static void convert(rgba& dst, const rgba8T<sRGB>& src)
+ {
+ // Use the "float" table.
+ dst.r = sRGB_conv<float>::rgb_from_sRGB(src.r);
+ dst.g = sRGB_conv<float>::rgb_from_sRGB(src.g);
+ dst.b = sRGB_conv<float>::rgb_from_sRGB(src.b);
+ dst.a = sRGB_conv<float>::alpha_from_sRGB(src.a);
+ }
+
+ //--------------------------------------------------------------------
+ rgba8T() {}
+
+ //--------------------------------------------------------------------
+ rgba8T(unsigned r_, unsigned g_, unsigned b_, unsigned a_ = base_mask) :
+ r(value_type(r_)),
+ g(value_type(g_)),
+ b(value_type(b_)),
+ a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ rgba8T(const rgba& c)
+ {
+ convert(*this, c);
+ }
+
+ //--------------------------------------------------------------------
+ rgba8T(const self_type& c, unsigned a_) :
+ r(c.r), g(c.g), b(c.b), a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ template<class T>
+ rgba8T(const rgba8T<T>& c)
+ {
+ convert(*this, c);
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba() const
+ {
+ rgba c;
+ convert(c, *this);
+ return c;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return double(a) / base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(uround(a * base_mask));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a == 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a == base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type invert(value_type x)
+ {
+ return base_mask - x;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int8u.
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ calc_type t = a * b + base_MSB;
+ return value_type(((t >> base_shift) + t) >> base_shift);
+ }
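+ // The +base_MSB bias plus the double shift implement a rounded division by
+ // base_mask, i.e. multiply(a, b) == uround(a * b / 255.0); for example,
+ // multiply(255, 128) == 128 and multiply(255, 255) == 255.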
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ if (a * b == 0)
+ {
+ return 0;
+ }
+ else if (a >= b)
+ {
+ return base_mask;
+ }
+ else return value_type((a * base_mask + (b >> 1)) / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a >> base_shift;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return a >> n;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int8u.
+ // Specifically for multiplying a color component by a cover.
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return multiply(a, b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return multiply(b, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return p + q - multiply(p, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ int t = (q - p) * a + base_MSB - (p > q);
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
+ }
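+ // The +base_MSB term rounds the scaled difference, and the -(p > q) term
+ // adjusts that rounding when interpolating downward (q < p), so that
+ // lerp(p, q, 0) returns exactly p and lerp(p, q, base_mask) returns exactly q.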
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ r = g = b = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = (value_type)uround(a_ * double(base_mask));
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return double(a) / double(base_mask);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& premultiply()
+ {
+ if (a != base_mask)
+ {
+ if (a == 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ r = multiply(r, a);
+ g = multiply(g, a);
+ b = multiply(b, a);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& premultiply(unsigned a_)
+ {
+ if (a != base_mask || a_ < base_mask)
+ {
+ if (a == 0 || a_ == 0)
+ {
+ r = g = b = a = 0;
+ }
+ else
+ {
+ calc_type r_ = (calc_type(r) * a_) / a;
+ calc_type g_ = (calc_type(g) * a_) / a;
+ calc_type b_ = (calc_type(b) * a_) / a;
+ r = value_type((r_ > a_) ? a_ : r_);
+ g = value_type((g_ > a_) ? a_ : g_);
+ b = value_type((b_ > a_) ? a_ : b_);
+ a = value_type(a_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& demultiply()
+ {
+ if (a < base_mask)
+ {
+ if (a == 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ calc_type r_ = (calc_type(r) * base_mask) / a;
+ calc_type g_ = (calc_type(g) * base_mask) / a;
+ calc_type b_ = (calc_type(b) * base_mask) / a;
+ r = value_type((r_ > calc_type(base_mask)) ? calc_type(base_mask) : r_);
+ g = value_type((g_ > calc_type(base_mask)) ? calc_type(base_mask) : g_);
+ b = value_type((b_ > calc_type(base_mask)) ? calc_type(base_mask) : b_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type gradient(const self_type& c, double k) const
+ {
+ self_type ret;
+ calc_type ik = uround(k * base_mask);
+ ret.r = lerp(r, c.r, ik);
+ ret.g = lerp(g, c.g, ik);
+ ret.b = lerp(b, c.b, ik);
+ ret.a = lerp(a, c.a, ik);
+ return ret;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void add(const self_type& c, unsigned cover)
+ {
+ calc_type cr, cg, cb, ca;
+ if (cover == cover_mask)
+ {
+ if (c.a == base_mask)
+ {
+ *this = c;
+ return;
+ }
+ else
+ {
+ cr = r + c.r;
+ cg = g + c.g;
+ cb = b + c.b;
+ ca = a + c.a;
+ }
+ }
+ else
+ {
+ cr = r + mult_cover(c.r, cover);
+ cg = g + mult_cover(c.g, cover);
+ cb = b + mult_cover(c.b, cover);
+ ca = a + mult_cover(c.a, cover);
+ }
+ r = (value_type)((cr > calc_type(base_mask)) ? calc_type(base_mask) : cr);
+ g = (value_type)((cg > calc_type(base_mask)) ? calc_type(base_mask) : cg);
+ b = (value_type)((cb > calc_type(base_mask)) ? calc_type(base_mask) : cb);
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
+ {
+ r = gamma.dir(r);
+ g = gamma.dir(g);
+ b = gamma.dir(b);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
+ {
+ r = gamma.inv(r);
+ g = gamma.inv(g);
+ b = gamma.inv(b);
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0,0,0); }
+
+ //--------------------------------------------------------------------
+ static self_type from_wavelength(double wl, double gamma = 1.0)
+ {
+ return self_type(rgba::from_wavelength(wl, gamma));
+ }
+ };
+
+ typedef rgba8T<linear> rgba8;
+ typedef rgba8T<sRGB> srgba8;
+
+
+ //-------------------------------------------------------------rgb8_packed
+ inline rgba8 rgb8_packed(unsigned v)
+ {
+ return rgba8((v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF);
+ }
+
+ //-------------------------------------------------------------bgr8_packed
+ inline rgba8 bgr8_packed(unsigned v)
+ {
+ return rgba8(v & 0xFF, (v >> 8) & 0xFF, (v >> 16) & 0xFF);
+ }
+
+ //------------------------------------------------------------argb8_packed
+ inline rgba8 argb8_packed(unsigned v)
+ {
+ return rgba8((v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF, v >> 24);
+ }
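+ // For example, rgb8_packed(0xFF8000) == rgba8(255, 128, 0) with full alpha,
+ // and argb8_packed(0x80FF0000) == rgba8(255, 0, 0, 128).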
+
+ //---------------------------------------------------------rgba8_gamma_dir
+ template<class GammaLUT>
+ rgba8 rgba8_gamma_dir(rgba8 c, const GammaLUT& gamma)
+ {
+ return rgba8(gamma.dir(c.r), gamma.dir(c.g), gamma.dir(c.b), c.a);
+ }
+
+ //---------------------------------------------------------rgba8_gamma_inv
+ template<class GammaLUT>
+ rgba8 rgba8_gamma_inv(rgba8 c, const GammaLUT& gamma)
+ {
+ return rgba8(gamma.inv(c.r), gamma.inv(c.g), gamma.inv(c.b), c.a);
+ }
+
+
+
+ //==================================================================rgba16
+ struct rgba16
+ {
+ typedef int16u value_type;
+ typedef int32u calc_type;
+ typedef int64 long_type;
+ enum base_scale_e
+ {
+ base_shift = 16,
+ base_scale = 1 << base_shift,
+ base_mask = base_scale - 1,
+ base_MSB = 1 << (base_shift - 1)
+ };
+ typedef rgba16 self_type;
+
+ value_type r;
+ value_type g;
+ value_type b;
+ value_type a;
+
+ //--------------------------------------------------------------------
+ rgba16() {}
+
+ //--------------------------------------------------------------------
+ rgba16(unsigned r_, unsigned g_, unsigned b_, unsigned a_=base_mask) :
+ r(value_type(r_)),
+ g(value_type(g_)),
+ b(value_type(b_)),
+ a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ rgba16(const self_type& c, unsigned a_) :
+ r(c.r), g(c.g), b(c.b), a(value_type(a_)) {}
+
+ //--------------------------------------------------------------------
+ rgba16(const rgba& c) :
+ r((value_type)uround(c.r * double(base_mask))),
+ g((value_type)uround(c.g * double(base_mask))),
+ b((value_type)uround(c.b * double(base_mask))),
+ a((value_type)uround(c.a * double(base_mask))) {}
+
+ //--------------------------------------------------------------------
+ rgba16(const rgba8& c) :
+ r(value_type((value_type(c.r) << 8) | c.r)),
+ g(value_type((value_type(c.g) << 8) | c.g)),
+ b(value_type((value_type(c.b) << 8) | c.b)),
+ a(value_type((value_type(c.a) << 8) | c.a)) {}
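+ // (The (x << 8) | x expansion replicates each 8-bit value into both bytes,
+ // e.g. 0xFF -> 0xFFFF and 0xAB -> 0xABAB, so black and white stay exact.)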
+
+ //--------------------------------------------------------------------
+ rgba16(const srgba8& c) :
+ r(sRGB_conv<value_type>::rgb_from_sRGB(c.r)),
+ g(sRGB_conv<value_type>::rgb_from_sRGB(c.g)),
+ b(sRGB_conv<value_type>::rgb_from_sRGB(c.b)),
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
+
+ //--------------------------------------------------------------------
+ operator rgba() const
+ {
+ return rgba(
+ r / 65535.0,
+ g / 65535.0,
+ b / 65535.0,
+ a / 65535.0);
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba8() const
+ {
+ return rgba8(r >> 8, g >> 8, b >> 8, a >> 8);
+ }
+
+ //--------------------------------------------------------------------
+ operator srgba8() const
+ {
+ // Return (non-premultiplied) sRGB values.
+ return srgba8(
+ sRGB_conv<value_type>::rgb_to_sRGB(r),
+ sRGB_conv<value_type>::rgb_to_sRGB(g),
+ sRGB_conv<value_type>::rgb_to_sRGB(b),
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return double(a) / base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(uround(a * base_mask));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a == 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a == base_mask;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type invert(value_type x)
+ {
+ return base_mask - x;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, exact over int16u.
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ calc_type t = a * b + base_MSB;
+ return value_type(((t >> base_shift) + t) >> base_shift);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ if (a * b == 0)
+ {
+ return 0;
+ }
+ else if (a >= b)
+ {
+ return base_mask;
+ }
+ else return value_type((a * base_mask + (b >> 1)) / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a >> base_shift;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return a >> n;
+ }
+
+ //--------------------------------------------------------------------
+ // Fixed-point multiply, almost exact over int16u.
+ // Specifically for multiplying a color component by a cover.
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return multiply(a, (b << 8) | b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return multiply((a << 8) | a, b) >> 8;
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return p + q - multiply(p, a);
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ int t = (q - p) * a + base_MSB - (p > q);
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ r = g = b = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& opacity(double a_)
+ {
+ if (a_ < 0) a_ = 0;
+ if (a_ > 1) a_ = 1;
+ a = value_type(uround(a_ * double(base_mask)));
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return double(a) / double(base_mask);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& premultiply()
+ {
+ if (a != base_mask)
+ {
+ if (a == 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ r = multiply(r, a);
+ g = multiply(g, a);
+ b = multiply(b, a);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& premultiply(unsigned a_)
+ {
+ if (a < base_mask || a_ < base_mask)
+ {
+ if (a == 0 || a_ == 0)
+ {
+ r = g = b = a = 0;
+ }
+ else
+ {
+ calc_type r_ = (calc_type(r) * a_) / a;
+ calc_type g_ = (calc_type(g) * a_) / a;
+ calc_type b_ = (calc_type(b) * a_) / a;
+ r = value_type((r_ > a_) ? a_ : r_);
+ g = value_type((g_ > a_) ? a_ : g_);
+ b = value_type((b_ > a_) ? a_ : b_);
+ a = value_type(a_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& demultiply()
+ {
+ if (a < base_mask)
+ {
+ if (a == 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ calc_type r_ = (calc_type(r) * base_mask) / a;
+ calc_type g_ = (calc_type(g) * base_mask) / a;
+ calc_type b_ = (calc_type(b) * base_mask) / a;
+ r = value_type((r_ > calc_type(base_mask)) ? calc_type(base_mask) : r_);
+ g = value_type((g_ > calc_type(base_mask)) ? calc_type(base_mask) : g_);
+ b = value_type((b_ > calc_type(base_mask)) ? calc_type(base_mask) : b_);
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type gradient(const self_type& c, double k) const
+ {
+ self_type ret;
+ calc_type ik = uround(k * base_mask);
+ ret.r = lerp(r, c.r, ik);
+ ret.g = lerp(g, c.g, ik);
+ ret.b = lerp(b, c.b, ik);
+ ret.a = lerp(a, c.a, ik);
+ return ret;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void add(const self_type& c, unsigned cover)
+ {
+ calc_type cr, cg, cb, ca;
+ if (cover == cover_mask)
+ {
+ if (c.a == base_mask)
+ {
+ *this = c;
+ return;
+ }
+ else
+ {
+ cr = r + c.r;
+ cg = g + c.g;
+ cb = b + c.b;
+ ca = a + c.a;
+ }
+ }
+ else
+ {
+ cr = r + mult_cover(c.r, cover);
+ cg = g + mult_cover(c.g, cover);
+ cb = b + mult_cover(c.b, cover);
+ ca = a + mult_cover(c.a, cover);
+ }
+ r = (value_type)((cr > calc_type(base_mask)) ? calc_type(base_mask) : cr);
+ g = (value_type)((cg > calc_type(base_mask)) ? calc_type(base_mask) : cg);
+ b = (value_type)((cb > calc_type(base_mask)) ? calc_type(base_mask) : cb);
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
+ {
+ r = gamma.dir(r);
+ g = gamma.dir(g);
+ b = gamma.dir(b);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
+ {
+ r = gamma.inv(r);
+ g = gamma.inv(g);
+ b = gamma.inv(b);
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0,0,0); }
+
+ //--------------------------------------------------------------------
+ static self_type from_wavelength(double wl, double gamma = 1.0)
+ {
+ return self_type(rgba::from_wavelength(wl, gamma));
+ }
+ };
+
+
+ //------------------------------------------------------rgba16_gamma_dir
+ template<class GammaLUT>
+ rgba16 rgba16_gamma_dir(rgba16 c, const GammaLUT& gamma)
+ {
+ return rgba16(gamma.dir(c.r), gamma.dir(c.g), gamma.dir(c.b), c.a);
+ }
+
+ //------------------------------------------------------rgba16_gamma_inv
+ template<class GammaLUT>
+ rgba16 rgba16_gamma_inv(rgba16 c, const GammaLUT& gamma)
+ {
+ return rgba16(gamma.inv(c.r), gamma.inv(c.g), gamma.inv(c.b), c.a);
+ }
+
+ //====================================================================rgba32
+ struct rgba32
+ {
+ typedef float value_type;
+ typedef double calc_type;
+ typedef double long_type;
+ typedef rgba32 self_type;
+
+ value_type r;
+ value_type g;
+ value_type b;
+ value_type a;
+
+ //--------------------------------------------------------------------
+ rgba32() {}
+
+ //--------------------------------------------------------------------
+ rgba32(value_type r_, value_type g_, value_type b_, value_type a_= 1) :
+ r(r_), g(g_), b(b_), a(a_) {}
+
+ //--------------------------------------------------------------------
+ rgba32(const self_type& c, float a_) :
+ r(c.r), g(c.g), b(c.b), a(a_) {}
+
+ //--------------------------------------------------------------------
+ rgba32(const rgba& c) :
+ r(value_type(c.r)), g(value_type(c.g)), b(value_type(c.b)), a(value_type(c.a)) {}
+
+ //--------------------------------------------------------------------
+ rgba32(const rgba8& c) :
+ r(value_type(c.r / 255.0)),
+ g(value_type(c.g / 255.0)),
+ b(value_type(c.b / 255.0)),
+ a(value_type(c.a / 255.0)) {}
+
+ //--------------------------------------------------------------------
+ rgba32(const srgba8& c) :
+ r(sRGB_conv<value_type>::rgb_from_sRGB(c.r)),
+ g(sRGB_conv<value_type>::rgb_from_sRGB(c.g)),
+ b(sRGB_conv<value_type>::rgb_from_sRGB(c.b)),
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
+
+ //--------------------------------------------------------------------
+ rgba32(const rgba16& c) :
+ r(value_type(c.r / 65535.0)),
+ g(value_type(c.g / 65535.0)),
+ b(value_type(c.b / 65535.0)),
+ a(value_type(c.a / 65535.0)) {}
+
+ //--------------------------------------------------------------------
+ operator rgba() const
+ {
+ return rgba(r, g, b, a);
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba8() const
+ {
+ return rgba8(
+ uround(r * 255.0),
+ uround(g * 255.0),
+ uround(b * 255.0),
+ uround(a * 255.0));
+ }
+
+ //--------------------------------------------------------------------
+ operator srgba8() const
+ {
+ return srgba8(
+ sRGB_conv<value_type>::rgb_to_sRGB(r),
+ sRGB_conv<value_type>::rgb_to_sRGB(g),
+ sRGB_conv<value_type>::rgb_to_sRGB(b),
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
+ }
+
+ //--------------------------------------------------------------------
+ operator rgba16() const
+ {
+ return rgba16(
+ uround(r * 65535.0),
+ uround(g * 65535.0),
+ uround(b * 65535.0),
+ uround(a * 65535.0));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(a);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return 1;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a <= 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a >= 1;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type invert(value_type x)
+ {
+ return 1 - x;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ return value_type(a * b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ return (b == 0) ? 0 : value_type(a / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return n > 0 ? a / (1 << n) : a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return value_type(a * b / cover_mask);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return cover_type(uround(a * b));
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return (1 - a) * p + q; // more accurate than "p + q - p * a"
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ // The form "p + a * (q - p)" avoids a multiplication, but may produce an
+ // inaccurate result. For example, "p + (q - p)" may not be exactly equal
+ // to q. Therefore, stick to the basic expression, which at least produces
+ // the correct result at either extreme.
+ return (1 - a) * p + a * q;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ r = g = b = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = value_type(a_);
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& premultiply()
+ {
+ if (a < 1)
+ {
+ if (a <= 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ r *= a;
+ g *= a;
+ b *= a;
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& demultiply()
+ {
+ if (a < 1)
+ {
+ if (a <= 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ r /= a;
+ g /= a;
+ b /= a;
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type gradient(const self_type& c, double k) const
+ {
+ self_type ret;
+ ret.r = value_type(r + (c.r - r) * k);
+ ret.g = value_type(g + (c.g - g) * k);
+ ret.b = value_type(b + (c.b - b) * k);
+ ret.a = value_type(a + (c.a - a) * k);
+ return ret;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void add(const self_type& c, unsigned cover)
+ {
+ if (cover == cover_mask)
+ {
+ if (c.is_opaque())
+ {
+ *this = c;
+ return;
+ }
+ else
+ {
+ r += c.r;
+ g += c.g;
+ b += c.b;
+ a += c.a;
+ }
+ }
+ else
+ {
+ r += mult_cover(c.r, cover);
+ g += mult_cover(c.g, cover);
+ b += mult_cover(c.b, cover);
+ a += mult_cover(c.a, cover);
+ }
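+ // Clamp so that the premultiplied invariant r, g, b <= a <= 1 still holds.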
+ if (a > 1) a = 1;
+ if (r > a) r = a;
+ if (g > a) g = a;
+ if (b > a) b = a;
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
+ {
+ r = gamma.dir(r);
+ g = gamma.dir(g);
+ b = gamma.dir(b);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLUT>
+ AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
+ {
+ r = gamma.inv(r);
+ g = gamma.inv(g);
+ b = gamma.inv(b);
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0,0,0); }
+
+ //--------------------------------------------------------------------
+ static self_type from_wavelength(double wl, double gamma = 1)
+ {
+ return self_type(rgba::from_wavelength(wl, gamma));
+ }
+ };
+}
+
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_config.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_config.h
new file mode 100644
index 00000000000..fa1dae2ba7b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_config.h
@@ -0,0 +1,44 @@
+#ifndef AGG_CONFIG_INCLUDED
+#define AGG_CONFIG_INCLUDED
+
+// This file can be used to redefine certain data types.
+
+//---------------------------------------
+// 1. Default basic types such as:
+//
+// AGG_INT8
+// AGG_INT8U
+// AGG_INT16
+// AGG_INT16U
+// AGG_INT32
+// AGG_INT32U
+// AGG_INT64
+// AGG_INT64U
+//
+// Just replace this file with new defines if necessary.
+ // For example, if your compiler doesn't have a 64-bit integer type,
+ // you can still use AGG if you define the following:
+//
+// #define AGG_INT64 int
+// #define AGG_INT64U unsigned
+//
+ // It will result in overflow in 16-bit-per-component image/pattern resampling,
+ // but it won't cause any crash and the rest of the library will remain
+// fully functional.
+
+
+//---------------------------------------
+// 2. Default rendering_buffer type. Can be:
+//
+// Provides faster access for massive pixel operations,
+// such as blur, image filtering:
+// #define AGG_RENDERING_BUFFER row_ptr_cache<int8u>
+//
+// Provides cheaper creation and destruction (no mem allocs):
+// #define AGG_RENDERING_BUFFER row_accessor<int8u>
+//
+ // You can still use both of them simultaneously in your applications.
+ // This #define is used only for the default rendering_buffer type,
+ // in shorthand typedefs like pixfmt_rgba32.
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_adaptor_vcgen.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_adaptor_vcgen.h
new file mode 100644
index 00000000000..a79f2208c61
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_adaptor_vcgen.h
@@ -0,0 +1,157 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_CONV_ADAPTOR_VCGEN_INCLUDED
+#define AGG_CONV_ADAPTOR_VCGEN_INCLUDED
+
+#include "agg_basics.h"
+
+namespace agg
+{
+ //------------------------------------------------------------null_markers
+ struct null_markers
+ {
+ void remove_all() {}
+ void add_vertex(double, double, unsigned) {}
+ void prepare_src() {}
+
+ void rewind(unsigned) {}
+ unsigned vertex(double*, double*) { return path_cmd_stop; }
+ };
+
+
+ //------------------------------------------------------conv_adaptor_vcgen
+ template<class VertexSource,
+ class Generator,
+ class Markers=null_markers> class conv_adaptor_vcgen
+ {
+ enum status
+ {
+ initial,
+ accumulate,
+ generate
+ };
+
+ public:
+ explicit conv_adaptor_vcgen(VertexSource& source) :
+ m_source(&source),
+ m_status(initial)
+ {}
+ void attach(VertexSource& source) { m_source = &source; }
+
+ Generator& generator() { return m_generator; }
+ const Generator& generator() const { return m_generator; }
+
+ Markers& markers() { return m_markers; }
+ const Markers& markers() const { return m_markers; }
+
+ void rewind(unsigned path_id)
+ {
+ m_source->rewind(path_id);
+ m_status = initial;
+ }
+
+ unsigned vertex(double* x, double* y);
+
+ private:
+ // Prohibit copying
+ conv_adaptor_vcgen(const conv_adaptor_vcgen<VertexSource, Generator, Markers>&);
+ const conv_adaptor_vcgen<VertexSource, Generator, Markers>&
+ operator = (const conv_adaptor_vcgen<VertexSource, Generator, Markers>&);
+
+ VertexSource* m_source;
+ Generator m_generator;
+ Markers m_markers;
+ status m_status;
+ unsigned m_last_cmd;
+ double m_start_x;
+ double m_start_y;
+ };
+
+
+
+
+
+ //------------------------------------------------------------------------
+ template<class VertexSource, class Generator, class Markers>
+ unsigned conv_adaptor_vcgen<VertexSource, Generator, Markers>::vertex(double* x, double* y)
+ {
+ unsigned cmd = path_cmd_stop;
+ bool done = false;
+ while(!done)
+ {
+ switch(m_status)
+ {
+ case initial:
+ m_markers.remove_all();
+ m_last_cmd = m_source->vertex(&m_start_x, &m_start_y);
+ m_status = accumulate;
+
+ case accumulate:
+ if(is_stop(m_last_cmd)) return path_cmd_stop;
+
+ m_generator.remove_all();
+ m_generator.add_vertex(m_start_x, m_start_y, path_cmd_move_to);
+ m_markers.add_vertex(m_start_x, m_start_y, path_cmd_move_to);
+
+ for(;;)
+ {
+ cmd = m_source->vertex(x, y);
+ if(is_vertex(cmd))
+ {
+ m_last_cmd = cmd;
+ if(is_move_to(cmd))
+ {
+ m_start_x = *x;
+ m_start_y = *y;
+ break;
+ }
+ m_generator.add_vertex(*x, *y, cmd);
+ m_markers.add_vertex(*x, *y, path_cmd_line_to);
+ }
+ else
+ {
+ if(is_stop(cmd))
+ {
+ m_last_cmd = path_cmd_stop;
+ break;
+ }
+ if(is_end_poly(cmd))
+ {
+ m_generator.add_vertex(*x, *y, cmd);
+ break;
+ }
+ }
+ }
+ m_generator.rewind(0);
+ m_status = generate;
+
+ case generate:
+ cmd = m_generator.vertex(x, y);
+ if(is_stop(cmd))
+ {
+ m_status = accumulate;
+ break;
+ }
+ done = true;
+ break;
+ }
+ }
+ return cmd;
+ }
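+
+ // A converter built on this adaptor is driven like any other vertex source.
+ // Illustrative sketch (Source and Generator stand for user-supplied types):
+ //
+ // conv_adaptor_vcgen<Source, Generator> conv(src);
+ // conv.rewind(0);
+ // double x, y;
+ // unsigned cmd;
+ // while(!is_stop(cmd = conv.vertex(&x, &y))) { /* handle (cmd, x, y) */ }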
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_adaptor_vpgen.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_adaptor_vpgen.h
new file mode 100644
index 00000000000..d6b545ef1f2
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_adaptor_vpgen.h
@@ -0,0 +1,159 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_CONV_ADAPTOR_VPGEN_INCLUDED
+#define AGG_CONV_ADAPTOR_VPGEN_INCLUDED
+
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //======================================================conv_adaptor_vpgen
+ template<class VertexSource, class VPGen> class conv_adaptor_vpgen
+ {
+ public:
+ explicit conv_adaptor_vpgen(VertexSource& source) : m_source(&source) {}
+ void attach(VertexSource& source) { m_source = &source; }
+
+ VPGen& vpgen() { return m_vpgen; }
+ const VPGen& vpgen() const { return m_vpgen; }
+
+ void rewind(unsigned path_id);
+ unsigned vertex(double* x, double* y);
+
+ private:
+ conv_adaptor_vpgen(const conv_adaptor_vpgen<VertexSource, VPGen>&);
+ const conv_adaptor_vpgen<VertexSource, VPGen>&
+ operator = (const conv_adaptor_vpgen<VertexSource, VPGen>&);
+
+ VertexSource* m_source;
+ VPGen m_vpgen;
+ double m_start_x;
+ double m_start_y;
+ unsigned m_poly_flags;
+ int m_vertices;
+ };
+
+
+
+ //------------------------------------------------------------------------
+ template<class VertexSource, class VPGen>
+ void conv_adaptor_vpgen<VertexSource, VPGen>::rewind(unsigned path_id)
+ {
+ m_source->rewind(path_id);
+ m_vpgen.reset();
+ m_start_x = 0;
+ m_start_y = 0;
+ m_poly_flags = 0;
+ m_vertices = 0;
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class VertexSource, class VPGen>
+ unsigned conv_adaptor_vpgen<VertexSource, VPGen>::vertex(double* x, double* y)
+ {
+ unsigned cmd = path_cmd_stop;
+ for(;;)
+ {
+ cmd = m_vpgen.vertex(x, y);
+ if(!is_stop(cmd)) break;
+
+ if(m_poly_flags && !m_vpgen.auto_unclose())
+ {
+ *x = 0.0;
+ *y = 0.0;
+ cmd = m_poly_flags;
+ m_poly_flags = 0;
+ break;
+ }
+
+ if(m_vertices < 0)
+ {
+ if(m_vertices < -1)
+ {
+ m_vertices = 0;
+ return path_cmd_stop;
+ }
+ m_vpgen.move_to(m_start_x, m_start_y);
+ m_vertices = 1;
+ continue;
+ }
+
+ double tx, ty;
+ cmd = m_source->vertex(&tx, &ty);
+ if(is_vertex(cmd))
+ {
+ if(is_move_to(cmd))
+ {
+ if(m_vpgen.auto_close() && m_vertices > 2)
+ {
+ m_vpgen.line_to(m_start_x, m_start_y);
+ m_poly_flags = path_cmd_end_poly | path_flags_close;
+ m_start_x = tx;
+ m_start_y = ty;
+ m_vertices = -1;
+ continue;
+ }
+ m_vpgen.move_to(tx, ty);
+ m_start_x = tx;
+ m_start_y = ty;
+ m_vertices = 1;
+ }
+ else
+ {
+ m_vpgen.line_to(tx, ty);
+ ++m_vertices;
+ }
+ }
+ else
+ {
+ if(is_end_poly(cmd))
+ {
+ m_poly_flags = cmd;
+ if(is_closed(cmd) || m_vpgen.auto_close())
+ {
+ if(m_vpgen.auto_close()) m_poly_flags |= path_flags_close;
+ if(m_vertices > 2)
+ {
+ m_vpgen.line_to(m_start_x, m_start_y);
+ }
+ m_vertices = 0;
+ }
+ }
+ else
+ {
+ // path_cmd_stop
+ if(m_vpgen.auto_close() && m_vertices > 2)
+ {
+ m_vpgen.line_to(m_start_x, m_start_y);
+ m_poly_flags = path_cmd_end_poly | path_flags_close;
+ m_vertices = -2;
+ continue;
+ }
+ break;
+ }
+ }
+ }
+ return cmd;
+ }
+
+
+}
+
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_contour.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_contour.h
new file mode 100644
index 00000000000..b4b5a9047e9
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_contour.h
@@ -0,0 +1,65 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// conv_contour
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_CONV_CONTOUR_INCLUDED
+#define AGG_CONV_CONTOUR_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_vcgen_contour.h"
+#include "agg_conv_adaptor_vcgen.h"
+
+namespace agg
+{
+
+ //-----------------------------------------------------------conv_contour
+ template<class VertexSource>
+ struct conv_contour : public conv_adaptor_vcgen<VertexSource, vcgen_contour>
+ {
+ typedef conv_adaptor_vcgen<VertexSource, vcgen_contour> base_type;
+
+ conv_contour(VertexSource& vs) :
+ conv_adaptor_vcgen<VertexSource, vcgen_contour>(vs)
+ {
+ }
+
+ void line_join(line_join_e lj) { base_type::generator().line_join(lj); }
+ void inner_join(inner_join_e ij) { base_type::generator().inner_join(ij); }
+ void width(double w) { base_type::generator().width(w); }
+ void miter_limit(double ml) { base_type::generator().miter_limit(ml); }
+ void miter_limit_theta(double t) { base_type::generator().miter_limit_theta(t); }
+ void inner_miter_limit(double ml) { base_type::generator().inner_miter_limit(ml); }
+ void approximation_scale(double as) { base_type::generator().approximation_scale(as); }
+ void auto_detect_orientation(bool v) { base_type::generator().auto_detect_orientation(v); }
+
+ line_join_e line_join() const { return base_type::generator().line_join(); }
+ inner_join_e inner_join() const { return base_type::generator().inner_join(); }
+ double width() const { return base_type::generator().width(); }
+ double miter_limit() const { return base_type::generator().miter_limit(); }
+ double inner_miter_limit() const { return base_type::generator().inner_miter_limit(); }
+ double approximation_scale() const { return base_type::generator().approximation_scale(); }
+ bool auto_detect_orientation() const { return base_type::generator().auto_detect_orientation(); }
+
+ private:
+ conv_contour(const conv_contour<VertexSource>&);
+ const conv_contour<VertexSource>&
+ operator = (const conv_contour<VertexSource>&);
+ };
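+
+ // Illustrative sketch: offsetting a closed path held in any vertex source.
+ //
+ // conv_contour<path_storage> contour(path);
+ // contour.auto_detect_orientation(true);
+ // contour.width(2.0);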
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_curve.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_curve.h
new file mode 100644
index 00000000000..d5b475de7a1
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_curve.h
@@ -0,0 +1,201 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// classes conv_curve
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_CONV_CURVE_INCLUDED
+#define AGG_CONV_CURVE_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_curves.h"
+
+namespace agg
+{
+
+
+ //---------------------------------------------------------------conv_curve
+ // Curve converter class. Any path storage can have Bezier curves defined
+ // by their control points. There are two types of curves supported: curve3
+ // and curve4. Curve3 is a conic Bezier curve with 2 endpoints and 1 control
+ // point. Curve4 has 2 control points (4 points in total) and can be used
+ // to interpolate more complicated curves. Curve4, unlike curve3, can be used
+ // to approximate arcs, both circular and elliptical. Curves are approximated
+ // with straight lines. One approach is simply to store the whole
+ // sequence of vertices that approximate the curve. That takes additional
+ // memory; alternatively, the consecutive vertices can be calculated
+ // on demand.
+ //
+ // Initially, path storages are not supposed to keep all the vertices of the
+ // curves (although nothing prevents us from doing so). Instead, path_storage
+ // keeps only the vertices needed to calculate a curve on demand. Those vertices
+ // are marked with special commands. So, if the path_storage contains curves
+ // (which are not real curves yet), and we render this storage directly,
+ // all we will see is only 2 or 3 straight line segments (for curve3 and
+ // curve4 respectively). If we need to see real curves drawn, we need to
+ // include this class into the conversion pipeline.
+ //
+ // Class conv_curve recognizes commands path_cmd_curve3 and path_cmd_curve4
+ // and converts these vertices into a move_to/line_to sequence.
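+ //
+ // A typical (illustrative) pipeline places the converter between a vertex
+ // source and the next stage:
+ //
+ // path_storage path; // any source emitting curve3/curve4 commands
+ // conv_curve<path_storage> curved(path);
+ // // hand 'curved', not 'path', to the rasterizer or to conv_stroke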
+ //-----------------------------------------------------------------------
+ template<class VertexSource,
+ class Curve3=curve3,
+ class Curve4=curve4> class conv_curve
+ {
+ public:
+ typedef Curve3 curve3_type;
+ typedef Curve4 curve4_type;
+ typedef conv_curve<VertexSource, Curve3, Curve4> self_type;
+
+ explicit conv_curve(VertexSource& source) :
+ m_source(&source), m_last_x(0.0), m_last_y(0.0) {}
+ void attach(VertexSource& source) { m_source = &source; }
+
+ void approximation_method(curve_approximation_method_e v)
+ {
+ m_curve3.approximation_method(v);
+ m_curve4.approximation_method(v);
+ }
+
+ curve_approximation_method_e approximation_method() const
+ {
+ return m_curve4.approximation_method();
+ }
+
+ void approximation_scale(double s)
+ {
+ m_curve3.approximation_scale(s);
+ m_curve4.approximation_scale(s);
+ }
+
+ double approximation_scale() const
+ {
+ return m_curve4.approximation_scale();
+ }
+
+ void angle_tolerance(double v)
+ {
+ m_curve3.angle_tolerance(v);
+ m_curve4.angle_tolerance(v);
+ }
+
+ double angle_tolerance() const
+ {
+ return m_curve4.angle_tolerance();
+ }
+
+ void cusp_limit(double v)
+ {
+ m_curve3.cusp_limit(v);
+ m_curve4.cusp_limit(v);
+ }
+
+ double cusp_limit() const
+ {
+ return m_curve4.cusp_limit();
+ }
+
+ void rewind(unsigned path_id);
+ unsigned vertex(double* x, double* y);
+
+ private:
+ conv_curve(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ VertexSource* m_source;
+ double m_last_x;
+ double m_last_y;
+ curve3_type m_curve3;
+ curve4_type m_curve4;
+ };
+
+
+
+ //------------------------------------------------------------------------
+ template<class VertexSource, class Curve3, class Curve4>
+ void conv_curve<VertexSource, Curve3, Curve4>::rewind(unsigned path_id)
+ {
+ m_source->rewind(path_id);
+ m_last_x = 0.0;
+ m_last_y = 0.0;
+ m_curve3.reset();
+ m_curve4.reset();
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class VertexSource, class Curve3, class Curve4>
+ unsigned conv_curve<VertexSource, Curve3, Curve4>::vertex(double* x, double* y)
+ {
+ if(!is_stop(m_curve3.vertex(x, y)))
+ {
+ m_last_x = *x;
+ m_last_y = *y;
+ return path_cmd_line_to;
+ }
+
+ if(!is_stop(m_curve4.vertex(x, y)))
+ {
+ m_last_x = *x;
+ m_last_y = *y;
+ return path_cmd_line_to;
+ }
+
+ double ct2_x;
+ double ct2_y;
+ double end_x;
+ double end_y;
+
+ unsigned cmd = m_source->vertex(x, y);
+ switch(cmd)
+ {
+ case path_cmd_curve3:
+ m_source->vertex(&end_x, &end_y);
+
+ m_curve3.init(m_last_x, m_last_y,
+ *x, *y,
+ end_x, end_y);
+
+ m_curve3.vertex(x, y); // First call returns path_cmd_move_to
+ m_curve3.vertex(x, y); // This is the first vertex of the curve
+ cmd = path_cmd_line_to;
+ break;
+
+ case path_cmd_curve4:
+ m_source->vertex(&ct2_x, &ct2_y);
+ m_source->vertex(&end_x, &end_y);
+
+ m_curve4.init(m_last_x, m_last_y,
+ *x, *y,
+ ct2_x, ct2_y,
+ end_x, end_y);
+
+ m_curve4.vertex(x, y); // First call returns path_cmd_move_to
+ m_curve4.vertex(x, y); // This is the first vertex of the curve
+ cmd = path_cmd_line_to;
+ break;
+ }
+ m_last_x = *x;
+ m_last_y = *y;
+ return cmd;
+ }
+
+
+}
+
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_dash.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_dash.h
new file mode 100644
index 00000000000..23c13ad0ab3
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_dash.h
@@ -0,0 +1,68 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// conv_dash
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_CONV_DASH_INCLUDED
+#define AGG_CONV_DASH_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_vcgen_dash.h"
+#include "agg_conv_adaptor_vcgen.h"
+
+namespace agg
+{
+
+ //---------------------------------------------------------------conv_dash
+ template<class VertexSource, class Markers=null_markers>
+ struct conv_dash : public conv_adaptor_vcgen<VertexSource, vcgen_dash, Markers>
+ {
+ typedef Markers marker_type;
+ typedef conv_adaptor_vcgen<VertexSource, vcgen_dash, Markers> base_type;
+
+ conv_dash(VertexSource& vs) :
+ conv_adaptor_vcgen<VertexSource, vcgen_dash, Markers>(vs)
+ {
+ }
+
+ void remove_all_dashes()
+ {
+ base_type::generator().remove_all_dashes();
+ }
+
+ void add_dash(double dash_len, double gap_len)
+ {
+ base_type::generator().add_dash(dash_len, gap_len);
+ }
+
+ void dash_start(double ds)
+ {
+ base_type::generator().dash_start(ds);
+ }
+
+ void shorten(double s) { base_type::generator().shorten(s); }
+ double shorten() const { return base_type::generator().shorten(); }
+
+ private:
+ conv_dash(const conv_dash<VertexSource, Markers>&);
+ const conv_dash<VertexSource, Markers>&
+ operator = (const conv_dash<VertexSource, Markers>&);
+ };
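+
+ // Illustrative sketch: a 4-on/2-off dash pattern.
+ //
+ // conv_dash<path_storage> dash(path);
+ // dash.add_dash(4.0, 2.0);
+ // dash.dash_start(1.0); // begin 1 unit into the pattern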
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_segmentator.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_segmentator.h
new file mode 100644
index 00000000000..e69a9e7d7d0
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_segmentator.h
@@ -0,0 +1,48 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_CONV_SEGMENTATOR_INCLUDED
+#define AGG_CONV_SEGMENTATOR_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_conv_adaptor_vpgen.h"
+#include "agg_vpgen_segmentator.h"
+
+namespace agg
+{
+
+ //========================================================conv_segmentator
+ template<class VertexSource>
+ struct conv_segmentator : public conv_adaptor_vpgen<VertexSource, vpgen_segmentator>
+ {
+ typedef conv_adaptor_vpgen<VertexSource, vpgen_segmentator> base_type;
+
+ conv_segmentator(VertexSource& vs) :
+ conv_adaptor_vpgen<VertexSource, vpgen_segmentator>(vs) {}
+
+ void approximation_scale(double s) { base_type::vpgen().approximation_scale(s); }
+ double approximation_scale() const { return base_type::vpgen().approximation_scale(); }
+
+ private:
+ conv_segmentator(const conv_segmentator<VertexSource>&);
+ const conv_segmentator<VertexSource>&
+ operator = (const conv_segmentator<VertexSource>&);
+ };
+
+
+}
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_stroke.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_stroke.h
new file mode 100644
index 00000000000..e19a6b61f40
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_stroke.h
@@ -0,0 +1,73 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// conv_stroke
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_CONV_STROKE_INCLUDED
+#define AGG_CONV_STROKE_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_vcgen_stroke.h"
+#include "agg_conv_adaptor_vcgen.h"
+
+namespace agg
+{
+
+ //-------------------------------------------------------------conv_stroke
+ template<class VertexSource, class Markers=null_markers>
+ struct conv_stroke :
+ public conv_adaptor_vcgen<VertexSource, vcgen_stroke, Markers>
+ {
+ typedef Markers marker_type;
+ typedef conv_adaptor_vcgen<VertexSource, vcgen_stroke, Markers> base_type;
+
+ conv_stroke(VertexSource& vs) :
+ conv_adaptor_vcgen<VertexSource, vcgen_stroke, Markers>(vs)
+ {
+ }
+
+ void line_cap(line_cap_e lc) { base_type::generator().line_cap(lc); }
+ void line_join(line_join_e lj) { base_type::generator().line_join(lj); }
+ void inner_join(inner_join_e ij) { base_type::generator().inner_join(ij); }
+
+ line_cap_e line_cap() const { return base_type::generator().line_cap(); }
+ line_join_e line_join() const { return base_type::generator().line_join(); }
+ inner_join_e inner_join() const { return base_type::generator().inner_join(); }
+
+ void width(double w) { base_type::generator().width(w); }
+ void miter_limit(double ml) { base_type::generator().miter_limit(ml); }
+ void miter_limit_theta(double t) { base_type::generator().miter_limit_theta(t); }
+ void inner_miter_limit(double ml) { base_type::generator().inner_miter_limit(ml); }
+ void approximation_scale(double as) { base_type::generator().approximation_scale(as); }
+
+ double width() const { return base_type::generator().width(); }
+ double miter_limit() const { return base_type::generator().miter_limit(); }
+ double inner_miter_limit() const { return base_type::generator().inner_miter_limit(); }
+ double approximation_scale() const { return base_type::generator().approximation_scale(); }
+
+ void shorten(double s) { base_type::generator().shorten(s); }
+ double shorten() const { return base_type::generator().shorten(); }
+
+ private:
+ conv_stroke(const conv_stroke<VertexSource, Markers>&);
+ const conv_stroke<VertexSource, Markers>&
+ operator = (const conv_stroke<VertexSource, Markers>&);
+
+ };
+
+}
+
+#endif
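A short sketch of how conv_stroke is typically configured (illustrative only; round_cap and round_join are assumed to come from agg_math_stroke.h, which is part of the same import):

    agg::path_storage path;
    path.move_to(0, 0);
    path.line_to(50, 80);
    path.line_to(120, 20);

    agg::conv_stroke<agg::path_storage> stroke(path);
    stroke.width(3.0);                    // half-width 1.5 on each side
    stroke.line_cap(agg::round_cap);      // assumed enum from agg_math_stroke.h
    stroke.line_join(agg::round_join);    // assumed enum from agg_math_stroke.h
    stroke.miter_limit(4.0);
    // vertex() now yields the closed outline of the stroked polyline.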
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_transform.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_transform.h
new file mode 100644
index 00000000000..0c88a245bda
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_conv_transform.h
@@ -0,0 +1,68 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// class conv_transform
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_CONV_TRANSFORM_INCLUDED
+#define AGG_CONV_TRANSFORM_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_trans_affine.h"
+
+namespace agg
+{
+
+ //----------------------------------------------------------conv_transform
+ template<class VertexSource, class Transformer=trans_affine> class conv_transform
+ {
+ public:
+ conv_transform(VertexSource& source, Transformer& tr) :
+ m_source(&source), m_trans(&tr) {}
+ void attach(VertexSource& source) { m_source = &source; }
+
+ void rewind(unsigned path_id)
+ {
+ m_source->rewind(path_id);
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ unsigned cmd = m_source->vertex(x, y);
+ if(is_vertex(cmd))
+ {
+ m_trans->transform(x, y);
+ }
+ return cmd;
+ }
+
+ void transformer(Transformer& tr)
+ {
+ m_trans = &tr;
+ }
+
+ private:
+ conv_transform(const conv_transform<VertexSource>&);
+ const conv_transform<VertexSource>&
+ operator = (const conv_transform<VertexSource>&);
+
+ VertexSource* m_source;
+ Transformer* m_trans;
+ };
+
+
+}
+
+#endif
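A sketch of the usual pattern (illustrative only): conv_transform applies a transformer to each vertex on the fly; trans_affine and its helper constructors are assumed from agg_trans_affine.h, and is_stop() from agg_basics.h.

    agg::path_storage path;
    path.move_to(0, 0);
    path.line_to(100, 0);

    agg::trans_affine mtx;
    mtx *= agg::trans_affine_rotation(agg::pi / 4.0);   // assumed helper
    mtx *= agg::trans_affine_translation(10.0, 20.0);   // assumed helper

    agg::conv_transform<agg::path_storage> trans(path, mtx);

    double x, y;
    unsigned cmd;
    trans.rewind(0);
    while(!agg::is_stop(cmd = trans.vertex(&x, &y)))
    {
        // each (x, y) arrives rotated by 45 degrees and shifted by (10, 20)
    }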
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_curves.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_curves.h
new file mode 100644
index 00000000000..1ef02e87834
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_curves.h
@@ -0,0 +1,693 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+// Copyright (C) 2005 Tony Juricic (tonygeek@yahoo.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_CURVES_INCLUDED
+#define AGG_CURVES_INCLUDED
+
+#include "agg_array.h"
+
+namespace agg
+{
+
+ // See Implementation agg_curves.cpp
+
+ //--------------------------------------------curve_approximation_method_e
+ enum curve_approximation_method_e
+ {
+ curve_inc,
+ curve_div
+ };
+
+ //--------------------------------------------------------------curve3_inc
+ class curve3_inc
+ {
+ public:
+ curve3_inc() :
+ m_num_steps(0), m_step(0), m_scale(1.0) { }
+
+ curve3_inc(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3) :
+ m_num_steps(0), m_step(0), m_scale(1.0)
+ {
+ init(x1, y1, x2, y2, x3, y3);
+ }
+
+ void reset() { m_num_steps = 0; m_step = -1; }
+ void init(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3);
+
+ void approximation_method(curve_approximation_method_e) {}
+ curve_approximation_method_e approximation_method() const { return curve_inc; }
+
+ void approximation_scale(double s);
+ double approximation_scale() const;
+
+ void angle_tolerance(double) {}
+ double angle_tolerance() const { return 0.0; }
+
+ void cusp_limit(double) {}
+ double cusp_limit() const { return 0.0; }
+
+ void rewind(unsigned path_id);
+ unsigned vertex(double* x, double* y);
+
+ private:
+ int m_num_steps;
+ int m_step;
+ double m_scale;
+ double m_start_x;
+ double m_start_y;
+ double m_end_x;
+ double m_end_y;
+ double m_fx;
+ double m_fy;
+ double m_dfx;
+ double m_dfy;
+ double m_ddfx;
+ double m_ddfy;
+ double m_saved_fx;
+ double m_saved_fy;
+ double m_saved_dfx;
+ double m_saved_dfy;
+ };
+
+
+
+
+
+ //-------------------------------------------------------------curve3_div
+ class curve3_div
+ {
+ public:
+ curve3_div() :
+ m_approximation_scale(1.0),
+ m_angle_tolerance(0.0),
+ m_count(0)
+ {}
+
+ curve3_div(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3) :
+ m_approximation_scale(1.0),
+ m_angle_tolerance(0.0),
+ m_count(0)
+ {
+ init(x1, y1, x2, y2, x3, y3);
+ }
+
+ void reset() { m_points.remove_all(); m_count = 0; }
+ void init(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3);
+
+ void approximation_method(curve_approximation_method_e) {}
+ curve_approximation_method_e approximation_method() const { return curve_div; }
+
+ void approximation_scale(double s) { m_approximation_scale = s; }
+ double approximation_scale() const { return m_approximation_scale; }
+
+ void angle_tolerance(double a) { m_angle_tolerance = a; }
+ double angle_tolerance() const { return m_angle_tolerance; }
+
+ void cusp_limit(double) {}
+ double cusp_limit() const { return 0.0; }
+
+ void rewind(unsigned)
+ {
+ m_count = 0;
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_count >= m_points.size()) return path_cmd_stop;
+ const point_d& p = m_points[m_count++];
+ *x = p.x;
+ *y = p.y;
+ return (m_count == 1) ? path_cmd_move_to : path_cmd_line_to;
+ }
+
+ private:
+ void bezier(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3);
+ void recursive_bezier(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ unsigned level);
+
+ double m_approximation_scale;
+ double m_distance_tolerance_square;
+ double m_angle_tolerance;
+ unsigned m_count;
+ pod_bvector<point_d> m_points;
+ };
+
+
+
+
+
+
+
+ //-------------------------------------------------------------curve4_points
+ struct curve4_points
+ {
+ double cp[8];
+ curve4_points() {}
+ curve4_points(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4)
+ {
+ cp[0] = x1; cp[1] = y1; cp[2] = x2; cp[3] = y2;
+ cp[4] = x3; cp[5] = y3; cp[6] = x4; cp[7] = y4;
+ }
+ void init(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4)
+ {
+ cp[0] = x1; cp[1] = y1; cp[2] = x2; cp[3] = y2;
+ cp[4] = x3; cp[5] = y3; cp[6] = x4; cp[7] = y4;
+ }
+ double operator [] (unsigned i) const { return cp[i]; }
+ double& operator [] (unsigned i) { return cp[i]; }
+ };
+
+
+
+ //-------------------------------------------------------------curve4_inc
+ class curve4_inc
+ {
+ public:
+ curve4_inc() :
+ m_num_steps(0), m_step(0), m_scale(1.0) { }
+
+ curve4_inc(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4) :
+ m_num_steps(0), m_step(0), m_scale(1.0)
+ {
+ init(x1, y1, x2, y2, x3, y3, x4, y4);
+ }
+
+ curve4_inc(const curve4_points& cp) :
+ m_num_steps(0), m_step(0), m_scale(1.0)
+ {
+ init(cp[0], cp[1], cp[2], cp[3], cp[4], cp[5], cp[6], cp[7]);
+ }
+
+ void reset() { m_num_steps = 0; m_step = -1; }
+ void init(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4);
+
+ void init(const curve4_points& cp)
+ {
+ init(cp[0], cp[1], cp[2], cp[3], cp[4], cp[5], cp[6], cp[7]);
+ }
+
+ void approximation_method(curve_approximation_method_e) {}
+ curve_approximation_method_e approximation_method() const { return curve_inc; }
+
+ void approximation_scale(double s);
+ double approximation_scale() const;
+
+ void angle_tolerance(double) {}
+ double angle_tolerance() const { return 0.0; }
+
+ void cusp_limit(double) {}
+ double cusp_limit() const { return 0.0; }
+
+ void rewind(unsigned path_id);
+ unsigned vertex(double* x, double* y);
+
+ private:
+ int m_num_steps;
+ int m_step;
+ double m_scale;
+ double m_start_x;
+ double m_start_y;
+ double m_end_x;
+ double m_end_y;
+ double m_fx;
+ double m_fy;
+ double m_dfx;
+ double m_dfy;
+ double m_ddfx;
+ double m_ddfy;
+ double m_dddfx;
+ double m_dddfy;
+ double m_saved_fx;
+ double m_saved_fy;
+ double m_saved_dfx;
+ double m_saved_dfy;
+ double m_saved_ddfx;
+ double m_saved_ddfy;
+ };
+
+
+
+ //-------------------------------------------------------catrom_to_bezier
+ inline curve4_points catrom_to_bezier(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4)
+ {
+ // Trans. matrix Catmull-Rom to Bezier
+ //
+ // 0 1 0 0
+ // -1/6 1 1/6 0
+ // 0 1/6 1 -1/6
+ // 0 0 1 0
+ //
+ return curve4_points(
+ x2,
+ y2,
+ (-x1 + 6*x2 + x3) / 6,
+ (-y1 + 6*y2 + y3) / 6,
+ ( x2 + 6*x3 - x4) / 6,
+ ( y2 + 6*y3 - y4) / 6,
+ x3,
+ y3);
+ }
+
+
+ //-----------------------------------------------------------------------
+ inline curve4_points
+ catrom_to_bezier(const curve4_points& cp)
+ {
+ return catrom_to_bezier(cp[0], cp[1], cp[2], cp[3],
+ cp[4], cp[5], cp[6], cp[7]);
+ }
+
+
+
+ //-----------------------------------------------------ubspline_to_bezier
+ inline curve4_points ubspline_to_bezier(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4)
+ {
+ // Trans. matrix Uniform BSpline to Bezier
+ //
+ // 1/6 4/6 1/6 0
+ // 0 4/6 2/6 0
+ // 0 2/6 4/6 0
+ // 0 1/6 4/6 1/6
+ //
+ return curve4_points(
+ (x1 + 4*x2 + x3) / 6,
+ (y1 + 4*y2 + y3) / 6,
+ (4*x2 + 2*x3) / 6,
+ (4*y2 + 2*y3) / 6,
+ (2*x2 + 4*x3) / 6,
+ (2*y2 + 4*y3) / 6,
+ (x2 + 4*x3 + x4) / 6,
+ (y2 + 4*y3 + y4) / 6);
+ }
+
+
+ //-----------------------------------------------------------------------
+ inline curve4_points
+ ubspline_to_bezier(const curve4_points& cp)
+ {
+ return ubspline_to_bezier(cp[0], cp[1], cp[2], cp[3],
+ cp[4], cp[5], cp[6], cp[7]);
+ }
+
+
+
+
+ //------------------------------------------------------hermite_to_bezier
+ inline curve4_points hermite_to_bezier(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4)
+ {
+ // Trans. matrix Hermite to Bezier
+ //
+ // 1 0 0 0
+ // 1 0 1/3 0
+ // 0 1 0 -1/3
+ // 0 1 0 0
+ //
+ return curve4_points(
+ x1,
+ y1,
+ (3*x1 + x3) / 3,
+ (3*y1 + y3) / 3,
+ (3*x2 - x4) / 3,
+ (3*y2 - y4) / 3,
+ x2,
+ y2);
+ }
+
+
+
+ //-----------------------------------------------------------------------
+ inline curve4_points
+ hermite_to_bezier(const curve4_points& cp)
+ {
+ return hermite_to_bezier(cp[0], cp[1], cp[2], cp[3],
+ cp[4], cp[5], cp[6], cp[7]);
+ }
+
+
+ //-------------------------------------------------------------curve4_div
+ class curve4_div
+ {
+ public:
+ curve4_div() :
+ m_approximation_scale(1.0),
+ m_angle_tolerance(0.0),
+ m_cusp_limit(0.0),
+ m_count(0)
+ {}
+
+ curve4_div(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4) :
+ m_approximation_scale(1.0),
+ m_angle_tolerance(0.0),
+ m_cusp_limit(0.0),
+ m_count(0)
+ {
+ init(x1, y1, x2, y2, x3, y3, x4, y4);
+ }
+
+        curve4_div(const curve4_points& cp) :
+            m_approximation_scale(1.0),
+            m_angle_tolerance(0.0),
+            m_cusp_limit(0.0),
+            m_count(0)
+        {
+            init(cp[0], cp[1], cp[2], cp[3], cp[4], cp[5], cp[6], cp[7]);
+        }
+
+ void reset() { m_points.remove_all(); m_count = 0; }
+ void init(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4);
+
+ void init(const curve4_points& cp)
+ {
+ init(cp[0], cp[1], cp[2], cp[3], cp[4], cp[5], cp[6], cp[7]);
+ }
+
+ void approximation_method(curve_approximation_method_e) {}
+
+ curve_approximation_method_e approximation_method() const
+ {
+ return curve_div;
+ }
+
+ void approximation_scale(double s) { m_approximation_scale = s; }
+ double approximation_scale() const { return m_approximation_scale; }
+
+ void angle_tolerance(double a) { m_angle_tolerance = a; }
+ double angle_tolerance() const { return m_angle_tolerance; }
+
+ void cusp_limit(double v)
+ {
+ m_cusp_limit = (v == 0.0) ? 0.0 : pi - v;
+ }
+
+ double cusp_limit() const
+ {
+ return (m_cusp_limit == 0.0) ? 0.0 : pi - m_cusp_limit;
+ }
+
+ void rewind(unsigned)
+ {
+ m_count = 0;
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_count >= m_points.size()) return path_cmd_stop;
+ const point_d& p = m_points[m_count++];
+ *x = p.x;
+ *y = p.y;
+ return (m_count == 1) ? path_cmd_move_to : path_cmd_line_to;
+ }
+
+ private:
+ void bezier(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4);
+
+ void recursive_bezier(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4,
+ unsigned level);
+
+ double m_approximation_scale;
+ double m_distance_tolerance_square;
+ double m_angle_tolerance;
+ double m_cusp_limit;
+ unsigned m_count;
+ pod_bvector<point_d> m_points;
+ };
+
+
+ //-----------------------------------------------------------------curve3
+ class curve3
+ {
+ public:
+ curve3() : m_approximation_method(curve_div) {}
+ curve3(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3) :
+ m_approximation_method(curve_div)
+ {
+ init(x1, y1, x2, y2, x3, y3);
+ }
+
+ void reset()
+ {
+ m_curve_inc.reset();
+ m_curve_div.reset();
+ }
+
+ void init(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3)
+ {
+ if(m_approximation_method == curve_inc)
+ {
+ m_curve_inc.init(x1, y1, x2, y2, x3, y3);
+ }
+ else
+ {
+ m_curve_div.init(x1, y1, x2, y2, x3, y3);
+ }
+ }
+
+ void approximation_method(curve_approximation_method_e v)
+ {
+ m_approximation_method = v;
+ }
+
+ curve_approximation_method_e approximation_method() const
+ {
+ return m_approximation_method;
+ }
+
+ void approximation_scale(double s)
+ {
+ m_curve_inc.approximation_scale(s);
+ m_curve_div.approximation_scale(s);
+ }
+
+ double approximation_scale() const
+ {
+ return m_curve_inc.approximation_scale();
+ }
+
+ void angle_tolerance(double a)
+ {
+ m_curve_div.angle_tolerance(a);
+ }
+
+ double angle_tolerance() const
+ {
+ return m_curve_div.angle_tolerance();
+ }
+
+ void cusp_limit(double v)
+ {
+ m_curve_div.cusp_limit(v);
+ }
+
+ double cusp_limit() const
+ {
+ return m_curve_div.cusp_limit();
+ }
+
+ void rewind(unsigned path_id)
+ {
+ if(m_approximation_method == curve_inc)
+ {
+ m_curve_inc.rewind(path_id);
+ }
+ else
+ {
+ m_curve_div.rewind(path_id);
+ }
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_approximation_method == curve_inc)
+ {
+ return m_curve_inc.vertex(x, y);
+ }
+ return m_curve_div.vertex(x, y);
+ }
+
+ private:
+ curve3_inc m_curve_inc;
+ curve3_div m_curve_div;
+ curve_approximation_method_e m_approximation_method;
+ };
+
+
+
+
+
+ //-----------------------------------------------------------------curve4
+ class curve4
+ {
+ public:
+ curve4() : m_approximation_method(curve_div) {}
+ curve4(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4) :
+ m_approximation_method(curve_div)
+ {
+ init(x1, y1, x2, y2, x3, y3, x4, y4);
+ }
+
+ curve4(const curve4_points& cp) :
+ m_approximation_method(curve_div)
+ {
+ init(cp[0], cp[1], cp[2], cp[3], cp[4], cp[5], cp[6], cp[7]);
+ }
+
+ void reset()
+ {
+ m_curve_inc.reset();
+ m_curve_div.reset();
+ }
+
+ void init(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4)
+ {
+ if(m_approximation_method == curve_inc)
+ {
+ m_curve_inc.init(x1, y1, x2, y2, x3, y3, x4, y4);
+ }
+ else
+ {
+ m_curve_div.init(x1, y1, x2, y2, x3, y3, x4, y4);
+ }
+ }
+
+ void init(const curve4_points& cp)
+ {
+ init(cp[0], cp[1], cp[2], cp[3], cp[4], cp[5], cp[6], cp[7]);
+ }
+
+ void approximation_method(curve_approximation_method_e v)
+ {
+ m_approximation_method = v;
+ }
+
+ curve_approximation_method_e approximation_method() const
+ {
+ return m_approximation_method;
+ }
+
+ void approximation_scale(double s)
+ {
+ m_curve_inc.approximation_scale(s);
+ m_curve_div.approximation_scale(s);
+ }
+ double approximation_scale() const { return m_curve_inc.approximation_scale(); }
+
+ void angle_tolerance(double v)
+ {
+ m_curve_div.angle_tolerance(v);
+ }
+
+ double angle_tolerance() const
+ {
+ return m_curve_div.angle_tolerance();
+ }
+
+ void cusp_limit(double v)
+ {
+ m_curve_div.cusp_limit(v);
+ }
+
+ double cusp_limit() const
+ {
+ return m_curve_div.cusp_limit();
+ }
+
+ void rewind(unsigned path_id)
+ {
+ if(m_approximation_method == curve_inc)
+ {
+ m_curve_inc.rewind(path_id);
+ }
+ else
+ {
+ m_curve_div.rewind(path_id);
+ }
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_approximation_method == curve_inc)
+ {
+ return m_curve_inc.vertex(x, y);
+ }
+ return m_curve_div.vertex(x, y);
+ }
+
+ private:
+ curve4_inc m_curve_inc;
+ curve4_div m_curve_div;
+ curve_approximation_method_e m_approximation_method;
+ };
+
+
+
+
+}
+
+#endif
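A small sketch of how the curve classes above are consumed (illustrative only), plus a sanity check of catrom_to_bezier():

    // Flatten a cubic Bezier into move_to/line_to commands.
    agg::curve4 c(10, 10, 40, 100, 160, 100, 190, 10);
    c.approximation_scale(2.0);            // finer subdivision, e.g. at 2x zoom

    double x, y;
    unsigned cmd;
    c.rewind(0);
    while(!agg::is_stop(cmd = c.vertex(&x, &y)))
    {
        // first command is path_cmd_move_to, the rest are path_cmd_line_to
    }

    // Sanity check of catrom_to_bezier(): for collinear, evenly spaced points
    // (0,0) (1,0) (2,0) (3,0) it returns control x-coordinates 1, 4/3, 5/3, 2,
    // i.e. the Bezier segment spanning the two middle points, as expected.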
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_dda_line.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_dda_line.h
new file mode 100644
index 00000000000..f589e76b83d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_dda_line.h
@@ -0,0 +1,290 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// classes dda_line_interpolator, dda2_line_interpolator
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_DDA_LINE_INCLUDED
+#define AGG_DDA_LINE_INCLUDED
+
+#include <stdlib.h>
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //===================================================dda_line_interpolator
+ template<int FractionShift, int YShift=0> class dda_line_interpolator
+ {
+ public:
+ //--------------------------------------------------------------------
+ dda_line_interpolator() {}
+
+ //--------------------------------------------------------------------
+ dda_line_interpolator(int y1, int y2, unsigned count) :
+ m_y(y1),
+ m_inc(((y2 - y1) << FractionShift) / int(count)),
+ m_dy(0)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ void operator ++ ()
+ {
+ m_dy += m_inc;
+ }
+
+ //--------------------------------------------------------------------
+ void operator -- ()
+ {
+ m_dy -= m_inc;
+ }
+
+ //--------------------------------------------------------------------
+ void operator += (unsigned n)
+ {
+ m_dy += m_inc * n;
+ }
+
+ //--------------------------------------------------------------------
+ void operator -= (unsigned n)
+ {
+ m_dy -= m_inc * n;
+ }
+
+
+ //--------------------------------------------------------------------
+ int y() const { return m_y + (m_dy >> (FractionShift-YShift)); }
+ int dy() const { return m_dy; }
+
+
+ private:
+ int m_y;
+ int m_inc;
+ int m_dy;
+ };
+
+
+
+
+
+ //=================================================dda2_line_interpolator
+ class dda2_line_interpolator
+ {
+ public:
+ typedef int save_data_type;
+ enum save_size_e { save_size = 2 };
+
+ //--------------------------------------------------------------------
+ dda2_line_interpolator() {}
+
+ //-------------------------------------------- Forward-adjusted line
+ dda2_line_interpolator(int y1, int y2, int count) :
+ m_cnt(count <= 0 ? 1 : count),
+ m_lft((y2 - y1) / m_cnt),
+ m_rem((y2 - y1) % m_cnt),
+ m_mod(m_rem),
+ m_y(y1)
+ {
+ if(m_mod <= 0)
+ {
+ m_mod += count;
+ m_rem += count;
+ m_lft--;
+ }
+ m_mod -= count;
+ }
+
+ //-------------------------------------------- Backward-adjusted line
+ dda2_line_interpolator(int y1, int y2, int count, int) :
+ m_cnt(count <= 0 ? 1 : count),
+ m_lft((y2 - y1) / m_cnt),
+ m_rem((y2 - y1) % m_cnt),
+ m_mod(m_rem),
+ m_y(y1)
+ {
+ if(m_mod <= 0)
+ {
+ m_mod += count;
+ m_rem += count;
+ m_lft--;
+ }
+ }
+
+ //-------------------------------------------- Backward-adjusted line
+ dda2_line_interpolator(int y, int count) :
+ m_cnt(count <= 0 ? 1 : count),
+ m_lft(y / m_cnt),
+ m_rem(y % m_cnt),
+ m_mod(m_rem),
+ m_y(0)
+ {
+ if(m_mod <= 0)
+ {
+ m_mod += count;
+ m_rem += count;
+ m_lft--;
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void save(save_data_type* data) const
+ {
+ data[0] = m_mod;
+ data[1] = m_y;
+ }
+
+ //--------------------------------------------------------------------
+ void load(const save_data_type* data)
+ {
+ m_mod = data[0];
+ m_y = data[1];
+ }
+
+ //--------------------------------------------------------------------
+ void operator++()
+ {
+ m_mod += m_rem;
+ m_y += m_lft;
+ if(m_mod > 0)
+ {
+ m_mod -= m_cnt;
+ m_y++;
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void operator--()
+ {
+ if(m_mod <= m_rem)
+ {
+ m_mod += m_cnt;
+ m_y--;
+ }
+ m_mod -= m_rem;
+ m_y -= m_lft;
+ }
+
+ //--------------------------------------------------------------------
+ void adjust_forward()
+ {
+ m_mod -= m_cnt;
+ }
+
+ //--------------------------------------------------------------------
+ void adjust_backward()
+ {
+ m_mod += m_cnt;
+ }
+
+ //--------------------------------------------------------------------
+ int mod() const { return m_mod; }
+ int rem() const { return m_rem; }
+ int lft() const { return m_lft; }
+
+ //--------------------------------------------------------------------
+ int y() const { return m_y; }
+
+ private:
+ int m_cnt;
+ int m_lft;
+ int m_rem;
+ int m_mod;
+ int m_y;
+ };
+
+
+
+
+
+
+
+ //---------------------------------------------line_bresenham_interpolator
+ class line_bresenham_interpolator
+ {
+ public:
+ enum subpixel_scale_e
+ {
+ subpixel_shift = 8,
+ subpixel_scale = 1 << subpixel_shift,
+ subpixel_mask = subpixel_scale - 1
+ };
+
+ //--------------------------------------------------------------------
+ static int line_lr(int v) { return v >> subpixel_shift; }
+
+ //--------------------------------------------------------------------
+ line_bresenham_interpolator(int x1, int y1, int x2, int y2) :
+ m_x1_lr(line_lr(x1)),
+ m_y1_lr(line_lr(y1)),
+ m_x2_lr(line_lr(x2)),
+ m_y2_lr(line_lr(y2)),
+ m_ver(abs(m_x2_lr - m_x1_lr) < abs(m_y2_lr - m_y1_lr)),
+ m_len(m_ver ? abs(m_y2_lr - m_y1_lr) :
+ abs(m_x2_lr - m_x1_lr)),
+ m_inc(m_ver ? ((y2 > y1) ? 1 : -1) : ((x2 > x1) ? 1 : -1)),
+ m_interpolator(m_ver ? x1 : y1,
+ m_ver ? x2 : y2,
+ m_len)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ bool is_ver() const { return m_ver; }
+ unsigned len() const { return m_len; }
+ int inc() const { return m_inc; }
+
+ //--------------------------------------------------------------------
+ void hstep()
+ {
+ ++m_interpolator;
+ m_x1_lr += m_inc;
+ }
+
+ //--------------------------------------------------------------------
+ void vstep()
+ {
+ ++m_interpolator;
+ m_y1_lr += m_inc;
+ }
+
+ //--------------------------------------------------------------------
+ int x1() const { return m_x1_lr; }
+ int y1() const { return m_y1_lr; }
+ int x2() const { return line_lr(m_interpolator.y()); }
+ int y2() const { return line_lr(m_interpolator.y()); }
+ int x2_hr() const { return m_interpolator.y(); }
+ int y2_hr() const { return m_interpolator.y(); }
+
+ private:
+ int m_x1_lr;
+ int m_y1_lr;
+ int m_x2_lr;
+ int m_y2_lr;
+ bool m_ver;
+ unsigned m_len;
+ int m_inc;
+ dda2_line_interpolator m_interpolator;
+
+ };
+
+
+}
+
+
+
+#endif
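A worked sketch of dda2_line_interpolator (illustrative only): it spreads an integer delta over a fixed number of steps with no division inside the loop.

    agg::dda2_line_interpolator li(0, 10, 5);   // interpolate 0 -> 10 in 5 steps
    // li.y() reads 0, 2, 4, 6, 8 before each of the five increments below,
    // and reaches 10 after the last one (forward-adjusted rounding).
    for(int i = 0; i < 5; ++i)
    {
        ++li;
    }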
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_gamma_functions.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_gamma_functions.h
new file mode 100644
index 00000000000..5d720daa9a7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_gamma_functions.h
@@ -0,0 +1,132 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_GAMMA_FUNCTIONS_INCLUDED
+#define AGG_GAMMA_FUNCTIONS_INCLUDED
+
+#include <math.h>
+#include "agg_basics.h"
+
+namespace agg
+{
+ //===============================================================gamma_none
+ struct gamma_none
+ {
+ double operator()(double x) const { return x; }
+ };
+
+
+ //==============================================================gamma_power
+ class gamma_power
+ {
+ public:
+ gamma_power() : m_gamma(1.0) {}
+ gamma_power(double g) : m_gamma(g) {}
+
+ void gamma(double g) { m_gamma = g; }
+ double gamma() const { return m_gamma; }
+
+ double operator() (double x) const
+ {
+ return pow(x, m_gamma);
+ }
+
+ private:
+ double m_gamma;
+ };
+
+
+ //==========================================================gamma_threshold
+ class gamma_threshold
+ {
+ public:
+ gamma_threshold() : m_threshold(0.5) {}
+ gamma_threshold(double t) : m_threshold(t) {}
+
+ void threshold(double t) { m_threshold = t; }
+ double threshold() const { return m_threshold; }
+
+ double operator() (double x) const
+ {
+ return (x < m_threshold) ? 0.0 : 1.0;
+ }
+
+ private:
+ double m_threshold;
+ };
+
+
+ //============================================================gamma_linear
+ class gamma_linear
+ {
+ public:
+ gamma_linear() : m_start(0.0), m_end(1.0) {}
+ gamma_linear(double s, double e) : m_start(s), m_end(e) {}
+
+ void set(double s, double e) { m_start = s; m_end = e; }
+ void start(double s) { m_start = s; }
+ void end(double e) { m_end = e; }
+ double start() const { return m_start; }
+ double end() const { return m_end; }
+
+ double operator() (double x) const
+ {
+ if(x < m_start) return 0.0;
+ if(x > m_end) return 1.0;
+ return (x - m_start) / (m_end - m_start);
+ }
+
+ private:
+ double m_start;
+ double m_end;
+ };
+
+
+ //==========================================================gamma_multiply
+ class gamma_multiply
+ {
+ public:
+ gamma_multiply() : m_mul(1.0) {}
+ gamma_multiply(double v) : m_mul(v) {}
+
+ void value(double v) { m_mul = v; }
+ double value() const { return m_mul; }
+
+ double operator() (double x) const
+ {
+ double y = x * m_mul;
+ if(y > 1.0) y = 1.0;
+ return y;
+ }
+
+ private:
+ double m_mul;
+ };
+
+ inline double sRGB_to_linear(double x)
+ {
+ return (x <= 0.04045) ? (x / 12.92) : pow((x + 0.055) / (1.055), 2.4);
+ }
+
+ inline double linear_to_sRGB(double x)
+ {
+ return (x <= 0.0031308) ? (x * 12.92) : (1.055 * pow(x, 1 / 2.4) - 0.055);
+ }
+}
+
+#endif
+
+
+
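A tiny sketch of the gamma functors above (illustrative only); any of them can be passed wherever AGG expects a unary gamma function, typically a rasterizer's gamma() method.

    agg::gamma_power  g_pow(1.8);
    agg::gamma_linear g_lin(0.1, 0.9);
    double a = g_pow(0.5);    // pow(0.5, 1.8) ~= 0.287
    double b = g_lin(0.5);    // (0.5 - 0.1) / (0.9 - 0.1) = 0.5

    // The sRGB helpers round-trip within floating-point error:
    double x = agg::linear_to_sRGB(agg::sRGB_to_linear(0.25));   // ~= 0.25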
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_gamma_lut.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_gamma_lut.h
new file mode 100644
index 00000000000..ef1e38d8092
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_gamma_lut.h
@@ -0,0 +1,305 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_GAMMA_LUT_INCLUDED
+#define AGG_GAMMA_LUT_INCLUDED
+
+#include <math.h>
+#include "agg_basics.h"
+#include "agg_gamma_functions.h"
+
+namespace agg
+{
+ template<class LoResT=int8u,
+ class HiResT=int8u,
+ unsigned GammaShift=8,
+ unsigned HiResShift=8> class gamma_lut
+ {
+ public:
+ typedef gamma_lut<LoResT, HiResT, GammaShift, HiResShift> self_type;
+
+ enum gamma_scale_e
+ {
+ gamma_shift = GammaShift,
+ gamma_size = 1 << gamma_shift,
+ gamma_mask = gamma_size - 1
+ };
+
+ enum hi_res_scale_e
+ {
+ hi_res_shift = HiResShift,
+ hi_res_size = 1 << hi_res_shift,
+ hi_res_mask = hi_res_size - 1
+ };
+
+ ~gamma_lut()
+ {
+ pod_allocator<LoResT>::deallocate(m_inv_gamma, hi_res_size);
+ pod_allocator<HiResT>::deallocate(m_dir_gamma, gamma_size);
+ }
+
+ gamma_lut() :
+ m_gamma(1.0),
+ m_dir_gamma(pod_allocator<HiResT>::allocate(gamma_size)),
+ m_inv_gamma(pod_allocator<LoResT>::allocate(hi_res_size))
+ {
+ unsigned i;
+ for(i = 0; i < gamma_size; i++)
+ {
+ m_dir_gamma[i] = HiResT(i << (hi_res_shift - gamma_shift));
+ }
+
+ for(i = 0; i < hi_res_size; i++)
+ {
+ m_inv_gamma[i] = LoResT(i >> (hi_res_shift - gamma_shift));
+ }
+ }
+
+ gamma_lut(double g) :
+ m_gamma(1.0),
+ m_dir_gamma(pod_allocator<HiResT>::allocate(gamma_size)),
+ m_inv_gamma(pod_allocator<LoResT>::allocate(hi_res_size))
+ {
+ gamma(g);
+ }
+
+ void gamma(double g)
+ {
+ m_gamma = g;
+
+ unsigned i;
+ for(i = 0; i < gamma_size; i++)
+ {
+ m_dir_gamma[i] = (HiResT)
+ uround(pow(i / double(gamma_mask), m_gamma) * double(hi_res_mask));
+ }
+
+ double inv_g = 1.0 / g;
+ for(i = 0; i < hi_res_size; i++)
+ {
+ m_inv_gamma[i] = (LoResT)
+ uround(pow(i / double(hi_res_mask), inv_g) * double(gamma_mask));
+ }
+ }
+
+ double gamma() const
+ {
+ return m_gamma;
+ }
+
+ HiResT dir(LoResT v) const
+ {
+ return m_dir_gamma[unsigned(v)];
+ }
+
+ LoResT inv(HiResT v) const
+ {
+ return m_inv_gamma[unsigned(v)];
+ }
+
+ private:
+ gamma_lut(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ double m_gamma;
+ HiResT* m_dir_gamma;
+ LoResT* m_inv_gamma;
+ };
+
+ //
+ // sRGB support classes
+ //
+
+ // Optimized sRGB lookup table. The direct conversion (sRGB to linear)
+ // is a straightforward lookup. The inverse conversion (linear to sRGB)
+ // is implemented using binary search.
+ template<class LinearType>
+ class sRGB_lut_base
+ {
+ public:
+ LinearType dir(int8u v) const
+ {
+ return m_dir_table[v];
+ }
+
+ int8u inv(LinearType v) const
+ {
+ // Unrolled binary search.
+ int8u x = 0;
+ if (v > m_inv_table[128]) x = 128;
+ if (v > m_inv_table[x + 64]) x += 64;
+ if (v > m_inv_table[x + 32]) x += 32;
+ if (v > m_inv_table[x + 16]) x += 16;
+ if (v > m_inv_table[x + 8]) x += 8;
+ if (v > m_inv_table[x + 4]) x += 4;
+ if (v > m_inv_table[x + 2]) x += 2;
+ if (v > m_inv_table[x + 1]) x += 1;
+ return x;
+ }
+
+ protected:
+ LinearType m_dir_table[256];
+ LinearType m_inv_table[256];
+
+ // Only derived classes may instantiate.
+ sRGB_lut_base()
+ {
+ }
+ };
+
+ // sRGB_lut - implements sRGB conversion for the various types.
+ // Base template is undefined, specializations are provided below.
+ template<class LinearType>
+ class sRGB_lut;
+
+ template<>
+ class sRGB_lut<float> : public sRGB_lut_base<float>
+ {
+ public:
+ sRGB_lut()
+ {
+ // Generate lookup tables.
+ m_dir_table[0] = 0;
+ m_inv_table[0] = 0;
+ for (unsigned i = 1; i <= 255; ++i)
+ {
+ // Floating-point RGB is in range [0,1].
+ m_dir_table[i] = float(sRGB_to_linear(i / 255.0));
+ m_inv_table[i] = float(sRGB_to_linear((i - 0.5) / 255.0));
+ }
+ }
+ };
+
+ template<>
+ class sRGB_lut<int16u> : public sRGB_lut_base<int16u>
+ {
+ public:
+ sRGB_lut()
+ {
+ // Generate lookup tables.
+ m_dir_table[0] = 0;
+ m_inv_table[0] = 0;
+ for (unsigned i = 1; i <= 255; ++i)
+ {
+ // 16-bit RGB is in range [0,65535].
+ m_dir_table[i] = uround(65535.0 * sRGB_to_linear(i / 255.0));
+ m_inv_table[i] = uround(65535.0 * sRGB_to_linear((i - 0.5) / 255.0));
+ }
+ }
+ };
+
+ template<>
+ class sRGB_lut<int8u> : public sRGB_lut_base<int8u>
+ {
+ public:
+ sRGB_lut()
+ {
+ // Generate lookup tables.
+ m_dir_table[0] = 0;
+ m_inv_table[0] = 0;
+ for (unsigned i = 1; i <= 255; ++i)
+ {
+ // 8-bit RGB is handled with simple bidirectional lookup tables.
+ m_dir_table[i] = uround(255.0 * sRGB_to_linear(i / 255.0));
+ m_inv_table[i] = uround(255.0 * linear_to_sRGB(i / 255.0));
+ }
+ }
+
+ int8u inv(int8u v) const
+ {
+ // In this case, the inverse transform is a simple lookup.
+ return m_inv_table[v];
+ }
+ };
+
+ // Common base class for sRGB_conv objects. Defines an internal
+ // sRGB_lut object so that users don't have to.
+ template<class T>
+ class sRGB_conv_base
+ {
+ public:
+ static T rgb_from_sRGB(int8u x)
+ {
+ return lut.dir(x);
+ }
+
+ static int8u rgb_to_sRGB(T x)
+ {
+ return lut.inv(x);
+ }
+
+ private:
+ static sRGB_lut<T> lut;
+ };
+
+    // Definition of sRGB_conv_base::lut. Because this is a template,
+ // we don't need to place the definition in a cpp file. Hurrah.
+ template<class T>
+ sRGB_lut<T> sRGB_conv_base<T>::lut;
+
+ // Wrapper for sRGB-linear conversion.
+ // Base template is undefined, specializations are provided below.
+ template<class T>
+ class sRGB_conv;
+
+ template<>
+ class sRGB_conv<float> : public sRGB_conv_base<float>
+ {
+ public:
+ static float alpha_from_sRGB(int8u x)
+ {
+ return float(x / 255.0);
+ }
+
+ static int8u alpha_to_sRGB(float x)
+ {
+ if (x <= 0) return 0;
+ else if (x >= 1) return 255;
+ else return int8u(0.5 + x * 255);
+ }
+ };
+
+ template<>
+ class sRGB_conv<int16u> : public sRGB_conv_base<int16u>
+ {
+ public:
+ static int16u alpha_from_sRGB(int8u x)
+ {
+ return (x << 8) | x;
+ }
+
+ static int8u alpha_to_sRGB(int16u x)
+ {
+ return x >> 8;
+ }
+ };
+
+ template<>
+ class sRGB_conv<int8u> : public sRGB_conv_base<int8u>
+ {
+ public:
+ static int8u alpha_from_sRGB(int8u x)
+ {
+ return x;
+ }
+
+ static int8u alpha_to_sRGB(int8u x)
+ {
+ return x;
+ }
+ };
+}
+
+#endif
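A sketch of the lookup tables defined above (illustrative only); all identifiers come from this header or agg_basics.h.

    agg::gamma_lut<> lut(2.2);      // int8u -> int8u with gamma 2.2
    agg::int8u hi = lut.dir(128);   // uround(pow(128/255., 2.2) * 255) == 56
    agg::int8u lo = lut.inv(hi);    // inverse table maps 56 back to 128

    float      lin  = agg::sRGB_conv<float>::rgb_from_sRGB(200);  // sRGB byte -> linear
    agg::int8u back = agg::sRGB_conv<float>::rgb_to_sRGB(lin);    // ~200 again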
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_image_accessors.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_image_accessors.h
new file mode 100644
index 00000000000..c651d6d2e8d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_image_accessors.h
@@ -0,0 +1,481 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_IMAGE_ACCESSORS_INCLUDED
+#define AGG_IMAGE_ACCESSORS_INCLUDED
+
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //-----------------------------------------------------image_accessor_clip
+ template<class PixFmt> class image_accessor_clip
+ {
+ public:
+ typedef PixFmt pixfmt_type;
+ typedef typename pixfmt_type::color_type color_type;
+ typedef typename pixfmt_type::order_type order_type;
+ typedef typename pixfmt_type::value_type value_type;
+ enum pix_width_e { pix_width = pixfmt_type::pix_width };
+
+ image_accessor_clip() {}
+ explicit image_accessor_clip(pixfmt_type& pixf,
+ const color_type& bk) :
+ m_pixf(&pixf)
+ {
+ pixfmt_type::make_pix(m_bk_buf, bk);
+ }
+
+ void attach(pixfmt_type& pixf)
+ {
+ m_pixf = &pixf;
+ }
+
+ void background_color(const color_type& bk)
+ {
+ pixfmt_type::make_pix(m_bk_buf, bk);
+ }
+
+ private:
+ AGG_INLINE const int8u* pixel() const
+ {
+ if(m_y >= 0 && m_y < (int)m_pixf->height() &&
+ m_x >= 0 && m_x < (int)m_pixf->width())
+ {
+ return m_pixf->pix_ptr(m_x, m_y);
+ }
+ return m_bk_buf;
+ }
+
+ public:
+ AGG_INLINE const int8u* span(int x, int y, unsigned len)
+ {
+ m_x = m_x0 = x;
+ m_y = y;
+ if(y >= 0 && y < (int)m_pixf->height() &&
+ x >= 0 && x+(int)len <= (int)m_pixf->width())
+ {
+ return m_pix_ptr = m_pixf->pix_ptr(x, y);
+ }
+ m_pix_ptr = 0;
+ return pixel();
+ }
+
+ AGG_INLINE const int8u* next_x()
+ {
+ if(m_pix_ptr) return m_pix_ptr += pix_width;
+ ++m_x;
+ return pixel();
+ }
+
+ AGG_INLINE const int8u* next_y()
+ {
+ ++m_y;
+ m_x = m_x0;
+ if(m_pix_ptr &&
+ m_y >= 0 && m_y < (int)m_pixf->height())
+ {
+ return m_pix_ptr = m_pixf->pix_ptr(m_x, m_y);
+ }
+ m_pix_ptr = 0;
+ return pixel();
+ }
+
+ private:
+ const pixfmt_type* m_pixf;
+ int8u m_bk_buf[pix_width];
+ int m_x, m_x0, m_y;
+ const int8u* m_pix_ptr;
+ };
+
+
+
+
+ //--------------------------------------------------image_accessor_no_clip
+ template<class PixFmt> class image_accessor_no_clip
+ {
+ public:
+ typedef PixFmt pixfmt_type;
+ typedef typename pixfmt_type::color_type color_type;
+ typedef typename pixfmt_type::order_type order_type;
+ typedef typename pixfmt_type::value_type value_type;
+ enum pix_width_e { pix_width = pixfmt_type::pix_width };
+
+ image_accessor_no_clip() {}
+ explicit image_accessor_no_clip(pixfmt_type& pixf) :
+ m_pixf(&pixf)
+ {}
+
+ void attach(pixfmt_type& pixf)
+ {
+ m_pixf = &pixf;
+ }
+
+ AGG_INLINE const int8u* span(int x, int y, unsigned)
+ {
+ m_x = x;
+ m_y = y;
+ return m_pix_ptr = m_pixf->pix_ptr(x, y);
+ }
+
+ AGG_INLINE const int8u* next_x()
+ {
+ return m_pix_ptr += pix_width;
+ }
+
+ AGG_INLINE const int8u* next_y()
+ {
+ ++m_y;
+ return m_pix_ptr = m_pixf->pix_ptr(m_x, m_y);
+ }
+
+ private:
+ const pixfmt_type* m_pixf;
+ int m_x, m_y;
+ const int8u* m_pix_ptr;
+ };
+
+
+
+
+ //----------------------------------------------------image_accessor_clone
+ template<class PixFmt> class image_accessor_clone
+ {
+ public:
+ typedef PixFmt pixfmt_type;
+ typedef typename pixfmt_type::color_type color_type;
+ typedef typename pixfmt_type::order_type order_type;
+ typedef typename pixfmt_type::value_type value_type;
+ enum pix_width_e { pix_width = pixfmt_type::pix_width };
+
+ image_accessor_clone() {}
+ explicit image_accessor_clone(pixfmt_type& pixf) :
+ m_pixf(&pixf)
+ {}
+
+ void attach(pixfmt_type& pixf)
+ {
+ m_pixf = &pixf;
+ }
+
+ private:
+ AGG_INLINE const int8u* pixel() const
+ {
+ int x = m_x;
+ int y = m_y;
+ if(x < 0) x = 0;
+ if(y < 0) y = 0;
+ if(x >= (int)m_pixf->width()) x = m_pixf->width() - 1;
+ if(y >= (int)m_pixf->height()) y = m_pixf->height() - 1;
+ return m_pixf->pix_ptr(x, y);
+ }
+
+ public:
+ AGG_INLINE const int8u* span(int x, int y, unsigned len)
+ {
+ m_x = m_x0 = x;
+ m_y = y;
+ if(y >= 0 && y < (int)m_pixf->height() &&
+ x >= 0 && x+len <= (int)m_pixf->width())
+ {
+ return m_pix_ptr = m_pixf->pix_ptr(x, y);
+ }
+ m_pix_ptr = 0;
+ return pixel();
+ }
+
+ AGG_INLINE const int8u* next_x()
+ {
+ if(m_pix_ptr) return m_pix_ptr += pix_width;
+ ++m_x;
+ return pixel();
+ }
+
+ AGG_INLINE const int8u* next_y()
+ {
+ ++m_y;
+ m_x = m_x0;
+ if(m_pix_ptr &&
+ m_y >= 0 && m_y < (int)m_pixf->height())
+ {
+ return m_pix_ptr = m_pixf->pix_ptr(m_x, m_y);
+ }
+ m_pix_ptr = 0;
+ return pixel();
+ }
+
+ private:
+ const pixfmt_type* m_pixf;
+ int m_x, m_x0, m_y;
+ const int8u* m_pix_ptr;
+ };
+
+
+
+
+
+ //-----------------------------------------------------image_accessor_wrap
+ template<class PixFmt, class WrapX, class WrapY> class image_accessor_wrap
+ {
+ public:
+ typedef PixFmt pixfmt_type;
+ typedef typename pixfmt_type::color_type color_type;
+ typedef typename pixfmt_type::order_type order_type;
+ typedef typename pixfmt_type::value_type value_type;
+ enum pix_width_e { pix_width = pixfmt_type::pix_width };
+
+ image_accessor_wrap() {}
+ explicit image_accessor_wrap(pixfmt_type& pixf) :
+ m_pixf(&pixf),
+ m_wrap_x(pixf.width()),
+ m_wrap_y(pixf.height())
+ {}
+
+ void attach(pixfmt_type& pixf)
+ {
+ m_pixf = &pixf;
+ }
+
+ AGG_INLINE const int8u* span(int x, int y, unsigned)
+ {
+ m_x = x;
+ m_row_ptr = m_pixf->pix_ptr(0, m_wrap_y(y));
+ return m_row_ptr + m_wrap_x(x) * pix_width;
+ }
+
+ AGG_INLINE const int8u* next_x()
+ {
+ int x = ++m_wrap_x;
+ return m_row_ptr + x * pix_width;
+ }
+
+ AGG_INLINE const int8u* next_y()
+ {
+ m_row_ptr = m_pixf->pix_ptr(0, ++m_wrap_y);
+ return m_row_ptr + m_wrap_x(m_x) * pix_width;
+ }
+
+ private:
+ const pixfmt_type* m_pixf;
+ const int8u* m_row_ptr;
+ int m_x;
+ WrapX m_wrap_x;
+ WrapY m_wrap_y;
+ };
+
+
+
+
+ //--------------------------------------------------------wrap_mode_repeat
+ class wrap_mode_repeat
+ {
+ public:
+ wrap_mode_repeat() {}
+ wrap_mode_repeat(unsigned size) :
+ m_size(size),
+ m_add(size * (0x3FFFFFFF / size)),
+ m_value(0)
+ {}
+
+ AGG_INLINE unsigned operator() (int v)
+ {
+ return m_value = (unsigned(v) + m_add) % m_size;
+ }
+
+ AGG_INLINE unsigned operator++ ()
+ {
+ ++m_value;
+ if(m_value >= m_size) m_value = 0;
+ return m_value;
+ }
+ private:
+ unsigned m_size;
+ unsigned m_add;
+ unsigned m_value;
+ };
+
+
+ //---------------------------------------------------wrap_mode_repeat_pow2
+ class wrap_mode_repeat_pow2
+ {
+ public:
+ wrap_mode_repeat_pow2() {}
+ wrap_mode_repeat_pow2(unsigned size) : m_value(0)
+ {
+ m_mask = 1;
+ while(m_mask < size) m_mask = (m_mask << 1) | 1;
+ m_mask >>= 1;
+ }
+ AGG_INLINE unsigned operator() (int v)
+ {
+ return m_value = unsigned(v) & m_mask;
+ }
+ AGG_INLINE unsigned operator++ ()
+ {
+ ++m_value;
+ if(m_value > m_mask) m_value = 0;
+ return m_value;
+ }
+ private:
+ unsigned m_mask;
+ unsigned m_value;
+ };
+
+
+ //----------------------------------------------wrap_mode_repeat_auto_pow2
+ class wrap_mode_repeat_auto_pow2
+ {
+ public:
+ wrap_mode_repeat_auto_pow2() {}
+ wrap_mode_repeat_auto_pow2(unsigned size) :
+ m_size(size),
+ m_add(size * (0x3FFFFFFF / size)),
+ m_mask((m_size & (m_size-1)) ? 0 : m_size-1),
+ m_value(0)
+ {}
+
+ AGG_INLINE unsigned operator() (int v)
+ {
+ if(m_mask) return m_value = unsigned(v) & m_mask;
+ return m_value = (unsigned(v) + m_add) % m_size;
+ }
+ AGG_INLINE unsigned operator++ ()
+ {
+ ++m_value;
+ if(m_value >= m_size) m_value = 0;
+ return m_value;
+ }
+
+ private:
+ unsigned m_size;
+ unsigned m_add;
+ unsigned m_mask;
+ unsigned m_value;
+ };
+
+
+ //-------------------------------------------------------wrap_mode_reflect
+ class wrap_mode_reflect
+ {
+ public:
+ wrap_mode_reflect() {}
+ wrap_mode_reflect(unsigned size) :
+ m_size(size),
+ m_size2(size * 2),
+ m_add(m_size2 * (0x3FFFFFFF / m_size2)),
+ m_value(0)
+ {}
+
+ AGG_INLINE unsigned operator() (int v)
+ {
+ m_value = (unsigned(v) + m_add) % m_size2;
+ if(m_value >= m_size) return m_size2 - m_value - 1;
+ return m_value;
+ }
+
+ AGG_INLINE unsigned operator++ ()
+ {
+ ++m_value;
+ if(m_value >= m_size2) m_value = 0;
+ if(m_value >= m_size) return m_size2 - m_value - 1;
+ return m_value;
+ }
+ private:
+ unsigned m_size;
+ unsigned m_size2;
+ unsigned m_add;
+ unsigned m_value;
+ };
+
+
+
+ //--------------------------------------------------wrap_mode_reflect_pow2
+ class wrap_mode_reflect_pow2
+ {
+ public:
+ wrap_mode_reflect_pow2() {}
+ wrap_mode_reflect_pow2(unsigned size) : m_value(0)
+ {
+ m_mask = 1;
+ m_size = 1;
+ while(m_mask < size)
+ {
+ m_mask = (m_mask << 1) | 1;
+ m_size <<= 1;
+ }
+ }
+ AGG_INLINE unsigned operator() (int v)
+ {
+ m_value = unsigned(v) & m_mask;
+ if(m_value >= m_size) return m_mask - m_value;
+ return m_value;
+ }
+ AGG_INLINE unsigned operator++ ()
+ {
+ ++m_value;
+ m_value &= m_mask;
+ if(m_value >= m_size) return m_mask - m_value;
+ return m_value;
+ }
+ private:
+ unsigned m_size;
+ unsigned m_mask;
+ unsigned m_value;
+ };
+
+
+
+ //---------------------------------------------wrap_mode_reflect_auto_pow2
+ class wrap_mode_reflect_auto_pow2
+ {
+ public:
+ wrap_mode_reflect_auto_pow2() {}
+ wrap_mode_reflect_auto_pow2(unsigned size) :
+ m_size(size),
+ m_size2(size * 2),
+ m_add(m_size2 * (0x3FFFFFFF / m_size2)),
+ m_mask((m_size2 & (m_size2-1)) ? 0 : m_size2-1),
+ m_value(0)
+ {}
+
+ AGG_INLINE unsigned operator() (int v)
+ {
+ m_value = m_mask ? unsigned(v) & m_mask :
+ (unsigned(v) + m_add) % m_size2;
+ if(m_value >= m_size) return m_size2 - m_value - 1;
+ return m_value;
+ }
+ AGG_INLINE unsigned operator++ ()
+ {
+ ++m_value;
+ if(m_value >= m_size2) m_value = 0;
+ if(m_value >= m_size) return m_size2 - m_value - 1;
+ return m_value;
+ }
+
+ private:
+ unsigned m_size;
+ unsigned m_size2;
+ unsigned m_add;
+ unsigned m_mask;
+ unsigned m_value;
+ };
+
+
+}
+
+
+#endif
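A short sketch of the wrap-mode functors above (illustrative only); 'pixfmt' and 'pixf' in the comment are hypothetical stand-ins for one of the pixel-format classes added elsewhere in this commit.

    // Map arbitrary integer coordinates into [0, size) for a 4-pixel image.
    agg::wrap_mode_repeat  rep(4);    // rep(5)  == 1, rep(-1) == 3
    agg::wrap_mode_reflect refl(4);   // refl(4) == 3, refl(5) == 2 (mirrored)

    // They plug in as the WrapX/WrapY policies of image_accessor_wrap, e.g.
    //   agg::image_accessor_wrap<pixfmt, agg::wrap_mode_repeat,
    //                            agg::wrap_mode_reflect> src(pixf);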
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_image_filters.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_image_filters.h
new file mode 100644
index 00000000000..8e1bc8f0dba
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_image_filters.h
@@ -0,0 +1,448 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Image transformation filters,
+// Filtering classes (image_filter_lut, image_filter),
+// Basic filter shape classes
+//----------------------------------------------------------------------------
+#ifndef AGG_IMAGE_FILTERS_INCLUDED
+#define AGG_IMAGE_FILTERS_INCLUDED
+
+#include "agg_array.h"
+#include "agg_math.h"
+
+namespace agg
+{
+
+ // See Implementation agg_image_filters.cpp
+
+ enum image_filter_scale_e
+ {
+ image_filter_shift = 14, //----image_filter_shift
+ image_filter_scale = 1 << image_filter_shift, //----image_filter_scale
+ image_filter_mask = image_filter_scale - 1 //----image_filter_mask
+ };
+
+ enum image_subpixel_scale_e
+ {
+ image_subpixel_shift = 8, //----image_subpixel_shift
+ image_subpixel_scale = 1 << image_subpixel_shift, //----image_subpixel_scale
+ image_subpixel_mask = image_subpixel_scale - 1 //----image_subpixel_mask
+ };
+
+
+ //-----------------------------------------------------image_filter_lut
+ class image_filter_lut
+ {
+ public:
+ template<class FilterF> void calculate(const FilterF& filter,
+ bool normalization=true)
+ {
+ double r = filter.radius();
+ realloc_lut(r);
+ unsigned i;
+ unsigned pivot = diameter() << (image_subpixel_shift - 1);
+ for(i = 0; i < pivot; i++)
+ {
+ double x = double(i) / double(image_subpixel_scale);
+ double y = filter.calc_weight(x);
+ m_weight_array[pivot + i] =
+ m_weight_array[pivot - i] = (int16)iround(y * image_filter_scale);
+ }
+ unsigned end = (diameter() << image_subpixel_shift) - 1;
+ m_weight_array[0] = m_weight_array[end];
+ if(normalization)
+ {
+ normalize();
+ }
+ }
+
+ image_filter_lut() : m_radius(0), m_diameter(0), m_start(0) {}
+
+ template<class FilterF> image_filter_lut(const FilterF& filter,
+ bool normalization=true)
+ {
+ calculate(filter, normalization);
+ }
+
+ double radius() const { return m_radius; }
+ unsigned diameter() const { return m_diameter; }
+ int start() const { return m_start; }
+ const int16* weight_array() const { return &m_weight_array[0]; }
+ void normalize();
+
+ private:
+ void realloc_lut(double radius);
+ image_filter_lut(const image_filter_lut&);
+ const image_filter_lut& operator = (const image_filter_lut&);
+
+ double m_radius;
+ unsigned m_diameter;
+ int m_start;
+ pod_array<int16> m_weight_array;
+ };
+
+
+
+ //--------------------------------------------------------image_filter
+ template<class FilterF> class image_filter : public image_filter_lut
+ {
+ public:
+ image_filter()
+ {
+ calculate(m_filter_function);
+ }
+ private:
+ FilterF m_filter_function;
+ };
+
+
+ //-----------------------------------------------image_filter_bilinear
+ struct image_filter_bilinear
+ {
+ static double radius() { return 1.0; }
+ static double calc_weight(double x)
+ {
+ return 1.0 - x;
+ }
+ };
+
+
+ //-----------------------------------------------image_filter_hanning
+ struct image_filter_hanning
+ {
+ static double radius() { return 1.0; }
+ static double calc_weight(double x)
+ {
+ return 0.5 + 0.5 * cos(pi * x);
+ }
+ };
+
+
+ //-----------------------------------------------image_filter_hamming
+ struct image_filter_hamming
+ {
+ static double radius() { return 1.0; }
+ static double calc_weight(double x)
+ {
+ return 0.54 + 0.46 * cos(pi * x);
+ }
+ };
+
+ //-----------------------------------------------image_filter_hermite
+ struct image_filter_hermite
+ {
+ static double radius() { return 1.0; }
+ static double calc_weight(double x)
+ {
+ return (2.0 * x - 3.0) * x * x + 1.0;
+ }
+ };
+
+ //------------------------------------------------image_filter_quadric
+ struct image_filter_quadric
+ {
+ static double radius() { return 1.5; }
+ static double calc_weight(double x)
+ {
+ double t;
+ if(x < 0.5) return 0.75 - x * x;
+ if(x < 1.5) {t = x - 1.5; return 0.5 * t * t;}
+ return 0.0;
+ }
+ };
+
+ //------------------------------------------------image_filter_bicubic
+ class image_filter_bicubic
+ {
+ static double pow3(double x)
+ {
+ return (x <= 0.0) ? 0.0 : x * x * x;
+ }
+
+ public:
+ static double radius() { return 2.0; }
+ static double calc_weight(double x)
+ {
+ return
+ (1.0/6.0) *
+ (pow3(x + 2) - 4 * pow3(x + 1) + 6 * pow3(x) - 4 * pow3(x - 1));
+ }
+ };
+
+ //-------------------------------------------------image_filter_kaiser
+ class image_filter_kaiser
+ {
+ double a;
+ double i0a;
+ double epsilon;
+
+ public:
+ image_filter_kaiser(double b = 6.33) :
+ a(b), epsilon(1e-12)
+ {
+ i0a = 1.0 / bessel_i0(b);
+ }
+
+ static double radius() { return 1.0; }
+ double calc_weight(double x) const
+ {
+ return bessel_i0(a * sqrt(1. - x * x)) * i0a;
+ }
+
+ private:
+ double bessel_i0(double x) const
+ {
+ int i;
+ double sum, y, t;
+
+ sum = 1.;
+ y = x * x / 4.;
+ t = y;
+
+ for(i = 2; t > epsilon; i++)
+ {
+ sum += t;
+ t *= (double)y / (i * i);
+ }
+ return sum;
+ }
+ };
+
+ //----------------------------------------------image_filter_catrom
+ struct image_filter_catrom
+ {
+ static double radius() { return 2.0; }
+ static double calc_weight(double x)
+ {
+ if(x < 1.0) return 0.5 * (2.0 + x * x * (-5.0 + x * 3.0));
+ if(x < 2.0) return 0.5 * (4.0 + x * (-8.0 + x * (5.0 - x)));
+ return 0.;
+ }
+ };
+
+ //---------------------------------------------image_filter_mitchell
+ class image_filter_mitchell
+ {
+ double p0, p2, p3;
+ double q0, q1, q2, q3;
+
+ public:
+ image_filter_mitchell(double b = 1.0/3.0, double c = 1.0/3.0) :
+ p0((6.0 - 2.0 * b) / 6.0),
+ p2((-18.0 + 12.0 * b + 6.0 * c) / 6.0),
+ p3((12.0 - 9.0 * b - 6.0 * c) / 6.0),
+ q0((8.0 * b + 24.0 * c) / 6.0),
+ q1((-12.0 * b - 48.0 * c) / 6.0),
+ q2((6.0 * b + 30.0 * c) / 6.0),
+ q3((-b - 6.0 * c) / 6.0)
+ {}
+
+ static double radius() { return 2.0; }
+ double calc_weight(double x) const
+ {
+ if(x < 1.0) return p0 + x * x * (p2 + x * p3);
+ if(x < 2.0) return q0 + x * (q1 + x * (q2 + x * q3));
+ return 0.0;
+ }
+ };
+
+
+ //----------------------------------------------image_filter_spline16
+ struct image_filter_spline16
+ {
+ static double radius() { return 2.0; }
+ static double calc_weight(double x)
+ {
+ if(x < 1.0)
+ {
+ return ((x - 9.0/5.0 ) * x - 1.0/5.0 ) * x + 1.0;
+ }
+ return ((-1.0/3.0 * (x-1) + 4.0/5.0) * (x-1) - 7.0/15.0 ) * (x-1);
+ }
+ };
+
+
+ //---------------------------------------------image_filter_spline36
+ struct image_filter_spline36
+ {
+ static double radius() { return 3.0; }
+ static double calc_weight(double x)
+ {
+ if(x < 1.0)
+ {
+ return ((13.0/11.0 * x - 453.0/209.0) * x - 3.0/209.0) * x + 1.0;
+ }
+ if(x < 2.0)
+ {
+ return ((-6.0/11.0 * (x-1) + 270.0/209.0) * (x-1) - 156.0/ 209.0) * (x-1);
+ }
+ return ((1.0/11.0 * (x-2) - 45.0/209.0) * (x-2) + 26.0/209.0) * (x-2);
+ }
+ };
+
+
+ //----------------------------------------------image_filter_gaussian
+ struct image_filter_gaussian
+ {
+ static double radius() { return 2.0; }
+ static double calc_weight(double x)
+ {
+ return exp(-2.0 * x * x) * sqrt(2.0 / pi);
+ }
+ };
+
+
+ //------------------------------------------------image_filter_bessel
+ struct image_filter_bessel
+ {
+ static double radius() { return 3.2383; }
+ static double calc_weight(double x)
+ {
+ return (x == 0.0) ? pi / 4.0 : besj(pi * x, 1) / (2.0 * x);
+ }
+ };
+
+
+ //-------------------------------------------------image_filter_sinc
+ class image_filter_sinc
+ {
+ public:
+ image_filter_sinc(double r) : m_radius(r < 2.0 ? 2.0 : r) {}
+ double radius() const { return m_radius; }
+ double calc_weight(double x) const
+ {
+ if(x == 0.0) return 1.0;
+ x *= pi;
+ return sin(x) / x;
+ }
+ private:
+ double m_radius;
+ };
+
+
+ //-----------------------------------------------image_filter_lanczos
+ class image_filter_lanczos
+ {
+ public:
+ image_filter_lanczos(double r) : m_radius(r < 2.0 ? 2.0 : r) {}
+ double radius() const { return m_radius; }
+ double calc_weight(double x) const
+ {
+ if(x == 0.0) return 1.0;
+ if(x > m_radius) return 0.0;
+ x *= pi;
+ double xr = x / m_radius;
+ return (sin(x) / x) * (sin(xr) / xr);
+ }
+ private:
+ double m_radius;
+ };
+
+
+ //----------------------------------------------image_filter_blackman
+ class image_filter_blackman
+ {
+ public:
+ image_filter_blackman(double r) : m_radius(r < 2.0 ? 2.0 : r) {}
+ double radius() const { return m_radius; }
+ double calc_weight(double x) const
+ {
+ if(x == 0.0) return 1.0;
+ if(x > m_radius) return 0.0;
+ x *= pi;
+ double xr = x / m_radius;
+ return (sin(x) / x) * (0.42 + 0.5*cos(xr) + 0.08*cos(2*xr));
+ }
+ private:
+ double m_radius;
+ };
+
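+ // Editorial note: the numeric suffixes below appear to encode the squared
+ // kernel diameter, i.e. radius 3 -> 6x6 = 36 taps, radius 4 -> 8x8 = 64,
+ // and so on up to radius 8 -> 16x16 = 256.
+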
+ //------------------------------------------------image_filter_sinc36
+ class image_filter_sinc36 : public image_filter_sinc
+ { public: image_filter_sinc36() : image_filter_sinc(3.0){} };
+
+ //------------------------------------------------image_filter_sinc64
+ class image_filter_sinc64 : public image_filter_sinc
+ { public: image_filter_sinc64() : image_filter_sinc(4.0){} };
+
+ //-----------------------------------------------image_filter_sinc100
+ class image_filter_sinc100 : public image_filter_sinc
+ { public: image_filter_sinc100() : image_filter_sinc(5.0){} };
+
+ //-----------------------------------------------image_filter_sinc144
+ class image_filter_sinc144 : public image_filter_sinc
+ { public: image_filter_sinc144() : image_filter_sinc(6.0){} };
+
+ //-----------------------------------------------image_filter_sinc196
+ class image_filter_sinc196 : public image_filter_sinc
+ { public: image_filter_sinc196() : image_filter_sinc(7.0){} };
+
+ //-----------------------------------------------image_filter_sinc256
+ class image_filter_sinc256 : public image_filter_sinc
+ { public: image_filter_sinc256() : image_filter_sinc(8.0){} };
+
+ //---------------------------------------------image_filter_lanczos36
+ class image_filter_lanczos36 : public image_filter_lanczos
+ { public: image_filter_lanczos36() : image_filter_lanczos(3.0){} };
+
+ //---------------------------------------------image_filter_lanczos64
+ class image_filter_lanczos64 : public image_filter_lanczos
+ { public: image_filter_lanczos64() : image_filter_lanczos(4.0){} };
+
+ //--------------------------------------------image_filter_lanczos100
+ class image_filter_lanczos100 : public image_filter_lanczos
+ { public: image_filter_lanczos100() : image_filter_lanczos(5.0){} };
+
+ //--------------------------------------------image_filter_lanczos144
+ class image_filter_lanczos144 : public image_filter_lanczos
+ { public: image_filter_lanczos144() : image_filter_lanczos(6.0){} };
+
+ //--------------------------------------------image_filter_lanczos196
+ class image_filter_lanczos196 : public image_filter_lanczos
+ { public: image_filter_lanczos196() : image_filter_lanczos(7.0){} };
+
+ //--------------------------------------------image_filter_lanczos256
+ class image_filter_lanczos256 : public image_filter_lanczos
+ { public: image_filter_lanczos256() : image_filter_lanczos(8.0){} };
+
+ //--------------------------------------------image_filter_blackman36
+ class image_filter_blackman36 : public image_filter_blackman
+ { public: image_filter_blackman36() : image_filter_blackman(3.0){} };
+
+ //--------------------------------------------image_filter_blackman64
+ class image_filter_blackman64 : public image_filter_blackman
+ { public: image_filter_blackman64() : image_filter_blackman(4.0){} };
+
+ //-------------------------------------------image_filter_blackman100
+ class image_filter_blackman100 : public image_filter_blackman
+ { public: image_filter_blackman100() : image_filter_blackman(5.0){} };
+
+ //-------------------------------------------image_filter_blackman144
+ class image_filter_blackman144 : public image_filter_blackman
+ { public: image_filter_blackman144() : image_filter_blackman(6.0){} };
+
+ //-------------------------------------------image_filter_blackman196
+ class image_filter_blackman196 : public image_filter_blackman
+ { public: image_filter_blackman196() : image_filter_blackman(7.0){} };
+
+ //-------------------------------------------image_filter_blackman256
+ class image_filter_blackman256 : public image_filter_blackman
+ { public: image_filter_blackman256() : image_filter_blackman(8.0){} };
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_math.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_math.h
new file mode 100644
index 00000000000..2ec49cf3ff8
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_math.h
@@ -0,0 +1,437 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+// Bessel function (besj) was adapted for use in AGG library by Andy Wilk
+// Contact: castor.vulgaris@gmail.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_MATH_INCLUDED
+#define AGG_MATH_INCLUDED
+
+#include <math.h>
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //------------------------------------------------------vertex_dist_epsilon
+ // Maximal distance at which two points are considered coincident (epsilon)
+ const double vertex_dist_epsilon = 1e-14;
+
+ //-----------------------------------------------------intersection_epsilon
+ // See calc_intersection
+ const double intersection_epsilon = 1.0e-30;
+
+ //------------------------------------------------------------cross_product
+ AGG_INLINE double cross_product(double x1, double y1,
+ double x2, double y2,
+ double x, double y)
+ {
+ return (x - x2) * (y2 - y1) - (y - y2) * (x2 - x1);
+ }
+
+ //--------------------------------------------------------point_in_triangle
+ AGG_INLINE bool point_in_triangle(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x, double y)
+ {
+ bool cp1 = cross_product(x1, y1, x2, y2, x, y) < 0.0;
+ bool cp2 = cross_product(x2, y2, x3, y3, x, y) < 0.0;
+ bool cp3 = cross_product(x3, y3, x1, y1, x, y) < 0.0;
+ return cp1 == cp2 && cp2 == cp3 && cp3 == cp1;
+ }
+
+ //-----------------------------------------------------------calc_distance
+ AGG_INLINE double calc_distance(double x1, double y1, double x2, double y2)
+ {
+ double dx = x2-x1;
+ double dy = y2-y1;
+ return sqrt(dx * dx + dy * dy);
+ }
+
+ //--------------------------------------------------------calc_sq_distance
+ AGG_INLINE double calc_sq_distance(double x1, double y1, double x2, double y2)
+ {
+ double dx = x2-x1;
+ double dy = y2-y1;
+ return dx * dx + dy * dy;
+ }
+
+ //------------------------------------------------calc_line_point_distance
+ AGG_INLINE double calc_line_point_distance(double x1, double y1,
+ double x2, double y2,
+ double x, double y)
+ {
+ double dx = x2-x1;
+ double dy = y2-y1;
+ double d = sqrt(dx * dx + dy * dy);
+ if(d < vertex_dist_epsilon)
+ {
+ return calc_distance(x1, y1, x, y);
+ }
+ return ((x - x2) * dy - (y - y2) * dx) / d;
+ }
+
+ //-------------------------------------------------------calc_line_point_u
+ AGG_INLINE double calc_segment_point_u(double x1, double y1,
+ double x2, double y2,
+ double x, double y)
+ {
+ double dx = x2 - x1;
+ double dy = y2 - y1;
+
+ if(dx == 0 && dy == 0)
+ {
+ return 0;
+ }
+
+ double pdx = x - x1;
+ double pdy = y - y1;
+
+ return (pdx * dx + pdy * dy) / (dx * dx + dy * dy);
+ }
+
+ //---------------------------------------------calc_line_point_sq_distance
+ AGG_INLINE double calc_segment_point_sq_distance(double x1, double y1,
+ double x2, double y2,
+ double x, double y,
+ double u)
+ {
+ if(u <= 0)
+ {
+ return calc_sq_distance(x, y, x1, y1);
+ }
+ else
+ if(u >= 1)
+ {
+ return calc_sq_distance(x, y, x2, y2);
+ }
+ return calc_sq_distance(x, y, x1 + u * (x2 - x1), y1 + u * (y2 - y1));
+ }
+
+ //---------------------------------------------calc_line_point_sq_distance
+ AGG_INLINE double calc_segment_point_sq_distance(double x1, double y1,
+ double x2, double y2,
+ double x, double y)
+ {
+ return
+ calc_segment_point_sq_distance(
+ x1, y1, x2, y2, x, y,
+ calc_segment_point_u(x1, y1, x2, y2, x, y));
+ }
+
+ //-------------------------------------------------------calc_intersection
+ AGG_INLINE bool calc_intersection(double ax, double ay, double bx, double by,
+ double cx, double cy, double dx, double dy,
+ double* x, double* y)
+ {
+ double num = (ay-cy) * (dx-cx) - (ax-cx) * (dy-cy);
+ double den = (bx-ax) * (dy-cy) - (by-ay) * (dx-cx);
+ if(fabs(den) < intersection_epsilon) return false;
+ double r = num / den;
+ *x = ax + r * (bx-ax);
+ *y = ay + r * (by-ay);
+ return true;
+ }
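+
+ // Illustrative example (editorial): the intersection is computed for the
+ // infinite lines through (ax,ay)-(bx,by) and (cx,cy)-(dx,dy); r is not
+ // clamped to the segments. The unit square diagonals cross at (0.5, 0.5):
+ //
+ //     double x, y;
+ //     bool ok = calc_intersection(0,0, 1,1, 0,1, 1,0, &x, &y); // x == y == 0.5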
+
+ //-----------------------------------------------------intersection_exists
+ AGG_INLINE bool intersection_exists(double x1, double y1, double x2, double y2,
+ double x3, double y3, double x4, double y4)
+ {
+ // It's less expensive but you can't control the
+ // boundary conditions: Less or LessEqual
+ double dx1 = x2 - x1;
+ double dy1 = y2 - y1;
+ double dx2 = x4 - x3;
+ double dy2 = y4 - y3;
+ return ((x3 - x2) * dy1 - (y3 - y2) * dx1 < 0.0) !=
+ ((x4 - x2) * dy1 - (y4 - y2) * dx1 < 0.0) &&
+ ((x1 - x4) * dy2 - (y1 - y4) * dx2 < 0.0) !=
+ ((x2 - x4) * dy2 - (y2 - y4) * dx2 < 0.0);
+
+ // It is more expensive but more flexible
+ // in terms of boundary conditions.
+ //--------------------
+ //double den = (x2-x1) * (y4-y3) - (y2-y1) * (x4-x3);
+ //if(fabs(den) < intersection_epsilon) return false;
+ //double nom1 = (x4-x3) * (y1-y3) - (y4-y3) * (x1-x3);
+ //double nom2 = (x2-x1) * (y1-y3) - (y2-y1) * (x1-x3);
+ //double ua = nom1 / den;
+ //double ub = nom2 / den;
+ //return ua >= 0.0 && ua <= 1.0 && ub >= 0.0 && ub <= 1.0;
+ }
+
+ //--------------------------------------------------------calc_orthogonal
+ AGG_INLINE void calc_orthogonal(double thickness,
+ double x1, double y1,
+ double x2, double y2,
+ double* x, double* y)
+ {
+ double dx = x2 - x1;
+ double dy = y2 - y1;
+ double d = sqrt(dx*dx + dy*dy);
+ *x = thickness * dy / d;
+ *y = -thickness * dx / d;
+ }
+
+ //--------------------------------------------------------dilate_triangle
+ AGG_INLINE void dilate_triangle(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double *x, double* y,
+ double d)
+ {
+ double dx1=0.0;
+ double dy1=0.0;
+ double dx2=0.0;
+ double dy2=0.0;
+ double dx3=0.0;
+ double dy3=0.0;
+ double loc = cross_product(x1, y1, x2, y2, x3, y3);
+ if(fabs(loc) > intersection_epsilon)
+ {
+ if(cross_product(x1, y1, x2, y2, x3, y3) > 0.0)
+ {
+ d = -d;
+ }
+ calc_orthogonal(d, x1, y1, x2, y2, &dx1, &dy1);
+ calc_orthogonal(d, x2, y2, x3, y3, &dx2, &dy2);
+ calc_orthogonal(d, x3, y3, x1, y1, &dx3, &dy3);
+ }
+ *x++ = x1 + dx1; *y++ = y1 + dy1;
+ *x++ = x2 + dx1; *y++ = y2 + dy1;
+ *x++ = x2 + dx2; *y++ = y2 + dy2;
+ *x++ = x3 + dx2; *y++ = y3 + dy2;
+ *x++ = x3 + dx3; *y++ = y3 + dy3;
+ *x++ = x1 + dx3; *y++ = y1 + dy3;
+ }
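+
+ // Editorial note: x and y must each point to room for 6 doubles; the
+ // function writes the six corners of the dilated outline, two per edge
+ // of the original triangle.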
+
+ //------------------------------------------------------calc_triangle_area
+ AGG_INLINE double calc_triangle_area(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3)
+ {
+ return (x1*y2 - x2*y1 + x2*y3 - x3*y2 + x3*y1 - x1*y3) * 0.5;
+ }
+
+ //-------------------------------------------------------calc_polygon_area
+ template<class Storage> double calc_polygon_area(const Storage& st)
+ {
+ unsigned i;
+ double sum = 0.0;
+ double x = st[0].x;
+ double y = st[0].y;
+ double xs = x;
+ double ys = y;
+
+ for(i = 1; i < st.size(); i++)
+ {
+ const typename Storage::value_type& v = st[i];
+ sum += x * v.y - y * v.x;
+ x = v.x;
+ y = v.y;
+ }
+ return (sum + x * ys - y * xs) * 0.5;
+ }
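+
+ // Worked example (editorial): for the counter-clockwise unit square
+ // (0,0), (1,0), (1,1), (0,1) the loop accumulates 0 + 1 + 1 = 2 and the
+ // closing term adds 0, so the function returns +1.0; the clockwise order
+ // returns -1.0, i.e. the sign encodes the orientation.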
+
+ //------------------------------------------------------------------------
+ // Tables for fast sqrt
+ extern int16u g_sqrt_table[1024];
+ extern int8 g_elder_bit_table[256];
+
+
+ //---------------------------------------------------------------fast_sqrt
+ //Fast integer Sqrt - really fast: no loops, divisions or multiplications
+ #if defined(_MSC_VER)
+ #pragma warning(push)
+ #pragma warning(disable : 4035) //Disable warning "no return value"
+ #endif
+ AGG_INLINE unsigned fast_sqrt(unsigned val)
+ {
+ #if defined(_M_IX86) && defined(_MSC_VER) && !defined(AGG_NO_ASM)
+ //For Ix86 family processors this assembler code is used.
+ //The key instruction here is bsr - determining the number of the most
+ //significant bit of the value. For other processors
+ //(and maybe compilers) the pure C "#else" section is used.
+ __asm
+ {
+ mov ebx, val
+ mov edx, 11
+ bsr ecx, ebx
+ sub ecx, 9
+ jle less_than_9_bits
+ shr ecx, 1
+ adc ecx, 0
+ sub edx, ecx
+ shl ecx, 1
+ shr ebx, cl
+ less_than_9_bits:
+ xor eax, eax
+ mov ax, g_sqrt_table[ebx*2]
+ mov ecx, edx
+ shr eax, cl
+ }
+ #else
+
+ //This code is actually pure C and portable to most
+ //architectures including 64-bit ones.
+ unsigned t = val;
+ int bit=0;
+ unsigned shift = 11;
+
+ //The following piece of code is just an emulation of the
+ //Ix86 assembler command "bsr" (see above). However on old
+ //Intels (like Intel MMX 233MHz) this code is about twice
+ //faster (sic!) than just one "bsr". On PIII and PIV the
+ //bsr is optimized quite well.
+ bit = t >> 24;
+ if(bit)
+ {
+ bit = g_elder_bit_table[bit] + 24;
+ }
+ else
+ {
+ bit = (t >> 16) & 0xFF;
+ if(bit)
+ {
+ bit = g_elder_bit_table[bit] + 16;
+ }
+ else
+ {
+ bit = (t >> 8) & 0xFF;
+ if(bit)
+ {
+ bit = g_elder_bit_table[bit] + 8;
+ }
+ else
+ {
+ bit = g_elder_bit_table[t];
+ }
+ }
+ }
+
+ //This code calculates the sqrt.
+ bit -= 9;
+ if(bit > 0)
+ {
+ bit = (bit >> 1) + (bit & 1);
+ shift -= bit;
+ val >>= (bit << 1);
+ }
+ return g_sqrt_table[val] >> shift;
+ #endif
+ }
+ #if defined(_MSC_VER)
+ #pragma warning(pop)
+ #endif
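+
+ // Usage sketch (editorial, illustrative only): a cheap stand-in for
+ // (unsigned)sqrt(double(v)) where limited precision is acceptable, e.g.
+ //
+ //     unsigned d = fast_sqrt(dx*dx + dy*dy);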
+
+
+
+
+ //--------------------------------------------------------------------besj
+ // Function BESJ calculates Bessel function of first kind of order n
+ // Arguments:
+ // n - an integer (>=0), the order
+ // x - value at which the Bessel function is required
+ //--------------------
+ // C++ Mathematical Library
+ // Converted from the equivalent FORTRAN library
+ // Converted by Gareth Walker for use by course 392 computational project
+ // All functions were tested and yield the same results as the corresponding
+ // FORTRAN versions.
+ //
+ // If you have any problems using these functions please report them to
+ // M.Muldoon@UMIST.ac.uk
+ //
+ // Documentation available on the web
+ // http://www.ma.umist.ac.uk/mrm/Teaching/392/libs/392.html
+ // Version 1.0 8/98
+ // 29 October, 1999
+ //--------------------
+ // Adapted for use in AGG library by Andy Wilk (castor.vulgaris@gmail.com)
+ //------------------------------------------------------------------------
+ inline double besj(double x, int n)
+ {
+ if(n < 0)
+ {
+ return 0;
+ }
+ double d = 1E-6;
+ double b = 0;
+ if(fabs(x) <= d)
+ {
+ if(n != 0) return 0;
+ return 1;
+ }
+ double b1 = 0; // b1 is the value from the previous iteration
+ // Set up a starting order for recurrence
+ int m1 = (int)fabs(x) + 6;
+ if(fabs(x) > 5)
+ {
+ m1 = (int)(fabs(1.4 * x + 60 / x));
+ }
+ int m2 = (int)(n + 2 + fabs(x) / 4);
+ if (m1 > m2)
+ {
+ m2 = m1;
+ }
+
+ // Apply recurrence down from the current max order
+ for(;;)
+ {
+ double c3 = 0;
+ double c2 = 1E-30;
+ double c4 = 0;
+ int m8 = 1;
+ if (m2 / 2 * 2 == m2)
+ {
+ m8 = -1;
+ }
+ int imax = m2 - 2;
+ for (int i = 1; i <= imax; i++)
+ {
+ double c6 = 2 * (m2 - i) * c2 / x - c3;
+ c3 = c2;
+ c2 = c6;
+ if(m2 - i - 1 == n)
+ {
+ b = c6;
+ }
+ m8 = -1 * m8;
+ if (m8 > 0)
+ {
+ c4 = c4 + 2 * c6;
+ }
+ }
+ double c6 = 2 * c2 / x - c3;
+ if(n == 0)
+ {
+ b = c6;
+ }
+ c4 += c6;
+ b /= c4;
+ if(fabs(b - b1) < d)
+ {
+ return b;
+ }
+ b1 = b;
+ m2 += 3;
+ }
+ }
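+
+ // Example from this library (editorial note): image_filter_bessel in
+ // agg_image_filters.h evaluates its kernel weight as
+ // besj(pi * x, 1) / (2.0 * x).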
+
+}
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_math_stroke.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_math_stroke.h
new file mode 100644
index 00000000000..4871d96cef6
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_math_stroke.h
@@ -0,0 +1,527 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Stroke math
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_STROKE_MATH_INCLUDED
+#define AGG_STROKE_MATH_INCLUDED
+
+#include "agg_math.h"
+#include "agg_vertex_sequence.h"
+
+namespace agg
+{
+ //-------------------------------------------------------------line_cap_e
+ enum line_cap_e
+ {
+ butt_cap,
+ square_cap,
+ round_cap
+ };
+
+ //------------------------------------------------------------line_join_e
+ enum line_join_e
+ {
+ miter_join = 0,
+ miter_join_revert = 1,
+ round_join = 2,
+ bevel_join = 3,
+ miter_join_round = 4
+ };
+
+
+ //-----------------------------------------------------------inner_join_e
+ enum inner_join_e
+ {
+ inner_bevel,
+ inner_miter,
+ inner_jag,
+ inner_round
+ };
+
+ //------------------------------------------------------------math_stroke
+ template<class VertexConsumer> class math_stroke
+ {
+ public:
+ typedef typename VertexConsumer::value_type coord_type;
+
+ math_stroke();
+
+ void line_cap(line_cap_e lc) { m_line_cap = lc; }
+ void line_join(line_join_e lj) { m_line_join = lj; }
+ void inner_join(inner_join_e ij) { m_inner_join = ij; }
+
+ line_cap_e line_cap() const { return m_line_cap; }
+ line_join_e line_join() const { return m_line_join; }
+ inner_join_e inner_join() const { return m_inner_join; }
+
+ void width(double w);
+ void miter_limit(double ml) { m_miter_limit = ml; }
+ void miter_limit_theta(double t);
+ void inner_miter_limit(double ml) { m_inner_miter_limit = ml; }
+ void approximation_scale(double as) { m_approx_scale = as; }
+
+ double width() const { return m_width * 2.0; }
+ double miter_limit() const { return m_miter_limit; }
+ double inner_miter_limit() const { return m_inner_miter_limit; }
+ double approximation_scale() const { return m_approx_scale; }
+
+ void calc_cap(VertexConsumer& vc,
+ const vertex_dist& v0,
+ const vertex_dist& v1,
+ double len);
+
+ void calc_join(VertexConsumer& vc,
+ const vertex_dist& v0,
+ const vertex_dist& v1,
+ const vertex_dist& v2,
+ double len1,
+ double len2);
+
+ private:
+ AGG_INLINE void add_vertex(VertexConsumer& vc, double x, double y)
+ {
+ vc.add(coord_type(x, y));
+ }
+
+ void calc_arc(VertexConsumer& vc,
+ double x, double y,
+ double dx1, double dy1,
+ double dx2, double dy2);
+
+ void calc_miter(VertexConsumer& vc,
+ const vertex_dist& v0,
+ const vertex_dist& v1,
+ const vertex_dist& v2,
+ double dx1, double dy1,
+ double dx2, double dy2,
+ line_join_e lj,
+ double mlimit,
+ double dbevel);
+
+ double m_width;
+ double m_width_abs;
+ double m_width_eps;
+ int m_width_sign;
+ double m_miter_limit;
+ double m_inner_miter_limit;
+ double m_approx_scale;
+ line_cap_e m_line_cap;
+ line_join_e m_line_join;
+ inner_join_e m_inner_join;
+ };
+
+ //-----------------------------------------------------------------------
+ template<class VC> math_stroke<VC>::math_stroke() :
+ m_width(0.5),
+ m_width_abs(0.5),
+ m_width_eps(0.5/1024.0),
+ m_width_sign(1),
+ m_miter_limit(4.0),
+ m_inner_miter_limit(1.01),
+ m_approx_scale(1.0),
+ m_line_cap(butt_cap),
+ m_line_join(miter_join),
+ m_inner_join(inner_miter)
+ {
+ }
+
+ //-----------------------------------------------------------------------
+ template<class VC> void math_stroke<VC>::width(double w)
+ {
+ m_width = w * 0.5;
+ if(m_width < 0)
+ {
+ m_width_abs = -m_width;
+ m_width_sign = -1;
+ }
+ else
+ {
+ m_width_abs = m_width;
+ m_width_sign = 1;
+ }
+ m_width_eps = m_width / 1024.0;
+ }
+
+ //-----------------------------------------------------------------------
+ template<class VC> void math_stroke<VC>::miter_limit_theta(double t)
+ {
+ m_miter_limit = 1.0 / sin(t * 0.5) ;
+ }
+
+ //-----------------------------------------------------------------------
+ template<class VC>
+ void math_stroke<VC>::calc_arc(VC& vc,
+ double x, double y,
+ double dx1, double dy1,
+ double dx2, double dy2)
+ {
+ double a1 = atan2(dy1 * m_width_sign, dx1 * m_width_sign);
+ double a2 = atan2(dy2 * m_width_sign, dx2 * m_width_sign);
+ double da = a1 - a2;
+ int i, n;
+
+ da = acos(m_width_abs / (m_width_abs + 0.125 / m_approx_scale)) * 2;
+
+ add_vertex(vc, x + dx1, y + dy1);
+ if(m_width_sign > 0)
+ {
+ if(a1 > a2) a2 += 2 * pi;
+ n = int((a2 - a1) / da);
+ da = (a2 - a1) / (n + 1);
+ a1 += da;
+ for(i = 0; i < n; i++)
+ {
+ add_vertex(vc, x + cos(a1) * m_width, y + sin(a1) * m_width);
+ a1 += da;
+ }
+ }
+ else
+ {
+ if(a1 < a2) a2 -= 2 * pi;
+ n = int((a1 - a2) / da);
+ da = (a1 - a2) / (n + 1);
+ a1 -= da;
+ for(i = 0; i < n; i++)
+ {
+ add_vertex(vc, x + cos(a1) * m_width, y + sin(a1) * m_width);
+ a1 -= da;
+ }
+ }
+ add_vertex(vc, x + dx2, y + dy2);
+ }
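+
+ // Editorial note: the step angle da is chosen so that the sagitta of each
+ // chord (its deviation from the true arc) stays near 0.125 pixels divided
+ // by m_approx_scale; a1 and a2 only determine the sweep direction and the
+ // number of steps.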
+
+ //-----------------------------------------------------------------------
+ template<class VC>
+ void math_stroke<VC>::calc_miter(VC& vc,
+ const vertex_dist& v0,
+ const vertex_dist& v1,
+ const vertex_dist& v2,
+ double dx1, double dy1,
+ double dx2, double dy2,
+ line_join_e lj,
+ double mlimit,
+ double dbevel)
+ {
+ double xi = v1.x;
+ double yi = v1.y;
+ double di = 1;
+ double lim = m_width_abs * mlimit;
+ bool miter_limit_exceeded = true; // Assume the worst
+ bool intersection_failed = true; // Assume the worst
+
+ if(calc_intersection(v0.x + dx1, v0.y - dy1,
+ v1.x + dx1, v1.y - dy1,
+ v1.x + dx2, v1.y - dy2,
+ v2.x + dx2, v2.y - dy2,
+ &xi, &yi))
+ {
+ // Calculation of the intersection succeeded
+ //---------------------
+ di = calc_distance(v1.x, v1.y, xi, yi);
+ if(di <= lim)
+ {
+ // Inside the miter limit
+ //---------------------
+ add_vertex(vc, xi, yi);
+ miter_limit_exceeded = false;
+ }
+ intersection_failed = false;
+ }
+ else
+ {
+ // Calculation of the intersection failed, most probably
+ // because the three points lie on one straight line.
+ // First check if v0 and v2 lie on the opposite sides of vector:
+ // (v1.x, v1.y) -> (v1.x+dx1, v1.y-dy1), that is, the perpendicular
+ // to the line determined by vertices v0 and v1.
+ // This condition determines whether the next line segment continues
+ // the previous one or goes back.
+ //----------------
+ double x2 = v1.x + dx1;
+ double y2 = v1.y - dy1;
+ if((cross_product(v0.x, v0.y, v1.x, v1.y, x2, y2) < 0.0) ==
+ (cross_product(v1.x, v1.y, v2.x, v2.y, x2, y2) < 0.0))
+ {
+ // This case means that the next segment continues
+ // the previous one (straight line)
+ //-----------------
+ add_vertex(vc, v1.x + dx1, v1.y - dy1);
+ miter_limit_exceeded = false;
+ }
+ }
+
+ if(miter_limit_exceeded)
+ {
+ // Miter limit exceeded
+ //------------------------
+ switch(lj)
+ {
+ case miter_join_revert:
+ // For the compatibility with SVG, PDF, etc,
+ // we use a simple bevel join instead of
+ // "smart" bevel
+ //-------------------
+ add_vertex(vc, v1.x + dx1, v1.y - dy1);
+ add_vertex(vc, v1.x + dx2, v1.y - dy2);
+ break;
+
+ case miter_join_round:
+ calc_arc(vc, v1.x, v1.y, dx1, -dy1, dx2, -dy2);
+ break;
+
+ default:
+ // If no miter-revert, calculate new dx1, dy1, dx2, dy2
+ //----------------
+ if(intersection_failed)
+ {
+ mlimit *= m_width_sign;
+ add_vertex(vc, v1.x + dx1 + dy1 * mlimit,
+ v1.y - dy1 + dx1 * mlimit);
+ add_vertex(vc, v1.x + dx2 - dy2 * mlimit,
+ v1.y - dy2 - dx2 * mlimit);
+ }
+ else
+ {
+ double x1 = v1.x + dx1;
+ double y1 = v1.y - dy1;
+ double x2 = v1.x + dx2;
+ double y2 = v1.y - dy2;
+ di = (lim - dbevel) / (di - dbevel);
+ add_vertex(vc, x1 + (xi - x1) * di,
+ y1 + (yi - y1) * di);
+ add_vertex(vc, x2 + (xi - x2) * di,
+ y2 + (yi - y2) * di);
+ }
+ break;
+ }
+ }
+ }
+
+ //--------------------------------------------------------stroke_calc_cap
+ template<class VC>
+ void math_stroke<VC>::calc_cap(VC& vc,
+ const vertex_dist& v0,
+ const vertex_dist& v1,
+ double len)
+ {
+ vc.remove_all();
+
+ double dx1 = (v1.y - v0.y) / len;
+ double dy1 = (v1.x - v0.x) / len;
+ double dx2 = 0;
+ double dy2 = 0;
+
+ dx1 *= m_width;
+ dy1 *= m_width;
+
+ if(m_line_cap != round_cap)
+ {
+ if(m_line_cap == square_cap)
+ {
+ dx2 = dy1 * m_width_sign;
+ dy2 = dx1 * m_width_sign;
+ }
+ add_vertex(vc, v0.x - dx1 - dx2, v0.y + dy1 - dy2);
+ add_vertex(vc, v0.x + dx1 - dx2, v0.y - dy1 - dy2);
+ }
+ else
+ {
+ double da = acos(m_width_abs / (m_width_abs + 0.125 / m_approx_scale)) * 2;
+ double a1;
+ int i;
+ int n = int(pi / da);
+
+ da = pi / (n + 1);
+ add_vertex(vc, v0.x - dx1, v0.y + dy1);
+ if(m_width_sign > 0)
+ {
+ a1 = atan2(dy1, -dx1);
+ a1 += da;
+ for(i = 0; i < n; i++)
+ {
+ add_vertex(vc, v0.x + cos(a1) * m_width,
+ v0.y + sin(a1) * m_width);
+ a1 += da;
+ }
+ }
+ else
+ {
+ a1 = atan2(-dy1, dx1);
+ a1 -= da;
+ for(i = 0; i < n; i++)
+ {
+ add_vertex(vc, v0.x + cos(a1) * m_width,
+ v0.y + sin(a1) * m_width);
+ a1 -= da;
+ }
+ }
+ add_vertex(vc, v0.x + dx1, v0.y - dy1);
+ }
+ }
+
+ //-----------------------------------------------------------------------
+ template<class VC>
+ void math_stroke<VC>::calc_join(VC& vc,
+ const vertex_dist& v0,
+ const vertex_dist& v1,
+ const vertex_dist& v2,
+ double len1,
+ double len2)
+ {
+ double dx1 = m_width * (v1.y - v0.y) / len1;
+ double dy1 = m_width * (v1.x - v0.x) / len1;
+ double dx2 = m_width * (v2.y - v1.y) / len2;
+ double dy2 = m_width * (v2.x - v1.x) / len2;
+
+ vc.remove_all();
+
+ double cp = cross_product(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y);
+ if ((cp > agg::vertex_dist_epsilon && m_width > 0) ||
+ (cp < -agg::vertex_dist_epsilon && m_width < 0))
+ {
+ // Inner join
+ //---------------
+ double limit = ((len1 < len2) ? len1 : len2) / m_width_abs;
+ if(limit < m_inner_miter_limit)
+ {
+ limit = m_inner_miter_limit;
+ }
+
+ switch(m_inner_join)
+ {
+ default: // inner_bevel
+ add_vertex(vc, v1.x + dx1, v1.y - dy1);
+ add_vertex(vc, v1.x + dx2, v1.y - dy2);
+ break;
+
+ case inner_miter:
+ calc_miter(vc,
+ v0, v1, v2, dx1, dy1, dx2, dy2,
+ miter_join_revert,
+ limit, 0);
+ break;
+
+ case inner_jag:
+ case inner_round:
+ cp = (dx1-dx2) * (dx1-dx2) + (dy1-dy2) * (dy1-dy2);
+ if(cp < len1 * len1 && cp < len2 * len2)
+ {
+ calc_miter(vc,
+ v0, v1, v2, dx1, dy1, dx2, dy2,
+ miter_join_revert,
+ limit, 0);
+ }
+ else
+ {
+ if(m_inner_join == inner_jag)
+ {
+ add_vertex(vc, v1.x + dx1, v1.y - dy1);
+ add_vertex(vc, v1.x, v1.y );
+ add_vertex(vc, v1.x + dx2, v1.y - dy2);
+ }
+ else
+ {
+ add_vertex(vc, v1.x + dx1, v1.y - dy1);
+ add_vertex(vc, v1.x, v1.y );
+ calc_arc(vc, v1.x, v1.y, dx2, -dy2, dx1, -dy1);
+ add_vertex(vc, v1.x, v1.y );
+ add_vertex(vc, v1.x + dx2, v1.y - dy2);
+ }
+ }
+ break;
+ }
+ }
+ else
+ {
+ // Outer join
+ //---------------
+
+ // Calculate the distance between v1 and
+ // the central point of the bevel line segment
+ //---------------
+ double dx = (dx1 + dx2) / 2;
+ double dy = (dy1 + dy2) / 2;
+ double dbevel = sqrt(dx * dx + dy * dy);
+
+ if(m_line_join == round_join || m_line_join == bevel_join)
+ {
+ // This is an optimization that reduces the number of points
+ // in cases of almost collinear segments. If there's no
+ // visible difference between bevel and miter joins we'd rather
+ // use miter join because it adds only one point instead of two.
+ //
+ // Here we calculate the middle point between the bevel points
+ // and then the distance between v1 and that middle point.
+ // At outer joins this distance is always less than the stroke
+ // width, because it is the height of the isosceles triangle
+ // formed by v1 and its two bevel points. If the difference
+ // between the stroke width and this distance is small
+ // (no visible bevel) we can add just one point.
+ //
+ // The constant in the expression makes the result approximately
+ // the same as in round joins and caps. You can safely comment
+ // out this entire "if".
+ //-------------------
+ if(m_approx_scale * (m_width_abs - dbevel) < m_width_eps)
+ {
+ if(calc_intersection(v0.x + dx1, v0.y - dy1,
+ v1.x + dx1, v1.y - dy1,
+ v1.x + dx2, v1.y - dy2,
+ v2.x + dx2, v2.y - dy2,
+ &dx, &dy))
+ {
+ add_vertex(vc, dx, dy);
+ }
+ else
+ {
+ add_vertex(vc, v1.x + dx1, v1.y - dy1);
+ }
+ return;
+ }
+ }
+
+ switch(m_line_join)
+ {
+ case miter_join:
+ case miter_join_revert:
+ case miter_join_round:
+ calc_miter(vc,
+ v0, v1, v2, dx1, dy1, dx2, dy2,
+ m_line_join,
+ m_miter_limit,
+ dbevel);
+ break;
+
+ case round_join:
+ calc_arc(vc, v1.x, v1.y, dx1, -dy1, dx2, -dy2);
+ break;
+
+ default: // Bevel join
+ add_vertex(vc, v1.x + dx1, v1.y - dy1);
+ add_vertex(vc, v1.x + dx2, v1.y - dy2);
+ break;
+ }
+ }
+ }
+
+
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_path_storage.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_path_storage.h
new file mode 100644
index 00000000000..c01b867f26c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_path_storage.h
@@ -0,0 +1,1545 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_PATH_STORAGE_INCLUDED
+#define AGG_PATH_STORAGE_INCLUDED
+
+#include <string.h>
+#include <math.h>
+#include "agg_math.h"
+#include "agg_array.h"
+#include "agg_bezier_arc.h"
+
+namespace agg
+{
+
+
+ //----------------------------------------------------vertex_block_storage
+ template<class T, unsigned BlockShift=8, unsigned BlockPool=256>
+ class vertex_block_storage
+ {
+ public:
+ // Allocation parameters
+ enum block_scale_e
+ {
+ block_shift = BlockShift,
+ block_size = 1 << block_shift,
+ block_mask = block_size - 1,
+ block_pool = BlockPool
+ };
+
+ typedef T value_type;
+ typedef vertex_block_storage<T, BlockShift, BlockPool> self_type;
+
+ ~vertex_block_storage();
+ vertex_block_storage();
+ vertex_block_storage(const self_type& v);
+ const self_type& operator = (const self_type& ps);
+
+ void remove_all();
+ void free_all();
+
+ void add_vertex(double x, double y, unsigned cmd);
+ void modify_vertex(unsigned idx, double x, double y);
+ void modify_vertex(unsigned idx, double x, double y, unsigned cmd);
+ void modify_command(unsigned idx, unsigned cmd);
+ void swap_vertices(unsigned v1, unsigned v2);
+
+ unsigned last_command() const;
+ unsigned last_vertex(double* x, double* y) const;
+ unsigned prev_vertex(double* x, double* y) const;
+
+ double last_x() const;
+ double last_y() const;
+
+ unsigned total_vertices() const;
+ unsigned vertex(unsigned idx, double* x, double* y) const;
+ unsigned command(unsigned idx) const;
+
+ private:
+ void allocate_block(unsigned nb);
+ int8u* storage_ptrs(T** xy_ptr);
+
+ private:
+ unsigned m_total_vertices;
+ unsigned m_total_blocks;
+ unsigned m_max_blocks;
+ T** m_coord_blocks;
+ int8u** m_cmd_blocks;
+ };
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ void vertex_block_storage<T,S,P>::free_all()
+ {
+ if(m_total_blocks)
+ {
+ T** coord_blk = m_coord_blocks + m_total_blocks - 1;
+ while(m_total_blocks--)
+ {
+ pod_allocator<T>::deallocate(
+ *coord_blk,
+ block_size * 2 +
+ block_size / (sizeof(T) / sizeof(unsigned char)));
+ --coord_blk;
+ }
+ pod_allocator<T*>::deallocate(m_coord_blocks, m_max_blocks * 2);
+ m_total_blocks = 0;
+ m_max_blocks = 0;
+ m_coord_blocks = 0;
+ m_cmd_blocks = 0;
+ m_total_vertices = 0;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ vertex_block_storage<T,S,P>::~vertex_block_storage()
+ {
+ free_all();
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ vertex_block_storage<T,S,P>::vertex_block_storage() :
+ m_total_vertices(0),
+ m_total_blocks(0),
+ m_max_blocks(0),
+ m_coord_blocks(0),
+ m_cmd_blocks(0)
+ {
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ vertex_block_storage<T,S,P>::vertex_block_storage(const vertex_block_storage<T,S,P>& v) :
+ m_total_vertices(0),
+ m_total_blocks(0),
+ m_max_blocks(0),
+ m_coord_blocks(0),
+ m_cmd_blocks(0)
+ {
+ *this = v;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ const vertex_block_storage<T,S,P>&
+ vertex_block_storage<T,S,P>::operator = (const vertex_block_storage<T,S,P>& v)
+ {
+ remove_all();
+ unsigned i;
+ for(i = 0; i < v.total_vertices(); i++)
+ {
+ double x, y;
+ unsigned cmd = v.vertex(i, &x, &y);
+ add_vertex(x, y, cmd);
+ }
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::remove_all()
+ {
+ m_total_vertices = 0;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::add_vertex(double x, double y,
+ unsigned cmd)
+ {
+ T* coord_ptr = 0;
+ *storage_ptrs(&coord_ptr) = (int8u)cmd;
+ coord_ptr[0] = T(x);
+ coord_ptr[1] = T(y);
+ m_total_vertices++;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::modify_vertex(unsigned idx,
+ double x, double y)
+ {
+ T* pv = m_coord_blocks[idx >> block_shift] + ((idx & block_mask) << 1);
+ pv[0] = T(x);
+ pv[1] = T(y);
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::modify_vertex(unsigned idx,
+ double x, double y,
+ unsigned cmd)
+ {
+ unsigned block = idx >> block_shift;
+ unsigned offset = idx & block_mask;
+ T* pv = m_coord_blocks[block] + (offset << 1);
+ pv[0] = T(x);
+ pv[1] = T(y);
+ m_cmd_blocks[block][offset] = (int8u)cmd;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::modify_command(unsigned idx,
+ unsigned cmd)
+ {
+ m_cmd_blocks[idx >> block_shift][idx & block_mask] = (int8u)cmd;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline void vertex_block_storage<T,S,P>::swap_vertices(unsigned v1, unsigned v2)
+ {
+ unsigned b1 = v1 >> block_shift;
+ unsigned b2 = v2 >> block_shift;
+ unsigned o1 = v1 & block_mask;
+ unsigned o2 = v2 & block_mask;
+ T* pv1 = m_coord_blocks[b1] + (o1 << 1);
+ T* pv2 = m_coord_blocks[b2] + (o2 << 1);
+ T val;
+ val = pv1[0]; pv1[0] = pv2[0]; pv2[0] = val;
+ val = pv1[1]; pv1[1] = pv2[1]; pv2[1] = val;
+ int8u cmd = m_cmd_blocks[b1][o1];
+ m_cmd_blocks[b1][o1] = m_cmd_blocks[b2][o2];
+ m_cmd_blocks[b2][o2] = cmd;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::last_command() const
+ {
+ if(m_total_vertices) return command(m_total_vertices - 1);
+ return path_cmd_stop;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::last_vertex(double* x, double* y) const
+ {
+ if(m_total_vertices) return vertex(m_total_vertices - 1, x, y);
+ return path_cmd_stop;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::prev_vertex(double* x, double* y) const
+ {
+ if(m_total_vertices > 1) return vertex(m_total_vertices - 2, x, y);
+ return path_cmd_stop;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline double vertex_block_storage<T,S,P>::last_x() const
+ {
+ if(m_total_vertices)
+ {
+ unsigned idx = m_total_vertices - 1;
+ return m_coord_blocks[idx >> block_shift][(idx & block_mask) << 1];
+ }
+ return 0.0;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline double vertex_block_storage<T,S,P>::last_y() const
+ {
+ if(m_total_vertices)
+ {
+ unsigned idx = m_total_vertices - 1;
+ return m_coord_blocks[idx >> block_shift][((idx & block_mask) << 1) + 1];
+ }
+ return 0.0;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::total_vertices() const
+ {
+ return m_total_vertices;
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::vertex(unsigned idx,
+ double* x, double* y) const
+ {
+ unsigned nb = idx >> block_shift;
+ const T* pv = m_coord_blocks[nb] + ((idx & block_mask) << 1);
+ *x = pv[0];
+ *y = pv[1];
+ return m_cmd_blocks[nb][idx & block_mask];
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ inline unsigned vertex_block_storage<T,S,P>::command(unsigned idx) const
+ {
+ return m_cmd_blocks[idx >> block_shift][idx & block_mask];
+ }
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ void vertex_block_storage<T,S,P>::allocate_block(unsigned nb)
+ {
+ if(nb >= m_max_blocks)
+ {
+ T** new_coords =
+ pod_allocator<T*>::allocate((m_max_blocks + block_pool) * 2);
+
+ unsigned char** new_cmds =
+ (unsigned char**)(new_coords + m_max_blocks + block_pool);
+
+ if(m_coord_blocks)
+ {
+ memcpy(new_coords,
+ m_coord_blocks,
+ m_max_blocks * sizeof(T*));
+
+ memcpy(new_cmds,
+ m_cmd_blocks,
+ m_max_blocks * sizeof(unsigned char*));
+
+ pod_allocator<T*>::deallocate(m_coord_blocks, m_max_blocks * 2);
+ }
+ m_coord_blocks = new_coords;
+ m_cmd_blocks = new_cmds;
+ m_max_blocks += block_pool;
+ }
+ m_coord_blocks[nb] =
+ pod_allocator<T>::allocate(block_size * 2 +
+ block_size / (sizeof(T) / sizeof(unsigned char)));
+
+ m_cmd_blocks[nb] =
+ (unsigned char*)(m_coord_blocks[nb] + block_size * 2);
+
+ m_total_blocks++;
+ }
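+
+ // Editorial note: each block is a single allocation holding 2*block_size
+ // interleaved x,y coordinates followed by block_size command bytes, which
+ // is why m_cmd_blocks[nb] points just past the coordinate area.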
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S, unsigned P>
+ int8u* vertex_block_storage<T,S,P>::storage_ptrs(T** xy_ptr)
+ {
+ unsigned nb = m_total_vertices >> block_shift;
+ if(nb >= m_total_blocks)
+ {
+ allocate_block(nb);
+ }
+ *xy_ptr = m_coord_blocks[nb] + ((m_total_vertices & block_mask) << 1);
+ return m_cmd_blocks[nb] + (m_total_vertices & block_mask);
+ }
+
+
+
+
+ //-----------------------------------------------------poly_plain_adaptor
+ template<class T> class poly_plain_adaptor
+ {
+ public:
+ typedef T value_type;
+
+ poly_plain_adaptor() :
+ m_data(0),
+ m_ptr(0),
+ m_end(0),
+ m_closed(false),
+ m_stop(false)
+ {}
+
+ poly_plain_adaptor(const T* data, unsigned num_points, bool closed) :
+ m_data(data),
+ m_ptr(data),
+ m_end(data + num_points * 2),
+ m_closed(closed),
+ m_stop(false)
+ {}
+
+ void init(const T* data, unsigned num_points, bool closed)
+ {
+ m_data = data;
+ m_ptr = data;
+ m_end = data + num_points * 2;
+ m_closed = closed;
+ m_stop = false;
+ }
+
+ void rewind(unsigned)
+ {
+ m_ptr = m_data;
+ m_stop = false;
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_ptr < m_end)
+ {
+ bool first = m_ptr == m_data;
+ *x = *m_ptr++;
+ *y = *m_ptr++;
+ return first ? path_cmd_move_to : path_cmd_line_to;
+ }
+ *x = *y = 0.0;
+ if(m_closed && !m_stop)
+ {
+ m_stop = true;
+ return path_cmd_end_poly | path_flags_close;
+ }
+ return path_cmd_stop;
+ }
+
+ private:
+ const T* m_data;
+ const T* m_ptr;
+ const T* m_end;
+ bool m_closed;
+ bool m_stop;
+ };
+
+
+
+
+
+ //-------------------------------------------------poly_container_adaptor
+ template<class Container> class poly_container_adaptor
+ {
+ public:
+ typedef typename Container::value_type vertex_type;
+
+ poly_container_adaptor() :
+ m_container(0),
+ m_index(0),
+ m_closed(false),
+ m_stop(false)
+ {}
+
+ poly_container_adaptor(const Container& data, bool closed) :
+ m_container(&data),
+ m_index(0),
+ m_closed(closed),
+ m_stop(false)
+ {}
+
+ void init(const Container& data, bool closed)
+ {
+ m_container = &data;
+ m_index = 0;
+ m_closed = closed;
+ m_stop = false;
+ }
+
+ void rewind(unsigned)
+ {
+ m_index = 0;
+ m_stop = false;
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_index < m_container->size())
+ {
+ bool first = m_index == 0;
+ const vertex_type& v = (*m_container)[m_index++];
+ *x = v.x;
+ *y = v.y;
+ return first ? path_cmd_move_to : path_cmd_line_to;
+ }
+ *x = *y = 0.0;
+ if(m_closed && !m_stop)
+ {
+ m_stop = true;
+ return path_cmd_end_poly | path_flags_close;
+ }
+ return path_cmd_stop;
+ }
+
+ private:
+ const Container* m_container;
+ unsigned m_index;
+ bool m_closed;
+ bool m_stop;
+ };
+
+
+
+ //-----------------------------------------poly_container_reverse_adaptor
+ template<class Container> class poly_container_reverse_adaptor
+ {
+ public:
+ typedef typename Container::value_type vertex_type;
+
+ poly_container_reverse_adaptor() :
+ m_container(0),
+ m_index(-1),
+ m_closed(false),
+ m_stop(false)
+ {}
+
+ poly_container_reverse_adaptor(Container& data, bool closed) :
+ m_container(&data),
+ m_index(-1),
+ m_closed(closed),
+ m_stop(false)
+ {}
+
+ void init(Container& data, bool closed)
+ {
+ m_container = &data;
+ m_index = m_container->size() - 1;
+ m_closed = closed;
+ m_stop = false;
+ }
+
+ void rewind(unsigned)
+ {
+ m_index = m_container->size() - 1;
+ m_stop = false;
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ if(m_index >= 0)
+ {
+ bool first = m_index == int(m_container->size() - 1);
+ const vertex_type& v = (*m_container)[m_index--];
+ *x = v.x;
+ *y = v.y;
+ return first ? path_cmd_move_to : path_cmd_line_to;
+ }
+ *x = *y = 0.0;
+ if(m_closed && !m_stop)
+ {
+ m_stop = true;
+ return path_cmd_end_poly | path_flags_close;
+ }
+ return path_cmd_stop;
+ }
+
+ private:
+ Container* m_container;
+ int m_index;
+ bool m_closed;
+ bool m_stop;
+ };
+
+
+
+
+
+ //--------------------------------------------------------line_adaptor
+ class line_adaptor
+ {
+ public:
+ typedef double value_type;
+
+ line_adaptor() : m_line(m_coord, 2, false) {}
+ line_adaptor(double x1, double y1, double x2, double y2) :
+ m_line(m_coord, 2, false)
+ {
+ m_coord[0] = x1;
+ m_coord[1] = y1;
+ m_coord[2] = x2;
+ m_coord[3] = y2;
+ }
+
+ void init(double x1, double y1, double x2, double y2)
+ {
+ m_coord[0] = x1;
+ m_coord[1] = y1;
+ m_coord[2] = x2;
+ m_coord[3] = y2;
+ m_line.rewind(0);
+ }
+
+ void rewind(unsigned)
+ {
+ m_line.rewind(0);
+ }
+
+ unsigned vertex(double* x, double* y)
+ {
+ return m_line.vertex(x, y);
+ }
+
+ private:
+ double m_coord[4];
+ poly_plain_adaptor<double> m_line;
+ };
+
+
+
+
+
+
+
+
+
+
+
+
+
+ //---------------------------------------------------------------path_base
+ // A container to store vertices with their flags.
+ // A path consists of a number of contours separated with "move_to"
+ // commands. The path storage can keep and maintain more than one
+ // path.
+ // To navigate to the beginning of a particular path, use rewind(path_id),
+ // where path_id is the value returned by start_new_path(). So, when you
+ // call start_new_path(), store its return value if you need to navigate
+ // to that path afterwards.
+ //
+ // See also: vertex_source concept
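+ //
+ // A minimal usage sketch (editorial, assuming the usual path_storage
+ // typedef for this template declared later in this header):
+ //
+ //     agg::path_storage ps;
+ //     unsigned path_id = ps.start_new_path();
+ //     ps.move_to(10.0, 10.0);
+ //     ps.line_to(100.0, 10.0);
+ //     ps.line_to(100.0, 100.0);
+ //     ps.close_polygon();
+ //     ps.rewind(path_id);
+ //     double x, y;
+ //     unsigned cmd;
+ //     while(!agg::is_stop(cmd = ps.vertex(&x, &y)))
+ //     {
+ //         // consume (x, y, cmd)
+ //     }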
+ //------------------------------------------------------------------------
+ template<class VertexContainer> class path_base
+ {
+ public:
+ typedef VertexContainer container_type;
+ typedef path_base<VertexContainer> self_type;
+
+ //--------------------------------------------------------------------
+ path_base() : m_vertices(), m_iterator(0) {}
+ void remove_all() { m_vertices.remove_all(); m_iterator = 0; }
+ void free_all() { m_vertices.free_all(); m_iterator = 0; }
+
+ // Make path functions
+ //--------------------------------------------------------------------
+ unsigned start_new_path();
+
+ void move_to(double x, double y);
+ void move_rel(double dx, double dy);
+
+ void line_to(double x, double y);
+ void line_rel(double dx, double dy);
+
+ void hline_to(double x);
+ void hline_rel(double dx);
+
+ void vline_to(double y);
+ void vline_rel(double dy);
+
+ void arc_to(double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double x, double y);
+
+ void arc_rel(double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double dx, double dy);
+
+ void curve3(double x_ctrl, double y_ctrl,
+ double x_to, double y_to);
+
+ void curve3_rel(double dx_ctrl, double dy_ctrl,
+ double dx_to, double dy_to);
+
+ void curve3(double x_to, double y_to);
+
+ void curve3_rel(double dx_to, double dy_to);
+
+ void curve4(double x_ctrl1, double y_ctrl1,
+ double x_ctrl2, double y_ctrl2,
+ double x_to, double y_to);
+
+ void curve4_rel(double dx_ctrl1, double dy_ctrl1,
+ double dx_ctrl2, double dy_ctrl2,
+ double dx_to, double dy_to);
+
+ void curve4(double x_ctrl2, double y_ctrl2,
+ double x_to, double y_to);
+
+ void curve4_rel(double x_ctrl2, double y_ctrl2,
+ double x_to, double y_to);
+
+
+ void end_poly(unsigned flags = path_flags_close);
+ void close_polygon(unsigned flags = path_flags_none);
+
+ // Accessors
+ //--------------------------------------------------------------------
+ const container_type& vertices() const { return m_vertices; }
+ container_type& vertices() { return m_vertices; }
+
+ unsigned total_vertices() const;
+
+ void rel_to_abs(double* x, double* y) const;
+
+ unsigned last_vertex(double* x, double* y) const;
+ unsigned prev_vertex(double* x, double* y) const;
+
+ double last_x() const;
+ double last_y() const;
+
+ unsigned vertex(unsigned idx, double* x, double* y) const;
+ unsigned command(unsigned idx) const;
+
+ void modify_vertex(unsigned idx, double x, double y);
+ void modify_vertex(unsigned idx, double x, double y, unsigned cmd);
+ void modify_command(unsigned idx, unsigned cmd);
+
+ // VertexSource interface
+ //--------------------------------------------------------------------
+ void rewind(unsigned path_id);
+ unsigned vertex(double* x, double* y);
+
+ // Arrange the orientation of a polygon, all polygons in a path,
+ // or in all paths. After calling arrange_orientations() or
+ // arrange_orientations_all_paths(), all the polygons will have
+ // the same orientation, i.e. path_flags_cw or path_flags_ccw
+ //--------------------------------------------------------------------
+ unsigned arrange_polygon_orientation(unsigned start, path_flags_e orientation);
+ unsigned arrange_orientations(unsigned path_id, path_flags_e orientation);
+ void arrange_orientations_all_paths(path_flags_e orientation);
+ void invert_polygon(unsigned start);
+
+ // Flip all vertices horizontally or vertically,
+ // between x1 and x2, or between y1 and y2 respectively
+ //--------------------------------------------------------------------
+ void flip_x(double x1, double x2);
+ void flip_y(double y1, double y2);
+
+ // Concatenate path. The path is added as is.
+ //--------------------------------------------------------------------
+ template<class VertexSource>
+ void concat_path(VertexSource& vs, unsigned path_id = 0)
+ {
+ double x, y;
+ unsigned cmd;
+ vs.rewind(path_id);
+ while(!is_stop(cmd = vs.vertex(&x, &y)))
+ {
+ m_vertices.add_vertex(x, y, cmd);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Join path. The path is joined with the existing one, that is,
+ // it behaves as if the pen of a plotter was always down (drawing)
+ template<class VertexSource>
+ void join_path(VertexSource& vs, unsigned path_id = 0)
+ {
+ double x, y;
+ unsigned cmd;
+ vs.rewind(path_id);
+ cmd = vs.vertex(&x, &y);
+ if(!is_stop(cmd))
+ {
+ if(is_vertex(cmd))
+ {
+ double x0, y0;
+ unsigned cmd0 = last_vertex(&x0, &y0);
+ if(is_vertex(cmd0))
+ {
+ if(calc_distance(x, y, x0, y0) > vertex_dist_epsilon)
+ {
+ if(is_move_to(cmd)) cmd = path_cmd_line_to;
+ m_vertices.add_vertex(x, y, cmd);
+ }
+ }
+ else
+ {
+ if(is_stop(cmd0))
+ {
+ cmd = path_cmd_move_to;
+ }
+ else
+ {
+ if(is_move_to(cmd)) cmd = path_cmd_line_to;
+ }
+ m_vertices.add_vertex(x, y, cmd);
+ }
+ }
+ while(!is_stop(cmd = vs.vertex(&x, &y)))
+ {
+ m_vertices.add_vertex(x, y, is_move_to(cmd) ?
+ unsigned(path_cmd_line_to) :
+ cmd);
+ }
+ }
+ }
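+
+ // Editorial note on the difference: concat_path() copies the source's
+ // commands verbatim, so its move_to commands start new sub-paths, while
+ // join_path() converts move_to commands into line_to so the new vertices
+ // continue the current contour without lifting the pen.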
+
+ // Concatenate polygon/polyline.
+ //--------------------------------------------------------------------
+ template<class T> void concat_poly(const T* data,
+ unsigned num_points,
+ bool closed)
+ {
+ poly_plain_adaptor<T> poly(data, num_points, closed);
+ concat_path(poly);
+ }
+
+ // Join polygon/polyline continuously.
+ //--------------------------------------------------------------------
+ template<class T> void join_poly(const T* data,
+ unsigned num_points,
+ bool closed)
+ {
+ poly_plain_adaptor<T> poly(data, num_points, closed);
+ join_path(poly);
+ }
+
+ //--------------------------------------------------------------------
+ void translate(double dx, double dy, unsigned path_id=0);
+ void translate_all_paths(double dx, double dy);
+
+ //--------------------------------------------------------------------
+ template<class Trans>
+ void transform(const Trans& trans, unsigned path_id=0)
+ {
+ unsigned num_ver = m_vertices.total_vertices();
+ for(; path_id < num_ver; path_id++)
+ {
+ double x, y;
+ unsigned cmd = m_vertices.vertex(path_id, &x, &y);
+ if(is_stop(cmd)) break;
+ if(is_vertex(cmd))
+ {
+ trans.transform(&x, &y);
+ m_vertices.modify_vertex(path_id, x, y);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ template<class Trans>
+ void transform_all_paths(const Trans& trans)
+ {
+ unsigned idx;
+ unsigned num_ver = m_vertices.total_vertices();
+ for(idx = 0; idx < num_ver; idx++)
+ {
+ double x, y;
+ if(is_vertex(m_vertices.vertex(idx, &x, &y)))
+ {
+ trans.transform(&x, &y);
+ m_vertices.modify_vertex(idx, x, y);
+ }
+ }
+ }
+
+
+
+ private:
+ unsigned perceive_polygon_orientation(unsigned start, unsigned end);
+ void invert_polygon(unsigned start, unsigned end);
+
+ VertexContainer m_vertices;
+ unsigned m_iterator;
+ };
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ unsigned path_base<VC>::start_new_path()
+ {
+ if(!is_stop(m_vertices.last_command()))
+ {
+ m_vertices.add_vertex(0.0, 0.0, path_cmd_stop);
+ }
+ return m_vertices.total_vertices();
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::rel_to_abs(double* x, double* y) const
+ {
+ if(m_vertices.total_vertices())
+ {
+ double x2;
+ double y2;
+ if(is_vertex(m_vertices.last_vertex(&x2, &y2)))
+ {
+ *x += x2;
+ *y += y2;
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::move_to(double x, double y)
+ {
+ m_vertices.add_vertex(x, y, path_cmd_move_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::move_rel(double dx, double dy)
+ {
+ rel_to_abs(&dx, &dy);
+ m_vertices.add_vertex(dx, dy, path_cmd_move_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::line_to(double x, double y)
+ {
+ m_vertices.add_vertex(x, y, path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::line_rel(double dx, double dy)
+ {
+ rel_to_abs(&dx, &dy);
+ m_vertices.add_vertex(dx, dy, path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::hline_to(double x)
+ {
+ m_vertices.add_vertex(x, last_y(), path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::hline_rel(double dx)
+ {
+ double dy = 0;
+ rel_to_abs(&dx, &dy);
+ m_vertices.add_vertex(dx, dy, path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::vline_to(double y)
+ {
+ m_vertices.add_vertex(last_x(), y, path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::vline_rel(double dy)
+ {
+ double dx = 0;
+ rel_to_abs(&dx, &dy);
+ m_vertices.add_vertex(dx, dy, path_cmd_line_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::arc_to(double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double x, double y)
+ {
+ if(m_vertices.total_vertices() && is_vertex(m_vertices.last_command()))
+ {
+ const double epsilon = 1e-30;
+ double x0 = 0.0;
+ double y0 = 0.0;
+ m_vertices.last_vertex(&x0, &y0);
+
+ rx = fabs(rx);
+ ry = fabs(ry);
+
+ // Ensure radii are valid
+ //-------------------------
+ if(rx < epsilon || ry < epsilon)
+ {
+ line_to(x, y);
+ return;
+ }
+
+ if(calc_distance(x0, y0, x, y) < epsilon)
+ {
+ // If the endpoints (x, y) and (x0, y0) are identical, then this
+ // is equivalent to omitting the elliptical arc segment entirely.
+ return;
+ }
+ bezier_arc_svg a(x0, y0, rx, ry, angle, large_arc_flag, sweep_flag, x, y);
+ if(a.radii_ok())
+ {
+ join_path(a);
+ }
+ else
+ {
+ line_to(x, y);
+ }
+ }
+ else
+ {
+ move_to(x, y);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::arc_rel(double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double dx, double dy)
+ {
+ rel_to_abs(&dx, &dy);
+ arc_to(rx, ry, angle, large_arc_flag, sweep_flag, dx, dy);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve3(double x_ctrl, double y_ctrl,
+ double x_to, double y_to)
+ {
+ m_vertices.add_vertex(x_ctrl, y_ctrl, path_cmd_curve3);
+ m_vertices.add_vertex(x_to, y_to, path_cmd_curve3);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve3_rel(double dx_ctrl, double dy_ctrl,
+ double dx_to, double dy_to)
+ {
+ rel_to_abs(&dx_ctrl, &dy_ctrl);
+ rel_to_abs(&dx_to, &dy_to);
+ m_vertices.add_vertex(dx_ctrl, dy_ctrl, path_cmd_curve3);
+ m_vertices.add_vertex(dx_to, dy_to, path_cmd_curve3);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve3(double x_to, double y_to)
+ {
+ double x0;
+ double y0;
+ if(is_vertex(m_vertices.last_vertex(&x0, &y0)))
+ {
+ double x_ctrl;
+ double y_ctrl;
+ unsigned cmd = m_vertices.prev_vertex(&x_ctrl, &y_ctrl);
+ if(is_curve(cmd))
+ {
+ x_ctrl = x0 + x0 - x_ctrl;
+ y_ctrl = y0 + y0 - y_ctrl;
+ }
+ else
+ {
+ x_ctrl = x0;
+ y_ctrl = y0;
+ }
+ curve3(x_ctrl, y_ctrl, x_to, y_to);
+ }
+ }
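
// Illustrative sketch (not part of the vendored source): the two-argument
// curve3/curve4 overloads are the shorthand ("smooth") commands: the omitted
// control point is the reflection of the previous control point about the
// current point, ctrl = 2*p0 - ctrl_prev, or p0 itself if the previous
// command was not a curve.
#include "agg_path_storage.h"

inline void smooth_curve_demo()
{
    agg::path_storage ps;
    ps.move_to(0.0, 0.0);
    ps.curve3(20.0, 40.0, 50.0, 0.0); // explicit control point
    ps.curve3(100.0, 0.0);            // inferred control point == (80, -40)
}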
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve3_rel(double dx_to, double dy_to)
+ {
+ rel_to_abs(&dx_to, &dy_to);
+ curve3(dx_to, dy_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve4(double x_ctrl1, double y_ctrl1,
+ double x_ctrl2, double y_ctrl2,
+ double x_to, double y_to)
+ {
+ m_vertices.add_vertex(x_ctrl1, y_ctrl1, path_cmd_curve4);
+ m_vertices.add_vertex(x_ctrl2, y_ctrl2, path_cmd_curve4);
+ m_vertices.add_vertex(x_to, y_to, path_cmd_curve4);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve4_rel(double dx_ctrl1, double dy_ctrl1,
+ double dx_ctrl2, double dy_ctrl2,
+ double dx_to, double dy_to)
+ {
+ rel_to_abs(&dx_ctrl1, &dy_ctrl1);
+ rel_to_abs(&dx_ctrl2, &dy_ctrl2);
+ rel_to_abs(&dx_to, &dy_to);
+ m_vertices.add_vertex(dx_ctrl1, dy_ctrl1, path_cmd_curve4);
+ m_vertices.add_vertex(dx_ctrl2, dy_ctrl2, path_cmd_curve4);
+ m_vertices.add_vertex(dx_to, dy_to, path_cmd_curve4);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve4(double x_ctrl2, double y_ctrl2,
+ double x_to, double y_to)
+ {
+ double x0;
+ double y0;
+ if(is_vertex(last_vertex(&x0, &y0)))
+ {
+ double x_ctrl1;
+ double y_ctrl1;
+ unsigned cmd = prev_vertex(&x_ctrl1, &y_ctrl1);
+ if(is_curve(cmd))
+ {
+ x_ctrl1 = x0 + x0 - x_ctrl1;
+ y_ctrl1 = y0 + y0 - y_ctrl1;
+ }
+ else
+ {
+ x_ctrl1 = x0;
+ y_ctrl1 = y0;
+ }
+ curve4(x_ctrl1, y_ctrl1, x_ctrl2, y_ctrl2, x_to, y_to);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::curve4_rel(double dx_ctrl2, double dy_ctrl2,
+ double dx_to, double dy_to)
+ {
+ rel_to_abs(&dx_ctrl2, &dy_ctrl2);
+ rel_to_abs(&dx_to, &dy_to);
+ curve4(dx_ctrl2, dy_ctrl2, dx_to, dy_to);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::end_poly(unsigned flags)
+ {
+ if(is_vertex(m_vertices.last_command()))
+ {
+ m_vertices.add_vertex(0.0, 0.0, path_cmd_end_poly | flags);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::close_polygon(unsigned flags)
+ {
+ end_poly(path_flags_close | flags);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::total_vertices() const
+ {
+ return m_vertices.total_vertices();
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::last_vertex(double* x, double* y) const
+ {
+ return m_vertices.last_vertex(x, y);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::prev_vertex(double* x, double* y) const
+ {
+ return m_vertices.prev_vertex(x, y);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline double path_base<VC>::last_x() const
+ {
+ return m_vertices.last_x();
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline double path_base<VC>::last_y() const
+ {
+ return m_vertices.last_y();
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::vertex(unsigned idx, double* x, double* y) const
+ {
+ return m_vertices.vertex(idx, x, y);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::command(unsigned idx) const
+ {
+ return m_vertices.command(idx);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::modify_vertex(unsigned idx, double x, double y)
+ {
+ m_vertices.modify_vertex(idx, x, y);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::modify_vertex(unsigned idx, double x, double y, unsigned cmd)
+ {
+ m_vertices.modify_vertex(idx, x, y, cmd);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::modify_command(unsigned idx, unsigned cmd)
+ {
+ m_vertices.modify_command(idx, cmd);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline void path_base<VC>::rewind(unsigned path_id)
+ {
+ m_iterator = path_id;
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ inline unsigned path_base<VC>::vertex(double* x, double* y)
+ {
+ if(m_iterator >= m_vertices.total_vertices()) return path_cmd_stop;
+ return m_vertices.vertex(m_iterator++, x, y);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ unsigned path_base<VC>::perceive_polygon_orientation(unsigned start,
+ unsigned end)
+ {
+ // Calculate signed area (double area to be exact)
+ //---------------------
+ unsigned np = end - start;
+ double area = 0.0;
+ unsigned i;
+ for(i = 0; i < np; i++)
+ {
+ double x1, y1, x2, y2;
+ m_vertices.vertex(start + i, &x1, &y1);
+ m_vertices.vertex(start + (i + 1) % np, &x2, &y2);
+ area += x1 * y2 - y1 * x2;
+ }
+ return (area < 0.0) ? path_flags_cw : path_flags_ccw;
+ }
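
// Illustrative sketch (not part of the vendored source): the orientation test
// above is the shoelace formula: it accumulates the doubled signed area,
// the sum of (x_i*y_{i+1} - y_i*x_{i+1}) over the vertex ring, and maps a
// negative sum to path_flags_cw and a non-negative one to path_flags_ccw.
#include <cstddef>
#include <utility>
#include <vector>

inline double doubled_signed_area(const std::vector<std::pair<double, double> >& pts)
{
    double area = 0.0;
    for (std::size_t i = 0; i < pts.size(); ++i)
    {
        const std::pair<double, double>& p1 = pts[i];
        const std::pair<double, double>& p2 = pts[(i + 1) % pts.size()];
        area += p1.first * p2.second - p1.second * p2.first;
    }
    return area; // unit square (0,0)(1,0)(1,1)(0,1) gives 2.0 -> path_flags_ccw
}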
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::invert_polygon(unsigned start, unsigned end)
+ {
+ unsigned i;
+ unsigned tmp_cmd = m_vertices.command(start);
+
+ --end; // Make "end" inclusive
+
+ // Shift all commands by one position
+ for(i = start; i < end; i++)
+ {
+ m_vertices.modify_command(i, m_vertices.command(i + 1));
+ }
+
+ // Assign starting command to the ending command
+ m_vertices.modify_command(end, tmp_cmd);
+
+ // Reverse the polygon
+ while(end > start)
+ {
+ m_vertices.swap_vertices(start++, end--);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::invert_polygon(unsigned start)
+ {
+ // Skip all non-vertices at the beginning
+ while(start < m_vertices.total_vertices() &&
+ !is_vertex(m_vertices.command(start))) ++start;
+
+ // Skip all insignificant move_to
+ while(start+1 < m_vertices.total_vertices() &&
+ is_move_to(m_vertices.command(start)) &&
+ is_move_to(m_vertices.command(start+1))) ++start;
+
+ // Find the last vertex
+ unsigned end = start + 1;
+ while(end < m_vertices.total_vertices() &&
+ !is_next_poly(m_vertices.command(end))) ++end;
+
+ invert_polygon(start, end);
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ unsigned path_base<VC>::arrange_polygon_orientation(unsigned start,
+ path_flags_e orientation)
+ {
+ if(orientation == path_flags_none) return start;
+
+ // Skip all non-vertices at the beginning
+ while(start < m_vertices.total_vertices() &&
+ !is_vertex(m_vertices.command(start))) ++start;
+
+ // Skip all insignificant move_to
+ while(start+1 < m_vertices.total_vertices() &&
+ is_move_to(m_vertices.command(start)) &&
+ is_move_to(m_vertices.command(start+1))) ++start;
+
+ // Find the last vertex
+ unsigned end = start + 1;
+ while(end < m_vertices.total_vertices() &&
+ !is_next_poly(m_vertices.command(end))) ++end;
+
+ if(end - start > 2)
+ {
+ if(perceive_polygon_orientation(start, end) != unsigned(orientation))
+ {
+ // Invert polygon, set orientation flag, and skip all end_poly
+ invert_polygon(start, end);
+ unsigned cmd;
+ while(end < m_vertices.total_vertices() &&
+ is_end_poly(cmd = m_vertices.command(end)))
+ {
+ m_vertices.modify_command(end++, set_orientation(cmd, orientation));
+ }
+ }
+ }
+ return end;
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ unsigned path_base<VC>::arrange_orientations(unsigned start,
+ path_flags_e orientation)
+ {
+ if(orientation != path_flags_none)
+ {
+ while(start < m_vertices.total_vertices())
+ {
+ start = arrange_polygon_orientation(start, orientation);
+ if(is_stop(m_vertices.command(start)))
+ {
+ ++start;
+ break;
+ }
+ }
+ }
+ return start;
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::arrange_orientations_all_paths(path_flags_e orientation)
+ {
+ if(orientation != path_flags_none)
+ {
+ unsigned start = 0;
+ while(start < m_vertices.total_vertices())
+ {
+ start = arrange_orientations(start, orientation);
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::flip_x(double x1, double x2)
+ {
+ unsigned i;
+ double x, y;
+ for(i = 0; i < m_vertices.total_vertices(); i++)
+ {
+ unsigned cmd = m_vertices.vertex(i, &x, &y);
+ if(is_vertex(cmd))
+ {
+ m_vertices.modify_vertex(i, x2 - x + x1, y);
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::flip_y(double y1, double y2)
+ {
+ unsigned i;
+ double x, y;
+ for(i = 0; i < m_vertices.total_vertices(); i++)
+ {
+ unsigned cmd = m_vertices.vertex(i, &x, &y);
+ if(is_vertex(cmd))
+ {
+ m_vertices.modify_vertex(i, x, y2 - y + y1);
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::translate(double dx, double dy, unsigned path_id)
+ {
+ unsigned num_ver = m_vertices.total_vertices();
+ for(; path_id < num_ver; path_id++)
+ {
+ double x, y;
+ unsigned cmd = m_vertices.vertex(path_id, &x, &y);
+ if(is_stop(cmd)) break;
+ if(is_vertex(cmd))
+ {
+ x += dx;
+ y += dy;
+ m_vertices.modify_vertex(path_id, x, y);
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class VC>
+ void path_base<VC>::translate_all_paths(double dx, double dy)
+ {
+ unsigned idx;
+ unsigned num_ver = m_vertices.total_vertices();
+ for(idx = 0; idx < num_ver; idx++)
+ {
+ double x, y;
+ if(is_vertex(m_vertices.vertex(idx, &x, &y)))
+ {
+ x += dx;
+ y += dy;
+ m_vertices.modify_vertex(idx, x, y);
+ }
+ }
+ }
+
+ //-----------------------------------------------------vertex_stl_storage
+ template<class Container> class vertex_stl_storage
+ {
+ public:
+ typedef typename Container::value_type vertex_type;
+ typedef typename vertex_type::value_type value_type;
+
+ void remove_all() { m_vertices.clear(); }
+ void free_all() { m_vertices.clear(); }
+
+ void add_vertex(double x, double y, unsigned cmd)
+ {
+ m_vertices.push_back(vertex_type(value_type(x),
+ value_type(y),
+ int8u(cmd)));
+ }
+
+ void modify_vertex(unsigned idx, double x, double y)
+ {
+ vertex_type& v = m_vertices[idx];
+ v.x = value_type(x);
+ v.y = value_type(y);
+ }
+
+ void modify_vertex(unsigned idx, double x, double y, unsigned cmd)
+ {
+ vertex_type& v = m_vertices[idx];
+ v.x = value_type(x);
+ v.y = value_type(y);
+ v.cmd = int8u(cmd);
+ }
+
+ void modify_command(unsigned idx, unsigned cmd)
+ {
+ m_vertices[idx].cmd = int8u(cmd);
+ }
+
+ void swap_vertices(unsigned v1, unsigned v2)
+ {
+ vertex_type t = m_vertices[v1];
+ m_vertices[v1] = m_vertices[v2];
+ m_vertices[v2] = t;
+ }
+
+ unsigned last_command() const
+ {
+ return m_vertices.size() ?
+ m_vertices[m_vertices.size() - 1].cmd :
+ path_cmd_stop;
+ }
+
+ unsigned last_vertex(double* x, double* y) const
+ {
+ if(m_vertices.size() == 0)
+ {
+ *x = *y = 0.0;
+ return path_cmd_stop;
+ }
+ return vertex(m_vertices.size() - 1, x, y);
+ }
+
+ unsigned prev_vertex(double* x, double* y) const
+ {
+ if(m_vertices.size() < 2)
+ {
+ *x = *y = 0.0;
+ return path_cmd_stop;
+ }
+ return vertex(m_vertices.size() - 2, x, y);
+ }
+
+ double last_x() const
+ {
+ return m_vertices.size() ? m_vertices[m_vertices.size() - 1].x : 0.0;
+ }
+
+ double last_y() const
+ {
+ return m_vertices.size() ? m_vertices[m_vertices.size() - 1].y : 0.0;
+ }
+
+ unsigned total_vertices() const
+ {
+ return m_vertices.size();
+ }
+
+ unsigned vertex(unsigned idx, double* x, double* y) const
+ {
+ const vertex_type& v = m_vertices[idx];
+ *x = v.x;
+ *y = v.y;
+ return v.cmd;
+ }
+
+ unsigned command(unsigned idx) const
+ {
+ return m_vertices[idx].cmd;
+ }
+
+ private:
+ Container m_vertices;
+ };
+
+ //-----------------------------------------------------------path_storage
+ typedef path_base<vertex_block_storage<double> > path_storage;
+
+ // Example declaration of path_storage with pod_bvector as the container
+ //-----------------------------------------------------------------------
+ //typedef path_base<vertex_stl_storage<pod_bvector<vertex_d> > > path_storage;
+
+}
+
+
+
+// Example declaration of path_storage with std::vector as the container
+//---------------------------------------------------------------------------
+//#include <vector>
+//namespace agg
+//{
+// typedef path_base<vertex_stl_storage<std::vector<vertex_d> > > stl_path_storage;
+//}
+
+
+
+
+#endif
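
For orientation, a minimal usage sketch of the path_storage typedef declared above (this snippet is not part of the imported sources): build a figure with the absolute commands, then replay it through the rewind/vertex vertex-source interface, as a rasterizer would.

    #include "agg_path_storage.h"

    void triangle_demo()
    {
        agg::path_storage ps;
        ps.move_to(10.0, 10.0);
        ps.line_to(90.0, 10.0);
        ps.line_to(50.0, 70.0);
        ps.close_polygon();

        double x, y;
        unsigned cmd;
        ps.rewind(0);
        while (!agg::is_stop(cmd = ps.vertex(&x, &y)))
        {
            // cmd is path_cmd_move_to, path_cmd_line_to or path_cmd_end_poly | flags
        }
    }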
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_amask_adaptor.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_amask_adaptor.h
new file mode 100644
index 00000000000..cf39c54ad55
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_amask_adaptor.h
@@ -0,0 +1,240 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_PIXFMT_AMASK_ADAPTOR_INCLUDED
+#define AGG_PIXFMT_AMASK_ADAPTOR_INCLUDED
+
+
+#include <string.h>
+#include "agg_array.h"
+#include "agg_rendering_buffer.h"
+
+
+namespace agg
+{
+ //==================================================pixfmt_amask_adaptor
+ template<class PixFmt, class AlphaMask> class pixfmt_amask_adaptor
+ {
+ public:
+ typedef PixFmt pixfmt_type;
+ typedef typename pixfmt_type::color_type color_type;
+ typedef typename pixfmt_type::row_data row_data;
+ typedef AlphaMask amask_type;
+ typedef typename amask_type::cover_type cover_type;
+
+ private:
+ enum span_extra_tail_e { span_extra_tail = 256 };
+
+ void realloc_span(unsigned len)
+ {
+ if(len > m_span.size())
+ {
+ m_span.resize(len + span_extra_tail);
+ }
+ }
+
+ void init_span(unsigned len)
+ {
+ realloc_span(len);
+ memset(&m_span[0], amask_type::cover_full, len * sizeof(cover_type));
+ }
+
+ void init_span(unsigned len, const cover_type* covers)
+ {
+ realloc_span(len);
+ memcpy(&m_span[0], covers, len * sizeof(cover_type));
+ }
+
+
+ public:
+ pixfmt_amask_adaptor(pixfmt_type& pixf, amask_type& mask) :
+ m_pixf(&pixf), m_mask(&mask), m_span()
+ {}
+
+ void attach_pixfmt(pixfmt_type& pixf) { m_pixf = &pixf; }
+ void attach_alpha_mask(amask_type& mask) { m_mask = &mask; }
+
+ //--------------------------------------------------------------------
+ template<class PixFmt2>
+ bool attach_pixfmt(PixFmt2& pixf, int x1, int y1, int x2, int y2)
+ {
+ return m_pixf->attach(pixf, x1, y1, x2, y2);
+ }
+
+ //--------------------------------------------------------------------
+ unsigned width() const { return m_pixf->width(); }
+ unsigned height() const { return m_pixf->height(); }
+
+ //--------------------------------------------------------------------
+ color_type pixel(int x, int y)
+ {
+ return m_pixf->pixel(x, y);
+ }
+
+ //--------------------------------------------------------------------
+ void copy_pixel(int x, int y, const color_type& c)
+ {
+ m_pixf->blend_pixel(x, y, c, m_mask->pixel(x, y));
+ }
+
+ //--------------------------------------------------------------------
+ void blend_pixel(int x, int y, const color_type& c, cover_type cover)
+ {
+ m_pixf->blend_pixel(x, y, c, m_mask->combine_pixel(x, y, cover));
+ }
+
+ //--------------------------------------------------------------------
+ void copy_hline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ realloc_span(len);
+ m_mask->fill_hspan(x, y, &m_span[0], len);
+ m_pixf->blend_solid_hspan(x, y, len, c, &m_span[0]);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_hline(int x, int y,
+ unsigned len,
+ const color_type& c,
+ cover_type cover)
+ {
+ init_span(len);
+ m_mask->combine_hspan(x, y, &m_span[0], len);
+ m_pixf->blend_solid_hspan(x, y, len, c, &m_span[0]);
+ }
+
+ //--------------------------------------------------------------------
+ void copy_vline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ realloc_span(len);
+ m_mask->fill_vspan(x, y, &m_span[0], len);
+ m_pixf->blend_solid_vspan(x, y, len, c, &m_span[0]);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_vline(int x, int y,
+ unsigned len,
+ const color_type& c,
+ cover_type cover)
+ {
+ init_span(len);
+ m_mask->combine_vspan(x, y, &m_span[0], len);
+ m_pixf->blend_solid_vspan(x, y, len, c, &m_span[0]);
+ }
+
+ //--------------------------------------------------------------------
+ void copy_from(const rendering_buffer& from,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len)
+ {
+ m_pixf->copy_from(from, xdst, ydst, xsrc, ysrc, len);
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_solid_hspan(int x, int y,
+ unsigned len,
+ const color_type& c,
+ const cover_type* covers)
+ {
+ init_span(len, covers);
+ m_mask->combine_hspan(x, y, &m_span[0], len);
+ m_pixf->blend_solid_hspan(x, y, len, c, &m_span[0]);
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_solid_vspan(int x, int y,
+ unsigned len,
+ const color_type& c,
+ const cover_type* covers)
+ {
+ init_span(len, covers);
+ m_mask->combine_vspan(x, y, &m_span[0], len);
+ m_pixf->blend_solid_vspan(x, y, len, c, &m_span[0]);
+ }
+
+
+ //--------------------------------------------------------------------
+ void copy_color_hspan(int x, int y, unsigned len, const color_type* colors)
+ {
+ realloc_span(len);
+ m_mask->fill_hspan(x, y, &m_span[0], len);
+ m_pixf->blend_color_hspan(x, y, len, colors, &m_span[0], cover_full);
+ }
+
+ //--------------------------------------------------------------------
+ void copy_color_vspan(int x, int y, unsigned len, const color_type* colors)
+ {
+ realloc_span(len);
+ m_mask->fill_vspan(x, y, &m_span[0], len);
+ m_pixf->blend_color_vspan(x, y, len, colors, &m_span[0], cover_full);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_color_hspan(int x, int y,
+ unsigned len,
+ const color_type* colors,
+ const cover_type* covers,
+ cover_type cover = cover_full)
+ {
+ if(covers)
+ {
+ init_span(len, covers);
+ m_mask->combine_hspan(x, y, &m_span[0], len);
+ }
+ else
+ {
+ realloc_span(len);
+ m_mask->fill_hspan(x, y, &m_span[0], len);
+ }
+ m_pixf->blend_color_hspan(x, y, len, colors, &m_span[0], cover);
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_color_vspan(int x, int y,
+ unsigned len,
+ const color_type* colors,
+ const cover_type* covers,
+ cover_type cover = cover_full)
+ {
+ if(covers)
+ {
+ init_span(len, covers);
+ m_mask->combine_vspan(x, y, &m_span[0], len);
+ }
+ else
+ {
+ realloc_span(len);
+ m_mask->fill_vspan(x, y, &m_span[0], len);
+ }
+ m_pixf->blend_color_vspan(x, y, len, colors, &m_span[0], cover);
+ }
+
+ private:
+ pixfmt_type* m_pixf;
+ const amask_type* m_mask;
+ pod_array<cover_type> m_span;
+ };
+
+}
+
+#endif
+
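
A rough usage sketch of the adaptor (assuming the companion headers agg_pixfmt_gray.h and agg_alpha_mask_u8.h from this same import; buffer setup is the caller's): every write issued through the adaptor is attenuated by the attached 8-bit alpha mask.

    #include "agg_alpha_mask_u8.h"
    #include "agg_pixfmt_amask_adaptor.h"
    #include "agg_pixfmt_gray.h"

    void masked_fill(agg::rendering_buffer& pix_rbuf, agg::rendering_buffer& mask_rbuf)
    {
        agg::pixfmt_gray8        pixf(pix_rbuf);
        agg::amask_no_clip_gray8 mask(mask_rbuf);

        agg::pixfmt_amask_adaptor<agg::pixfmt_gray8,
                                  agg::amask_no_clip_gray8> masked(pixf, mask);

        // Covers are combined with the mask before reaching the wrapped pixfmt.
        masked.blend_hline(0, 0, pixf.width(), agg::gray8(255), agg::cover_full);
    }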
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_base.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_base.h
new file mode 100644
index 00000000000..57ae19cfe04
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_base.h
@@ -0,0 +1,97 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_PIXFMT_BASE_INCLUDED
+#define AGG_PIXFMT_BASE_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_color_gray.h"
+#include "agg_color_rgba.h"
+
+namespace agg
+{
+ struct pixfmt_gray_tag
+ {
+ };
+
+ struct pixfmt_rgb_tag
+ {
+ };
+
+ struct pixfmt_rgba_tag
+ {
+ };
+
+ //--------------------------------------------------------------blender_base
+ template<class ColorT, class Order = void>
+ struct blender_base
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+
+ static rgba get(value_type r, value_type g, value_type b, value_type a, cover_type cover = cover_full)
+ {
+ if (cover > cover_none)
+ {
+ rgba c(
+ color_type::to_double(r),
+ color_type::to_double(g),
+ color_type::to_double(b),
+ color_type::to_double(a));
+
+ if (cover < cover_full)
+ {
+ double x = double(cover) / cover_full;
+ c.r *= x;
+ c.g *= x;
+ c.b *= x;
+ c.a *= x;
+ }
+
+ return c;
+ }
+ else return rgba::no_color();
+ }
+
+ static rgba get(const value_type* p, cover_type cover = cover_full)
+ {
+ return get(
+ p[order_type::R],
+ p[order_type::G],
+ p[order_type::B],
+ p[order_type::A],
+ cover);
+ }
+
+ static void set(value_type* p, value_type r, value_type g, value_type b, value_type a)
+ {
+ p[order_type::R] = r;
+ p[order_type::G] = g;
+ p[order_type::B] = b;
+ p[order_type::A] = a;
+ }
+
+ static void set(value_type* p, const rgba& c)
+ {
+ p[order_type::R] = color_type::from_double(c.r);
+ p[order_type::G] = color_type::from_double(c.g);
+ p[order_type::B] = color_type::from_double(c.b);
+ p[order_type::A] = color_type::from_double(c.a);
+ }
+ };
+}
+
+#endif
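
blender_base mostly centralizes cover handling for the derived blenders: get() lifts raw channel values into a floating-point rgba scaled by cover/cover_full (returning rgba::no_color() when cover is zero), and set() writes an rgba back through from_double. A small sketch (the function name is made up for illustration):

    #include "agg_pixfmt_base.h"

    void cover_scaling_demo()
    {
        typedef agg::blender_base<agg::rgba8, agg::order_rgba> base;

        // cover == 128 of 255: an opaque white input comes back scaled by ~0.502.
        agg::rgba half = base::get(255, 255, 255, 255, 128);
        (void)half;
    }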
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_gray.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_gray.h
new file mode 100644
index 00000000000..438f04d33d4
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_gray.h
@@ -0,0 +1,737 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_PIXFMT_GRAY_INCLUDED
+#define AGG_PIXFMT_GRAY_INCLUDED
+
+#include <string.h>
+#include "agg_pixfmt_base.h"
+#include "agg_rendering_buffer.h"
+
+namespace agg
+{
+
+ //============================================================blender_gray
+ template<class ColorT> struct blender_gray
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ // Blend pixels using the non-premultiplied form of Alvy-Ray Smith's
+ // compositing function. Since the render buffer is opaque we skip the
+ // initial premultiply and final demultiply.
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cv, value_type alpha, cover_type cover)
+ {
+ blend_pix(p, cv, color_type::mult_cover(alpha, cover));
+ }
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cv, value_type alpha)
+ {
+ *p = color_type::lerp(*p, cv, alpha);
+ }
+ };
+
+
+ //======================================================blender_gray_pre
+ template<class ColorT> struct blender_gray_pre
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ // Blend pixels using the premultiplied form of Alvy-Ray Smith's
+ // compositing function.
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cv, value_type alpha, cover_type cover)
+ {
+ blend_pix(p, color_type::mult_cover(cv, cover), color_type::mult_cover(alpha, cover));
+ }
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cv, value_type alpha)
+ {
+ *p = color_type::prelerp(*p, cv, alpha);
+ }
+ };
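
// Note (floating-point form, for orientation): the two grayscale blenders
// differ only in the compositing identity used:
//     blender_gray (straight alpha):          d' = d + (s - d) * a   (lerp)
//     blender_gray_pre (premultiplied alpha): d' = d + s - d * a     (prelerp)
// e.g. d = 100, straight s = 200, a = 0.5: lerp gives 150; premultiplying the
// source first (200 * 0.5 = 100) and applying prelerp gives 100 + 100 - 50 = 150.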
+
+
+
+ //=====================================================apply_gamma_dir_gray
+ template<class ColorT, class GammaLut> class apply_gamma_dir_gray
+ {
+ public:
+ typedef typename ColorT::value_type value_type;
+
+ apply_gamma_dir_gray(const GammaLut& gamma) : m_gamma(gamma) {}
+
+ AGG_INLINE void operator () (value_type* p)
+ {
+ *p = m_gamma.dir(*p);
+ }
+
+ private:
+ const GammaLut& m_gamma;
+ };
+
+
+
+ //=====================================================apply_gamma_inv_gray
+ template<class ColorT, class GammaLut> class apply_gamma_inv_gray
+ {
+ public:
+ typedef typename ColorT::value_type value_type;
+
+ apply_gamma_inv_gray(const GammaLut& gamma) : m_gamma(gamma) {}
+
+ AGG_INLINE void operator () (value_type* p)
+ {
+ *p = m_gamma.inv(*p);
+ }
+
+ private:
+ const GammaLut& m_gamma;
+ };
+
+
+
+ //=================================================pixfmt_alpha_blend_gray
+ template<class Blender, class RenBuf, unsigned Step = 1, unsigned Offset = 0>
+ class pixfmt_alpha_blend_gray
+ {
+ public:
+ typedef pixfmt_gray_tag pixfmt_category;
+ typedef RenBuf rbuf_type;
+ typedef typename rbuf_type::row_data row_data;
+ typedef Blender blender_type;
+ typedef typename blender_type::color_type color_type;
+ typedef int order_type; // A fake one
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ enum
+ {
+ pix_width = sizeof(value_type) * Step,
+ pix_step = Step,
+ pix_offset = Offset,
+ };
+ struct pixel_type
+ {
+ value_type c[pix_step];
+
+ void set(value_type v)
+ {
+ c[0] = v;
+ }
+
+ void set(const color_type& color)
+ {
+ set(color.v);
+ }
+
+ void get(value_type& v) const
+ {
+ v = c[0];
+ }
+
+ color_type get() const
+ {
+ return color_type(c[0]);
+ }
+
+ pixel_type* next()
+ {
+ return this + 1;
+ }
+
+ const pixel_type* next() const
+ {
+ return this + 1;
+ }
+
+ pixel_type* advance(int n)
+ {
+ return this + n;
+ }
+
+ const pixel_type* advance(int n) const
+ {
+ return this + n;
+ }
+ };
+
+ private:
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p,
+ value_type v, value_type a,
+ unsigned cover)
+ {
+ blender_type::blend_pix(p->c, v, a, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p, value_type v, value_type a)
+ {
+ blender_type::blend_pix(p->c, v, a);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover)
+ {
+ blender_type::blend_pix(p->c, c.v, c.a, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c)
+ {
+ blender_type::blend_pix(p->c, c.v, c.a);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c, cover);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque())
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c);
+ }
+ }
+ }
+
+ public:
+ //--------------------------------------------------------------------
+ explicit pixfmt_alpha_blend_gray(rbuf_type& rb) :
+ m_rbuf(&rb)
+ {}
+ void attach(rbuf_type& rb) { m_rbuf = &rb; }
+ //--------------------------------------------------------------------
+
+ template<class PixFmt>
+ bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
+ {
+ rect_i r(x1, y1, x2, y2);
+ if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
+ {
+ int stride = pixf.stride();
+ m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
+ (r.x2 - r.x1) + 1,
+ (r.y2 - r.y1) + 1,
+ stride);
+ return true;
+ }
+ return false;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE unsigned width() const { return m_rbuf->width(); }
+ AGG_INLINE unsigned height() const { return m_rbuf->height(); }
+ AGG_INLINE int stride() const { return m_rbuf->stride(); }
+
+ //--------------------------------------------------------------------
+ int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); }
+ const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
+ row_data row(int y) const { return m_rbuf->row(y); }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE int8u* pix_ptr(int x, int y)
+ {
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
+ }
+
+ AGG_INLINE const int8u* pix_ptr(int x, int y) const
+ {
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
+ }
+
+ // Return pointer to pixel value, forcing row to be allocated.
+ AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len)
+ {
+ return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step + pix_offset));
+ }
+
+ // Return pointer to pixel value, or null if row not allocated.
+ AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const
+ {
+ int8u* p = m_rbuf->row_ptr(y);
+ return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step + pix_offset)) : 0;
+ }
+
+ // Get pixel pointer from raw buffer pointer.
+ AGG_INLINE static pixel_type* pix_value_ptr(void* p)
+ {
+ return (pixel_type*)((value_type*)p + pix_offset);
+ }
+
+ // Get pixel pointer from raw buffer pointer.
+ AGG_INLINE static const pixel_type* pix_value_ptr(const void* p)
+ {
+ return (const pixel_type*)((const value_type*)p + pix_offset);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static void write_plain_color(void* p, color_type c)
+ {
+ // Grayscale formats are implicitly premultiplied.
+ c.premultiply();
+ pix_value_ptr(p)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static color_type read_plain_color(const void* p)
+ {
+ return pix_value_ptr(p)->get();
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static void make_pix(int8u* p, const color_type& c)
+ {
+ ((pixel_type*)p)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE color_type pixel(int x, int y) const
+ {
+ if (const pixel_type* p = pix_value_ptr(x, y))
+ {
+ return p->get();
+ }
+ return color_type::no_color();
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
+ {
+ pix_value_ptr(x, y, 1)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y, 1), c, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_hline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+ do
+ {
+ p->set(c);
+ p = p->next();
+ }
+ while(--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_vline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ do
+ {
+ pix_value_ptr(x, y++, 1)->set(c);
+ }
+ while (--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_hline(int x, int y,
+ unsigned len,
+ const color_type& c,
+ int8u cover)
+ {
+ if (!c.is_transparent())
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ do
+ {
+ p->set(c);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ blend_pix(p, c, cover);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_vline(int x, int y,
+ unsigned len,
+ const color_type& c,
+ int8u cover)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ do
+ {
+ pix_value_ptr(x, y++, 1)->set(c);
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ blend_pix(pix_value_ptr(x, y++, 1), c, cover);
+ }
+ while (--len);
+ }
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_solid_hspan(int x, int y,
+ unsigned len,
+ const color_type& c,
+ const int8u* covers)
+ {
+ if (!c.is_transparent())
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ do
+ {
+ if (c.is_opaque() && *covers == cover_mask)
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c, *covers);
+ }
+ p = p->next();
+ ++covers;
+ }
+ while (--len);
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_solid_vspan(int x, int y,
+ unsigned len,
+ const color_type& c,
+ const int8u* covers)
+ {
+ if (!c.is_transparent())
+ {
+ do
+ {
+ pixel_type* p = pix_value_ptr(x, y++, 1);
+
+ if (c.is_opaque() && *covers == cover_mask)
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c, *covers);
+ }
+ ++covers;
+ }
+ while (--len);
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void copy_color_hspan(int x, int y,
+ unsigned len,
+ const color_type* colors)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ do
+ {
+ p->set(*colors++);
+ p = p->next();
+ }
+ while (--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ void copy_color_vspan(int x, int y,
+ unsigned len,
+ const color_type* colors)
+ {
+ do
+ {
+ pix_value_ptr(x, y++, 1)->set(*colors++);
+ }
+ while (--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_color_hspan(int x, int y,
+ unsigned len,
+ const color_type* colors,
+ const int8u* covers,
+ int8u cover)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ if (covers)
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++, *covers++);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ if (cover == cover_mask)
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++, cover);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_color_vspan(int x, int y,
+ unsigned len,
+ const color_type* colors,
+ const int8u* covers,
+ int8u cover)
+ {
+ if (covers)
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, *covers++);
+ }
+ while (--len);
+ }
+ else
+ {
+ if (cover == cover_mask)
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++);
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, cover);
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ template<class Function> void for_each_pixel(Function f)
+ {
+ unsigned y;
+ for (y = 0; y < height(); ++y)
+ {
+ row_data r = m_rbuf->row(y);
+ if (r.ptr)
+ {
+ unsigned len = r.x2 - r.x1 + 1;
+ pixel_type* p = pix_value_ptr(r.x1, y, len);
+ do
+ {
+ f(p->c);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLut> void apply_gamma_dir(const GammaLut& g)
+ {
+ for_each_pixel(apply_gamma_dir_gray<color_type, GammaLut>(g));
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLut> void apply_gamma_inv(const GammaLut& g)
+ {
+ for_each_pixel(apply_gamma_inv_gray<color_type, GammaLut>(g));
+ }
+
+ //--------------------------------------------------------------------
+ template<class RenBuf2>
+ void copy_from(const RenBuf2& from,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len)
+ {
+ if (const int8u* p = from.row_ptr(ysrc))
+ {
+ memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
+ p + xsrc * pix_width,
+ len * pix_width);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from single color, using grayscale surface as alpha channel.
+ template<class SrcPixelFormatRenderer>
+ void blend_from_color(const SrcPixelFormatRenderer& from,
+ const color_type& color,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+ typedef typename SrcPixelFormatRenderer::color_type src_color_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+ do
+ {
+ copy_or_blend_pix(pdst, color, src_color_type::scale_cover(cover, psrc->c[0]));
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while (--len);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from color table, using grayscale surface as indexes into table.
+ // Obviously, this only works for integer value types.
+ template<class SrcPixelFormatRenderer>
+ void blend_from_lut(const SrcPixelFormatRenderer& from,
+ const color_type* color_lut,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+ do
+ {
+ copy_or_blend_pix(pdst, color_lut[psrc->c[0]], cover);
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while (--len);
+ }
+ }
+
+ private:
+ rbuf_type* m_rbuf;
+ };
+
+ typedef blender_gray<gray8> blender_gray8;
+ typedef blender_gray<sgray8> blender_sgray8;
+ typedef blender_gray<gray16> blender_gray16;
+ typedef blender_gray<gray32> blender_gray32;
+
+ typedef blender_gray_pre<gray8> blender_gray8_pre;
+ typedef blender_gray_pre<sgray8> blender_sgray8_pre;
+ typedef blender_gray_pre<gray16> blender_gray16_pre;
+ typedef blender_gray_pre<gray32> blender_gray32_pre;
+
+ typedef pixfmt_alpha_blend_gray<blender_gray8, rendering_buffer> pixfmt_gray8;
+ typedef pixfmt_alpha_blend_gray<blender_sgray8, rendering_buffer> pixfmt_sgray8;
+ typedef pixfmt_alpha_blend_gray<blender_gray16, rendering_buffer> pixfmt_gray16;
+ typedef pixfmt_alpha_blend_gray<blender_gray32, rendering_buffer> pixfmt_gray32;
+
+ typedef pixfmt_alpha_blend_gray<blender_gray8_pre, rendering_buffer> pixfmt_gray8_pre;
+ typedef pixfmt_alpha_blend_gray<blender_sgray8_pre, rendering_buffer> pixfmt_sgray8_pre;
+ typedef pixfmt_alpha_blend_gray<blender_gray16_pre, rendering_buffer> pixfmt_gray16_pre;
+ typedef pixfmt_alpha_blend_gray<blender_gray32_pre, rendering_buffer> pixfmt_gray32_pre;
+}
+
+#endif
+
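
A minimal sketch of putting pixfmt_gray8 to work (buffer ownership and dimensions are the caller's; values are illustrative):

    #include "agg_pixfmt_gray.h"
    #include "agg_rendering_buffer.h"

    // Bind pixfmt_gray8 to a caller-owned 8-bit buffer and fill its top row.
    void gray8_demo(unsigned char* buf, unsigned width, unsigned height)
    {
        agg::rendering_buffer rbuf(buf, width, height, int(width)); // 1 byte per pixel
        agg::pixfmt_gray8 pixf(rbuf);
        pixf.copy_hline(0, 0, width, agg::gray8(0));
    }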
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_rgb.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_rgb.h
new file mode 100644
index 00000000000..7095fbce58d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_rgb.h
@@ -0,0 +1,994 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_PIXFMT_RGB_INCLUDED
+#define AGG_PIXFMT_RGB_INCLUDED
+
+#include <string.h>
+#include "agg_pixfmt_base.h"
+#include "agg_rendering_buffer.h"
+
+namespace agg
+{
+
+ //=====================================================apply_gamma_dir_rgb
+ template<class ColorT, class Order, class GammaLut> class apply_gamma_dir_rgb
+ {
+ public:
+ typedef typename ColorT::value_type value_type;
+
+ apply_gamma_dir_rgb(const GammaLut& gamma) : m_gamma(gamma) {}
+
+ AGG_INLINE void operator () (value_type* p)
+ {
+ p[Order::R] = m_gamma.dir(p[Order::R]);
+ p[Order::G] = m_gamma.dir(p[Order::G]);
+ p[Order::B] = m_gamma.dir(p[Order::B]);
+ }
+
+ private:
+ const GammaLut& m_gamma;
+ };
+
+
+
+ //=====================================================apply_gamma_inv_rgb
+ template<class ColorT, class Order, class GammaLut> class apply_gamma_inv_rgb
+ {
+ public:
+ typedef typename ColorT::value_type value_type;
+
+ apply_gamma_inv_rgb(const GammaLut& gamma) : m_gamma(gamma) {}
+
+ AGG_INLINE void operator () (value_type* p)
+ {
+ p[Order::R] = m_gamma.inv(p[Order::R]);
+ p[Order::G] = m_gamma.inv(p[Order::G]);
+ p[Order::B] = m_gamma.inv(p[Order::B]);
+ }
+
+ private:
+ const GammaLut& m_gamma;
+ };
+
+
+ //=========================================================blender_rgb
+ template<class ColorT, class Order>
+ struct blender_rgb
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ // Blend pixels using the non-premultiplied form of Alvy-Ray Smith's
+ // compositing function. Since the render buffer is opaque we skip the
+ // initial premultiply and final demultiply.
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
+ {
+ blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha)
+ {
+ p[Order::R] = color_type::lerp(p[Order::R], cr, alpha);
+ p[Order::G] = color_type::lerp(p[Order::G], cg, alpha);
+ p[Order::B] = color_type::lerp(p[Order::B], cb, alpha);
+ }
+ };
+
+ //======================================================blender_rgb_pre
+ template<class ColorT, class Order>
+ struct blender_rgb_pre
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ // Blend pixels using the premultiplied form of Alvy-Ray Smith's
+ // compositing function.
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
+ {
+ blend_pix(p,
+ color_type::mult_cover(cr, cover),
+ color_type::mult_cover(cg, cover),
+ color_type::mult_cover(cb, cover),
+ color_type::mult_cover(alpha, cover));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha)
+ {
+ p[Order::R] = color_type::prelerp(p[Order::R], cr, alpha);
+ p[Order::G] = color_type::prelerp(p[Order::G], cg, alpha);
+ p[Order::B] = color_type::prelerp(p[Order::B], cb, alpha);
+ }
+ };
+
+ //===================================================blender_rgb_gamma
+ template<class ColorT, class Order, class Gamma>
+ class blender_rgb_gamma : public blender_base<ColorT, Order>
+ {
+ public:
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef Gamma gamma_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ //--------------------------------------------------------------------
+ blender_rgb_gamma() : m_gamma(0) {}
+ void gamma(const gamma_type& g) { m_gamma = &g; }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
+ {
+ blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha)
+ {
+ calc_type r = m_gamma->dir(p[Order::R]);
+ calc_type g = m_gamma->dir(p[Order::G]);
+ calc_type b = m_gamma->dir(p[Order::B]);
+ p[Order::R] = m_gamma->inv(color_type::downscale((m_gamma->dir(cr) - r) * alpha) + r);
+ p[Order::G] = m_gamma->inv(color_type::downscale((m_gamma->dir(cg) - g) * alpha) + g);
+ p[Order::B] = m_gamma->inv(color_type::downscale((m_gamma->dir(cb) - b) * alpha) + b);
+ }
+
+ private:
+ const gamma_type* m_gamma;
+ };
+
+
+ //==================================================pixfmt_alpha_blend_rgb
+ template<class Blender, class RenBuf, unsigned Step, unsigned Offset = 0>
+ class pixfmt_alpha_blend_rgb
+ {
+ public:
+ typedef pixfmt_rgb_tag pixfmt_category;
+ typedef RenBuf rbuf_type;
+ typedef Blender blender_type;
+ typedef typename rbuf_type::row_data row_data;
+ typedef typename blender_type::color_type color_type;
+ typedef typename blender_type::order_type order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ enum
+ {
+ pix_step = Step,
+ pix_offset = Offset,
+ pix_width = sizeof(value_type) * pix_step
+ };
+ struct pixel_type
+ {
+ value_type c[pix_step];
+
+ void set(value_type r, value_type g, value_type b)
+ {
+ c[order_type::R] = r;
+ c[order_type::G] = g;
+ c[order_type::B] = b;
+ }
+
+ void set(const color_type& color)
+ {
+ set(color.r, color.g, color.b);
+ }
+
+ void get(value_type& r, value_type& g, value_type& b) const
+ {
+ r = c[order_type::R];
+ g = c[order_type::G];
+ b = c[order_type::B];
+ }
+
+ color_type get() const
+ {
+ return color_type(
+ c[order_type::R],
+ c[order_type::G],
+ c[order_type::B]);
+ }
+
+ pixel_type* next()
+ {
+ return this + 1;
+ }
+
+ const pixel_type* next() const
+ {
+ return this + 1;
+ }
+
+ pixel_type* advance(int n)
+ {
+ return this + n;
+ }
+
+ const pixel_type* advance(int n) const
+ {
+ return this + n;
+ }
+ };
+
+ private:
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p,
+ value_type r, value_type g, value_type b, value_type a,
+ unsigned cover)
+ {
+ m_blender.blend_pix(p->c, r, g, b, a, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p,
+ value_type r, value_type g, value_type b, value_type a)
+ {
+ m_blender.blend_pix(p->c, r, g, b, a);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover)
+ {
+ m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c)
+ {
+ m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c, cover);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque())
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c);
+ }
+ }
+ }
+
+ public:
+ //--------------------------------------------------------------------
+ explicit pixfmt_alpha_blend_rgb(rbuf_type& rb) :
+ m_rbuf(&rb)
+ {}
+ void attach(rbuf_type& rb) { m_rbuf = &rb; }
+
+ //--------------------------------------------------------------------
+ template<class PixFmt>
+ bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
+ {
+ rect_i r(x1, y1, x2, y2);
+ if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
+ {
+ int stride = pixf.stride();
+ m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
+ (r.x2 - r.x1) + 1,
+ (r.y2 - r.y1) + 1,
+ stride);
+ return true;
+ }
+ return false;
+ }
+
+ //--------------------------------------------------------------------
+ Blender& blender() { return m_blender; }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE unsigned width() const { return m_rbuf->width(); }
+ AGG_INLINE unsigned height() const { return m_rbuf->height(); }
+ AGG_INLINE int stride() const { return m_rbuf->stride(); }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); }
+ AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
+ AGG_INLINE row_data row(int y) const { return m_rbuf->row(y); }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE int8u* pix_ptr(int x, int y)
+ {
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
+ }
+
+ AGG_INLINE const int8u* pix_ptr(int x, int y) const
+ {
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
+ }
+
+ // Return pointer to pixel value, forcing row to be allocated.
+ AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len)
+ {
+ return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step + pix_offset));
+ }
+
+ // Return pointer to pixel value, or null if row not allocated.
+ AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const
+ {
+ int8u* p = m_rbuf->row_ptr(y);
+ return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step + pix_offset)) : 0;
+ }
+
+ // Get pixel pointer from raw buffer pointer.
+ AGG_INLINE static pixel_type* pix_value_ptr(void* p)
+ {
+ return (pixel_type*)((value_type*)p + pix_offset);
+ }
+
+ // Get pixel pointer from raw buffer pointer.
+ AGG_INLINE static const pixel_type* pix_value_ptr(const void* p)
+ {
+ return (const pixel_type*)((const value_type*)p + pix_offset);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static void write_plain_color(void* p, color_type c)
+ {
+ // RGB formats are implicitly premultiplied.
+ c.premultiply();
+ pix_value_ptr(p)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static color_type read_plain_color(const void* p)
+ {
+ return pix_value_ptr(p)->get();
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static void make_pix(int8u* p, const color_type& c)
+ {
+ ((pixel_type*)p)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE color_type pixel(int x, int y) const
+ {
+ if (const pixel_type* p = pix_value_ptr(x, y))
+ {
+ return p->get();
+ }
+ return color_type::no_color();
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
+ {
+ pix_value_ptr(x, y, 1)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y, 1), c, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_hline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+ do
+ {
+ p->set(c);
+ p = p->next();
+ }
+ while(--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_vline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ do
+ {
+ pix_value_ptr(x, y++, 1)->set(c);
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_hline(int x, int y,
+ unsigned len,
+ const color_type& c,
+ int8u cover)
+ {
+ if (!c.is_transparent())
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ do
+ {
+ p->set(c);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ blend_pix(p, c, cover);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_vline(int x, int y,
+ unsigned len,
+ const color_type& c,
+ int8u cover)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ do
+ {
+ pix_value_ptr(x, y++, 1)->set(c);
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ blend_pix(pix_value_ptr(x, y++, 1), c, cover);
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void blend_solid_hspan(int x, int y,
+ unsigned len,
+ const color_type& c,
+ const int8u* covers)
+ {
+ if (!c.is_transparent())
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ do
+ {
+ if (c.is_opaque() && *covers == cover_mask)
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c, *covers);
+ }
+ p = p->next();
+ ++covers;
+ }
+ while (--len);
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_solid_vspan(int x, int y,
+ unsigned len,
+ const color_type& c,
+ const int8u* covers)
+ {
+ if (!c.is_transparent())
+ {
+ do
+ {
+ pixel_type* p = pix_value_ptr(x, y++, 1);
+
+ if (c.is_opaque() && *covers == cover_mask)
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c, *covers);
+ }
+ ++covers;
+ }
+ while (--len);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void copy_color_hspan(int x, int y,
+ unsigned len,
+ const color_type* colors)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ do
+ {
+ p->set(*colors++);
+ p = p->next();
+ }
+ while (--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ void copy_color_vspan(int x, int y,
+ unsigned len,
+ const color_type* colors)
+ {
+ do
+ {
+ pix_value_ptr(x, y++, 1)->set(*colors++);
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_color_hspan(int x, int y,
+ unsigned len,
+ const color_type* colors,
+ const int8u* covers,
+ int8u cover)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ if (covers)
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++, *covers++);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ if (cover == cover_mask)
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++, cover);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void blend_color_vspan(int x, int y,
+ unsigned len,
+ const color_type* colors,
+ const int8u* covers,
+ int8u cover)
+ {
+ if (covers)
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, *covers++);
+ }
+ while (--len);
+ }
+ else
+ {
+ if (cover == cover_mask)
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++);
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, cover);
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ template<class Function> void for_each_pixel(Function f)
+ {
+ for (unsigned y = 0; y < height(); ++y)
+ {
+ row_data r = m_rbuf->row(y);
+ if (r.ptr)
+ {
+ unsigned len = r.x2 - r.x1 + 1;
+ pixel_type* p = pix_value_ptr(r.x1, y, len);
+ do
+ {
+ f(p->c);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLut> void apply_gamma_dir(const GammaLut& g)
+ {
+ for_each_pixel(apply_gamma_dir_rgb<color_type, order_type, GammaLut>(g));
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLut> void apply_gamma_inv(const GammaLut& g)
+ {
+ for_each_pixel(apply_gamma_inv_rgb<color_type, order_type, GammaLut>(g));
+ }
+
+ //--------------------------------------------------------------------
+ template<class RenBuf2>
+ void copy_from(const RenBuf2& from,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len)
+ {
+ if (const int8u* p = from.row_ptr(ysrc))
+ {
+ memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
+ p + xsrc * pix_width,
+ len * pix_width);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from an RGBA surface.
+ template<class SrcPixelFormatRenderer>
+ void blend_from(const SrcPixelFormatRenderer& from,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+ typedef typename SrcPixelFormatRenderer::order_type src_order;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+ if (cover == cover_mask)
+ {
+ do
+ {
+ value_type alpha = psrc->c[src_order::A];
+                        if (alpha > color_type::empty_value())
+ {
+ if (alpha >= color_type::full_value())
+ {
+ pdst->c[order_type::R] = psrc->c[src_order::R];
+ pdst->c[order_type::G] = psrc->c[src_order::G];
+ pdst->c[order_type::B] = psrc->c[src_order::B];
+ }
+ else
+ {
+ blend_pix(pdst,
+ psrc->c[src_order::R],
+ psrc->c[src_order::G],
+ psrc->c[src_order::B],
+ alpha);
+ }
+ }
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while(--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(pdst, psrc->get(), cover);
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from single color, using grayscale surface as alpha channel.
+ template<class SrcPixelFormatRenderer>
+ void blend_from_color(const SrcPixelFormatRenderer& from,
+ const color_type& color,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+ typedef typename SrcPixelFormatRenderer::color_type src_color_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+ do
+ {
+ copy_or_blend_pix(pdst, color, src_color_type::scale_cover(cover, psrc->c[0]));
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while (--len);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from color table, using grayscale surface as indexes into table.
+ // Obviously, this only works for integer value types.
+ template<class SrcPixelFormatRenderer>
+ void blend_from_lut(const SrcPixelFormatRenderer& from,
+ const color_type* color_lut,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+ if (cover == cover_mask)
+ {
+ do
+ {
+ const color_type& color = color_lut[psrc->c[0]];
+ blend_pix(pdst, color);
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while(--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(pdst, color_lut[psrc->c[0]], cover);
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while(--len);
+ }
+ }
+ }
+
+ private:
+ rbuf_type* m_rbuf;
+ Blender m_blender;
+ };
+
+ //-----------------------------------------------------------------------
+ typedef blender_rgb<rgba8, order_rgb> blender_rgb24;
+ typedef blender_rgb<rgba8, order_bgr> blender_bgr24;
+ typedef blender_rgb<srgba8, order_rgb> blender_srgb24;
+ typedef blender_rgb<srgba8, order_bgr> blender_sbgr24;
+ typedef blender_rgb<rgba16, order_rgb> blender_rgb48;
+ typedef blender_rgb<rgba16, order_bgr> blender_bgr48;
+ typedef blender_rgb<rgba32, order_rgb> blender_rgb96;
+ typedef blender_rgb<rgba32, order_bgr> blender_bgr96;
+
+ typedef blender_rgb_pre<rgba8, order_rgb> blender_rgb24_pre;
+ typedef blender_rgb_pre<rgba8, order_bgr> blender_bgr24_pre;
+ typedef blender_rgb_pre<srgba8, order_rgb> blender_srgb24_pre;
+ typedef blender_rgb_pre<srgba8, order_bgr> blender_sbgr24_pre;
+ typedef blender_rgb_pre<rgba16, order_rgb> blender_rgb48_pre;
+ typedef blender_rgb_pre<rgba16, order_bgr> blender_bgr48_pre;
+ typedef blender_rgb_pre<rgba32, order_rgb> blender_rgb96_pre;
+ typedef blender_rgb_pre<rgba32, order_bgr> blender_bgr96_pre;
+
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 3> pixfmt_rgb24;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 3> pixfmt_bgr24;
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 3> pixfmt_srgb24;
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 3> pixfmt_sbgr24;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 3> pixfmt_rgb48;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 3> pixfmt_bgr48;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 3> pixfmt_rgb96;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 3> pixfmt_bgr96;
+
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 3> pixfmt_rgb24_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 3> pixfmt_bgr24_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 3> pixfmt_srgb24_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 3> pixfmt_sbgr24_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 3> pixfmt_rgb48_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 3> pixfmt_bgr48_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 3> pixfmt_rgb96_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 3> pixfmt_bgr96_pre;
+
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 4, 0> pixfmt_rgbx32;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 4, 1> pixfmt_xrgb32;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 4, 1> pixfmt_xbgr32;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 4, 0> pixfmt_bgrx32;
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 4, 0> pixfmt_srgbx32;
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 4, 1> pixfmt_sxrgb32;
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 4, 1> pixfmt_sxbgr32;
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 4, 0> pixfmt_sbgrx32;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 4, 0> pixfmt_rgbx64;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 4, 1> pixfmt_xrgb64;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 4, 1> pixfmt_xbgr64;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 4, 0> pixfmt_bgrx64;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 4, 0> pixfmt_rgbx128;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 4, 1> pixfmt_xrgb128;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 4, 1> pixfmt_xbgr128;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 4, 0> pixfmt_bgrx128;
+
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 4, 0> pixfmt_rgbx32_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 4, 1> pixfmt_xrgb32_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 4, 1> pixfmt_xbgr32_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 4, 0> pixfmt_bgrx32_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 4, 0> pixfmt_srgbx32_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 4, 1> pixfmt_sxrgb32_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 4, 1> pixfmt_sxbgr32_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 4, 0> pixfmt_sbgrx32_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 4, 0> pixfmt_rgbx64_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 4, 1> pixfmt_xrgb64_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 4, 1> pixfmt_xbgr64_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 4, 0> pixfmt_bgrx64_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 4, 0> pixfmt_rgbx128_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 4, 1> pixfmt_xrgb128_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 4, 1> pixfmt_xbgr128_pre;
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 4, 0> pixfmt_bgrx128_pre;
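+
+    // A minimal usage sketch, assuming the caller owns the pixel memory and
+    // also uses renderer_base from agg_renderer_base.h:
+    //
+    //     unsigned char buf[100 * 100 * 3];                  // caller-owned RGB24 storage
+    //     agg::rendering_buffer rbuf(buf, 100, 100, 100 * 3);
+    //     agg::pixfmt_rgb24 pixf(rbuf);                      // 3 bytes/pixel, R-G-B order
+    //     agg::renderer_base<agg::pixfmt_rgb24> ren(pixf);
+    //     ren.clear(agg::rgba(1, 1, 1));                     // white background
+    //     pixf.blend_hline(10, 50, 80, agg::rgba8(255, 0, 0), agg::cover_full);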
+
+
+ //-----------------------------------------------------pixfmt_rgb24_gamma
+ template<class Gamma> class pixfmt_rgb24_gamma :
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_rgb, Gamma>, rendering_buffer, 3>
+ {
+ public:
+ pixfmt_rgb24_gamma(rendering_buffer& rb, const Gamma& g) :
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_rgb, Gamma>, rendering_buffer, 3>(rb)
+ {
+ this->blender().gamma(g);
+ }
+ };
+
+ //-----------------------------------------------------pixfmt_srgb24_gamma
+ template<class Gamma> class pixfmt_srgb24_gamma :
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_rgb, Gamma>, rendering_buffer, 3>
+ {
+ public:
+ pixfmt_srgb24_gamma(rendering_buffer& rb, const Gamma& g) :
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_rgb, Gamma>, rendering_buffer, 3>(rb)
+ {
+ this->blender().gamma(g);
+ }
+ };
+
+ //-----------------------------------------------------pixfmt_bgr24_gamma
+ template<class Gamma> class pixfmt_bgr24_gamma :
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_bgr, Gamma>, rendering_buffer, 3>
+ {
+ public:
+ pixfmt_bgr24_gamma(rendering_buffer& rb, const Gamma& g) :
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_bgr, Gamma>, rendering_buffer, 3>(rb)
+ {
+ this->blender().gamma(g);
+ }
+ };
+
+ //-----------------------------------------------------pixfmt_sbgr24_gamma
+ template<class Gamma> class pixfmt_sbgr24_gamma :
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_bgr, Gamma>, rendering_buffer, 3>
+ {
+ public:
+ pixfmt_sbgr24_gamma(rendering_buffer& rb, const Gamma& g) :
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_bgr, Gamma>, rendering_buffer, 3>(rb)
+ {
+ this->blender().gamma(g);
+ }
+ };
+
+ //-----------------------------------------------------pixfmt_rgb48_gamma
+ template<class Gamma> class pixfmt_rgb48_gamma :
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_rgb, Gamma>, rendering_buffer, 3>
+ {
+ public:
+ pixfmt_rgb48_gamma(rendering_buffer& rb, const Gamma& g) :
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_rgb, Gamma>, rendering_buffer, 3>(rb)
+ {
+ this->blender().gamma(g);
+ }
+ };
+
+ //-----------------------------------------------------pixfmt_bgr48_gamma
+ template<class Gamma> class pixfmt_bgr48_gamma :
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_bgr, Gamma>, rendering_buffer, 3>
+ {
+ public:
+ pixfmt_bgr48_gamma(rendering_buffer& rb, const Gamma& g) :
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_bgr, Gamma>, rendering_buffer, 3>(rb)
+ {
+ this->blender().gamma(g);
+ }
+ };
+
+}
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_rgba.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_rgba.h
new file mode 100644
index 00000000000..e9cd523b375
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_pixfmt_rgba.h
@@ -0,0 +1,2801 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_PIXFMT_RGBA_INCLUDED
+#define AGG_PIXFMT_RGBA_INCLUDED
+
+#include <string.h>
+#include <math.h>
+#include "agg_pixfmt_base.h"
+#include "agg_rendering_buffer.h"
+
+namespace agg
+{
+ template<class T> inline T sd_min(T a, T b) { return (a < b) ? a : b; }
+ template<class T> inline T sd_max(T a, T b) { return (a > b) ? a : b; }
+
+ inline rgba & clip(rgba & c)
+ {
+ if (c.a > 1) c.a = 1; else if (c.a < 0) c.a = 0;
+ if (c.r > c.a) c.r = c.a; else if (c.r < 0) c.r = 0;
+ if (c.g > c.a) c.g = c.a; else if (c.g < 0) c.g = 0;
+ if (c.b > c.a) c.b = c.a; else if (c.b < 0) c.b = 0;
+ return c;
+ }
+
+ //=========================================================multiplier_rgba
+ template<class ColorT, class Order>
+ struct multiplier_rgba
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void premultiply(value_type* p)
+ {
+ value_type a = p[Order::A];
+ p[Order::R] = color_type::multiply(p[Order::R], a);
+ p[Order::G] = color_type::multiply(p[Order::G], a);
+ p[Order::B] = color_type::multiply(p[Order::B], a);
+ }
+
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void demultiply(value_type* p)
+ {
+ value_type a = p[Order::A];
+ p[Order::R] = color_type::demultiply(p[Order::R], a);
+ p[Order::G] = color_type::demultiply(p[Order::G], a);
+ p[Order::B] = color_type::demultiply(p[Order::B], a);
+ }
+ };
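+
+    // For example, with rgba8 and order_rgba, a plain pixel such as
+    //
+    //     int8u px[4] = { 200, 100, 50, 128 };   // R, G, B, A
+    //     multiplier_rgba<rgba8, order_rgba>::premultiply(px);
+    //
+    // multiplies each color channel by A/255 (roughly { 100, 50, 25, 128 } here),
+    // and demultiply(px) restores the original values up to integer rounding.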
+
+ //=====================================================apply_gamma_dir_rgba
+ template<class ColorT, class Order, class GammaLut>
+ class apply_gamma_dir_rgba
+ {
+ public:
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+
+ apply_gamma_dir_rgba(const GammaLut& gamma) : m_gamma(gamma) {}
+
+ AGG_INLINE void operator () (value_type* p)
+ {
+ p[Order::R] = m_gamma.dir(p[Order::R]);
+ p[Order::G] = m_gamma.dir(p[Order::G]);
+ p[Order::B] = m_gamma.dir(p[Order::B]);
+ }
+
+ private:
+ const GammaLut& m_gamma;
+ };
+
+ //=====================================================apply_gamma_inv_rgba
+ template<class ColorT, class Order, class GammaLut> class apply_gamma_inv_rgba
+ {
+ public:
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+
+ apply_gamma_inv_rgba(const GammaLut& gamma) : m_gamma(gamma) {}
+
+ AGG_INLINE void operator () (value_type* p)
+ {
+ p[Order::R] = m_gamma.inv(p[Order::R]);
+ p[Order::G] = m_gamma.inv(p[Order::G]);
+ p[Order::B] = m_gamma.inv(p[Order::B]);
+ }
+
+ private:
+ const GammaLut& m_gamma;
+ };
+
+
+ template<class ColorT, class Order>
+ struct conv_rgba_pre
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void set_plain_color(value_type* p, color_type c)
+ {
+ c.premultiply();
+ p[Order::R] = c.r;
+ p[Order::G] = c.g;
+ p[Order::B] = c.b;
+ p[Order::A] = c.a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE color_type get_plain_color(const value_type* p)
+ {
+ return color_type(
+ p[Order::R],
+ p[Order::G],
+ p[Order::B],
+ p[Order::A]).demultiply();
+ }
+ };
+
+ template<class ColorT, class Order>
+ struct conv_rgba_plain
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void set_plain_color(value_type* p, color_type c)
+ {
+ p[Order::R] = c.r;
+ p[Order::G] = c.g;
+ p[Order::B] = c.b;
+ p[Order::A] = c.a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE color_type get_plain_color(const value_type* p)
+ {
+ return color_type(
+ p[Order::R],
+ p[Order::G],
+ p[Order::B],
+ p[Order::A]);
+ }
+ };
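+
+    // The two converters differ only in how the buffer contents are
+    // interpreted: conv_rgba_pre premultiplies on write and demultiplies on
+    // read, so the stored components are alpha-scaled, while conv_rgba_plain
+    // stores and returns the components as given. For example, writing a
+    // half-opaque pure red (1, 0, 0, 0.5) in unit scale through conv_rgba_pre
+    // stores (0.5, 0, 0, 0.5), whereas conv_rgba_plain stores (1, 0, 0, 0.5)
+    // unchanged.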
+
+ //=============================================================blender_rgba
+ // Blends "plain" (i.e. non-premultiplied) colors into a premultiplied buffer.
+ template<class ColorT, class Order>
+ struct blender_rgba : conv_rgba_pre<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ // Blend pixels using the non-premultiplied form of Alvy-Ray Smith's
+ // compositing function. Since the render buffer is in fact premultiplied
+ // we omit the initial premultiplication and final demultiplication.
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
+ {
+ blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha)
+ {
+ p[Order::R] = color_type::lerp(p[Order::R], cr, alpha);
+ p[Order::G] = color_type::lerp(p[Order::G], cg, alpha);
+ p[Order::B] = color_type::lerp(p[Order::B], cb, alpha);
+ p[Order::A] = color_type::prelerp(p[Order::A], alpha, alpha);
+ }
+ };
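+
+    // Sketch of why the lerp form above gives the premultiplied src-over
+    // result for a plain source color Sc with alpha Sa:
+    //
+    //     lerp(Dca, Sc, Sa) = Dca + (Sc - Dca).Sa
+    //                       = Sc.Sa + Dca.(1 - Sa)
+    //                       = Sca + Dca.(1 - Sa)
+    //
+    // i.e. the same src-over equation used by comp_op_rgba_src_over below,
+    // with the source premultiplication folded into the lerp. Likewise
+    // prelerp(Da, Sa, Sa) = Da + Sa - Da.Sa matches Da' = Sa + Da - Sa.Da.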
+
+
+ //========================================================blender_rgba_pre
+ // Blends premultiplied colors into a premultiplied buffer.
+ template<class ColorT, class Order>
+ struct blender_rgba_pre : conv_rgba_pre<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ // Blend pixels using the premultiplied form of Alvy-Ray Smith's
+ // compositing function.
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
+ {
+ blend_pix(p,
+ color_type::mult_cover(cr, cover),
+ color_type::mult_cover(cg, cover),
+ color_type::mult_cover(cb, cover),
+ color_type::mult_cover(alpha, cover));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha)
+ {
+ p[Order::R] = color_type::prelerp(p[Order::R], cr, alpha);
+ p[Order::G] = color_type::prelerp(p[Order::G], cg, alpha);
+ p[Order::B] = color_type::prelerp(p[Order::B], cb, alpha);
+ p[Order::A] = color_type::prelerp(p[Order::A], alpha, alpha);
+ }
+ };
+
+ //======================================================blender_rgba_plain
+ // Blends "plain" (non-premultiplied) colors into a plain (non-premultiplied) buffer.
+ template<class ColorT, class Order>
+ struct blender_rgba_plain : conv_rgba_plain<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ // Blend pixels using the non-premultiplied form of Alvy-Ray Smith's
+ // compositing function.
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
+ {
+ blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha)
+ {
+ if (alpha > color_type::empty_value())
+ {
+ calc_type a = p[Order::A];
+ calc_type r = color_type::multiply(p[Order::R], a);
+ calc_type g = color_type::multiply(p[Order::G], a);
+ calc_type b = color_type::multiply(p[Order::B], a);
+ p[Order::R] = color_type::lerp(r, cr, alpha);
+ p[Order::G] = color_type::lerp(g, cg, alpha);
+ p[Order::B] = color_type::lerp(b, cb, alpha);
+ p[Order::A] = color_type::prelerp(a, alpha, alpha);
+ multiplier_rgba<ColorT, Order>::demultiply(p);
+ }
+ }
+ };
+
+ // SVG compositing operations.
+ // For specifications, see http://www.w3.org/TR/SVGCompositing/
+
+ //=========================================================comp_op_rgba_clear
+ template<class ColorT, class Order>
+ struct comp_op_rgba_clear : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = 0
+ // Da' = 0
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ if (cover >= cover_full)
+ {
+ p[0] = p[1] = p[2] = p[3] = color_type::empty_value();
+ }
+ else if (cover > cover_none)
+ {
+ set(p, get(p, cover_full - cover));
+ }
+ }
+ };
+
+ //===========================================================comp_op_rgba_src
+ template<class ColorT, class Order>
+ struct comp_op_rgba_src : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Sca
+ // Da' = Sa
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ if (cover >= cover_full)
+ {
+ set(p, r, g, b, a);
+ }
+ else
+ {
+ rgba s = get(r, g, b, a, cover);
+ rgba d = get(p, cover_full - cover);
+ d.r += s.r;
+ d.g += s.g;
+ d.b += s.b;
+ d.a += s.a;
+ set(p, d);
+ }
+ }
+ };
+
+ //===========================================================comp_op_rgba_dst
+ template<class ColorT, class Order>
+ struct comp_op_rgba_dst : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+
+ // Dca' = Dca.Sa + Dca.(1 - Sa) = Dca
+ // Da' = Da.Sa + Da.(1 - Sa) = Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ // Well, that was easy!
+ }
+ };
+
+ //======================================================comp_op_rgba_src_over
+ template<class ColorT, class Order>
+ struct comp_op_rgba_src_over : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Sca + Dca.(1 - Sa) = Dca + Sca - Dca.Sa
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+#if 1
+ blender_rgba_pre<ColorT, Order>::blend_pix(p, r, g, b, a, cover);
+#else
+ rgba s = get(r, g, b, a, cover);
+ rgba d = get(p);
+ d.r += s.r - d.r * s.a;
+ d.g += s.g - d.g * s.a;
+ d.b += s.b - d.b * s.a;
+ d.a += s.a - d.a * s.a;
+ set(p, d);
+#endif
+ }
+ };
+
+ //======================================================comp_op_rgba_dst_over
+ template<class ColorT, class Order>
+ struct comp_op_rgba_dst_over : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Dca + Sca.(1 - Da)
+ // Da' = Sa + Da - Sa.Da = Da + Sa.(1 - Da)
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ rgba d = get(p);
+ double d1a = 1 - d.a;
+ d.r += s.r * d1a;
+ d.g += s.g * d1a;
+ d.b += s.b * d1a;
+ d.a += s.a * d1a;
+ set(p, d);
+ }
+ };
+
+ //======================================================comp_op_rgba_src_in
+ template<class ColorT, class Order>
+ struct comp_op_rgba_src_in : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Sca.Da
+ // Da' = Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ double da = ColorT::to_double(p[Order::A]);
+ if (da > 0)
+ {
+ rgba s = get(r, g, b, a, cover);
+ rgba d = get(p, cover_full - cover);
+ d.r += s.r * da;
+ d.g += s.g * da;
+ d.b += s.b * da;
+ d.a += s.a * da;
+ set(p, d);
+ }
+ }
+ };
+
+ //======================================================comp_op_rgba_dst_in
+ template<class ColorT, class Order>
+ struct comp_op_rgba_dst_in : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Dca.Sa
+ // Da' = Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ double sa = ColorT::to_double(a);
+ rgba d = get(p, cover_full - cover);
+ rgba d2 = get(p, cover);
+ d.r += d2.r * sa;
+ d.g += d2.g * sa;
+ d.b += d2.b * sa;
+ d.a += d2.a * sa;
+ set(p, d);
+ }
+ };
+
+ //======================================================comp_op_rgba_src_out
+ template<class ColorT, class Order>
+ struct comp_op_rgba_src_out : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Sca.(1 - Da)
+ // Da' = Sa.(1 - Da)
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ rgba d = get(p, cover_full - cover);
+ double d1a = 1 - ColorT::to_double(p[Order::A]);
+ d.r += s.r * d1a;
+ d.g += s.g * d1a;
+ d.b += s.b * d1a;
+ d.a += s.a * d1a;
+ set(p, d);
+ }
+ };
+
+ //======================================================comp_op_rgba_dst_out
+ template<class ColorT, class Order>
+ struct comp_op_rgba_dst_out : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Dca.(1 - Sa)
+ // Da' = Da.(1 - Sa)
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba d = get(p, cover_full - cover);
+ rgba dc = get(p, cover);
+ double s1a = 1 - ColorT::to_double(a);
+ d.r += dc.r * s1a;
+ d.g += dc.g * s1a;
+ d.b += dc.b * s1a;
+ d.a += dc.a * s1a;
+ set(p, d);
+ }
+ };
+
+ //=====================================================comp_op_rgba_src_atop
+ template<class ColorT, class Order>
+ struct comp_op_rgba_src_atop : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Sca.Da + Dca.(1 - Sa)
+ // Da' = Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ rgba d = get(p);
+ double s1a = 1 - s.a;
+ d.r = s.r * d.a + d.r * s1a;
+ d.g = s.g * d.a + d.g * s1a;
+            d.b = s.b * d.a + d.b * s1a;
+ set(p, d);
+ }
+ };
+
+ //=====================================================comp_op_rgba_dst_atop
+ template<class ColorT, class Order>
+ struct comp_op_rgba_dst_atop : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Dca.Sa + Sca.(1 - Da)
+ // Da' = Sa
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba sc = get(r, g, b, a, cover);
+ rgba dc = get(p, cover);
+ rgba d = get(p, cover_full - cover);
+ double sa = ColorT::to_double(a);
+ double d1a = 1 - ColorT::to_double(p[Order::A]);
+ d.r += dc.r * sa + sc.r * d1a;
+ d.g += dc.g * sa + sc.g * d1a;
+ d.b += dc.b * sa + sc.b * d1a;
+ d.a += sc.a;
+ set(p, d);
+ }
+ };
+
+ //=========================================================comp_op_rgba_xor
+ template<class ColorT, class Order>
+ struct comp_op_rgba_xor : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Sca.(1 - Da) + Dca.(1 - Sa)
+ // Da' = Sa + Da - 2.Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ rgba d = get(p);
+ double s1a = 1 - s.a;
+ double d1a = 1 - ColorT::to_double(p[Order::A]);
+ d.r = s.r * d1a + d.r * s1a;
+ d.g = s.g * d1a + d.g * s1a;
+ d.b = s.b * d1a + d.b * s1a;
+ d.a = s.a + d.a - 2 * s.a * d.a;
+ set(p, d);
+ }
+ };
+
+ //=========================================================comp_op_rgba_plus
+ template<class ColorT, class Order>
+ struct comp_op_rgba_plus : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Sca + Dca
+ // Da' = Sa + Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ d.a = sd_min(d.a + s.a, 1.0);
+ d.r = sd_min(d.r + s.r, d.a);
+ d.g = sd_min(d.g + s.g, d.a);
+ d.b = sd_min(d.b + s.b, d.a);
+ set(p, clip(d));
+ }
+ }
+ };
+
+ //========================================================comp_op_rgba_minus
+ // Note: not included in SVG spec.
+ template<class ColorT, class Order>
+ struct comp_op_rgba_minus : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Dca - Sca
+ // Da' = 1 - (1 - Sa).(1 - Da) = Da + Sa - Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ d.a += s.a - s.a * d.a;
+ d.r = sd_max(d.r - s.r, 0.0);
+ d.g = sd_max(d.g - s.g, 0.0);
+ d.b = sd_max(d.b - s.b, 0.0);
+ set(p, clip(d));
+ }
+ }
+ };
+
+ //=====================================================comp_op_rgba_multiply
+ template<class ColorT, class Order>
+ struct comp_op_rgba_multiply : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa)
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ double s1a = 1 - s.a;
+ double d1a = 1 - d.a;
+ d.r = s.r * d.r + s.r * d1a + d.r * s1a;
+ d.g = s.g * d.g + s.g * d1a + d.g * s1a;
+ d.b = s.b * d.b + s.b * d1a + d.b * s1a;
+ d.a += s.a - s.a * d.a;
+ set(p, clip(d));
+ }
+ }
+ };
+
+ //=====================================================comp_op_rgba_screen
+ template<class ColorT, class Order>
+ struct comp_op_rgba_screen : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Sca + Dca - Sca.Dca
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ d.r += s.r - s.r * d.r;
+ d.g += s.g - s.g * d.g;
+ d.b += s.b - s.b * d.b;
+ d.a += s.a - s.a * d.a;
+ set(p, clip(d));
+ }
+ }
+ };
+
+ //=====================================================comp_op_rgba_overlay
+ template<class ColorT, class Order>
+ struct comp_op_rgba_overlay : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // if 2.Dca <= Da
+ // Dca' = 2.Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa)
+ // otherwise
+ // Dca' = Sa.Da - 2.(Da - Dca).(Sa - Sca) + Sca.(1 - Da) + Dca.(1 - Sa)
+ //
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a)
+ {
+ return (2 * dca <= da) ?
+ 2 * sca * dca + sca * d1a + dca * s1a :
+ sada - 2 * (da - dca) * (sa - sca) + sca * d1a + dca * s1a;
+ }
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ double d1a = 1 - d.a;
+ double s1a = 1 - s.a;
+ double sada = s.a * d.a;
+ d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a);
+ d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a);
+ d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a);
+ d.a += s.a - s.a * d.a;
+ set(p, clip(d));
+ }
+ }
+ };
+
+ //=====================================================comp_op_rgba_darken
+ template<class ColorT, class Order>
+ struct comp_op_rgba_darken : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = min(Sca.Da, Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa)
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ double d1a = 1 - d.a;
+ double s1a = 1 - s.a;
+ d.r = sd_min(s.r * d.a, d.r * s.a) + s.r * d1a + d.r * s1a;
+ d.g = sd_min(s.g * d.a, d.g * s.a) + s.g * d1a + d.g * s1a;
+ d.b = sd_min(s.b * d.a, d.b * s.a) + s.b * d1a + d.b * s1a;
+ d.a += s.a - s.a * d.a;
+ set(p, clip(d));
+ }
+ }
+ };
+
+ //=====================================================comp_op_rgba_lighten
+ template<class ColorT, class Order>
+ struct comp_op_rgba_lighten : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = max(Sca.Da, Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa)
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ double d1a = 1 - d.a;
+ double s1a = 1 - s.a;
+ d.r = sd_max(s.r * d.a, d.r * s.a) + s.r * d1a + d.r * s1a;
+ d.g = sd_max(s.g * d.a, d.g * s.a) + s.g * d1a + d.g * s1a;
+ d.b = sd_max(s.b * d.a, d.b * s.a) + s.b * d1a + d.b * s1a;
+ d.a += s.a - s.a * d.a;
+ set(p, clip(d));
+ }
+ }
+ };
+
+ //=====================================================comp_op_rgba_color_dodge
+ template<class ColorT, class Order>
+ struct comp_op_rgba_color_dodge : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // if Sca == Sa and Dca == 0
+ // Dca' = Sca.(1 - Da) + Dca.(1 - Sa) = Sca.(1 - Da)
+ // otherwise if Sca == Sa
+ // Dca' = Sa.Da + Sca.(1 - Da) + Dca.(1 - Sa)
+ // otherwise if Sca < Sa
+ // Dca' = Sa.Da.min(1, Dca/Da.Sa/(Sa - Sca)) + Sca.(1 - Da) + Dca.(1 - Sa)
+ //
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a)
+ {
+ if (sca < sa) return sada * sd_min(1.0, (dca / da) * sa / (sa - sca)) + sca * d1a + dca * s1a;
+ if (dca > 0) return sada + sca * d1a + dca * s1a;
+ return sca * d1a;
+ }
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ if (d.a > 0)
+ {
+ double sada = s.a * d.a;
+ double s1a = 1 - s.a;
+ double d1a = 1 - d.a;
+ d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a);
+ d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a);
+ d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a);
+ d.a += s.a - s.a * d.a;
+ set(p, clip(d));
+ }
+ else set(p, s);
+ }
+ }
+ };
+
+ //=====================================================comp_op_rgba_color_burn
+ template<class ColorT, class Order>
+ struct comp_op_rgba_color_burn : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // if Sca == 0 and Dca == Da
+ // Dca' = Sa.Da + Dca.(1 - Sa)
+ // otherwise if Sca == 0
+ // Dca' = Dca.(1 - Sa)
+ // otherwise if Sca > 0
+ // Dca' = Sa.Da.(1 - min(1, (1 - Dca/Da).Sa/Sca)) + Sca.(1 - Da) + Dca.(1 - Sa)
+ static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a)
+ {
+ if (sca > 0) return sada * (1 - sd_min(1.0, (1 - dca / da) * sa / sca)) + sca * d1a + dca * s1a;
+ if (dca > da) return sada + dca * s1a;
+ return dca * s1a;
+ }
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ if (d.a > 0)
+ {
+ double sada = s.a * d.a;
+ double s1a = 1 - s.a;
+ double d1a = 1 - d.a;
+ d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a);
+ d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a);
+ d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a);
+ d.a += s.a - sada;
+ set(p, clip(d));
+ }
+ else set(p, s);
+ }
+ }
+ };
+
+ //=====================================================comp_op_rgba_hard_light
+ template<class ColorT, class Order>
+ struct comp_op_rgba_hard_light : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // if 2.Sca < Sa
+ // Dca' = 2.Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa)
+ // otherwise
+ // Dca' = Sa.Da - 2.(Da - Dca).(Sa - Sca) + Sca.(1 - Da) + Dca.(1 - Sa)
+ //
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a)
+ {
+ return (2 * sca < sa) ?
+ 2 * sca * dca + sca * d1a + dca * s1a :
+ sada - 2 * (da - dca) * (sa - sca) + sca * d1a + dca * s1a;
+ }
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ double d1a = 1 - d.a;
+ double s1a = 1 - s.a;
+ double sada = s.a * d.a;
+ d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a);
+ d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a);
+ d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a);
+ d.a += s.a - sada;
+ set(p, clip(d));
+ }
+ }
+ };
+
+ //=====================================================comp_op_rgba_soft_light
+ template<class ColorT, class Order>
+ struct comp_op_rgba_soft_light : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // if 2.Sca <= Sa
+ // Dca' = Dca.Sa - (Sa.Da - 2.Sca.Da).Dca.Sa.(Sa.Da - Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa)
+ // otherwise if 2.Sca > Sa and 4.Dca <= Da
+ // Dca' = Dca.Sa + (2.Sca.Da - Sa.Da).((((16.Dsa.Sa - 12).Dsa.Sa + 4).Dsa.Da) - Dsa.Da) + Sca.(1 - Da) + Dca.(1 - Sa)
+ // otherwise if 2.Sca > Sa and 4.Dca > Da
+ // Dca' = Dca.Sa + (2.Sca.Da - Sa.Da).((Dca.Sa)^0.5 - Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa)
+ //
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE double calc(double dca, double sca, double da, double sa, double sada, double d1a, double s1a)
+ {
+ double dcasa = dca * sa;
+ if (2 * sca <= sa) return dcasa - (sada - 2 * sca * da) * dcasa * (sada - dcasa) + sca * d1a + dca * s1a;
+ if (4 * dca <= da) return dcasa + (2 * sca * da - sada) * ((((16 * dcasa - 12) * dcasa + 4) * dca * da) - dca * da) + sca * d1a + dca * s1a;
+ return dcasa + (2 * sca * da - sada) * (sqrt(dcasa) - dcasa) + sca * d1a + dca * s1a;
+ }
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ if (d.a > 0)
+ {
+ double sada = s.a * d.a;
+ double s1a = 1 - s.a;
+ double d1a = 1 - d.a;
+ d.r = calc(d.r, s.r, d.a, s.a, sada, d1a, s1a);
+ d.g = calc(d.g, s.g, d.a, s.a, sada, d1a, s1a);
+ d.b = calc(d.b, s.b, d.a, s.a, sada, d1a, s1a);
+ d.a += s.a - sada;
+ set(p, clip(d));
+ }
+ else set(p, s);
+ }
+ }
+ };
+
+ //=====================================================comp_op_rgba_difference
+ template<class ColorT, class Order>
+ struct comp_op_rgba_difference : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = Sca + Dca - 2.min(Sca.Da, Dca.Sa)
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ d.r += s.r - 2 * sd_min(s.r * d.a, d.r * s.a);
+ d.g += s.g - 2 * sd_min(s.g * d.a, d.g * s.a);
+ d.b += s.b - 2 * sd_min(s.b * d.a, d.b * s.a);
+ d.a += s.a - s.a * d.a;
+ set(p, clip(d));
+ }
+ }
+ };
+
+ //=====================================================comp_op_rgba_exclusion
+ template<class ColorT, class Order>
+ struct comp_op_rgba_exclusion : blender_base<ColorT, Order>
+ {
+ typedef ColorT color_type;
+ typedef typename color_type::value_type value_type;
+ using blender_base<ColorT, Order>::get;
+ using blender_base<ColorT, Order>::set;
+
+ // Dca' = (Sca.Da + Dca.Sa - 2.Sca.Dca) + Sca.(1 - Da) + Dca.(1 - Sa)
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ rgba s = get(r, g, b, a, cover);
+ if (s.a > 0)
+ {
+ rgba d = get(p);
+ double d1a = 1 - d.a;
+ double s1a = 1 - s.a;
+ d.r = (s.r * d.a + d.r * s.a - 2 * s.r * d.r) + s.r * d1a + d.r * s1a;
+ d.g = (s.g * d.a + d.g * s.a - 2 * s.g * d.g) + s.g * d1a + d.g * s1a;
+ d.b = (s.b * d.a + d.b * s.a - 2 * s.b * d.b) + s.b * d1a + d.b * s1a;
+ d.a += s.a - s.a * d.a;
+ set(p, clip(d));
+ }
+ }
+ };
+
+#if 0
+ //=====================================================comp_op_rgba_contrast
+ template<class ColorT, class Order> struct comp_op_rgba_contrast
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+ enum base_scale_e
+ {
+ base_shift = color_type::base_shift,
+ base_mask = color_type::base_mask
+ };
+
+
+ static AGG_INLINE void blend_pix(value_type* p,
+ unsigned sr, unsigned sg, unsigned sb,
+ unsigned sa, unsigned cover)
+ {
+ if (cover < 255)
+ {
+ sr = (sr * cover + 255) >> 8;
+ sg = (sg * cover + 255) >> 8;
+ sb = (sb * cover + 255) >> 8;
+ sa = (sa * cover + 255) >> 8;
+ }
+ long_type dr = p[Order::R];
+ long_type dg = p[Order::G];
+ long_type db = p[Order::B];
+ int da = p[Order::A];
+ long_type d2a = da >> 1;
+ unsigned s2a = sa >> 1;
+
+ int r = (int)((((dr - d2a) * int((sr - s2a)*2 + base_mask)) >> base_shift) + d2a);
+ int g = (int)((((dg - d2a) * int((sg - s2a)*2 + base_mask)) >> base_shift) + d2a);
+ int b = (int)((((db - d2a) * int((sb - s2a)*2 + base_mask)) >> base_shift) + d2a);
+
+ r = (r < 0) ? 0 : r;
+ g = (g < 0) ? 0 : g;
+ b = (b < 0) ? 0 : b;
+
+ p[Order::R] = (value_type)((r > da) ? da : r);
+ p[Order::G] = (value_type)((g > da) ? da : g);
+ p[Order::B] = (value_type)((b > da) ? da : b);
+ }
+ };
+
+ //=====================================================comp_op_rgba_invert
+ template<class ColorT, class Order> struct comp_op_rgba_invert
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+ enum base_scale_e
+ {
+ base_shift = color_type::base_shift,
+ base_mask = color_type::base_mask
+ };
+
+ // Dca' = (Da - Dca) * Sa + Dca.(1 - Sa)
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ unsigned sr, unsigned sg, unsigned sb,
+ unsigned sa, unsigned cover)
+ {
+ sa = (sa * cover + 255) >> 8;
+ if (sa)
+ {
+ calc_type da = p[Order::A];
+ calc_type dr = ((da - p[Order::R]) * sa + base_mask) >> base_shift;
+ calc_type dg = ((da - p[Order::G]) * sa + base_mask) >> base_shift;
+ calc_type db = ((da - p[Order::B]) * sa + base_mask) >> base_shift;
+ calc_type s1a = base_mask - sa;
+ p[Order::R] = (value_type)(dr + ((p[Order::R] * s1a + base_mask) >> base_shift));
+ p[Order::G] = (value_type)(dg + ((p[Order::G] * s1a + base_mask) >> base_shift));
+ p[Order::B] = (value_type)(db + ((p[Order::B] * s1a + base_mask) >> base_shift));
+ p[Order::A] = (value_type)(sa + da - ((sa * da + base_mask) >> base_shift));
+ }
+ }
+ };
+
+ //=================================================comp_op_rgba_invert_rgb
+ template<class ColorT, class Order> struct comp_op_rgba_invert_rgb
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+ enum base_scale_e
+ {
+ base_shift = color_type::base_shift,
+ base_mask = color_type::base_mask
+ };
+
+ // Dca' = (Da - Dca) * Sca + Dca.(1 - Sa)
+ // Da' = Sa + Da - Sa.Da
+ static AGG_INLINE void blend_pix(value_type* p,
+ unsigned sr, unsigned sg, unsigned sb,
+ unsigned sa, unsigned cover)
+ {
+ if (cover < 255)
+ {
+ sr = (sr * cover + 255) >> 8;
+ sg = (sg * cover + 255) >> 8;
+ sb = (sb * cover + 255) >> 8;
+ sa = (sa * cover + 255) >> 8;
+ }
+ if (sa)
+ {
+ calc_type da = p[Order::A];
+ calc_type dr = ((da - p[Order::R]) * sr + base_mask) >> base_shift;
+ calc_type dg = ((da - p[Order::G]) * sg + base_mask) >> base_shift;
+ calc_type db = ((da - p[Order::B]) * sb + base_mask) >> base_shift;
+ calc_type s1a = base_mask - sa;
+ p[Order::R] = (value_type)(dr + ((p[Order::R] * s1a + base_mask) >> base_shift));
+ p[Order::G] = (value_type)(dg + ((p[Order::G] * s1a + base_mask) >> base_shift));
+ p[Order::B] = (value_type)(db + ((p[Order::B] * s1a + base_mask) >> base_shift));
+ p[Order::A] = (value_type)(sa + da - ((sa * da + base_mask) >> base_shift));
+ }
+ }
+ };
+#endif
+
+
+ //======================================================comp_op_table_rgba
+ template<class ColorT, class Order> struct comp_op_table_rgba
+ {
+ typedef typename ColorT::value_type value_type;
+ typedef typename ColorT::calc_type calc_type;
+ typedef void (*comp_op_func_type)(value_type* p,
+ value_type cr,
+ value_type cg,
+ value_type cb,
+ value_type ca,
+ cover_type cover);
+ static comp_op_func_type g_comp_op_func[];
+ };
+
+ //==========================================================g_comp_op_func
+ template<class ColorT, class Order>
+ typename comp_op_table_rgba<ColorT, Order>::comp_op_func_type
+ comp_op_table_rgba<ColorT, Order>::g_comp_op_func[] =
+ {
+ comp_op_rgba_clear <ColorT,Order>::blend_pix,
+ comp_op_rgba_src <ColorT,Order>::blend_pix,
+ comp_op_rgba_dst <ColorT,Order>::blend_pix,
+ comp_op_rgba_src_over <ColorT,Order>::blend_pix,
+ comp_op_rgba_dst_over <ColorT,Order>::blend_pix,
+ comp_op_rgba_src_in <ColorT,Order>::blend_pix,
+ comp_op_rgba_dst_in <ColorT,Order>::blend_pix,
+ comp_op_rgba_src_out <ColorT,Order>::blend_pix,
+ comp_op_rgba_dst_out <ColorT,Order>::blend_pix,
+ comp_op_rgba_src_atop <ColorT,Order>::blend_pix,
+ comp_op_rgba_dst_atop <ColorT,Order>::blend_pix,
+ comp_op_rgba_xor <ColorT,Order>::blend_pix,
+ comp_op_rgba_plus <ColorT,Order>::blend_pix,
+ //comp_op_rgba_minus <ColorT,Order>::blend_pix,
+ comp_op_rgba_multiply <ColorT,Order>::blend_pix,
+ comp_op_rgba_screen <ColorT,Order>::blend_pix,
+ comp_op_rgba_overlay <ColorT,Order>::blend_pix,
+ comp_op_rgba_darken <ColorT,Order>::blend_pix,
+ comp_op_rgba_lighten <ColorT,Order>::blend_pix,
+ comp_op_rgba_color_dodge<ColorT,Order>::blend_pix,
+ comp_op_rgba_color_burn <ColorT,Order>::blend_pix,
+ comp_op_rgba_hard_light <ColorT,Order>::blend_pix,
+ comp_op_rgba_soft_light <ColorT,Order>::blend_pix,
+ comp_op_rgba_difference <ColorT,Order>::blend_pix,
+ comp_op_rgba_exclusion <ColorT,Order>::blend_pix,
+ //comp_op_rgba_contrast <ColorT,Order>::blend_pix,
+ //comp_op_rgba_invert <ColorT,Order>::blend_pix,
+ //comp_op_rgba_invert_rgb <ColorT,Order>::blend_pix,
+ 0
+ };
+
+
+ //==============================================================comp_op_e
+ enum comp_op_e
+ {
+ comp_op_clear, //----comp_op_clear
+ comp_op_src, //----comp_op_src
+ comp_op_dst, //----comp_op_dst
+ comp_op_src_over, //----comp_op_src_over
+ comp_op_dst_over, //----comp_op_dst_over
+ comp_op_src_in, //----comp_op_src_in
+ comp_op_dst_in, //----comp_op_dst_in
+ comp_op_src_out, //----comp_op_src_out
+ comp_op_dst_out, //----comp_op_dst_out
+ comp_op_src_atop, //----comp_op_src_atop
+ comp_op_dst_atop, //----comp_op_dst_atop
+ comp_op_xor, //----comp_op_xor
+ comp_op_plus, //----comp_op_plus
+ //comp_op_minus, //----comp_op_minus
+ comp_op_multiply, //----comp_op_multiply
+ comp_op_screen, //----comp_op_screen
+ comp_op_overlay, //----comp_op_overlay
+ comp_op_darken, //----comp_op_darken
+ comp_op_lighten, //----comp_op_lighten
+ comp_op_color_dodge, //----comp_op_color_dodge
+ comp_op_color_burn, //----comp_op_color_burn
+ comp_op_hard_light, //----comp_op_hard_light
+ comp_op_soft_light, //----comp_op_soft_light
+ comp_op_difference, //----comp_op_difference
+ comp_op_exclusion, //----comp_op_exclusion
+ //comp_op_contrast, //----comp_op_contrast
+ //comp_op_invert, //----comp_op_invert
+ //comp_op_invert_rgb, //----comp_op_invert_rgb
+
+ end_of_comp_op_e
+ };
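+
+    // The enumerators above are direct indices into g_comp_op_func, which is
+    // why comp_op_minus and the contrast/invert operations are commented out
+    // in both places; dropping an entry from only one of the two would shift
+    // every following operation. A dispatch sketch (rgba8 assumed):
+    //
+    //     typedef comp_op_table_rgba<rgba8, order_rgba> op_table;
+    //     op_table::g_comp_op_func[comp_op_multiply](p, r, g, b, a, cover);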
+
+
+
+
+
+
+
+ //====================================================comp_op_adaptor_rgba
+ template<class ColorT, class Order>
+ struct comp_op_adaptor_rgba
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ comp_op_table_rgba<ColorT, Order>::g_comp_op_func[op](p,
+ color_type::multiply(r, a),
+ color_type::multiply(g, a),
+ color_type::multiply(b, a),
+ a, cover);
+ }
+ };
+
+ //=========================================comp_op_adaptor_clip_to_dst_rgba
+ template<class ColorT, class Order>
+ struct comp_op_adaptor_clip_to_dst_rgba
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ r = color_type::multiply(r, a);
+ g = color_type::multiply(g, a);
+ b = color_type::multiply(b, a);
+ value_type da = p[Order::A];
+ comp_op_table_rgba<ColorT, Order>::g_comp_op_func[op](p,
+ color_type::multiply(r, da),
+ color_type::multiply(g, da),
+ color_type::multiply(b, da),
+ color_type::multiply(a, da), cover);
+ }
+ };
+
+ //================================================comp_op_adaptor_rgba_pre
+ template<class ColorT, class Order>
+ struct comp_op_adaptor_rgba_pre
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ comp_op_table_rgba<ColorT, Order>::g_comp_op_func[op](p, r, g, b, a, cover);
+ }
+ };
+
+ //=====================================comp_op_adaptor_clip_to_dst_rgba_pre
+ template<class ColorT, class Order>
+ struct comp_op_adaptor_clip_to_dst_rgba_pre
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ value_type da = p[Order::A];
+ comp_op_table_rgba<ColorT, Order>::g_comp_op_func[op](p,
+ color_type::multiply(r, da),
+ color_type::multiply(g, da),
+ color_type::multiply(b, da),
+ color_type::multiply(a, da), cover);
+ }
+ };
+
+ //====================================================comp_op_adaptor_rgba_plain
+ template<class ColorT, class Order>
+ struct comp_op_adaptor_rgba_plain
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ multiplier_rgba<ColorT, Order>::premultiply(p);
+ comp_op_adaptor_rgba<ColorT, Order>::blend_pix(op, p, r, g, b, a, cover);
+ multiplier_rgba<ColorT, Order>::demultiply(p);
+ }
+ };
+
+ //=========================================comp_op_adaptor_clip_to_dst_rgba_plain
+ template<class ColorT, class Order>
+ struct comp_op_adaptor_clip_to_dst_rgba_plain
+ {
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ multiplier_rgba<ColorT, Order>::premultiply(p);
+ comp_op_adaptor_clip_to_dst_rgba<ColorT, Order>::blend_pix(op, p, r, g, b, a, cover);
+ multiplier_rgba<ColorT, Order>::demultiply(p);
+ }
+ };
+
+ //=======================================================comp_adaptor_rgba
+ template<class BlenderPre>
+ struct comp_adaptor_rgba
+ {
+ typedef typename BlenderPre::color_type color_type;
+ typedef typename BlenderPre::order_type order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ BlenderPre::blend_pix(p,
+ color_type::multiply(r, a),
+ color_type::multiply(g, a),
+ color_type::multiply(b, a),
+ a, cover);
+ }
+ };
+
+ //==========================================comp_adaptor_clip_to_dst_rgba
+ template<class BlenderPre>
+ struct comp_adaptor_clip_to_dst_rgba
+ {
+ typedef typename BlenderPre::color_type color_type;
+ typedef typename BlenderPre::order_type order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ r = color_type::multiply(r, a);
+ g = color_type::multiply(g, a);
+ b = color_type::multiply(b, a);
+ value_type da = p[order_type::A];
+ BlenderPre::blend_pix(p,
+ color_type::multiply(r, da),
+ color_type::multiply(g, da),
+ color_type::multiply(b, da),
+ color_type::multiply(a, da), cover);
+ }
+ };
+
+ //=======================================================comp_adaptor_rgba_pre
+ template<class BlenderPre>
+ struct comp_adaptor_rgba_pre
+ {
+ typedef typename BlenderPre::color_type color_type;
+ typedef typename BlenderPre::order_type order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ BlenderPre::blend_pix(p, r, g, b, a, cover);
+ }
+ };
+
+ //======================================comp_adaptor_clip_to_dst_rgba_pre
+ template<class BlenderPre>
+ struct comp_adaptor_clip_to_dst_rgba_pre
+ {
+ typedef typename BlenderPre::color_type color_type;
+ typedef typename BlenderPre::order_type order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ unsigned da = p[order_type::A];
+ BlenderPre::blend_pix(p,
+ color_type::multiply(r, da),
+ color_type::multiply(g, da),
+ color_type::multiply(b, da),
+ color_type::multiply(a, da),
+ cover);
+ }
+ };
+
+ //=======================================================comp_adaptor_rgba_plain
+ template<class BlenderPre>
+ struct comp_adaptor_rgba_plain
+ {
+ typedef typename BlenderPre::color_type color_type;
+ typedef typename BlenderPre::order_type order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ multiplier_rgba<color_type, order_type>::premultiply(p);
+ comp_adaptor_rgba<BlenderPre>::blend_pix(op, p, r, g, b, a, cover);
+ multiplier_rgba<color_type, order_type>::demultiply(p);
+ }
+ };
+
+ //==========================================comp_adaptor_clip_to_dst_rgba_plain
+ template<class BlenderPre>
+ struct comp_adaptor_clip_to_dst_rgba_plain
+ {
+ typedef typename BlenderPre::color_type color_type;
+ typedef typename BlenderPre::order_type order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ static AGG_INLINE void blend_pix(unsigned op, value_type* p,
+ value_type r, value_type g, value_type b, value_type a, cover_type cover)
+ {
+ multiplier_rgba<color_type, order_type>::premultiply(p);
+ comp_adaptor_clip_to_dst_rgba<BlenderPre>::blend_pix(op, p, r, g, b, a, cover);
+ multiplier_rgba<color_type, order_type>::demultiply(p);
+ }
+ };
+
+
+ //=================================================pixfmt_alpha_blend_rgba
+ template<class Blender, class RenBuf>
+ class pixfmt_alpha_blend_rgba
+ {
+ public:
+ typedef pixfmt_rgba_tag pixfmt_category;
+ typedef RenBuf rbuf_type;
+ typedef typename rbuf_type::row_data row_data;
+ typedef Blender blender_type;
+ typedef typename blender_type::color_type color_type;
+ typedef typename blender_type::order_type order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ enum
+ {
+ pix_step = 4,
+ pix_width = sizeof(value_type) * pix_step,
+ };
+ struct pixel_type
+ {
+ value_type c[pix_step];
+
+ void set(value_type r, value_type g, value_type b, value_type a)
+ {
+ c[order_type::R] = r;
+ c[order_type::G] = g;
+ c[order_type::B] = b;
+ c[order_type::A] = a;
+ }
+
+ void set(const color_type& color)
+ {
+ set(color.r, color.g, color.b, color.a);
+ }
+
+ void get(value_type& r, value_type& g, value_type& b, value_type& a) const
+ {
+ r = c[order_type::R];
+ g = c[order_type::G];
+ b = c[order_type::B];
+ a = c[order_type::A];
+ }
+
+ color_type get() const
+ {
+ return color_type(
+ c[order_type::R],
+ c[order_type::G],
+ c[order_type::B],
+ c[order_type::A]);
+ }
+
+ pixel_type* next()
+ {
+ return this + 1;
+ }
+
+ const pixel_type* next() const
+ {
+ return this + 1;
+ }
+
+ pixel_type* advance(int n)
+ {
+ return this + n;
+ }
+
+ const pixel_type* advance(int n) const
+ {
+ return this + n;
+ }
+ };
+
+ private:
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover)
+ {
+ m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c)
+ {
+ m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a);
+ }
+
+ //--------------------------------------------------------------------
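+        // Fast paths: fully transparent source pixels are skipped, and fully
+        // opaque pixels at full coverage are plain copies; otherwise blend.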
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ p->set(c.r, c.g, c.b, c.a);
+ }
+ else
+ {
+ m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a, cover);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque())
+ {
+ p->set(c.r, c.g, c.b, c.a);
+ }
+ else
+ {
+ m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a);
+ }
+ }
+ }
+
+ public:
+ //--------------------------------------------------------------------
+ pixfmt_alpha_blend_rgba() : m_rbuf(0) {}
+ explicit pixfmt_alpha_blend_rgba(rbuf_type& rb) : m_rbuf(&rb) {}
+ void attach(rbuf_type& rb) { m_rbuf = &rb; }
+
+ //--------------------------------------------------------------------
+ template<class PixFmt>
+ bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
+ {
+ rect_i r(x1, y1, x2, y2);
+ if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
+ {
+ int stride = pixf.stride();
+ m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
+ (r.x2 - r.x1) + 1,
+ (r.y2 - r.y1) + 1,
+ stride);
+ return true;
+ }
+ return false;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE unsigned width() const { return m_rbuf->width(); }
+ AGG_INLINE unsigned height() const { return m_rbuf->height(); }
+ AGG_INLINE int stride() const { return m_rbuf->stride(); }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); }
+ AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
+ AGG_INLINE row_data row(int y) const { return m_rbuf->row(y); }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE int8u* pix_ptr(int x, int y)
+ {
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step);
+ }
+
+ AGG_INLINE const int8u* pix_ptr(int x, int y) const
+ {
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step);
+ }
+
+ // Return pointer to pixel value, forcing row to be allocated.
+ AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len)
+ {
+ return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step));
+ }
+
+ // Return pointer to pixel value, or null if row not allocated.
+ AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const
+ {
+ int8u* p = m_rbuf->row_ptr(y);
+ return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step)) : 0;
+ }
+
+ // Get pixel pointer from raw buffer pointer.
+ AGG_INLINE static pixel_type* pix_value_ptr(void* p)
+ {
+ return (pixel_type*)p;
+ }
+
+ // Get pixel pointer from raw buffer pointer.
+ AGG_INLINE static const pixel_type* pix_value_ptr(const void* p)
+ {
+ return (const pixel_type*)p;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static void write_plain_color(void* p, color_type c)
+ {
+ blender_type::set_plain_color(pix_value_ptr(p)->c, c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static color_type read_plain_color(const void* p)
+ {
+ return blender_type::get_plain_color(pix_value_ptr(p)->c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static void make_pix(int8u* p, const color_type& c)
+ {
+ ((pixel_type*)p)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE color_type pixel(int x, int y) const
+ {
+ if (const pixel_type* p = pix_value_ptr(x, y))
+ {
+ return p->get();
+ }
+ return color_type::no_color();
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
+ {
+ pix_value_ptr(x, y, 1)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y, 1), c, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_hline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ pixel_type v;
+ v.set(c);
+ pixel_type* p = pix_value_ptr(x, y, len);
+ do
+ {
+ *p = v;
+ p = p->next();
+ }
+ while (--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_vline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ pixel_type v;
+ v.set(c);
+ do
+ {
+ *pix_value_ptr(x, y++, 1) = v;
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_hline(int x, int y,
+ unsigned len,
+ const color_type& c,
+ int8u cover)
+ {
+ if (!c.is_transparent())
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ pixel_type v;
+ v.set(c);
+ do
+ {
+ *p = v;
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ if (cover == cover_mask)
+ {
+ do
+ {
+ blend_pix(p, c);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ blend_pix(p, c, cover);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_vline(int x, int y,
+ unsigned len,
+ const color_type& c,
+ int8u cover)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ pixel_type v;
+ v.set(c);
+ do
+ {
+ *pix_value_ptr(x, y++, 1) = v;
+ }
+ while (--len);
+ }
+ else
+ {
+ if (cover == cover_mask)
+ {
+ do
+ {
+ blend_pix(pix_value_ptr(x, y++, 1), c, c.a);
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ blend_pix(pix_value_ptr(x, y++, 1), c, cover);
+ }
+ while (--len);
+ }
+ }
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_solid_hspan(int x, int y,
+ unsigned len,
+ const color_type& c,
+ const int8u* covers)
+ {
+ if (!c.is_transparent())
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+ do
+ {
+ if (c.is_opaque() && *covers == cover_mask)
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c, *covers);
+ }
+ p = p->next();
+ ++covers;
+ }
+ while (--len);
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_solid_vspan(int x, int y,
+ unsigned len,
+ const color_type& c,
+ const int8u* covers)
+ {
+ if (!c.is_transparent())
+ {
+ do
+ {
+ pixel_type* p = pix_value_ptr(x, y++, 1);
+ if (c.is_opaque() && *covers == cover_mask)
+ {
+ p->set(c);
+ }
+ else
+ {
+ blend_pix(p, c, *covers);
+ }
+ ++covers;
+ }
+ while (--len);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void copy_color_hspan(int x, int y,
+ unsigned len,
+ const color_type* colors)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+ do
+ {
+ p->set(*colors++);
+ p = p->next();
+ }
+ while (--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ void copy_color_vspan(int x, int y,
+ unsigned len,
+ const color_type* colors)
+ {
+ do
+ {
+ pix_value_ptr(x, y++, 1)->set(*colors++);
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_color_hspan(int x, int y,
+ unsigned len,
+ const color_type* colors,
+ const int8u* covers,
+ int8u cover)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+ if (covers)
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++, *covers++);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ if (cover == cover_mask)
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++);
+ p = p->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(p, *colors++, cover);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void blend_color_vspan(int x, int y,
+ unsigned len,
+ const color_type* colors,
+ const int8u* covers,
+ int8u cover)
+ {
+ if (covers)
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, *covers++);
+ }
+ while (--len);
+ }
+ else
+ {
+ if (cover == cover_mask)
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++);
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, cover);
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ template<class Function> void for_each_pixel(Function f)
+ {
+ for (unsigned y = 0; y < height(); ++y)
+ {
+ row_data r = m_rbuf->row(y);
+ if (r.ptr)
+ {
+ unsigned len = r.x2 - r.x1 + 1;
+ pixel_type* p = pix_value_ptr(r.x1, y, len);
+ do
+ {
+ f(p->c);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void premultiply()
+ {
+ for_each_pixel(multiplier_rgba<color_type, order_type>::premultiply);
+ }
+
+ //--------------------------------------------------------------------
+ void demultiply()
+ {
+ for_each_pixel(multiplier_rgba<color_type, order_type>::demultiply);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLut> void apply_gamma_dir(const GammaLut& g)
+ {
+ for_each_pixel(apply_gamma_dir_rgba<color_type, order_type, GammaLut>(g));
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLut> void apply_gamma_inv(const GammaLut& g)
+ {
+ for_each_pixel(apply_gamma_inv_rgba<color_type, order_type, GammaLut>(g));
+ }
+
+ //--------------------------------------------------------------------
+ template<class RenBuf2> void copy_from(const RenBuf2& from,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len)
+ {
+ if (const int8u* p = from.row_ptr(ysrc))
+ {
+ memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
+ p + xsrc * pix_width,
+ len * pix_width);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from another RGBA surface.
+ template<class SrcPixelFormatRenderer>
+ void blend_from(const SrcPixelFormatRenderer& from,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+ int srcinc = 1;
+ int dstinc = 1;
+
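+                // Walk the span backwards when the destination starts to the
+                // right of the source, so overlapping ranges within the same
+                // buffer are copied safely.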
+ if (xdst > xsrc)
+ {
+ psrc = psrc->advance(len - 1);
+ pdst = pdst->advance(len - 1);
+ srcinc = -1;
+ dstinc = -1;
+ }
+
+ if (cover == cover_mask)
+ {
+ do
+ {
+ copy_or_blend_pix(pdst, psrc->get());
+ psrc = psrc->advance(srcinc);
+ pdst = pdst->advance(dstinc);
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(pdst, psrc->get(), cover);
+ psrc = psrc->advance(srcinc);
+ pdst = pdst->advance(dstinc);
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Combine single color with grayscale surface and blend.
+ template<class SrcPixelFormatRenderer>
+ void blend_from_color(const SrcPixelFormatRenderer& from,
+ const color_type& color,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+ typedef typename SrcPixelFormatRenderer::color_type src_color_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+ do
+ {
+ copy_or_blend_pix(pdst, color,
+ src_color_type::scale_cover(cover, psrc->c[0]));
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while (--len);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from color table, using grayscale surface as indexes into table.
+ // Obviously, this only works for integer value types.
+ template<class SrcPixelFormatRenderer>
+ void blend_from_lut(const SrcPixelFormatRenderer& from,
+ const color_type* color_lut,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+ if (cover == cover_mask)
+ {
+ do
+ {
+ copy_or_blend_pix(pdst, color_lut[psrc->c[0]]);
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while (--len);
+ }
+ else
+ {
+ do
+ {
+ copy_or_blend_pix(pdst, color_lut[psrc->c[0]], cover);
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+ private:
+ rbuf_type* m_rbuf;
+ Blender m_blender;
+ };
+
+ //================================================pixfmt_custom_blend_rgba
+ template<class Blender, class RenBuf> class pixfmt_custom_blend_rgba
+ {
+ public:
+ typedef pixfmt_rgba_tag pixfmt_category;
+ typedef RenBuf rbuf_type;
+ typedef typename rbuf_type::row_data row_data;
+ typedef Blender blender_type;
+ typedef typename blender_type::color_type color_type;
+ typedef typename blender_type::order_type order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ enum
+ {
+ pix_step = 4,
+ pix_width = sizeof(value_type) * pix_step,
+ };
+ struct pixel_type
+ {
+ value_type c[pix_step];
+
+ void set(value_type r, value_type g, value_type b, value_type a)
+ {
+ c[order_type::R] = r;
+ c[order_type::G] = g;
+ c[order_type::B] = b;
+ c[order_type::A] = a;
+ }
+
+ void set(const color_type& color)
+ {
+ set(color.r, color.g, color.b, color.a);
+ }
+
+ void get(value_type& r, value_type& g, value_type& b, value_type& a) const
+ {
+ r = c[order_type::R];
+ g = c[order_type::G];
+ b = c[order_type::B];
+ a = c[order_type::A];
+ }
+
+ color_type get() const
+ {
+ return color_type(
+ c[order_type::R],
+ c[order_type::G],
+ c[order_type::B],
+ c[order_type::A]);
+ }
+
+ pixel_type* next()
+ {
+ return this + 1;
+ }
+
+ const pixel_type* next() const
+ {
+ return this + 1;
+ }
+
+ pixel_type* advance(int n)
+ {
+ return this + n;
+ }
+
+ const pixel_type* advance(int n) const
+ {
+ return this + n;
+ }
+ };
+
+
+ private:
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover = cover_full)
+ {
+ m_blender.blend_pix(m_comp_op, p->c, c.r, c.g, c.b, c.a, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover = cover_full)
+ {
+ if (!c.is_transparent())
+ {
+ if (c.is_opaque() && cover == cover_mask)
+ {
+ p->set(c.r, c.g, c.b, c.a);
+ }
+ else
+ {
+ blend_pix(p, c, cover);
+ }
+ }
+ }
+
+ public:
+ //--------------------------------------------------------------------
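+        // The default operator index 3 corresponds to comp_op_src_over
+        // in comp_op_e.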
+ pixfmt_custom_blend_rgba() : m_rbuf(0), m_comp_op(3) {}
+ explicit pixfmt_custom_blend_rgba(rbuf_type& rb, unsigned comp_op=3) :
+ m_rbuf(&rb),
+ m_comp_op(comp_op)
+ {}
+ void attach(rbuf_type& rb) { m_rbuf = &rb; }
+
+ //--------------------------------------------------------------------
+ template<class PixFmt>
+ bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
+ {
+ rect_i r(x1, y1, x2, y2);
+ if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
+ {
+ int stride = pixf.stride();
+ m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
+ (r.x2 - r.x1) + 1,
+ (r.y2 - r.y1) + 1,
+ stride);
+ return true;
+ }
+ return false;
+ }
+
+ //--------------------------------------------------------------------
+ void comp_op(unsigned op) { m_comp_op = op; }
+ unsigned comp_op() const { return m_comp_op; }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE unsigned width() const { return m_rbuf->width(); }
+ AGG_INLINE unsigned height() const { return m_rbuf->height(); }
+ AGG_INLINE int stride() const { return m_rbuf->stride(); }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); }
+ AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
+ AGG_INLINE row_data row(int y) const { return m_rbuf->row(y); }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE int8u* pix_ptr(int x, int y)
+ {
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step);
+ }
+
+ AGG_INLINE const int8u* pix_ptr(int x, int y) const
+ {
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step);
+ }
+
+ // Return pointer to pixel value, forcing row to be allocated.
+ AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len)
+ {
+ return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step));
+ }
+
+ // Return pointer to pixel value, or null if row not allocated.
+ AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const
+ {
+ int8u* p = m_rbuf->row_ptr(y);
+ return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step)) : 0;
+ }
+
+ // Get pixel pointer from raw buffer pointer.
+ AGG_INLINE static pixel_type* pix_value_ptr(void* p)
+ {
+ return (pixel_type*)p;
+ }
+
+ // Get pixel pointer from raw buffer pointer.
+ AGG_INLINE static const pixel_type* pix_value_ptr(const void* p)
+ {
+ return (const pixel_type*)p;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE static void make_pix(int8u* p, const color_type& c)
+ {
+ ((pixel_type*)p)->set(c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE color_type pixel(int x, int y) const
+ {
+ if (const pixel_type* p = pix_value_ptr(x, y))
+ {
+ return p->get();
+ }
+ return color_type::no_color();
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
+ {
+ make_pix(pix_value_ptr(x, y, 1), c);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
+ {
+ blend_pix(pix_value_ptr(x, y, 1), c, cover);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_hline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ pixel_type v;
+ v.set(c);
+ pixel_type* p = pix_value_ptr(x, y, len);
+ do
+ {
+ *p = v;
+ p = p->next();
+ }
+ while (--len);
+ }
+
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void copy_vline(int x, int y,
+ unsigned len,
+ const color_type& c)
+ {
+ pixel_type v;
+ v.set(c);
+ do
+ {
+ *pix_value_ptr(x, y++, 1) = v;
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_hline(int x, int y, unsigned len,
+ const color_type& c, int8u cover)
+ {
+
+ pixel_type* p = pix_value_ptr(x, y, len);
+ do
+ {
+ blend_pix(p, c, cover);
+ p = p->next();
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_vline(int x, int y, unsigned len,
+ const color_type& c, int8u cover)
+ {
+ do
+ {
+ blend_pix(pix_value_ptr(x, y++, 1), c, cover);
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_solid_hspan(int x, int y, unsigned len,
+ const color_type& c, const int8u* covers)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ do
+ {
+ blend_pix(p, c, *covers++);
+ p = p->next();
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_solid_vspan(int x, int y, unsigned len,
+ const color_type& c, const int8u* covers)
+ {
+ do
+ {
+ blend_pix(pix_value_ptr(x, y++, 1), c, *covers++);
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void copy_color_hspan(int x, int y,
+ unsigned len,
+ const color_type* colors)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ do
+ {
+ p->set(*colors++);
+ p = p->next();
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void copy_color_vspan(int x, int y,
+ unsigned len,
+ const color_type* colors)
+ {
+ do
+ {
+ pix_value_ptr(x, y++, 1)->set(*colors++);
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_color_hspan(int x, int y, unsigned len,
+ const color_type* colors,
+ const int8u* covers,
+ int8u cover)
+ {
+ pixel_type* p = pix_value_ptr(x, y, len);
+
+ do
+ {
+ blend_pix(p, *colors++, covers ? *covers++ : cover);
+ p = p->next();
+ }
+ while (--len);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_color_vspan(int x, int y, unsigned len,
+ const color_type* colors,
+ const int8u* covers,
+ int8u cover)
+ {
+ do
+ {
+ blend_pix(pix_value_ptr(x, y++, 1), *colors++, covers ? *covers++ : cover);
+ }
+ while (--len);
+
+ }
+
+ //--------------------------------------------------------------------
+ template<class Function> void for_each_pixel(Function f)
+ {
+ unsigned y;
+ for (y = 0; y < height(); ++y)
+ {
+ row_data r = m_rbuf->row(y);
+ if (r.ptr)
+ {
+ unsigned len = r.x2 - r.x1 + 1;
+ pixel_type* p = pix_value_ptr(r.x1, y, len);
+ do
+ {
+ f(p->c);
+ p = p->next();
+ }
+ while (--len);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void premultiply()
+ {
+ for_each_pixel(multiplier_rgba<color_type, order_type>::premultiply);
+ }
+
+ //--------------------------------------------------------------------
+ void demultiply()
+ {
+ for_each_pixel(multiplier_rgba<color_type, order_type>::demultiply);
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLut> void apply_gamma_dir(const GammaLut& g)
+ {
+ for_each_pixel(apply_gamma_dir_rgba<color_type, order_type, GammaLut>(g));
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaLut> void apply_gamma_inv(const GammaLut& g)
+ {
+ for_each_pixel(apply_gamma_inv_rgba<color_type, order_type, GammaLut>(g));
+ }
+
+ //--------------------------------------------------------------------
+ template<class RenBuf2> void copy_from(const RenBuf2& from,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len)
+ {
+ if (const int8u* p = from.row_ptr(ysrc))
+ {
+ memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
+ p + xsrc * pix_width,
+ len * pix_width);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from another RGBA surface.
+ template<class SrcPixelFormatRenderer>
+ void blend_from(const SrcPixelFormatRenderer& from,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+ int srcinc = 1;
+ int dstinc = 1;
+
+ if (xdst > xsrc)
+ {
+ psrc = psrc->advance(len - 1);
+ pdst = pdst->advance(len - 1);
+ srcinc = -1;
+ dstinc = -1;
+ }
+
+ do
+ {
+ blend_pix(pdst, psrc->get(), cover);
+ psrc = psrc->advance(srcinc);
+ pdst = pdst->advance(dstinc);
+ }
+ while (--len);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from single color, using grayscale surface as alpha channel.
+ template<class SrcPixelFormatRenderer>
+ void blend_from_color(const SrcPixelFormatRenderer& from,
+ const color_type& color,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+ typedef typename SrcPixelFormatRenderer::color_type src_color_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+ do
+ {
+ blend_pix(pdst, color,
+ src_color_type::scale_cover(cover, psrc->c[0]));
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while (--len);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Blend from color table, using grayscale surface as indexes into table.
+ // Obviously, this only works for integer value types.
+ template<class SrcPixelFormatRenderer>
+ void blend_from_lut(const SrcPixelFormatRenderer& from,
+ const color_type* color_lut,
+ int xdst, int ydst,
+ int xsrc, int ysrc,
+ unsigned len,
+ int8u cover)
+ {
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
+
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
+ {
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
+
+ do
+ {
+ blend_pix(pdst, color_lut[psrc->c[0]], cover);
+ psrc = psrc->next();
+ pdst = pdst->next();
+ }
+ while (--len);
+ }
+ }
+
+ private:
+ rbuf_type* m_rbuf;
+ Blender m_blender;
+ unsigned m_comp_op;
+ };
+
+
+ //-----------------------------------------------------------------------
+ typedef blender_rgba<rgba8, order_rgba> blender_rgba32;
+ typedef blender_rgba<rgba8, order_argb> blender_argb32;
+ typedef blender_rgba<rgba8, order_abgr> blender_abgr32;
+ typedef blender_rgba<rgba8, order_bgra> blender_bgra32;
+
+ typedef blender_rgba<srgba8, order_rgba> blender_srgba32;
+ typedef blender_rgba<srgba8, order_argb> blender_sargb32;
+ typedef blender_rgba<srgba8, order_abgr> blender_sabgr32;
+ typedef blender_rgba<srgba8, order_bgra> blender_sbgra32;
+
+ typedef blender_rgba_pre<rgba8, order_rgba> blender_rgba32_pre;
+ typedef blender_rgba_pre<rgba8, order_argb> blender_argb32_pre;
+ typedef blender_rgba_pre<rgba8, order_abgr> blender_abgr32_pre;
+ typedef blender_rgba_pre<rgba8, order_bgra> blender_bgra32_pre;
+
+ typedef blender_rgba_pre<srgba8, order_rgba> blender_srgba32_pre;
+ typedef blender_rgba_pre<srgba8, order_argb> blender_sargb32_pre;
+ typedef blender_rgba_pre<srgba8, order_abgr> blender_sabgr32_pre;
+ typedef blender_rgba_pre<srgba8, order_bgra> blender_sbgra32_pre;
+
+ typedef blender_rgba_plain<rgba8, order_rgba> blender_rgba32_plain;
+ typedef blender_rgba_plain<rgba8, order_argb> blender_argb32_plain;
+ typedef blender_rgba_plain<rgba8, order_abgr> blender_abgr32_plain;
+ typedef blender_rgba_plain<rgba8, order_bgra> blender_bgra32_plain;
+
+ typedef blender_rgba_plain<srgba8, order_rgba> blender_srgba32_plain;
+ typedef blender_rgba_plain<srgba8, order_argb> blender_sargb32_plain;
+ typedef blender_rgba_plain<srgba8, order_abgr> blender_sabgr32_plain;
+ typedef blender_rgba_plain<srgba8, order_bgra> blender_sbgra32_plain;
+
+ typedef blender_rgba<rgba16, order_rgba> blender_rgba64;
+ typedef blender_rgba<rgba16, order_argb> blender_argb64;
+ typedef blender_rgba<rgba16, order_abgr> blender_abgr64;
+ typedef blender_rgba<rgba16, order_bgra> blender_bgra64;
+
+ typedef blender_rgba_pre<rgba16, order_rgba> blender_rgba64_pre;
+ typedef blender_rgba_pre<rgba16, order_argb> blender_argb64_pre;
+ typedef blender_rgba_pre<rgba16, order_abgr> blender_abgr64_pre;
+ typedef blender_rgba_pre<rgba16, order_bgra> blender_bgra64_pre;
+
+ typedef blender_rgba_plain<rgba16, order_rgba> blender_rgba64_plain;
+ typedef blender_rgba_plain<rgba16, order_argb> blender_argb64_plain;
+ typedef blender_rgba_plain<rgba16, order_abgr> blender_abgr64_plain;
+ typedef blender_rgba_plain<rgba16, order_bgra> blender_bgra64_plain;
+
+ typedef blender_rgba<rgba32, order_rgba> blender_rgba128;
+ typedef blender_rgba<rgba32, order_argb> blender_argb128;
+ typedef blender_rgba<rgba32, order_abgr> blender_abgr128;
+ typedef blender_rgba<rgba32, order_bgra> blender_bgra128;
+
+ typedef blender_rgba_pre<rgba32, order_rgba> blender_rgba128_pre;
+ typedef blender_rgba_pre<rgba32, order_argb> blender_argb128_pre;
+ typedef blender_rgba_pre<rgba32, order_abgr> blender_abgr128_pre;
+ typedef blender_rgba_pre<rgba32, order_bgra> blender_bgra128_pre;
+
+ typedef blender_rgba_plain<rgba32, order_rgba> blender_rgba128_plain;
+ typedef blender_rgba_plain<rgba32, order_argb> blender_argb128_plain;
+ typedef blender_rgba_plain<rgba32, order_abgr> blender_abgr128_plain;
+ typedef blender_rgba_plain<rgba32, order_bgra> blender_bgra128_plain;
+
+
+ //-----------------------------------------------------------------------
+ typedef pixfmt_alpha_blend_rgba<blender_rgba32, rendering_buffer> pixfmt_rgba32;
+ typedef pixfmt_alpha_blend_rgba<blender_argb32, rendering_buffer> pixfmt_argb32;
+ typedef pixfmt_alpha_blend_rgba<blender_abgr32, rendering_buffer> pixfmt_abgr32;
+ typedef pixfmt_alpha_blend_rgba<blender_bgra32, rendering_buffer> pixfmt_bgra32;
+
+ typedef pixfmt_alpha_blend_rgba<blender_srgba32, rendering_buffer> pixfmt_srgba32;
+ typedef pixfmt_alpha_blend_rgba<blender_sargb32, rendering_buffer> pixfmt_sargb32;
+ typedef pixfmt_alpha_blend_rgba<blender_sabgr32, rendering_buffer> pixfmt_sabgr32;
+ typedef pixfmt_alpha_blend_rgba<blender_sbgra32, rendering_buffer> pixfmt_sbgra32;
+
+ typedef pixfmt_alpha_blend_rgba<blender_rgba32_pre, rendering_buffer> pixfmt_rgba32_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_argb32_pre, rendering_buffer> pixfmt_argb32_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_abgr32_pre, rendering_buffer> pixfmt_abgr32_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_bgra32_pre, rendering_buffer> pixfmt_bgra32_pre;
+
+ typedef pixfmt_alpha_blend_rgba<blender_srgba32_pre, rendering_buffer> pixfmt_srgba32_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_sargb32_pre, rendering_buffer> pixfmt_sargb32_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_sabgr32_pre, rendering_buffer> pixfmt_sabgr32_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_sbgra32_pre, rendering_buffer> pixfmt_sbgra32_pre;
+
+ typedef pixfmt_alpha_blend_rgba<blender_rgba32_plain, rendering_buffer> pixfmt_rgba32_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_argb32_plain, rendering_buffer> pixfmt_argb32_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_abgr32_plain, rendering_buffer> pixfmt_abgr32_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_bgra32_plain, rendering_buffer> pixfmt_bgra32_plain;
+
+ typedef pixfmt_alpha_blend_rgba<blender_srgba32_plain, rendering_buffer> pixfmt_srgba32_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_sargb32_plain, rendering_buffer> pixfmt_sargb32_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_sabgr32_plain, rendering_buffer> pixfmt_sabgr32_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_sbgra32_plain, rendering_buffer> pixfmt_sbgra32_plain;
+
+ typedef pixfmt_alpha_blend_rgba<blender_rgba64, rendering_buffer> pixfmt_rgba64;
+ typedef pixfmt_alpha_blend_rgba<blender_argb64, rendering_buffer> pixfmt_argb64;
+ typedef pixfmt_alpha_blend_rgba<blender_abgr64, rendering_buffer> pixfmt_abgr64;
+ typedef pixfmt_alpha_blend_rgba<blender_bgra64, rendering_buffer> pixfmt_bgra64;
+
+ typedef pixfmt_alpha_blend_rgba<blender_rgba64_pre, rendering_buffer> pixfmt_rgba64_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_argb64_pre, rendering_buffer> pixfmt_argb64_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_abgr64_pre, rendering_buffer> pixfmt_abgr64_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_bgra64_pre, rendering_buffer> pixfmt_bgra64_pre;
+
+ typedef pixfmt_alpha_blend_rgba<blender_rgba64_plain, rendering_buffer> pixfmt_rgba64_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_argb64_plain, rendering_buffer> pixfmt_argb64_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_abgr64_plain, rendering_buffer> pixfmt_abgr64_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_bgra64_plain, rendering_buffer> pixfmt_bgra64_plain;
+
+ typedef pixfmt_alpha_blend_rgba<blender_rgba128, rendering_buffer> pixfmt_rgba128;
+ typedef pixfmt_alpha_blend_rgba<blender_argb128, rendering_buffer> pixfmt_argb128;
+ typedef pixfmt_alpha_blend_rgba<blender_abgr128, rendering_buffer> pixfmt_abgr128;
+ typedef pixfmt_alpha_blend_rgba<blender_bgra128, rendering_buffer> pixfmt_bgra128;
+
+ typedef pixfmt_alpha_blend_rgba<blender_rgba128_pre, rendering_buffer> pixfmt_rgba128_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_argb128_pre, rendering_buffer> pixfmt_argb128_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_abgr128_pre, rendering_buffer> pixfmt_abgr128_pre;
+ typedef pixfmt_alpha_blend_rgba<blender_bgra128_pre, rendering_buffer> pixfmt_bgra128_pre;
+
+ typedef pixfmt_alpha_blend_rgba<blender_rgba128_plain, rendering_buffer> pixfmt_rgba128_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_argb128_plain, rendering_buffer> pixfmt_argb128_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_abgr128_plain, rendering_buffer> pixfmt_abgr128_plain;
+ typedef pixfmt_alpha_blend_rgba<blender_bgra128_plain, rendering_buffer> pixfmt_bgra128_plain;
+
+}
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_cells_aa.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_cells_aa.h
new file mode 100644
index 00000000000..d1cc705405d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_cells_aa.h
@@ -0,0 +1,743 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+//
+// The author gratefully acknowledges the support of David Turner,
+// Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
+// library - in producing this work. See http://www.freetype.org for details.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_RASTERIZER_CELLS_AA_INCLUDED
+#define AGG_RASTERIZER_CELLS_AA_INCLUDED
+
+#include <stdexcept>
+#include <string.h>
+#include <math.h>
+#include "agg_math.h"
+#include "agg_array.h"
+
+
+namespace agg
+{
+
+ //-----------------------------------------------------rasterizer_cells_aa
+ // An internal class that implements the main rasterization algorithm.
+    // Used in the rasterizer. Should not be used directly.
+ template<class Cell> class rasterizer_cells_aa
+ {
+ enum cell_block_scale_e
+ {
+ cell_block_shift = 12,
+ cell_block_size = 1 << cell_block_shift,
+ cell_block_mask = cell_block_size - 1,
+ cell_block_pool = 256
+ };
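+        // Cells are allocated in blocks of cell_block_size (4096) entries;
+        // cell_block_pool is the growth step for the array of block pointers.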
+
+ struct sorted_y
+ {
+ unsigned start;
+ unsigned num;
+ };
+
+ public:
+ typedef Cell cell_type;
+ typedef rasterizer_cells_aa<Cell> self_type;
+
+ ~rasterizer_cells_aa();
+ rasterizer_cells_aa(unsigned cell_block_limit=1024);
+
+ void reset();
+ void style(const cell_type& style_cell);
+ void line(int x1, int y1, int x2, int y2);
+
+ int min_x() const { return m_min_x; }
+ int min_y() const { return m_min_y; }
+ int max_x() const { return m_max_x; }
+ int max_y() const { return m_max_y; }
+
+ void sort_cells();
+
+ unsigned total_cells() const
+ {
+ return m_num_cells;
+ }
+
+ unsigned scanline_num_cells(unsigned y) const
+ {
+ return m_sorted_y[y - m_min_y].num;
+ }
+
+ const cell_type* const* scanline_cells(unsigned y) const
+ {
+ return m_sorted_cells.data() + m_sorted_y[y - m_min_y].start;
+ }
+
+ bool sorted() const { return m_sorted; }
+
+ private:
+ rasterizer_cells_aa(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ void set_curr_cell(int x, int y);
+ void add_curr_cell();
+ void render_hline(int ey, int x1, int y1, int x2, int y2);
+ void allocate_block();
+
+ private:
+ unsigned m_num_blocks;
+ unsigned m_max_blocks;
+ unsigned m_curr_block;
+ unsigned m_num_cells;
+ unsigned m_cell_block_limit;
+ cell_type** m_cells;
+ cell_type* m_curr_cell_ptr;
+ pod_vector<cell_type*> m_sorted_cells;
+ pod_vector<sorted_y> m_sorted_y;
+ cell_type m_curr_cell;
+ cell_type m_style_cell;
+ int m_min_x;
+ int m_min_y;
+ int m_max_x;
+ int m_max_y;
+ bool m_sorted;
+ };
+
+
+
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ rasterizer_cells_aa<Cell>::~rasterizer_cells_aa()
+ {
+ if(m_num_blocks)
+ {
+ cell_type** ptr = m_cells + m_num_blocks - 1;
+ while(m_num_blocks--)
+ {
+ pod_allocator<cell_type>::deallocate(*ptr, cell_block_size);
+ ptr--;
+ }
+ pod_allocator<cell_type*>::deallocate(m_cells, m_max_blocks);
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ rasterizer_cells_aa<Cell>::rasterizer_cells_aa(unsigned cell_block_limit) :
+ m_num_blocks(0),
+ m_max_blocks(0),
+ m_curr_block(0),
+ m_num_cells(0),
+ m_cell_block_limit(cell_block_limit),
+ m_cells(0),
+ m_curr_cell_ptr(0),
+ m_sorted_cells(),
+ m_sorted_y(),
+ m_min_x(0x7FFFFFFF),
+ m_min_y(0x7FFFFFFF),
+ m_max_x(-0x7FFFFFFF),
+ m_max_y(-0x7FFFFFFF),
+ m_sorted(false)
+ {
+ m_style_cell.initial();
+ m_curr_cell.initial();
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ void rasterizer_cells_aa<Cell>::reset()
+ {
+ m_num_cells = 0;
+ m_curr_block = 0;
+ m_curr_cell.initial();
+ m_style_cell.initial();
+ m_sorted = false;
+ m_min_x = 0x7FFFFFFF;
+ m_min_y = 0x7FFFFFFF;
+ m_max_x = -0x7FFFFFFF;
+ m_max_y = -0x7FFFFFFF;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ AGG_INLINE void rasterizer_cells_aa<Cell>::add_curr_cell()
+ {
+ if(m_curr_cell.area | m_curr_cell.cover)
+ {
+ if((m_num_cells & cell_block_mask) == 0)
+ {
+ if(m_num_blocks >= m_cell_block_limit) {
+ throw std::overflow_error("Exceeded cell block limit");
+ }
+ allocate_block();
+ }
+ *m_curr_cell_ptr++ = m_curr_cell;
+ ++m_num_cells;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ AGG_INLINE void rasterizer_cells_aa<Cell>::set_curr_cell(int x, int y)
+ {
+ if(m_curr_cell.not_equal(x, y, m_style_cell))
+ {
+ add_curr_cell();
+ m_curr_cell.style(m_style_cell);
+ m_curr_cell.x = x;
+ m_curr_cell.y = y;
+ m_curr_cell.cover = 0;
+ m_curr_cell.area = 0;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ AGG_INLINE void rasterizer_cells_aa<Cell>::render_hline(int ey,
+ int x1, int y1,
+ int x2, int y2)
+ {
+ int ex1 = x1 >> poly_subpixel_shift;
+ int ex2 = x2 >> poly_subpixel_shift;
+ int fx1 = x1 & poly_subpixel_mask;
+ int fx2 = x2 & poly_subpixel_mask;
+
+ int delta, p, first, dx;
+ int incr, lift, mod, rem;
+
+ //trivial case. Happens often
+ if(y1 == y2)
+ {
+ set_curr_cell(ex2, ey);
+ return;
+ }
+
+ //everything is located in a single cell. That is easy!
+ if(ex1 == ex2)
+ {
+ delta = y2 - y1;
+ m_curr_cell.cover += delta;
+ m_curr_cell.area += (fx1 + fx2) * delta;
+ return;
+ }
+
+ //ok, we'll have to render a run of adjacent cells on the same
+ //hline...
+ p = (poly_subpixel_scale - fx1) * (y2 - y1);
+ first = poly_subpixel_scale;
+ incr = 1;
+
+ dx = x2 - x1;
+
+ if(dx < 0)
+ {
+ p = fx1 * (y2 - y1);
+ first = 0;
+ incr = -1;
+ dx = -dx;
+ }
+
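+        // Integer DDA: delta is the y-advance across the first partial cell;
+        // the "mod < 0" adjustment below makes the division round toward
+        // negative infinity (floor division) when y2 < y1.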
+ delta = p / dx;
+ mod = p % dx;
+
+ if(mod < 0)
+ {
+ delta--;
+ mod += dx;
+ }
+
+ m_curr_cell.cover += delta;
+ m_curr_cell.area += (fx1 + first) * delta;
+
+ ex1 += incr;
+ set_curr_cell(ex1, ey);
+ y1 += delta;
+
+ if(ex1 != ex2)
+ {
+ p = poly_subpixel_scale * (y2 - y1 + delta);
+ lift = p / dx;
+ rem = p % dx;
+
+ if (rem < 0)
+ {
+ lift--;
+ rem += dx;
+ }
+
+ mod -= dx;
+
+ while (ex1 != ex2)
+ {
+ delta = lift;
+ mod += rem;
+ if(mod >= 0)
+ {
+ mod -= dx;
+ delta++;
+ }
+
+ m_curr_cell.cover += delta;
+ m_curr_cell.area += poly_subpixel_scale * delta;
+ y1 += delta;
+ ex1 += incr;
+ set_curr_cell(ex1, ey);
+ }
+ }
+ delta = y2 - y1;
+ m_curr_cell.cover += delta;
+ m_curr_cell.area += (fx2 + poly_subpixel_scale - first) * delta;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ AGG_INLINE void rasterizer_cells_aa<Cell>::style(const cell_type& style_cell)
+ {
+ m_style_cell.style(style_cell);
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ void rasterizer_cells_aa<Cell>::line(int x1, int y1, int x2, int y2)
+ {
+ enum dx_limit_e { dx_limit = 16384 << poly_subpixel_shift };
+
+ int dx = x2 - x1;
+
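+        // Split very long edges in half recursively, presumably so the
+        // fixed-point products computed below and in render_hline() stay
+        // within 32-bit integer range.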
+ if(dx >= dx_limit || dx <= -dx_limit)
+ {
+ int cx = (x1 + x2) >> 1;
+ int cy = (y1 + y2) >> 1;
+ line(x1, y1, cx, cy);
+ line(cx, cy, x2, y2);
+ return;
+ }
+
+ int dy = y2 - y1;
+ int ex1 = x1 >> poly_subpixel_shift;
+ int ex2 = x2 >> poly_subpixel_shift;
+ int ey1 = y1 >> poly_subpixel_shift;
+ int ey2 = y2 >> poly_subpixel_shift;
+ int fy1 = y1 & poly_subpixel_mask;
+ int fy2 = y2 & poly_subpixel_mask;
+
+ int x_from, x_to;
+ int p, rem, mod, lift, delta, first, incr;
+
+ if(ex1 < m_min_x) m_min_x = ex1;
+ if(ex1 > m_max_x) m_max_x = ex1;
+ if(ey1 < m_min_y) m_min_y = ey1;
+ if(ey1 > m_max_y) m_max_y = ey1;
+ if(ex2 < m_min_x) m_min_x = ex2;
+ if(ex2 > m_max_x) m_max_x = ex2;
+ if(ey2 < m_min_y) m_min_y = ey2;
+ if(ey2 > m_max_y) m_max_y = ey2;
+
+ set_curr_cell(ex1, ey1);
+
+ //everything is on a single hline
+ if(ey1 == ey2)
+ {
+ render_hline(ey1, x1, fy1, x2, fy2);
+ return;
+ }
+
+        //Vertical line - we have to calculate the start and end cells,
+        //and then the common values of the area and coverage for
+        //all cells of the line. There is exactly one cell per scanline,
+        //so we don't have to call render_hline().
+ incr = 1;
+ if(dx == 0)
+ {
+ int ex = x1 >> poly_subpixel_shift;
+ int two_fx = (x1 - (ex << poly_subpixel_shift)) << 1;
+ int area;
+
+ first = poly_subpixel_scale;
+ if(dy < 0)
+ {
+ first = 0;
+ incr = -1;
+ }
+
+ x_from = x1;
+
+ //render_hline(ey1, x_from, fy1, x_from, first);
+ delta = first - fy1;
+ m_curr_cell.cover += delta;
+ m_curr_cell.area += two_fx * delta;
+
+ ey1 += incr;
+ set_curr_cell(ex, ey1);
+
+ delta = first + first - poly_subpixel_scale;
+ area = two_fx * delta;
+ while(ey1 != ey2)
+ {
+ //render_hline(ey1, x_from, poly_subpixel_scale - first, x_from, first);
+ m_curr_cell.cover = delta;
+ m_curr_cell.area = area;
+ ey1 += incr;
+ set_curr_cell(ex, ey1);
+ }
+ //render_hline(ey1, x_from, poly_subpixel_scale - first, x_from, fy2);
+ delta = fy2 - poly_subpixel_scale + first;
+ m_curr_cell.cover += delta;
+ m_curr_cell.area += two_fx * delta;
+ return;
+ }
+
+ //ok, we have to render several hlines
+ p = (poly_subpixel_scale - fy1) * dx;
+ first = poly_subpixel_scale;
+
+ if(dy < 0)
+ {
+ p = fy1 * dx;
+ first = 0;
+ incr = -1;
+ dy = -dy;
+ }
+
+ delta = p / dy;
+ mod = p % dy;
+
+ if(mod < 0)
+ {
+ delta--;
+ mod += dy;
+ }
+
+ x_from = x1 + delta;
+ render_hline(ey1, x1, fy1, x_from, first);
+
+ ey1 += incr;
+ set_curr_cell(x_from >> poly_subpixel_shift, ey1);
+
+ if(ey1 != ey2)
+ {
+ p = poly_subpixel_scale * dx;
+ lift = p / dy;
+ rem = p % dy;
+
+ if(rem < 0)
+ {
+ lift--;
+ rem += dy;
+ }
+ mod -= dy;
+
+ while(ey1 != ey2)
+ {
+ delta = lift;
+ mod += rem;
+ if (mod >= 0)
+ {
+ mod -= dy;
+ delta++;
+ }
+
+ x_to = x_from + delta;
+ render_hline(ey1, x_from, poly_subpixel_scale - first, x_to, first);
+ x_from = x_to;
+
+ ey1 += incr;
+ set_curr_cell(x_from >> poly_subpixel_shift, ey1);
+ }
+ }
+ render_hline(ey1, x_from, poly_subpixel_scale - first, x2, fy2);
+ }
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ void rasterizer_cells_aa<Cell>::allocate_block()
+ {
+ if(m_curr_block >= m_num_blocks)
+ {
+ if(m_num_blocks >= m_max_blocks)
+ {
+ cell_type** new_cells =
+ pod_allocator<cell_type*>::allocate(m_max_blocks +
+ cell_block_pool);
+
+ if(m_cells)
+ {
+ memcpy(new_cells, m_cells, m_max_blocks * sizeof(cell_type*));
+ pod_allocator<cell_type*>::deallocate(m_cells, m_max_blocks);
+ }
+ m_cells = new_cells;
+ m_max_blocks += cell_block_pool;
+ }
+
+ m_cells[m_num_blocks++] =
+ pod_allocator<cell_type>::allocate(cell_block_size);
+
+ }
+ m_curr_cell_ptr = m_cells[m_curr_block++];
+ }
+
+
+
+ //------------------------------------------------------------------------
+ template <class T> static AGG_INLINE void swap_cells(T* a, T* b)
+ {
+ T temp = *a;
+ *a = *b;
+ *b = temp;
+ }
+
+
+ //------------------------------------------------------------------------
+ enum
+ {
+ qsort_threshold = 9
+ };
+
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ void qsort_cells(Cell** start, unsigned num)
+ {
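+        // Iterative quicksort by cell x coordinate: pending sub-ranges are
+        // kept on an explicit stack (pushed and popped as base/limit pairs)
+        // instead of recursing; small ranges fall back to insertion sort.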
+ Cell** stack[80];
+ Cell*** top;
+ Cell** limit;
+ Cell** base;
+
+ limit = start + num;
+ base = start;
+ top = stack;
+
+ for (;;)
+ {
+ int len = int(limit - base);
+
+ Cell** i;
+ Cell** j;
+ Cell** pivot;
+
+ if(len > qsort_threshold)
+ {
+ // we use base + len/2 as the pivot
+ pivot = base + len / 2;
+ swap_cells(base, pivot);
+
+ i = base + 1;
+ j = limit - 1;
+
+ // now ensure that *i <= *base <= *j
+ if((*j)->x < (*i)->x)
+ {
+ swap_cells(i, j);
+ }
+
+ if((*base)->x < (*i)->x)
+ {
+ swap_cells(base, i);
+ }
+
+ if((*j)->x < (*base)->x)
+ {
+ swap_cells(base, j);
+ }
+
+ for(;;)
+ {
+ int x = (*base)->x;
+ do i++; while( (*i)->x < x );
+ do j--; while( x < (*j)->x );
+
+ if(i > j)
+ {
+ break;
+ }
+
+ swap_cells(i, j);
+ }
+
+ swap_cells(base, j);
+
+ // now, push the largest sub-array
+ if(j - base > limit - i)
+ {
+ top[0] = base;
+ top[1] = j;
+ base = i;
+ }
+ else
+ {
+ top[0] = i;
+ top[1] = limit;
+ limit = j;
+ }
+ top += 2;
+ }
+ else
+ {
+ // the sub-array is small, perform insertion sort
+ j = base;
+ i = j + 1;
+
+ for(; i < limit; j = i, i++)
+ {
+ for(; j[1]->x < (*j)->x; j--)
+ {
+ swap_cells(j + 1, j);
+ if (j == base)
+ {
+ break;
+ }
+ }
+ }
+
+ if(top > stack)
+ {
+ top -= 2;
+ base = top[0];
+ limit = top[1];
+ }
+ else
+ {
+ break;
+ }
+ }
+ }
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class Cell>
+ void rasterizer_cells_aa<Cell>::sort_cells()
+ {
+ if(m_sorted) return; //Perform sort only the first time.
+
+ add_curr_cell();
+ m_curr_cell.x = 0x7FFFFFFF;
+ m_curr_cell.y = 0x7FFFFFFF;
+ m_curr_cell.cover = 0;
+ m_curr_cell.area = 0;
+
+ if(m_num_cells == 0) return;
+
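+        // Strategy: bucket the cells by scanline (a counting sort on y),
+        // then quick-sort each scanline's cells by x with qsort_cells().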
+// DBG: Check to see if min/max works well.
+//for(unsigned nc = 0; nc < m_num_cells; nc++)
+//{
+// cell_type* cell = m_cells[nc >> cell_block_shift] + (nc & cell_block_mask);
+// if(cell->x < m_min_x ||
+// cell->y < m_min_y ||
+// cell->x > m_max_x ||
+// cell->y > m_max_y)
+// {
+// cell = cell; // Breakpoint here
+// }
+//}
+ // Allocate the array of cell pointers
+ m_sorted_cells.allocate(m_num_cells, 16);
+
+ // Allocate and zero the Y array
+ m_sorted_y.allocate(m_max_y - m_min_y + 1, 16);
+ m_sorted_y.zero();
+
+ // Create the Y-histogram (count the numbers of cells for each Y)
+ cell_type** block_ptr = m_cells;
+ cell_type* cell_ptr;
+ unsigned nb = m_num_cells;
+ unsigned i;
+ while(nb)
+ {
+ cell_ptr = *block_ptr++;
+ i = (nb > cell_block_size) ? cell_block_size : nb;
+ nb -= i;
+ while(i--)
+ {
+ m_sorted_y[cell_ptr->y - m_min_y].start++;
+ ++cell_ptr;
+ }
+ }
+
+ // Convert the Y-histogram into the array of starting indexes
+ unsigned start = 0;
+ for(i = 0; i < m_sorted_y.size(); i++)
+ {
+ unsigned v = m_sorted_y[i].start;
+ m_sorted_y[i].start = start;
+ start += v;
+ }
+
+ // Fill the cell pointer array sorted by Y
+ block_ptr = m_cells;
+ nb = m_num_cells;
+ while(nb)
+ {
+ cell_ptr = *block_ptr++;
+ i = (nb > cell_block_size) ? cell_block_size : nb;
+ nb -= i;
+ while(i--)
+ {
+ sorted_y& curr_y = m_sorted_y[cell_ptr->y - m_min_y];
+ m_sorted_cells[curr_y.start + curr_y.num] = cell_ptr;
+ ++curr_y.num;
+ ++cell_ptr;
+ }
+ }
+
+ // Finally arrange the X-arrays
+ for(i = 0; i < m_sorted_y.size(); i++)
+ {
+ const sorted_y& curr_y = m_sorted_y[i];
+ if(curr_y.num)
+ {
+ qsort_cells(m_sorted_cells.data() + curr_y.start, curr_y.num);
+ }
+ }
+ m_sorted = true;
+ }
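+ // Worked example of the arrangement above (illustrative, not part of the
+ // original AGG sources): if three scanlines hold 3, 1 and 2 cells, the
+ // first pass records the counts {3, 1, 2} in m_sorted_y[..].start, the
+ // second pass turns them into starting indexes {0, 3, 4}, the third pass
+ // scatters the cell pointers into m_sorted_cells at those positions, and
+ // finally each per-Y slice is sorted by X with qsort_cells().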
+
+
+
+ //------------------------------------------------------scanline_hit_test
+ class scanline_hit_test
+ {
+ public:
+ scanline_hit_test(int x) : m_x(x), m_hit(false) {}
+
+ void reset_spans() {}
+ void finalize(int) {}
+ void add_cell(int x, int)
+ {
+ if(m_x == x) m_hit = true;
+ }
+ void add_span(int x, int len, int)
+ {
+ if(m_x >= x && m_x < x+len) m_hit = true;
+ }
+ unsigned num_spans() const { return 1; }
+ bool hit() const { return m_hit; }
+
+ private:
+ int m_x;
+ bool m_hit;
+ };
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_scanline_aa.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_scanline_aa.h
new file mode 100644
index 00000000000..15832166462
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_scanline_aa.h
@@ -0,0 +1,481 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+//
+// The author gratefully acknowledges the support of David Turner,
+// Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
+// library - in producing this work. See http://www.freetype.org for details.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_RASTERIZER_SCANLINE_AA_INCLUDED
+#define AGG_RASTERIZER_SCANLINE_AA_INCLUDED
+
+#include "agg_rasterizer_cells_aa.h"
+#include "agg_rasterizer_sl_clip.h"
+#include "agg_rasterizer_scanline_aa_nogamma.h"
+#include "agg_gamma_functions.h"
+
+
+namespace agg
+{
+ //==================================================rasterizer_scanline_aa
+ // Polygon rasterizer that is used to render filled polygons with
+ // high-quality Anti-Aliasing. Internally, by default, the class uses
+ // integer coordinates in format 24.8, i.e. 24 bits for integer part
+ // and 8 bits for fractional - see poly_subpixel_shift. This class can be
+ // used in the following way:
+ //
+ // 1. filling_rule(filling_rule_e ft) - optional.
+ //
+ // 2. gamma() - optional.
+ //
+ // 3. reset()
+ //
+ // 4. move_to(x, y) / line_to(x, y) - make the polygon. One can create
+ // more than one contour, but each contour must consist of at least 3
+ // vertices, i.e. move_to(x1, y1); line_to(x2, y2); line_to(x3, y3);
+ // is the absolute minimum of vertices that define a triangle.
+ // The algorithm checks neither the number of vertices nor the
+ // coincidence of their coordinates; in the worst case it simply
+ // won't draw anything.
+ // The order of the vertices (clockwise or counterclockwise)
+ // matters when the non-zero filling rule (fill_non_zero) is used.
+ // In that case the vertex order of all the contours must be the same
+ // if you want your intersecting polygons to be without "holes".
+ // You can actually mix vertex orders. If the contours do not
+ // intersect each other the order does not matter anyway. If they do,
+ // contours with the same vertex order will be rendered without "holes",
+ // while intersecting contours with different orders will have "holes".
+ //
+ // filling_rule() and gamma() can be called anytime before "sweeping".
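+ //
+ // Illustrative usage sketch (not part of the original AGG sources;
+ // scanline_u8, rgba and the renderer_base object "rb" are assumed to
+ // come from the companion AGG headers):
+ //
+ //     agg::rasterizer_scanline_aa<> ras;
+ //     ras.filling_rule(agg::fill_even_odd);      // optional
+ //     ras.gamma(agg::gamma_power(1.6));          // optional
+ //     ras.reset();
+ //     ras.move_to_d(10.0, 10.0);
+ //     ras.line_to_d(100.0, 20.0);
+ //     ras.line_to_d(50.0, 90.0);                 // >= 3 vertices per contour
+ //
+ //     agg::scanline_u8 sl;                       // from agg_scanline_u.h
+ //     agg::render_scanlines_aa_solid(ras, sl, rb, agg::rgba(0, 0, 0));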
+ //------------------------------------------------------------------------
+ template<class Clip=rasterizer_sl_clip_int> class rasterizer_scanline_aa
+ {
+ enum status
+ {
+ status_initial,
+ status_move_to,
+ status_line_to,
+ status_closed
+ };
+
+ public:
+ typedef Clip clip_type;
+ typedef typename Clip::conv_type conv_type;
+ typedef typename Clip::coord_type coord_type;
+
+ enum aa_scale_e
+ {
+ aa_shift = 8,
+ aa_scale = 1 << aa_shift,
+ aa_mask = aa_scale - 1,
+ aa_scale2 = aa_scale * 2,
+ aa_mask2 = aa_scale2 - 1
+ };
+
+ //--------------------------------------------------------------------
+ rasterizer_scanline_aa(unsigned cell_block_limit=1024) :
+ m_outline(cell_block_limit),
+ m_clipper(),
+ m_filling_rule(fill_non_zero),
+ m_auto_close(true),
+ m_start_x(0),
+ m_start_y(0),
+ m_status(status_initial)
+ {
+ int i;
+ for(i = 0; i < aa_scale; i++) m_gamma[i] = i;
+ }
+
+ //--------------------------------------------------------------------
+ template<class GammaF>
+ rasterizer_scanline_aa(const GammaF& gamma_function, unsigned cell_block_limit) :
+ m_outline(cell_block_limit),
+ m_clipper(m_outline),
+ m_filling_rule(fill_non_zero),
+ m_auto_close(true),
+ m_start_x(0),
+ m_start_y(0),
+ m_status(status_initial)
+ {
+ gamma(gamma_function);
+ }
+
+ //--------------------------------------------------------------------
+ void reset();
+ void reset_clipping();
+ void clip_box(double x1, double y1, double x2, double y2);
+ void filling_rule(filling_rule_e filling_rule);
+ void auto_close(bool flag) { m_auto_close = flag; }
+
+ //--------------------------------------------------------------------
+ template<class GammaF> void gamma(const GammaF& gamma_function)
+ {
+ int i;
+ for(i = 0; i < aa_scale; i++)
+ {
+ m_gamma[i] = uround(gamma_function(double(i) / aa_mask) * aa_mask);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ unsigned apply_gamma(unsigned cover) const
+ {
+ return m_gamma[cover];
+ }
+
+ //--------------------------------------------------------------------
+ void move_to(int x, int y);
+ void line_to(int x, int y);
+ void move_to_d(double x, double y);
+ void line_to_d(double x, double y);
+ void close_polygon();
+ void add_vertex(double x, double y, unsigned cmd);
+
+ void edge(int x1, int y1, int x2, int y2);
+ void edge_d(double x1, double y1, double x2, double y2);
+
+ //-------------------------------------------------------------------
+ template<class VertexSource>
+ void add_path(VertexSource& vs, unsigned path_id=0)
+ {
+ double x;
+ double y;
+
+ unsigned cmd;
+ vs.rewind(path_id);
+ if(m_outline.sorted()) reset();
+ while(!is_stop(cmd = vs.vertex(&x, &y)))
+ {
+ add_vertex(x, y, cmd);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ int min_x() const { return m_outline.min_x(); }
+ int min_y() const { return m_outline.min_y(); }
+ int max_x() const { return m_outline.max_x(); }
+ int max_y() const { return m_outline.max_y(); }
+
+ //--------------------------------------------------------------------
+ void sort();
+ bool rewind_scanlines();
+ bool navigate_scanline(int y);
+
+ //--------------------------------------------------------------------
+ AGG_INLINE unsigned calculate_alpha(int area) const
+ {
+ int cover = area >> (poly_subpixel_shift*2 + 1 - aa_shift);
+
+ if(cover < 0) cover = -cover;
+ if(m_filling_rule == fill_even_odd)
+ {
+ cover &= aa_mask2;
+ if(cover > aa_scale)
+ {
+ cover = aa_scale2 - cover;
+ }
+ }
+ if(cover > aa_mask) cover = aa_mask;
+ return m_gamma[cover];
+ }
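+ // Worked example (illustrative, assuming the defaults poly_subpixel_shift = 8
+ // and aa_shift = 8, so the shift above is 9): a fully covered span has
+ // cover = 256, and (256 << 9) >> 9 = 256 is clamped to aa_mask = 255 before
+ // the m_gamma[] lookup; half coverage (cover = 128) maps to m_gamma[128].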
+
+ //--------------------------------------------------------------------
+ template<class Scanline> bool sweep_scanline(Scanline& sl)
+ {
+ for(;;)
+ {
+ if(m_scan_y > m_outline.max_y()) return false;
+ sl.reset_spans();
+ unsigned num_cells = m_outline.scanline_num_cells(m_scan_y);
+ const cell_aa* const* cells = m_outline.scanline_cells(m_scan_y);
+ int cover = 0;
+
+ while(num_cells)
+ {
+ const cell_aa* cur_cell = *cells;
+ int x = cur_cell->x;
+ int area = cur_cell->area;
+ unsigned alpha;
+
+ cover += cur_cell->cover;
+
+ //accumulate all cells with the same X
+ while(--num_cells)
+ {
+ cur_cell = *++cells;
+ if(cur_cell->x != x) break;
+ area += cur_cell->area;
+ cover += cur_cell->cover;
+ }
+
+ if(area)
+ {
+ alpha = calculate_alpha((cover << (poly_subpixel_shift + 1)) - area);
+ if(alpha)
+ {
+ sl.add_cell(x, alpha);
+ }
+ x++;
+ }
+
+ if(num_cells && cur_cell->x > x)
+ {
+ alpha = calculate_alpha(cover << (poly_subpixel_shift + 1));
+ if(alpha)
+ {
+ sl.add_span(x, cur_cell->x - x, alpha);
+ }
+ }
+ }
+
+ if(sl.num_spans()) break;
+ ++m_scan_y;
+ }
+
+ sl.finalize(m_scan_y);
+ ++m_scan_y;
+ return true;
+ }
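+ // In short (illustrative summary): pixels actually crossed by an edge are
+ // emitted individually through sl.add_cell() using their own area, while
+ // the fully covered run between two such pixels is emitted as a single
+ // sl.add_span() using the accumulated cover alone.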
+
+ //--------------------------------------------------------------------
+ bool hit_test(int tx, int ty);
+
+
+ private:
+ //--------------------------------------------------------------------
+ // Disable copying
+ rasterizer_scanline_aa(const rasterizer_scanline_aa<Clip>&);
+ const rasterizer_scanline_aa<Clip>&
+ operator = (const rasterizer_scanline_aa<Clip>&);
+
+ private:
+ rasterizer_cells_aa<cell_aa> m_outline;
+ clip_type m_clipper;
+ int m_gamma[aa_scale];
+ filling_rule_e m_filling_rule;
+ bool m_auto_close;
+ coord_type m_start_x;
+ coord_type m_start_y;
+ unsigned m_status;
+ int m_scan_y;
+ };
+
+
+
+
+
+
+
+
+
+
+
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::reset()
+ {
+ m_outline.reset();
+ m_status = status_initial;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::filling_rule(filling_rule_e filling_rule)
+ {
+ m_filling_rule = filling_rule;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::clip_box(double x1, double y1,
+ double x2, double y2)
+ {
+ reset();
+ m_clipper.clip_box(conv_type::upscale(x1), conv_type::upscale(y1),
+ conv_type::upscale(x2), conv_type::upscale(y2));
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::reset_clipping()
+ {
+ reset();
+ m_clipper.reset_clipping();
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::close_polygon()
+ {
+ if(m_status == status_line_to)
+ {
+ m_clipper.line_to(m_outline, m_start_x, m_start_y);
+ m_status = status_closed;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::move_to(int x, int y)
+ {
+ if(m_outline.sorted()) reset();
+ if(m_auto_close) close_polygon();
+ m_clipper.move_to(m_start_x = conv_type::downscale(x),
+ m_start_y = conv_type::downscale(y));
+ m_status = status_move_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::line_to(int x, int y)
+ {
+ m_clipper.line_to(m_outline,
+ conv_type::downscale(x),
+ conv_type::downscale(y));
+ m_status = status_line_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::move_to_d(double x, double y)
+ {
+ if(m_outline.sorted()) reset();
+ if(m_auto_close) close_polygon();
+ m_clipper.move_to(m_start_x = conv_type::upscale(x),
+ m_start_y = conv_type::upscale(y));
+ m_status = status_move_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::line_to_d(double x, double y)
+ {
+ m_clipper.line_to(m_outline,
+ conv_type::upscale(x),
+ conv_type::upscale(y));
+ m_status = status_line_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::add_vertex(double x, double y, unsigned cmd)
+ {
+ if(is_move_to(cmd))
+ {
+ move_to_d(x, y);
+ }
+ else
+ if(is_vertex(cmd))
+ {
+ line_to_d(x, y);
+ }
+ else
+ if(is_close(cmd))
+ {
+ close_polygon();
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::edge(int x1, int y1, int x2, int y2)
+ {
+ if(m_outline.sorted()) reset();
+ m_clipper.move_to(conv_type::downscale(x1), conv_type::downscale(y1));
+ m_clipper.line_to(m_outline,
+ conv_type::downscale(x2),
+ conv_type::downscale(y2));
+ m_status = status_move_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::edge_d(double x1, double y1,
+ double x2, double y2)
+ {
+ if(m_outline.sorted()) reset();
+ m_clipper.move_to(conv_type::upscale(x1), conv_type::upscale(y1));
+ m_clipper.line_to(m_outline,
+ conv_type::upscale(x2),
+ conv_type::upscale(y2));
+ m_status = status_move_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa<Clip>::sort()
+ {
+ if(m_auto_close) close_polygon();
+ m_outline.sort_cells();
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ AGG_INLINE bool rasterizer_scanline_aa<Clip>::rewind_scanlines()
+ {
+ if(m_auto_close) close_polygon();
+ m_outline.sort_cells();
+ if(m_outline.total_cells() == 0)
+ {
+ return false;
+ }
+ m_scan_y = m_outline.min_y();
+ return true;
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ AGG_INLINE bool rasterizer_scanline_aa<Clip>::navigate_scanline(int y)
+ {
+ if(m_auto_close) close_polygon();
+ m_outline.sort_cells();
+ if(m_outline.total_cells() == 0 ||
+ y < m_outline.min_y() ||
+ y > m_outline.max_y())
+ {
+ return false;
+ }
+ m_scan_y = y;
+ return true;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ bool rasterizer_scanline_aa<Clip>::hit_test(int tx, int ty)
+ {
+ if(!navigate_scanline(ty)) return false;
+ scanline_hit_test sl(tx);
+ sweep_scanline(sl);
+ return sl.hit();
+ }
+
+
+
+}
+
+
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_scanline_aa_nogamma.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_scanline_aa_nogamma.h
new file mode 100644
index 00000000000..7729b3359a1
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_scanline_aa_nogamma.h
@@ -0,0 +1,482 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+//
+// The author gratefully acknowledges the support of David Turner,
+// Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
+// library - in producing this work. See http://www.freetype.org for details.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_RASTERIZER_SCANLINE_AA_NOGAMMA_INCLUDED
+#define AGG_RASTERIZER_SCANLINE_AA_NOGAMMA_INCLUDED
+
+#include "agg_rasterizer_cells_aa.h"
+#include "agg_rasterizer_sl_clip.h"
+
+
+namespace agg
+{
+
+
+ //-----------------------------------------------------------------cell_aa
+ // A pixel cell. No constructors are defined, and this is intentional:
+ // it avoids extra overhead when allocating an array of cells.
+ struct cell_aa
+ {
+ int x;
+ int y;
+ int cover;
+ int area;
+
+ void initial()
+ {
+ x = 0x7FFFFFFF;
+ y = 0x7FFFFFFF;
+ cover = 0;
+ area = 0;
+ }
+
+ void style(const cell_aa&) {}
+
+ int not_equal(int ex, int ey, const cell_aa&) const
+ {
+ return ex != x || ey != y;
+ }
+ };
+
+
+ //==================================================rasterizer_scanline_aa_nogamma
+ // Polygon rasterizer that is used to render filled polygons with
+ // high-quality Anti-Aliasing. Internally, by default, the class uses
+ // integer coordinates in format 24.8, i.e. 24 bits for integer part
+ // and 8 bits for fractional - see poly_subpixel_shift. This class can be
+ // used in the following way:
+ //
+ // 1. filling_rule(filling_rule_e ft) - optional.
+ //
+ // 2. (no gamma step in this variant - coverage values are used as is).
+ //
+ // 3. reset()
+ //
+ // 4. move_to(x, y) / line_to(x, y) - make the polygon. One can create
+ // more than one contour, but each contour must consist of at least 3
+ // vertices, i.e. move_to(x1, y1); line_to(x2, y2); line_to(x3, y3);
+ // is the absolute minimum of vertices that define a triangle.
+ // The algorithm checks neither the number of vertices nor the
+ // coincidence of their coordinates; in the worst case it simply
+ // won't draw anything.
+ // The order of the vertices (clockwise or counterclockwise)
+ // matters when the non-zero filling rule (fill_non_zero) is used.
+ // In that case the vertex order of all the contours must be the same
+ // if you want your intersecting polygons to be without "holes".
+ // You can actually mix vertex orders. If the contours do not
+ // intersect each other the order does not matter anyway. If they do,
+ // contours with the same vertex order will be rendered without "holes",
+ // while intersecting contours with different orders will have "holes".
+ //
+ // filling_rule() can be called anytime before "sweeping".
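+ //
+ // Illustrative note (not part of the original AGG sources): this class
+ // is used exactly like its gamma-aware counterpart in
+ // agg_rasterizer_scanline_aa.h, e.g.
+ //
+ //     agg::rasterizer_scanline_aa_nogamma<> ras;
+ //
+ // The only behavioural difference is that apply_gamma() and
+ // calculate_alpha() return the coverage unchanged, so no per-rasterizer
+ // gamma table is stored or consulted.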
+ //------------------------------------------------------------------------
+ template<class Clip=rasterizer_sl_clip_int> class rasterizer_scanline_aa_nogamma
+ {
+ enum status
+ {
+ status_initial,
+ status_move_to,
+ status_line_to,
+ status_closed
+ };
+
+ public:
+ typedef Clip clip_type;
+ typedef typename Clip::conv_type conv_type;
+ typedef typename Clip::coord_type coord_type;
+
+ enum aa_scale_e
+ {
+ aa_shift = 8,
+ aa_scale = 1 << aa_shift,
+ aa_mask = aa_scale - 1,
+ aa_scale2 = aa_scale * 2,
+ aa_mask2 = aa_scale2 - 1
+ };
+
+ //--------------------------------------------------------------------
+ rasterizer_scanline_aa_nogamma(unsigned cell_block_limit=1024) :
+ m_outline(cell_block_limit),
+ m_clipper(),
+ m_filling_rule(fill_non_zero),
+ m_auto_close(true),
+ m_start_x(0),
+ m_start_y(0),
+ m_status(status_initial)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ void reset();
+ void reset_clipping();
+ void clip_box(double x1, double y1, double x2, double y2);
+ void filling_rule(filling_rule_e filling_rule);
+ void auto_close(bool flag) { m_auto_close = flag; }
+
+ //--------------------------------------------------------------------
+ unsigned apply_gamma(unsigned cover) const
+ {
+ return cover;
+ }
+
+ //--------------------------------------------------------------------
+ void move_to(int x, int y);
+ void line_to(int x, int y);
+ void move_to_d(double x, double y);
+ void line_to_d(double x, double y);
+ void close_polygon();
+ void add_vertex(double x, double y, unsigned cmd);
+
+ void edge(int x1, int y1, int x2, int y2);
+ void edge_d(double x1, double y1, double x2, double y2);
+
+ //-------------------------------------------------------------------
+ template<class VertexSource>
+ void add_path(VertexSource& vs, unsigned path_id=0)
+ {
+ double x;
+ double y;
+
+ unsigned cmd;
+ vs.rewind(path_id);
+ if(m_outline.sorted()) reset();
+ while(!is_stop(cmd = vs.vertex(&x, &y)))
+ {
+ add_vertex(x, y, cmd);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ int min_x() const { return m_outline.min_x(); }
+ int min_y() const { return m_outline.min_y(); }
+ int max_x() const { return m_outline.max_x(); }
+ int max_y() const { return m_outline.max_y(); }
+
+ //--------------------------------------------------------------------
+ void sort();
+ bool rewind_scanlines();
+ bool navigate_scanline(int y);
+
+ //--------------------------------------------------------------------
+ AGG_INLINE unsigned calculate_alpha(int area) const
+ {
+ int cover = area >> (poly_subpixel_shift*2 + 1 - aa_shift);
+
+ if(cover < 0) cover = -cover;
+ if(m_filling_rule == fill_even_odd)
+ {
+ cover &= aa_mask2;
+ if(cover > aa_scale)
+ {
+ cover = aa_scale2 - cover;
+ }
+ }
+ if(cover > aa_mask) cover = aa_mask;
+ return cover;
+ }
+
+ //--------------------------------------------------------------------
+ template<class Scanline> bool sweep_scanline(Scanline& sl)
+ {
+ for(;;)
+ {
+ if(m_scan_y > m_outline.max_y()) return false;
+ sl.reset_spans();
+ unsigned num_cells = m_outline.scanline_num_cells(m_scan_y);
+ const cell_aa* const* cells = m_outline.scanline_cells(m_scan_y);
+ int cover = 0;
+
+ while(num_cells)
+ {
+ const cell_aa* cur_cell = *cells;
+ int x = cur_cell->x;
+ int area = cur_cell->area;
+ unsigned alpha;
+
+ cover += cur_cell->cover;
+
+ //accumulate all cells with the same X
+ while(--num_cells)
+ {
+ cur_cell = *++cells;
+ if(cur_cell->x != x) break;
+ area += cur_cell->area;
+ cover += cur_cell->cover;
+ }
+
+ if(area)
+ {
+ alpha = calculate_alpha((cover << (poly_subpixel_shift + 1)) - area);
+ if(alpha)
+ {
+ sl.add_cell(x, alpha);
+ }
+ x++;
+ }
+
+ if(num_cells && cur_cell->x > x)
+ {
+ alpha = calculate_alpha(cover << (poly_subpixel_shift + 1));
+ if(alpha)
+ {
+ sl.add_span(x, cur_cell->x - x, alpha);
+ }
+ }
+ }
+
+ if(sl.num_spans()) break;
+ ++m_scan_y;
+ }
+
+ sl.finalize(m_scan_y);
+ ++m_scan_y;
+ return true;
+ }
+
+ //--------------------------------------------------------------------
+ bool hit_test(int tx, int ty);
+
+
+ private:
+ //--------------------------------------------------------------------
+ // Disable copying
+ rasterizer_scanline_aa_nogamma(const rasterizer_scanline_aa_nogamma<Clip>&);
+ const rasterizer_scanline_aa_nogamma<Clip>&
+ operator = (const rasterizer_scanline_aa_nogamma<Clip>&);
+
+ private:
+ rasterizer_cells_aa<cell_aa> m_outline;
+ clip_type m_clipper;
+ filling_rule_e m_filling_rule;
+ bool m_auto_close;
+ coord_type m_start_x;
+ coord_type m_start_y;
+ unsigned m_status;
+ int m_scan_y;
+ };
+
+
+
+
+
+
+
+
+
+
+
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::reset()
+ {
+ m_outline.reset();
+ m_status = status_initial;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::filling_rule(filling_rule_e filling_rule)
+ {
+ m_filling_rule = filling_rule;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::clip_box(double x1, double y1,
+ double x2, double y2)
+ {
+ reset();
+ m_clipper.clip_box(conv_type::upscale(x1), conv_type::upscale(y1),
+ conv_type::upscale(x2), conv_type::upscale(y2));
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::reset_clipping()
+ {
+ reset();
+ m_clipper.reset_clipping();
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::close_polygon()
+ {
+ if(m_status == status_line_to)
+ {
+ m_clipper.line_to(m_outline, m_start_x, m_start_y);
+ m_status = status_closed;
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::move_to(int x, int y)
+ {
+ if(m_outline.sorted()) reset();
+ if(m_auto_close) close_polygon();
+ m_clipper.move_to(m_start_x = conv_type::downscale(x),
+ m_start_y = conv_type::downscale(y));
+ m_status = status_move_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::line_to(int x, int y)
+ {
+ m_clipper.line_to(m_outline,
+ conv_type::downscale(x),
+ conv_type::downscale(y));
+ m_status = status_line_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::move_to_d(double x, double y)
+ {
+ if(m_outline.sorted()) reset();
+ if(m_auto_close) close_polygon();
+ m_clipper.move_to(m_start_x = conv_type::upscale(x),
+ m_start_y = conv_type::upscale(y));
+ m_status = status_move_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::line_to_d(double x, double y)
+ {
+ m_clipper.line_to(m_outline,
+ conv_type::upscale(x),
+ conv_type::upscale(y));
+ m_status = status_line_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::add_vertex(double x, double y, unsigned cmd)
+ {
+ if(is_move_to(cmd))
+ {
+ move_to_d(x, y);
+ }
+ else
+ if(is_vertex(cmd))
+ {
+ line_to_d(x, y);
+ }
+ else
+ if(is_close(cmd))
+ {
+ close_polygon();
+ }
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::edge(int x1, int y1, int x2, int y2)
+ {
+ if(m_outline.sorted()) reset();
+ m_clipper.move_to(conv_type::downscale(x1), conv_type::downscale(y1));
+ m_clipper.line_to(m_outline,
+ conv_type::downscale(x2),
+ conv_type::downscale(y2));
+ m_status = status_move_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::edge_d(double x1, double y1,
+ double x2, double y2)
+ {
+ if(m_outline.sorted()) reset();
+ m_clipper.move_to(conv_type::upscale(x1), conv_type::upscale(y1));
+ m_clipper.line_to(m_outline,
+ conv_type::upscale(x2),
+ conv_type::upscale(y2));
+ m_status = status_move_to;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ void rasterizer_scanline_aa_nogamma<Clip>::sort()
+ {
+ if(m_auto_close) close_polygon();
+ m_outline.sort_cells();
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ AGG_INLINE bool rasterizer_scanline_aa_nogamma<Clip>::rewind_scanlines()
+ {
+ if(m_auto_close) close_polygon();
+ m_outline.sort_cells();
+ if(m_outline.total_cells() == 0)
+ {
+ return false;
+ }
+ m_scan_y = m_outline.min_y();
+ return true;
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ AGG_INLINE bool rasterizer_scanline_aa_nogamma<Clip>::navigate_scanline(int y)
+ {
+ if(m_auto_close) close_polygon();
+ m_outline.sort_cells();
+ if(m_outline.total_cells() == 0 ||
+ y < m_outline.min_y() ||
+ y > m_outline.max_y())
+ {
+ return false;
+ }
+ m_scan_y = y;
+ return true;
+ }
+
+ //------------------------------------------------------------------------
+ template<class Clip>
+ bool rasterizer_scanline_aa_nogamma<Clip>::hit_test(int tx, int ty)
+ {
+ if(!navigate_scanline(ty)) return false;
+ scanline_hit_test sl(tx);
+ sweep_scanline(sl);
+ return sl.hit();
+ }
+
+
+
+}
+
+
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_sl_clip.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_sl_clip.h
new file mode 100644
index 00000000000..e7ba065acce
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rasterizer_sl_clip.h
@@ -0,0 +1,351 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+#ifndef AGG_RASTERIZER_SL_CLIP_INCLUDED
+#define AGG_RASTERIZER_SL_CLIP_INCLUDED
+
+#include "agg_clip_liang_barsky.h"
+
+namespace agg
+{
+ //--------------------------------------------------------poly_max_coord_e
+ enum poly_max_coord_e
+ {
+ poly_max_coord = (1 << 30) - 1 //----poly_max_coord
+ };
+
+ //------------------------------------------------------------ras_conv_int
+ struct ras_conv_int
+ {
+ typedef int coord_type;
+ static AGG_INLINE int mul_div(double a, double b, double c)
+ {
+ return iround(a * b / c);
+ }
+ static int xi(int v) { return v; }
+ static int yi(int v) { return v; }
+ static int upscale(double v) { return iround(v * poly_subpixel_scale); }
+ static int downscale(int v) { return v; }
+ };
+
+ //--------------------------------------------------------ras_conv_int_sat
+ struct ras_conv_int_sat
+ {
+ typedef int coord_type;
+ static AGG_INLINE int mul_div(double a, double b, double c)
+ {
+ return saturation<poly_max_coord>::iround(a * b / c);
+ }
+ static int xi(int v) { return v; }
+ static int yi(int v) { return v; }
+ static int upscale(double v)
+ {
+ return saturation<poly_max_coord>::iround(v * poly_subpixel_scale);
+ }
+ static int downscale(int v) { return v; }
+ };
+
+ //---------------------------------------------------------ras_conv_int_3x
+ struct ras_conv_int_3x
+ {
+ typedef int coord_type;
+ static AGG_INLINE int mul_div(double a, double b, double c)
+ {
+ return iround(a * b / c);
+ }
+ static int xi(int v) { return v * 3; }
+ static int yi(int v) { return v; }
+ static int upscale(double v) { return iround(v * poly_subpixel_scale); }
+ static int downscale(int v) { return v; }
+ };
+
+ //-----------------------------------------------------------ras_conv_dbl
+ struct ras_conv_dbl
+ {
+ typedef double coord_type;
+ static AGG_INLINE double mul_div(double a, double b, double c)
+ {
+ return a * b / c;
+ }
+ static int xi(double v) { return iround(v * poly_subpixel_scale); }
+ static int yi(double v) { return iround(v * poly_subpixel_scale); }
+ static double upscale(double v) { return v; }
+ static double downscale(int v) { return v / double(poly_subpixel_scale); }
+ };
+
+ //--------------------------------------------------------ras_conv_dbl_3x
+ struct ras_conv_dbl_3x
+ {
+ typedef double coord_type;
+ static AGG_INLINE double mul_div(double a, double b, double c)
+ {
+ return a * b / c;
+ }
+ static int xi(double v) { return iround(v * poly_subpixel_scale * 3); }
+ static int yi(double v) { return iround(v * poly_subpixel_scale); }
+ static double upscale(double v) { return v; }
+ static double downscale(int v) { return v / double(poly_subpixel_scale); }
+ };
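+ // Conversion examples (illustrative, with poly_subpixel_scale = 256):
+ //   ras_conv_int::upscale(1.5)   -> iround(1.5 * 256) = 384
+ //   ras_conv_dbl::xi(1.5)        -> iround(1.5 * 256) = 384
+ //   ras_conv_dbl::downscale(384) -> 384 / 256.0       = 1.5
+ // The *_3x variants multiply X by 3, which is typically used for
+ // horizontal (LCD-style) subpixel rendering.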
+
+
+
+
+
+ //------------------------------------------------------rasterizer_sl_clip
+ template<class Conv> class rasterizer_sl_clip
+ {
+ public:
+ typedef Conv conv_type;
+ typedef typename Conv::coord_type coord_type;
+ typedef rect_base<coord_type> rect_type;
+
+ //--------------------------------------------------------------------
+ rasterizer_sl_clip() :
+ m_clip_box(0,0,0,0),
+ m_x1(0),
+ m_y1(0),
+ m_f1(0),
+ m_clipping(false)
+ {}
+
+ //--------------------------------------------------------------------
+ void reset_clipping()
+ {
+ m_clipping = false;
+ }
+
+ //--------------------------------------------------------------------
+ void clip_box(coord_type x1, coord_type y1, coord_type x2, coord_type y2)
+ {
+ m_clip_box = rect_type(x1, y1, x2, y2);
+ m_clip_box.normalize();
+ m_clipping = true;
+ }
+
+ //--------------------------------------------------------------------
+ void move_to(coord_type x1, coord_type y1)
+ {
+ m_x1 = x1;
+ m_y1 = y1;
+ if(m_clipping) m_f1 = clipping_flags(x1, y1, m_clip_box);
+ }
+
+ private:
+ //------------------------------------------------------------------------
+ template<class Rasterizer>
+ AGG_INLINE void line_clip_y(Rasterizer& ras,
+ coord_type x1, coord_type y1,
+ coord_type x2, coord_type y2,
+ unsigned f1, unsigned f2) const
+ {
+ f1 &= 10;
+ f2 &= 10;
+ if((f1 | f2) == 0)
+ {
+ // Fully visible
+ ras.line(Conv::xi(x1), Conv::yi(y1), Conv::xi(x2), Conv::yi(y2));
+ }
+ else
+ {
+ if(f1 == f2)
+ {
+ // Invisible by Y
+ return;
+ }
+
+ coord_type tx1 = x1;
+ coord_type ty1 = y1;
+ coord_type tx2 = x2;
+ coord_type ty2 = y2;
+
+ if(f1 & 8) // y1 < clip.y1
+ {
+ tx1 = x1 + Conv::mul_div(m_clip_box.y1-y1, x2-x1, y2-y1);
+ ty1 = m_clip_box.y1;
+ }
+
+ if(f1 & 2) // y1 > clip.y2
+ {
+ tx1 = x1 + Conv::mul_div(m_clip_box.y2-y1, x2-x1, y2-y1);
+ ty1 = m_clip_box.y2;
+ }
+
+ if(f2 & 8) // y2 < clip.y1
+ {
+ tx2 = x1 + Conv::mul_div(m_clip_box.y1-y1, x2-x1, y2-y1);
+ ty2 = m_clip_box.y1;
+ }
+
+ if(f2 & 2) // y2 > clip.y2
+ {
+ tx2 = x1 + Conv::mul_div(m_clip_box.y2-y1, x2-x1, y2-y1);
+ ty2 = m_clip_box.y2;
+ }
+ ras.line(Conv::xi(tx1), Conv::yi(ty1),
+ Conv::xi(tx2), Conv::yi(ty2));
+ }
+ }
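+ // Note (illustrative summary): the flags come from clipping_flags() in
+ // agg_clip_liang_barsky.h - bit 0: x > clip.x2, bit 1: y > clip.y2,
+ // bit 2: x < clip.x1, bit 3: y < clip.y1 - so "f & 10" above keeps the
+ // two Y bits and "f & 5" in line_to() below keeps the two X bits.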
+
+
+ public:
+ //--------------------------------------------------------------------
+ template<class Rasterizer>
+ void line_to(Rasterizer& ras, coord_type x2, coord_type y2)
+ {
+ if(m_clipping)
+ {
+ unsigned f2 = clipping_flags(x2, y2, m_clip_box);
+
+ if((m_f1 & 10) == (f2 & 10) && (m_f1 & 10) != 0)
+ {
+ // Invisible by Y
+ m_x1 = x2;
+ m_y1 = y2;
+ m_f1 = f2;
+ return;
+ }
+
+ coord_type x1 = m_x1;
+ coord_type y1 = m_y1;
+ unsigned f1 = m_f1;
+ coord_type y3, y4;
+ unsigned f3, f4;
+
+ switch(((f1 & 5) << 1) | (f2 & 5))
+ {
+ case 0: // Visible by X
+ line_clip_y(ras, x1, y1, x2, y2, f1, f2);
+ break;
+
+ case 1: // x2 > clip.x2
+ y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
+ f3 = clipping_flags_y(y3, m_clip_box);
+ line_clip_y(ras, x1, y1, m_clip_box.x2, y3, f1, f3);
+ line_clip_y(ras, m_clip_box.x2, y3, m_clip_box.x2, y2, f3, f2);
+ break;
+
+ case 2: // x1 > clip.x2
+ y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
+ f3 = clipping_flags_y(y3, m_clip_box);
+ line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y3, f1, f3);
+ line_clip_y(ras, m_clip_box.x2, y3, x2, y2, f3, f2);
+ break;
+
+ case 3: // x1 > clip.x2 && x2 > clip.x2
+ line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y2, f1, f2);
+ break;
+
+ case 4: // x2 < clip.x1
+ y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
+ f3 = clipping_flags_y(y3, m_clip_box);
+ line_clip_y(ras, x1, y1, m_clip_box.x1, y3, f1, f3);
+ line_clip_y(ras, m_clip_box.x1, y3, m_clip_box.x1, y2, f3, f2);
+ break;
+
+ case 6: // x1 > clip.x2 && x2 < clip.x1
+ y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
+ y4 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
+ f3 = clipping_flags_y(y3, m_clip_box);
+ f4 = clipping_flags_y(y4, m_clip_box);
+ line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y3, f1, f3);
+ line_clip_y(ras, m_clip_box.x2, y3, m_clip_box.x1, y4, f3, f4);
+ line_clip_y(ras, m_clip_box.x1, y4, m_clip_box.x1, y2, f4, f2);
+ break;
+
+ case 8: // x1 < clip.x1
+ y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
+ f3 = clipping_flags_y(y3, m_clip_box);
+ line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y3, f1, f3);
+ line_clip_y(ras, m_clip_box.x1, y3, x2, y2, f3, f2);
+ break;
+
+ case 9: // x1 < clip.x1 && x2 > clip.x2
+ y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
+ y4 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
+ f3 = clipping_flags_y(y3, m_clip_box);
+ f4 = clipping_flags_y(y4, m_clip_box);
+ line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y3, f1, f3);
+ line_clip_y(ras, m_clip_box.x1, y3, m_clip_box.x2, y4, f3, f4);
+ line_clip_y(ras, m_clip_box.x2, y4, m_clip_box.x2, y2, f4, f2);
+ break;
+
+ case 12: // x1 < clip.x1 && x2 < clip.x1
+ line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y2, f1, f2);
+ break;
+ }
+ m_f1 = f2;
+ }
+ else
+ {
+ ras.line(Conv::xi(m_x1), Conv::yi(m_y1),
+ Conv::xi(x2), Conv::yi(y2));
+ }
+ m_x1 = x2;
+ m_y1 = y2;
+ }
+
+
+ private:
+ rect_type m_clip_box;
+ coord_type m_x1;
+ coord_type m_y1;
+ unsigned m_f1;
+ bool m_clipping;
+ };
+
+
+
+
+ //---------------------------------------------------rasterizer_sl_no_clip
+ class rasterizer_sl_no_clip
+ {
+ public:
+ typedef ras_conv_int conv_type;
+ typedef int coord_type;
+
+ rasterizer_sl_no_clip() : m_x1(0), m_y1(0) {}
+
+ void reset_clipping() {}
+ void clip_box(coord_type x1, coord_type y1, coord_type x2, coord_type y2) {}
+ void move_to(coord_type x1, coord_type y1) { m_x1 = x1; m_y1 = y1; }
+
+ template<class Rasterizer>
+ void line_to(Rasterizer& ras, coord_type x2, coord_type y2)
+ {
+ ras.line(m_x1, m_y1, x2, y2);
+ m_x1 = x2;
+ m_y1 = y2;
+ }
+
+ private:
+ int m_x1, m_y1;
+ };
+
+
+ // -----rasterizer_sl_clip_int
+ // -----rasterizer_sl_clip_int_sat
+ // -----rasterizer_sl_clip_int_3x
+ // -----rasterizer_sl_clip_dbl
+ // -----rasterizer_sl_clip_dbl_3x
+ //------------------------------------------------------------------------
+ typedef rasterizer_sl_clip<ras_conv_int> rasterizer_sl_clip_int;
+ typedef rasterizer_sl_clip<ras_conv_int_sat> rasterizer_sl_clip_int_sat;
+ typedef rasterizer_sl_clip<ras_conv_int_3x> rasterizer_sl_clip_int_3x;
+ typedef rasterizer_sl_clip<ras_conv_dbl> rasterizer_sl_clip_dbl;
+ typedef rasterizer_sl_clip<ras_conv_dbl_3x> rasterizer_sl_clip_dbl_3x;
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_renderer_base.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_renderer_base.h
new file mode 100644
index 00000000000..527c62f7891
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_renderer_base.h
@@ -0,0 +1,731 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// class renderer_base
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_RENDERER_BASE_INCLUDED
+#define AGG_RENDERER_BASE_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_rendering_buffer.h"
+
+namespace agg
+{
+
+ //-----------------------------------------------------------renderer_base
+ template<class PixelFormat> class renderer_base
+ {
+ public:
+ typedef PixelFormat pixfmt_type;
+ typedef typename pixfmt_type::color_type color_type;
+ typedef typename pixfmt_type::row_data row_data;
+
+ //--------------------------------------------------------------------
+ renderer_base() : m_ren(0), m_clip_box(1, 1, 0, 0) {}
+ explicit renderer_base(pixfmt_type& ren) :
+ m_ren(&ren),
+ m_clip_box(0, 0, ren.width() - 1, ren.height() - 1)
+ {}
+ void attach(pixfmt_type& ren)
+ {
+ m_ren = &ren;
+ m_clip_box = rect_i(0, 0, ren.width() - 1, ren.height() - 1);
+ }
+
+ //--------------------------------------------------------------------
+ const pixfmt_type& ren() const { return *m_ren; }
+ pixfmt_type& ren() { return *m_ren; }
+
+ //--------------------------------------------------------------------
+ unsigned width() const { return m_ren->width(); }
+ unsigned height() const { return m_ren->height(); }
+
+ //--------------------------------------------------------------------
+ bool clip_box(int x1, int y1, int x2, int y2)
+ {
+ rect_i cb(x1, y1, x2, y2);
+ cb.normalize();
+ if(cb.clip(rect_i(0, 0, width() - 1, height() - 1)))
+ {
+ m_clip_box = cb;
+ return true;
+ }
+ m_clip_box.x1 = 1;
+ m_clip_box.y1 = 1;
+ m_clip_box.x2 = 0;
+ m_clip_box.y2 = 0;
+ return false;
+ }
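+ // Note: (1, 1, 0, 0) is the canonical "empty" clip box (x1 > x2 and
+ // y1 > y2), so after a failed clip every inbox() test and every clipped
+ // drawing call below becomes a no-op.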
+
+ //--------------------------------------------------------------------
+ void reset_clipping(bool visibility)
+ {
+ if(visibility)
+ {
+ m_clip_box.x1 = 0;
+ m_clip_box.y1 = 0;
+ m_clip_box.x2 = width() - 1;
+ m_clip_box.y2 = height() - 1;
+ }
+ else
+ {
+ m_clip_box.x1 = 1;
+ m_clip_box.y1 = 1;
+ m_clip_box.x2 = 0;
+ m_clip_box.y2 = 0;
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void clip_box_naked(int x1, int y1, int x2, int y2)
+ {
+ m_clip_box.x1 = x1;
+ m_clip_box.y1 = y1;
+ m_clip_box.x2 = x2;
+ m_clip_box.y2 = y2;
+ }
+
+ //--------------------------------------------------------------------
+ bool inbox(int x, int y) const
+ {
+ return x >= m_clip_box.x1 && y >= m_clip_box.y1 &&
+ x <= m_clip_box.x2 && y <= m_clip_box.y2;
+ }
+
+ //--------------------------------------------------------------------
+ const rect_i& clip_box() const { return m_clip_box; }
+ int xmin() const { return m_clip_box.x1; }
+ int ymin() const { return m_clip_box.y1; }
+ int xmax() const { return m_clip_box.x2; }
+ int ymax() const { return m_clip_box.y2; }
+
+ //--------------------------------------------------------------------
+ const rect_i& bounding_clip_box() const { return m_clip_box; }
+ int bounding_xmin() const { return m_clip_box.x1; }
+ int bounding_ymin() const { return m_clip_box.y1; }
+ int bounding_xmax() const { return m_clip_box.x2; }
+ int bounding_ymax() const { return m_clip_box.y2; }
+
+ //--------------------------------------------------------------------
+ void clear(const color_type& c)
+ {
+ unsigned y;
+ if(width())
+ {
+ for(y = 0; y < height(); y++)
+ {
+ m_ren->copy_hline(0, y, width(), c);
+ }
+ }
+ }
+
+
+ //--------------------------------------------------------------------
+ void fill(const color_type& c)
+ {
+ unsigned y;
+ if(width())
+ {
+ for(y = 0; y < height(); y++)
+ {
+ m_ren->blend_hline(0, y, width(), c, cover_mask);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void copy_pixel(int x, int y, const color_type& c)
+ {
+ if(inbox(x, y))
+ {
+ m_ren->copy_pixel(x, y, c);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void blend_pixel(int x, int y, const color_type& c, cover_type cover)
+ {
+ if(inbox(x, y))
+ {
+ m_ren->blend_pixel(x, y, c, cover);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ color_type pixel(int x, int y) const
+ {
+ return inbox(x, y) ?
+ m_ren->pixel(x, y) :
+ color_type::no_color();
+ }
+
+ //--------------------------------------------------------------------
+ void copy_hline(int x1, int y, int x2, const color_type& c)
+ {
+ if(x1 > x2) { int t = x2; x2 = x1; x1 = t; }
+ if(y > ymax()) return;
+ if(y < ymin()) return;
+ if(x1 > xmax()) return;
+ if(x2 < xmin()) return;
+
+ if(x1 < xmin()) x1 = xmin();
+ if(x2 > xmax()) x2 = xmax();
+
+ m_ren->copy_hline(x1, y, x2 - x1 + 1, c);
+ }
+
+ //--------------------------------------------------------------------
+ void copy_vline(int x, int y1, int y2, const color_type& c)
+ {
+ if(y1 > y2) { int t = y2; y2 = y1; y1 = t; }
+ if(x > xmax()) return;
+ if(x < xmin()) return;
+ if(y1 > ymax()) return;
+ if(y2 < ymin()) return;
+
+ if(y1 < ymin()) y1 = ymin();
+ if(y2 > ymax()) y2 = ymax();
+
+ m_ren->copy_vline(x, y1, y2 - y1 + 1, c);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_hline(int x1, int y, int x2,
+ const color_type& c, cover_type cover)
+ {
+ if(x1 > x2) { int t = x2; x2 = x1; x1 = t; }
+ if(y > ymax()) return;
+ if(y < ymin()) return;
+ if(x1 > xmax()) return;
+ if(x2 < xmin()) return;
+
+ if(x1 < xmin()) x1 = xmin();
+ if(x2 > xmax()) x2 = xmax();
+
+ m_ren->blend_hline(x1, y, x2 - x1 + 1, c, cover);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_vline(int x, int y1, int y2,
+ const color_type& c, cover_type cover)
+ {
+ if(y1 > y2) { int t = y2; y2 = y1; y1 = t; }
+ if(x > xmax()) return;
+ if(x < xmin()) return;
+ if(y1 > ymax()) return;
+ if(y2 < ymin()) return;
+
+ if(y1 < ymin()) y1 = ymin();
+ if(y2 > ymax()) y2 = ymax();
+
+ m_ren->blend_vline(x, y1, y2 - y1 + 1, c, cover);
+ }
+
+
+ //--------------------------------------------------------------------
+ void copy_bar(int x1, int y1, int x2, int y2, const color_type& c)
+ {
+ rect_i rc(x1, y1, x2, y2);
+ rc.normalize();
+ if(rc.clip(clip_box()))
+ {
+ int y;
+ for(y = rc.y1; y <= rc.y2; y++)
+ {
+ m_ren->copy_hline(rc.x1, y, unsigned(rc.x2 - rc.x1 + 1), c);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void blend_bar(int x1, int y1, int x2, int y2,
+ const color_type& c, cover_type cover)
+ {
+ rect_i rc(x1, y1, x2, y2);
+ rc.normalize();
+ if(rc.clip(clip_box()))
+ {
+ int y;
+ for(y = rc.y1; y <= rc.y2; y++)
+ {
+ m_ren->blend_hline(rc.x1,
+ y,
+ unsigned(rc.x2 - rc.x1 + 1),
+ c,
+ cover);
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void blend_solid_hspan(int x, int y, int len,
+ const color_type& c,
+ const cover_type* covers)
+ {
+ if(y > ymax()) return;
+ if(y < ymin()) return;
+
+ if(x < xmin())
+ {
+ len -= xmin() - x;
+ if(len <= 0) return;
+ covers += xmin() - x;
+ x = xmin();
+ }
+ if(x + len > xmax())
+ {
+ len = xmax() - x + 1;
+ if(len <= 0) return;
+ }
+ m_ren->blend_solid_hspan(x, y, len, c, covers);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_solid_vspan(int x, int y, int len,
+ const color_type& c,
+ const cover_type* covers)
+ {
+ if(x > xmax()) return;
+ if(x < xmin()) return;
+
+ if(y < ymin())
+ {
+ len -= ymin() - y;
+ if(len <= 0) return;
+ covers += ymin() - y;
+ y = ymin();
+ }
+ if(y + len > ymax())
+ {
+ len = ymax() - y + 1;
+ if(len <= 0) return;
+ }
+ m_ren->blend_solid_vspan(x, y, len, c, covers);
+ }
+
+
+ //--------------------------------------------------------------------
+ void copy_color_hspan(int x, int y, int len, const color_type* colors)
+ {
+ if(y > ymax()) return;
+ if(y < ymin()) return;
+
+ if(x < xmin())
+ {
+ int d = xmin() - x;
+ len -= d;
+ if(len <= 0) return;
+ colors += d;
+ x = xmin();
+ }
+ if(x + len > xmax())
+ {
+ len = xmax() - x + 1;
+ if(len <= 0) return;
+ }
+ m_ren->copy_color_hspan(x, y, len, colors);
+ }
+
+
+ //--------------------------------------------------------------------
+ void copy_color_vspan(int x, int y, int len, const color_type* colors)
+ {
+ if(x > xmax()) return;
+ if(x < xmin()) return;
+
+ if(y < ymin())
+ {
+ int d = ymin() - y;
+ len -= d;
+ if(len <= 0) return;
+ colors += d;
+ y = ymin();
+ }
+ if(y + len > ymax())
+ {
+ len = ymax() - y + 1;
+ if(len <= 0) return;
+ }
+ m_ren->copy_color_vspan(x, y, len, colors);
+ }
+
+
+ //--------------------------------------------------------------------
+ void blend_color_hspan(int x, int y, int len,
+ const color_type* colors,
+ const cover_type* covers,
+ cover_type cover = agg::cover_full)
+ {
+ if(y > ymax()) return;
+ if(y < ymin()) return;
+
+ if(x < xmin())
+ {
+ int d = xmin() - x;
+ len -= d;
+ if(len <= 0) return;
+ if(covers) covers += d;
+ colors += d;
+ x = xmin();
+ }
+ if(x + len > xmax())
+ {
+ len = xmax() - x + 1;
+ if(len <= 0) return;
+ }
+ m_ren->blend_color_hspan(x, y, len, colors, covers, cover);
+ }
+
+ //--------------------------------------------------------------------
+ void blend_color_vspan(int x, int y, int len,
+ const color_type* colors,
+ const cover_type* covers,
+ cover_type cover = agg::cover_full)
+ {
+ if(x > xmax()) return;
+ if(x < xmin()) return;
+
+ if(y < ymin())
+ {
+ int d = ymin() - y;
+ len -= d;
+ if(len <= 0) return;
+ if(covers) covers += d;
+ colors += d;
+ y = ymin();
+ }
+ if(y + len > ymax())
+ {
+ len = ymax() - y + 1;
+ if(len <= 0) return;
+ }
+ m_ren->blend_color_vspan(x, y, len, colors, covers, cover);
+ }
+
+ //--------------------------------------------------------------------
+ rect_i clip_rect_area(rect_i& dst, rect_i& src, int wsrc, int hsrc) const
+ {
+ rect_i rc(0,0,0,0);
+ rect_i cb = clip_box();
+ ++cb.x2;
+ ++cb.y2;
+
+ if(src.x1 < 0)
+ {
+ dst.x1 -= src.x1;
+ src.x1 = 0;
+ }
+ if(src.y1 < 0)
+ {
+ dst.y1 -= src.y1;
+ src.y1 = 0;
+ }
+
+ if(src.x2 > wsrc) src.x2 = wsrc;
+ if(src.y2 > hsrc) src.y2 = hsrc;
+
+ if(dst.x1 < cb.x1)
+ {
+ src.x1 += cb.x1 - dst.x1;
+ dst.x1 = cb.x1;
+ }
+ if(dst.y1 < cb.y1)
+ {
+ src.y1 += cb.y1 - dst.y1;
+ dst.y1 = cb.y1;
+ }
+
+ if(dst.x2 > cb.x2) dst.x2 = cb.x2;
+ if(dst.y2 > cb.y2) dst.y2 = cb.y2;
+
+ rc.x2 = dst.x2 - dst.x1;
+ rc.y2 = dst.y2 - dst.y1;
+
+ if(rc.x2 > src.x2 - src.x1) rc.x2 = src.x2 - src.x1;
+ if(rc.y2 > src.y2 - src.y1) rc.y2 = src.y2 - src.y1;
+ return rc;
+ }
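+ // On return rc.x2 and rc.y2 hold the width and height of the region that
+ // can actually be transferred (rc.x1 and rc.y1 stay 0), with dst and src
+ // adjusted in place to its top-left corners; the callers below treat
+ // rc.x2 <= 0 or rc.y2 <= 0 as "nothing to copy".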
+
+ //--------------------------------------------------------------------
+ template<class RenBuf>
+ void copy_from(const RenBuf& src,
+ const rect_i* rect_src_ptr = 0,
+ int dx = 0,
+ int dy = 0)
+ {
+ rect_i rsrc(0, 0, src.width(), src.height());
+ if(rect_src_ptr)
+ {
+ rsrc.x1 = rect_src_ptr->x1;
+ rsrc.y1 = rect_src_ptr->y1;
+ rsrc.x2 = rect_src_ptr->x2 + 1;
+ rsrc.y2 = rect_src_ptr->y2 + 1;
+ }
+
+ // Version with xdst, ydst (absolute positioning)
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
+
+ // Version with dx, dy (relative positioning)
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
+
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
+
+ if(rc.x2 > 0)
+ {
+ int incy = 1;
+ if(rdst.y1 > rsrc.y1)
+ {
+ rsrc.y1 += rc.y2 - 1;
+ rdst.y1 += rc.y2 - 1;
+ incy = -1;
+ }
+ while(rc.y2 > 0)
+ {
+ m_ren->copy_from(src,
+ rdst.x1, rdst.y1,
+ rsrc.x1, rsrc.y1,
+ rc.x2);
+ rdst.y1 += incy;
+ rsrc.y1 += incy;
+ --rc.y2;
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ template<class SrcPixelFormatRenderer>
+ void blend_from(const SrcPixelFormatRenderer& src,
+ const rect_i* rect_src_ptr = 0,
+ int dx = 0,
+ int dy = 0,
+ cover_type cover = agg::cover_full)
+ {
+ rect_i rsrc(0, 0, src.width(), src.height());
+ if(rect_src_ptr)
+ {
+ rsrc.x1 = rect_src_ptr->x1;
+ rsrc.y1 = rect_src_ptr->y1;
+ rsrc.x2 = rect_src_ptr->x2 + 1;
+ rsrc.y2 = rect_src_ptr->y2 + 1;
+ }
+
+ // Version with xdst, ydst (absolute positioning)
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
+
+ // Version with dx, dy (relative positioning)
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
+
+ if(rc.x2 > 0)
+ {
+ int incy = 1;
+ if(rdst.y1 > rsrc.y1)
+ {
+ rsrc.y1 += rc.y2 - 1;
+ rdst.y1 += rc.y2 - 1;
+ incy = -1;
+ }
+ while(rc.y2 > 0)
+ {
+ typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
+ if(rw.ptr)
+ {
+ int x1src = rsrc.x1;
+ int x1dst = rdst.x1;
+ int len = rc.x2;
+ if(rw.x1 > x1src)
+ {
+ x1dst += rw.x1 - x1src;
+ len -= rw.x1 - x1src;
+ x1src = rw.x1;
+ }
+ if(len > 0)
+ {
+ if(x1src + len-1 > rw.x2)
+ {
+ len -= x1src + len - rw.x2 - 1;
+ }
+ if(len > 0)
+ {
+ m_ren->blend_from(src,
+ x1dst, rdst.y1,
+ x1src, rsrc.y1,
+ len,
+ cover);
+ }
+ }
+ }
+ rdst.y1 += incy;
+ rsrc.y1 += incy;
+ --rc.y2;
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ template<class SrcPixelFormatRenderer>
+ void blend_from_color(const SrcPixelFormatRenderer& src,
+ const color_type& color,
+ const rect_i* rect_src_ptr = 0,
+ int dx = 0,
+ int dy = 0,
+ cover_type cover = agg::cover_full)
+ {
+ rect_i rsrc(0, 0, src.width(), src.height());
+ if(rect_src_ptr)
+ {
+ rsrc.x1 = rect_src_ptr->x1;
+ rsrc.y1 = rect_src_ptr->y1;
+ rsrc.x2 = rect_src_ptr->x2 + 1;
+ rsrc.y2 = rect_src_ptr->y2 + 1;
+ }
+
+ // Version with xdst, ydst (absolute positioning)
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
+
+ // Version with dx, dy (relative positioning)
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
+
+ if(rc.x2 > 0)
+ {
+ int incy = 1;
+ if(rdst.y1 > rsrc.y1)
+ {
+ rsrc.y1 += rc.y2 - 1;
+ rdst.y1 += rc.y2 - 1;
+ incy = -1;
+ }
+ while(rc.y2 > 0)
+ {
+ typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
+ if(rw.ptr)
+ {
+ int x1src = rsrc.x1;
+ int x1dst = rdst.x1;
+ int len = rc.x2;
+ if(rw.x1 > x1src)
+ {
+ x1dst += rw.x1 - x1src;
+ len -= rw.x1 - x1src;
+ x1src = rw.x1;
+ }
+ if(len > 0)
+ {
+ if(x1src + len-1 > rw.x2)
+ {
+ len -= x1src + len - rw.x2 - 1;
+ }
+ if(len > 0)
+ {
+ m_ren->blend_from_color(src,
+ color,
+ x1dst, rdst.y1,
+ x1src, rsrc.y1,
+ len,
+ cover);
+ }
+ }
+ }
+ rdst.y1 += incy;
+ rsrc.y1 += incy;
+ --rc.y2;
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------
+ template<class SrcPixelFormatRenderer>
+ void blend_from_lut(const SrcPixelFormatRenderer& src,
+ const color_type* color_lut,
+ const rect_i* rect_src_ptr = 0,
+ int dx = 0,
+ int dy = 0,
+ cover_type cover = agg::cover_full)
+ {
+ rect_i rsrc(0, 0, src.width(), src.height());
+ if(rect_src_ptr)
+ {
+ rsrc.x1 = rect_src_ptr->x1;
+ rsrc.y1 = rect_src_ptr->y1;
+ rsrc.x2 = rect_src_ptr->x2 + 1;
+ rsrc.y2 = rect_src_ptr->y2 + 1;
+ }
+
+ // Version with xdst, ydst (absolute positioning)
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
+
+ // Version with dx, dy (relative positioning)
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
+
+ if(rc.x2 > 0)
+ {
+ int incy = 1;
+ if(rdst.y1 > rsrc.y1)
+ {
+ rsrc.y1 += rc.y2 - 1;
+ rdst.y1 += rc.y2 - 1;
+ incy = -1;
+ }
+ while(rc.y2 > 0)
+ {
+ typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
+ if(rw.ptr)
+ {
+ int x1src = rsrc.x1;
+ int x1dst = rdst.x1;
+ int len = rc.x2;
+ if(rw.x1 > x1src)
+ {
+ x1dst += rw.x1 - x1src;
+ len -= rw.x1 - x1src;
+ x1src = rw.x1;
+ }
+ if(len > 0)
+ {
+ if(x1src + len-1 > rw.x2)
+ {
+ len -= x1src + len - rw.x2 - 1;
+ }
+ if(len > 0)
+ {
+ m_ren->blend_from_lut(src,
+ color_lut,
+ x1dst, rdst.y1,
+ x1src, rsrc.y1,
+ len,
+ cover);
+ }
+ }
+ }
+ rdst.y1 += incy;
+ rsrc.y1 += incy;
+ --rc.y2;
+ }
+ }
+ }
+
+ private:
+ pixfmt_type* m_ren;
+ rect_i m_clip_box;
+ };
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_renderer_scanline.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_renderer_scanline.h
new file mode 100644
index 00000000000..6d65056c537
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_renderer_scanline.h
@@ -0,0 +1,852 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_RENDERER_SCANLINE_INCLUDED
+#define AGG_RENDERER_SCANLINE_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_renderer_base.h"
+
+namespace agg
+{
+
+ //================================================render_scanline_aa_solid
+ template<class Scanline, class BaseRenderer, class ColorT>
+ void render_scanline_aa_solid(const Scanline& sl,
+ BaseRenderer& ren,
+ const ColorT& color)
+ {
+ int y = sl.y();
+ unsigned num_spans = sl.num_spans();
+ typename Scanline::const_iterator span = sl.begin();
+
+ for(;;)
+ {
+ int x = span->x;
+ if(span->len > 0)
+ {
+ ren.blend_solid_hspan(x, y, (unsigned)span->len,
+ color,
+ span->covers);
+ }
+ else
+ {
+ ren.blend_hline(x, y, (unsigned)(x - span->len - 1),
+ color,
+ *(span->covers));
+ }
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+
+ //===============================================render_scanlines_aa_solid
+ template<class Rasterizer, class Scanline,
+ class BaseRenderer, class ColorT>
+ void render_scanlines_aa_solid(Rasterizer& ras, Scanline& sl,
+ BaseRenderer& ren, const ColorT& color)
+ {
+ if(ras.rewind_scanlines())
+ {
+ // Explicitly convert "color" to the BaseRenderer color type.
+ // For example, it can be called with color type "rgba", while
+ // "rgba8" is needed. Otherwise it will be implicitly
+ // converted in the loop many times.
+ //----------------------
+ typename BaseRenderer::color_type ren_color(color);
+
+ sl.reset(ras.min_x(), ras.max_x());
+ while(ras.sweep_scanline(sl))
+ {
+ //render_scanline_aa_solid(sl, ren, ren_color);
+
+ // This code is equivalent to the above call (copy/paste).
+ // It's just a "manual" optimization for old compilers,
+ // like Microsoft Visual C++ v6.0
+ //-------------------------------
+ int y = sl.y();
+ unsigned num_spans = sl.num_spans();
+ typename Scanline::const_iterator span = sl.begin();
+
+ for(;;)
+ {
+ int x = span->x;
+ if(span->len > 0)
+ {
+ ren.blend_solid_hspan(x, y, (unsigned)span->len,
+ ren_color,
+ span->covers);
+ }
+ else
+ {
+ ren.blend_hline(x, y, (unsigned)(x - span->len - 1),
+ ren_color,
+ *(span->covers));
+ }
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+ }
+ }
+
+ //==============================================renderer_scanline_aa_solid
+ template<class BaseRenderer> class renderer_scanline_aa_solid
+ {
+ public:
+ typedef BaseRenderer base_ren_type;
+ typedef typename base_ren_type::color_type color_type;
+
+ //--------------------------------------------------------------------
+ renderer_scanline_aa_solid() : m_ren(0) {}
+ explicit renderer_scanline_aa_solid(base_ren_type& ren) : m_ren(&ren) {}
+ void attach(base_ren_type& ren)
+ {
+ m_ren = &ren;
+ }
+
+ //--------------------------------------------------------------------
+ void color(const color_type& c) { m_color = c; }
+ const color_type& color() const { return m_color; }
+
+ //--------------------------------------------------------------------
+ void prepare() {}
+
+ //--------------------------------------------------------------------
+ template<class Scanline> void render(const Scanline& sl)
+ {
+ render_scanline_aa_solid(sl, *m_ren, m_color);
+ }
+
+ private:
+ base_ren_type* m_ren;
+ color_type m_color;
+ };
+
+
+
+
+
+
+
+
+
+
+
+
+
+ //======================================================render_scanline_aa
+ template<class Scanline, class BaseRenderer,
+ class SpanAllocator, class SpanGenerator>
+ void render_scanline_aa(const Scanline& sl, BaseRenderer& ren,
+ SpanAllocator& alloc, SpanGenerator& span_gen)
+ {
+ int y = sl.y();
+
+ unsigned num_spans = sl.num_spans();
+ typename Scanline::const_iterator span = sl.begin();
+ for(;;)
+ {
+ int x = span->x;
+ int len = span->len;
+ const typename Scanline::cover_type* covers = span->covers;
+
+ if(len < 0) len = -len;
+ typename BaseRenderer::color_type* colors = alloc.allocate(len);
+ span_gen.generate(colors, x, y, len);
+ ren.blend_color_hspan(x, y, len, colors,
+ (span->len < 0) ? 0 : covers, *covers);
+
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+
+ //=====================================================render_scanlines_aa
+ template<class Rasterizer, class Scanline, class BaseRenderer,
+ class SpanAllocator, class SpanGenerator>
+ void render_scanlines_aa(Rasterizer& ras, Scanline& sl, BaseRenderer& ren,
+ SpanAllocator& alloc, SpanGenerator& span_gen)
+ {
+ if(ras.rewind_scanlines())
+ {
+ sl.reset(ras.min_x(), ras.max_x());
+ span_gen.prepare();
+ while(ras.sweep_scanline(sl))
+ {
+ render_scanline_aa(sl, ren, alloc, span_gen);
+ }
+ }
+ }
+
+ //====================================================renderer_scanline_aa
+ template<class BaseRenderer, class SpanAllocator, class SpanGenerator>
+ class renderer_scanline_aa
+ {
+ public:
+ typedef BaseRenderer base_ren_type;
+ typedef SpanAllocator alloc_type;
+ typedef SpanGenerator span_gen_type;
+
+ //--------------------------------------------------------------------
+ renderer_scanline_aa() : m_ren(0), m_alloc(0), m_span_gen(0) {}
+ renderer_scanline_aa(base_ren_type& ren,
+ alloc_type& alloc,
+ span_gen_type& span_gen) :
+ m_ren(&ren),
+ m_alloc(&alloc),
+ m_span_gen(&span_gen)
+ {}
+ void attach(base_ren_type& ren,
+ alloc_type& alloc,
+ span_gen_type& span_gen)
+ {
+ m_ren = &ren;
+ m_alloc = &alloc;
+ m_span_gen = &span_gen;
+ }
+
+ //--------------------------------------------------------------------
+ void prepare() { m_span_gen->prepare(); }
+
+ //--------------------------------------------------------------------
+ template<class Scanline> void render(const Scanline& sl)
+ {
+ render_scanline_aa(sl, *m_ren, *m_alloc, *m_span_gen);
+ }
+
+ private:
+ base_ren_type* m_ren;
+ alloc_type* m_alloc;
+ span_gen_type* m_span_gen;
+ };
+
+
+
+
+
+
+ //===============================================render_scanline_bin_solid
+ template<class Scanline, class BaseRenderer, class ColorT>
+ void render_scanline_bin_solid(const Scanline& sl,
+ BaseRenderer& ren,
+ const ColorT& color)
+ {
+ unsigned num_spans = sl.num_spans();
+ typename Scanline::const_iterator span = sl.begin();
+ for(;;)
+ {
+ ren.blend_hline(span->x,
+ sl.y(),
+ span->x - 1 + ((span->len < 0) ?
+ -span->len :
+ span->len),
+ color,
+ cover_full);
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+
+ //==============================================render_scanlines_bin_solid
+ template<class Rasterizer, class Scanline,
+ class BaseRenderer, class ColorT>
+ void render_scanlines_bin_solid(Rasterizer& ras, Scanline& sl,
+ BaseRenderer& ren, const ColorT& color)
+ {
+ if(ras.rewind_scanlines())
+ {
+ // Explicitly convert "color" to the BaseRenderer color type.
+ // For example, it can be called with color type "rgba", while
+ // "rgba8" is needed. Otherwise it will be implicitly
+ // converted in the loop many times.
+ //----------------------
+ typename BaseRenderer::color_type ren_color(color);
+
+ sl.reset(ras.min_x(), ras.max_x());
+ while(ras.sweep_scanline(sl))
+ {
+ //render_scanline_bin_solid(sl, ren, ren_color);
+
+ // This code is equivalent to the above call (copy/paste).
+ // It's just a "manual" optimization for old compilers,
+ // like Microsoft Visual C++ v6.0
+ //-------------------------------
+ unsigned num_spans = sl.num_spans();
+ typename Scanline::const_iterator span = sl.begin();
+ for(;;)
+ {
+ ren.blend_hline(span->x,
+ sl.y(),
+ span->x - 1 + ((span->len < 0) ?
+ -span->len :
+ span->len),
+ ren_color,
+ cover_full);
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+ }
+ }
+
+ //=============================================renderer_scanline_bin_solid
+ template<class BaseRenderer> class renderer_scanline_bin_solid
+ {
+ public:
+ typedef BaseRenderer base_ren_type;
+ typedef typename base_ren_type::color_type color_type;
+
+ //--------------------------------------------------------------------
+ renderer_scanline_bin_solid() : m_ren(0) {}
+ explicit renderer_scanline_bin_solid(base_ren_type& ren) : m_ren(&ren) {}
+ void attach(base_ren_type& ren)
+ {
+ m_ren = &ren;
+ }
+
+ //--------------------------------------------------------------------
+ void color(const color_type& c) { m_color = c; }
+ const color_type& color() const { return m_color; }
+
+ //--------------------------------------------------------------------
+ void prepare() {}
+
+ //--------------------------------------------------------------------
+ template<class Scanline> void render(const Scanline& sl)
+ {
+ render_scanline_bin_solid(sl, *m_ren, m_color);
+ }
+
+ private:
+ base_ren_type* m_ren;
+ color_type m_color;
+ };
+
+
+
+
+
+
+
+
+ //======================================================render_scanline_bin
+ template<class Scanline, class BaseRenderer,
+ class SpanAllocator, class SpanGenerator>
+ void render_scanline_bin(const Scanline& sl, BaseRenderer& ren,
+ SpanAllocator& alloc, SpanGenerator& span_gen)
+ {
+ int y = sl.y();
+
+ unsigned num_spans = sl.num_spans();
+ typename Scanline::const_iterator span = sl.begin();
+ for(;;)
+ {
+ int x = span->x;
+ int len = span->len;
+ if(len < 0) len = -len;
+ typename BaseRenderer::color_type* colors = alloc.allocate(len);
+ span_gen.generate(colors, x, y, len);
+ ren.blend_color_hspan(x, y, len, colors, 0, cover_full);
+ if(--num_spans == 0) break;
+ ++span;
+ }
+ }
+
+ //=====================================================render_scanlines_bin
+ template<class Rasterizer, class Scanline, class BaseRenderer,
+ class SpanAllocator, class SpanGenerator>
+ void render_scanlines_bin(Rasterizer& ras, Scanline& sl, BaseRenderer& ren,
+ SpanAllocator& alloc, SpanGenerator& span_gen)
+ {
+ if(ras.rewind_scanlines())
+ {
+ sl.reset(ras.min_x(), ras.max_x());
+ span_gen.prepare();
+ while(ras.sweep_scanline(sl))
+ {
+ render_scanline_bin(sl, ren, alloc, span_gen);
+ }
+ }
+ }
+
+ //====================================================renderer_scanline_bin
+ template<class BaseRenderer, class SpanAllocator, class SpanGenerator>
+ class renderer_scanline_bin
+ {
+ public:
+ typedef BaseRenderer base_ren_type;
+ typedef SpanAllocator alloc_type;
+ typedef SpanGenerator span_gen_type;
+
+ //--------------------------------------------------------------------
+ renderer_scanline_bin() : m_ren(0), m_alloc(0), m_span_gen(0) {}
+ renderer_scanline_bin(base_ren_type& ren,
+ alloc_type& alloc,
+ span_gen_type& span_gen) :
+ m_ren(&ren),
+ m_alloc(&alloc),
+ m_span_gen(&span_gen)
+ {}
+ void attach(base_ren_type& ren,
+ alloc_type& alloc,
+ span_gen_type& span_gen)
+ {
+ m_ren = &ren;
+ m_alloc = &alloc;
+ m_span_gen = &span_gen;
+ }
+
+ //--------------------------------------------------------------------
+ void prepare() { m_span_gen->prepare(); }
+
+ //--------------------------------------------------------------------
+ template<class Scanline> void render(const Scanline& sl)
+ {
+ render_scanline_bin(sl, *m_ren, *m_alloc, *m_span_gen);
+ }
+
+ private:
+ base_ren_type* m_ren;
+ alloc_type* m_alloc;
+ span_gen_type* m_span_gen;
+ };
+
+
+
+
+
+
+
+
+
+
+ //========================================================render_scanlines
+ template<class Rasterizer, class Scanline, class Renderer>
+ void render_scanlines(Rasterizer& ras, Scanline& sl, Renderer& ren)
+ {
+ if(ras.rewind_scanlines())
+ {
+ sl.reset(ras.min_x(), ras.max_x());
+ ren.prepare();
+ while(ras.sweep_scanline(sl))
+ {
+ ren.render(sl);
+ }
+ }
+ }
+
+ //========================================================render_all_paths
+ template<class Rasterizer, class Scanline, class Renderer,
+ class VertexSource, class ColorStorage, class PathId>
+ void render_all_paths(Rasterizer& ras,
+ Scanline& sl,
+ Renderer& r,
+ VertexSource& vs,
+ const ColorStorage& as,
+ const PathId& path_id,
+ unsigned num_paths)
+ {
+ for(unsigned i = 0; i < num_paths; i++)
+ {
+ ras.reset();
+ ras.add_path(vs, path_id[i]);
+ r.color(as[i]);
+ render_scanlines(ras, sl, r);
+ }
+ }
+
+
+
+
+
+
+ //=============================================render_scanlines_compound
+ template<class Rasterizer,
+ class ScanlineAA,
+ class ScanlineBin,
+ class BaseRenderer,
+ class SpanAllocator,
+ class StyleHandler>
+ void render_scanlines_compound(Rasterizer& ras,
+ ScanlineAA& sl_aa,
+ ScanlineBin& sl_bin,
+ BaseRenderer& ren,
+ SpanAllocator& alloc,
+ StyleHandler& sh)
+ {
+ if(ras.rewind_scanlines())
+ {
+ int min_x = ras.min_x();
+ int len = ras.max_x() - min_x + 2;
+ sl_aa.reset(min_x, ras.max_x());
+ sl_bin.reset(min_x, ras.max_x());
+
+ typedef typename BaseRenderer::color_type color_type;
+ color_type* color_span = alloc.allocate(len * 2);
+ color_type* mix_buffer = color_span + len;
+ unsigned num_spans;
+
+ unsigned num_styles;
+ unsigned style;
+ bool solid;
+ while((num_styles = ras.sweep_styles()) > 0)
+ {
+ typename ScanlineAA::const_iterator span_aa;
+ if(num_styles == 1)
+ {
+ // Optimization for a single style. Happens often
+ //-------------------------
+ if(ras.sweep_scanline(sl_aa, 0))
+ {
+ style = ras.style(0);
+ if(sh.is_solid(style))
+ {
+ // Just solid fill
+ //-----------------------
+ render_scanline_aa_solid(sl_aa, ren, sh.color(style));
+ }
+ else
+ {
+ // Arbitrary span generator
+ //-----------------------
+ span_aa = sl_aa.begin();
+ num_spans = sl_aa.num_spans();
+ for(;;)
+ {
+ len = span_aa->len;
+ sh.generate_span(color_span,
+ span_aa->x,
+ sl_aa.y(),
+ len,
+ style);
+
+ ren.blend_color_hspan(span_aa->x,
+ sl_aa.y(),
+ span_aa->len,
+ color_span,
+ span_aa->covers);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ }
+ }
+ else
+ {
+ if(ras.sweep_scanline(sl_bin, -1))
+ {
+ // Clear the spans of the mix_buffer
+ //--------------------
+ typename ScanlineBin::const_iterator span_bin = sl_bin.begin();
+ num_spans = sl_bin.num_spans();
+ for(;;)
+ {
+ memset(mix_buffer + span_bin->x - min_x,
+ 0,
+ span_bin->len * sizeof(color_type));
+
+ if(--num_spans == 0) break;
+ ++span_bin;
+ }
+
+ unsigned i;
+ for(i = 0; i < num_styles; i++)
+ {
+ style = ras.style(i);
+ solid = sh.is_solid(style);
+
+ if(ras.sweep_scanline(sl_aa, i))
+ {
+ color_type* colors;
+ color_type* cspan;
+ typename ScanlineAA::cover_type* covers;
+ span_aa = sl_aa.begin();
+ num_spans = sl_aa.num_spans();
+ if(solid)
+ {
+ // Just solid fill
+ //-----------------------
+ for(;;)
+ {
+ color_type c = sh.color(style);
+ len = span_aa->len;
+ colors = mix_buffer + span_aa->x - min_x;
+ covers = span_aa->covers;
+ do
+ {
+ if(*covers == cover_full)
+ {
+ *colors = c;
+ }
+ else
+ {
+ colors->add(c, *covers);
+ }
+ ++colors;
+ ++covers;
+ }
+ while(--len);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ else
+ {
+ // Arbitrary span generator
+ //-----------------------
+ for(;;)
+ {
+ len = span_aa->len;
+ colors = mix_buffer + span_aa->x - min_x;
+ cspan = color_span;
+ sh.generate_span(cspan,
+ span_aa->x,
+ sl_aa.y(),
+ len,
+ style);
+ covers = span_aa->covers;
+ do
+ {
+ if(*covers == cover_full)
+ {
+ *colors = *cspan;
+ }
+ else
+ {
+ colors->add(*cspan, *covers);
+ }
+ ++cspan;
+ ++colors;
+ ++covers;
+ }
+ while(--len);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ }
+ }
+
+ // Emit the blended result as a color hspan
+ //-------------------------
+ span_bin = sl_bin.begin();
+ num_spans = sl_bin.num_spans();
+ for(;;)
+ {
+ ren.blend_color_hspan(span_bin->x,
+ sl_bin.y(),
+ span_bin->len,
+ mix_buffer + span_bin->x - min_x,
+ 0,
+ cover_full);
+ if(--num_spans == 0) break;
+ ++span_bin;
+ }
+ } // if(ras.sweep_scanline(sl_bin, -1))
+ } // if(num_styles == 1) ... else
+ } // while((num_styles = ras.sweep_styles()) > 0)
+ } // if(ras.rewind_scanlines())
+ }
+
+ //=======================================render_scanlines_compound_layered
+ template<class Rasterizer,
+ class ScanlineAA,
+ class BaseRenderer,
+ class SpanAllocator,
+ class StyleHandler>
+ void render_scanlines_compound_layered(Rasterizer& ras,
+ ScanlineAA& sl_aa,
+ BaseRenderer& ren,
+ SpanAllocator& alloc,
+ StyleHandler& sh)
+ {
+ if(ras.rewind_scanlines())
+ {
+ int min_x = ras.min_x();
+ int len = ras.max_x() - min_x + 2;
+ sl_aa.reset(min_x, ras.max_x());
+
+ typedef typename BaseRenderer::color_type color_type;
+ color_type* color_span = alloc.allocate(len * 2);
+ color_type* mix_buffer = color_span + len;
+ cover_type* cover_buffer = ras.allocate_cover_buffer(len);
+ unsigned num_spans;
+
+ unsigned num_styles;
+ unsigned style;
+ bool solid;
+ while((num_styles = ras.sweep_styles()) > 0)
+ {
+ typename ScanlineAA::const_iterator span_aa;
+ if(num_styles == 1)
+ {
+ // Optimization for a single style. Happens often
+ //-------------------------
+ if(ras.sweep_scanline(sl_aa, 0))
+ {
+ style = ras.style(0);
+ if(sh.is_solid(style))
+ {
+ // Just solid fill
+ //-----------------------
+ render_scanline_aa_solid(sl_aa, ren, sh.color(style));
+ }
+ else
+ {
+ // Arbitrary span generator
+ //-----------------------
+ span_aa = sl_aa.begin();
+ num_spans = sl_aa.num_spans();
+ for(;;)
+ {
+ len = span_aa->len;
+ sh.generate_span(color_span,
+ span_aa->x,
+ sl_aa.y(),
+ len,
+ style);
+
+ ren.blend_color_hspan(span_aa->x,
+ sl_aa.y(),
+ span_aa->len,
+ color_span,
+ span_aa->covers);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ }
+ }
+ else
+ {
+ int sl_start = ras.scanline_start();
+ unsigned sl_len = ras.scanline_length();
+
+ if(sl_len)
+ {
+ memset(mix_buffer + sl_start - min_x,
+ 0,
+ sl_len * sizeof(color_type));
+
+ memset(cover_buffer + sl_start - min_x,
+ 0,
+ sl_len * sizeof(cover_type));
+
+ int sl_y = 0x7FFFFFFF;
+ unsigned i;
+ for(i = 0; i < num_styles; i++)
+ {
+ style = ras.style(i);
+ solid = sh.is_solid(style);
+
+ if(ras.sweep_scanline(sl_aa, i))
+ {
+ unsigned cover;
+ color_type* colors;
+ color_type* cspan;
+ cover_type* src_covers;
+ cover_type* dst_covers;
+ span_aa = sl_aa.begin();
+ num_spans = sl_aa.num_spans();
+ sl_y = sl_aa.y();
+ if(solid)
+ {
+ // Just solid fill
+ //-----------------------
+ for(;;)
+ {
+ color_type c = sh.color(style);
+ len = span_aa->len;
+ colors = mix_buffer + span_aa->x - min_x;
+ src_covers = span_aa->covers;
+ dst_covers = cover_buffer + span_aa->x - min_x;
+ do
+ {
+ cover = *src_covers;
+ if(*dst_covers + cover > cover_full)
+ {
+ cover = cover_full - *dst_covers;
+ }
+ if(cover)
+ {
+ colors->add(c, cover);
+ *dst_covers += cover;
+ }
+ ++colors;
+ ++src_covers;
+ ++dst_covers;
+ }
+ while(--len);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ else
+ {
+ // Arbitrary span generator
+ //-----------------------
+ for(;;)
+ {
+ len = span_aa->len;
+ colors = mix_buffer + span_aa->x - min_x;
+ cspan = color_span;
+ sh.generate_span(cspan,
+ span_aa->x,
+ sl_aa.y(),
+ len,
+ style);
+ src_covers = span_aa->covers;
+ dst_covers = cover_buffer + span_aa->x - min_x;
+ do
+ {
+ cover = *src_covers;
+ if(*dst_covers + cover > cover_full)
+ {
+ cover = cover_full - *dst_covers;
+ }
+ if(cover)
+ {
+ colors->add(*cspan, cover);
+ *dst_covers += cover;
+ }
+ ++cspan;
+ ++colors;
+ ++src_covers;
+ ++dst_covers;
+ }
+ while(--len);
+ if(--num_spans == 0) break;
+ ++span_aa;
+ }
+ }
+ }
+ }
+ ren.blend_color_hspan(sl_start,
+ sl_y,
+ sl_len,
+ mix_buffer + sl_start - min_x,
+ 0,
+ cover_full);
+ } //if(sl_len)
+ } //if(num_styles == 1) ... else
+ } //while((num_styles = ras.sweep_styles()) > 0)
+ } //if(ras.rewind_scanlines())
+ }
+
+
+}
+
+#endif
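
The free functions and renderer adapters in this header all assume the same rasterizer -> scanline -> base renderer triple. Here is a minimal sketch, not part of this diff, of the anti-aliased solid-fill path; it assumes the stock rasterizer_scanline_aa and pixfmt_rgba32 headers, and the geometry and colors are illustrative.

    #include "agg_rasterizer_scanline_aa.h"
    #include "agg_scanline_p.h"
    #include "agg_pixfmt_rgba.h"
    #include "agg_renderer_base.h"
    #include "agg_renderer_scanline.h"

    void fill_triangle()
    {
        static unsigned char mem[200 * 200 * 4];
        agg::rendering_buffer rbuf(mem, 200, 200, 200 * 4);
        agg::pixfmt_rgba32 pf(rbuf);
        agg::renderer_base<agg::pixfmt_rgba32> rb(pf);
        rb.clear(agg::rgba8(255, 255, 255));

        agg::rasterizer_scanline_aa<> ras;   // produces coverage spans
        agg::scanline_p8 sl;                 // packed scanline container

        ras.move_to_d(20.0, 20.0);
        ras.line_to_d(180.0, 30.0);
        ras.line_to_d(100.0, 170.0);

        // Free-function form; renderer_scanline_aa_solid wraps the same loop
        // behind the prepare()/render() interface used by render_scanlines().
        agg::render_scanlines_aa_solid(ras, sl, rb, agg::rgba8(40, 90, 200));
    }
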
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rendering_buffer.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rendering_buffer.h
new file mode 100644
index 00000000000..191347f63e2
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_rendering_buffer.h
@@ -0,0 +1,300 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// class rendering_buffer
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_RENDERING_BUFFER_INCLUDED
+#define AGG_RENDERING_BUFFER_INCLUDED
+
+#include "agg_array.h"
+
+namespace agg
+{
+
+ //===========================================================row_accessor
+ template<class T> class row_accessor
+ {
+ public:
+ typedef const_row_info<T> row_data;
+
+ //-------------------------------------------------------------------
+ row_accessor() :
+ m_buf(0),
+ m_start(0),
+ m_width(0),
+ m_height(0),
+ m_stride(0)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ row_accessor(T* buf, unsigned width, unsigned height, int stride) :
+ m_buf(0),
+ m_start(0),
+ m_width(0),
+ m_height(0),
+ m_stride(0)
+ {
+ attach(buf, width, height, stride);
+ }
+
+
+ //--------------------------------------------------------------------
+ void attach(T* buf, unsigned width, unsigned height, int stride)
+ {
+ m_buf = m_start = buf;
+ m_width = width;
+ m_height = height;
+ m_stride = stride;
+ if(stride < 0)
+ {
+ m_start = m_buf - (AGG_INT64)(height - 1) * stride;
+ }
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE T* buf() { return m_buf; }
+ AGG_INLINE const T* buf() const { return m_buf; }
+ AGG_INLINE unsigned width() const { return m_width; }
+ AGG_INLINE unsigned height() const { return m_height; }
+ AGG_INLINE int stride() const { return m_stride; }
+ AGG_INLINE unsigned stride_abs() const
+ {
+ return (m_stride < 0) ? unsigned(-m_stride) : unsigned(m_stride);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE T* row_ptr(int, int y, unsigned)
+ {
+ return m_start + y * (AGG_INT64)m_stride;
+ }
+ AGG_INLINE T* row_ptr(int y) { return m_start + y * (AGG_INT64)m_stride; }
+ AGG_INLINE const T* row_ptr(int y) const { return m_start + y * (AGG_INT64)m_stride; }
+ AGG_INLINE row_data row (int y) const
+ {
+ return row_data(0, m_width-1, row_ptr(y));
+ }
+
+ //--------------------------------------------------------------------
+ template<class RenBuf>
+ void copy_from(const RenBuf& src)
+ {
+ unsigned h = height();
+ if(src.height() < h) h = src.height();
+
+ unsigned l = stride_abs();
+ if(src.stride_abs() < l) l = src.stride_abs();
+
+ l *= sizeof(T);
+
+ unsigned y;
+ unsigned w = width();
+ for (y = 0; y < h; y++)
+ {
+ memcpy(row_ptr(0, y, w), src.row_ptr(y), l);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void clear(T value)
+ {
+ unsigned y;
+ unsigned w = width();
+ unsigned stride = stride_abs();
+ for(y = 0; y < height(); y++)
+ {
+ T* p = row_ptr(0, y, w);
+ unsigned x;
+ for(x = 0; x < stride; x++)
+ {
+ *p++ = value;
+ }
+ }
+ }
+
+ private:
+ //--------------------------------------------------------------------
+            T*            m_buf;        // Pointer to rendering buffer
+ T* m_start; // Pointer to first pixel depending on stride
+ unsigned m_width; // Width in pixels
+ unsigned m_height; // Height in pixels
+ int m_stride; // Number of bytes per row. Can be < 0
+ };
+
+
+
+
+ //==========================================================row_ptr_cache
+ template<class T> class row_ptr_cache
+ {
+ public:
+ typedef const_row_info<T> row_data;
+
+ //-------------------------------------------------------------------
+ row_ptr_cache() :
+ m_buf(0),
+ m_rows(),
+ m_width(0),
+ m_height(0),
+ m_stride(0)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ row_ptr_cache(T* buf, unsigned width, unsigned height, int stride) :
+ m_buf(0),
+ m_rows(),
+ m_width(0),
+ m_height(0),
+ m_stride(0)
+ {
+ attach(buf, width, height, stride);
+ }
+
+ //--------------------------------------------------------------------
+ void attach(T* buf, unsigned width, unsigned height, int stride)
+ {
+ m_buf = buf;
+ m_width = width;
+ m_height = height;
+ m_stride = stride;
+ if(height > m_rows.size())
+ {
+ m_rows.resize(height);
+ }
+
+ T* row_ptr = m_buf;
+
+ if(stride < 0)
+ {
+ row_ptr = m_buf - (AGG_INT64)(height - 1) * stride;
+ }
+
+ T** rows = &m_rows[0];
+
+ while(height--)
+ {
+ *rows++ = row_ptr;
+ row_ptr += stride;
+ }
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE T* buf() { return m_buf; }
+ AGG_INLINE const T* buf() const { return m_buf; }
+ AGG_INLINE unsigned width() const { return m_width; }
+ AGG_INLINE unsigned height() const { return m_height; }
+ AGG_INLINE int stride() const { return m_stride; }
+ AGG_INLINE unsigned stride_abs() const
+ {
+ return (m_stride < 0) ? unsigned(-m_stride) : unsigned(m_stride);
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE T* row_ptr(int, int y, unsigned)
+ {
+ return m_rows[y];
+ }
+ AGG_INLINE T* row_ptr(int y) { return m_rows[y]; }
+ AGG_INLINE const T* row_ptr(int y) const { return m_rows[y]; }
+ AGG_INLINE row_data row (int y) const
+ {
+ return row_data(0, m_width-1, m_rows[y]);
+ }
+
+ //--------------------------------------------------------------------
+ T const* const* rows() const { return &m_rows[0]; }
+
+ //--------------------------------------------------------------------
+ template<class RenBuf>
+ void copy_from(const RenBuf& src)
+ {
+ unsigned h = height();
+ if(src.height() < h) h = src.height();
+
+ unsigned l = stride_abs();
+ if(src.stride_abs() < l) l = src.stride_abs();
+
+ l *= sizeof(T);
+
+ unsigned y;
+ unsigned w = width();
+ for (y = 0; y < h; y++)
+ {
+ memcpy(row_ptr(0, y, w), src.row_ptr(y), l);
+ }
+ }
+
+ //--------------------------------------------------------------------
+ void clear(T value)
+ {
+ unsigned y;
+ unsigned w = width();
+ unsigned stride = stride_abs();
+ for(y = 0; y < height(); y++)
+ {
+ T* p = row_ptr(0, y, w);
+ unsigned x;
+ for(x = 0; x < stride; x++)
+ {
+ *p++ = value;
+ }
+ }
+ }
+
+ private:
+ //--------------------------------------------------------------------
+            T*            m_buf;        // Pointer to rendering buffer
+ pod_array<T*> m_rows; // Pointers to each row of the buffer
+ unsigned m_width; // Width in pixels
+ unsigned m_height; // Height in pixels
+ int m_stride; // Number of bytes per row. Can be < 0
+ };
+
+
+
+
+ //========================================================rendering_buffer
+ //
+ // The definition of the main type for accessing the rows in the frame
+ // buffer. It provides functionality to navigate to the rows in a
+ // rectangular matrix, from top to bottom or from bottom to top depending
+ // on stride.
+ //
+ // row_accessor is cheap to create/destroy, but performs one multiplication
+ // when calling row_ptr().
+ //
+    // row_ptr_cache creates an array of pointers to rows, so the access
+    // via row_ptr() may be faster. But it requires a memory allocation
+    // when created. For example, on typical Intel Pentium hardware
+    // row_ptr_cache speeds up span_image_filter_rgb_nn by up to 10%.
+ //
+    // It's used only in shorthand typedefs like pixfmt_rgba32 and can be
+    // redefined in agg_config.h.
+    // In real applications you can use both, depending on your needs.
+ //------------------------------------------------------------------------
+#ifdef AGG_RENDERING_BUFFER
+ typedef AGG_RENDERING_BUFFER rendering_buffer;
+#else
+// typedef row_ptr_cache<int8u> rendering_buffer;
+ typedef row_accessor<int8u> rendering_buffer;
+#endif
+
+}
+
+
+#endif
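
A minimal sketch, not part of this diff, of attaching the buffer classes above to raw memory; the sizes are illustrative. A negative stride is the documented way to handle bottom-up layouts, and attach() compensates by pointing m_start at the physically last row:

    #include "agg_rendering_buffer.h"

    void attach_buffers()
    {
        const unsigned w = 320, h = 240, bpp = 4;
        unsigned char* mem = new unsigned char[w * h * bpp];

        // Top-down layout: image row 0 sits at the start of the block.
        agg::rendering_buffer top_down(mem, w, h, int(w * bpp));

        // Bottom-up layout (e.g. a Windows DIB): pass a negative stride and
        // row_ptr(0) .. row_ptr(h - 1) still come back in image order.
        agg::rendering_buffer bottom_up(mem, w, h, -int(w * bpp));

        top_down.clear(0);                      // fill every row with zeros
        unsigned char* first_row = bottom_up.row_ptr(0);
        (void)first_row;
        delete [] mem;
    }

Whether rendering_buffer resolves to row_accessor (the default here) or row_ptr_cache only changes how row_ptr() is computed; the attach()/row()/clear() interface is the same for both.
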
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_bin.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_bin.h
new file mode 100644
index 00000000000..660292b6136
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_bin.h
@@ -0,0 +1,264 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Class scanline_bin - binary scanline.
+//
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates (scanline32_bin) has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_SCANLINE_BIN_INCLUDED
+#define AGG_SCANLINE_BIN_INCLUDED
+
+#include "agg_array.h"
+
+namespace agg
+{
+
+ //=============================================================scanline_bin
+ //
+    // This is a binary scanline container which supports the interface
+ // used in the rasterizer::render(). See description of agg_scanline_u8
+ // for details.
+ //
+ //------------------------------------------------------------------------
+ class scanline_bin
+ {
+ public:
+ typedef int32 coord_type;
+
+ struct span
+ {
+ int16 x;
+ int16 len;
+ };
+
+ typedef const span* const_iterator;
+
+ //--------------------------------------------------------------------
+ scanline_bin() :
+ m_last_x(0x7FFFFFF0),
+ m_spans(),
+ m_cur_span(0)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ void reset(int min_x, int max_x)
+ {
+ unsigned max_len = max_x - min_x + 3;
+ if(max_len > m_spans.size())
+ {
+ m_spans.resize(max_len);
+ }
+ m_last_x = 0x7FFFFFF0;
+ m_cur_span = &m_spans[0];
+ }
+
+ //--------------------------------------------------------------------
+ void add_cell(int x, unsigned)
+ {
+ if(x == m_last_x+1)
+ {
+ m_cur_span->len++;
+ }
+ else
+ {
+ ++m_cur_span;
+ m_cur_span->x = (int16)x;
+ m_cur_span->len = 1;
+ }
+ m_last_x = x;
+ }
+
+ //--------------------------------------------------------------------
+ void add_span(int x, unsigned len, unsigned)
+ {
+ if(x == m_last_x+1)
+ {
+ m_cur_span->len = (int16)(m_cur_span->len + len);
+ }
+ else
+ {
+ ++m_cur_span;
+ m_cur_span->x = (int16)x;
+ m_cur_span->len = (int16)len;
+ }
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void add_cells(int x, unsigned len, const void*)
+ {
+ add_span(x, len, 0);
+ }
+
+ //--------------------------------------------------------------------
+ void finalize(int y)
+ {
+ m_y = y;
+ }
+
+ //--------------------------------------------------------------------
+ void reset_spans()
+ {
+ m_last_x = 0x7FFFFFF0;
+ m_cur_span = &m_spans[0];
+ }
+
+ //--------------------------------------------------------------------
+ int y() const { return m_y; }
+ unsigned num_spans() const { return unsigned(m_cur_span - &m_spans[0]); }
+ const_iterator begin() const { return &m_spans[1]; }
+
+ private:
+ scanline_bin(const scanline_bin&);
+ const scanline_bin operator = (const scanline_bin&);
+
+ int m_last_x;
+ int m_y;
+ pod_array<span> m_spans;
+ span* m_cur_span;
+ };
+
+
+
+
+
+
+ //===========================================================scanline32_bin
+ class scanline32_bin
+ {
+ public:
+ typedef int32 coord_type;
+
+ //--------------------------------------------------------------------
+ struct span
+ {
+ span() {}
+ span(coord_type x_, coord_type len_) : x(x_), len(len_) {}
+
+ coord_type x;
+ coord_type len;
+ };
+ typedef pod_bvector<span, 4> span_array_type;
+
+
+ //--------------------------------------------------------------------
+ class const_iterator
+ {
+ public:
+ const_iterator(const span_array_type& spans) :
+ m_spans(spans),
+ m_span_idx(0)
+ {}
+
+ const span& operator*() const { return m_spans[m_span_idx]; }
+ const span* operator->() const { return &m_spans[m_span_idx]; }
+
+ void operator ++ () { ++m_span_idx; }
+
+ private:
+ const span_array_type& m_spans;
+ unsigned m_span_idx;
+ };
+
+
+ //--------------------------------------------------------------------
+ scanline32_bin() : m_max_len(0), m_last_x(0x7FFFFFF0) {}
+
+ //--------------------------------------------------------------------
+ void reset(int min_x, int max_x)
+ {
+ m_last_x = 0x7FFFFFF0;
+ m_spans.remove_all();
+ }
+
+ //--------------------------------------------------------------------
+ void add_cell(int x, unsigned)
+ {
+ if(x == m_last_x+1)
+ {
+ m_spans.last().len++;
+ }
+ else
+ {
+ m_spans.add(span(coord_type(x), 1));
+ }
+ m_last_x = x;
+ }
+
+ //--------------------------------------------------------------------
+ void add_span(int x, unsigned len, unsigned)
+ {
+ if(x == m_last_x+1)
+ {
+ m_spans.last().len += coord_type(len);
+ }
+ else
+ {
+ m_spans.add(span(coord_type(x), coord_type(len)));
+ }
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void add_cells(int x, unsigned len, const void*)
+ {
+ add_span(x, len, 0);
+ }
+
+ //--------------------------------------------------------------------
+ void finalize(int y)
+ {
+ m_y = y;
+ }
+
+ //--------------------------------------------------------------------
+ void reset_spans()
+ {
+ m_last_x = 0x7FFFFFF0;
+ m_spans.remove_all();
+ }
+
+ //--------------------------------------------------------------------
+ int y() const { return m_y; }
+ unsigned num_spans() const { return m_spans.size(); }
+ const_iterator begin() const { return const_iterator(m_spans); }
+
+ private:
+ scanline32_bin(const scanline32_bin&);
+ const scanline32_bin operator = (const scanline32_bin&);
+
+ unsigned m_max_len;
+ int m_last_x;
+ int m_y;
+ span_array_type m_spans;
+ };
+
+
+
+
+
+}
+
+
+#endif
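
scanline_bin stores only x/len pairs and no cover array, so pairing it with render_scanlines_bin_solid() from agg_renderer_scanline.h above yields aliased (non-antialiased) fills. A minimal sketch, not part of this diff; the pixel format and geometry are illustrative assumptions:

    #include "agg_rasterizer_scanline_aa.h"
    #include "agg_scanline_bin.h"
    #include "agg_pixfmt_rgba.h"
    #include "agg_renderer_base.h"
    #include "agg_renderer_scanline.h"

    void fill_aliased()
    {
        static unsigned char mem[128 * 128 * 4];
        agg::rendering_buffer rbuf(mem, 128, 128, 128 * 4);
        agg::pixfmt_rgba32 pf(rbuf);
        agg::renderer_base<agg::pixfmt_rgba32> rb(pf);

        agg::rasterizer_scanline_aa<> ras;
        agg::scanline_bin sl;                // binary container: no covers

        ras.move_to_d(10.0, 10.0);
        ras.line_to_d(110.0, 20.0);
        ras.line_to_d(60.0, 100.0);

        agg::render_scanlines_bin_solid(ras, sl, rb, agg::rgba8(0, 0, 0));
    }
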
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_p.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_p.h
new file mode 100644
index 00000000000..1d1cbe72f1e
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_p.h
@@ -0,0 +1,329 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Class scanline_p - a general purpose scanline container with packed spans.
+//
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates (scanline32_p) has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_SCANLINE_P_INCLUDED
+#define AGG_SCANLINE_P_INCLUDED
+
+#include "agg_array.h"
+
+namespace agg
+{
+
+ //=============================================================scanline_p8
+ //
+    // This is a general purpose scanline container which supports the interface
+ // used in the rasterizer::render(). See description of scanline_u8
+ // for details.
+ //
+ //------------------------------------------------------------------------
+ class scanline_p8
+ {
+ public:
+ typedef scanline_p8 self_type;
+ typedef int8u cover_type;
+ typedef int16 coord_type;
+
+ //--------------------------------------------------------------------
+ struct span
+ {
+ coord_type x;
+ coord_type len; // If negative, it's a solid span, covers is valid
+ const cover_type* covers;
+ };
+
+ typedef span* iterator;
+ typedef const span* const_iterator;
+
+ scanline_p8() :
+ m_last_x(0x7FFFFFF0),
+ m_covers(),
+ m_cover_ptr(0),
+ m_spans(),
+ m_cur_span(0)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ void reset(int min_x, int max_x)
+ {
+ unsigned max_len = max_x - min_x + 3;
+ if(max_len > m_spans.size())
+ {
+ m_spans.resize(max_len);
+ m_covers.resize(max_len);
+ }
+ m_last_x = 0x7FFFFFF0;
+ m_cover_ptr = &m_covers[0];
+ m_cur_span = &m_spans[0];
+ m_cur_span->len = 0;
+ }
+
+ //--------------------------------------------------------------------
+ void add_cell(int x, unsigned cover)
+ {
+ *m_cover_ptr = (cover_type)cover;
+ if(x == m_last_x+1 && m_cur_span->len > 0)
+ {
+ m_cur_span->len++;
+ }
+ else
+ {
+ m_cur_span++;
+ m_cur_span->covers = m_cover_ptr;
+ m_cur_span->x = (int16)x;
+ m_cur_span->len = 1;
+ }
+ m_last_x = x;
+ m_cover_ptr++;
+ }
+
+ //--------------------------------------------------------------------
+ void add_cells(int x, unsigned len, const cover_type* covers)
+ {
+ memcpy(m_cover_ptr, covers, len * sizeof(cover_type));
+ if(x == m_last_x+1 && m_cur_span->len > 0)
+ {
+ m_cur_span->len += (int16)len;
+ }
+ else
+ {
+ m_cur_span++;
+ m_cur_span->covers = m_cover_ptr;
+ m_cur_span->x = (int16)x;
+ m_cur_span->len = (int16)len;
+ }
+ m_cover_ptr += len;
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void add_span(int x, unsigned len, unsigned cover)
+ {
+ if(x == m_last_x+1 &&
+ m_cur_span->len < 0 &&
+ cover == *m_cur_span->covers)
+ {
+ m_cur_span->len -= (int16)len;
+ }
+ else
+ {
+ *m_cover_ptr = (cover_type)cover;
+ m_cur_span++;
+ m_cur_span->covers = m_cover_ptr++;
+ m_cur_span->x = (int16)x;
+ m_cur_span->len = (int16)(-int(len));
+ }
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void finalize(int y)
+ {
+ m_y = y;
+ }
+
+ //--------------------------------------------------------------------
+ void reset_spans()
+ {
+ m_last_x = 0x7FFFFFF0;
+ m_cover_ptr = &m_covers[0];
+ m_cur_span = &m_spans[0];
+ m_cur_span->len = 0;
+ }
+
+ //--------------------------------------------------------------------
+ int y() const { return m_y; }
+ unsigned num_spans() const { return unsigned(m_cur_span - &m_spans[0]); }
+ const_iterator begin() const { return &m_spans[1]; }
+
+ private:
+ scanline_p8(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ int m_last_x;
+ int m_y;
+ pod_array<cover_type> m_covers;
+ cover_type* m_cover_ptr;
+ pod_array<span> m_spans;
+ span* m_cur_span;
+ };
+
+
+
+
+
+
+
+
+ //==========================================================scanline32_p8
+ class scanline32_p8
+ {
+ public:
+ typedef scanline32_p8 self_type;
+ typedef int8u cover_type;
+ typedef int32 coord_type;
+
+ struct span
+ {
+ span() {}
+ span(coord_type x_, coord_type len_, const cover_type* covers_) :
+ x(x_), len(len_), covers(covers_) {}
+
+ coord_type x;
+ coord_type len; // If negative, it's a solid span, covers is valid
+ const cover_type* covers;
+ };
+ typedef pod_bvector<span, 4> span_array_type;
+
+
+ //--------------------------------------------------------------------
+ class const_iterator
+ {
+ public:
+ const_iterator(const span_array_type& spans) :
+ m_spans(spans),
+ m_span_idx(0)
+ {}
+
+ const span& operator*() const { return m_spans[m_span_idx]; }
+ const span* operator->() const { return &m_spans[m_span_idx]; }
+
+ void operator ++ () { ++m_span_idx; }
+
+ private:
+ const span_array_type& m_spans;
+ unsigned m_span_idx;
+ };
+
+ //--------------------------------------------------------------------
+ scanline32_p8() :
+ m_max_len(0),
+ m_last_x(0x7FFFFFF0),
+ m_covers(),
+ m_cover_ptr(0)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ void reset(int min_x, int max_x)
+ {
+ unsigned max_len = max_x - min_x + 3;
+ if(max_len > m_covers.size())
+ {
+ m_covers.resize(max_len);
+ }
+ m_last_x = 0x7FFFFFF0;
+ m_cover_ptr = &m_covers[0];
+ m_spans.remove_all();
+ }
+
+ //--------------------------------------------------------------------
+ void add_cell(int x, unsigned cover)
+ {
+ *m_cover_ptr = cover_type(cover);
+ if(x == m_last_x+1 && m_spans.size() && m_spans.last().len > 0)
+ {
+ m_spans.last().len++;
+ }
+ else
+ {
+ m_spans.add(span(coord_type(x), 1, m_cover_ptr));
+ }
+ m_last_x = x;
+ m_cover_ptr++;
+ }
+
+ //--------------------------------------------------------------------
+ void add_cells(int x, unsigned len, const cover_type* covers)
+ {
+ memcpy(m_cover_ptr, covers, len * sizeof(cover_type));
+ if(x == m_last_x+1 && m_spans.size() && m_spans.last().len > 0)
+ {
+ m_spans.last().len += coord_type(len);
+ }
+ else
+ {
+ m_spans.add(span(coord_type(x), coord_type(len), m_cover_ptr));
+ }
+ m_cover_ptr += len;
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void add_span(int x, unsigned len, unsigned cover)
+ {
+ if(x == m_last_x+1 &&
+ m_spans.size() &&
+ m_spans.last().len < 0 &&
+ cover == *m_spans.last().covers)
+ {
+ m_spans.last().len -= coord_type(len);
+ }
+ else
+ {
+ *m_cover_ptr = cover_type(cover);
+ m_spans.add(span(coord_type(x), -coord_type(len), m_cover_ptr++));
+ }
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void finalize(int y)
+ {
+ m_y = y;
+ }
+
+ //--------------------------------------------------------------------
+ void reset_spans()
+ {
+ m_last_x = 0x7FFFFFF0;
+ m_cover_ptr = &m_covers[0];
+ m_spans.remove_all();
+ }
+
+ //--------------------------------------------------------------------
+ int y() const { return m_y; }
+ unsigned num_spans() const { return m_spans.size(); }
+ const_iterator begin() const { return const_iterator(m_spans); }
+
+ private:
+ scanline32_p8(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ unsigned m_max_len;
+ int m_last_x;
+ int m_y;
+ pod_array<cover_type> m_covers;
+ cover_type* m_cover_ptr;
+ span_array_type m_spans;
+ };
+
+
+}
+
+
+#endif
+
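
A minimal sketch, not part of this diff, of how scanline_p8 packs its spans: consecutive add_cell() calls grow one span with per-pixel covers, while add_span() stores a solid run with a negative len and a single cover value, which is exactly the case render_scanline_aa_solid() above turns into a blend_hline() call. The coordinates and covers below are illustrative:

    #include "agg_scanline_p.h"

    void inspect_packed_spans()
    {
        agg::scanline_p8 sl;
        sl.reset(0, 640);

        sl.add_cell(10, 128);          // one partially covered pixel
        sl.add_cell(11, 200);          // adjacent: extends the span to len 2
        sl.add_span(100, 50, 255);     // solid run: stored as len = -50
        sl.finalize(42);               // y coordinate of this scanline

        // num_spans() == 2; iterating from begin() yields
        // {x=10, len=2, covers=[128, 200]} and {x=100, len=-50, covers=[255]}.
        agg::scanline_p8::const_iterator it = sl.begin();
        (void)it;
    }
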
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_storage_aa.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_storage_aa.h
new file mode 100644
index 00000000000..b3471fce768
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_storage_aa.h
@@ -0,0 +1,815 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_SCANLINE_STORAGE_AA_INCLUDED
+#define AGG_SCANLINE_STORAGE_AA_INCLUDED
+
+#include <string.h>
+#include <stdlib.h>
+#include <math.h>
+#include "agg_array.h"
+
+
+namespace agg
+{
+
+ //----------------------------------------------scanline_cell_storage
+ template<class T> class scanline_cell_storage
+ {
+ struct extra_span
+ {
+ unsigned len;
+ T* ptr;
+ };
+
+ public:
+ typedef T value_type;
+
+ //---------------------------------------------------------------
+ ~scanline_cell_storage()
+ {
+ remove_all();
+ }
+
+ //---------------------------------------------------------------
+ scanline_cell_storage() :
+ m_cells(128-2),
+ m_extra_storage()
+ {}
+
+
+ // Copying
+ //---------------------------------------------------------------
+ scanline_cell_storage(const scanline_cell_storage<T>& v) :
+ m_cells(v.m_cells),
+ m_extra_storage()
+ {
+ copy_extra_storage(v);
+ }
+
+ //---------------------------------------------------------------
+ const scanline_cell_storage<T>&
+ operator = (const scanline_cell_storage<T>& v)
+ {
+ remove_all();
+ m_cells = v.m_cells;
+ copy_extra_storage(v);
+ return *this;
+ }
+
+ //---------------------------------------------------------------
+ void remove_all()
+ {
+ int i;
+ for(i = m_extra_storage.size()-1; i >= 0; --i)
+ {
+ pod_allocator<T>::deallocate(m_extra_storage[i].ptr,
+ m_extra_storage[i].len);
+ }
+ m_extra_storage.remove_all();
+ m_cells.remove_all();
+ }
+
+ //---------------------------------------------------------------
+ int add_cells(const T* cells, unsigned num_cells)
+ {
+ int idx = m_cells.allocate_continuous_block(num_cells);
+ if(idx >= 0)
+ {
+ T* ptr = &m_cells[idx];
+ memcpy(ptr, cells, sizeof(T) * num_cells);
+ return idx;
+ }
+ extra_span s;
+ s.len = num_cells;
+ s.ptr = pod_allocator<T>::allocate(num_cells);
+ memcpy(s.ptr, cells, sizeof(T) * num_cells);
+ m_extra_storage.add(s);
+ return -int(m_extra_storage.size());
+ }
+
+ //---------------------------------------------------------------
+ const T* operator [] (int idx) const
+ {
+ if(idx >= 0)
+ {
+ if((unsigned)idx >= m_cells.size()) return 0;
+ return &m_cells[(unsigned)idx];
+ }
+ unsigned i = unsigned(-idx - 1);
+ if(i >= m_extra_storage.size()) return 0;
+ return m_extra_storage[i].ptr;
+ }
+
+ //---------------------------------------------------------------
+ T* operator [] (int idx)
+ {
+ if(idx >= 0)
+ {
+ if((unsigned)idx >= m_cells.size()) return 0;
+ return &m_cells[(unsigned)idx];
+ }
+ unsigned i = unsigned(-idx - 1);
+ if(i >= m_extra_storage.size()) return 0;
+ return m_extra_storage[i].ptr;
+ }
+
+ private:
+ void copy_extra_storage(const scanline_cell_storage<T>& v)
+ {
+ unsigned i;
+ for(i = 0; i < v.m_extra_storage.size(); ++i)
+ {
+ const extra_span& src = v.m_extra_storage[i];
+ extra_span dst;
+ dst.len = src.len;
+ dst.ptr = pod_allocator<T>::allocate(dst.len);
+ memcpy(dst.ptr, src.ptr, dst.len * sizeof(T));
+ m_extra_storage.add(dst);
+ }
+ }
+
+ pod_bvector<T, 12> m_cells;
+ pod_bvector<extra_span, 6> m_extra_storage;
+ };
+
+
+
+
+
+
+ //-----------------------------------------------scanline_storage_aa
+ template<class T> class scanline_storage_aa
+ {
+ public:
+ typedef T cover_type;
+
+ //---------------------------------------------------------------
+ struct span_data
+ {
+ int32 x;
+ int32 len; // If negative, it's a solid span, covers is valid
+ int covers_id; // The index of the cells in the scanline_cell_storage
+ };
+
+ //---------------------------------------------------------------
+ struct scanline_data
+ {
+ int y;
+ unsigned num_spans;
+ unsigned start_span;
+ };
+
+
+ //---------------------------------------------------------------
+ class embedded_scanline
+ {
+ public:
+
+ //-----------------------------------------------------------
+ class const_iterator
+ {
+ public:
+ struct span
+ {
+ int32 x;
+ int32 len; // If negative, it's a solid span, covers is valid
+ const T* covers;
+ };
+
+ const_iterator() : m_storage(0) {}
+ const_iterator(embedded_scanline& sl) :
+ m_storage(sl.m_storage),
+ m_span_idx(sl.m_scanline.start_span)
+ {
+ init_span();
+ }
+
+ const span& operator*() const { return m_span; }
+ const span* operator->() const { return &m_span; }
+
+ void operator ++ ()
+ {
+ ++m_span_idx;
+ init_span();
+ }
+
+ private:
+ void init_span()
+ {
+ const span_data& s = m_storage->span_by_index(m_span_idx);
+ m_span.x = s.x;
+ m_span.len = s.len;
+ m_span.covers = m_storage->covers_by_index(s.covers_id);
+ }
+
+ scanline_storage_aa* m_storage;
+ unsigned m_span_idx;
+ span m_span;
+ };
+
+ friend class const_iterator;
+
+
+ //-----------------------------------------------------------
+ embedded_scanline(const scanline_storage_aa& storage) :
+ m_storage(&storage)
+ {
+ init(0);
+ }
+
+ //-----------------------------------------------------------
+ void reset(int, int) {}
+ unsigned num_spans() const { return m_scanline.num_spans; }
+ int y() const { return m_scanline.y; }
+ const_iterator begin() const { return const_iterator(*this); }
+
+ //-----------------------------------------------------------
+ void init(unsigned scanline_idx)
+ {
+ m_scanline_idx = scanline_idx;
+ m_scanline = m_storage->scanline_by_index(m_scanline_idx);
+ }
+
+ private:
+ const scanline_storage_aa* m_storage;
+ scanline_data m_scanline;
+ unsigned m_scanline_idx;
+ };
+
+
+ //---------------------------------------------------------------
+ scanline_storage_aa() :
+ m_covers(),
+ m_spans(256-2), // Block increment size
+ m_scanlines(),
+ m_min_x( 0x7FFFFFFF),
+ m_min_y( 0x7FFFFFFF),
+ m_max_x(-0x7FFFFFFF),
+ m_max_y(-0x7FFFFFFF),
+ m_cur_scanline(0)
+ {
+ m_fake_scanline.y = 0;
+ m_fake_scanline.num_spans = 0;
+ m_fake_scanline.start_span = 0;
+ m_fake_span.x = 0;
+ m_fake_span.len = 0;
+ m_fake_span.covers_id = 0;
+ }
+
+ // Renderer Interface
+ //---------------------------------------------------------------
+ void prepare()
+ {
+ m_covers.remove_all();
+ m_scanlines.remove_all();
+ m_spans.remove_all();
+ m_min_x = 0x7FFFFFFF;
+ m_min_y = 0x7FFFFFFF;
+ m_max_x = -0x7FFFFFFF;
+ m_max_y = -0x7FFFFFFF;
+ m_cur_scanline = 0;
+ }
+
+ //---------------------------------------------------------------
+ template<class Scanline> void render(const Scanline& sl)
+ {
+ scanline_data sl_this;
+
+ int y = sl.y();
+ if(y < m_min_y) m_min_y = y;
+ if(y > m_max_y) m_max_y = y;
+
+ sl_this.y = y;
+ sl_this.num_spans = sl.num_spans();
+ sl_this.start_span = m_spans.size();
+ typename Scanline::const_iterator span_iterator = sl.begin();
+
+ unsigned num_spans = sl_this.num_spans;
+ for(;;)
+ {
+ span_data sp;
+
+ sp.x = span_iterator->x;
+ sp.len = span_iterator->len;
+ int len = abs(int(sp.len));
+ sp.covers_id =
+ m_covers.add_cells(span_iterator->covers,
+ unsigned(len));
+ m_spans.add(sp);
+ int x1 = sp.x;
+ int x2 = sp.x + len - 1;
+ if(x1 < m_min_x) m_min_x = x1;
+ if(x2 > m_max_x) m_max_x = x2;
+ if(--num_spans == 0) break;
+ ++span_iterator;
+ }
+ m_scanlines.add(sl_this);
+ }
+
+
+ //---------------------------------------------------------------
+ // Iterate scanlines interface
+ int min_x() const { return m_min_x; }
+ int min_y() const { return m_min_y; }
+ int max_x() const { return m_max_x; }
+ int max_y() const { return m_max_y; }
+
+ //---------------------------------------------------------------
+ bool rewind_scanlines()
+ {
+ m_cur_scanline = 0;
+ return m_scanlines.size() > 0;
+ }
+
+
+ //---------------------------------------------------------------
+ template<class Scanline> bool sweep_scanline(Scanline& sl)
+ {
+ sl.reset_spans();
+ for(;;)
+ {
+ if(m_cur_scanline >= m_scanlines.size()) return false;
+ const scanline_data& sl_this = m_scanlines[m_cur_scanline];
+
+ unsigned num_spans = sl_this.num_spans;
+ unsigned span_idx = sl_this.start_span;
+ do
+ {
+ const span_data& sp = m_spans[span_idx++];
+ const T* covers = covers_by_index(sp.covers_id);
+ if(sp.len < 0)
+ {
+ sl.add_span(sp.x, unsigned(-sp.len), *covers);
+ }
+ else
+ {
+ sl.add_cells(sp.x, sp.len, covers);
+ }
+ }
+ while(--num_spans);
+ ++m_cur_scanline;
+ if(sl.num_spans())
+ {
+ sl.finalize(sl_this.y);
+ break;
+ }
+ }
+ return true;
+ }
+
+
+ //---------------------------------------------------------------
+ // Specialization for embedded_scanline
+ bool sweep_scanline(embedded_scanline& sl)
+ {
+ do
+ {
+ if(m_cur_scanline >= m_scanlines.size()) return false;
+ sl.init(m_cur_scanline);
+ ++m_cur_scanline;
+ }
+ while(sl.num_spans() == 0);
+ return true;
+ }
+
+ //---------------------------------------------------------------
+ unsigned byte_size() const
+ {
+ unsigned i;
+ unsigned size = sizeof(int32) * 4; // min_x, min_y, max_x, max_y
+
+ for(i = 0; i < m_scanlines.size(); ++i)
+ {
+ size += sizeof(int32) * 3; // scanline size in bytes, Y, num_spans
+
+ const scanline_data& sl_this = m_scanlines[i];
+
+ unsigned num_spans = sl_this.num_spans;
+ unsigned span_idx = sl_this.start_span;
+ do
+ {
+ const span_data& sp = m_spans[span_idx++];
+
+ size += sizeof(int32) * 2; // X, span_len
+ if(sp.len < 0)
+ {
+ size += sizeof(T); // cover
+ }
+ else
+ {
+ size += sizeof(T) * unsigned(sp.len); // covers
+ }
+ }
+ while(--num_spans);
+ }
+ return size;
+ }
+
+
+ //---------------------------------------------------------------
+ static void write_int32(int8u* dst, int32 val)
+ {
+ dst[0] = ((const int8u*)&val)[0];
+ dst[1] = ((const int8u*)&val)[1];
+ dst[2] = ((const int8u*)&val)[2];
+ dst[3] = ((const int8u*)&val)[3];
+ }
+
+
+ //---------------------------------------------------------------
+ void serialize(int8u* data) const
+ {
+ unsigned i;
+
+ write_int32(data, min_x()); // min_x
+ data += sizeof(int32);
+ write_int32(data, min_y()); // min_y
+ data += sizeof(int32);
+ write_int32(data, max_x()); // max_x
+ data += sizeof(int32);
+ write_int32(data, max_y()); // max_y
+ data += sizeof(int32);
+
+ for(i = 0; i < m_scanlines.size(); ++i)
+ {
+ const scanline_data& sl_this = m_scanlines[i];
+
+ int8u* size_ptr = data;
+ data += sizeof(int32); // Reserve space for scanline size in bytes
+
+ write_int32(data, sl_this.y); // Y
+ data += sizeof(int32);
+
+ write_int32(data, sl_this.num_spans); // num_spans
+ data += sizeof(int32);
+
+ unsigned num_spans = sl_this.num_spans;
+ unsigned span_idx = sl_this.start_span;
+ do
+ {
+ const span_data& sp = m_spans[span_idx++];
+ const T* covers = covers_by_index(sp.covers_id);
+
+ write_int32(data, sp.x); // X
+ data += sizeof(int32);
+
+ write_int32(data, sp.len); // span_len
+ data += sizeof(int32);
+
+ if(sp.len < 0)
+ {
+ memcpy(data, covers, sizeof(T));
+ data += sizeof(T);
+ }
+ else
+ {
+ memcpy(data, covers, unsigned(sp.len) * sizeof(T));
+ data += sizeof(T) * unsigned(sp.len);
+ }
+ }
+ while(--num_spans);
+ write_int32(size_ptr, int32(unsigned(data - size_ptr)));
+ }
+ }
+
+
+ //---------------------------------------------------------------
+ const scanline_data& scanline_by_index(unsigned i) const
+ {
+ return (i < m_scanlines.size()) ? m_scanlines[i] : m_fake_scanline;
+ }
+
+ //---------------------------------------------------------------
+ const span_data& span_by_index(unsigned i) const
+ {
+ return (i < m_spans.size()) ? m_spans[i] : m_fake_span;
+ }
+
+ //---------------------------------------------------------------
+ const T* covers_by_index(int i) const
+ {
+ return m_covers[i];
+ }
+
+ private:
+ scanline_cell_storage<T> m_covers;
+ pod_bvector<span_data, 10> m_spans;
+ pod_bvector<scanline_data, 8> m_scanlines;
+ span_data m_fake_span;
+ scanline_data m_fake_scanline;
+ int m_min_x;
+ int m_min_y;
+ int m_max_x;
+ int m_max_y;
+ unsigned m_cur_scanline;
+ };
+
+
+ typedef scanline_storage_aa<int8u> scanline_storage_aa8; //--------scanline_storage_aa8
+ typedef scanline_storage_aa<int16u> scanline_storage_aa16; //--------scanline_storage_aa16
+ typedef scanline_storage_aa<int32u> scanline_storage_aa32; //--------scanline_storage_aa32
+
+
+
+
+ //------------------------------------------serialized_scanlines_adaptor_aa
+ template<class T> class serialized_scanlines_adaptor_aa
+ {
+ public:
+ typedef T cover_type;
+
+ //---------------------------------------------------------------------
+ class embedded_scanline
+ {
+ public:
+ typedef T cover_type;
+
+ //-----------------------------------------------------------------
+ class const_iterator
+ {
+ public:
+ struct span
+ {
+ int32 x;
+ int32 len; // If negative, it's a solid span, "covers" is valid
+ const T* covers;
+ };
+
+ const_iterator() : m_ptr(0) {}
+ const_iterator(const embedded_scanline* sl) :
+ m_ptr(sl->m_ptr),
+ m_dx(sl->m_dx)
+ {
+ init_span();
+ }
+
+ const span& operator*() const { return m_span; }
+ const span* operator->() const { return &m_span; }
+
+ void operator ++ ()
+ {
+ if(m_span.len < 0)
+ {
+ m_ptr += sizeof(T);
+ }
+ else
+ {
+ m_ptr += m_span.len * sizeof(T);
+ }
+ init_span();
+ }
+
+ private:
+ int read_int32()
+ {
+ int32 val;
+ ((int8u*)&val)[0] = *m_ptr++;
+ ((int8u*)&val)[1] = *m_ptr++;
+ ((int8u*)&val)[2] = *m_ptr++;
+ ((int8u*)&val)[3] = *m_ptr++;
+ return val;
+ }
+
+ void init_span()
+ {
+ m_span.x = read_int32() + m_dx;
+ m_span.len = read_int32();
+ m_span.covers = m_ptr;
+ }
+
+ const int8u* m_ptr;
+ span m_span;
+ int m_dx;
+ };
+
+ friend class const_iterator;
+
+
+ //-----------------------------------------------------------------
+ embedded_scanline() : m_ptr(0), m_y(0), m_num_spans(0) {}
+
+ //-----------------------------------------------------------------
+ void reset(int, int) {}
+ unsigned num_spans() const { return m_num_spans; }
+ int y() const { return m_y; }
+ const_iterator begin() const { return const_iterator(this); }
+
+
+ private:
+ //-----------------------------------------------------------------
+ int read_int32()
+ {
+ int32 val;
+ ((int8u*)&val)[0] = *m_ptr++;
+ ((int8u*)&val)[1] = *m_ptr++;
+ ((int8u*)&val)[2] = *m_ptr++;
+ ((int8u*)&val)[3] = *m_ptr++;
+ return val;
+ }
+
+ public:
+ //-----------------------------------------------------------------
+ void init(const int8u* ptr, int dx, int dy)
+ {
+ m_ptr = ptr;
+ m_y = read_int32() + dy;
+ m_num_spans = unsigned(read_int32());
+ m_dx = dx;
+ }
+
+ private:
+ const int8u* m_ptr;
+ int m_y;
+ unsigned m_num_spans;
+ int m_dx;
+ };
+
+
+
+ public:
+ //--------------------------------------------------------------------
+ serialized_scanlines_adaptor_aa() :
+ m_data(0),
+ m_end(0),
+ m_ptr(0),
+ m_dx(0),
+ m_dy(0),
+ m_min_x(0x7FFFFFFF),
+ m_min_y(0x7FFFFFFF),
+ m_max_x(-0x7FFFFFFF),
+ m_max_y(-0x7FFFFFFF)
+ {}
+
+ //--------------------------------------------------------------------
+ serialized_scanlines_adaptor_aa(const int8u* data, unsigned size,
+ double dx, double dy) :
+ m_data(data),
+ m_end(data + size),
+ m_ptr(data),
+ m_dx(iround(dx)),
+ m_dy(iround(dy)),
+ m_min_x(0x7FFFFFFF),
+ m_min_y(0x7FFFFFFF),
+ m_max_x(-0x7FFFFFFF),
+ m_max_y(-0x7FFFFFFF)
+ {}
+
+ //--------------------------------------------------------------------
+ void init(const int8u* data, unsigned size, double dx, double dy)
+ {
+ m_data = data;
+ m_end = data + size;
+ m_ptr = data;
+ m_dx = iround(dx);
+ m_dy = iround(dy);
+ m_min_x = 0x7FFFFFFF;
+ m_min_y = 0x7FFFFFFF;
+ m_max_x = -0x7FFFFFFF;
+ m_max_y = -0x7FFFFFFF;
+ }
+
+ private:
+ //--------------------------------------------------------------------
+ int read_int32()
+ {
+ int32 val;
+ ((int8u*)&val)[0] = *m_ptr++;
+ ((int8u*)&val)[1] = *m_ptr++;
+ ((int8u*)&val)[2] = *m_ptr++;
+ ((int8u*)&val)[3] = *m_ptr++;
+ return val;
+ }
+
+ //--------------------------------------------------------------------
+ unsigned read_int32u()
+ {
+ int32u val;
+ ((int8u*)&val)[0] = *m_ptr++;
+ ((int8u*)&val)[1] = *m_ptr++;
+ ((int8u*)&val)[2] = *m_ptr++;
+ ((int8u*)&val)[3] = *m_ptr++;
+ return val;
+ }
+
+ public:
+ // Iterate scanlines interface
+ //--------------------------------------------------------------------
+ bool rewind_scanlines()
+ {
+ m_ptr = m_data;
+ if(m_ptr < m_end)
+ {
+ m_min_x = read_int32u() + m_dx;
+ m_min_y = read_int32u() + m_dy;
+ m_max_x = read_int32u() + m_dx;
+ m_max_y = read_int32u() + m_dy;
+ }
+ return m_ptr < m_end;
+ }
+
+ //--------------------------------------------------------------------
+ int min_x() const { return m_min_x; }
+ int min_y() const { return m_min_y; }
+ int max_x() const { return m_max_x; }
+ int max_y() const { return m_max_y; }
+
+ //--------------------------------------------------------------------
+ template<class Scanline> bool sweep_scanline(Scanline& sl)
+ {
+ sl.reset_spans();
+ for(;;)
+ {
+ if(m_ptr >= m_end) return false;
+
+ read_int32(); // Skip scanline size in bytes
+ int y = read_int32() + m_dy;
+ unsigned num_spans = read_int32();
+
+ do
+ {
+ int x = read_int32() + m_dx;
+ int len = read_int32();
+
+ if(len < 0)
+ {
+ sl.add_span(x, unsigned(-len), *m_ptr);
+ m_ptr += sizeof(T);
+ }
+ else
+ {
+ sl.add_cells(x, len, m_ptr);
+ m_ptr += len * sizeof(T);
+ }
+ }
+ while(--num_spans);
+
+ if(sl.num_spans())
+ {
+ sl.finalize(y);
+ break;
+ }
+ }
+ return true;
+ }
+
+
+ //--------------------------------------------------------------------
+ // Specialization for embedded_scanline
+ bool sweep_scanline(embedded_scanline& sl)
+ {
+ do
+ {
+ if(m_ptr >= m_end) return false;
+
+ unsigned byte_size = read_int32u();
+ sl.init(m_ptr, m_dx, m_dy);
+ m_ptr += byte_size - sizeof(int32);
+ }
+ while(sl.num_spans() == 0);
+ return true;
+ }
+
+ private:
+ const int8u* m_data;
+ const int8u* m_end;
+ const int8u* m_ptr;
+ int m_dx;
+ int m_dy;
+ int m_min_x;
+ int m_min_y;
+ int m_max_x;
+ int m_max_y;
+ };
+
+
+
+ typedef serialized_scanlines_adaptor_aa<int8u> serialized_scanlines_adaptor_aa8; //----serialized_scanlines_adaptor_aa8
+ typedef serialized_scanlines_adaptor_aa<int16u> serialized_scanlines_adaptor_aa16; //----serialized_scanlines_adaptor_aa16
+ typedef serialized_scanlines_adaptor_aa<int32u> serialized_scanlines_adaptor_aa32; //----serialized_scanlines_adaptor_aa32
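+
+ // A minimal round-trip sketch (an illustration only, not taken from the
+ // original sources; it assumes the render_scanlines()/render_scanlines_aa_solid()
+ // helpers declared in AGG's renderer headers, and unspecified ras/sl/ren_base/color
+ // objects from the surrounding pipeline):
+ //
+ //     scanline_storage_aa8 storage;                 // acts as a renderer
+ //     render_scanlines(ras, sl, storage);           // capture the scanlines
+ //
+ //     unsigned size = storage.byte_size();
+ //     int8u* buf = new int8u[size];
+ //     storage.serialize(buf);                       // flatten to bytes
+ //
+ //     serialized_scanlines_adaptor_aa8 adaptor(buf, size, 0.0, 0.0);
+ //     render_scanlines_aa_solid(adaptor, sl, ren_base, color); // replay later
+ //     delete [] buf;
+ //------------------------------------------------------------------------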
+
+}
+
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_storage_bin.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_storage_bin.h
new file mode 100644
index 00000000000..3ab1adca516
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_storage_bin.h
@@ -0,0 +1,586 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+
+
+#ifndef AGG_SCANLINE_STORAGE_BIN_INCLUDED
+#define AGG_SCANLINE_STORAGE_BIN_INCLUDED
+
+#include <string.h>
+#include <stdlib.h>
+#include <math.h>
+#include "agg_array.h"
+
+
+namespace agg
+{
+
+ //-----------------------------------------------scanline_storage_bin
+ class scanline_storage_bin
+ {
+ public:
+ //---------------------------------------------------------------
+ struct span_data
+ {
+ int32 x;
+ int32 len;
+ };
+
+ //---------------------------------------------------------------
+ struct scanline_data
+ {
+ int y;
+ unsigned num_spans;
+ unsigned start_span;
+ };
+
+
+ //---------------------------------------------------------------
+ class embedded_scanline
+ {
+ public:
+
+ //-----------------------------------------------------------
+ class const_iterator
+ {
+ public:
+ const_iterator() : m_storage(0) {}
+ const_iterator(const embedded_scanline* sl) :
+ m_storage(sl->m_storage),
+ m_span_idx(sl->m_scanline.start_span)
+ {
+ m_span = m_storage->span_by_index(m_span_idx);
+ }
+
+ const span_data& operator*() const { return m_span; }
+ const span_data* operator->() const { return &m_span; }
+
+ void operator ++ ()
+ {
+ ++m_span_idx;
+ m_span = m_storage->span_by_index(m_span_idx);
+ }
+
+ private:
+ const scanline_storage_bin* m_storage;
+ unsigned m_span_idx;
+ span_data m_span;
+ };
+
+ friend class const_iterator;
+
+
+ //-----------------------------------------------------------
+ embedded_scanline(scanline_storage_bin& storage) :
+ m_storage(&storage)
+ {
+ setup(0);
+ }
+
+ //-----------------------------------------------------------
+ void reset(int, int) {}
+ unsigned num_spans() const { return m_scanline.num_spans; }
+ int y() const { return m_scanline.y; }
+ const_iterator begin() const { return const_iterator(this); }
+
+ //-----------------------------------------------------------
+ void setup(unsigned scanline_idx)
+ {
+ m_scanline_idx = scanline_idx;
+ m_scanline = m_storage->scanline_by_index(m_scanline_idx);
+ }
+
+ private:
+ scanline_storage_bin* m_storage;
+ scanline_data m_scanline;
+ unsigned m_scanline_idx;
+ };
+
+
+ //---------------------------------------------------------------
+ scanline_storage_bin() :
+ m_spans(256-2), // Block increment size
+ m_scanlines(),
+ m_min_x( 0x7FFFFFFF),
+ m_min_y( 0x7FFFFFFF),
+ m_max_x(-0x7FFFFFFF),
+ m_max_y(-0x7FFFFFFF),
+ m_cur_scanline(0)
+ {
+ m_fake_scanline.y = 0;
+ m_fake_scanline.num_spans = 0;
+ m_fake_scanline.start_span = 0;
+ m_fake_span.x = 0;
+ m_fake_span.len = 0;
+ }
+
+ // Renderer Interface
+ //---------------------------------------------------------------
+ void prepare()
+ {
+ m_scanlines.remove_all();
+ m_spans.remove_all();
+ m_min_x = 0x7FFFFFFF;
+ m_min_y = 0x7FFFFFFF;
+ m_max_x = -0x7FFFFFFF;
+ m_max_y = -0x7FFFFFFF;
+ m_cur_scanline = 0;
+ }
+
+ //---------------------------------------------------------------
+ template<class Scanline> void render(const Scanline& sl)
+ {
+ scanline_data sl_this;
+
+ int y = sl.y();
+ if(y < m_min_y) m_min_y = y;
+ if(y > m_max_y) m_max_y = y;
+
+ sl_this.y = y;
+ sl_this.num_spans = sl.num_spans();
+ sl_this.start_span = m_spans.size();
+ typename Scanline::const_iterator span_iterator = sl.begin();
+
+ unsigned num_spans = sl_this.num_spans;
+ for(;;)
+ {
+ span_data sp;
+ sp.x = span_iterator->x;
+ sp.len = (int32)abs((int)(span_iterator->len));
+ m_spans.add(sp);
+ int x1 = sp.x;
+ int x2 = sp.x + sp.len - 1;
+ if(x1 < m_min_x) m_min_x = x1;
+ if(x2 > m_max_x) m_max_x = x2;
+ if(--num_spans == 0) break;
+ ++span_iterator;
+ }
+ m_scanlines.add(sl_this);
+ }
+
+
+ //---------------------------------------------------------------
+ // Iterate scanlines interface
+ int min_x() const { return m_min_x; }
+ int min_y() const { return m_min_y; }
+ int max_x() const { return m_max_x; }
+ int max_y() const { return m_max_y; }
+
+ //---------------------------------------------------------------
+ bool rewind_scanlines()
+ {
+ m_cur_scanline = 0;
+ return m_scanlines.size() > 0;
+ }
+
+
+ //---------------------------------------------------------------
+ template<class Scanline> bool sweep_scanline(Scanline& sl)
+ {
+ sl.reset_spans();
+ for(;;)
+ {
+ if(m_cur_scanline >= m_scanlines.size()) return false;
+ const scanline_data& sl_this = m_scanlines[m_cur_scanline];
+
+ unsigned num_spans = sl_this.num_spans;
+ unsigned span_idx = sl_this.start_span;
+ do
+ {
+ const span_data& sp = m_spans[span_idx++];
+ sl.add_span(sp.x, sp.len, cover_full);
+ }
+ while(--num_spans);
+
+ ++m_cur_scanline;
+ if(sl.num_spans())
+ {
+ sl.finalize(sl_this.y);
+ break;
+ }
+ }
+ return true;
+ }
+
+
+ //---------------------------------------------------------------
+ // Specialization for embedded_scanline
+ bool sweep_scanline(embedded_scanline& sl)
+ {
+ do
+ {
+ if(m_cur_scanline >= m_scanlines.size()) return false;
+ sl.setup(m_cur_scanline);
+ ++m_cur_scanline;
+ }
+ while(sl.num_spans() == 0);
+ return true;
+ }
+
+
+ //---------------------------------------------------------------
+ unsigned byte_size() const
+ {
+ unsigned i;
+ unsigned size = sizeof(int32) * 4; // min_x, min_y, max_x, max_y
+
+ for(i = 0; i < m_scanlines.size(); ++i)
+ {
+ size += sizeof(int32) * 2 + // Y, num_spans
+ unsigned(m_scanlines[i].num_spans) * sizeof(int32) * 2; // X, span_len
+ }
+ return size;
+ }
+
+
+ //---------------------------------------------------------------
+ static void write_int32(int8u* dst, int32 val)
+ {
+ dst[0] = ((const int8u*)&val)[0];
+ dst[1] = ((const int8u*)&val)[1];
+ dst[2] = ((const int8u*)&val)[2];
+ dst[3] = ((const int8u*)&val)[3];
+ }
+
+
+ //---------------------------------------------------------------
+ void serialize(int8u* data) const
+ {
+ unsigned i;
+
+ write_int32(data, min_x()); // min_x
+ data += sizeof(int32);
+ write_int32(data, min_y()); // min_y
+ data += sizeof(int32);
+ write_int32(data, max_x()); // max_x
+ data += sizeof(int32);
+ write_int32(data, max_y()); // max_y
+ data += sizeof(int32);
+
+ for(i = 0; i < m_scanlines.size(); ++i)
+ {
+ const scanline_data& sl_this = m_scanlines[i];
+
+ write_int32(data, sl_this.y); // Y
+ data += sizeof(int32);
+
+ write_int32(data, sl_this.num_spans); // num_spans
+ data += sizeof(int32);
+
+ unsigned num_spans = sl_this.num_spans;
+ unsigned span_idx = sl_this.start_span;
+ do
+ {
+ const span_data& sp = m_spans[span_idx++];
+
+ write_int32(data, sp.x); // X
+ data += sizeof(int32);
+
+ write_int32(data, sp.len); // len
+ data += sizeof(int32);
+ }
+ while(--num_spans);
+ }
+ }
+
+
+ //---------------------------------------------------------------
+ const scanline_data& scanline_by_index(unsigned i) const
+ {
+ return (i < m_scanlines.size()) ? m_scanlines[i] : m_fake_scanline;
+ }
+
+ //---------------------------------------------------------------
+ const span_data& span_by_index(unsigned i) const
+ {
+ return (i < m_spans.size()) ? m_spans[i] : m_fake_span;
+ }
+
+
+ private:
+ pod_bvector<span_data, 10> m_spans;
+ pod_bvector<scanline_data, 8> m_scanlines;
+ span_data m_fake_span;
+ scanline_data m_fake_scanline;
+ int m_min_x;
+ int m_min_y;
+ int m_max_x;
+ int m_max_y;
+ unsigned m_cur_scanline;
+ };
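+
+ // Serialized layout produced by byte_size()/serialize() above (all values
+ // are 32-bit integers written byte-by-byte in native order):
+ //
+ //     min_x, min_y, max_x, max_y            -- bounding box, 4 x int32
+ //     for each scanline:
+ //         y, num_spans                      -- 2 x int32
+ //         num_spans x { x, len }            -- 2 x int32 per span
+ //
+ // Unlike the anti-aliased storage in agg_scanline_storage_aa.h, there is no
+ // per-scanline byte count and no cover arrays, since binary spans carry no
+ // coverage data.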
+
+
+
+
+
+
+
+
+
+
+
+
+
+ //---------------------------------------serialized_scanlines_adaptor_bin
+ class serialized_scanlines_adaptor_bin
+ {
+ public:
+ typedef bool cover_type;
+
+ //--------------------------------------------------------------------
+ class embedded_scanline
+ {
+ public:
+
+ //----------------------------------------------------------------
+ class const_iterator
+ {
+ public:
+ struct span
+ {
+ int32 x;
+ int32 len;
+ };
+
+ const_iterator() : m_ptr(0) {}
+ const_iterator(const embedded_scanline* sl) :
+ m_ptr(sl->m_ptr),
+ m_dx(sl->m_dx)
+ {
+ m_span.x = read_int32() + m_dx;
+ m_span.len = read_int32();
+ }
+
+ const span& operator*() const { return m_span; }
+ const span* operator->() const { return &m_span; }
+
+ void operator ++ ()
+ {
+ m_span.x = read_int32() + m_dx;
+ m_span.len = read_int32();
+ }
+
+ private:
+ int read_int32()
+ {
+ int32 val;
+ ((int8u*)&val)[0] = *m_ptr++;
+ ((int8u*)&val)[1] = *m_ptr++;
+ ((int8u*)&val)[2] = *m_ptr++;
+ ((int8u*)&val)[3] = *m_ptr++;
+ return val;
+ }
+
+ const int8u* m_ptr;
+ span m_span;
+ int m_dx;
+ };
+
+ friend class const_iterator;
+
+
+ //----------------------------------------------------------------
+ embedded_scanline() : m_ptr(0), m_y(0), m_num_spans(0) {}
+
+ //----------------------------------------------------------------
+ void reset(int, int) {}
+ unsigned num_spans() const { return m_num_spans; }
+ int y() const { return m_y; }
+ const_iterator begin() const { return const_iterator(this); }
+
+
+ private:
+ //----------------------------------------------------------------
+ int read_int32()
+ {
+ int32 val;
+ ((int8u*)&val)[0] = *m_ptr++;
+ ((int8u*)&val)[1] = *m_ptr++;
+ ((int8u*)&val)[2] = *m_ptr++;
+ ((int8u*)&val)[3] = *m_ptr++;
+ return val;
+ }
+
+ public:
+ //----------------------------------------------------------------
+ void init(const int8u* ptr, int dx, int dy)
+ {
+ m_ptr = ptr;
+ m_y = read_int32() + dy;
+ m_num_spans = unsigned(read_int32());
+ m_dx = dx;
+ }
+
+ private:
+ const int8u* m_ptr;
+ int m_y;
+ unsigned m_num_spans;
+ int m_dx;
+ };
+
+
+
+ public:
+ //--------------------------------------------------------------------
+ serialized_scanlines_adaptor_bin() :
+ m_data(0),
+ m_end(0),
+ m_ptr(0),
+ m_dx(0),
+ m_dy(0),
+ m_min_x(0x7FFFFFFF),
+ m_min_y(0x7FFFFFFF),
+ m_max_x(-0x7FFFFFFF),
+ m_max_y(-0x7FFFFFFF)
+ {}
+
+ //--------------------------------------------------------------------
+ serialized_scanlines_adaptor_bin(const int8u* data, unsigned size,
+ double dx, double dy) :
+ m_data(data),
+ m_end(data + size),
+ m_ptr(data),
+ m_dx(iround(dx)),
+ m_dy(iround(dy)),
+ m_min_x(0x7FFFFFFF),
+ m_min_y(0x7FFFFFFF),
+ m_max_x(-0x7FFFFFFF),
+ m_max_y(-0x7FFFFFFF)
+ {}
+
+ //--------------------------------------------------------------------
+ void init(const int8u* data, unsigned size, double dx, double dy)
+ {
+ m_data = data;
+ m_end = data + size;
+ m_ptr = data;
+ m_dx = iround(dx);
+ m_dy = iround(dy);
+ m_min_x = 0x7FFFFFFF;
+ m_min_y = 0x7FFFFFFF;
+ m_max_x = -0x7FFFFFFF;
+ m_max_y = -0x7FFFFFFF;
+ }
+
+ private:
+ //--------------------------------------------------------------------
+ int read_int32()
+ {
+ int32 val;
+ ((int8u*)&val)[0] = *m_ptr++;
+ ((int8u*)&val)[1] = *m_ptr++;
+ ((int8u*)&val)[2] = *m_ptr++;
+ ((int8u*)&val)[3] = *m_ptr++;
+ return val;
+ }
+
+ public:
+ // Iterate scanlines interface
+ //--------------------------------------------------------------------
+ bool rewind_scanlines()
+ {
+ m_ptr = m_data;
+ if(m_ptr < m_end)
+ {
+ m_min_x = read_int32() + m_dx;
+ m_min_y = read_int32() + m_dy;
+ m_max_x = read_int32() + m_dx;
+ m_max_y = read_int32() + m_dy;
+ }
+ return m_ptr < m_end;
+ }
+
+ //--------------------------------------------------------------------
+ int min_x() const { return m_min_x; }
+ int min_y() const { return m_min_y; }
+ int max_x() const { return m_max_x; }
+ int max_y() const { return m_max_y; }
+
+ //--------------------------------------------------------------------
+ template<class Scanline> bool sweep_scanline(Scanline& sl)
+ {
+ sl.reset_spans();
+ for(;;)
+ {
+ if(m_ptr >= m_end) return false;
+
+ int y = read_int32() + m_dy;
+ unsigned num_spans = read_int32();
+
+ do
+ {
+ int x = read_int32() + m_dx;
+ int len = read_int32();
+
+ if(len < 0) len = -len;
+ sl.add_span(x, unsigned(len), cover_full);
+ }
+ while(--num_spans);
+
+ if(sl.num_spans())
+ {
+ sl.finalize(y);
+ break;
+ }
+ }
+ return true;
+ }
+
+
+ //--------------------------------------------------------------------
+ // Specialization for embedded_scanline
+ bool sweep_scanline(embedded_scanline& sl)
+ {
+ do
+ {
+ if(m_ptr >= m_end) return false;
+
+ sl.init(m_ptr, m_dx, m_dy);
+
+ // Jump to the next scanline
+ //--------------------------
+ read_int32(); // Y
+ int num_spans = read_int32(); // num_spans
+ m_ptr += num_spans * sizeof(int32) * 2;
+ }
+ while(sl.num_spans() == 0);
+ return true;
+ }
+
+ private:
+ const int8u* m_data;
+ const int8u* m_end;
+ const int8u* m_ptr;
+ int m_dx;
+ int m_dy;
+ int m_min_x;
+ int m_min_y;
+ int m_max_x;
+ int m_max_y;
+ };
+
+
+
+}
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_u.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_u.h
new file mode 100644
index 00000000000..2628f55f47b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_scanline_u.h
@@ -0,0 +1,499 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for 32-bit screen coordinates (scanline32_u) has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_SCANLINE_U_INCLUDED
+#define AGG_SCANLINE_U_INCLUDED
+
+#include "agg_array.h"
+
+namespace agg
+{
+ //=============================================================scanline_u8
+ //
+ // Unpacked scanline container class
+ //
+ // This class is used to transfer data from a scanline rasterizer
+ // to the rendering buffer. Its organization is very simple. The class stores
+ // information about horizontal spans to render into a pixel-map buffer.
+ // Each span has a starting X, a length, and an array of bytes that determine
+ // the cover values for each pixel.
+ // Before using this class you should know the minimal and maximal pixel
+ // coordinates of your scanline. The usage protocol is:
+ // 1. reset(min_x, max_x)
+ // 2. add_cell() / add_span() - accumulate the scanline.
+ // When forming one scanline the next X coordinate must always be greater
+ // than the last stored one, i.e. it works only with ordered coordinates.
+ // 3. Call finalize(y) and render the scanline.
+ // 4. Call reset_spans() to prepare for the next scanline.
+ //
+ // 5. Rendering:
+ //
+ // Scanline provides an iterator class that allows you to extract
+ // the spans and the cover values for each pixel. Be aware that clipping
+ // has not been done yet, so you should perform it yourself.
+ // Use scanline_u8::iterator to render spans:
+ //-------------------------------------------------------------------------
+ //
+ // int y = sl.y(); // Y-coordinate of the scanline
+ //
+ // ************************************
+ // ...Perform vertical clipping here...
+ // ************************************
+ //
+ // scanline_u8::const_iterator span = sl.begin();
+ //
+ // unsigned char* row = m_rbuf->row(y); // The address of the beginning
+ // // of the current row
+ //
+ // unsigned num_spans = sl.num_spans(); // Number of spans. It's guaranteed that
+ // // num_spans is always greater than 0.
+ //
+ // do
+ // {
+ // const scanline_u8::cover_type* covers =
+ // span->covers; // The array of the cover values
+ //
+ // int num_pix = span->len; // Number of pixels of the span.
+ // // Always greater than 0, still it's
+ // // better to use "int" instead of
+ // // "unsigned" because it's more
+ // // convenient for clipping
+ // int x = span->x;
+ //
+ // **************************************
+ // ...Perform horizontal clipping here...
+ // ...you have x, covers, and pix_count..
+ // **************************************
+ //
+ // unsigned char* dst = row + x; // Calculate the start address of the row.
+ // // In this case we assume a simple
+ // // grayscale image 1-byte per pixel.
+ // do
+ // {
+ // *dst++ = *covers++; // Hypothetical rendering.
+ // }
+ // while(--num_pix);
+ //
+ // ++span;
+ // }
+ // while(--num_spans); // num_spans cannot be 0, so this loop is quite safe
+ //------------------------------------------------------------------------
+ //
+ // The question is: why should we accumulate the whole scanline when we
+ // could render separate spans as soon as they're ready?
+ // That's because using the scanline is generally faster. When it consists
+ // of more than one span, the conditions for the processor cache system
+ // are better, because switching between two different areas of memory
+ // (which can be very large) occurs less frequently.
+ //------------------------------------------------------------------------
+ class scanline_u8
+ {
+ public:
+ typedef scanline_u8 self_type;
+ typedef int8u cover_type;
+ typedef int16 coord_type;
+
+ //--------------------------------------------------------------------
+ struct span
+ {
+ coord_type x;
+ coord_type len;
+ cover_type* covers;
+ };
+
+ typedef span* iterator;
+ typedef const span* const_iterator;
+
+ //--------------------------------------------------------------------
+ scanline_u8() :
+ m_min_x(0),
+ m_last_x(0x7FFFFFF0),
+ m_cur_span(0)
+ {}
+
+ //--------------------------------------------------------------------
+ void reset(int min_x, int max_x)
+ {
+ unsigned max_len = max_x - min_x + 2;
+ if(max_len > m_spans.size())
+ {
+ m_spans.resize(max_len);
+ m_covers.resize(max_len);
+ }
+ m_last_x = 0x7FFFFFF0;
+ m_min_x = min_x;
+ m_cur_span = &m_spans[0];
+ }
+
+ //--------------------------------------------------------------------
+ void add_cell(int x, unsigned cover)
+ {
+ x -= m_min_x;
+ m_covers[x] = (cover_type)cover;
+ if(x == m_last_x+1)
+ {
+ m_cur_span->len++;
+ }
+ else
+ {
+ m_cur_span++;
+ m_cur_span->x = (coord_type)(x + m_min_x);
+ m_cur_span->len = 1;
+ m_cur_span->covers = &m_covers[x];
+ }
+ m_last_x = x;
+ }
+
+ //--------------------------------------------------------------------
+ void add_cells(int x, unsigned len, const cover_type* covers)
+ {
+ x -= m_min_x;
+ memcpy(&m_covers[x], covers, len * sizeof(cover_type));
+ if(x == m_last_x+1)
+ {
+ m_cur_span->len += (coord_type)len;
+ }
+ else
+ {
+ m_cur_span++;
+ m_cur_span->x = (coord_type)(x + m_min_x);
+ m_cur_span->len = (coord_type)len;
+ m_cur_span->covers = &m_covers[x];
+ }
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void add_span(int x, unsigned len, unsigned cover)
+ {
+ x -= m_min_x;
+ memset(&m_covers[x], cover, len);
+ if(x == m_last_x+1)
+ {
+ m_cur_span->len += (coord_type)len;
+ }
+ else
+ {
+ m_cur_span++;
+ m_cur_span->x = (coord_type)(x + m_min_x);
+ m_cur_span->len = (coord_type)len;
+ m_cur_span->covers = &m_covers[x];
+ }
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void finalize(int y)
+ {
+ m_y = y;
+ }
+
+ //--------------------------------------------------------------------
+ void reset_spans()
+ {
+ m_last_x = 0x7FFFFFF0;
+ m_cur_span = &m_spans[0];
+ }
+
+ //--------------------------------------------------------------------
+ int y() const { return m_y; }
+ unsigned num_spans() const { return unsigned(m_cur_span - &m_spans[0]); }
+ const_iterator begin() const { return &m_spans[1]; }
+ iterator begin() { return &m_spans[1]; }
+
+ private:
+ scanline_u8(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ private:
+ int m_min_x;
+ int m_last_x;
+ int m_y;
+ pod_array<cover_type> m_covers;
+ pod_array<span> m_spans;
+ span* m_cur_span;
+ };
+
+
+
+
+ //==========================================================scanline_u8_am
+ //
+ // The scanline container with alpha-masking
+ //
+ //------------------------------------------------------------------------
+ template<class AlphaMask>
+ class scanline_u8_am : public scanline_u8
+ {
+ public:
+ typedef scanline_u8 base_type;
+ typedef AlphaMask alpha_mask_type;
+ typedef base_type::cover_type cover_type;
+ typedef base_type::coord_type coord_type;
+
+ scanline_u8_am() : base_type(), m_alpha_mask(0) {}
+ scanline_u8_am(AlphaMask& am) : base_type(), m_alpha_mask(&am) {}
+
+ //--------------------------------------------------------------------
+ void finalize(int span_y)
+ {
+ base_type::finalize(span_y);
+ if(m_alpha_mask)
+ {
+ typename base_type::iterator span = base_type::begin();
+ unsigned count = base_type::num_spans();
+ do
+ {
+ m_alpha_mask->combine_hspan(span->x,
+ base_type::y(),
+ span->covers,
+ span->len);
+ ++span;
+ }
+ while(--count);
+ }
+ }
+
+ private:
+ AlphaMask* m_alpha_mask;
+ };
+
+
+
+
+ //===========================================================scanline32_u8
+ class scanline32_u8
+ {
+ public:
+ typedef scanline32_u8 self_type;
+ typedef int8u cover_type;
+ typedef int32 coord_type;
+
+ //--------------------------------------------------------------------
+ struct span
+ {
+ span() {}
+ span(coord_type x_, coord_type len_, cover_type* covers_) :
+ x(x_), len(len_), covers(covers_) {}
+
+ coord_type x;
+ coord_type len;
+ cover_type* covers;
+ };
+
+ typedef pod_bvector<span, 4> span_array_type;
+
+ //--------------------------------------------------------------------
+ class const_iterator
+ {
+ public:
+ const_iterator(const span_array_type& spans) :
+ m_spans(spans),
+ m_span_idx(0)
+ {}
+
+ const span& operator*() const { return m_spans[m_span_idx]; }
+ const span* operator->() const { return &m_spans[m_span_idx]; }
+
+ void operator ++ () { ++m_span_idx; }
+
+ private:
+ const span_array_type& m_spans;
+ unsigned m_span_idx;
+ };
+
+ //--------------------------------------------------------------------
+ class iterator
+ {
+ public:
+ iterator(span_array_type& spans) :
+ m_spans(spans),
+ m_span_idx(0)
+ {}
+
+ span& operator*() { return m_spans[m_span_idx]; }
+ span* operator->() { return &m_spans[m_span_idx]; }
+
+ void operator ++ () { ++m_span_idx; }
+
+ private:
+ span_array_type& m_spans;
+ unsigned m_span_idx;
+ };
+
+
+
+ //--------------------------------------------------------------------
+ scanline32_u8() :
+ m_min_x(0),
+ m_last_x(0x7FFFFFF0),
+ m_covers()
+ {}
+
+ //--------------------------------------------------------------------
+ void reset(int min_x, int max_x)
+ {
+ unsigned max_len = max_x - min_x + 2;
+ if(max_len > m_covers.size())
+ {
+ m_covers.resize(max_len);
+ }
+ m_last_x = 0x7FFFFFF0;
+ m_min_x = min_x;
+ m_spans.remove_all();
+ }
+
+ //--------------------------------------------------------------------
+ void add_cell(int x, unsigned cover)
+ {
+ x -= m_min_x;
+ m_covers[x] = cover_type(cover);
+ if(x == m_last_x+1)
+ {
+ m_spans.last().len++;
+ }
+ else
+ {
+ m_spans.add(span(coord_type(x + m_min_x), 1, &m_covers[x]));
+ }
+ m_last_x = x;
+ }
+
+ //--------------------------------------------------------------------
+ void add_cells(int x, unsigned len, const cover_type* covers)
+ {
+ x -= m_min_x;
+ memcpy(&m_covers[x], covers, len * sizeof(cover_type));
+ if(x == m_last_x+1)
+ {
+ m_spans.last().len += coord_type(len);
+ }
+ else
+ {
+ m_spans.add(span(coord_type(x + m_min_x),
+ coord_type(len),
+ &m_covers[x]));
+ }
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void add_span(int x, unsigned len, unsigned cover)
+ {
+ x -= m_min_x;
+ memset(&m_covers[x], cover, len);
+ if(x == m_last_x+1)
+ {
+ m_spans.last().len += coord_type(len);
+ }
+ else
+ {
+ m_spans.add(span(coord_type(x + m_min_x),
+ coord_type(len),
+ &m_covers[x]));
+ }
+ m_last_x = x + len - 1;
+ }
+
+ //--------------------------------------------------------------------
+ void finalize(int y)
+ {
+ m_y = y;
+ }
+
+ //--------------------------------------------------------------------
+ void reset_spans()
+ {
+ m_last_x = 0x7FFFFFF0;
+ m_spans.remove_all();
+ }
+
+ //--------------------------------------------------------------------
+ int y() const { return m_y; }
+ unsigned num_spans() const { return m_spans.size(); }
+ const_iterator begin() const { return const_iterator(m_spans); }
+ iterator begin() { return iterator(m_spans); }
+
+ private:
+ scanline32_u8(const self_type&);
+ const self_type& operator = (const self_type&);
+
+ private:
+ int m_min_x;
+ int m_last_x;
+ int m_y;
+ pod_array<cover_type> m_covers;
+ span_array_type m_spans;
+ };
+
+
+
+
+ //========================================================scanline32_u8_am
+ //
+ // The scanline container with alpha-masking
+ //
+ //------------------------------------------------------------------------
+ template<class AlphaMask>
+ class scanline32_u8_am : public scanline32_u8
+ {
+ public:
+ typedef scanline32_u8 base_type;
+ typedef AlphaMask alpha_mask_type;
+ typedef base_type::cover_type cover_type;
+ typedef base_type::coord_type coord_type;
+
+
+ scanline32_u8_am() : base_type(), m_alpha_mask(0) {}
+ scanline32_u8_am(AlphaMask& am) : base_type(), m_alpha_mask(&am) {}
+
+ //--------------------------------------------------------------------
+ void finalize(int span_y)
+ {
+ base_type::finalize(span_y);
+ if(m_alpha_mask)
+ {
+ typename base_type::iterator span = base_type::begin();
+ unsigned count = base_type::num_spans();
+ do
+ {
+ m_alpha_mask->combine_hspan(span->x,
+ base_type::y(),
+ span->covers,
+ span->len);
+ ++span;
+ }
+ while(--count);
+ }
+ }
+
+ private:
+ AlphaMask* m_alpha_mask;
+ };
+
+
+
+}
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_shorten_path.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_shorten_path.h
new file mode 100644
index 00000000000..dd9929ff974
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_shorten_path.h
@@ -0,0 +1,66 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_SHORTEN_PATH_INCLUDED
+#define AGG_SHORTEN_PATH_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_vertex_sequence.h"
+
+namespace agg
+{
+
+ //===========================================================shorten_path
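+ //
+ // Trims roughly "s" units of length from the end of the vertex sequence:
+ // trailing vertices whose segments fit entirely within the remaining "s"
+ // are removed, and the new last vertex is then slid back along the last
+ // remaining segment by the leftover amount. If too little of the path
+ // remains, the sequence is emptied.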
+ template<class VertexSequence>
+ void shorten_path(VertexSequence& vs, double s, unsigned closed = 0)
+ {
+ typedef typename VertexSequence::value_type vertex_type;
+
+ if(s > 0.0 && vs.size() > 1)
+ {
+ double d;
+ int n = int(vs.size() - 2);
+ while(n)
+ {
+ d = vs[n].dist;
+ if(d > s) break;
+ vs.remove_last();
+ s -= d;
+ --n;
+ }
+ if(vs.size() < 2)
+ {
+ vs.remove_all();
+ }
+ else
+ {
+ n = vs.size() - 1;
+ vertex_type& prev = vs[n-1];
+ vertex_type& last = vs[n];
+ d = (prev.dist - s) / prev.dist;
+ double x = prev.x + (last.x - prev.x) * d;
+ double y = prev.y + (last.y - prev.y) * d;
+ last.x = x;
+ last.y = y;
+ if(!prev(last)) vs.remove_last();
+ vs.close(closed != 0);
+ }
+ }
+ }
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_allocator.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_allocator.h
new file mode 100644
index 00000000000..201b69bb01e
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_allocator.h
@@ -0,0 +1,54 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_SPAN_ALLOCATOR_INCLUDED
+#define AGG_SPAN_ALLOCATOR_INCLUDED
+
+#include "agg_array.h"
+
+namespace agg
+{
+ //----------------------------------------------------------span_allocator
+ template<class ColorT> class span_allocator
+ {
+ public:
+ typedef ColorT color_type;
+
+ //--------------------------------------------------------------------
+ AGG_INLINE color_type* allocate(unsigned span_len)
+ {
+ if(span_len > m_span.size())
+ {
+ // To reduce the number of reallocs we align the
+ // span_len to 256 color elements.
+ // Well, I just like this number and it looks reasonable.
+ //-----------------------
+ m_span.resize(((span_len + 255) >> 8) << 8);
+ }
+ return &m_span[0];
+ }
+
+ AGG_INLINE color_type* span() { return &m_span[0]; }
+ AGG_INLINE unsigned max_span_len() const { return m_span.size(); }
+
+ private:
+ pod_array<color_type> m_span;
+ };
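+
+ // A minimal usage sketch (an illustration only; "my_span_gen" is a
+ // hypothetical span generator, not part of AGG). A scanline renderer
+ // typically asks the allocator for a scratch row of colors per span and
+ // lets the generator fill it in place:
+ //
+ //     span_allocator<rgba8> alloc;
+ //     rgba8* colors = alloc.allocate(span_len);   // reused, grows as needed
+ //     my_span_gen.generate(colors, x, y, span_len);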
+}
+
+
+#endif
+
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_converter.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_converter.h
new file mode 100644
index 00000000000..91d0f87c256
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_converter.h
@@ -0,0 +1,56 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_SPAN_CONVERTER_INCLUDED
+#define AGG_SPAN_CONVERTER_INCLUDED
+
+#include "agg_basics.h"
+
+namespace agg
+{
+ //----------------------------------------------------------span_converter
+ template<class SpanGenerator, class SpanConverter> class span_converter
+ {
+ public:
+ typedef typename SpanGenerator::color_type color_type;
+
+ span_converter(SpanGenerator& span_gen, SpanConverter& span_cnv) :
+ m_span_gen(&span_gen), m_span_cnv(&span_cnv) {}
+
+ void attach_generator(SpanGenerator& span_gen) { m_span_gen = &span_gen; }
+ void attach_converter(SpanConverter& span_cnv) { m_span_cnv = &span_cnv; }
+
+ //--------------------------------------------------------------------
+ void prepare()
+ {
+ m_span_gen->prepare();
+ m_span_cnv->prepare();
+ }
+
+ //--------------------------------------------------------------------
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ m_span_gen->generate(span, x, y, len);
+ m_span_cnv->generate(span, x, y, len);
+ }
+
+ private:
+ SpanGenerator* m_span_gen;
+ SpanConverter* m_span_cnv;
+ };
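+
+ // A minimal usage sketch (an illustration only; "Gradient" and "AlphaConv"
+ // stand for any two types exposing the prepare()/generate() interface used
+ // above). The converter post-processes the span the generator produced:
+ //
+ //     Gradient  gen(...);
+ //     AlphaConv cnv(...);
+ //     span_converter<Gradient, AlphaConv> sc(gen, cnv);
+ //     sc.generate(span, x, y, len); // gen fills the span, cnv then modifies it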
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_gouraud.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_gouraud.h
new file mode 100644
index 00000000000..2986c88feec
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_gouraud.h
@@ -0,0 +1,172 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_SPAN_GOURAUD_INCLUDED
+#define AGG_SPAN_GOURAUD_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_math.h"
+
+namespace agg
+{
+
+ //============================================================span_gouraud
+ template<class ColorT> class span_gouraud
+ {
+ public:
+ typedef ColorT color_type;
+
+ struct coord_type
+ {
+ double x;
+ double y;
+ color_type color;
+ };
+
+ //--------------------------------------------------------------------
+ span_gouraud() :
+ m_vertex(0)
+ {
+ m_cmd[0] = path_cmd_stop;
+ }
+
+ //--------------------------------------------------------------------
+ span_gouraud(const color_type& c1,
+ const color_type& c2,
+ const color_type& c3,
+ double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double d) :
+ m_vertex(0)
+ {
+ colors(c1, c2, c3);
+ triangle(x1, y1, x2, y2, x3, y3, d);
+ }
+
+ //--------------------------------------------------------------------
+ void colors(ColorT c1, ColorT c2, ColorT c3)
+ {
+ m_coord[0].color = c1;
+ m_coord[1].color = c2;
+ m_coord[2].color = c3;
+ }
+
+ //--------------------------------------------------------------------
+ // Sets the triangle and dilates it if needed.
+ // The trick here is to calculate beveled joins in the vertices of the
+ // triangle and render it as a 6-vertex polygon.
+ // It's necessary to achieve numerical stability.
+ // However, the coordinates to interpolate colors are calculated
+ // as miter joins (calc_intersection).
+ void triangle(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double d)
+ {
+ m_coord[0].x = m_x[0] = x1;
+ m_coord[0].y = m_y[0] = y1;
+ m_coord[1].x = m_x[1] = x2;
+ m_coord[1].y = m_y[1] = y2;
+ m_coord[2].x = m_x[2] = x3;
+ m_coord[2].y = m_y[2] = y3;
+ m_cmd[0] = path_cmd_move_to;
+ m_cmd[1] = path_cmd_line_to;
+ m_cmd[2] = path_cmd_line_to;
+ m_cmd[3] = path_cmd_stop;
+
+ if(d != 0.0)
+ {
+ dilate_triangle(m_coord[0].x, m_coord[0].y,
+ m_coord[1].x, m_coord[1].y,
+ m_coord[2].x, m_coord[2].y,
+ m_x, m_y, d);
+
+ calc_intersection(m_x[4], m_y[4], m_x[5], m_y[5],
+ m_x[0], m_y[0], m_x[1], m_y[1],
+ &m_coord[0].x, &m_coord[0].y);
+
+ calc_intersection(m_x[0], m_y[0], m_x[1], m_y[1],
+ m_x[2], m_y[2], m_x[3], m_y[3],
+ &m_coord[1].x, &m_coord[1].y);
+
+ calc_intersection(m_x[2], m_y[2], m_x[3], m_y[3],
+ m_x[4], m_y[4], m_x[5], m_y[5],
+ &m_coord[2].x, &m_coord[2].y);
+ m_cmd[3] = path_cmd_line_to;
+ m_cmd[4] = path_cmd_line_to;
+ m_cmd[5] = path_cmd_line_to;
+ m_cmd[6] = path_cmd_stop;
+ }
+ }
+
+ //--------------------------------------------------------------------
+ // Vertex Source Interface to feed the coordinates to the rasterizer
+ void rewind(unsigned)
+ {
+ m_vertex = 0;
+ }
+
+ //--------------------------------------------------------------------
+ unsigned vertex(double* x, double* y)
+ {
+ *x = m_x[m_vertex];
+ *y = m_y[m_vertex];
+ return m_cmd[m_vertex++];
+ }
+
+ protected:
+ //--------------------------------------------------------------------
+ void arrange_vertices(coord_type* coord) const
+ {
+ coord[0] = m_coord[0];
+ coord[1] = m_coord[1];
+ coord[2] = m_coord[2];
+
+ if(m_coord[0].y > m_coord[2].y)
+ {
+ coord[0] = m_coord[2];
+ coord[2] = m_coord[0];
+ }
+
+ coord_type tmp;
+ if(coord[0].y > coord[1].y)
+ {
+ tmp = coord[1];
+ coord[1] = coord[0];
+ coord[0] = tmp;
+ }
+
+ if(coord[1].y > coord[2].y)
+ {
+ tmp = coord[2];
+ coord[2] = coord[1];
+ coord[1] = tmp;
+ }
+ }
+
+ private:
+ //--------------------------------------------------------------------
+ coord_type m_coord[3];
+ double m_x[8];
+ double m_y[8];
+ unsigned m_cmd[8];
+ unsigned m_vertex;
+ };
+
+}
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_gouraud_rgba.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_gouraud_rgba.h
new file mode 100644
index 00000000000..1b285720297
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_gouraud_rgba.h
@@ -0,0 +1,277 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_SPAN_GOURAUD_RGBA_INCLUDED
+#define AGG_SPAN_GOURAUD_RGBA_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_color_rgba.h"
+#include "agg_dda_line.h"
+#include "agg_span_gouraud.h"
+
+namespace agg
+{
+
+ //=======================================================span_gouraud_rgba
+ template<class ColorT> class span_gouraud_rgba : public span_gouraud<ColorT>
+ {
+ public:
+ typedef ColorT color_type;
+ typedef typename ColorT::value_type value_type;
+ typedef span_gouraud<color_type> base_type;
+ typedef typename base_type::coord_type coord_type;
+ enum subpixel_scale_e
+ {
+ subpixel_shift = 4,
+ subpixel_scale = 1 << subpixel_shift
+ };
+
+ private:
+ //--------------------------------------------------------------------
+ struct rgba_calc
+ {
+ void init(const coord_type& c1, const coord_type& c2)
+ {
+ m_x1 = c1.x - 0.5;
+ m_y1 = c1.y - 0.5;
+ m_dx = c2.x - c1.x;
+ double dy = c2.y - c1.y;
+ m_1dy = (dy < 1e-5) ? 1e5 : 1.0 / dy;
+ m_r1 = c1.color.r;
+ m_g1 = c1.color.g;
+ m_b1 = c1.color.b;
+ m_a1 = c1.color.a;
+ m_dr = c2.color.r - m_r1;
+ m_dg = c2.color.g - m_g1;
+ m_db = c2.color.b - m_b1;
+ m_da = c2.color.a - m_a1;
+ }
+
+ void calc(double y)
+ {
+ double k = (y - m_y1) * m_1dy;
+ if(k < 0.0) k = 0.0;
+ if(k > 1.0) k = 1.0;
+ m_r = m_r1 + iround(m_dr * k);
+ m_g = m_g1 + iround(m_dg * k);
+ m_b = m_b1 + iround(m_db * k);
+ m_a = m_a1 + iround(m_da * k);
+ m_x = iround((m_x1 + m_dx * k) * subpixel_scale);
+ }
+
+ double m_x1;
+ double m_y1;
+ double m_dx;
+ double m_1dy;
+ int m_r1;
+ int m_g1;
+ int m_b1;
+ int m_a1;
+ int m_dr;
+ int m_dg;
+ int m_db;
+ int m_da;
+ int m_r;
+ int m_g;
+ int m_b;
+ int m_a;
+ int m_x;
+ };
+
+ public:
+
+ //--------------------------------------------------------------------
+ span_gouraud_rgba() {}
+ span_gouraud_rgba(const color_type& c1,
+ const color_type& c2,
+ const color_type& c3,
+ double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double d = 0) :
+ base_type(c1, c2, c3, x1, y1, x2, y2, x3, y3, d)
+ {}
+
+ //--------------------------------------------------------------------
+ void prepare()
+ {
+ coord_type coord[3];
+ base_type::arrange_vertices(coord);
+
+ m_y2 = int(coord[1].y);
+
+ m_swap = cross_product(coord[0].x, coord[0].y,
+ coord[2].x, coord[2].y,
+ coord[1].x, coord[1].y) < 0.0;
+
+ m_rgba1.init(coord[0], coord[2]);
+ m_rgba2.init(coord[0], coord[1]);
+ m_rgba3.init(coord[1], coord[2]);
+ }
+
+ //--------------------------------------------------------------------
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ m_rgba1.calc(y);//(m_rgba1.m_1dy > 2) ? m_rgba1.m_y1 : y);
+ const rgba_calc* pc1 = &m_rgba1;
+ const rgba_calc* pc2 = &m_rgba2;
+
+ if(y <= m_y2)
+ {
+ // Bottom part of the triangle (first subtriangle)
+ //-------------------------
+ m_rgba2.calc(y + m_rgba2.m_1dy);
+ }
+ else
+ {
+ // Upper part (second subtriangle)
+ m_rgba3.calc(y - m_rgba3.m_1dy);
+ //-------------------------
+ pc2 = &m_rgba3;
+ }
+
+ if(m_swap)
+ {
+ // This means the triangle is oriented clockwise,
+ // so we need to swap the controlling structures.
+ //-------------------------
+ const rgba_calc* t = pc2;
+ pc2 = pc1;
+ pc1 = t;
+ }
+
+ // Get the horizontal length with subpixel accuracy
+ // and protect it from division by zero
+ //-------------------------
+ int nlen = abs(pc2->m_x - pc1->m_x);
+ if(nlen <= 0) nlen = 1;
+
+ dda_line_interpolator<14> r(pc1->m_r, pc2->m_r, nlen);
+ dda_line_interpolator<14> g(pc1->m_g, pc2->m_g, nlen);
+ dda_line_interpolator<14> b(pc1->m_b, pc2->m_b, nlen);
+ dda_line_interpolator<14> a(pc1->m_a, pc2->m_a, nlen);
+
+ // Calculate the starting point of the gradient with subpixel
+ // accuracy and correct (roll back) the interpolators.
+ // This operation will also clip the beginning of the span
+ // if necessary.
+ //-------------------------
+ int start = pc1->m_x - (x << subpixel_shift);
+ r -= start;
+ g -= start;
+ b -= start;
+ a -= start;
+ nlen += start;
+
+ int vr, vg, vb, va;
+ enum lim_e { lim = color_type::base_mask };
+
+ // Beginning part of the span. Since we rolled back the
+ // interpolators, the color values may overflow.
+ // So we render the beginning part while checking
+ // for overflow. This lasts while "start" is positive;
+ // typically it's 1-2 pixels, but may be more in some cases.
+ //-------------------------
+ while(len && start > 0)
+ {
+ vr = r.y();
+ vg = g.y();
+ vb = b.y();
+ va = a.y();
+ if(vr < 0) { vr = 0; }; if(vr > lim) { vr = lim; };
+ if(vg < 0) { vg = 0; }; if(vg > lim) { vg = lim; };
+ if(vb < 0) { vb = 0; }; if(vb > lim) { vb = lim; };
+ if(va < 0) { va = 0; }; if(va > lim) { va = lim; };
+ span->r = (value_type)vr;
+ span->g = (value_type)vg;
+ span->b = (value_type)vb;
+ span->a = (value_type)va;
+ r += subpixel_scale;
+ g += subpixel_scale;
+ b += subpixel_scale;
+ a += subpixel_scale;
+ nlen -= subpixel_scale;
+ start -= subpixel_scale;
+ ++span;
+ --len;
+ }
+
+ // Middle part, no checking for overflow.
+ // Actual spans can be longer than the calculated length
+ // because of anti-aliasing, thus, the interpolators can
+ // overflow. But while "nlen" is positive we are safe.
+ //-------------------------
+ while(len && nlen > 0)
+ {
+ span->r = (value_type)r.y();
+ span->g = (value_type)g.y();
+ span->b = (value_type)b.y();
+ span->a = (value_type)a.y();
+ r += subpixel_scale;
+ g += subpixel_scale;
+ b += subpixel_scale;
+ a += subpixel_scale;
+ nlen -= subpixel_scale;
+ ++span;
+ --len;
+ }
+
+ // Ending part; checking for overflow.
+ // Typically it's 1-2 pixels, but may be more in some cases.
+ //-------------------------
+ while(len)
+ {
+ vr = r.y();
+ vg = g.y();
+ vb = b.y();
+ va = a.y();
+ if(vr < 0) { vr = 0; }; if(vr > lim) { vr = lim; };
+ if(vg < 0) { vg = 0; }; if(vg > lim) { vg = lim; };
+ if(vb < 0) { vb = 0; }; if(vb > lim) { vb = lim; };
+ if(va < 0) { va = 0; }; if(va > lim) { va = lim; };
+ span->r = (value_type)vr;
+ span->g = (value_type)vg;
+ span->b = (value_type)vb;
+ span->a = (value_type)va;
+ r += subpixel_scale;
+ g += subpixel_scale;
+ b += subpixel_scale;
+ a += subpixel_scale;
+ ++span;
+ --len;
+ }
+ }
+
+ private:
+ bool m_swap;
+ int m_y2;
+ rgba_calc m_rgba1;
+ rgba_calc m_rgba2;
+ rgba_calc m_rgba3;
+ };
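+
+ // A minimal usage sketch (an illustration only; it assumes the usual AGG
+ // pipeline pieces declared elsewhere: rasterizer_scanline_aa, a scanline,
+ // a base renderer and the render_scanlines_aa() helper). The gouraud span
+ // object serves both as the vertex source and as the span generator:
+ //
+ //     span_gouraud_rgba<rgba8> gouraud(c1, c2, c3, x1, y1, x2, y2, x3, y3);
+ //     span_allocator<rgba8> alloc;
+ //     ras.add_path(gouraud);
+ //     render_scanlines_aa(ras, sl, ren_base, alloc, gouraud);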
+
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter.h
new file mode 100644
index 00000000000..2f613e5d86c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter.h
@@ -0,0 +1,246 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Image transformations with filtering. Span generator base class
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_SPAN_IMAGE_FILTER_INCLUDED
+#define AGG_SPAN_IMAGE_FILTER_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_image_filters.h"
+#include "agg_span_interpolator_linear.h"
+
+namespace agg
+{
+
+ //-------------------------------------------------------span_image_filter
+ template<class Source, class Interpolator> class span_image_filter
+ {
+ public:
+ typedef Source source_type;
+ typedef Interpolator interpolator_type;
+
+ //--------------------------------------------------------------------
+ span_image_filter() {}
+ span_image_filter(source_type& src,
+ interpolator_type& interpolator,
+ image_filter_lut* filter) :
+ m_src(&src),
+ m_interpolator(&interpolator),
+ m_filter(filter),
+ m_dx_dbl(0.5),
+ m_dy_dbl(0.5),
+ m_dx_int(image_subpixel_scale / 2),
+ m_dy_int(image_subpixel_scale / 2)
+ {}
+ void attach(source_type& v) { m_src = &v; }
+
+ //--------------------------------------------------------------------
+ source_type& source() { return *m_src; }
+ const source_type& source() const { return *m_src; }
+ const image_filter_lut& filter() const { return *m_filter; }
+ int filter_dx_int() const { return m_dx_int; }
+ int filter_dy_int() const { return m_dy_int; }
+ double filter_dx_dbl() const { return m_dx_dbl; }
+ double filter_dy_dbl() const { return m_dy_dbl; }
+
+ //--------------------------------------------------------------------
+ void interpolator(interpolator_type& v) { m_interpolator = &v; }
+ void filter(image_filter_lut& v) { m_filter = &v; }
+ void filter_offset(double dx, double dy)
+ {
+ m_dx_dbl = dx;
+ m_dy_dbl = dy;
+ m_dx_int = iround(dx * image_subpixel_scale);
+ m_dy_int = iround(dy * image_subpixel_scale);
+ }
+ void filter_offset(double d) { filter_offset(d, d); }
+
+ //--------------------------------------------------------------------
+ interpolator_type& interpolator() { return *m_interpolator; }
+
+ //--------------------------------------------------------------------
+ void prepare() {}
+
+ //--------------------------------------------------------------------
+ private:
+ source_type* m_src;
+ interpolator_type* m_interpolator;
+ image_filter_lut* m_filter;
+ double m_dx_dbl;
+ double m_dy_dbl;
+ unsigned m_dx_int;
+ unsigned m_dy_int;
+ };
+
+
+
+
+ //==============================================span_image_resample_affine
+ template<class Source>
+ class span_image_resample_affine :
+ public span_image_filter<Source, span_interpolator_linear<trans_affine> >
+ {
+ public:
+ typedef Source source_type;
+ typedef span_interpolator_linear<trans_affine> interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+
+ //--------------------------------------------------------------------
+ span_image_resample_affine() :
+ m_scale_limit(200.0),
+ m_blur_x(1.0),
+ m_blur_y(1.0)
+ {}
+
+ //--------------------------------------------------------------------
+ span_image_resample_affine(source_type& src,
+ interpolator_type& inter,
+ image_filter_lut& filter) :
+ base_type(src, inter, &filter),
+ m_scale_limit(200.0),
+ m_blur_x(1.0),
+ m_blur_y(1.0)
+ {}
+
+
+ //--------------------------------------------------------------------
+ int scale_limit() const { return uround(m_scale_limit); }
+ void scale_limit(int v) { m_scale_limit = v; }
+
+ //--------------------------------------------------------------------
+ double blur_x() const { return m_blur_x; }
+ double blur_y() const { return m_blur_y; }
+ void blur_x(double v) { m_blur_x = v; }
+ void blur_y(double v) { m_blur_y = v; }
+ void blur(double v) { m_blur_x = m_blur_y = v; }
+
+ //--------------------------------------------------------------------
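+ // Computes the resampling radii (m_rx/m_ry and their inverses, in
+ // image_subpixel_scale units) from the affine transform's absolute
+ // scaling, clamping the combined downscale factor to m_scale_limit
+ // and applying the blur factors.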
+ void prepare()
+ {
+ double scale_x;
+ double scale_y;
+
+ base_type::interpolator().transformer().scaling_abs(&scale_x, &scale_y);
+
+ if(scale_x * scale_y > m_scale_limit)
+ {
+ scale_x = scale_x * m_scale_limit / (scale_x * scale_y);
+ scale_y = scale_y * m_scale_limit / (scale_x * scale_y);
+ }
+
+ if(scale_x < 1) scale_x = 1;
+ if(scale_y < 1) scale_y = 1;
+
+ if(scale_x > m_scale_limit) scale_x = m_scale_limit;
+ if(scale_y > m_scale_limit) scale_y = m_scale_limit;
+
+ scale_x *= m_blur_x;
+ scale_y *= m_blur_y;
+
+ if(scale_x < 1) scale_x = 1;
+ if(scale_y < 1) scale_y = 1;
+
+ m_rx = uround( scale_x * double(image_subpixel_scale));
+ m_rx_inv = uround(1.0/scale_x * double(image_subpixel_scale));
+
+ m_ry = uround( scale_y * double(image_subpixel_scale));
+ m_ry_inv = uround(1.0/scale_y * double(image_subpixel_scale));
+ }
+
+ protected:
+ int m_rx;
+ int m_ry;
+ int m_rx_inv;
+ int m_ry_inv;
+
+ private:
+ double m_scale_limit;
+ double m_blur_x;
+ double m_blur_y;
+ };
+
+
+
+ //=====================================================span_image_resample
+ template<class Source, class Interpolator>
+ class span_image_resample :
+ public span_image_filter<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+
+ //--------------------------------------------------------------------
+ span_image_resample() :
+ m_scale_limit(20),
+ m_blur_x(image_subpixel_scale),
+ m_blur_y(image_subpixel_scale)
+ {}
+
+ //--------------------------------------------------------------------
+ span_image_resample(source_type& src,
+ interpolator_type& inter,
+ image_filter_lut& filter) :
+ base_type(src, inter, &filter),
+ m_scale_limit(20),
+ m_blur_x(image_subpixel_scale),
+ m_blur_y(image_subpixel_scale)
+ {}
+
+ //--------------------------------------------------------------------
+ int scale_limit() const { return m_scale_limit; }
+ void scale_limit(int v) { m_scale_limit = v; }
+
+ //--------------------------------------------------------------------
+ double blur_x() const { return double(m_blur_x) / double(image_subpixel_scale); }
+ double blur_y() const { return double(m_blur_y) / double(image_subpixel_scale); }
+ void blur_x(double v) { m_blur_x = uround(v * double(image_subpixel_scale)); }
+ void blur_y(double v) { m_blur_y = uround(v * double(image_subpixel_scale)); }
+ void blur(double v) { m_blur_x =
+ m_blur_y = uround(v * double(image_subpixel_scale)); }
+
+ protected:
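+        // Clamp the kernel scales reported by the interpolator to the range
+        // [image_subpixel_scale, image_subpixel_scale * m_scale_limit],
+        // apply the blur factors, and never let the result drop below one
+        // full pixel.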
+ AGG_INLINE void adjust_scale(int* rx, int* ry)
+ {
+ if(*rx < image_subpixel_scale) *rx = image_subpixel_scale;
+ if(*ry < image_subpixel_scale) *ry = image_subpixel_scale;
+ if(*rx > image_subpixel_scale * m_scale_limit)
+ {
+ *rx = image_subpixel_scale * m_scale_limit;
+ }
+ if(*ry > image_subpixel_scale * m_scale_limit)
+ {
+ *ry = image_subpixel_scale * m_scale_limit;
+ }
+ *rx = (*rx * m_blur_x) >> image_subpixel_shift;
+ *ry = (*ry * m_blur_y) >> image_subpixel_shift;
+ if(*rx < image_subpixel_scale) *rx = image_subpixel_scale;
+ if(*ry < image_subpixel_scale) *ry = image_subpixel_scale;
+ }
+
+ int m_scale_limit;
+ int m_blur_x;
+ int m_blur_y;
+ };
+
+
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter_gray.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter_gray.h
new file mode 100644
index 00000000000..e2c688e004c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter_gray.h
@@ -0,0 +1,723 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_SPAN_IMAGE_FILTER_GRAY_INCLUDED
+#define AGG_SPAN_IMAGE_FILTER_GRAY_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_color_gray.h"
+#include "agg_span_image_filter.h"
+
+
+namespace agg
+{
+
+ //==============================================span_image_filter_gray_nn
+ template<class Source, class Interpolator>
+ class span_image_filter_gray_nn :
+ public span_image_filter<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ //--------------------------------------------------------------------
+ span_image_filter_gray_nn() {}
+ span_image_filter_gray_nn(source_type& src,
+ interpolator_type& inter) :
+ base_type(src, inter, 0)
+ {}
+
+ //--------------------------------------------------------------------
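+        // Nearest-neighbor sampling: emit the single closest source pixel
+        // as an opaque gray value.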
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+ do
+ {
+ base_type::interpolator().coordinates(&x, &y);
+ span->v = *(const value_type*)
+ base_type::source().span(x >> image_subpixel_shift,
+ y >> image_subpixel_shift,
+ 1);
+ span->a = color_type::full_value();
+ ++span;
+ ++base_type::interpolator();
+ } while(--len);
+ }
+ };
+
+
+
+ //=========================================span_image_filter_gray_bilinear
+ template<class Source, class Interpolator>
+ class span_image_filter_gray_bilinear :
+ public span_image_filter<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ //--------------------------------------------------------------------
+ span_image_filter_gray_bilinear() {}
+ span_image_filter_gray_bilinear(source_type& src,
+ interpolator_type& inter) :
+ base_type(src, inter, 0)
+ {}
+
+
+ //--------------------------------------------------------------------
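+        // Bilinear filtering: fetch the 2x2 block of source pixels around
+        // each sample point and blend them, weighting every pixel by the
+        // fractional subpixel offsets x_hr and y_hr.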
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+ long_type fg;
+ const value_type *fg_ptr;
+ do
+ {
+ int x_hr;
+ int y_hr;
+
+ base_type::interpolator().coordinates(&x_hr, &y_hr);
+
+ x_hr -= base_type::filter_dx_int();
+ y_hr -= base_type::filter_dy_int();
+
+ int x_lr = x_hr >> image_subpixel_shift;
+ int y_lr = y_hr >> image_subpixel_shift;
+
+ fg = 0;
+
+ x_hr &= image_subpixel_mask;
+ y_hr &= image_subpixel_mask;
+
+ fg_ptr = (const value_type*)base_type::source().span(x_lr, y_lr, 2);
+ fg += *fg_ptr * (image_subpixel_scale - x_hr) * (image_subpixel_scale - y_hr);
+
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ fg += *fg_ptr * x_hr * (image_subpixel_scale - y_hr);
+
+ fg_ptr = (const value_type*)base_type::source().next_y();
+ fg += *fg_ptr * (image_subpixel_scale - x_hr) * y_hr;
+
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ fg += *fg_ptr * x_hr * y_hr;
+
+ span->v = color_type::downshift(fg, image_subpixel_shift * 2);
+ span->a = color_type::full_value();
+ ++span;
+ ++base_type::interpolator();
+
+ } while(--len);
+ }
+ };
+
+
+ //====================================span_image_filter_gray_bilinear_clip
+ template<class Source, class Interpolator>
+ class span_image_filter_gray_bilinear_clip :
+ public span_image_filter<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ //--------------------------------------------------------------------
+ span_image_filter_gray_bilinear_clip() {}
+ span_image_filter_gray_bilinear_clip(source_type& src,
+ const color_type& back_color,
+ interpolator_type& inter) :
+ base_type(src, inter, 0),
+ m_back_color(back_color)
+ {}
+ const color_type& background_color() const { return m_back_color; }
+ void background_color(const color_type& v) { m_back_color = v; }
+
+ //--------------------------------------------------------------------
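+        // Bilinear filtering with explicit clipping instead of a clipping
+        // source adaptor: samples fully inside the image use the fast path,
+        // samples straddling the border blend each of the four corners with
+        // the background color, and samples fully outside just return the
+        // background color.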
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+ long_type fg;
+ long_type src_alpha;
+ value_type back_v = m_back_color.v;
+ value_type back_a = m_back_color.a;
+
+ const value_type *fg_ptr;
+
+ int maxx = base_type::source().width() - 1;
+ int maxy = base_type::source().height() - 1;
+
+ do
+ {
+ int x_hr;
+ int y_hr;
+
+ base_type::interpolator().coordinates(&x_hr, &y_hr);
+
+ x_hr -= base_type::filter_dx_int();
+ y_hr -= base_type::filter_dy_int();
+
+ int x_lr = x_hr >> image_subpixel_shift;
+ int y_lr = y_hr >> image_subpixel_shift;
+
+ if(x_lr >= 0 && y_lr >= 0 &&
+ x_lr < maxx && y_lr < maxy)
+ {
+ fg = 0;
+
+ x_hr &= image_subpixel_mask;
+ y_hr &= image_subpixel_mask;
+ fg_ptr = (const value_type*)base_type::source().row_ptr(y_lr) + x_lr;
+
+ fg += *fg_ptr++ * (image_subpixel_scale - x_hr) * (image_subpixel_scale - y_hr);
+ fg += *fg_ptr++ * (image_subpixel_scale - y_hr) * x_hr;
+
+ ++y_lr;
+ fg_ptr = (const value_type*)base_type::source().row_ptr(y_lr) + x_lr;
+
+ fg += *fg_ptr++ * (image_subpixel_scale - x_hr) * y_hr;
+ fg += *fg_ptr++ * x_hr * y_hr;
+
+ fg = color_type::downshift(fg, image_subpixel_shift * 2);
+ src_alpha = color_type::full_value();
+ }
+ else
+ {
+ unsigned weight;
+ if(x_lr < -1 || y_lr < -1 ||
+ x_lr > maxx || y_lr > maxy)
+ {
+ fg = back_v;
+ src_alpha = back_a;
+ }
+ else
+ {
+ fg = src_alpha = 0;
+
+ x_hr &= image_subpixel_mask;
+ y_hr &= image_subpixel_mask;
+
+ weight = (image_subpixel_scale - x_hr) *
+ (image_subpixel_scale - y_hr);
+ if(x_lr >= 0 && y_lr >= 0 &&
+ x_lr <= maxx && y_lr <= maxy)
+ {
+ fg += weight *
+ *((const value_type*)base_type::source().row_ptr(y_lr) + x_lr);
+ src_alpha += weight * color_type::full_value();
+ }
+ else
+ {
+ fg += back_v * weight;
+ src_alpha += back_a * weight;
+ }
+
+ x_lr++;
+
+ weight = x_hr * (image_subpixel_scale - y_hr);
+ if(x_lr >= 0 && y_lr >= 0 &&
+ x_lr <= maxx && y_lr <= maxy)
+ {
+ fg += weight *
+ *((const value_type*)base_type::source().row_ptr(y_lr) + x_lr);
+ src_alpha += weight * color_type::full_value();
+ }
+ else
+ {
+ fg += back_v * weight;
+ src_alpha += back_a * weight;
+ }
+
+ x_lr--;
+ y_lr++;
+
+ weight = (image_subpixel_scale - x_hr) * y_hr;
+ if(x_lr >= 0 && y_lr >= 0 &&
+ x_lr <= maxx && y_lr <= maxy)
+ {
+ fg += weight *
+ *((const value_type*)base_type::source().row_ptr(y_lr) + x_lr);
+ src_alpha += weight * color_type::full_value();
+ }
+ else
+ {
+ fg += back_v * weight;
+ src_alpha += back_a * weight;
+ }
+
+ x_lr++;
+
+ weight = x_hr * y_hr;
+ if(x_lr >= 0 && y_lr >= 0 &&
+ x_lr <= maxx && y_lr <= maxy)
+ {
+ fg += weight *
+ *((const value_type*)base_type::source().row_ptr(y_lr) + x_lr);
+ src_alpha += weight * color_type::full_value();
+ }
+ else
+ {
+ fg += back_v * weight;
+ src_alpha += back_a * weight;
+ }
+
+ fg = color_type::downshift(fg, image_subpixel_shift * 2);
+ src_alpha = color_type::downshift(src_alpha, image_subpixel_shift * 2);
+ }
+ }
+
+ span->v = (value_type)fg;
+ span->a = (value_type)src_alpha;
+ ++span;
+ ++base_type::interpolator();
+
+ } while(--len);
+ }
+ private:
+ color_type m_back_color;
+ };
+
+
+
+ //==============================================span_image_filter_gray_2x2
+ template<class Source, class Interpolator>
+ class span_image_filter_gray_2x2 :
+ public span_image_filter<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ //--------------------------------------------------------------------
+ span_image_filter_gray_2x2() {}
+ span_image_filter_gray_2x2(source_type& src,
+ interpolator_type& inter,
+ image_filter_lut& filter) :
+ base_type(src, inter, &filter)
+ {}
+
+
+ //--------------------------------------------------------------------
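+        // 2x2 filtering with an arbitrary filter LUT: only the four taps
+        // around the sample point are evaluated; weight_array is offset so
+        // the central part of the kernel is indexed by the subpixel
+        // fractions.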
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+
+ long_type fg;
+
+ const value_type *fg_ptr;
+ const int16* weight_array = base_type::filter().weight_array() +
+ ((base_type::filter().diameter()/2 - 1) <<
+ image_subpixel_shift);
+ do
+ {
+ int x_hr;
+ int y_hr;
+
+ base_type::interpolator().coordinates(&x_hr, &y_hr);
+
+ x_hr -= base_type::filter_dx_int();
+ y_hr -= base_type::filter_dy_int();
+
+ int x_lr = x_hr >> image_subpixel_shift;
+ int y_lr = y_hr >> image_subpixel_shift;
+
+ unsigned weight;
+ fg = 0;
+
+ x_hr &= image_subpixel_mask;
+ y_hr &= image_subpixel_mask;
+
+ fg_ptr = (const value_type*)base_type::source().span(x_lr, y_lr, 2);
+ weight = (weight_array[x_hr + image_subpixel_scale] *
+ weight_array[y_hr + image_subpixel_scale] +
+ image_filter_scale / 2) >>
+ image_filter_shift;
+ fg += weight * *fg_ptr;
+
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ weight = (weight_array[x_hr] *
+ weight_array[y_hr + image_subpixel_scale] +
+ image_filter_scale / 2) >>
+ image_filter_shift;
+ fg += weight * *fg_ptr;
+
+ fg_ptr = (const value_type*)base_type::source().next_y();
+ weight = (weight_array[x_hr + image_subpixel_scale] *
+ weight_array[y_hr] +
+ image_filter_scale / 2) >>
+ image_filter_shift;
+ fg += weight * *fg_ptr;
+
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ weight = (weight_array[x_hr] *
+ weight_array[y_hr] +
+ image_filter_scale / 2) >>
+ image_filter_shift;
+ fg += weight * *fg_ptr;
+
+ fg >>= image_filter_shift;
+ if(fg > color_type::full_value()) fg = color_type::full_value();
+
+ span->v = (value_type)fg;
+ span->a = color_type::full_value();
+ ++span;
+ ++base_type::interpolator();
+ } while(--len);
+ }
+ };
+
+
+
+ //==================================================span_image_filter_gray
+ template<class Source, class Interpolator>
+ class span_image_filter_gray :
+ public span_image_filter<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ //--------------------------------------------------------------------
+ span_image_filter_gray() {}
+ span_image_filter_gray(source_type& src,
+ interpolator_type& inter,
+ image_filter_lut& filter) :
+ base_type(src, inter, &filter)
+ {}
+
+ //--------------------------------------------------------------------
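+        // General filtering: accumulate a diameter x diameter neighborhood
+        // of source pixels, weighting each one with the separable filter
+        // LUT, then downshift by image_filter_shift and clamp to the valid
+        // gray range.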
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+
+ long_type fg;
+ const value_type *fg_ptr;
+
+ unsigned diameter = base_type::filter().diameter();
+ int start = base_type::filter().start();
+ const int16* weight_array = base_type::filter().weight_array();
+
+ int x_count;
+ int weight_y;
+
+ do
+ {
+ base_type::interpolator().coordinates(&x, &y);
+
+ x -= base_type::filter_dx_int();
+ y -= base_type::filter_dy_int();
+
+ int x_hr = x;
+ int y_hr = y;
+
+ int x_lr = x_hr >> image_subpixel_shift;
+ int y_lr = y_hr >> image_subpixel_shift;
+
+ fg = 0;
+
+ int x_fract = x_hr & image_subpixel_mask;
+ unsigned y_count = diameter;
+
+ y_hr = image_subpixel_mask - (y_hr & image_subpixel_mask);
+ fg_ptr = (const value_type*)base_type::source().span(x_lr + start,
+ y_lr + start,
+ diameter);
+ for(;;)
+ {
+ x_count = diameter;
+ weight_y = weight_array[y_hr];
+ x_hr = image_subpixel_mask - x_fract;
+ for(;;)
+ {
+ fg += *fg_ptr *
+ ((weight_y * weight_array[x_hr] +
+ image_filter_scale / 2) >>
+ image_filter_shift);
+ if(--x_count == 0) break;
+ x_hr += image_subpixel_scale;
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ }
+
+ if(--y_count == 0) break;
+ y_hr += image_subpixel_scale;
+ fg_ptr = (const value_type*)base_type::source().next_y();
+ }
+
+ fg = color_type::downshift(fg, image_filter_shift);
+ if(fg < 0) fg = 0;
+ if(fg > color_type::full_value()) fg = color_type::full_value();
+ span->v = (value_type)fg;
+ span->a = color_type::full_value();
+
+ ++span;
+ ++base_type::interpolator();
+
+ } while(--len);
+ }
+ };
+
+
+
+ //=========================================span_image_resample_gray_affine
+ template<class Source>
+ class span_image_resample_gray_affine :
+ public span_image_resample_affine<Source>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef span_image_resample_affine<source_type> base_type;
+ typedef typename base_type::interpolator_type interpolator_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::long_type long_type;
+ enum base_scale_e
+ {
+ downscale_shift = image_filter_shift
+ };
+
+ //--------------------------------------------------------------------
+ span_image_resample_gray_affine() {}
+ span_image_resample_gray_affine(source_type& src,
+ interpolator_type& inter,
+ image_filter_lut& filter) :
+ base_type(src, inter, filter)
+ {}
+
+
+ //--------------------------------------------------------------------
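+        // Downscaling resampler: the kernel footprint is stretched by the
+        // per-axis scales computed in prepare(), every covered source pixel
+        // is weighted through the filter LUT, and the accumulated sum is
+        // normalized by total_weight.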
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+
+ long_type fg;
+
+ int diameter = base_type::filter().diameter();
+ int filter_scale = diameter << image_subpixel_shift;
+ int radius_x = (diameter * base_type::m_rx) >> 1;
+ int radius_y = (diameter * base_type::m_ry) >> 1;
+ int len_x_lr =
+ (diameter * base_type::m_rx + image_subpixel_mask) >>
+ image_subpixel_shift;
+
+ const int16* weight_array = base_type::filter().weight_array();
+
+ do
+ {
+ base_type::interpolator().coordinates(&x, &y);
+
+ x += base_type::filter_dx_int() - radius_x;
+ y += base_type::filter_dy_int() - radius_y;
+
+ fg = 0;
+
+ int y_lr = y >> image_subpixel_shift;
+ int y_hr = ((image_subpixel_mask - (y & image_subpixel_mask)) *
+ base_type::m_ry_inv) >>
+ image_subpixel_shift;
+ int total_weight = 0;
+ int x_lr = x >> image_subpixel_shift;
+ int x_hr = ((image_subpixel_mask - (x & image_subpixel_mask)) *
+ base_type::m_rx_inv) >>
+ image_subpixel_shift;
+
+ int x_hr2 = x_hr;
+ const value_type* fg_ptr =
+ (const value_type*)base_type::source().span(x_lr, y_lr, len_x_lr);
+ for(;;)
+ {
+ int weight_y = weight_array[y_hr];
+ x_hr = x_hr2;
+ for(;;)
+ {
+ int weight = (weight_y * weight_array[x_hr] +
+ image_filter_scale / 2) >>
+ downscale_shift;
+
+ fg += *fg_ptr * weight;
+ total_weight += weight;
+ x_hr += base_type::m_rx_inv;
+ if(x_hr >= filter_scale) break;
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ }
+ y_hr += base_type::m_ry_inv;
+ if(y_hr >= filter_scale) break;
+ fg_ptr = (const value_type*)base_type::source().next_y();
+ }
+
+ fg /= total_weight;
+ if(fg < 0) fg = 0;
+ if(fg > color_type::full_value()) fg = color_type::full_value();
+
+ span->v = (value_type)fg;
+ span->a = color_type::full_value();
+
+ ++span;
+ ++base_type::interpolator();
+ } while(--len);
+ }
+ };
+
+
+
+ //================================================span_image_resample_gray
+ template<class Source, class Interpolator>
+ class span_image_resample_gray :
+ public span_image_resample<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_resample<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::long_type long_type;
+ enum base_scale_e
+ {
+ downscale_shift = image_filter_shift
+ };
+
+ //--------------------------------------------------------------------
+ span_image_resample_gray() {}
+ span_image_resample_gray(source_type& src,
+ interpolator_type& inter,
+ image_filter_lut& filter) :
+ base_type(src, inter, filter)
+ {}
+
+ //--------------------------------------------------------------------
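+        // Same resampling scheme as the affine variant, but the kernel
+        // scales are re-derived for every pixel from the interpolator's
+        // local_scale(), so non-affine (e.g. perspective) interpolators are
+        // supported as well.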
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+ long_type fg;
+
+ int diameter = base_type::filter().diameter();
+ int filter_scale = diameter << image_subpixel_shift;
+
+ const int16* weight_array = base_type::filter().weight_array();
+ do
+ {
+ int rx;
+ int ry;
+ int rx_inv = image_subpixel_scale;
+ int ry_inv = image_subpixel_scale;
+ base_type::interpolator().coordinates(&x, &y);
+ base_type::interpolator().local_scale(&rx, &ry);
+ base_type::adjust_scale(&rx, &ry);
+
+ rx_inv = image_subpixel_scale * image_subpixel_scale / rx;
+ ry_inv = image_subpixel_scale * image_subpixel_scale / ry;
+
+ int radius_x = (diameter * rx) >> 1;
+ int radius_y = (diameter * ry) >> 1;
+ int len_x_lr =
+ (diameter * rx + image_subpixel_mask) >>
+ image_subpixel_shift;
+
+ x += base_type::filter_dx_int() - radius_x;
+ y += base_type::filter_dy_int() - radius_y;
+
+ fg = 0;
+
+ int y_lr = y >> image_subpixel_shift;
+ int y_hr = ((image_subpixel_mask - (y & image_subpixel_mask)) *
+ ry_inv) >>
+ image_subpixel_shift;
+ int total_weight = 0;
+ int x_lr = x >> image_subpixel_shift;
+ int x_hr = ((image_subpixel_mask - (x & image_subpixel_mask)) *
+ rx_inv) >>
+ image_subpixel_shift;
+ int x_hr2 = x_hr;
+ const value_type* fg_ptr =
+ (const value_type*)base_type::source().span(x_lr, y_lr, len_x_lr);
+
+ for(;;)
+ {
+ int weight_y = weight_array[y_hr];
+ x_hr = x_hr2;
+ for(;;)
+ {
+ int weight = (weight_y * weight_array[x_hr] +
+ image_filter_scale / 2) >>
+ downscale_shift;
+ fg += *fg_ptr * weight;
+ total_weight += weight;
+ x_hr += rx_inv;
+ if(x_hr >= filter_scale) break;
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ }
+ y_hr += ry_inv;
+ if(y_hr >= filter_scale) break;
+ fg_ptr = (const value_type*)base_type::source().next_y();
+ }
+
+ fg /= total_weight;
+ if(fg < 0) fg = 0;
+ if(fg > color_type::full_value()) fg = color_type::full_value();
+
+ span->v = (value_type)fg;
+ span->a = color_type::full_value();
+
+ ++span;
+ ++base_type::interpolator();
+ } while(--len);
+ }
+ };
+
+
+}
+
+
+#endif
+
+
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter_rgba.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter_rgba.h
new file mode 100644
index 00000000000..af7a1a2ef0d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_image_filter_rgba.h
@@ -0,0 +1,890 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_SPAN_IMAGE_FILTER_RGBA_INCLUDED
+#define AGG_SPAN_IMAGE_FILTER_RGBA_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_color_rgba.h"
+#include "agg_span_image_filter.h"
+
+
+namespace agg
+{
+
+ //==============================================span_image_filter_rgba_nn
+ template<class Source, class Interpolator>
+ class span_image_filter_rgba_nn :
+ public span_image_filter<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef typename source_type::order_type order_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ //--------------------------------------------------------------------
+ span_image_filter_rgba_nn() {}
+ span_image_filter_rgba_nn(source_type& src,
+ interpolator_type& inter) :
+ base_type(src, inter, 0)
+ {}
+
+ //--------------------------------------------------------------------
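+        // Nearest-neighbor sampling: copy the closest source pixel's four
+        // components in the order given by order_type.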
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+ do
+ {
+ base_type::interpolator().coordinates(&x, &y);
+ const value_type* fg_ptr = (const value_type*)
+ base_type::source().span(x >> image_subpixel_shift,
+ y >> image_subpixel_shift,
+ 1);
+ span->r = fg_ptr[order_type::R];
+ span->g = fg_ptr[order_type::G];
+ span->b = fg_ptr[order_type::B];
+ span->a = fg_ptr[order_type::A];
+ ++span;
+ ++base_type::interpolator();
+
+ } while(--len);
+ }
+ };
+
+
+
+ //=========================================span_image_filter_rgba_bilinear
+ template<class Source, class Interpolator>
+ class span_image_filter_rgba_bilinear :
+ public span_image_filter<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef typename source_type::order_type order_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ //--------------------------------------------------------------------
+ span_image_filter_rgba_bilinear() {}
+ span_image_filter_rgba_bilinear(source_type& src,
+ interpolator_type& inter) :
+ base_type(src, inter, 0)
+ {}
+
+
+ //--------------------------------------------------------------------
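+        // Bilinear filtering for RGBA: the same 2x2 weighting as the gray
+        // version, applied to all four components; the accumulators start
+        // at half the divisor so the final downshift rounds to nearest.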
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+
+ long_type fg[4];
+ const value_type *fg_ptr;
+
+ do
+ {
+ int x_hr;
+ int y_hr;
+
+ base_type::interpolator().coordinates(&x_hr, &y_hr);
+
+ x_hr -= base_type::filter_dx_int();
+ y_hr -= base_type::filter_dy_int();
+
+ int x_lr = x_hr >> image_subpixel_shift;
+ int y_lr = y_hr >> image_subpixel_shift;
+
+ unsigned weight;
+
+ fg[0] =
+ fg[1] =
+ fg[2] =
+ fg[3] = image_subpixel_scale * image_subpixel_scale / 2;
+
+ x_hr &= image_subpixel_mask;
+ y_hr &= image_subpixel_mask;
+
+ fg_ptr = (const value_type*)base_type::source().span(x_lr, y_lr, 2);
+ weight = (image_subpixel_scale - x_hr) *
+ (image_subpixel_scale - y_hr);
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr;
+
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ weight = x_hr * (image_subpixel_scale - y_hr);
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr;
+
+ fg_ptr = (const value_type*)base_type::source().next_y();
+ weight = (image_subpixel_scale - x_hr) * y_hr;
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr;
+
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ weight = x_hr * y_hr;
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr;
+
+ span->r = value_type(color_type::downshift(fg[order_type::R], image_subpixel_shift * 2));
+ span->g = value_type(color_type::downshift(fg[order_type::G], image_subpixel_shift * 2));
+ span->b = value_type(color_type::downshift(fg[order_type::B], image_subpixel_shift * 2));
+ span->a = value_type(color_type::downshift(fg[order_type::A], image_subpixel_shift * 2));
+
+ ++span;
+ ++base_type::interpolator();
+
+ } while(--len);
+ }
+ };
+
+
+ //====================================span_image_filter_rgba_bilinear_clip
+ template<class Source, class Interpolator>
+ class span_image_filter_rgba_bilinear_clip :
+ public span_image_filter<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef typename source_type::order_type order_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ //--------------------------------------------------------------------
+ span_image_filter_rgba_bilinear_clip() {}
+ span_image_filter_rgba_bilinear_clip(source_type& src,
+ const color_type& back_color,
+ interpolator_type& inter) :
+ base_type(src, inter, 0),
+ m_back_color(back_color)
+ {}
+ const color_type& background_color() const { return m_back_color; }
+ void background_color(const color_type& v) { m_back_color = v; }
+
+
+ //--------------------------------------------------------------------
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+
+ long_type fg[4];
+ value_type back_r = m_back_color.r;
+ value_type back_g = m_back_color.g;
+ value_type back_b = m_back_color.b;
+ value_type back_a = m_back_color.a;
+
+ const value_type *fg_ptr;
+ int maxx = base_type::source().width() - 1;
+ int maxy = base_type::source().height() - 1;
+
+ do
+ {
+ int x_hr;
+ int y_hr;
+
+ base_type::interpolator().coordinates(&x_hr, &y_hr);
+
+ x_hr -= base_type::filter_dx_int();
+ y_hr -= base_type::filter_dy_int();
+
+ int x_lr = x_hr >> image_subpixel_shift;
+ int y_lr = y_hr >> image_subpixel_shift;
+
+ unsigned weight;
+
+ if(x_lr >= 0 && y_lr >= 0 &&
+ x_lr < maxx && y_lr < maxy)
+ {
+ fg[0] = fg[1] = fg[2] = fg[3] = 0;
+
+ x_hr &= image_subpixel_mask;
+ y_hr &= image_subpixel_mask;
+
+ fg_ptr = (const value_type*)
+ base_type::source().row_ptr(y_lr) + (x_lr << 2);
+
+ weight = (image_subpixel_scale - x_hr) *
+ (image_subpixel_scale - y_hr);
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr++;
+
+ weight = x_hr * (image_subpixel_scale - y_hr);
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr++;
+
+ ++y_lr;
+ fg_ptr = (const value_type*)
+ base_type::source().row_ptr(y_lr) + (x_lr << 2);
+
+ weight = (image_subpixel_scale - x_hr) * y_hr;
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr++;
+
+ weight = x_hr * y_hr;
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr++;
+
+ fg[0] = color_type::downshift(fg[0], image_subpixel_shift * 2);
+ fg[1] = color_type::downshift(fg[1], image_subpixel_shift * 2);
+ fg[2] = color_type::downshift(fg[2], image_subpixel_shift * 2);
+ fg[3] = color_type::downshift(fg[3], image_subpixel_shift * 2);
+ }
+ else
+ {
+ if(x_lr < -1 || y_lr < -1 ||
+ x_lr > maxx || y_lr > maxy)
+ {
+ fg[order_type::R] = back_r;
+ fg[order_type::G] = back_g;
+ fg[order_type::B] = back_b;
+ fg[order_type::A] = back_a;
+ }
+ else
+ {
+ fg[0] = fg[1] = fg[2] = fg[3] = 0;
+
+ x_hr &= image_subpixel_mask;
+ y_hr &= image_subpixel_mask;
+
+ weight = (image_subpixel_scale - x_hr) *
+ (image_subpixel_scale - y_hr);
+ if(x_lr >= 0 && y_lr >= 0 &&
+ x_lr <= maxx && y_lr <= maxy)
+ {
+ fg_ptr = (const value_type*)
+ base_type::source().row_ptr(y_lr) + (x_lr << 2);
+
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr++;
+ }
+ else
+ {
+ fg[order_type::R] += back_r * weight;
+ fg[order_type::G] += back_g * weight;
+ fg[order_type::B] += back_b * weight;
+ fg[order_type::A] += back_a * weight;
+ }
+
+ x_lr++;
+
+ weight = x_hr * (image_subpixel_scale - y_hr);
+ if(x_lr >= 0 && y_lr >= 0 &&
+ x_lr <= maxx && y_lr <= maxy)
+ {
+ fg_ptr = (const value_type*)
+ base_type::source().row_ptr(y_lr) + (x_lr << 2);
+
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr++;
+ }
+ else
+ {
+ fg[order_type::R] += back_r * weight;
+ fg[order_type::G] += back_g * weight;
+ fg[order_type::B] += back_b * weight;
+ fg[order_type::A] += back_a * weight;
+ }
+
+ x_lr--;
+ y_lr++;
+
+ weight = (image_subpixel_scale - x_hr) * y_hr;
+ if(x_lr >= 0 && y_lr >= 0 &&
+ x_lr <= maxx && y_lr <= maxy)
+ {
+ fg_ptr = (const value_type*)
+ base_type::source().row_ptr(y_lr) + (x_lr << 2);
+
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr++;
+ }
+ else
+ {
+ fg[order_type::R] += back_r * weight;
+ fg[order_type::G] += back_g * weight;
+ fg[order_type::B] += back_b * weight;
+ fg[order_type::A] += back_a * weight;
+ }
+
+ x_lr++;
+
+ weight = x_hr * y_hr;
+ if(x_lr >= 0 && y_lr >= 0 &&
+ x_lr <= maxx && y_lr <= maxy)
+ {
+ fg_ptr = (const value_type*)
+ base_type::source().row_ptr(y_lr) + (x_lr << 2);
+
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr++;
+ }
+ else
+ {
+ fg[order_type::R] += back_r * weight;
+ fg[order_type::G] += back_g * weight;
+ fg[order_type::B] += back_b * weight;
+ fg[order_type::A] += back_a * weight;
+ }
+
+ fg[0] = color_type::downshift(fg[0], image_subpixel_shift * 2);
+ fg[1] = color_type::downshift(fg[1], image_subpixel_shift * 2);
+ fg[2] = color_type::downshift(fg[2], image_subpixel_shift * 2);
+ fg[3] = color_type::downshift(fg[3], image_subpixel_shift * 2);
+ }
+ }
+
+ span->r = (value_type)fg[order_type::R];
+ span->g = (value_type)fg[order_type::G];
+ span->b = (value_type)fg[order_type::B];
+ span->a = (value_type)fg[order_type::A];
+ ++span;
+ ++base_type::interpolator();
+
+ } while(--len);
+ }
+ private:
+ color_type m_back_color;
+ };
+
+
+ //==============================================span_image_filter_rgba_2x2
+ template<class Source, class Interpolator>
+ class span_image_filter_rgba_2x2 :
+ public span_image_filter<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef typename source_type::order_type order_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ //--------------------------------------------------------------------
+ span_image_filter_rgba_2x2() {}
+ span_image_filter_rgba_2x2(source_type& src,
+ interpolator_type& inter,
+ image_filter_lut& filter) :
+ base_type(src, inter, &filter)
+ {}
+
+
+ //--------------------------------------------------------------------
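+        // 2x2 filtering for RGBA, analogous to the gray version; after the
+        // accumulation the color components are clamped to the alpha
+        // channel, which keeps the result consistent with premultiplied
+        // alpha.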
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+
+ long_type fg[4];
+
+ const value_type *fg_ptr;
+ const int16* weight_array = base_type::filter().weight_array() +
+ ((base_type::filter().diameter()/2 - 1) <<
+ image_subpixel_shift);
+
+ do
+ {
+ int x_hr;
+ int y_hr;
+
+ base_type::interpolator().coordinates(&x_hr, &y_hr);
+
+ x_hr -= base_type::filter_dx_int();
+ y_hr -= base_type::filter_dy_int();
+
+ int x_lr = x_hr >> image_subpixel_shift;
+ int y_lr = y_hr >> image_subpixel_shift;
+
+ unsigned weight;
+ fg[0] = fg[1] = fg[2] = fg[3] = 0;
+
+ x_hr &= image_subpixel_mask;
+ y_hr &= image_subpixel_mask;
+
+ fg_ptr = (const value_type*)base_type::source().span(x_lr, y_lr, 2);
+ weight = (weight_array[x_hr + image_subpixel_scale] *
+ weight_array[y_hr + image_subpixel_scale] +
+ image_filter_scale / 2) >>
+ image_filter_shift;
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr;
+
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ weight = (weight_array[x_hr] *
+ weight_array[y_hr + image_subpixel_scale] +
+ image_filter_scale / 2) >>
+ image_filter_shift;
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr;
+
+ fg_ptr = (const value_type*)base_type::source().next_y();
+ weight = (weight_array[x_hr + image_subpixel_scale] *
+ weight_array[y_hr] +
+ image_filter_scale / 2) >>
+ image_filter_shift;
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr;
+
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ weight = (weight_array[x_hr] *
+ weight_array[y_hr] +
+ image_filter_scale / 2) >>
+ image_filter_shift;
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr;
+
+ fg[0] = color_type::downshift(fg[0], image_filter_shift);
+ fg[1] = color_type::downshift(fg[1], image_filter_shift);
+ fg[2] = color_type::downshift(fg[2], image_filter_shift);
+ fg[3] = color_type::downshift(fg[3], image_filter_shift);
+
+ if(fg[order_type::A] > color_type::full_value()) fg[order_type::A] = color_type::full_value();
+ if(fg[order_type::R] > fg[order_type::A]) fg[order_type::R] = fg[order_type::A];
+ if(fg[order_type::G] > fg[order_type::A]) fg[order_type::G] = fg[order_type::A];
+ if(fg[order_type::B] > fg[order_type::A]) fg[order_type::B] = fg[order_type::A];
+
+ span->r = (value_type)fg[order_type::R];
+ span->g = (value_type)fg[order_type::G];
+ span->b = (value_type)fg[order_type::B];
+ span->a = (value_type)fg[order_type::A];
+ ++span;
+ ++base_type::interpolator();
+
+ } while(--len);
+ }
+ };
+
+
+
+ //==================================================span_image_filter_rgba
+ template<class Source, class Interpolator>
+ class span_image_filter_rgba :
+ public span_image_filter<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef typename source_type::order_type order_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_filter<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+
+ //--------------------------------------------------------------------
+ span_image_filter_rgba() {}
+ span_image_filter_rgba(source_type& src,
+ interpolator_type& inter,
+ image_filter_lut& filter) :
+ base_type(src, inter, &filter)
+ {}
+
+ //--------------------------------------------------------------------
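+        // General RGBA filtering with an arbitrary kernel: accumulate the
+        // diameter x diameter neighborhood for all four components, then
+        // clamp negative sums to zero (the kernel may have negative lobes)
+        // and the colors to the alpha channel.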
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+
+ long_type fg[4];
+ const value_type *fg_ptr;
+
+ unsigned diameter = base_type::filter().diameter();
+ int start = base_type::filter().start();
+ const int16* weight_array = base_type::filter().weight_array();
+
+ int x_count;
+ int weight_y;
+
+ do
+ {
+ base_type::interpolator().coordinates(&x, &y);
+
+ x -= base_type::filter_dx_int();
+ y -= base_type::filter_dy_int();
+
+ int x_hr = x;
+ int y_hr = y;
+
+ int x_lr = x_hr >> image_subpixel_shift;
+ int y_lr = y_hr >> image_subpixel_shift;
+
+ fg[0] = fg[1] = fg[2] = fg[3] = 0;
+
+ int x_fract = x_hr & image_subpixel_mask;
+ unsigned y_count = diameter;
+
+ y_hr = image_subpixel_mask - (y_hr & image_subpixel_mask);
+ fg_ptr = (const value_type*)base_type::source().span(x_lr + start,
+ y_lr + start,
+ diameter);
+ for(;;)
+ {
+ x_count = diameter;
+ weight_y = weight_array[y_hr];
+ x_hr = image_subpixel_mask - x_fract;
+ for(;;)
+ {
+ int weight = (weight_y * weight_array[x_hr] +
+ image_filter_scale / 2) >>
+ image_filter_shift;
+
+ fg[0] += weight * *fg_ptr++;
+ fg[1] += weight * *fg_ptr++;
+ fg[2] += weight * *fg_ptr++;
+ fg[3] += weight * *fg_ptr;
+
+ if(--x_count == 0) break;
+ x_hr += image_subpixel_scale;
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ }
+
+ if(--y_count == 0) break;
+ y_hr += image_subpixel_scale;
+ fg_ptr = (const value_type*)base_type::source().next_y();
+ }
+
+ fg[0] = color_type::downshift(fg[0], image_filter_shift);
+ fg[1] = color_type::downshift(fg[1], image_filter_shift);
+ fg[2] = color_type::downshift(fg[2], image_filter_shift);
+ fg[3] = color_type::downshift(fg[3], image_filter_shift);
+
+ if(fg[0] < 0) fg[0] = 0;
+ if(fg[1] < 0) fg[1] = 0;
+ if(fg[2] < 0) fg[2] = 0;
+ if(fg[3] < 0) fg[3] = 0;
+
+ if(fg[order_type::A] > color_type::full_value()) fg[order_type::A] = color_type::full_value();
+ if(fg[order_type::R] > fg[order_type::A]) fg[order_type::R] = fg[order_type::A];
+ if(fg[order_type::G] > fg[order_type::A]) fg[order_type::G] = fg[order_type::A];
+ if(fg[order_type::B] > fg[order_type::A]) fg[order_type::B] = fg[order_type::A];
+
+ span->r = (value_type)fg[order_type::R];
+ span->g = (value_type)fg[order_type::G];
+ span->b = (value_type)fg[order_type::B];
+ span->a = (value_type)fg[order_type::A];
+ ++span;
+ ++base_type::interpolator();
+
+ } while(--len);
+ }
+ };
+
+
+
+ //========================================span_image_resample_rgba_affine
+ template<class Source>
+ class span_image_resample_rgba_affine :
+ public span_image_resample_affine<Source>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef typename source_type::order_type order_type;
+ typedef span_image_resample_affine<source_type> base_type;
+ typedef typename base_type::interpolator_type interpolator_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::long_type long_type;
+ enum base_scale_e
+ {
+ downscale_shift = image_filter_shift
+ };
+
+ //--------------------------------------------------------------------
+ span_image_resample_rgba_affine() {}
+ span_image_resample_rgba_affine(source_type& src,
+ interpolator_type& inter,
+ image_filter_lut& filter) :
+ base_type(src, inter, filter)
+ {}
+
+
+ //--------------------------------------------------------------------
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+
+ long_type fg[4];
+
+ int diameter = base_type::filter().diameter();
+ int filter_scale = diameter << image_subpixel_shift;
+ int radius_x = (diameter * base_type::m_rx) >> 1;
+ int radius_y = (diameter * base_type::m_ry) >> 1;
+ int len_x_lr =
+ (diameter * base_type::m_rx + image_subpixel_mask) >>
+ image_subpixel_shift;
+
+ const int16* weight_array = base_type::filter().weight_array();
+
+ do
+ {
+ base_type::interpolator().coordinates(&x, &y);
+
+ x += base_type::filter_dx_int() - radius_x;
+ y += base_type::filter_dy_int() - radius_y;
+
+ fg[0] = fg[1] = fg[2] = fg[3] = 0;
+
+ int y_lr = y >> image_subpixel_shift;
+ int y_hr = ((image_subpixel_mask - (y & image_subpixel_mask)) *
+ base_type::m_ry_inv) >>
+ image_subpixel_shift;
+ int total_weight = 0;
+ int x_lr = x >> image_subpixel_shift;
+ int x_hr = ((image_subpixel_mask - (x & image_subpixel_mask)) *
+ base_type::m_rx_inv) >>
+ image_subpixel_shift;
+
+ int x_hr2 = x_hr;
+ const value_type* fg_ptr =
+ (const value_type*)base_type::source().span(x_lr, y_lr, len_x_lr);
+ for(;;)
+ {
+ int weight_y = weight_array[y_hr];
+ x_hr = x_hr2;
+ for(;;)
+ {
+ int weight = (weight_y * weight_array[x_hr] +
+ image_filter_scale / 2) >>
+ downscale_shift;
+
+ fg[0] += *fg_ptr++ * weight;
+ fg[1] += *fg_ptr++ * weight;
+ fg[2] += *fg_ptr++ * weight;
+ fg[3] += *fg_ptr++ * weight;
+ total_weight += weight;
+ x_hr += base_type::m_rx_inv;
+ if(x_hr >= filter_scale) break;
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ }
+ y_hr += base_type::m_ry_inv;
+ if(y_hr >= filter_scale) break;
+ fg_ptr = (const value_type*)base_type::source().next_y();
+ }
+
+ fg[0] /= total_weight;
+ fg[1] /= total_weight;
+ fg[2] /= total_weight;
+ fg[3] /= total_weight;
+
+ if(fg[0] < 0) fg[0] = 0;
+ if(fg[1] < 0) fg[1] = 0;
+ if(fg[2] < 0) fg[2] = 0;
+ if(fg[3] < 0) fg[3] = 0;
+
+ if(fg[order_type::A] > color_type::full_value()) fg[order_type::A] = color_type::full_value();
+ if(fg[order_type::R] > fg[order_type::A]) fg[order_type::R] = fg[order_type::A];
+ if(fg[order_type::G] > fg[order_type::A]) fg[order_type::G] = fg[order_type::A];
+ if(fg[order_type::B] > fg[order_type::A]) fg[order_type::B] = fg[order_type::A];
+
+ span->r = (value_type)fg[order_type::R];
+ span->g = (value_type)fg[order_type::G];
+ span->b = (value_type)fg[order_type::B];
+ span->a = (value_type)fg[order_type::A];
+
+ ++span;
+ ++base_type::interpolator();
+ } while(--len);
+ }
+ };
+
+
+
+ //==============================================span_image_resample_rgba
+ template<class Source, class Interpolator>
+ class span_image_resample_rgba :
+ public span_image_resample<Source, Interpolator>
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef typename source_type::order_type order_type;
+ typedef Interpolator interpolator_type;
+ typedef span_image_resample<source_type, interpolator_type> base_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::long_type long_type;
+ enum base_scale_e
+ {
+ downscale_shift = image_filter_shift
+ };
+
+ //--------------------------------------------------------------------
+ span_image_resample_rgba() {}
+ span_image_resample_rgba(source_type& src,
+ interpolator_type& inter,
+ image_filter_lut& filter) :
+ base_type(src, inter, filter)
+ {}
+
+ //--------------------------------------------------------------------
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ base_type::interpolator().begin(x + base_type::filter_dx_dbl(),
+ y + base_type::filter_dy_dbl(), len);
+ long_type fg[4];
+
+ int diameter = base_type::filter().diameter();
+ int filter_scale = diameter << image_subpixel_shift;
+
+ const int16* weight_array = base_type::filter().weight_array();
+ do
+ {
+ int rx;
+ int ry;
+ int rx_inv = image_subpixel_scale;
+ int ry_inv = image_subpixel_scale;
+ base_type::interpolator().coordinates(&x, &y);
+ base_type::interpolator().local_scale(&rx, &ry);
+ base_type::adjust_scale(&rx, &ry);
+
+ rx_inv = image_subpixel_scale * image_subpixel_scale / rx;
+ ry_inv = image_subpixel_scale * image_subpixel_scale / ry;
+
+ int radius_x = (diameter * rx) >> 1;
+ int radius_y = (diameter * ry) >> 1;
+ int len_x_lr =
+ (diameter * rx + image_subpixel_mask) >>
+ image_subpixel_shift;
+
+ x += base_type::filter_dx_int() - radius_x;
+ y += base_type::filter_dy_int() - radius_y;
+
+ fg[0] = fg[1] = fg[2] = fg[3] = 0;
+
+ int y_lr = y >> image_subpixel_shift;
+ int y_hr = ((image_subpixel_mask - (y & image_subpixel_mask)) *
+ ry_inv) >>
+ image_subpixel_shift;
+ int total_weight = 0;
+ int x_lr = x >> image_subpixel_shift;
+ int x_hr = ((image_subpixel_mask - (x & image_subpixel_mask)) *
+ rx_inv) >>
+ image_subpixel_shift;
+ int x_hr2 = x_hr;
+ const value_type* fg_ptr =
+ (const value_type*)base_type::source().span(x_lr, y_lr, len_x_lr);
+
+ for(;;)
+ {
+ int weight_y = weight_array[y_hr];
+ x_hr = x_hr2;
+ for(;;)
+ {
+ int weight = (weight_y * weight_array[x_hr] +
+ image_filter_scale / 2) >>
+ downscale_shift;
+ fg[0] += *fg_ptr++ * weight;
+ fg[1] += *fg_ptr++ * weight;
+ fg[2] += *fg_ptr++ * weight;
+ fg[3] += *fg_ptr++ * weight;
+ total_weight += weight;
+ x_hr += rx_inv;
+ if(x_hr >= filter_scale) break;
+ fg_ptr = (const value_type*)base_type::source().next_x();
+ }
+ y_hr += ry_inv;
+ if(y_hr >= filter_scale) break;
+ fg_ptr = (const value_type*)base_type::source().next_y();
+ }
+
+ fg[0] /= total_weight;
+ fg[1] /= total_weight;
+ fg[2] /= total_weight;
+ fg[3] /= total_weight;
+
+ if(fg[0] < 0) fg[0] = 0;
+ if(fg[1] < 0) fg[1] = 0;
+ if(fg[2] < 0) fg[2] = 0;
+ if(fg[3] < 0) fg[3] = 0;
+
+ if(fg[order_type::A] > color_type::full_value()) fg[order_type::A] = color_type::full_value();
+                if(fg[order_type::R] > fg[order_type::A]) fg[order_type::R] = fg[order_type::A];
+                if(fg[order_type::G] > fg[order_type::A]) fg[order_type::G] = fg[order_type::A];
+                if(fg[order_type::B] > fg[order_type::A]) fg[order_type::B] = fg[order_type::A];
+
+ span->r = (value_type)fg[order_type::R];
+ span->g = (value_type)fg[order_type::G];
+ span->b = (value_type)fg[order_type::B];
+ span->a = (value_type)fg[order_type::A];
+
+ ++span;
+ ++base_type::interpolator();
+ } while(--len);
+ }
+ };
+
+
+}
+
+
+#endif
+
+
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_interpolator_adaptor.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_interpolator_adaptor.h
new file mode 100644
index 00000000000..0fdfa774796
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_interpolator_adaptor.h
@@ -0,0 +1,77 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_SPAN_INTERPOLATOR_ADAPTOR_INCLUDED
+#define AGG_SPAN_INTERPOLATOR_ADAPTOR_INCLUDED
+
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //===============================================span_interpolator_adaptor
+ template<class Interpolator, class Distortion>
+ class span_interpolator_adaptor : public Interpolator
+ {
+ public:
+ typedef Interpolator base_type;
+ typedef typename base_type::trans_type trans_type;
+ typedef Distortion distortion_type;
+
+ //--------------------------------------------------------------------
+ span_interpolator_adaptor() {}
+ span_interpolator_adaptor(trans_type& trans,
+ distortion_type& dist) :
+ base_type(trans),
+ m_distortion(&dist)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ span_interpolator_adaptor(trans_type& trans,
+ distortion_type& dist,
+ double x, double y, unsigned len) :
+ base_type(trans, x, y, len),
+ m_distortion(&dist)
+ {
+ }
+
+ //--------------------------------------------------------------------
+ distortion_type& distortion() const
+ {
+ return *m_distortion;
+ }
+
+ //--------------------------------------------------------------------
+ void distortion(distortion_type& dist)
+ {
+            m_distortion = &dist;
+ }
+
+ //--------------------------------------------------------------------
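+        // Report the base interpolator's coordinates, then let the attached
+        // distortion object displace them in place.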
+ void coordinates(int* x, int* y) const
+ {
+ base_type::coordinates(x, y);
+ m_distortion->calculate(x, y);
+ }
+
+ private:
+ //--------------------------------------------------------------------
+ distortion_type* m_distortion;
+ };
+}
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_interpolator_linear.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_interpolator_linear.h
new file mode 100644
index 00000000000..ef10505ce11
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_interpolator_linear.h
@@ -0,0 +1,232 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_SPAN_INTERPOLATOR_LINEAR_INCLUDED
+#define AGG_SPAN_INTERPOLATOR_LINEAR_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_dda_line.h"
+#include "agg_trans_affine.h"
+
+namespace agg
+{
+
+ //================================================span_interpolator_linear
+ template<class Transformer = trans_affine, unsigned SubpixelShift = 8>
+ class span_interpolator_linear
+ {
+ public:
+ typedef Transformer trans_type;
+
+ enum subpixel_scale_e
+ {
+ subpixel_shift = SubpixelShift,
+ subpixel_scale = 1 << subpixel_shift
+ };
+
+ //--------------------------------------------------------------------
+ span_interpolator_linear() {}
+ span_interpolator_linear(trans_type& trans) : m_trans(&trans) {}
+ span_interpolator_linear(trans_type& trans,
+ double x, double y, unsigned len) :
+ m_trans(&trans)
+ {
+ begin(x, y, len);
+ }
+
+ //----------------------------------------------------------------
+ const trans_type& transformer() const { return *m_trans; }
+ void transformer(trans_type& trans) { m_trans = &trans; }
+
+ //----------------------------------------------------------------
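+        // Transform the span's start point and the point len pixels to its
+        // right, then interpolate between the two results in subpixel
+        // coordinates with a pair of DDA interpolators, one step per output
+        // pixel.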
+ void begin(double x, double y, unsigned len)
+ {
+ double tx;
+ double ty;
+
+ tx = x;
+ ty = y;
+ m_trans->transform(&tx, &ty);
+ int x1 = iround(tx * subpixel_scale);
+ int y1 = iround(ty * subpixel_scale);
+
+ tx = x + len;
+ ty = y;
+ m_trans->transform(&tx, &ty);
+ int x2 = iround(tx * subpixel_scale);
+ int y2 = iround(ty * subpixel_scale);
+
+ m_li_x = dda2_line_interpolator(x1, x2, len);
+ m_li_y = dda2_line_interpolator(y1, y2, len);
+ }
+
+ //----------------------------------------------------------------
+ void resynchronize(double xe, double ye, unsigned len)
+ {
+ m_trans->transform(&xe, &ye);
+ m_li_x = dda2_line_interpolator(m_li_x.y(), iround(xe * subpixel_scale), len);
+ m_li_y = dda2_line_interpolator(m_li_y.y(), iround(ye * subpixel_scale), len);
+ }
+
+ //----------------------------------------------------------------
+ void operator++()
+ {
+ ++m_li_x;
+ ++m_li_y;
+ }
+
+ //----------------------------------------------------------------
+ void coordinates(int* x, int* y) const
+ {
+ *x = m_li_x.y();
+ *y = m_li_y.y();
+ }
+
+ private:
+ trans_type* m_trans;
+ dda2_line_interpolator m_li_x;
+ dda2_line_interpolator m_li_y;
+ };
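+
+    // A minimal usage sketch (not part of the original header): the
+    // interpolator is normally constructed on the inverse of the image
+    // transform and passed to one of the span_image_filter_* generators,
+    // e.g.
+    //
+    //     agg::trans_affine img_mtx = ...;   // source -> screen transform
+    //     img_mtx.invert();                  // generators sample screen -> source
+    //     agg::span_interpolator_linear<> interp(img_mtx);
+    //     agg::span_image_filter_rgba_bilinear<
+    //         img_source_type, agg::span_interpolator_linear<> > sg(img_src, interp);
+    //
+    // img_source_type and img_src stand in for an image accessor set up
+    // elsewhere.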
+
+
+
+
+
+
+ //=====================================span_interpolator_linear_subdiv
+ template<class Transformer = trans_affine, unsigned SubpixelShift = 8>
+ class span_interpolator_linear_subdiv
+ {
+ public:
+ typedef Transformer trans_type;
+
+ enum subpixel_scale_e
+ {
+ subpixel_shift = SubpixelShift,
+ subpixel_scale = 1 << subpixel_shift
+ };
+
+
+ //----------------------------------------------------------------
+ span_interpolator_linear_subdiv() :
+ m_subdiv_shift(4),
+ m_subdiv_size(1 << m_subdiv_shift),
+ m_subdiv_mask(m_subdiv_size - 1) {}
+
+ span_interpolator_linear_subdiv(trans_type& trans,
+ unsigned subdiv_shift = 4) :
+ m_subdiv_shift(subdiv_shift),
+ m_subdiv_size(1 << m_subdiv_shift),
+ m_subdiv_mask(m_subdiv_size - 1),
+ m_trans(&trans) {}
+
+ span_interpolator_linear_subdiv(trans_type& trans,
+ double x, double y, unsigned len,
+ unsigned subdiv_shift = 4) :
+ m_subdiv_shift(subdiv_shift),
+ m_subdiv_size(1 << m_subdiv_shift),
+ m_subdiv_mask(m_subdiv_size - 1),
+ m_trans(&trans)
+ {
+ begin(x, y, len);
+ }
+
+ //----------------------------------------------------------------
+ const trans_type& transformer() const { return *m_trans; }
+        void transformer(trans_type& trans) { m_trans = &trans; }
+
+ //----------------------------------------------------------------
+ unsigned subdiv_shift() const { return m_subdiv_shift; }
+ void subdiv_shift(unsigned shift)
+ {
+ m_subdiv_shift = shift;
+ m_subdiv_size = 1 << m_subdiv_shift;
+ m_subdiv_mask = m_subdiv_size - 1;
+ }
+
+ //----------------------------------------------------------------
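+        // Same idea as span_interpolator_linear, but the exact transformer
+        // is re-applied every m_subdiv_size pixels (see operator++ below),
+        // so the piecewise-linear approximation stays accurate for strongly
+        // non-linear transformations.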
+ void begin(double x, double y, unsigned len)
+ {
+ double tx;
+ double ty;
+ m_pos = 1;
+ m_src_x = iround(x * subpixel_scale) + subpixel_scale;
+ m_src_y = y;
+ m_len = len;
+
+ if(len > m_subdiv_size) len = m_subdiv_size;
+ tx = x;
+ ty = y;
+ m_trans->transform(&tx, &ty);
+ int x1 = iround(tx * subpixel_scale);
+ int y1 = iround(ty * subpixel_scale);
+
+ tx = x + len;
+ ty = y;
+ m_trans->transform(&tx, &ty);
+
+ m_li_x = dda2_line_interpolator(x1, iround(tx * subpixel_scale), len);
+ m_li_y = dda2_line_interpolator(y1, iround(ty * subpixel_scale), len);
+ }
+
+ //----------------------------------------------------------------
+ void operator++()
+ {
+ ++m_li_x;
+ ++m_li_y;
+ if(m_pos >= m_subdiv_size)
+ {
+ unsigned len = m_len;
+ if(len > m_subdiv_size) len = m_subdiv_size;
+ double tx = double(m_src_x) / double(subpixel_scale) + len;
+ double ty = m_src_y;
+ m_trans->transform(&tx, &ty);
+ m_li_x = dda2_line_interpolator(m_li_x.y(), iround(tx * subpixel_scale), len);
+ m_li_y = dda2_line_interpolator(m_li_y.y(), iround(ty * subpixel_scale), len);
+ m_pos = 0;
+ }
+ m_src_x += subpixel_scale;
+ ++m_pos;
+ --m_len;
+ }
+
+ //----------------------------------------------------------------
+ void coordinates(int* x, int* y) const
+ {
+ *x = m_li_x.y();
+ *y = m_li_y.y();
+ }
+
+ private:
+ unsigned m_subdiv_shift;
+ unsigned m_subdiv_size;
+ unsigned m_subdiv_mask;
+ trans_type* m_trans;
+ dda2_line_interpolator m_li_x;
+ dda2_line_interpolator m_li_y;
+ int m_src_x;
+ double m_src_y;
+ unsigned m_pos;
+ unsigned m_len;
+ };
+
+
+}
+
+
+
+#endif
+
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_pattern_rgba.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_pattern_rgba.h
new file mode 100644
index 00000000000..d47d2a6c022
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_span_pattern_rgba.h
@@ -0,0 +1,94 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+//----------------------------------------------------------------------------
+
+
+#ifndef AGG_SPAN_PATTERN_RGBA_INCLUDED
+#define AGG_SPAN_PATTERN_RGBA_INCLUDED
+
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //======================================================span_pattern_rgba
+ template<class Source> class span_pattern_rgba
+ {
+ public:
+ typedef Source source_type;
+ typedef typename source_type::color_type color_type;
+ typedef typename source_type::order_type order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+
+ //--------------------------------------------------------------------
+ span_pattern_rgba() {}
+ span_pattern_rgba(source_type& src,
+ unsigned offset_x, unsigned offset_y) :
+ m_src(&src),
+ m_offset_x(offset_x),
+ m_offset_y(offset_y)
+ {}
+
+ //--------------------------------------------------------------------
+ void attach(source_type& v) { m_src = &v; }
+ source_type& source() { return *m_src; }
+ const source_type& source() const { return *m_src; }
+
+ //--------------------------------------------------------------------
+ void offset_x(unsigned v) { m_offset_x = v; }
+ void offset_y(unsigned v) { m_offset_y = v; }
+ unsigned offset_x() const { return m_offset_x; }
+ unsigned offset_y() const { return m_offset_y; }
+ void alpha(value_type) {}
+ value_type alpha() const { return 0; }
+
+ //--------------------------------------------------------------------
+ void prepare() {}
+ void generate(color_type* span, int x, int y, unsigned len)
+ {
+ x += m_offset_x;
+ y += m_offset_y;
+ const value_type* p = (const value_type*)m_src->span(x, y, len);
+ do
+ {
+ span->r = p[order_type::R];
+ span->g = p[order_type::G];
+ span->b = p[order_type::B];
+ span->a = p[order_type::A];
+ p = (const value_type*)m_src->next_x();
+ ++span;
+ }
+ while(--len);
+ }
+
+ private:
+ source_type* m_src;
+ unsigned m_offset_x;
+ unsigned m_offset_y;
+
+ };
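+
+ // Note (illustration only, not part of AGG itself): Source is expected
+ // to be an image accessor - e.g. something like agg::image_accessor_wrap -
+ // whose span()/next_x() walk the pattern with the desired wrapping; this
+ // class then only copies the R, G, B and A components, with the span
+ // origin shifted by (offset_x, offset_y).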
+
+}
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_trans_affine.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_trans_affine.h
new file mode 100644
index 00000000000..1a611638833
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_trans_affine.h
@@ -0,0 +1,518 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Affine transformation classes.
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_TRANS_AFFINE_INCLUDED
+#define AGG_TRANS_AFFINE_INCLUDED
+
+#include <math.h>
+#include "agg_basics.h"
+
+namespace agg
+{
+ const double affine_epsilon = 1e-14;
+
+ //============================================================trans_affine
+ //
+ // See Implementation agg_trans_affine.cpp
+ //
+ // Affine transformations are linear transformations in Cartesian coordinates
+ // (strictly speaking not only in Cartesian ones, but for now we will
+ // think of them that way). They are rotation, scaling, translation and skewing.
+ // After any affine transformation a line segment remains a line segment
+ // and it will never become a curve.
+ //
+ // There will be no math about matrix calculations here, since it has been
+ // described many times. Ask yourself a very simple question:
+ // "why do we need to understand and use matrix stuff instead of just
+ // rotating, scaling and so on?" The answers are:
+ //
+ // 1. Any combination of transformations can be done by only 4 multiplications
+ // and 4 additions in floating point.
+ // 2. One matrix transformation is equivalent to any number of consecutive
+ // discrete transformations, i.e. the matrix "accumulates" all transformations
+ // in the order in which they are applied. Suppose we have 4 transformations:
+ // * rotate by 30 degrees,
+ // * scale X to 2.0,
+ // * scale Y to 1.5,
+ // * move to (100, 100).
+ // The result will depend on the order of these transformations,
+ // and the advantage of the matrix is that the sequence of discrete calls:
+ // rotate(30), scaleX(2.0), scaleY(1.5), move(100,100)
+ // will have exactly the same result as the following matrix transformations:
+ //
+ // affine_matrix m;
+ // m *= rotate_matrix(30);
+ // m *= scaleX_matrix(2.0);
+ // m *= scaleY_matrix(1.5);
+ // m *= move_matrix(100,100);
+ //
+ // m.transform_my_point_at_last(x, y);
+ //
+ // What is the benefit? In real life we set up the matrix only once
+ // and then transform many points, not to mention the convenience of setting
+ // any combination of transformations.
+ //
+ // So, how do you use it? Very easily - literally as shown above. Well,
+ // almost; let us write a correct example:
+ //
+ // agg::trans_affine m;
+ // m *= agg::trans_affine_rotation(30.0 * 3.1415926 / 180.0);
+ // m *= agg::trans_affine_scaling(2.0, 1.5);
+ // m *= agg::trans_affine_translation(100.0, 100.0);
+ // m.transform(&x, &y);
+ //
+ // The affine matrix is all you need to perform any linear transformation,
+ // but all transformations have their origin at (0,0). This means that we need
+ // to use 2 translations if we want to rotate something around (100,100):
+ //
+ // m *= agg::trans_affine_translation(-100.0, -100.0); // move to (0,0)
+ // m *= agg::trans_affine_rotation(30.0 * 3.1415926 / 180.0); // rotate
+ // m *= agg::trans_affine_translation(100.0, 100.0); // move back to (100,100)
+ //----------------------------------------------------------------------
+ struct trans_affine
+ {
+ double sx, shy, shx, sy, tx, ty;
+
+ //------------------------------------------ Construction
+ // Identity matrix
+ trans_affine() :
+ sx(1.0), shy(0.0), shx(0.0), sy(1.0), tx(0.0), ty(0.0)
+ {}
+
+ // Custom matrix. Usually used in derived classes
+ trans_affine(double v0, double v1, double v2,
+ double v3, double v4, double v5) :
+ sx(v0), shy(v1), shx(v2), sy(v3), tx(v4), ty(v5)
+ {}
+
+ // Custom matrix from m[6]
+ explicit trans_affine(const double* m) :
+ sx(m[0]), shy(m[1]), shx(m[2]), sy(m[3]), tx(m[4]), ty(m[5])
+ {}
+
+ // Rectangle to a parallelogram.
+ trans_affine(double x1, double y1, double x2, double y2,
+ const double* parl)
+ {
+ rect_to_parl(x1, y1, x2, y2, parl);
+ }
+
+ // Parallelogram to a rectangle.
+ trans_affine(const double* parl,
+ double x1, double y1, double x2, double y2)
+ {
+ parl_to_rect(parl, x1, y1, x2, y2);
+ }
+
+ // Arbitrary parallelogram transformation.
+ trans_affine(const double* src, const double* dst)
+ {
+ parl_to_parl(src, dst);
+ }
+
+ //---------------------------------- Parallelogram transformations
+ // Transform a parallelogram into another one. Src and dst are
+ // pointers to arrays of three points (double[6], x1,y1,...) that
+ // identify three corners of the parallelograms, assuming an implicit
+ // fourth point. The arguments are arrays of double[6] mapped
+ // to x1,y1, x2,y2, x3,y3 where the coordinates are:
+ // *-----------------*
+ // / (x3,y3)/
+ // / /
+ // /(x1,y1) (x2,y2)/
+ // *-----------------*
+ const trans_affine& parl_to_parl(const double* src,
+ const double* dst);
+
+ const trans_affine& rect_to_parl(double x1, double y1,
+ double x2, double y2,
+ const double* parl);
+
+ const trans_affine& parl_to_rect(const double* parl,
+ double x1, double y1,
+ double x2, double y2);
+
+
+ //------------------------------------------ Operations
+ // Reset - load an identity matrix
+ const trans_affine& reset();
+
+ // Direct transformations operations
+ const trans_affine& translate(double x, double y);
+ const trans_affine& rotate(double a);
+ const trans_affine& scale(double s);
+ const trans_affine& scale(double x, double y);
+
+ // Multiply the matrix by another one
+ const trans_affine& multiply(const trans_affine& m);
+
+ // Multiply "m" by "this" and assign the result to "this"
+ const trans_affine& premultiply(const trans_affine& m);
+
+ // Multiply the matrix by the inverse of another one
+ const trans_affine& multiply_inv(const trans_affine& m);
+
+ // Multiply the inverse of "m" by "this" and assign the result to "this"
+ const trans_affine& premultiply_inv(const trans_affine& m);
+
+ // Invert the matrix. Do not try to invert degenerate matrices -
+ // there is no check for validity. If you set the scale to 0 and
+ // then try to invert the matrix, expect an unpredictable result.
+ const trans_affine& invert();
+
+ // Mirroring around X
+ const trans_affine& flip_x();
+
+ // Mirroring around Y
+ const trans_affine& flip_y();
+
+ //------------------------------------------- Load/Store
+ // Store matrix to an array [6] of double
+ void store_to(double* m) const
+ {
+ *m++ = sx; *m++ = shy; *m++ = shx; *m++ = sy; *m++ = tx; *m++ = ty;
+ }
+
+ // Load matrix from an array [6] of double
+ const trans_affine& load_from(const double* m)
+ {
+ sx = *m++; shy = *m++; shx = *m++; sy = *m++; tx = *m++; ty = *m++;
+ return *this;
+ }
+
+ //------------------------------------------- Operators
+
+ // Multiply the matrix by another one
+ const trans_affine& operator *= (const trans_affine& m)
+ {
+ return multiply(m);
+ }
+
+ // Multiply the matrix by inverse of another one
+ const trans_affine& operator /= (const trans_affine& m)
+ {
+ return multiply_inv(m);
+ }
+
+ // Multiply the matrix by another one and return
+ // the result in a separate matrix.
+ trans_affine operator * (const trans_affine& m) const
+ {
+ return trans_affine(*this).multiply(m);
+ }
+
+ // Multiply the matrix by inverse of another one
+ // and return the result in a separate matrix.
+ trans_affine operator / (const trans_affine& m) const
+ {
+ return trans_affine(*this).multiply_inv(m);
+ }
+
+ // Calculate and return the inverse matrix
+ trans_affine operator ~ () const
+ {
+ trans_affine ret = *this;
+ return ret.invert();
+ }
+
+ // Equal operator with default epsilon
+ bool operator == (const trans_affine& m) const
+ {
+ return is_equal(m, affine_epsilon);
+ }
+
+ // Not Equal operator with default epsilon
+ bool operator != (const trans_affine& m) const
+ {
+ return !is_equal(m, affine_epsilon);
+ }
+
+ //-------------------------------------------- Transformations
+ // Direct transformation of x and y
+ void transform(double* x, double* y) const;
+
+ // Direct transformation of x and y, 2x2 matrix only, no translation
+ void transform_2x2(double* x, double* y) const;
+
+ // Inverse transformation of x and y. It is slower than the
+ // direct transformation. For massive operations it's better to
+ // invert() the matrix and then use direct transformations.
+ void inverse_transform(double* x, double* y) const;
+
+ //-------------------------------------------- Auxiliary
+ // Calculate the determinant of matrix
+ double determinant() const
+ {
+ return sx * sy - shy * shx;
+ }
+
+ // Calculate the reciprocal of the determinant
+ double determinant_reciprocal() const
+ {
+ return 1.0 / (sx * sy - shy * shx);
+ }
+
+ // Get the average scale (by X and Y).
+ // Basically used to calculate the approximation_scale when
+ // decomposing curves into line segments.
+ double scale() const;
+
+ // Check to see if the matrix is not degenerate
+ bool is_valid(double epsilon = affine_epsilon) const;
+
+ // Check to see if it's an identity matrix
+ bool is_identity(double epsilon = affine_epsilon) const;
+
+ // Check to see if two matrices are equal
+ bool is_equal(const trans_affine& m, double epsilon = affine_epsilon) const;
+
+ // Determine the major parameters. Use with caution considering
+ // possible degenerate cases.
+ double rotation() const;
+ void translation(double* dx, double* dy) const;
+ void scaling(double* x, double* y) const;
+ void scaling_abs(double* x, double* y) const;
+ };
+
+ //------------------------------------------------------------------------
+ inline void trans_affine::transform(double* x, double* y) const
+ {
+ double tmp = *x;
+ *x = tmp * sx + *y * shx + tx;
+ *y = tmp * shy + *y * sy + ty;
+ }
+
+ //------------------------------------------------------------------------
+ inline void trans_affine::transform_2x2(double* x, double* y) const
+ {
+ double tmp = *x;
+ *x = tmp * sx + *y * shx;
+ *y = tmp * shy + *y * sy;
+ }
+
+ //------------------------------------------------------------------------
+ inline void trans_affine::inverse_transform(double* x, double* y) const
+ {
+ double d = determinant_reciprocal();
+ double a = (*x - tx) * d;
+ double b = (*y - ty) * d;
+ *x = a * sy - b * shx;
+ *y = b * sx - a * shy;
+ }
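+ // (The above solves for the source point by Cramer's rule: subtract the
+ // translation, scale by the reciprocal determinant, then apply the
+ // adjugate of the 2x2 linear part - which is also why degenerate
+ // matrices with a zero determinant are not handled here.)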
+
+ //------------------------------------------------------------------------
+ inline double trans_affine::scale() const
+ {
+ double x = 0.707106781 * sx + 0.707106781 * shx;
+ double y = 0.707106781 * shy + 0.707106781 * sy;
+ return sqrt(x*x + y*y);
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::translate(double x, double y)
+ {
+ tx += x;
+ ty += y;
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::rotate(double a)
+ {
+ double ca = cos(a);
+ double sa = sin(a);
+ double t0 = sx * ca - shy * sa;
+ double t2 = shx * ca - sy * sa;
+ double t4 = tx * ca - ty * sa;
+ shy = sx * sa + shy * ca;
+ sy = shx * sa + sy * ca;
+ ty = tx * sa + ty * ca;
+ sx = t0;
+ shx = t2;
+ tx = t4;
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::scale(double x, double y)
+ {
+ double mm0 = x; // Possible hint for the optimizer
+ double mm3 = y;
+ sx *= mm0;
+ shx *= mm0;
+ tx *= mm0;
+ shy *= mm3;
+ sy *= mm3;
+ ty *= mm3;
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::scale(double s)
+ {
+ double m = s; // Possible hint for the optimizer
+ sx *= m;
+ shx *= m;
+ tx *= m;
+ shy *= m;
+ sy *= m;
+ ty *= m;
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::premultiply(const trans_affine& m)
+ {
+ trans_affine t = m;
+ return *this = t.multiply(*this);
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::multiply_inv(const trans_affine& m)
+ {
+ trans_affine t = m;
+ t.invert();
+ return multiply(t);
+ }
+
+ //------------------------------------------------------------------------
+ inline const trans_affine& trans_affine::premultiply_inv(const trans_affine& m)
+ {
+ trans_affine t = m;
+ t.invert();
+ return *this = t.multiply(*this);
+ }
+
+ //------------------------------------------------------------------------
+ inline void trans_affine::scaling_abs(double* x, double* y) const
+ {
+ // Used to calculate scaling coefficients in image resampling.
+ // When there is considerable shear this method gives us much
+ // better estimation than just sx, sy.
+ *x = sqrt(sx * sx + shx * shx);
+ *y = sqrt(shy * shy + sy * sy);
+ }
+
+ //====================================================trans_affine_rotation
+ // Rotation matrix. sin() and cos() are calculated twice for the same angle.
+ // There's no harm because the performance of sin()/cos() is very good on all
+ // modern processors. Besides, this operation is not going to be invoked too
+ // often.
+ class trans_affine_rotation : public trans_affine
+ {
+ public:
+ trans_affine_rotation(double a) :
+ trans_affine(cos(a), sin(a), -sin(a), cos(a), 0.0, 0.0)
+ {}
+ };
+
+ //====================================================trans_affine_scaling
+ // Scaling matrix. x, y - scale coefficients by X and Y respectively
+ class trans_affine_scaling : public trans_affine
+ {
+ public:
+ trans_affine_scaling(double x, double y) :
+ trans_affine(x, 0.0, 0.0, y, 0.0, 0.0)
+ {}
+
+ trans_affine_scaling(double s) :
+ trans_affine(s, 0.0, 0.0, s, 0.0, 0.0)
+ {}
+ };
+
+ //================================================trans_affine_translation
+ // Translation matrix
+ class trans_affine_translation : public trans_affine
+ {
+ public:
+ trans_affine_translation(double x, double y) :
+ trans_affine(1.0, 0.0, 0.0, 1.0, x, y)
+ {}
+ };
+
+ //====================================================trans_affine_skewing
+ // Skewing (shear) matrix
+ class trans_affine_skewing : public trans_affine
+ {
+ public:
+ trans_affine_skewing(double x, double y) :
+ trans_affine(1.0, tan(y), tan(x), 1.0, 0.0, 0.0)
+ {}
+ };
+
+
+ //===============================================trans_affine_line_segment
+ // Rotate, Scale and Translate, associating 0...dist with line segment
+ // x1,y1,x2,y2
+ class trans_affine_line_segment : public trans_affine
+ {
+ public:
+ trans_affine_line_segment(double x1, double y1, double x2, double y2,
+ double dist)
+ {
+ double dx = x2 - x1;
+ double dy = y2 - y1;
+ if(dist > 0.0)
+ {
+ multiply(trans_affine_scaling(sqrt(dx * dx + dy * dy) / dist));
+ }
+ multiply(trans_affine_rotation(atan2(dy, dx)));
+ multiply(trans_affine_translation(x1, y1));
+ }
+ };
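+
+ // Illustrative use (names are hypothetical, not part of AGG): to place
+ // something laid out along 0..dist onto the segment (x1,y1)-(x2,y2):
+ //
+ // agg::trans_affine_line_segment m(x1, y1, x2, y2, dist);
+ // m.transform(&px, &py); // a point with px in [0..dist], py == 0
+ // // is mapped onto the segment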
+
+
+ //============================================trans_affine_reflection_unit
+ // Reflection matrix. Reflect coordinates across the line through
+ // the origin containing the unit vector (ux, uy).
+ // Contributed by John Horigan
+ class trans_affine_reflection_unit : public trans_affine
+ {
+ public:
+ trans_affine_reflection_unit(double ux, double uy) :
+ trans_affine(2.0 * ux * ux - 1.0,
+ 2.0 * ux * uy,
+ 2.0 * ux * uy,
+ 2.0 * uy * uy - 1.0,
+ 0.0, 0.0)
+ {}
+ };
+
+
+ //=================================================trans_affine_reflection
+ // Reflection matrix. Reflect coordinates across the line through
+ // the origin at the angle a or containing the non-unit vector (x, y).
+ // Contributed by John Horigan
+ class trans_affine_reflection : public trans_affine_reflection_unit
+ {
+ public:
+ trans_affine_reflection(double a) :
+ trans_affine_reflection_unit(cos(a), sin(a))
+ {}
+
+
+ trans_affine_reflection(double x, double y) :
+ trans_affine_reflection_unit(x / sqrt(x * x + y * y), y / sqrt(x * x + y * y))
+ {}
+ };
+
+}
+
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_contour.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_contour.h
new file mode 100644
index 00000000000..8c25da13f5a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_contour.h
@@ -0,0 +1,94 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_VCGEN_CONTOUR_INCLUDED
+#define AGG_VCGEN_CONTOUR_INCLUDED
+
+#include "agg_math_stroke.h"
+
+namespace agg
+{
+
+ //----------------------------------------------------------vcgen_contour
+ //
+ // See Implementation agg_vcgen_contour.cpp
+ //
+ class vcgen_contour
+ {
+ enum status_e
+ {
+ initial,
+ ready,
+ outline,
+ out_vertices,
+ end_poly,
+ stop
+ };
+
+ public:
+ typedef vertex_sequence<vertex_dist, 6> vertex_storage;
+ typedef pod_bvector<point_d, 6> coord_storage;
+
+ vcgen_contour();
+
+ void line_cap(line_cap_e lc) { m_stroker.line_cap(lc); }
+ void line_join(line_join_e lj) { m_stroker.line_join(lj); }
+ void inner_join(inner_join_e ij) { m_stroker.inner_join(ij); }
+
+ line_cap_e line_cap() const { return m_stroker.line_cap(); }
+ line_join_e line_join() const { return m_stroker.line_join(); }
+ inner_join_e inner_join() const { return m_stroker.inner_join(); }
+
+ void width(double w) { m_stroker.width(m_width = w); }
+ void miter_limit(double ml) { m_stroker.miter_limit(ml); }
+ void miter_limit_theta(double t) { m_stroker.miter_limit_theta(t); }
+ void inner_miter_limit(double ml) { m_stroker.inner_miter_limit(ml); }
+ void approximation_scale(double as) { m_stroker.approximation_scale(as); }
+
+ double width() const { return m_width; }
+ double miter_limit() const { return m_stroker.miter_limit(); }
+ double inner_miter_limit() const { return m_stroker.inner_miter_limit(); }
+ double approximation_scale() const { return m_stroker.approximation_scale(); }
+
+ void auto_detect_orientation(bool v) { m_auto_detect = v; }
+ bool auto_detect_orientation() const { return m_auto_detect; }
+
+ // Generator interface
+ void remove_all();
+ void add_vertex(double x, double y, unsigned cmd);
+
+ // Vertex Source Interface
+ void rewind(unsigned path_id);
+ unsigned vertex(double* x, double* y);
+
+ private:
+ vcgen_contour(const vcgen_contour&);
+ const vcgen_contour& operator = (const vcgen_contour&);
+
+ math_stroke<coord_storage> m_stroker;
+ double m_width;
+ vertex_storage m_src_vertices;
+ coord_storage m_out_vertices;
+ status_e m_status;
+ unsigned m_src_vertex;
+ unsigned m_out_vertex;
+ unsigned m_closed;
+ unsigned m_orientation;
+ bool m_auto_detect;
+ };
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_dash.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_dash.h
new file mode 100644
index 00000000000..c87dce4c815
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_dash.h
@@ -0,0 +1,93 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Line dash generator
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_VCGEN_DASH_INCLUDED
+#define AGG_VCGEN_DASH_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_vertex_sequence.h"
+
+namespace agg
+{
+
+ //---------------------------------------------------------------vcgen_dash
+ //
+ // See Implementation agg_vcgen_dash.cpp
+ //
+ class vcgen_dash
+ {
+ enum max_dashes_e
+ {
+ max_dashes = 32
+ };
+
+ enum status_e
+ {
+ initial,
+ ready,
+ polyline,
+ stop
+ };
+
+ public:
+ typedef vertex_sequence<vertex_dist, 6> vertex_storage;
+
+ vcgen_dash();
+
+ void remove_all_dashes();
+ void add_dash(double dash_len, double gap_len);
+ void dash_start(double ds);
+
+ void shorten(double s) { m_shorten = s; }
+ double shorten() const { return m_shorten; }
+
+ // Vertex Generator Interface
+ void remove_all();
+ void add_vertex(double x, double y, unsigned cmd);
+
+ // Vertex Source Interface
+ void rewind(unsigned path_id);
+ unsigned vertex(double* x, double* y);
+
+ private:
+ vcgen_dash(const vcgen_dash&);
+ const vcgen_dash& operator = (const vcgen_dash&);
+
+ void calc_dash_start(double ds);
+
+ double m_dashes[max_dashes];
+ double m_total_dash_len;
+ unsigned m_num_dashes;
+ double m_dash_start;
+ double m_shorten;
+ double m_curr_dash_start;
+ unsigned m_curr_dash;
+ double m_curr_rest;
+ const vertex_dist* m_v1;
+ const vertex_dist* m_v2;
+
+ vertex_storage m_src_vertices;
+ unsigned m_closed;
+ status_e m_status;
+ unsigned m_src_vertex;
+ };
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_stroke.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_stroke.h
new file mode 100644
index 00000000000..778223fe40e
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vcgen_stroke.h
@@ -0,0 +1,102 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_VCGEN_STROKE_INCLUDED
+#define AGG_VCGEN_STROKE_INCLUDED
+
+#include "agg_math_stroke.h"
+
+
+namespace agg
+{
+
+ //============================================================vcgen_stroke
+ //
+ // See Implementation agg_vcgen_stroke.cpp
+ // Stroke generator
+ //
+ //------------------------------------------------------------------------
+ class vcgen_stroke
+ {
+ enum status_e
+ {
+ initial,
+ ready,
+ cap1,
+ cap2,
+ outline1,
+ close_first,
+ outline2,
+ out_vertices,
+ end_poly1,
+ end_poly2,
+ stop
+ };
+
+ public:
+ typedef vertex_sequence<vertex_dist, 6> vertex_storage;
+ typedef pod_bvector<point_d, 6> coord_storage;
+
+ vcgen_stroke();
+
+ void line_cap(line_cap_e lc) { m_stroker.line_cap(lc); }
+ void line_join(line_join_e lj) { m_stroker.line_join(lj); }
+ void inner_join(inner_join_e ij) { m_stroker.inner_join(ij); }
+
+ line_cap_e line_cap() const { return m_stroker.line_cap(); }
+ line_join_e line_join() const { return m_stroker.line_join(); }
+ inner_join_e inner_join() const { return m_stroker.inner_join(); }
+
+ void width(double w) { m_stroker.width(w); }
+ void miter_limit(double ml) { m_stroker.miter_limit(ml); }
+ void miter_limit_theta(double t) { m_stroker.miter_limit_theta(t); }
+ void inner_miter_limit(double ml) { m_stroker.inner_miter_limit(ml); }
+ void approximation_scale(double as) { m_stroker.approximation_scale(as); }
+
+ double width() const { return m_stroker.width(); }
+ double miter_limit() const { return m_stroker.miter_limit(); }
+ double inner_miter_limit() const { return m_stroker.inner_miter_limit(); }
+ double approximation_scale() const { return m_stroker.approximation_scale(); }
+
+ void shorten(double s) { m_shorten = s; }
+ double shorten() const { return m_shorten; }
+
+ // Vertex Generator Interface
+ void remove_all();
+ void add_vertex(double x, double y, unsigned cmd);
+
+ // Vertex Source Interface
+ void rewind(unsigned path_id);
+ unsigned vertex(double* x, double* y);
+
+ private:
+ vcgen_stroke(const vcgen_stroke&);
+ const vcgen_stroke& operator = (const vcgen_stroke&);
+
+ math_stroke<coord_storage> m_stroker;
+ vertex_storage m_src_vertices;
+ coord_storage m_out_vertices;
+ double m_shorten;
+ unsigned m_closed;
+ status_e m_status;
+ status_e m_prev_status;
+ unsigned m_src_vertex;
+ unsigned m_out_vertex;
+ };
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vertex_sequence.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vertex_sequence.h
new file mode 100644
index 00000000000..2ad0701b37a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vertex_sequence.h
@@ -0,0 +1,172 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// vertex_sequence container and vertex_dist struct
+//
+//----------------------------------------------------------------------------
+#ifndef AGG_VERTEX_SEQUENCE_INCLUDED
+#define AGG_VERTEX_SEQUENCE_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_array.h"
+#include "agg_math.h"
+
+namespace agg
+{
+
+ //----------------------------------------------------------vertex_sequence
+ // Modified agg::pod_bvector. The data is interpreted as a sequence
+ // of vertices. It means that the type T must expose:
+ //
+ // bool T::operator() (const T& val)
+ //
+ // that is called every time a new vertex is added. The main purpose
+ // of this operator is to make it possible to calculate some values while
+ // adding and to return true if the vertex fits some criteria, or false if
+ // it doesn't. In the latter case the new vertex is not added.
+ //
+ // A simple example is filtering out coinciding vertices while calculating
+ // the distance between the current and previous ones:
+ //
+ // struct vertex_dist
+ // {
+ // double x;
+ // double y;
+ // double dist;
+ //
+ // vertex_dist() {}
+ // vertex_dist(double x_, double y_) :
+ // x(x_),
+ // y(y_),
+ // dist(0.0)
+ // {
+ // }
+ //
+ // bool operator () (const vertex_dist& val)
+ // {
+ // return (dist = calc_distance(x, y, val.x, val.y)) > EPSILON;
+ // }
+ // };
+ //
+ // Function close() calls this operator and removes the last vertex if
+ // necessary.
+ //------------------------------------------------------------------------
+ template<class T, unsigned S=6>
+ class vertex_sequence : public pod_bvector<T, S>
+ {
+ public:
+ typedef pod_bvector<T, S> base_type;
+
+ void add(const T& val);
+ void modify_last(const T& val);
+ void close(bool remove_flag);
+ };
+
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ void vertex_sequence<T, S>::add(const T& val)
+ {
+ if(base_type::size() > 1)
+ {
+ if(!(*this)[base_type::size() - 2]((*this)[base_type::size() - 1]))
+ {
+ base_type::remove_last();
+ }
+ }
+ base_type::add(val);
+ }
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ void vertex_sequence<T, S>::modify_last(const T& val)
+ {
+ base_type::remove_last();
+ add(val);
+ }
+
+
+
+ //------------------------------------------------------------------------
+ template<class T, unsigned S>
+ void vertex_sequence<T, S>::close(bool closed)
+ {
+ while(base_type::size() > 1)
+ {
+ if((*this)[base_type::size() - 2]((*this)[base_type::size() - 1])) break;
+ T t = (*this)[base_type::size() - 1];
+ base_type::remove_last();
+ modify_last(t);
+ }
+
+ if(closed)
+ {
+ while(base_type::size() > 1)
+ {
+ if((*this)[base_type::size() - 1]((*this)[0])) break;
+ base_type::remove_last();
+ }
+ }
+ }
+
+
+ //-------------------------------------------------------------vertex_dist
+ // Vertex (x, y) with the distance to the next one. The last vertex has
+ // the distance between the last and the first points if the polygon is
+ // closed, and 0.0 if it's a polyline.
+ struct vertex_dist
+ {
+ double x;
+ double y;
+ double dist;
+
+ vertex_dist() {}
+ vertex_dist(double x_, double y_) :
+ x(x_),
+ y(y_),
+ dist(0.0)
+ {
+ }
+
+ bool operator () (const vertex_dist& val)
+ {
+ bool ret = (dist = calc_distance(x, y, val.x, val.y)) > vertex_dist_epsilon;
+ if(!ret) dist = 1.0 / vertex_dist_epsilon;
+ return ret;
+ }
+ };
+
+
+
+ //--------------------------------------------------------vertex_dist_cmd
+ // Same as the above, but with an additional "command" value
+ struct vertex_dist_cmd : public vertex_dist
+ {
+ unsigned cmd;
+
+ vertex_dist_cmd() {}
+ vertex_dist_cmd(double x_, double y_, unsigned cmd_) :
+ vertex_dist(x_, y_),
+ cmd(cmd_)
+ {
+ }
+ };
+
+
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vpgen_segmentator.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vpgen_segmentator.h
new file mode 100644
index 00000000000..29b3c9fa6f0
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/agg_vpgen_segmentator.h
@@ -0,0 +1,61 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#ifndef AGG_VPGEN_SEGMENTATOR_INCLUDED
+#define AGG_VPGEN_SEGMENTATOR_INCLUDED
+
+#include <math.h>
+#include "agg_basics.h"
+
+namespace agg
+{
+
+ //=======================================================vpgen_segmentator
+ //
+ // See Implementation agg_vpgen_segmentator.cpp
+ //
+ class vpgen_segmentator
+ {
+ public:
+ vpgen_segmentator() : m_approximation_scale(1.0) {}
+
+ void approximation_scale(double s) { m_approximation_scale = s; }
+ double approximation_scale() const { return m_approximation_scale; }
+
+ static bool auto_close() { return false; }
+ static bool auto_unclose() { return false; }
+
+ void reset() { m_cmd = path_cmd_stop; }
+ void move_to(double x, double y);
+ void line_to(double x, double y);
+ unsigned vertex(double* x, double* y);
+
+ private:
+ double m_approximation_scale;
+ double m_x1;
+ double m_y1;
+ double m_dx;
+ double m_dy;
+ double m_dl;
+ double m_ddl;
+ unsigned m_cmd;
+ };
+
+
+
+}
+
+#endif
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/util/agg_color_conv.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/util/agg_color_conv.h
new file mode 100644
index 00000000000..8b8a0a55d2b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/util/agg_color_conv.h
@@ -0,0 +1,128 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Conversion from one colorspace/pixel format to another
+//
+//----------------------------------------------------------------------------
+
+#ifndef AGG_COLOR_CONV_INCLUDED
+#define AGG_COLOR_CONV_INCLUDED
+
+#include <string.h>
+#include "agg_basics.h"
+#include "agg_rendering_buffer.h"
+
+
+
+
+namespace agg
+{
+
+ //--------------------------------------------------------------color_conv
+ template<class RenBuf, class CopyRow>
+ void color_conv(RenBuf* dst, const RenBuf* src, CopyRow copy_row_functor)
+ {
+ unsigned width = src->width();
+ unsigned height = src->height();
+
+ if(dst->width() < width) width = dst->width();
+ if(dst->height() < height) height = dst->height();
+
+ if(width)
+ {
+ unsigned y;
+ for(y = 0; y < height; y++)
+ {
+ copy_row_functor(dst->row_ptr(0, y, width),
+ src->row_ptr(y),
+ width);
+ }
+ }
+ }
+
+
+ //---------------------------------------------------------color_conv_row
+ template<class CopyRow>
+ void color_conv_row(int8u* dst,
+ const int8u* src,
+ unsigned width,
+ CopyRow copy_row_functor)
+ {
+ copy_row_functor(dst, src, width);
+ }
+
+
+ //---------------------------------------------------------color_conv_same
+ template<int BPP> class color_conv_same
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ memmove(dst, src, width*BPP);
+ }
+ };
+
+
+ // Generic pixel converter.
+ template<class DstFormat, class SrcFormat>
+ struct conv_pixel
+ {
+ void operator()(void* dst, const void* src) const
+ {
+ // Read a pixel from the source format and write it to the destination format.
+ DstFormat::write_plain_color(dst, SrcFormat::read_plain_color(src));
+ }
+ };
+
+ // Generic row converter. Uses conv_pixel to convert individual pixels.
+ template<class DstFormat, class SrcFormat>
+ struct conv_row
+ {
+ void operator()(void* dst, const void* src, unsigned width) const
+ {
+ conv_pixel<DstFormat, SrcFormat> conv;
+ do
+ {
+ conv(dst, src);
+ dst = (int8u*)dst + DstFormat::pix_width;
+ src = (int8u*)src + SrcFormat::pix_width;
+ }
+ while (--width);
+ }
+ };
+
+ // Specialization for the case where source and destination formats are identical.
+ template<class Format>
+ struct conv_row<Format, Format>
+ {
+ void operator()(void* dst, const void* src, unsigned width) const
+ {
+ memmove(dst, src, width * Format::pix_width);
+ }
+ };
+
+ // Top-level conversion function, converts one pixel format to any other.
+ template<class DstFormat, class SrcFormat, class RenBuf>
+ void convert(RenBuf* dst, const RenBuf* src)
+ {
+ color_conv(dst, src, conv_row<DstFormat, SrcFormat>());
+ }
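+
+ // A minimal sketch of how the generic converter might be invoked
+ // (illustration only - it assumes two pixel-format types that expose
+ // static read_plain_color()/write_plain_color() and pix_width, and two
+ // rendering buffers dst_rbuf/src_rbuf):
+ //
+ // agg::convert<DstPixFmt, SrcPixFmt>(&dst_rbuf, &src_rbuf);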
+}
+
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/include/util/agg_color_conv_rgb8.h b/contrib/python/matplotlib/py2/extern/agg24-svn/include/util/agg_color_conv_rgb8.h
new file mode 100644
index 00000000000..609460dba41
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/include/util/agg_color_conv_rgb8.h
@@ -0,0 +1,476 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// A set of functors used with color_conv(). See file agg_color_conv.h
+// These functors can convert images with up to 8 bits per component.
+// Use the converters in the following way:
+//
+// agg::color_conv(dst, src, agg::color_conv_XXXX_to_YYYY());
+// where XXXX and YYYY can be any of:
+// rgb24
+// bgr24
+// rgba32
+// abgr32
+// argb32
+// bgra32
+// rgb555
+// rgb565
+//----------------------------------------------------------------------------
+
+#ifndef AGG_COLOR_CONV_RGB8_INCLUDED
+#define AGG_COLOR_CONV_RGB8_INCLUDED
+
+#include "agg_basics.h"
+#include "agg_color_conv.h"
+
+namespace agg
+{
+
+ //-----------------------------------------------------color_conv_rgb24
+ class color_conv_rgb24
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ int8u tmp[3];
+ tmp[0] = *src++;
+ tmp[1] = *src++;
+ tmp[2] = *src++;
+ *dst++ = tmp[2];
+ *dst++ = tmp[1];
+ *dst++ = tmp[0];
+ }
+ while(--width);
+ }
+ };
+
+ typedef color_conv_rgb24 color_conv_rgb24_to_bgr24;
+ typedef color_conv_rgb24 color_conv_bgr24_to_rgb24;
+
+ typedef color_conv_same<3> color_conv_bgr24_to_bgr24;
+ typedef color_conv_same<3> color_conv_rgb24_to_rgb24;
+
+
+
+ //------------------------------------------------------color_conv_rgba32
+ template<int I1, int I2, int I3, int I4> class color_conv_rgba32
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ int8u tmp[4];
+ tmp[0] = *src++;
+ tmp[1] = *src++;
+ tmp[2] = *src++;
+ tmp[3] = *src++;
+ *dst++ = tmp[I1];
+ *dst++ = tmp[I2];
+ *dst++ = tmp[I3];
+ *dst++ = tmp[I4];
+ }
+ while(--width);
+ }
+ };
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_rgba32<0,3,2,1> color_conv_argb32_to_abgr32; //----color_conv_argb32_to_abgr32
+ typedef color_conv_rgba32<3,2,1,0> color_conv_argb32_to_bgra32; //----color_conv_argb32_to_bgra32
+ typedef color_conv_rgba32<1,2,3,0> color_conv_argb32_to_rgba32; //----color_conv_argb32_to_rgba32
+ typedef color_conv_rgba32<3,0,1,2> color_conv_bgra32_to_abgr32; //----color_conv_bgra32_to_abgr32
+ typedef color_conv_rgba32<3,2,1,0> color_conv_bgra32_to_argb32; //----color_conv_bgra32_to_argb32
+ typedef color_conv_rgba32<2,1,0,3> color_conv_bgra32_to_rgba32; //----color_conv_bgra32_to_rgba32
+ typedef color_conv_rgba32<3,2,1,0> color_conv_rgba32_to_abgr32; //----color_conv_rgba32_to_abgr32
+ typedef color_conv_rgba32<3,0,1,2> color_conv_rgba32_to_argb32; //----color_conv_rgba32_to_argb32
+ typedef color_conv_rgba32<2,1,0,3> color_conv_rgba32_to_bgra32; //----color_conv_rgba32_to_bgra32
+ typedef color_conv_rgba32<0,3,2,1> color_conv_abgr32_to_argb32; //----color_conv_abgr32_to_argb32
+ typedef color_conv_rgba32<1,2,3,0> color_conv_abgr32_to_bgra32; //----color_conv_abgr32_to_bgra32
+ typedef color_conv_rgba32<3,2,1,0> color_conv_abgr32_to_rgba32; //----color_conv_abgr32_to_rgba32
+
+ //------------------------------------------------------------------------
+ typedef color_conv_same<4> color_conv_rgba32_to_rgba32; //----color_conv_rgba32_to_rgba32
+ typedef color_conv_same<4> color_conv_argb32_to_argb32; //----color_conv_argb32_to_argb32
+ typedef color_conv_same<4> color_conv_bgra32_to_bgra32; //----color_conv_bgra32_to_bgra32
+ typedef color_conv_same<4> color_conv_abgr32_to_abgr32; //----color_conv_abgr32_to_abgr32
+
+
+ //--------------------------------------------color_conv_rgb24_rgba32
+ template<int I1, int I2, int I3, int A> class color_conv_rgb24_rgba32
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ dst[I1] = *src++;
+ dst[I2] = *src++;
+ dst[I3] = *src++;
+ dst[A] = 255;
+ dst += 4;
+ }
+ while(--width);
+ }
+ };
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_rgb24_rgba32<1,2,3,0> color_conv_rgb24_to_argb32; //----color_conv_rgb24_to_argb32
+ typedef color_conv_rgb24_rgba32<3,2,1,0> color_conv_rgb24_to_abgr32; //----color_conv_rgb24_to_abgr32
+ typedef color_conv_rgb24_rgba32<2,1,0,3> color_conv_rgb24_to_bgra32; //----color_conv_rgb24_to_bgra32
+ typedef color_conv_rgb24_rgba32<0,1,2,3> color_conv_rgb24_to_rgba32; //----color_conv_rgb24_to_rgba32
+ typedef color_conv_rgb24_rgba32<3,2,1,0> color_conv_bgr24_to_argb32; //----color_conv_bgr24_to_argb32
+ typedef color_conv_rgb24_rgba32<1,2,3,0> color_conv_bgr24_to_abgr32; //----color_conv_bgr24_to_abgr32
+ typedef color_conv_rgb24_rgba32<0,1,2,3> color_conv_bgr24_to_bgra32; //----color_conv_bgr24_to_bgra32
+ typedef color_conv_rgb24_rgba32<2,1,0,3> color_conv_bgr24_to_rgba32; //----color_conv_bgr24_to_rgba32
+
+
+
+ //-------------------------------------------------color_conv_rgba32_rgb24
+ template<int I1, int I2, int I3> class color_conv_rgba32_rgb24
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ *dst++ = src[I1];
+ *dst++ = src[I2];
+ *dst++ = src[I3];
+ src += 4;
+ }
+ while(--width);
+ }
+ };
+
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_rgba32_rgb24<1,2,3> color_conv_argb32_to_rgb24; //----color_conv_argb32_to_rgb24
+ typedef color_conv_rgba32_rgb24<3,2,1> color_conv_abgr32_to_rgb24; //----color_conv_abgr32_to_rgb24
+ typedef color_conv_rgba32_rgb24<2,1,0> color_conv_bgra32_to_rgb24; //----color_conv_bgra32_to_rgb24
+ typedef color_conv_rgba32_rgb24<0,1,2> color_conv_rgba32_to_rgb24; //----color_conv_rgba32_to_rgb24
+ typedef color_conv_rgba32_rgb24<3,2,1> color_conv_argb32_to_bgr24; //----color_conv_argb32_to_bgr24
+ typedef color_conv_rgba32_rgb24<1,2,3> color_conv_abgr32_to_bgr24; //----color_conv_abgr32_to_bgr24
+ typedef color_conv_rgba32_rgb24<0,1,2> color_conv_bgra32_to_bgr24; //----color_conv_bgra32_to_bgr24
+ typedef color_conv_rgba32_rgb24<2,1,0> color_conv_rgba32_to_bgr24; //----color_conv_rgba32_to_bgr24
+
+
+ //------------------------------------------------color_conv_rgb555_rgb24
+ template<int R, int B> class color_conv_rgb555_rgb24
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ unsigned rgb = *(int16u*)src;
+ dst[R] = (int8u)((rgb >> 7) & 0xF8);
+ dst[1] = (int8u)((rgb >> 2) & 0xF8);
+ dst[B] = (int8u)((rgb << 3) & 0xF8);
+ src += 2;
+ dst += 3;
+ }
+ while(--width);
+ }
+ };
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_rgb555_rgb24<2,0> color_conv_rgb555_to_bgr24; //----color_conv_rgb555_to_bgr24
+ typedef color_conv_rgb555_rgb24<0,2> color_conv_rgb555_to_rgb24; //----color_conv_rgb555_to_rgb24
+
+
+ //-------------------------------------------------color_conv_rgb24_rgb555
+ template<int R, int B> class color_conv_rgb24_rgb555
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ *(int16u*)dst = (int16u)(((unsigned(src[R]) << 7) & 0x7C00) |
+ ((unsigned(src[1]) << 2) & 0x3E0) |
+ ((unsigned(src[B]) >> 3)));
+ src += 3;
+ dst += 2;
+ }
+ while(--width);
+ }
+ };
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_rgb24_rgb555<2,0> color_conv_bgr24_to_rgb555; //----color_conv_bgr24_to_rgb555
+ typedef color_conv_rgb24_rgb555<0,2> color_conv_rgb24_to_rgb555; //----color_conv_rgb24_to_rgb555
+
+
+ //-------------------------------------------------color_conv_rgb565_rgb24
+ template<int R, int B> class color_conv_rgb565_rgb24
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ unsigned rgb = *(int16u*)src;
+ dst[R] = (rgb >> 8) & 0xF8;
+ dst[1] = (rgb >> 3) & 0xFC;
+ dst[B] = (rgb << 3) & 0xF8;
+ src += 2;
+ dst += 3;
+ }
+ while(--width);
+ }
+ };
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_rgb565_rgb24<2,0> color_conv_rgb565_to_bgr24; //----color_conv_rgb565_to_bgr24
+ typedef color_conv_rgb565_rgb24<0,2> color_conv_rgb565_to_rgb24; //----color_conv_rgb565_to_rgb24
+
+
+ //-------------------------------------------------color_conv_rgb24_rgb565
+ template<int R, int B> class color_conv_rgb24_rgb565
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ *(int16u*)dst = (int16u)(((unsigned(src[R]) << 8) & 0xF800) |
+ ((unsigned(src[1]) << 3) & 0x7E0) |
+ ((unsigned(src[B]) >> 3)));
+ src += 3;
+ dst += 2;
+ }
+ while(--width);
+ }
+ };
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_rgb24_rgb565<2,0> color_conv_bgr24_to_rgb565; //----color_conv_bgr24_to_rgb565
+ typedef color_conv_rgb24_rgb565<0,2> color_conv_rgb24_to_rgb565; //----color_conv_rgb24_to_rgb565
+
+
+
+ //-------------------------------------------------color_conv_rgb555_rgba32
+ template<int R, int G, int B, int A> class color_conv_rgb555_rgba32
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ int rgb = *(int16*)src;
+ dst[R] = (int8u)((rgb >> 7) & 0xF8);
+ dst[G] = (int8u)((rgb >> 2) & 0xF8);
+ dst[B] = (int8u)((rgb << 3) & 0xF8);
+ dst[A] = (int8u)(rgb >> 15);
+ src += 2;
+ dst += 4;
+ }
+ while(--width);
+ }
+ };
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_rgb555_rgba32<1,2,3,0> color_conv_rgb555_to_argb32; //----color_conv_rgb555_to_argb32
+ typedef color_conv_rgb555_rgba32<3,2,1,0> color_conv_rgb555_to_abgr32; //----color_conv_rgb555_to_abgr32
+ typedef color_conv_rgb555_rgba32<2,1,0,3> color_conv_rgb555_to_bgra32; //----color_conv_rgb555_to_bgra32
+ typedef color_conv_rgb555_rgba32<0,1,2,3> color_conv_rgb555_to_rgba32; //----color_conv_rgb555_to_rgba32
+
+
+ //------------------------------------------------color_conv_rgba32_rgb555
+ template<int R, int G, int B, int A> class color_conv_rgba32_rgb555
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ *(int16u*)dst = (int16u)(((unsigned(src[R]) << 7) & 0x7C00) |
+ ((unsigned(src[G]) << 2) & 0x3E0) |
+ ((unsigned(src[B]) >> 3)) |
+ ((unsigned(src[A]) << 8) & 0x8000));
+ src += 4;
+ dst += 2;
+ }
+ while(--width);
+ }
+ };
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_rgba32_rgb555<1,2,3,0> color_conv_argb32_to_rgb555; //----color_conv_argb32_to_rgb555
+ typedef color_conv_rgba32_rgb555<3,2,1,0> color_conv_abgr32_to_rgb555; //----color_conv_abgr32_to_rgb555
+ typedef color_conv_rgba32_rgb555<2,1,0,3> color_conv_bgra32_to_rgb555; //----color_conv_bgra32_to_rgb555
+ typedef color_conv_rgba32_rgb555<0,1,2,3> color_conv_rgba32_to_rgb555; //----color_conv_rgba32_to_rgb555
+
+
+
+ //------------------------------------------------color_conv_rgb565_rgba32
+ template<int R, int G, int B, int A> class color_conv_rgb565_rgba32
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ int rgb = *(int16*)src;
+ dst[R] = (rgb >> 8) & 0xF8;
+ dst[G] = (rgb >> 3) & 0xFC;
+ dst[B] = (rgb << 3) & 0xF8;
+ dst[A] = 255;
+ src += 2;
+ dst += 4;
+ }
+ while(--width);
+ }
+ };
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_rgb565_rgba32<1,2,3,0> color_conv_rgb565_to_argb32; //----color_conv_rgb565_to_argb32
+ typedef color_conv_rgb565_rgba32<3,2,1,0> color_conv_rgb565_to_abgr32; //----color_conv_rgb565_to_abgr32
+ typedef color_conv_rgb565_rgba32<2,1,0,3> color_conv_rgb565_to_bgra32; //----color_conv_rgb565_to_bgra32
+ typedef color_conv_rgb565_rgba32<0,1,2,3> color_conv_rgb565_to_rgba32; //----color_conv_rgb565_to_rgba32
+
+
+ //------------------------------------------------color_conv_rgba32_rgb565
+ template<int R, int G, int B> class color_conv_rgba32_rgb565
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ *(int16u*)dst = (int16u)(((unsigned(src[R]) << 8) & 0xF800) |
+ ((unsigned(src[G]) << 3) & 0x7E0) |
+ ((unsigned(src[B]) >> 3)));
+ src += 4;
+ dst += 2;
+ }
+ while(--width);
+ }
+ };
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_rgba32_rgb565<1,2,3> color_conv_argb32_to_rgb565; //----color_conv_argb32_to_rgb565
+ typedef color_conv_rgba32_rgb565<3,2,1> color_conv_abgr32_to_rgb565; //----color_conv_abgr32_to_rgb565
+ typedef color_conv_rgba32_rgb565<2,1,0> color_conv_bgra32_to_rgb565; //----color_conv_bgra32_to_rgb565
+ typedef color_conv_rgba32_rgb565<0,1,2> color_conv_rgba32_to_rgb565; //----color_conv_rgba32_to_rgb565
+
+
+ //---------------------------------------------color_conv_rgb555_to_rgb565
+ class color_conv_rgb555_to_rgb565
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ unsigned rgb = *(int16u*)src;
+ *(int16u*)dst = (int16u)(((rgb << 1) & 0xFFC0) | (rgb & 0x1F));
+ src += 2;
+ dst += 2;
+ }
+ while(--width);
+ }
+ };
+
+
+ //----------------------------------------------color_conv_rgb565_to_rgb555
+ class color_conv_rgb565_to_rgb555
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
+ unsigned rgb = *(int16u*)src;
+ *(int16u*)dst = (int16u)(((rgb >> 1) & 0x7FE0) | (rgb & 0x1F));
+ src += 2;
+ dst += 2;
+ }
+ while(--width);
+ }
+ };
+
+
+ //------------------------------------------------------------------------
+ typedef color_conv_same<2> color_conv_rgb555_to_rgb555; //----color_conv_rgb555_to_rgb555
+ typedef color_conv_same<2> color_conv_rgb565_to_rgb565; //----color_conv_rgb565_to_rgb565
+
+
+ template<int R, int B> class color_conv_rgb24_gray8
+ {
+ public:
+ void operator () (int8u* dst,
+ const int8u* src,
+ unsigned width) const
+ {
+ do
+ {
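+ // 77 + 150 + 29 == 256: fixed-point weights approximating the
+ // BT.601 luma coefficients (~0.30, ~0.59, ~0.11); ">> 8" rescales
+ // the weighted sum back to an 8-bit gray value.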
+ *dst++ = (src[R]*77 + src[1]*150 + src[B]*29) >> 8;
+ src += 3;
+ }
+ while(--width);
+ }
+ };
+
+ typedef color_conv_rgb24_gray8<0,2> color_conv_rgb24_to_gray8; //----color_conv_rgb24_to_gray8
+ typedef color_conv_rgb24_gray8<2,0> color_conv_bgr24_to_gray8; //----color_conv_bgr24_to_gray8
+
+
+}
+
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/src/ChangeLog b/contrib/python/matplotlib/py2/extern/agg24-svn/src/ChangeLog
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/src/ChangeLog
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_bezier_arc.cpp b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_bezier_arc.cpp
new file mode 100644
index 00000000000..844d300c091
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_bezier_arc.cpp
@@ -0,0 +1,258 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Arc generator. Produces at most 4 consecutive cubic bezier curves, i.e.,
+// 4, 7, 10, or 13 vertices.
+//
+//----------------------------------------------------------------------------
+
+
+#include <math.h>
+#include "agg_bezier_arc.h"
+
+
+namespace agg
+{
+
+    // This epsilon is used to prevent us from adding degenerate curves
+    // (converging to a single point).
+    // The exact value is not critical. arc_to_bezier() is only accurate for
+    // sweep angles up to pi/2; if fabs(sweep_angle) exceeds pi/2 the curve
+    // becomes inaccurate, but exceeding it slightly is acceptable.
+ //-------------------------------------------------bezier_arc_angle_epsilon
+ const double bezier_arc_angle_epsilon = 0.01;
+
+ //------------------------------------------------------------arc_to_bezier
+ void arc_to_bezier(double cx, double cy, double rx, double ry,
+ double start_angle, double sweep_angle,
+ double* curve)
+ {
+ double x0 = cos(sweep_angle / 2.0);
+ double y0 = sin(sweep_angle / 2.0);
+ double tx = (1.0 - x0) * 4.0 / 3.0;
+ double ty = y0 - tx * x0 / y0;
+ double px[4];
+ double py[4];
+ px[0] = x0;
+ py[0] = -y0;
+ px[1] = x0 + tx;
+ py[1] = -ty;
+ px[2] = x0 + tx;
+ py[2] = ty;
+ px[3] = x0;
+ py[3] = y0;
+
+ double sn = sin(start_angle + sweep_angle / 2.0);
+ double cs = cos(start_angle + sweep_angle / 2.0);
+
+ unsigned i;
+ for(i = 0; i < 4; i++)
+ {
+ curve[i * 2] = cx + rx * (px[i] * cs - py[i] * sn);
+ curve[i * 2 + 1] = cy + ry * (px[i] * sn + py[i] * cs);
+ }
+ }
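+
+    // Example: for rx = ry = 1, start_angle = 0 and sweep_angle = pi/2 the
+    // formulas above reduce to the classic quarter-circle approximation with
+    // control points (1, 0), (1, k), (k, 1), (0, 1), where
+    // k = 4/3 * (sqrt(2) - 1), i.e. about 0.5523.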
+
+
+
+ //------------------------------------------------------------------------
+ void bezier_arc::init(double x, double y,
+ double rx, double ry,
+ double start_angle,
+ double sweep_angle)
+ {
+ start_angle = fmod(start_angle, 2.0 * pi);
+ if(sweep_angle >= 2.0 * pi) sweep_angle = 2.0 * pi;
+ if(sweep_angle <= -2.0 * pi) sweep_angle = -2.0 * pi;
+
+ if(fabs(sweep_angle) < 1e-10)
+ {
+ m_num_vertices = 4;
+ m_cmd = path_cmd_line_to;
+ m_vertices[0] = x + rx * cos(start_angle);
+ m_vertices[1] = y + ry * sin(start_angle);
+ m_vertices[2] = x + rx * cos(start_angle + sweep_angle);
+ m_vertices[3] = y + ry * sin(start_angle + sweep_angle);
+ return;
+ }
+
+ double total_sweep = 0.0;
+ double local_sweep = 0.0;
+ double prev_sweep;
+ m_num_vertices = 2;
+ m_cmd = path_cmd_curve4;
+ bool done = false;
+ do
+ {
+ if(sweep_angle < 0.0)
+ {
+ prev_sweep = total_sweep;
+ local_sweep = -pi * 0.5;
+ total_sweep -= pi * 0.5;
+ if(total_sweep <= sweep_angle + bezier_arc_angle_epsilon)
+ {
+ local_sweep = sweep_angle - prev_sweep;
+ done = true;
+ }
+ }
+ else
+ {
+ prev_sweep = total_sweep;
+ local_sweep = pi * 0.5;
+ total_sweep += pi * 0.5;
+ if(total_sweep >= sweep_angle - bezier_arc_angle_epsilon)
+ {
+ local_sweep = sweep_angle - prev_sweep;
+ done = true;
+ }
+ }
+
+ arc_to_bezier(x, y, rx, ry,
+ start_angle,
+ local_sweep,
+ m_vertices + m_num_vertices - 2);
+
+ m_num_vertices += 6;
+ start_angle += local_sweep;
+ }
+ while(!done && m_num_vertices < 26);
+ }
+
+
+
+
+ //--------------------------------------------------------------------
+ void bezier_arc_svg::init(double x0, double y0,
+ double rx, double ry,
+ double angle,
+ bool large_arc_flag,
+ bool sweep_flag,
+ double x2, double y2)
+ {
+ m_radii_ok = true;
+
+ if(rx < 0.0) rx = -rx;
+        if(ry < 0.0) ry = -ry;
+
+ // Calculate the middle point between
+ // the current and the final points
+ //------------------------
+ double dx2 = (x0 - x2) / 2.0;
+ double dy2 = (y0 - y2) / 2.0;
+
+ double cos_a = cos(angle);
+ double sin_a = sin(angle);
+
+ // Calculate (x1, y1)
+ //------------------------
+ double x1 = cos_a * dx2 + sin_a * dy2;
+ double y1 = -sin_a * dx2 + cos_a * dy2;
+
+ // Ensure radii are large enough
+ //------------------------
+ double prx = rx * rx;
+ double pry = ry * ry;
+ double px1 = x1 * x1;
+ double py1 = y1 * y1;
+
+ // Check that radii are large enough
+ //------------------------
+ double radii_check = px1/prx + py1/pry;
+ if(radii_check > 1.0)
+ {
+ rx = sqrt(radii_check) * rx;
+ ry = sqrt(radii_check) * ry;
+ prx = rx * rx;
+ pry = ry * ry;
+ if(radii_check > 10.0) m_radii_ok = false;
+ }
+
+ // Calculate (cx1, cy1)
+ //------------------------
+ double sign = (large_arc_flag == sweep_flag) ? -1.0 : 1.0;
+ double sq = (prx*pry - prx*py1 - pry*px1) / (prx*py1 + pry*px1);
+ double coef = sign * sqrt((sq < 0) ? 0 : sq);
+ double cx1 = coef * ((rx * y1) / ry);
+ double cy1 = coef * -((ry * x1) / rx);
+
+ //
+ // Calculate (cx, cy) from (cx1, cy1)
+ //------------------------
+ double sx2 = (x0 + x2) / 2.0;
+ double sy2 = (y0 + y2) / 2.0;
+ double cx = sx2 + (cos_a * cx1 - sin_a * cy1);
+ double cy = sy2 + (sin_a * cx1 + cos_a * cy1);
+
+ // Calculate the start_angle (angle1) and the sweep_angle (dangle)
+ //------------------------
+ double ux = (x1 - cx1) / rx;
+ double uy = (y1 - cy1) / ry;
+ double vx = (-x1 - cx1) / rx;
+ double vy = (-y1 - cy1) / ry;
+ double p, n;
+
+ // Calculate the angle start
+ //------------------------
+ n = sqrt(ux*ux + uy*uy);
+ p = ux; // (1 * ux) + (0 * uy)
+ sign = (uy < 0) ? -1.0 : 1.0;
+ double v = p / n;
+ if(v < -1.0) v = -1.0;
+ if(v > 1.0) v = 1.0;
+ double start_angle = sign * acos(v);
+
+ // Calculate the sweep angle
+ //------------------------
+ n = sqrt((ux*ux + uy*uy) * (vx*vx + vy*vy));
+ p = ux * vx + uy * vy;
+ sign = (ux * vy - uy * vx < 0) ? -1.0 : 1.0;
+ v = p / n;
+ if(v < -1.0) v = -1.0;
+ if(v > 1.0) v = 1.0;
+ double sweep_angle = sign * acos(v);
+ if(!sweep_flag && sweep_angle > 0)
+ {
+ sweep_angle -= pi * 2.0;
+ }
+ else
+ if (sweep_flag && sweep_angle < 0)
+ {
+ sweep_angle += pi * 2.0;
+ }
+
+ // We can now build and transform the resulting arc
+ //------------------------
+ m_arc.init(0.0, 0.0, rx, ry, start_angle, sweep_angle);
+ trans_affine mtx = trans_affine_rotation(angle);
+ mtx *= trans_affine_translation(cx, cy);
+
+ for(unsigned i = 2; i < m_arc.num_vertices()-2; i += 2)
+ {
+ mtx.transform(m_arc.vertices() + i, m_arc.vertices() + i + 1);
+ }
+
+ // We must make sure that the starting and ending points
+ // exactly coincide with the initial (x0,y0) and (x2,y2)
+ m_arc.vertices()[0] = x0;
+ m_arc.vertices()[1] = y0;
+ if(m_arc.num_vertices() > 2)
+ {
+ m_arc.vertices()[m_arc.num_vertices() - 2] = x2;
+ m_arc.vertices()[m_arc.num_vertices() - 1] = y2;
+ }
+ }
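+
+    // Usage sketch (hypothetical calling code): an SVG elliptical-arc segment
+    // from (100, 100) to (150, 150) with rx = 50, ry = 25, no axis rotation,
+    // large_arc_flag = 0 and sweep_flag = 1 maps to
+    //     bezier_arc_svg arc;
+    //     arc.init(100, 100, 50, 25, 0.0, false, true, 150, 150);
+    // Note that the rotation angle is expected in radians here; the arc is
+    // then consumed through the rewind()/vertex() vertex-source interface
+    // declared in agg_bezier_arc.h.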
+
+
+}
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_curves.cpp b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_curves.cpp
new file mode 100644
index 00000000000..47017347188
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_curves.cpp
@@ -0,0 +1,613 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#include <math.h>
+#include "agg_curves.h"
+#include "agg_math.h"
+
+namespace agg
+{
+
+ //------------------------------------------------------------------------
+ const double curve_distance_epsilon = 1e-30;
+ const double curve_collinearity_epsilon = 1e-30;
+ const double curve_angle_tolerance_epsilon = 0.01;
+ enum curve_recursion_limit_e { curve_recursion_limit = 32 };
+
+
+
+ //------------------------------------------------------------------------
+ void curve3_inc::approximation_scale(double s)
+ {
+ m_scale = s;
+ }
+
+ //------------------------------------------------------------------------
+ double curve3_inc::approximation_scale() const
+ {
+ return m_scale;
+ }
+
+ //------------------------------------------------------------------------
+ void curve3_inc::init(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3)
+ {
+ m_start_x = x1;
+ m_start_y = y1;
+ m_end_x = x3;
+ m_end_y = y3;
+
+ double dx1 = x2 - x1;
+ double dy1 = y2 - y1;
+ double dx2 = x3 - x2;
+ double dy2 = y3 - y2;
+
+ double len = sqrt(dx1 * dx1 + dy1 * dy1) + sqrt(dx2 * dx2 + dy2 * dy2);
+
+ m_num_steps = uround(len * 0.25 * m_scale);
+
+ if(m_num_steps < 4)
+ {
+ m_num_steps = 4;
+ }
+
+ double subdivide_step = 1.0 / m_num_steps;
+ double subdivide_step2 = subdivide_step * subdivide_step;
+
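+        // Forward differencing of the quadratic: writing the curve as
+        // B(t) = a*t^2 + b*t + p1 with a = p1 - 2*p2 + p3 and b = 2*(p2 - p1),
+        // the per-step increments below are df = a*h^2 + b*h (first difference)
+        // and ddf = 2*a*h^2 (constant second difference), with h = subdivide_step.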
+ double tmpx = (x1 - x2 * 2.0 + x3) * subdivide_step2;
+ double tmpy = (y1 - y2 * 2.0 + y3) * subdivide_step2;
+
+ m_saved_fx = m_fx = x1;
+ m_saved_fy = m_fy = y1;
+
+ m_saved_dfx = m_dfx = tmpx + (x2 - x1) * (2.0 * subdivide_step);
+ m_saved_dfy = m_dfy = tmpy + (y2 - y1) * (2.0 * subdivide_step);
+
+ m_ddfx = tmpx * 2.0;
+ m_ddfy = tmpy * 2.0;
+
+ m_step = m_num_steps;
+ }
+
+ //------------------------------------------------------------------------
+ void curve3_inc::rewind(unsigned)
+ {
+ if(m_num_steps == 0)
+ {
+ m_step = -1;
+ return;
+ }
+ m_step = m_num_steps;
+ m_fx = m_saved_fx;
+ m_fy = m_saved_fy;
+ m_dfx = m_saved_dfx;
+ m_dfy = m_saved_dfy;
+ }
+
+ //------------------------------------------------------------------------
+ unsigned curve3_inc::vertex(double* x, double* y)
+ {
+ if(m_step < 0) return path_cmd_stop;
+ if(m_step == m_num_steps)
+ {
+ *x = m_start_x;
+ *y = m_start_y;
+ --m_step;
+ return path_cmd_move_to;
+ }
+ if(m_step == 0)
+ {
+ *x = m_end_x;
+ *y = m_end_y;
+ --m_step;
+ return path_cmd_line_to;
+ }
+ m_fx += m_dfx;
+ m_fy += m_dfy;
+ m_dfx += m_ddfx;
+ m_dfy += m_ddfy;
+ *x = m_fx;
+ *y = m_fy;
+ --m_step;
+ return path_cmd_line_to;
+ }
+
+ //------------------------------------------------------------------------
+ void curve3_div::init(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3)
+ {
+ m_points.remove_all();
+ m_distance_tolerance_square = 0.5 / m_approximation_scale;
+ m_distance_tolerance_square *= m_distance_tolerance_square;
+ bezier(x1, y1, x2, y2, x3, y3);
+ m_count = 0;
+ }
+
+ //------------------------------------------------------------------------
+ void curve3_div::recursive_bezier(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ unsigned level)
+ {
+ if(level > curve_recursion_limit)
+ {
+ return;
+ }
+
+ // Calculate all the mid-points of the line segments
+ //----------------------
+ double x12 = (x1 + x2) / 2;
+ double y12 = (y1 + y2) / 2;
+ double x23 = (x2 + x3) / 2;
+ double y23 = (y2 + y3) / 2;
+ double x123 = (x12 + x23) / 2;
+ double y123 = (y12 + y23) / 2;
+
+ double dx = x3-x1;
+ double dy = y3-y1;
+ double d = fabs(((x2 - x3) * dy - (y2 - y3) * dx));
+ double da;
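+
+        // d is |cross(p2 - p1, p3 - p1)|, i.e. the distance of the control
+        // point p2 from the chord p1-p3 multiplied by the chord length, so
+        // the test below bounds that distance by the tolerance without any
+        // division or square root.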
+
+ if(d > curve_collinearity_epsilon)
+ {
+ // Regular case
+ //-----------------
+ if(d * d <= m_distance_tolerance_square * (dx*dx + dy*dy))
+ {
+ // If the curvature doesn't exceed the distance_tolerance value
+ // we tend to finish subdivisions.
+ //----------------------
+ if(m_angle_tolerance < curve_angle_tolerance_epsilon)
+ {
+ m_points.add(point_d(x123, y123));
+ return;
+ }
+
+ // Angle & Cusp Condition
+ //----------------------
+ da = fabs(atan2(y3 - y2, x3 - x2) - atan2(y2 - y1, x2 - x1));
+ if(da >= pi) da = 2*pi - da;
+
+ if(da < m_angle_tolerance)
+ {
+ // Finally we can stop the recursion
+ //----------------------
+ m_points.add(point_d(x123, y123));
+ return;
+ }
+ }
+ }
+ else
+ {
+ // Collinear case
+ //------------------
+ da = dx*dx + dy*dy;
+ if(da == 0)
+ {
+ d = calc_sq_distance(x1, y1, x2, y2);
+ }
+ else
+ {
+ d = ((x2 - x1)*dx + (y2 - y1)*dy) / da;
+ if(d > 0 && d < 1)
+ {
+ // Simple collinear case, 1---2---3
+ // We can leave just two endpoints
+ return;
+ }
+ if(d <= 0) d = calc_sq_distance(x2, y2, x1, y1);
+ else if(d >= 1) d = calc_sq_distance(x2, y2, x3, y3);
+ else d = calc_sq_distance(x2, y2, x1 + d*dx, y1 + d*dy);
+ }
+ if(d < m_distance_tolerance_square)
+ {
+ m_points.add(point_d(x2, y2));
+ return;
+ }
+ }
+
+ // Continue subdivision
+ //----------------------
+ recursive_bezier(x1, y1, x12, y12, x123, y123, level + 1);
+ recursive_bezier(x123, y123, x23, y23, x3, y3, level + 1);
+ }
+
+ //------------------------------------------------------------------------
+ void curve3_div::bezier(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3)
+ {
+ m_points.add(point_d(x1, y1));
+ recursive_bezier(x1, y1, x2, y2, x3, y3, 0);
+ m_points.add(point_d(x3, y3));
+ }
+
+
+
+
+
+ //------------------------------------------------------------------------
+ void curve4_inc::approximation_scale(double s)
+ {
+ m_scale = s;
+ }
+
+ //------------------------------------------------------------------------
+ double curve4_inc::approximation_scale() const
+ {
+ return m_scale;
+ }
+
+#if defined(_MSC_VER) && _MSC_VER <= 1200
+ //------------------------------------------------------------------------
+ static double MSC60_fix_ICE(double v) { return v; }
+#endif
+
+ //------------------------------------------------------------------------
+ void curve4_inc::init(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4)
+ {
+ m_start_x = x1;
+ m_start_y = y1;
+ m_end_x = x4;
+ m_end_y = y4;
+
+ double dx1 = x2 - x1;
+ double dy1 = y2 - y1;
+ double dx2 = x3 - x2;
+ double dy2 = y3 - y2;
+ double dx3 = x4 - x3;
+ double dy3 = y4 - y3;
+
+ double len = (sqrt(dx1 * dx1 + dy1 * dy1) +
+ sqrt(dx2 * dx2 + dy2 * dy2) +
+ sqrt(dx3 * dx3 + dy3 * dy3)) * 0.25 * m_scale;
+
+#if defined(_MSC_VER) && _MSC_VER <= 1200
+ m_num_steps = uround(MSC60_fix_ICE(len));
+#else
+ m_num_steps = uround(len);
+#endif
+
+ if(m_num_steps < 4)
+ {
+ m_num_steps = 4;
+ }
+
+ double subdivide_step = 1.0 / m_num_steps;
+ double subdivide_step2 = subdivide_step * subdivide_step;
+ double subdivide_step3 = subdivide_step * subdivide_step * subdivide_step;
+
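+        // Forward differencing of the cubic: with B(t) = a*t^3 + b*t^2 + c*t + p1,
+        // a = -p1 + 3*p2 - 3*p3 + p4 (= tmp2), b = 3*(p1 - 2*p2 + p3) (= 3*tmp1)
+        // and c = 3*(p2 - p1), the increments below are
+        //     df   = a*h^3 + b*h^2 + c*h      (first difference)
+        //     ddf  = 6*a*h^3 + 2*b*h^2        (second difference)
+        //     dddf = 6*a*h^3                  (constant third difference)
+        // with h = subdivide_step.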
+ double pre1 = 3.0 * subdivide_step;
+ double pre2 = 3.0 * subdivide_step2;
+ double pre4 = 6.0 * subdivide_step2;
+ double pre5 = 6.0 * subdivide_step3;
+
+ double tmp1x = x1 - x2 * 2.0 + x3;
+ double tmp1y = y1 - y2 * 2.0 + y3;
+
+ double tmp2x = (x2 - x3) * 3.0 - x1 + x4;
+ double tmp2y = (y2 - y3) * 3.0 - y1 + y4;
+
+ m_saved_fx = m_fx = x1;
+ m_saved_fy = m_fy = y1;
+
+ m_saved_dfx = m_dfx = (x2 - x1) * pre1 + tmp1x * pre2 + tmp2x * subdivide_step3;
+ m_saved_dfy = m_dfy = (y2 - y1) * pre1 + tmp1y * pre2 + tmp2y * subdivide_step3;
+
+ m_saved_ddfx = m_ddfx = tmp1x * pre4 + tmp2x * pre5;
+ m_saved_ddfy = m_ddfy = tmp1y * pre4 + tmp2y * pre5;
+
+ m_dddfx = tmp2x * pre5;
+ m_dddfy = tmp2y * pre5;
+
+ m_step = m_num_steps;
+ }
+
+ //------------------------------------------------------------------------
+ void curve4_inc::rewind(unsigned)
+ {
+ if(m_num_steps == 0)
+ {
+ m_step = -1;
+ return;
+ }
+ m_step = m_num_steps;
+ m_fx = m_saved_fx;
+ m_fy = m_saved_fy;
+ m_dfx = m_saved_dfx;
+ m_dfy = m_saved_dfy;
+ m_ddfx = m_saved_ddfx;
+ m_ddfy = m_saved_ddfy;
+ }
+
+ //------------------------------------------------------------------------
+ unsigned curve4_inc::vertex(double* x, double* y)
+ {
+ if(m_step < 0) return path_cmd_stop;
+ if(m_step == m_num_steps)
+ {
+ *x = m_start_x;
+ *y = m_start_y;
+ --m_step;
+ return path_cmd_move_to;
+ }
+
+ if(m_step == 0)
+ {
+ *x = m_end_x;
+ *y = m_end_y;
+ --m_step;
+ return path_cmd_line_to;
+ }
+
+ m_fx += m_dfx;
+ m_fy += m_dfy;
+ m_dfx += m_ddfx;
+ m_dfy += m_ddfy;
+ m_ddfx += m_dddfx;
+ m_ddfy += m_dddfy;
+
+ *x = m_fx;
+ *y = m_fy;
+ --m_step;
+ return path_cmd_line_to;
+ }
+
+
+
+
+ //------------------------------------------------------------------------
+ void curve4_div::init(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4)
+ {
+ m_points.remove_all();
+ m_distance_tolerance_square = 0.5 / m_approximation_scale;
+ m_distance_tolerance_square *= m_distance_tolerance_square;
+ bezier(x1, y1, x2, y2, x3, y3, x4, y4);
+ m_count = 0;
+ }
+
+ //------------------------------------------------------------------------
+ void curve4_div::recursive_bezier(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4,
+ unsigned level)
+ {
+ if(level > curve_recursion_limit)
+ {
+ return;
+ }
+
+ // Calculate all the mid-points of the line segments
+ //----------------------
+ double x12 = (x1 + x2) / 2;
+ double y12 = (y1 + y2) / 2;
+ double x23 = (x2 + x3) / 2;
+ double y23 = (y2 + y3) / 2;
+ double x34 = (x3 + x4) / 2;
+ double y34 = (y3 + y4) / 2;
+ double x123 = (x12 + x23) / 2;
+ double y123 = (y12 + y23) / 2;
+ double x234 = (x23 + x34) / 2;
+ double y234 = (y23 + y34) / 2;
+ double x1234 = (x123 + x234) / 2;
+ double y1234 = (y123 + y234) / 2;
+
+
+ // Try to approximate the full cubic curve by a single straight line
+ //------------------
+ double dx = x4-x1;
+ double dy = y4-y1;
+
+ double d2 = fabs(((x2 - x4) * dy - (y2 - y4) * dx));
+ double d3 = fabs(((x3 - x4) * dy - (y3 - y4) * dx));
+ double da1, da2, k;
+
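+        // d2 and d3 are |cross(p2 - p1, p4 - p1)| and |cross(p3 - p1, p4 - p1)|:
+        // the distances of the two control points from the chord p1-p4, each
+        // multiplied by the chord length. The switch below classifies which
+        // of them is significant relative to the collinearity epsilon.
+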
+ switch((int(d2 > curve_collinearity_epsilon) << 1) +
+ int(d3 > curve_collinearity_epsilon))
+ {
+ case 0:
+ // All collinear OR p1==p4
+ //----------------------
+ k = dx*dx + dy*dy;
+ if(k == 0)
+ {
+ d2 = calc_sq_distance(x1, y1, x2, y2);
+ d3 = calc_sq_distance(x4, y4, x3, y3);
+ }
+ else
+ {
+ k = 1 / k;
+ da1 = x2 - x1;
+ da2 = y2 - y1;
+ d2 = k * (da1*dx + da2*dy);
+ da1 = x3 - x1;
+ da2 = y3 - y1;
+ d3 = k * (da1*dx + da2*dy);
+ if(d2 > 0 && d2 < 1 && d3 > 0 && d3 < 1)
+ {
+ // Simple collinear case, 1---2---3---4
+ // We can leave just two endpoints
+ return;
+ }
+ if(d2 <= 0) d2 = calc_sq_distance(x2, y2, x1, y1);
+ else if(d2 >= 1) d2 = calc_sq_distance(x2, y2, x4, y4);
+ else d2 = calc_sq_distance(x2, y2, x1 + d2*dx, y1 + d2*dy);
+
+ if(d3 <= 0) d3 = calc_sq_distance(x3, y3, x1, y1);
+ else if(d3 >= 1) d3 = calc_sq_distance(x3, y3, x4, y4);
+ else d3 = calc_sq_distance(x3, y3, x1 + d3*dx, y1 + d3*dy);
+ }
+ if(d2 > d3)
+ {
+ if(d2 < m_distance_tolerance_square)
+ {
+ m_points.add(point_d(x2, y2));
+ return;
+ }
+ }
+ else
+ {
+ if(d3 < m_distance_tolerance_square)
+ {
+ m_points.add(point_d(x3, y3));
+ return;
+ }
+ }
+ break;
+
+ case 1:
+ // p1,p2,p4 are collinear, p3 is significant
+ //----------------------
+ if(d3 * d3 <= m_distance_tolerance_square * (dx*dx + dy*dy))
+ {
+ if(m_angle_tolerance < curve_angle_tolerance_epsilon)
+ {
+ m_points.add(point_d(x23, y23));
+ return;
+ }
+
+ // Angle Condition
+ //----------------------
+ da1 = fabs(atan2(y4 - y3, x4 - x3) - atan2(y3 - y2, x3 - x2));
+ if(da1 >= pi) da1 = 2*pi - da1;
+
+ if(da1 < m_angle_tolerance)
+ {
+ m_points.add(point_d(x2, y2));
+ m_points.add(point_d(x3, y3));
+ return;
+ }
+
+ if(m_cusp_limit != 0.0)
+ {
+ if(da1 > m_cusp_limit)
+ {
+ m_points.add(point_d(x3, y3));
+ return;
+ }
+ }
+ }
+ break;
+
+ case 2:
+ // p1,p3,p4 are collinear, p2 is significant
+ //----------------------
+ if(d2 * d2 <= m_distance_tolerance_square * (dx*dx + dy*dy))
+ {
+ if(m_angle_tolerance < curve_angle_tolerance_epsilon)
+ {
+ m_points.add(point_d(x23, y23));
+ return;
+ }
+
+ // Angle Condition
+ //----------------------
+ da1 = fabs(atan2(y3 - y2, x3 - x2) - atan2(y2 - y1, x2 - x1));
+ if(da1 >= pi) da1 = 2*pi - da1;
+
+ if(da1 < m_angle_tolerance)
+ {
+ m_points.add(point_d(x2, y2));
+ m_points.add(point_d(x3, y3));
+ return;
+ }
+
+ if(m_cusp_limit != 0.0)
+ {
+ if(da1 > m_cusp_limit)
+ {
+ m_points.add(point_d(x2, y2));
+ return;
+ }
+ }
+ }
+ break;
+
+ case 3:
+ // Regular case
+ //-----------------
+ if((d2 + d3)*(d2 + d3) <= m_distance_tolerance_square * (dx*dx + dy*dy))
+ {
+ // If the curvature doesn't exceed the distance_tolerance value
+ // we tend to finish subdivisions.
+ //----------------------
+ if(m_angle_tolerance < curve_angle_tolerance_epsilon)
+ {
+ m_points.add(point_d(x23, y23));
+ return;
+ }
+
+ // Angle & Cusp Condition
+ //----------------------
+ k = atan2(y3 - y2, x3 - x2);
+ da1 = fabs(k - atan2(y2 - y1, x2 - x1));
+ da2 = fabs(atan2(y4 - y3, x4 - x3) - k);
+ if(da1 >= pi) da1 = 2*pi - da1;
+ if(da2 >= pi) da2 = 2*pi - da2;
+
+ if(da1 + da2 < m_angle_tolerance)
+ {
+ // Finally we can stop the recursion
+ //----------------------
+ m_points.add(point_d(x23, y23));
+ return;
+ }
+
+ if(m_cusp_limit != 0.0)
+ {
+ if(da1 > m_cusp_limit)
+ {
+ m_points.add(point_d(x2, y2));
+ return;
+ }
+
+ if(da2 > m_cusp_limit)
+ {
+ m_points.add(point_d(x3, y3));
+ return;
+ }
+ }
+ }
+ break;
+ }
+
+ // Continue subdivision
+ //----------------------
+ recursive_bezier(x1, y1, x12, y12, x123, y123, x1234, y1234, level + 1);
+ recursive_bezier(x1234, y1234, x234, y234, x34, y34, x4, y4, level + 1);
+ }
+
+ //------------------------------------------------------------------------
+ void curve4_div::bezier(double x1, double y1,
+ double x2, double y2,
+ double x3, double y3,
+ double x4, double y4)
+ {
+ m_points.add(point_d(x1, y1));
+ recursive_bezier(x1, y1, x2, y2, x3, y3, x4, y4, 0);
+ m_points.add(point_d(x4, y4));
+ }
+
+}
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_image_filters.cpp b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_image_filters.cpp
new file mode 100644
index 00000000000..549d9adbf5a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_image_filters.cpp
@@ -0,0 +1,103 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Filtering class image_filter_lut implementation
+//
+//----------------------------------------------------------------------------
+
+
+#include "agg_image_filters.h"
+
+
+namespace agg
+{
+ //--------------------------------------------------------------------
+ void image_filter_lut::realloc_lut(double radius)
+ {
+ m_radius = radius;
+ m_diameter = uceil(radius) * 2;
+ m_start = -int(m_diameter / 2 - 1);
+ unsigned size = m_diameter << image_subpixel_shift;
+ if(size > m_weight_array.size())
+ {
+ m_weight_array.resize(size);
+ }
+ }
+
+
+
+ //--------------------------------------------------------------------
+    // This function normalizes the integer weights and corrects rounding
+    // errors. It does not touch the source floating point values
+    // (m_weight_array_dbl); it only adjusts the integers so that, for every
+    // subpixel position, the weights sum exactly to image_filter_scale,
+    // i.e. to 1.0. The filter function itself must still produce a graph
+    // of the proper shape.
+ //--------------------------------------------------------------------
+ void image_filter_lut::normalize()
+ {
+ unsigned i;
+ int flip = 1;
+
+ for(i = 0; i < image_subpixel_scale; i++)
+ {
+ for(;;)
+ {
+ int sum = 0;
+ unsigned j;
+ for(j = 0; j < m_diameter; j++)
+ {
+ sum += m_weight_array[j * image_subpixel_scale + i];
+ }
+
+ if(sum == image_filter_scale) break;
+
+ double k = double(image_filter_scale) / double(sum);
+ sum = 0;
+ for(j = 0; j < m_diameter; j++)
+ {
+ sum += m_weight_array[j * image_subpixel_scale + i] =
+ iround(m_weight_array[j * image_subpixel_scale + i] * k);
+ }
+
+ sum -= image_filter_scale;
+ int inc = (sum > 0) ? -1 : 1;
+
+ for(j = 0; j < m_diameter && sum; j++)
+ {
+ flip ^= 1;
+ unsigned idx = flip ? m_diameter/2 + j/2 : m_diameter/2 - j/2;
+ int v = m_weight_array[idx * image_subpixel_scale + i];
+ if(v < image_filter_scale)
+ {
+ m_weight_array[idx * image_subpixel_scale + i] += inc;
+ sum += inc;
+ }
+ }
+ }
+ }
+
+ unsigned pivot = m_diameter << (image_subpixel_shift - 1);
+
+ for(i = 0; i < pivot; i++)
+ {
+ m_weight_array[pivot + i] = m_weight_array[pivot - i];
+ }
+ unsigned end = (diameter() << image_subpixel_shift) - 1;
+ m_weight_array[0] = m_weight_array[end];
+ }
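+
+    // Typical use (a sketch, assuming the calculate() template member
+    // declared in agg_image_filters.h):
+    //     agg::image_filter_lut lut;
+    //     lut.calculate(agg::image_filter_bilinear(), true);
+    // which fills the weight table and runs normalize() when the second
+    // argument is true.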
+
+
+}
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_trans_affine.cpp b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_trans_affine.cpp
new file mode 100644
index 00000000000..99febc953a1
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_trans_affine.cpp
@@ -0,0 +1,194 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Affine transformations
+//
+//----------------------------------------------------------------------------
+#include "agg_trans_affine.h"
+
+
+
+namespace agg
+{
+
+ //------------------------------------------------------------------------
+ const trans_affine& trans_affine::parl_to_parl(const double* src,
+ const double* dst)
+ {
+ sx = src[2] - src[0];
+ shy = src[3] - src[1];
+ shx = src[4] - src[0];
+ sy = src[5] - src[1];
+ tx = src[0];
+ ty = src[1];
+ invert();
+ multiply(trans_affine(dst[2] - dst[0], dst[3] - dst[1],
+ dst[4] - dst[0], dst[5] - dst[1],
+ dst[0], dst[1]));
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ const trans_affine& trans_affine::rect_to_parl(double x1, double y1,
+ double x2, double y2,
+ const double* parl)
+ {
+ double src[6];
+ src[0] = x1; src[1] = y1;
+ src[2] = x2; src[3] = y1;
+ src[4] = x2; src[5] = y2;
+ parl_to_parl(src, parl);
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ const trans_affine& trans_affine::parl_to_rect(const double* parl,
+ double x1, double y1,
+ double x2, double y2)
+ {
+ double dst[6];
+ dst[0] = x1; dst[1] = y1;
+ dst[2] = x2; dst[3] = y1;
+ dst[4] = x2; dst[5] = y2;
+ parl_to_parl(parl, dst);
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ const trans_affine& trans_affine::multiply(const trans_affine& m)
+ {
+ double t0 = sx * m.sx + shy * m.shx;
+ double t2 = shx * m.sx + sy * m.shx;
+ double t4 = tx * m.sx + ty * m.shx + m.tx;
+ shy = sx * m.shy + shy * m.sy;
+ sy = shx * m.shy + sy * m.sy;
+ ty = tx * m.shy + ty * m.sy + m.ty;
+ sx = t0;
+ shx = t2;
+ tx = t4;
+ return *this;
+ }
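+
+    // The six coefficients form the matrix
+    //     | sx  shx  tx |
+    //     | shy sy   ty |
+    //     | 0   0    1  |
+    // applied as x' = x*sx + y*shx + tx, y' = x*shy + y*sy + ty.
+    // multiply(m) composes in pipeline order: the resulting transformation
+    // applies *this first and m second.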
+
+
+ //------------------------------------------------------------------------
+ const trans_affine& trans_affine::invert()
+ {
+ double d = determinant_reciprocal();
+
+ double t0 = sy * d;
+ sy = sx * d;
+ shy = -shy * d;
+ shx = -shx * d;
+
+ double t4 = -tx * t0 - ty * shx;
+ ty = -tx * shy - ty * sy;
+
+ sx = t0;
+ tx = t4;
+ return *this;
+ }
+
+
+ //------------------------------------------------------------------------
+ const trans_affine& trans_affine::flip_x()
+ {
+ sx = -sx;
+ shy = -shy;
+ tx = -tx;
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ const trans_affine& trans_affine::flip_y()
+ {
+ shx = -shx;
+ sy = -sy;
+ ty = -ty;
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ const trans_affine& trans_affine::reset()
+ {
+ sx = sy = 1.0;
+ shy = shx = tx = ty = 0.0;
+ return *this;
+ }
+
+ //------------------------------------------------------------------------
+ bool trans_affine::is_identity(double epsilon) const
+ {
+ return is_equal_eps(sx, 1.0, epsilon) &&
+ is_equal_eps(shy, 0.0, epsilon) &&
+ is_equal_eps(shx, 0.0, epsilon) &&
+ is_equal_eps(sy, 1.0, epsilon) &&
+ is_equal_eps(tx, 0.0, epsilon) &&
+ is_equal_eps(ty, 0.0, epsilon);
+ }
+
+ //------------------------------------------------------------------------
+ bool trans_affine::is_valid(double epsilon) const
+ {
+ return fabs(sx) > epsilon && fabs(sy) > epsilon;
+ }
+
+ //------------------------------------------------------------------------
+ bool trans_affine::is_equal(const trans_affine& m, double epsilon) const
+ {
+ return is_equal_eps(sx, m.sx, epsilon) &&
+ is_equal_eps(shy, m.shy, epsilon) &&
+ is_equal_eps(shx, m.shx, epsilon) &&
+ is_equal_eps(sy, m.sy, epsilon) &&
+ is_equal_eps(tx, m.tx, epsilon) &&
+ is_equal_eps(ty, m.ty, epsilon);
+ }
+
+ //------------------------------------------------------------------------
+ double trans_affine::rotation() const
+ {
+ double x1 = 0.0;
+ double y1 = 0.0;
+ double x2 = 1.0;
+ double y2 = 0.0;
+ transform(&x1, &y1);
+ transform(&x2, &y2);
+ return atan2(y2-y1, x2-x1);
+ }
+
+ //------------------------------------------------------------------------
+ void trans_affine::translation(double* dx, double* dy) const
+ {
+ *dx = tx;
+ *dy = ty;
+ }
+
+ //------------------------------------------------------------------------
+ void trans_affine::scaling(double* x, double* y) const
+ {
+ double x1 = 0.0;
+ double y1 = 0.0;
+ double x2 = 1.0;
+ double y2 = 1.0;
+ trans_affine t(*this);
+ t *= trans_affine_rotation(-rotation());
+ t.transform(&x1, &y1);
+ t.transform(&x2, &y2);
+ *x = x2 - x1;
+ *y = y2 - y1;
+ }
+
+
+}
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_contour.cpp b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_contour.cpp
new file mode 100644
index 00000000000..a6a99405ad2
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_contour.cpp
@@ -0,0 +1,165 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Contour generator
+//
+//----------------------------------------------------------------------------
+
+#include <math.h>
+#include "agg_vcgen_contour.h"
+
+namespace agg
+{
+
+ //------------------------------------------------------------------------
+ vcgen_contour::vcgen_contour() :
+ m_stroker(),
+ m_width(1),
+ m_src_vertices(),
+ m_out_vertices(),
+ m_status(initial),
+ m_src_vertex(0),
+ m_closed(0),
+ m_orientation(0),
+ m_auto_detect(false)
+ {
+ }
+
+ //------------------------------------------------------------------------
+ void vcgen_contour::remove_all()
+ {
+ m_src_vertices.remove_all();
+ m_closed = 0;
+ m_orientation = 0;
+ m_status = initial;
+ }
+
+ //------------------------------------------------------------------------
+ void vcgen_contour::add_vertex(double x, double y, unsigned cmd)
+ {
+ m_status = initial;
+ if(is_move_to(cmd))
+ {
+ m_src_vertices.modify_last(vertex_dist(x, y));
+ }
+ else
+ {
+ if(is_vertex(cmd))
+ {
+ m_src_vertices.add(vertex_dist(x, y));
+ }
+ else
+ {
+ if(is_end_poly(cmd))
+ {
+ m_closed = get_close_flag(cmd);
+ if(m_orientation == path_flags_none)
+ {
+ m_orientation = get_orientation(cmd);
+ }
+ }
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ void vcgen_contour::rewind(unsigned)
+ {
+ if(m_status == initial)
+ {
+ m_src_vertices.close(true);
+ if(m_auto_detect)
+ {
+ if(!is_oriented(m_orientation))
+ {
+ m_orientation = (calc_polygon_area(m_src_vertices) > 0.0) ?
+ path_flags_ccw :
+ path_flags_cw;
+ }
+ }
+ if(is_oriented(m_orientation))
+ {
+ m_stroker.width(is_ccw(m_orientation) ? m_width : -m_width);
+ }
+ }
+ m_status = ready;
+ m_src_vertex = 0;
+ }
+
+ //------------------------------------------------------------------------
+ unsigned vcgen_contour::vertex(double* x, double* y)
+ {
+ unsigned cmd = path_cmd_line_to;
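+        // Note: the case labels below fall through intentionally
+        // (initial -> ready -> outline) so that vertices start flowing in
+        // the same call that performs the rewind.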
+ while(!is_stop(cmd))
+ {
+ switch(m_status)
+ {
+ case initial:
+ rewind(0);
+
+ case ready:
+ if(m_src_vertices.size() < 2 + unsigned(m_closed != 0))
+ {
+ cmd = path_cmd_stop;
+ break;
+ }
+ m_status = outline;
+ cmd = path_cmd_move_to;
+ m_src_vertex = 0;
+ m_out_vertex = 0;
+
+ case outline:
+ if(m_src_vertex >= m_src_vertices.size())
+ {
+ m_status = end_poly;
+ break;
+ }
+ m_stroker.calc_join(m_out_vertices,
+ m_src_vertices.prev(m_src_vertex),
+ m_src_vertices.curr(m_src_vertex),
+ m_src_vertices.next(m_src_vertex),
+ m_src_vertices.prev(m_src_vertex).dist,
+ m_src_vertices.curr(m_src_vertex).dist);
+ ++m_src_vertex;
+ m_status = out_vertices;
+ m_out_vertex = 0;
+
+ case out_vertices:
+ if(m_out_vertex >= m_out_vertices.size())
+ {
+ m_status = outline;
+ }
+ else
+ {
+ const point_d& c = m_out_vertices[m_out_vertex++];
+ *x = c.x;
+ *y = c.y;
+ return cmd;
+ }
+ break;
+
+ case end_poly:
+ if(!m_closed) return path_cmd_stop;
+ m_status = stop;
+ return path_cmd_end_poly | path_flags_close | path_flags_ccw;
+
+ case stop:
+ return path_cmd_stop;
+ }
+ }
+ return cmd;
+ }
+
+}
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_dash.cpp b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_dash.cpp
new file mode 100644
index 00000000000..129505786c9
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_dash.cpp
@@ -0,0 +1,235 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Line dash generator
+//
+//----------------------------------------------------------------------------
+
+#include <math.h>
+#include "agg_vcgen_dash.h"
+#include "agg_shorten_path.h"
+
+namespace agg
+{
+
+ //------------------------------------------------------------------------
+ vcgen_dash::vcgen_dash() :
+ m_total_dash_len(0.0),
+ m_num_dashes(0),
+ m_dash_start(0.0),
+ m_shorten(0.0),
+ m_curr_dash_start(0.0),
+ m_curr_dash(0),
+ m_src_vertices(),
+ m_closed(0),
+ m_status(initial),
+ m_src_vertex(0)
+ {
+ }
+
+
+
+ //------------------------------------------------------------------------
+ void vcgen_dash::remove_all_dashes()
+ {
+ m_total_dash_len = 0.0;
+ m_num_dashes = 0;
+ m_curr_dash_start = 0.0;
+ m_curr_dash = 0;
+ }
+
+
+ //------------------------------------------------------------------------
+ void vcgen_dash::add_dash(double dash_len, double gap_len)
+ {
+ if(m_num_dashes < max_dashes)
+ {
+ m_total_dash_len += dash_len + gap_len;
+ m_dashes[m_num_dashes++] = dash_len;
+ m_dashes[m_num_dashes++] = gap_len;
+ }
+ }
+
+
+ //------------------------------------------------------------------------
+ void vcgen_dash::dash_start(double ds)
+ {
+ m_dash_start = ds;
+ calc_dash_start(fabs(ds));
+ }
+
+
+ //------------------------------------------------------------------------
+ void vcgen_dash::calc_dash_start(double ds)
+ {
+ m_curr_dash = 0;
+ m_curr_dash_start = 0.0;
+ while(ds > 0.0)
+ {
+ if(ds > m_dashes[m_curr_dash])
+ {
+ ds -= m_dashes[m_curr_dash];
+ ++m_curr_dash;
+ m_curr_dash_start = 0.0;
+ if(m_curr_dash >= m_num_dashes) m_curr_dash = 0;
+ }
+ else
+ {
+ m_curr_dash_start = ds;
+ ds = 0.0;
+ }
+ }
+ }
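+
+    // Example: with add_dash(3, 2) the pattern is {dash 3, gap 2}; a call to
+    // dash_start(4) consumes the 3-unit dash, leaving m_curr_dash = 1 (the
+    // gap) and m_curr_dash_start = 1, so generation begins one unit into the
+    // first gap.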
+
+
+ //------------------------------------------------------------------------
+ void vcgen_dash::remove_all()
+ {
+ m_status = initial;
+ m_src_vertices.remove_all();
+ m_closed = 0;
+ }
+
+
+ //------------------------------------------------------------------------
+ void vcgen_dash::add_vertex(double x, double y, unsigned cmd)
+ {
+ m_status = initial;
+ if(is_move_to(cmd))
+ {
+ m_src_vertices.modify_last(vertex_dist(x, y));
+ }
+ else
+ {
+ if(is_vertex(cmd))
+ {
+ m_src_vertices.add(vertex_dist(x, y));
+ }
+ else
+ {
+ m_closed = get_close_flag(cmd);
+ }
+ }
+ }
+
+
+ //------------------------------------------------------------------------
+ void vcgen_dash::rewind(unsigned)
+ {
+ if(m_status == initial)
+ {
+ m_src_vertices.close(m_closed != 0);
+ shorten_path(m_src_vertices, m_shorten, m_closed);
+ }
+ m_status = ready;
+ m_src_vertex = 0;
+ }
+
+
+ //------------------------------------------------------------------------
+ unsigned vcgen_dash::vertex(double* x, double* y)
+ {
+ unsigned cmd = path_cmd_move_to;
+ while(!is_stop(cmd))
+ {
+ switch(m_status)
+ {
+ case initial:
+ rewind(0);
+
+ case ready:
+ if(m_num_dashes < 2 || m_src_vertices.size() < 2)
+ {
+ cmd = path_cmd_stop;
+ break;
+ }
+ m_status = polyline;
+ m_src_vertex = 1;
+ m_v1 = &m_src_vertices[0];
+ m_v2 = &m_src_vertices[1];
+ m_curr_rest = m_v1->dist;
+ *x = m_v1->x;
+ *y = m_v1->y;
+ if(m_dash_start >= 0.0) calc_dash_start(m_dash_start);
+ return path_cmd_move_to;
+
+ case polyline:
+ {
+ double dash_rest = m_dashes[m_curr_dash] - m_curr_dash_start;
+
+ unsigned cmd = (m_curr_dash & 1) ?
+ path_cmd_move_to :
+ path_cmd_line_to;
+
+ if(m_curr_rest > dash_rest)
+ {
+ m_curr_rest -= dash_rest;
+ ++m_curr_dash;
+ if(m_curr_dash >= m_num_dashes) m_curr_dash = 0;
+ m_curr_dash_start = 0.0;
+ *x = m_v2->x - (m_v2->x - m_v1->x) * m_curr_rest / m_v1->dist;
+ *y = m_v2->y - (m_v2->y - m_v1->y) * m_curr_rest / m_v1->dist;
+ }
+ else
+ {
+ m_curr_dash_start += m_curr_rest;
+ *x = m_v2->x;
+ *y = m_v2->y;
+ ++m_src_vertex;
+ m_v1 = m_v2;
+ m_curr_rest = m_v1->dist;
+ if(m_closed)
+ {
+ if(m_src_vertex > m_src_vertices.size())
+ {
+ m_status = stop;
+ }
+ else
+ {
+ m_v2 = &m_src_vertices
+ [
+ (m_src_vertex >= m_src_vertices.size()) ? 0 :
+ m_src_vertex
+ ];
+ }
+ }
+ else
+ {
+ if(m_src_vertex >= m_src_vertices.size())
+ {
+ m_status = stop;
+ }
+ else
+ {
+ m_v2 = &m_src_vertices[m_src_vertex];
+ }
+ }
+ }
+ return cmd;
+ }
+ break;
+
+ case stop:
+ cmd = path_cmd_stop;
+ break;
+ }
+
+ }
+ return path_cmd_stop;
+ }
+
+
+}
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_stroke.cpp b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_stroke.cpp
new file mode 100644
index 00000000000..2dae3e122b8
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vcgen_stroke.cpp
@@ -0,0 +1,213 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Stroke generator
+//
+//----------------------------------------------------------------------------
+#include <math.h>
+#include "agg_vcgen_stroke.h"
+#include "agg_shorten_path.h"
+
+namespace agg
+{
+
+ //------------------------------------------------------------------------
+ vcgen_stroke::vcgen_stroke() :
+ m_stroker(),
+ m_src_vertices(),
+ m_out_vertices(),
+ m_shorten(0.0),
+ m_closed(0),
+ m_status(initial),
+ m_src_vertex(0),
+ m_out_vertex(0)
+ {
+ }
+
+ //------------------------------------------------------------------------
+ void vcgen_stroke::remove_all()
+ {
+ m_src_vertices.remove_all();
+ m_closed = 0;
+ m_status = initial;
+ }
+
+
+ //------------------------------------------------------------------------
+ void vcgen_stroke::add_vertex(double x, double y, unsigned cmd)
+ {
+ m_status = initial;
+ if(is_move_to(cmd))
+ {
+ m_src_vertices.modify_last(vertex_dist(x, y));
+ }
+ else
+ {
+ if(is_vertex(cmd))
+ {
+ m_src_vertices.add(vertex_dist(x, y));
+ }
+ else
+ {
+ m_closed = get_close_flag(cmd);
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ void vcgen_stroke::rewind(unsigned)
+ {
+ if(m_status == initial)
+ {
+ m_src_vertices.close(m_closed != 0);
+ shorten_path(m_src_vertices, m_shorten, m_closed);
+ if(m_src_vertices.size() < 3) m_closed = 0;
+ }
+ m_status = ready;
+ m_src_vertex = 0;
+ m_out_vertex = 0;
+ }
+
+
+ //------------------------------------------------------------------------
+ unsigned vcgen_stroke::vertex(double* x, double* y)
+ {
+ unsigned cmd = path_cmd_line_to;
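+        // Note: 'initial' falls through into 'ready', and 'close_first'
+        // falls through into 'outline2'; the remaining cases end with an
+        // explicit break or return.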
+ while(!is_stop(cmd))
+ {
+ switch(m_status)
+ {
+ case initial:
+ rewind(0);
+
+ case ready:
+ if(m_src_vertices.size() < 2 + unsigned(m_closed != 0))
+ {
+ cmd = path_cmd_stop;
+ break;
+ }
+ m_status = m_closed ? outline1 : cap1;
+ cmd = path_cmd_move_to;
+ m_src_vertex = 0;
+ m_out_vertex = 0;
+ break;
+
+ case cap1:
+ m_stroker.calc_cap(m_out_vertices,
+ m_src_vertices[0],
+ m_src_vertices[1],
+ m_src_vertices[0].dist);
+ m_src_vertex = 1;
+ m_prev_status = outline1;
+ m_status = out_vertices;
+ m_out_vertex = 0;
+ break;
+
+ case cap2:
+ m_stroker.calc_cap(m_out_vertices,
+ m_src_vertices[m_src_vertices.size() - 1],
+ m_src_vertices[m_src_vertices.size() - 2],
+ m_src_vertices[m_src_vertices.size() - 2].dist);
+ m_prev_status = outline2;
+ m_status = out_vertices;
+ m_out_vertex = 0;
+ break;
+
+ case outline1:
+ if(m_closed)
+ {
+ if(m_src_vertex >= m_src_vertices.size())
+ {
+ m_prev_status = close_first;
+ m_status = end_poly1;
+ break;
+ }
+ }
+ else
+ {
+ if(m_src_vertex >= m_src_vertices.size() - 1)
+ {
+ m_status = cap2;
+ break;
+ }
+ }
+ m_stroker.calc_join(m_out_vertices,
+ m_src_vertices.prev(m_src_vertex),
+ m_src_vertices.curr(m_src_vertex),
+ m_src_vertices.next(m_src_vertex),
+ m_src_vertices.prev(m_src_vertex).dist,
+ m_src_vertices.curr(m_src_vertex).dist);
+ ++m_src_vertex;
+ m_prev_status = m_status;
+ m_status = out_vertices;
+ m_out_vertex = 0;
+ break;
+
+ case close_first:
+ m_status = outline2;
+ cmd = path_cmd_move_to;
+
+ case outline2:
+ if(m_src_vertex <= unsigned(m_closed == 0))
+ {
+ m_status = end_poly2;
+ m_prev_status = stop;
+ break;
+ }
+
+ --m_src_vertex;
+ m_stroker.calc_join(m_out_vertices,
+ m_src_vertices.next(m_src_vertex),
+ m_src_vertices.curr(m_src_vertex),
+ m_src_vertices.prev(m_src_vertex),
+ m_src_vertices.curr(m_src_vertex).dist,
+ m_src_vertices.prev(m_src_vertex).dist);
+
+ m_prev_status = m_status;
+ m_status = out_vertices;
+ m_out_vertex = 0;
+ break;
+
+ case out_vertices:
+ if(m_out_vertex >= m_out_vertices.size())
+ {
+ m_status = m_prev_status;
+ }
+ else
+ {
+ const point_d& c = m_out_vertices[m_out_vertex++];
+ *x = c.x;
+ *y = c.y;
+ return cmd;
+ }
+ break;
+
+ case end_poly1:
+ m_status = m_prev_status;
+ return path_cmd_end_poly | path_flags_close | path_flags_ccw;
+
+ case end_poly2:
+ m_status = m_prev_status;
+ return path_cmd_end_poly | path_flags_close | path_flags_cw;
+
+ case stop:
+ cmd = path_cmd_stop;
+ break;
+ }
+ }
+ return cmd;
+ }
+
+}
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vpgen_segmentator.cpp b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vpgen_segmentator.cpp
new file mode 100644
index 00000000000..49a45b6b13d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/src/agg_vpgen_segmentator.cpp
@@ -0,0 +1,67 @@
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+
+#include <math.h>
+#include "agg_vpgen_segmentator.h"
+
+namespace agg
+{
+
+ void vpgen_segmentator::move_to(double x, double y)
+ {
+ m_x1 = x;
+ m_y1 = y;
+ m_dx = 0.0;
+ m_dy = 0.0;
+ m_dl = 2.0;
+ m_ddl = 2.0;
+ m_cmd = path_cmd_move_to;
+ }
+
+ void vpgen_segmentator::line_to(double x, double y)
+ {
+ m_x1 += m_dx;
+ m_y1 += m_dy;
+ m_dx = x - m_x1;
+ m_dy = y - m_y1;
+ double len = sqrt(m_dx * m_dx + m_dy * m_dy) * m_approximation_scale;
+ if(len < 1e-30) len = 1e-30;
+ m_ddl = 1.0 / len;
+ m_dl = (m_cmd == path_cmd_move_to) ? 0.0 : m_ddl;
+ if(m_cmd == path_cmd_stop) m_cmd = path_cmd_line_to;
+ }
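+
+    // vertex() below walks the recorded segment in parameter steps of
+    // m_ddl = 1 / (length * approximation_scale), i.e. it emits roughly one
+    // point per 1/approximation_scale units of path length.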
+
+ unsigned vpgen_segmentator::vertex(double* x, double* y)
+ {
+ if(m_cmd == path_cmd_stop) return path_cmd_stop;
+
+ unsigned cmd = m_cmd;
+ m_cmd = path_cmd_line_to;
+ if(m_dl >= 1.0 - m_ddl)
+ {
+ m_dl = 1.0;
+ m_cmd = path_cmd_stop;
+ *x = m_x1 + m_dx;
+ *y = m_y1 + m_dy;
+ return cmd;
+ }
+ *x = m_x1 + m_dx * m_dl;
+ *y = m_y1 + m_dy * m_dl;
+ m_dl += m_ddl;
+ return cmd;
+ }
+
+}
+
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/src/authors b/contrib/python/matplotlib/py2/extern/agg24-svn/src/authors
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/src/authors
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/src/copying b/contrib/python/matplotlib/py2/extern/agg24-svn/src/copying
new file mode 100644
index 00000000000..a08de15faa5
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/src/copying
@@ -0,0 +1,11 @@
+The Anti-Grain Geometry Project
+A high quality rendering engine for C++
+http://antigrain.com
+
+Anti-Grain Geometry - Version 2.4
+Copyright (C) 2002-2005 Maxim Shemanarev (McSeem)
+
+Permission to copy, use, modify, sell and distribute this software
+is granted provided this copyright notice appears in all copies.
+This software is provided "as is" without express or implied
+warranty, and with no claim as to its suitability for any purpose.
diff --git a/contrib/python/matplotlib/py2/extern/agg24-svn/ya.make b/contrib/python/matplotlib/py2/extern/agg24-svn/ya.make
new file mode 100644
index 00000000000..b8e286b07c4
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/agg24-svn/ya.make
@@ -0,0 +1,22 @@
+LIBRARY()
+
+LICENSE(PSF-2.0)
+
+ADDINCL(
+ contrib/python/matplotlib/py2/extern/agg24-svn/include
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ src/agg_bezier_arc.cpp
+ src/agg_curves.cpp
+ src/agg_image_filters.cpp
+ src/agg_trans_affine.cpp
+ src/agg_vcgen_contour.cpp
+ src/agg_vcgen_dash.cpp
+ src/agg_vcgen_stroke.cpp
+ src/agg_vpgen_segmentator.cpp
+)
+
+END()
diff --git a/contrib/python/matplotlib/py2/extern/ttconv/pprdrv.h b/contrib/python/matplotlib/py2/extern/ttconv/pprdrv.h
new file mode 100644
index 00000000000..39e81fee7f0
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/ttconv/pprdrv.h
@@ -0,0 +1,113 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+/*
+ * Modified for use within matplotlib
+ * 5 July 2007
+ * Michael Droettboom
+ */
+
+/*
+** ~ppr/src/include/pprdrv.h
+** Copyright 1995, Trinity College Computing Center.
+** Written by David Chappell.
+**
+** Permission to use, copy, modify, and distribute this software and its
+** documentation for any purpose and without fee is hereby granted, provided
+** that the above copyright notice appear in all copies and that both that
+** copyright notice and this permission notice appear in supporting
+** documentation. This software is provided "as is" without express or
+** implied warranty.
+**
+** This file last revised 5 December 1995.
+*/
+
+#include <vector>
+#include <cassert>
+
+/*
+ * Encapsulates all of the output to write to an arbitrary output
+ * function. This both removes the hardcoding of output to go to stdout
+ * and makes output thread-safe. Michael Droettboom [06-07-07]
+ */
+class TTStreamWriter
+{
+ private:
+ // Private copy and assignment
+ TTStreamWriter& operator=(const TTStreamWriter& other);
+ TTStreamWriter(const TTStreamWriter& other);
+
+ public:
+ TTStreamWriter() { }
+ virtual ~TTStreamWriter() { }
+
+ virtual void write(const char*) = 0;
+
+ virtual void printf(const char* format, ...);
+ virtual void put_char(int val);
+ virtual void puts(const char* a);
+ virtual void putline(const char* a);
+};
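+
+/*
+ * A minimal sketch of a concrete writer (hypothetical, for illustration
+ * only; the writers matplotlib actually uses are defined in the extension
+ * code that includes this header):
+ *
+ *     class StringStreamWriter : public TTStreamWriter
+ *     {
+ *         std::string s;
+ *     public:
+ *         virtual void write(const char* a) { s.append(a); }
+ *         const std::string& contents() const { return s; }
+ *     };
+ */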
+
+class TTDictionaryCallback
+{
+private:
+ // Private copy and assignment
+ TTDictionaryCallback& operator=(const TTStreamWriter& other);
+ TTDictionaryCallback(const TTStreamWriter& other);
+
+public:
+ TTDictionaryCallback() { }
+ virtual ~TTDictionaryCallback() { }
+
+ virtual void add_pair(const char* key, const char* value) = 0;
+};
+
+void replace_newlines_with_spaces(char* a);
+
+/*
+ * A simple class for all ttconv exceptions.
+ */
+class TTException
+{
+ const char* message;
+ TTException& operator=(const TTStreamWriter& other);
+ TTException(const TTStreamWriter& other);
+
+public:
+ TTException(const char* message_) : message(message_) { }
+ const char* getMessage()
+ {
+ return message;
+ }
+};
+
+/*
+** No debug code will be included if this
+** is not defined:
+*/
+/* #define DEBUG 1 */
+
+/*
+** Uncomment the defines for the debugging
+** code you want to have included.
+*/
+#ifdef DEBUG
+#define DEBUG_TRUETYPE /* truetype fonts, conversion to Postscript */
+#endif
+
+/* Do not change anything below this line. */
+
+enum font_type_enum
+{
+ PS_TYPE_3 = 3,
+ PS_TYPE_42 = 42,
+ PS_TYPE_42_3_HYBRID = 43,
+ PDF_TYPE_3 = -3
+};
+
+/* routines in pprdrv_tt.c */
+void insert_ttfont(const char *filename, TTStreamWriter& stream, font_type_enum target_type, std::vector<int>& glyph_ids);
+
+void get_pdf_charprocs(const char *filename, std::vector<int>& glyph_ids, TTDictionaryCallback& dict);
+
+/* end of file */
diff --git a/contrib/python/matplotlib/py2/extern/ttconv/pprdrv_tt.cpp b/contrib/python/matplotlib/py2/extern/ttconv/pprdrv_tt.cpp
new file mode 100644
index 00000000000..abe209856ed
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/ttconv/pprdrv_tt.cpp
@@ -0,0 +1,1484 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+/*
+ * Modified for use within matplotlib
+ * 5 July 2007
+ * Michael Droettboom
+ */
+
+/*
+** ~ppr/src/pprdrv/pprdrv_tt.c
+** Copyright 1995, Trinity College Computing Center.
+** Written by David Chappell.
+**
+** Permission to use, copy, modify, and distribute this software and its
+** documentation for any purpose and without fee is hereby granted, provided
+** that the above copyright notice appear in all copies and that both that
+** copyright notice and this permission notice appear in supporting
+** documentation. This software is provided "as is" without express or
+** implied warranty.
+**
+** TrueType font support. These functions allow PPR to generate
+** PostScript fonts from Microsoft compatible TrueType font files.
+**
+** Last revised 19 December 1995.
+*/
+
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include "pprdrv.h"
+#include "truetype.h"
+#include <sstream>
+#ifdef _POSIX_C_SOURCE
+# undef _POSIX_C_SOURCE
+#endif
+#ifndef _AIX
+#ifdef _XOPEN_SOURCE
+# undef _XOPEN_SOURCE
+#endif
+#endif
+#include <Python.h>
+
+/*==========================================================================
+** Convert the indicated Truetype font file to a type 42 or type 3
+** PostScript font and insert it in the output stream.
+**
+** All the routines from here to the end of this file are involved
+** in this process.
+==========================================================================*/
+
+/*---------------------------------------
+** Endian conversion routines.
+** These routines take a BYTE pointer
+** and return a value formed by reading
+** bytes starting at that point.
+**
+** These routines read the big-endian
+** values which are used in TrueType
+** font files.
+---------------------------------------*/
+
+/*
+** Get an Unsigned 32 bit number.
+*/
+ULONG getULONG(BYTE *p)
+{
+ int x;
+ ULONG val=0;
+
+ for (x=0; x<4; x++)
+ {
+ val *= 0x100;
+ val += p[x];
+ }
+
+ return val;
+} /* end of getULONG() */
+
+/*
+** Get an unsigned 16 bit number.
+*/
+USHORT getUSHORT(BYTE *p)
+{
+ int x;
+ USHORT val=0;
+
+ for (x=0; x<2; x++)
+ {
+ val *= 0x100;
+ val += p[x];
+ }
+
+ return val;
+} /* end of getUSHORT() */
+
+/*
+** Get a 32 bit fixed point (16.16) number.
+** A special structure is used to return the value.
+*/
+Fixed getFixed(BYTE *s)
+{
+ Fixed val={0,0};
+
+ val.whole = ((s[0] * 256) + s[1]);
+ val.fraction = ((s[2] * 256) + s[3]);
+
+ return val;
+} /* end of getFixed() */
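+
+/*
+** Example: the big-endian bytes 00 01 80 00 decode to whole = 1 and
+** fraction = 0x8000, i.e. the 16.16 fixed point value 1 + 32768/65536 = 1.5.
+*/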
+
+/*-----------------------------------------------------------------------
+** Load a TrueType font table into memory and return a pointer to it.
+** The font's "file" and "offset_table" fields must be set before this
+** routine is called.
+**
+** The first argument is a TrueType font structure, the second
+** argument is the name of the table to retrieve. A table name
+** is always 4 characters, though the last characters may be
+** padding spaces.
+-----------------------------------------------------------------------*/
+BYTE *GetTable(struct TTFONT *font, const char *name)
+{
+ BYTE *ptr;
+ ULONG x;
+
+#ifdef DEBUG_TRUETYPE
+ debug("GetTable(file,font,\"%s\")",name);
+#endif
+
+ /* We must search the table directory. */
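+    /* The directory starts 12 bytes into the font (after the offset
+       subtable); each entry is 16 bytes: a 4 byte tag, a 4 byte checksum,
+       a 4 byte offset and a 4 byte length, hence the +8/+12 reads and the
+       16 byte stride below. */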
+ ptr = font->offset_table + 12;
+ x=0;
+ while (true)
+ {
+ if ( strncmp((const char*)ptr,name,4) == 0 )
+ {
+ ULONG offset,length;
+ BYTE *table;
+
+ offset = getULONG( ptr + 8 );
+ length = getULONG( ptr + 12 );
+ table = (BYTE*)calloc( sizeof(BYTE), length + 2 );
+
+ try
+ {
+#ifdef DEBUG_TRUETYPE
+ debug("Loading table \"%s\" from offset %d, %d bytes",name,offset,length);
+#endif
+
+ if ( fseek( font->file, (long)offset, SEEK_SET ) )
+ {
+ throw TTException("TrueType font may be corrupt (reason 3)");
+ }
+
+ if ( fread(table,sizeof(BYTE),length,font->file) != (sizeof(BYTE) * length))
+ {
+ throw TTException("TrueType font may be corrupt (reason 4)");
+ }
+ }
+ catch (TTException& )
+ {
+ free(table);
+ throw;
+ }
+ /* Always NUL-terminate; add two in case of UTF16 strings. */
+ table[length] = '\0';
+ table[length + 1] = '\0';
+ return table;
+ }
+
+ x++;
+ ptr += 16;
+ if (x == font->numTables)
+ {
+ throw TTException("TrueType font is missing table");
+ }
+ }
+
+} /* end of GetTable() */
+
+static void utf16be_to_ascii(char *dst, char *src, size_t length) {
+ ++src;
+ for (; *src != 0 && length; dst++, src += 2, --length) {
+ *dst = *src;
+ }
+}
+
+/*--------------------------------------------------------------------
+** Load the 'name' table, get information from it,
+** and store that information in the font structure.
+**
+** The 'name' table contains information such as the name of
+** the font, and its PostScript name.
+--------------------------------------------------------------------*/
+void Read_name(struct TTFONT *font)
+{
+ BYTE *table_ptr,*ptr2;
+ int numrecords; /* Number of strings in this table */
+ BYTE *strings; /* pointer to start of string storage */
+ int x;
+ int platform; /* Current platform id */
+ int nameid; /* name id, */
+ int offset,length; /* offset and length of string. */
+
+#ifdef DEBUG_TRUETYPE
+ debug("Read_name()");
+#endif
+
+ table_ptr = NULL;
+
+ /* Set default values to avoid future references to undefined
+ * pointers. Allocate each of PostName, FullName, FamilyName,
+ * Version, and Style separately so they can be freed safely. */
+ for (char **ptr = &(font->PostName); ptr != NULL; )
+ {
+ *ptr = (char*) calloc(sizeof(char), strlen("unknown")+1);
+ strcpy(*ptr, "unknown");
+ if (ptr == &(font->PostName)) ptr = &(font->FullName);
+ else if (ptr == &(font->FullName)) ptr = &(font->FamilyName);
+ else if (ptr == &(font->FamilyName)) ptr = &(font->Version);
+ else if (ptr == &(font->Version)) ptr = &(font->Style);
+ else ptr = NULL;
+ }
+ font->Copyright = font->Trademark = (char*)NULL;
+
+ table_ptr = GetTable(font, "name"); /* pointer to table */
+ try
+ {
+ numrecords = getUSHORT( table_ptr + 2 ); /* number of names */
+ strings = table_ptr + getUSHORT( table_ptr + 4 ); /* start of string storage */
+
+ ptr2 = table_ptr + 6;
+ for (x=0; x < numrecords; x++,ptr2+=12)
+ {
+ platform = getUSHORT(ptr2);
+ nameid = getUSHORT(ptr2+6);
+ length = getUSHORT(ptr2+8);
+ offset = getUSHORT(ptr2+10);
+
+#ifdef DEBUG_TRUETYPE
+ debug("platform %d, name %d, offset %d, length %d",
+ platform,nameid,offset,length);
+#endif
+
+ /* Copyright notice */
+ if ( platform == 1 && nameid == 0 )
+ {
+ font->Copyright = (char*)calloc(sizeof(char),length+1);
+ strncpy(font->Copyright,(const char*)strings+offset,length);
+ font->Copyright[length]='\0';
+ replace_newlines_with_spaces(font->Copyright);
+
+#ifdef DEBUG_TRUETYPE
+ debug("font->Copyright=\"%s\"",font->Copyright);
+#endif
+ continue;
+ }
+
+
+ /* Font Family name */
+ if ( platform == 1 && nameid == 1 )
+ {
+ free(font->FamilyName);
+ font->FamilyName = (char*)calloc(sizeof(char),length+1);
+ strncpy(font->FamilyName,(const char*)strings+offset,length);
+ font->FamilyName[length]='\0';
+ replace_newlines_with_spaces(font->FamilyName);
+
+#ifdef DEBUG_TRUETYPE
+ debug("font->FamilyName=\"%s\"",font->FamilyName);
+#endif
+ continue;
+ }
+
+
+ /* Font Style (subfamily) name */
+ if ( platform == 1 && nameid == 2 )
+ {
+ free(font->Style);
+ font->Style = (char*)calloc(sizeof(char),length+1);
+ strncpy(font->Style,(const char*)strings+offset,length);
+ font->Style[length]='\0';
+ replace_newlines_with_spaces(font->Style);
+
+#ifdef DEBUG_TRUETYPE
+ debug("font->Style=\"%s\"",font->Style);
+#endif
+ continue;
+ }
+
+
+ /* Full Font name */
+ if ( platform == 1 && nameid == 4 )
+ {
+ free(font->FullName);
+ font->FullName = (char*)calloc(sizeof(char),length+1);
+ strncpy(font->FullName,(const char*)strings+offset,length);
+ font->FullName[length]='\0';
+ replace_newlines_with_spaces(font->FullName);
+
+#ifdef DEBUG_TRUETYPE
+ debug("font->FullName=\"%s\"",font->FullName);
+#endif
+ continue;
+ }
+
+
+ /* Version string */
+ if ( platform == 1 && nameid == 5 )
+ {
+ free(font->Version);
+ font->Version = (char*)calloc(sizeof(char),length+1);
+ strncpy(font->Version,(const char*)strings+offset,length);
+ font->Version[length]='\0';
+ replace_newlines_with_spaces(font->Version);
+
+#ifdef DEBUG_TRUETYPE
+ debug("font->Version=\"%s\"",font->Version);
+#endif
+ continue;
+ }
+
+
+ /* PostScript name */
+ if ( platform == 1 && nameid == 6 )
+ {
+ free(font->PostName);
+ font->PostName = (char*)calloc(sizeof(char),length+1);
+ strncpy(font->PostName,(const char*)strings+offset,length);
+ font->PostName[length]='\0';
+ replace_newlines_with_spaces(font->PostName);
+
+#ifdef DEBUG_TRUETYPE
+ debug("font->PostName=\"%s\"",font->PostName);
+#endif
+ continue;
+ }
+
+ /* Microsoft-format PostScript name */
+ if ( platform == 3 && nameid == 6 )
+ {
+ free(font->PostName);
+ font->PostName = (char*)calloc(sizeof(char),length+1);
+ utf16be_to_ascii(font->PostName, (char *)strings+offset, length);
+ font->PostName[length/2]='\0';
+ replace_newlines_with_spaces(font->PostName);
+
+#ifdef DEBUG_TRUETYPE
+ debug("font->PostName=\"%s\"",font->PostName);
+#endif
+ continue;
+ }
+
+
+ /* Trademark string */
+ if ( platform == 1 && nameid == 7 )
+ {
+ font->Trademark = (char*)calloc(sizeof(char),length+1);
+ strncpy(font->Trademark,(const char*)strings+offset,length);
+ font->Trademark[length]='\0';
+ replace_newlines_with_spaces(font->Trademark);
+
+#ifdef DEBUG_TRUETYPE
+ debug("font->Trademark=\"%s\"",font->Trademark);
+#endif
+ continue;
+ }
+ }
+ }
+ catch (TTException& )
+ {
+ free(table_ptr);
+ throw;
+ }
+
+ free(table_ptr);
+} /* end of Read_name() */
+
+/*---------------------------------------------------------------------
+** Write the header for a PostScript font.
+---------------------------------------------------------------------*/
+void ttfont_header(TTStreamWriter& stream, struct TTFONT *font)
+{
+ int VMMin;
+ int VMMax;
+
+ /*
+ ** To show that it is a TrueType font in PostScript format,
+ ** we will begin the file with a specific string.
+ ** This string also indicates the version of the TrueType
+ ** specification on which the font is based and the
+ ** font manufacturer's revision number for the font.
+ */
+ if ( font->target_type == PS_TYPE_42 ||
+ font->target_type == PS_TYPE_42_3_HYBRID)
+ {
+ stream.printf("%%!PS-TrueTypeFont-%d.%d-%d.%d\n",
+ font->TTVersion.whole, font->TTVersion.fraction,
+ font->MfrRevision.whole, font->MfrRevision.fraction);
+ }
+
+ /* If it is not a Type 42 font, we will use a different format. */
+ else
+ {
+ stream.putline("%!PS-Adobe-3.0 Resource-Font");
+ } /* See RBIIp 641 */
+
+ /* We will make the title the name of the font. */
+ stream.printf("%%%%Title: %s\n",font->FullName);
+
+ /* If there is a Copyright notice, put it here too. */
+ if ( font->Copyright != (char*)NULL )
+ {
+ stream.printf("%%%%Copyright: %s\n",font->Copyright);
+ }
+
+ /* We created this file. */
+ if ( font->target_type == PS_TYPE_42 )
+ {
+ stream.putline("%%Creator: Converted from TrueType to type 42 by PPR");
+ }
+ else if (font->target_type == PS_TYPE_42_3_HYBRID)
+ {
+ stream.putline("%%Creator: Converted from TrueType to type 42/type 3 hybrid by PPR");
+ }
+ else
+ {
+ stream.putline("%%Creator: Converted from TrueType to type 3 by PPR");
+ }
+
+ /* If VM usage information is available, print it. */
+ if ( font->target_type == PS_TYPE_42 || font->target_type == PS_TYPE_42_3_HYBRID)
+ {
+ VMMin = (int)getULONG( font->post_table + 16 );
+ VMMax = (int)getULONG( font->post_table + 20 );
+ if ( VMMin > 0 && VMMax > 0 )
+ stream.printf("%%%%VMUsage: %d %d\n",VMMin,VMMax);
+ }
+
+ /* Start the dictionary which will eventually */
+ /* become the font. */
+ if (font->target_type == PS_TYPE_42)
+ {
+ stream.putline("15 dict begin");
+ }
+ else
+ {
+ stream.putline("25 dict begin");
+
+ /* Type 3 fonts will need some subroutines here. */
+ stream.putline("/_d{bind def}bind def");
+ stream.putline("/_m{moveto}_d");
+ stream.putline("/_l{lineto}_d");
+ stream.putline("/_cl{closepath eofill}_d");
+ stream.putline("/_c{curveto}_d");
+ stream.putline("/_sc{7 -1 roll{setcachedevice}{pop pop pop pop pop pop}ifelse}_d");
+ stream.putline("/_e{exec}_d");
+ }
+
+ stream.printf("/FontName /%s def\n",font->PostName);
+ stream.putline("/PaintType 0 def");
+
+ if (font->target_type == PS_TYPE_42 || font->target_type == PS_TYPE_42_3_HYBRID)
+ {
+ stream.putline("/FontMatrix[1 0 0 1 0 0]def");
+ }
+ else
+ {
+ stream.putline("/FontMatrix[.001 0 0 .001 0 0]def");
+ }
+
+ stream.printf("/FontBBox[%d %d %d %d]def\n",font->llx-1,font->lly-1,font->urx,font->ury);
+ if (font->target_type == PS_TYPE_42 || font->target_type == PS_TYPE_42_3_HYBRID)
+ {
+ stream.printf("/FontType 42 def\n");
+ }
+ else
+ {
+ stream.printf("/FontType 3 def\n");
+ }
+} /* end of ttfont_header() */
+
+/*-------------------------------------------------------------
+** Define the encoding array for this font.
+** Since we don't really want to deal with converting all of
+** the possible font encodings in the wild to a standard PS
+** one, we just explicitly create one for each font.
+-------------------------------------------------------------*/
+void ttfont_encoding(TTStreamWriter& stream, struct TTFONT *font, std::vector<int>& glyph_ids, font_type_enum target_type)
+{
+ if (target_type == PS_TYPE_3 || target_type == PS_TYPE_42_3_HYBRID)
+ {
+ stream.printf("/Encoding [ ");
+
+ for (std::vector<int>::const_iterator i = glyph_ids.begin();
+ i != glyph_ids.end(); ++i)
+ {
+ const char* name = ttfont_CharStrings_getname(font, *i);
+ stream.printf("/%s ", name);
+ }
+
+ stream.printf("] def\n");
+ }
+ else
+ {
+ stream.putline("/Encoding StandardEncoding def");
+ }
+} /* end of ttfont_encoding() */
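+
+/*
+** Illustrative only: for a type 3 or hybrid font the emitted line looks
+** roughly like "/Encoding [ /A /B /uni00000043 ... ] def", one glyph
+** name per requested glyph id; plain type 42 output just uses
+** StandardEncoding.
+*/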
+
+/*-----------------------------------------------------------
+** Create the optional "FontInfo" sub-dictionary.
+-----------------------------------------------------------*/
+void ttfont_FontInfo(TTStreamWriter& stream, struct TTFONT *font)
+{
+ Fixed ItalicAngle;
+
+ /* We create a sub dictionary named "FontInfo" where we */
+ /* store information which, though it is not used by the */
+ /* interpreter, is useful to some programs which will */
+ /* be printing with the font. */
+ stream.putline("/FontInfo 10 dict dup begin");
+
+ /* These names come from the TrueType font's "name" table. */
+ stream.printf("/FamilyName (%s) def\n",font->FamilyName);
+ stream.printf("/FullName (%s) def\n",font->FullName);
+
+ if ( font->Copyright != (char*)NULL || font->Trademark != (char*)NULL )
+ {
+ stream.printf("/Notice (%s",
+ font->Copyright != (char*)NULL ? font->Copyright : "");
+ stream.printf("%s%s) def\n",
+ font->Trademark != (char*)NULL ? " " : "",
+ font->Trademark != (char*)NULL ? font->Trademark : "");
+ }
+
+ /* This information is not quite correct. */
+ stream.printf("/Weight (%s) def\n",font->Style);
+
+ /* Some fonts have this as "version". */
+ stream.printf("/Version (%s) def\n",font->Version);
+
+ /* Some information from the "post" table. */
+ ItalicAngle = getFixed( font->post_table + 4 );
+ stream.printf("/ItalicAngle %d.%d def\n",ItalicAngle.whole,ItalicAngle.fraction);
+ stream.printf("/isFixedPitch %s def\n", getULONG( font->post_table + 12 ) ? "true" : "false" );
+ stream.printf("/UnderlinePosition %d def\n", (int)getFWord( font->post_table + 8 ) );
+ stream.printf("/UnderlineThickness %d def\n", (int)getFWord( font->post_table + 10 ) );
+ stream.putline("end readonly def");
+} /* end of ttfont_FontInfo() */
+
+/*-------------------------------------------------------------------
+** sfnts routines
+** These routines generate the PostScript "sfnts" array which
+** contains one or more strings which contain a reduced version
+** of the TrueType font.
+**
+** A number of functions are required to accomplish this rather
+** complicated task.
+-------------------------------------------------------------------*/
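+
+/*
+** Illustrative only: the finished array has roughly the shape
+**
+**     /sfnts[<...hex...><...hex...>]def
+**
+** with each hexadecimal string kept below the 64K PostScript
+** string limit.
+*/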
+int string_len;
+int line_len;
+bool in_string;
+
+/*
+** This is called once at the start.
+*/
+void sfnts_start(TTStreamWriter& stream)
+{
+ stream.puts("/sfnts[<");
+ in_string=true;
+ string_len=0;
+ line_len=8;
+} /* end of sfnts_start() */
+
+/*
+** Write a BYTE as a hexadecimal value as part of the sfnts array.
+*/
+void sfnts_pputBYTE(TTStreamWriter& stream, BYTE n)
+{
+ static const char hexdigits[]="0123456789ABCDEF";
+
+ if (!in_string)
+ {
+ stream.put_char('<');
+ string_len=0;
+ line_len++;
+ in_string=true;
+ }
+
+ stream.put_char( hexdigits[ n / 16 ] );
+ stream.put_char( hexdigits[ n % 16 ] );
+ string_len++;
+ line_len+=2;
+
+ if (line_len > 70)
+ {
+ stream.put_char('\n');
+ line_len=0;
+ }
+
+} /* end of sfnts_pputBYTE() */
+
+/*
+** Write a USHORT as a hexadecimal value as part of the sfnts array.
+*/
+void sfnts_pputUSHORT(TTStreamWriter& stream, USHORT n)
+{
+ sfnts_pputBYTE(stream, n / 256);
+ sfnts_pputBYTE(stream, n % 256);
+} /* end of sfnts_pputUSHORT() */
+
+/*
+** Write a ULONG as part of the sfnts array.
+*/
+void sfnts_pputULONG(TTStreamWriter& stream, ULONG n)
+{
+ int x1,x2,x3;
+
+ x1 = n % 256;
+ n /= 256;
+ x2 = n % 256;
+ n /= 256;
+ x3 = n % 256;
+ n /= 256;
+
+ sfnts_pputBYTE(stream, n);
+ sfnts_pputBYTE(stream, x3);
+ sfnts_pputBYTE(stream, x2);
+ sfnts_pputBYTE(stream, x1);
+} /* end of sfnts_pputULONG() */
+
+/*
+** This is called whenever it is
+** necessary to end a string in the sfnts array.
+**
+** (The array must be broken into strings which are
+** no longer than 64K characters.)
+*/
+void sfnts_end_string(TTStreamWriter& stream)
+{
+ if (in_string)
+ {
+ string_len=0; /* fool sfnts_pputBYTE() */
+
+#ifdef DEBUG_TRUETYPE_INLINE
+ puts("\n% dummy byte:\n");
+#endif
+
+ sfnts_pputBYTE(stream, 0); /* extra byte for pre-2013 compatibility */
+ stream.put_char('>');
+ line_len++;
+ }
+ in_string=false;
+} /* end of sfnts_end_string() */
+
+/*
+** This is called at the start of each new table.
+** The argument is the length in bytes of the table
+** which will follow. If the new table will not fit
+** in the current string, a new one is started.
+*/
+void sfnts_new_table(TTStreamWriter& stream, ULONG length)
+{
+ if ( (string_len + length) > 65528 )
+ sfnts_end_string(stream);
+} /* end of sfnts_new_table() */
+
+/*
+** We may have to break up the 'glyf' table. That is the reason
+** why we provide this special routine to copy it into the sfnts
+** array.
+*/
+void sfnts_glyf_table(TTStreamWriter& stream, struct TTFONT *font, ULONG oldoffset, ULONG correct_total_length)
+{
+ ULONG off;
+ ULONG length;
+ int c;
+ ULONG total=0; /* running total of bytes written to table */
+ int x;
+ bool loca_is_local=false;
+
+#ifdef DEBUG_TRUETYPE
+ debug("sfnts_glyf_table(font,%d)", (int)correct_total_length);
+#endif
+
+ if (font->loca_table == NULL)
+ {
+ font->loca_table = GetTable(font,"loca");
+ loca_is_local = true;
+ }
+
+ /* Seek to proper position in the file. */
+ fseek( font->file, oldoffset, SEEK_SET );
+
+ /* Copy the glyphs one by one */
+ for (x=0; x < font->numGlyphs; x++)
+ {
+ /* Read the glyph offset from the index-to-location table. */
+ if (font->indexToLocFormat == 0)
+ {
+ off = getUSHORT( font->loca_table + (x * 2) );
+ off *= 2;
+ length = getUSHORT( font->loca_table + ((x+1) * 2) );
+ length *= 2;
+ length -= off;
+ }
+ else
+ {
+ off = getULONG( font->loca_table + (x * 4) );
+ length = getULONG( font->loca_table + ((x+1) * 4) );
+ length -= off;
+ }
+
+#ifdef DEBUG_TRUETYPE
+ debug("glyph length=%d",(int)length);
+#endif
+
+ /* Start new string if necessary. */
+ sfnts_new_table( stream, (int)length );
+
+ /*
+ ** Make sure the glyph is padded out to a
+ ** two byte boundary.
+ */
+ if ( length % 2 ) {
+ throw TTException("TrueType font contains a 'glyf' table without 2 byte padding");
+ }
+
+ /* Copy the bytes of the glyph. */
+ while ( length-- )
+ {
+ if ( (c = fgetc(font->file)) == EOF ) {
+ throw TTException("TrueType font may be corrupt (reason 6)");
+ }
+
+ sfnts_pputBYTE(stream, c);
+ total++; /* add to running total */
+ }
+
+ }
+
+ if (loca_is_local)
+ {
+ free(font->loca_table);
+ font->loca_table = NULL;
+ }
+
+ /* Pad out to full length from table directory */
+ while ( total < correct_total_length )
+ {
+ sfnts_pputBYTE(stream, 0);
+ total++;
+ }
+
+} /* end of sfnts_glyf_table() */
+
+/*
+** Here is the routine which ties it all together.
+**
+** Create the array called "sfnts" which
+** holds the actual TrueType data.
+*/
+void ttfont_sfnts(TTStreamWriter& stream, struct TTFONT *font)
+{
+ static const char *table_names[] = /* The names of all tables */
+ {
+ /* which it is worth while */
+ "cvt ", /* to include in a Type 42 */
+ "fpgm", /* PostScript font. */
+ "glyf",
+ "head",
+ "hhea",
+ "hmtx",
+ "loca",
+ "maxp",
+ "prep"
+ } ;
+
+ struct /* The location of each of */
+ {
+ ULONG oldoffset; /* the above tables. */
+ ULONG newoffset;
+ ULONG length;
+ ULONG checksum;
+ } tables[9];
+
+ BYTE *ptr; /* A pointer into the original table directory. */
+ ULONG x,y; /* General use loop counters. */
+ int c; /* Input character. */
+ int diff;
+ ULONG nextoffset;
+ int count; /* How many `important' tables did we find? */
+
+ ptr = font->offset_table + 12;
+ nextoffset=0;
+ count=0;
+
+ /*
+ ** Find the tables we want and store their vital
+ ** statistics in tables[].
+ */
+ ULONG num_tables_read = 0; /* Number of tables read from the directory */
+ for (x = 0; x < 9; x++) {
+ do {
+ if (num_tables_read < font->numTables) {
+ /* There are still tables to read from ptr */
+ diff = strncmp((char*)ptr, table_names[x], 4);
+
+ if (diff > 0) { /* If we are past it. */
+ tables[x].length = 0;
+ diff = 0;
+ } else if (diff < 0) { /* If we haven't hit it yet. */
+ ptr += 16;
+ num_tables_read++;
+ } else if (diff == 0) { /* Here it is! */
+ tables[x].newoffset = nextoffset;
+ tables[x].checksum = getULONG( ptr + 4 );
+ tables[x].oldoffset = getULONG( ptr + 8 );
+ tables[x].length = getULONG( ptr + 12 );
+ nextoffset += ( ((tables[x].length + 3) / 4) * 4 );
+ count++;
+ ptr += 16;
+ num_tables_read++;
+ }
+ } else {
+ /* We've read the whole table directory already */
+ /* Some tables couldn't be found */
+ tables[x].length = 0;
+ break; /* Proceed to next tables[x] */
+ }
+ } while (diff != 0);
+
+ } /* end of for loop which passes over the table directory */
+
+ /* Begin the sfnts array. */
+ sfnts_start(stream);
+
+ /* Generate the offset table header */
+ /* Start by copying the TrueType version number. */
+ ptr = font->offset_table;
+ for (x=0; x < 4; x++)
+ {
+ sfnts_pputBYTE( stream, *(ptr++) );
+ }
+
+ /* Now, generate those silly numTables numbers. */
+ sfnts_pputUSHORT(stream, count); /* number of tables */
+
+ int search_range = 1;
+ int entry_sel = 0;
+
+ while (search_range <= count) {
+ search_range <<= 1;
+ entry_sel++;
+ }
+ entry_sel = entry_sel > 0 ? entry_sel - 1 : 0;
+ search_range = (search_range >> 1) * 16;
+ int range_shift = count * 16 - search_range;
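+ /* Illustrative only: with count == 9 the loop above leaves search_range = */
+ /* 8*16 = 128, entry_sel = 3 and range_shift = 9*16 - 128 = 16. */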
+
+ sfnts_pputUSHORT(stream, search_range); /* searchRange */
+ sfnts_pputUSHORT(stream, entry_sel); /* entrySelector */
+ sfnts_pputUSHORT(stream, range_shift); /* rangeShift */
+
+#ifdef DEBUG_TRUETYPE
+ debug("only %d tables selected",count);
+#endif
+
+ /* Now, emit the table directory. */
+ for (x=0; x < 9; x++)
+ {
+ if ( tables[x].length == 0 ) /* Skip missing tables */
+ {
+ continue;
+ }
+
+ /* Name */
+ sfnts_pputBYTE( stream, table_names[x][0] );
+ sfnts_pputBYTE( stream, table_names[x][1] );
+ sfnts_pputBYTE( stream, table_names[x][2] );
+ sfnts_pputBYTE( stream, table_names[x][3] );
+
+ /* Checksum */
+ sfnts_pputULONG( stream, tables[x].checksum );
+
+ /* Offset */
+ sfnts_pputULONG( stream, tables[x].newoffset + 12 + (count * 16) );
+
+ /* Length */
+ sfnts_pputULONG( stream, tables[x].length );
+ }
+
+ /* Now, send the tables */
+ for (x=0; x < 9; x++)
+ {
+ if ( tables[x].length == 0 ) /* skip tables that aren't there */
+ {
+ continue;
+ }
+
+#ifdef DEBUG_TRUETYPE
+ debug("emitting table '%s'",table_names[x]);
+#endif
+
+ /* 'glyf' table gets special treatment */
+ if ( strcmp(table_names[x],"glyf")==0 )
+ {
+ sfnts_glyf_table(stream,font,tables[x].oldoffset,tables[x].length);
+ }
+ else /* Other tables may not exceed */
+ {
+ /* 65535 bytes in length. */
+ if ( tables[x].length > 65535 )
+ {
+ throw TTException("TrueType font has a table which is too long");
+ }
+
+ /* Start new string if necessary. */
+ sfnts_new_table(stream, tables[x].length);
+
+ /* Seek to proper position in the file. */
+ fseek( font->file, tables[x].oldoffset, SEEK_SET );
+
+ /* Copy the bytes of the table. */
+ for ( y=0; y < tables[x].length; y++ )
+ {
+ if ( (c = fgetc(font->file)) == EOF )
+ {
+ throw TTException("TrueType font may be corrupt (reason 7)");
+ }
+
+ sfnts_pputBYTE(stream, c);
+ }
+ }
+
+ /* Pad it out to a four byte boundary. */
+ y=tables[x].length;
+ while ( (y % 4) != 0 )
+ {
+ sfnts_pputBYTE(stream, 0);
+ y++;
+#ifdef DEBUG_TRUETYPE_INLINE
+ puts("\n% pad byte:\n");
+#endif
+ }
+
+ } /* End of loop for all tables */
+
+ /* Close the array. */
+ sfnts_end_string(stream);
+ stream.putline("]def");
+} /* end of ttfont_sfnts() */
+
+/*--------------------------------------------------------------
+** Create the CharStrings dictionary which will translate
+** PostScript character names to TrueType font character
+** indexes.
+**
+** If we are creating a type 3 instead of a type 42 font,
+** this array will instead convert PostScript character names
+** to executable procedures.
+--------------------------------------------------------------*/
+const char *Apple_CharStrings[]=
+{
+ ".notdef",".null","nonmarkingreturn","space","exclam","quotedbl","numbersign",
+ "dollar","percent","ampersand","quotesingle","parenleft","parenright",
+ "asterisk","plus", "comma","hyphen","period","slash","zero","one","two",
+ "three","four","five","six","seven","eight","nine","colon","semicolon",
+ "less","equal","greater","question","at","A","B","C","D","E","F","G","H","I",
+ "J","K", "L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z",
+ "bracketleft","backslash","bracketright","asciicircum","underscore","grave",
+ "a","b","c","d","e","f","g","h","i","j","k", "l","m","n","o","p","q","r","s",
+ "t","u","v","w","x","y","z","braceleft","bar","braceright","asciitilde",
+ "Adieresis","Aring","Ccedilla","Eacute","Ntilde","Odieresis","Udieresis",
+ "aacute","agrave","acircumflex","adieresis","atilde","aring","ccedilla",
+ "eacute","egrave","ecircumflex","edieresis","iacute","igrave","icircumflex",
+ "idieresis","ntilde","oacute","ograve","ocircumflex","odieresis","otilde",
+ "uacute","ugrave","ucircumflex","udieresis","dagger","degree","cent",
+ "sterling","section","bullet","paragraph","germandbls","registered",
+ "copyright","trademark","acute","dieresis","notequal","AE","Oslash",
+ "infinity","plusminus","lessequal","greaterequal","yen","mu","partialdiff",
+ "summation","product","pi","integral","ordfeminine","ordmasculine","Omega",
+ "ae","oslash","questiondown","exclamdown","logicalnot","radical","florin",
+ "approxequal","Delta","guillemotleft","guillemotright","ellipsis",
+ "nobreakspace","Agrave","Atilde","Otilde","OE","oe","endash","emdash",
+ "quotedblleft","quotedblright","quoteleft","quoteright","divide","lozenge",
+ "ydieresis","Ydieresis","fraction","currency","guilsinglleft","guilsinglright",
+ "fi","fl","daggerdbl","periodcentered","quotesinglbase","quotedblbase",
+ "perthousand","Acircumflex","Ecircumflex","Aacute","Edieresis","Egrave",
+ "Iacute","Icircumflex","Idieresis","Igrave","Oacute","Ocircumflex","apple",
+ "Ograve","Uacute","Ucircumflex","Ugrave","dotlessi","circumflex","tilde",
+ "macron","breve","dotaccent","ring","cedilla","hungarumlaut","ogonek","caron",
+ "Lslash","lslash","Scaron","scaron","Zcaron","zcaron","brokenbar","Eth","eth",
+ "Yacute","yacute","Thorn","thorn","minus","multiply","onesuperior",
+ "twosuperior","threesuperior","onehalf","onequarter","threequarters","franc",
+ "Gbreve","gbreve","Idot","Scedilla","scedilla","Cacute","cacute","Ccaron",
+ "ccaron","dmacron","markingspace","capslock","shift","propeller","enter",
+ "markingtabrtol","markingtabltor","control","markingdeleteltor",
+ "markingdeletertol","option","escape","parbreakltor","parbreakrtol",
+ "newpage","checkmark","linebreakltor","linebreakrtol","markingnobreakspace",
+ "diamond","appleoutline"
+};
+
+/*
+** This routine is called by the one below.
+** It is also called from pprdrv_tt2.c
+*/
+const char *ttfont_CharStrings_getname(struct TTFONT *font, int charindex)
+{
+ int GlyphIndex;
+ static char temp[80];
+ char *ptr;
+ ULONG len;
+
+ Fixed post_format;
+
+ /* The 'post' table format number. */
+ post_format = getFixed( font->post_table );
+
+ if ( post_format.whole != 2 || post_format.fraction != 0 )
+ {
+ /* We don't have a glyph name table, so generate a name.
+ This generated name must match exactly the name that is
+ generated by FT2Font in get_glyph_name */
+ PyOS_snprintf(temp, 80, "uni%08x", charindex);
+ return temp;
+ }
+
+ GlyphIndex = (int)getUSHORT( font->post_table + 34 + (charindex * 2) );
+
+ if ( GlyphIndex <= 257 ) /* If a standard Apple name, */
+ {
+ return Apple_CharStrings[GlyphIndex];
+ }
+ else /* Otherwise, use one */
+ {
+ /* of the pascal strings. */
+ GlyphIndex -= 258;
+
+ /* Set pointer to start of Pascal strings. */
+ ptr = (char*)(font->post_table + 34 + (font->numGlyphs * 2));
+
+ len = (ULONG)*(ptr++); /* Step thru the strings */
+ while (GlyphIndex--) /* until we get to the one */
+ {
+ /* that we want. */
+ ptr += len;
+ len = (ULONG)*(ptr++);
+ }
+
+ if ( len >= sizeof(temp) )
+ {
+ throw TTException("TrueType font file contains a very long PostScript name");
+ }
+
+ strncpy(temp,ptr,len); /* Copy the pascal string into */
+ temp[len]='\0'; /* a buffer and make it ASCIIz. */
+
+ return temp;
+ }
+} /* end of ttfont_CharStrings_getname() */
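+
+/*
+** Illustrative only: for a 'post' table of format 2.0 this returns either
+** a standard Apple name such as "Agrave" or a name from the font's own
+** Pascal-string list; for any other format it falls back to a synthetic
+** "uni%08x" name, matching what FT2Font's get_glyph_name produces.
+*/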
+
+/*
+** This is the central routine of this section.
+*/
+void ttfont_CharStrings(TTStreamWriter& stream, struct TTFONT *font, std::vector<int>& glyph_ids)
+{
+ Fixed post_format;
+
+ /* The 'post' table format number. */
+ post_format = getFixed( font->post_table );
+
+ /* Emit the start of the PostScript code to define the dictionary. */
+ stream.printf("/CharStrings %d dict dup begin\n", glyph_ids.size()+1);
+ /* Section 5.8.2 table 5.7 of the PS Language Ref says a CharStrings dictionary must contain an entry for .notdef */
+ stream.printf("/.notdef 0 def\n");
+
+ /* Emit one key-value pair for each glyph. */
+ for (std::vector<int>::const_iterator i = glyph_ids.begin();
+ i != glyph_ids.end(); ++i)
+ {
+ if ((font->target_type == PS_TYPE_42 ||
+ font->target_type == PS_TYPE_42_3_HYBRID)
+ && *i < 256) /* type 42 */
+ {
+ stream.printf("/%s %d def\n",ttfont_CharStrings_getname(font, *i), *i);
+ }
+ else /* type 3 */
+ {
+ stream.printf("/%s{",ttfont_CharStrings_getname(font, *i));
+
+ tt_type3_charproc(stream, font, *i);
+
+ stream.putline("}_d"); /* "} bind def" */
+ }
+ }
+
+ stream.putline("end readonly def");
+} /* end of ttfont_CharStrings() */
+
+/*----------------------------------------------------------------
+** Emit the code to finish up the dictionary and turn
+** it into a font.
+----------------------------------------------------------------*/
+void ttfont_trailer(TTStreamWriter& stream, struct TTFONT *font)
+{
+ /* If we are generating a type 3 font, we need to provide */
+ /* BuildGlyph and BuildChar procedures. */
+ if (font->target_type == PS_TYPE_3 ||
+ font->target_type == PS_TYPE_42_3_HYBRID)
+ {
+ stream.put_char('\n');
+
+ stream.putline("/BuildGlyph");
+ stream.putline(" {exch begin"); /* start font dictionary */
+ stream.putline(" CharStrings exch");
+ stream.putline(" 2 copy known not{pop /.notdef}if");
+ stream.putline(" true 3 1 roll get exec");
+ stream.putline(" end}_d");
+
+ stream.put_char('\n');
+
+ /* This procedure is for compatibility with */
+ /* level 1 interpreters. */
+ stream.putline("/BuildChar {");
+ stream.putline(" 1 index /Encoding get exch get");
+ stream.putline(" 1 index /BuildGlyph get exec");
+ stream.putline("}_d");
+
+ stream.put_char('\n');
+ }
+
+ /* If we are generating a type 42 font, we need to check to see */
+ /* if this PostScript interpreter understands type 42 fonts. If */
+ /* it doesn't, we will hope that the Apple TrueType rasterizer */
+ /* has been loaded and we will adjust the font accordingly. */
+ /* I found out how to do this by examining a TrueType font */
+ /* generated by a Macintosh. That is where the TrueType interpreter */
+ /* setup instructions and part of BuildGlyph came from. */
+ if (font->target_type == PS_TYPE_42 ||
+ font->target_type == PS_TYPE_42_3_HYBRID)
+ {
+ stream.put_char('\n');
+
+ /* If we have no "resourcestatus" command, or FontType 42 */
+ /* is unknown, leave "true" on the stack. */
+ stream.putline("systemdict/resourcestatus known");
+ stream.putline(" {42 /FontType resourcestatus");
+ stream.putline(" {pop pop false}{true}ifelse}");
+ stream.putline(" {true}ifelse");
+
+ /* If true, execute code to produce an error message if */
+ /* we can't find Apple's TrueDict in VM. */
+ stream.putline("{/TrueDict where{pop}{(%%[ Error: no TrueType rasterizer ]%%)= flush}ifelse");
+
+ /* Since we are expected to use Apple's TrueDict TrueType */
+ /* rasterizer, change the font type to 3. */
+ stream.putline("/FontType 3 def");
+
+ /* Define a string to hold the state of the Apple */
+ /* TrueType interpreter. */
+ stream.putline(" /TrueState 271 string def");
+
+ /* It looks like we get information about the resolution */
+ /* of the printer and store it in the TrueState string. */
+ stream.putline(" TrueDict begin sfnts save");
+ stream.putline(" 72 0 matrix defaultmatrix dtransform dup");
+ stream.putline(" mul exch dup mul add sqrt cvi 0 72 matrix");
+ stream.putline(" defaultmatrix dtransform dup mul exch dup");
+ stream.putline(" mul add sqrt cvi 3 -1 roll restore");
+ stream.putline(" TrueState initer end");
+
+ /* This BuildGlyph procedure will look the name up in the */
+ /* CharStrings array, and then check to see if what it gets */
+ /* is a procedure. If it is, it executes it, otherwise, it */
+ /* lets the TrueType rasterizer loose on it. */
+
+ /* When this procedure is executed the stack contains */
+ /* the font dictionary and the character name. We */
+ /* exchange arguments and move the dictionary to the */
+ /* dictionary stack. */
+ stream.putline(" /BuildGlyph{exch begin");
+ /* stack: charname */
+
+ /* Put two copies of CharStrings on the stack and consume */
+ /* one testing to see if the charname is defined in it, */
+ /* leave the answer on the stack. */
+ stream.putline(" CharStrings dup 2 index known");
+ /* stack: charname CharStrings bool */
+
+ /* Exchange the CharStrings dictionary and the charname, */
+ /* but if the answer was false, replace the character name */
+ /* with ".notdef". */
+ stream.putline(" {exch}{exch pop /.notdef}ifelse");
+ /* stack: CharStrings charname */
+
+ /* Get the value from the CharStrings dictionary and see */
+ /* if it is executable. */
+ stream.putline(" get dup xcheck");
+ /* stack: CharStrings_entry */
+
+ /* If it is a procedure, execute it according to RBIIp 277-278. */
+ stream.putline(" {currentdict systemdict begin begin exec end end}");
+
+ /* Otherwise, it is a TrueType character index; let the rasterizer at it. */
+ stream.putline(" {TrueDict begin /bander load cvlit exch TrueState render end}");
+
+ stream.putline(" ifelse");
+
+ /* Pop the font's dictionary off the stack. */
+ stream.putline(" end}bind def");
+
+ /* This is the level 1 compatibility BuildChar procedure. */
+ /* See RBIIp 281. */
+ stream.putline(" /BuildChar{");
+ stream.putline(" 1 index /Encoding get exch get");
+ stream.putline(" 1 index /BuildGlyph get exec");
+ stream.putline(" }bind def");
+
+ /* Here we close the condition which is true */
+ /* if the printer has no built-in TrueType */
+ /* rasterizer. */
+ stream.putline("}if");
+ stream.put_char('\n');
+ } /* end of if Type 42 not understood. */
+
+ stream.putline("FontName currentdict end definefont pop");
+ /* stream.putline("%%EOF"); */
+} /* end of ttfont_trailer() */
+
+/*------------------------------------------------------------------
+** This is the externally callable routine which inserts the font.
+------------------------------------------------------------------*/
+
+void read_font(const char *filename, font_type_enum target_type, std::vector<int>& glyph_ids, TTFONT& font)
+{
+ BYTE *ptr;
+
+ /* Decide what type of PostScript font we will be generating. */
+ font.target_type = target_type;
+
+ if (font.target_type == PS_TYPE_42)
+ {
+ bool has_low = false;
+ bool has_high = false;
+
+ for (std::vector<int>::const_iterator i = glyph_ids.begin();
+ i != glyph_ids.end(); ++i)
+ {
+ if (*i > 255)
+ {
+ has_high = true;
+ if (has_low) break;
+ }
+ else
+ {
+ has_low = true;
+ if (has_high) break;
+ }
+ }
+
+ if (has_high && has_low)
+ {
+ font.target_type = PS_TYPE_42_3_HYBRID;
+ }
+ else if (has_high && !has_low)
+ {
+ font.target_type = PS_TYPE_3;
+ }
+ }
+
+ /* Save the file name for error messages. */
+ font.filename=filename;
+
+ /* Open the font file */
+ if ( (font.file = fopen(filename,"rb")) == (FILE*)NULL )
+ {
+ throw TTException("Failed to open TrueType font");
+ }
+
+ /* Allocate space for the unvarying part of the offset table. */
+ assert(font.offset_table == NULL);
+ font.offset_table = (BYTE*)calloc( 12, sizeof(BYTE) );
+
+ /* Read the first part of the offset table. */
+ if ( fread( font.offset_table, sizeof(BYTE), 12, font.file ) != 12 )
+ {
+ throw TTException("TrueType font may be corrupt (reason 1)");
+ }
+
+ /* Determine how many directory entries there are. */
+ font.numTables = getUSHORT( font.offset_table + 4 );
+#ifdef DEBUG_TRUETYPE
+ debug("numTables=%d",(int)font.numTables);
+#endif
+
+ /* Expand the memory block to hold the whole thing. */
+ font.offset_table = (BYTE*)realloc( font.offset_table, sizeof(BYTE) * (12 + font.numTables * 16) );
+
+ /* Read the rest of the table directory. */
+ if ( fread( font.offset_table + 12, sizeof(BYTE), (font.numTables*16), font.file ) != (font.numTables*16) )
+ {
+ throw TTException("TrueType font may be corrupt (reason 2)");
+ }
+
+ /* Extract information from the "Offset" table. */
+ font.TTVersion = getFixed( font.offset_table );
+
+ /* Load the "head" table and extract information from it. */
+ ptr = GetTable(&font, "head");
+ try
+ {
+ font.MfrRevision = getFixed( ptr + 4 ); /* font revision number */
+ font.unitsPerEm = getUSHORT( ptr + 18 );
+ font.HUPM = font.unitsPerEm / 2;
+#ifdef DEBUG_TRUETYPE
+ debug("unitsPerEm=%d",(int)font.unitsPerEm);
+#endif
+ font.llx = topost2( getFWord( ptr + 36 ) ); /* bounding box info */
+ font.lly = topost2( getFWord( ptr + 38 ) );
+ font.urx = topost2( getFWord( ptr + 40 ) );
+ font.ury = topost2( getFWord( ptr + 42 ) );
+ font.indexToLocFormat = getSHORT( ptr + 50 ); /* size of 'loca' data */
+ if (font.indexToLocFormat != 0 && font.indexToLocFormat != 1)
+ {
+ throw TTException("TrueType font is unusable because indexToLocFormat is neither 0 nor 1");
+ }
+ if ( getSHORT(ptr+52) != 0 )
+ {
+ throw TTException("TrueType font is unusable because glyphDataFormat != 0");
+ }
+ }
+ catch (TTException& )
+ {
+ free(ptr);
+ throw;
+ }
+ free(ptr);
+
+ /* Load information from the "name" table. */
+ Read_name(&font);
+
+ /* We need to have the PostScript table around. */
+ assert(font.post_table == NULL);
+ font.post_table = GetTable(&font, "post");
+ font.numGlyphs = getUSHORT( font.post_table + 32 );
+
+ /* If we are generating a Type 3 font, we will need to */
+ /* have the 'loca' and 'glyf' tables around while */
+ /* we are generating the CharStrings. */
+ if (font.target_type == PS_TYPE_3 || font.target_type == PDF_TYPE_3 ||
+ font.target_type == PS_TYPE_42_3_HYBRID)
+ {
+ BYTE *ptr; /* We need only one value */
+ ptr = GetTable(&font, "hhea");
+ font.numberOfHMetrics = getUSHORT(ptr + 34);
+ free(ptr);
+
+ assert(font.loca_table == NULL);
+ font.loca_table = GetTable(&font,"loca");
+ assert(font.glyf_table == NULL);
+ font.glyf_table = GetTable(&font,"glyf");
+ assert(font.hmtx_table == NULL);
+ font.hmtx_table = GetTable(&font,"hmtx");
+ }
+
+ if (glyph_ids.size() == 0)
+ {
+ glyph_ids.clear();
+ glyph_ids.reserve(font.numGlyphs);
+ for (int x = 0; x < font.numGlyphs; ++x)
+ {
+ glyph_ids.push_back(x);
+ }
+ }
+ else if (font.target_type == PS_TYPE_3 ||
+ font.target_type == PS_TYPE_42_3_HYBRID)
+ {
+ ttfont_add_glyph_dependencies(&font, glyph_ids);
+ }
+
+} /* end of read_font() */
+
+void insert_ttfont(const char *filename, TTStreamWriter& stream,
+ font_type_enum target_type, std::vector<int>& glyph_ids)
+{
+ struct TTFONT font;
+
+ read_font(filename, target_type, glyph_ids, font);
+
+ /* Write the header for the PostScript font. */
+ ttfont_header(stream, &font);
+
+ /* Define the encoding. */
+ ttfont_encoding(stream, &font, glyph_ids, target_type);
+
+ /* Insert FontInfo dictionary. */
+ ttfont_FontInfo(stream, &font);
+
+ /* If we are generating a type 42 font, */
+ /* emit the sfnts array. */
+ if (font.target_type == PS_TYPE_42 ||
+ font.target_type == PS_TYPE_42_3_HYBRID)
+ {
+ ttfont_sfnts(stream, &font);
+ }
+
+ /* Emit the CharStrings array. */
+ ttfont_CharStrings(stream, &font, glyph_ids);
+
+ /* Send the font trailer. */
+ ttfont_trailer(stream, &font);
+
+} /* end of insert_ttfont() */
+
+class StringStreamWriter : public TTStreamWriter
+{
+ std::ostringstream oss;
+
+public:
+ void write(const char* a)
+ {
+ oss << a;
+ }
+
+ std::string str()
+ {
+ return oss.str();
+ }
+};
+
+void get_pdf_charprocs(const char *filename, std::vector<int>& glyph_ids, TTDictionaryCallback& dict)
+{
+ struct TTFONT font;
+
+ read_font(filename, PDF_TYPE_3, glyph_ids, font);
+
+ for (std::vector<int>::const_iterator i = glyph_ids.begin();
+ i != glyph_ids.end(); ++i)
+ {
+ StringStreamWriter writer;
+ tt_type3_charproc(writer, &font, *i);
+ const char* name = ttfont_CharStrings_getname(&font, *i);
+ dict.add_pair(name, writer.str().c_str());
+ }
+}
+
+TTFONT::TTFONT() :
+ file(NULL),
+ PostName(NULL),
+ FullName(NULL),
+ FamilyName(NULL),
+ Style(NULL),
+ Copyright(NULL),
+ Version(NULL),
+ Trademark(NULL),
+ offset_table(NULL),
+ post_table(NULL),
+ loca_table(NULL),
+ glyf_table(NULL),
+ hmtx_table(NULL)
+{
+
+}
+
+TTFONT::~TTFONT()
+{
+ if (file)
+ {
+ fclose(file);
+ }
+ free(PostName);
+ free(FullName);
+ free(FamilyName);
+ free(Style);
+ free(Copyright);
+ free(Version);
+ free(Trademark);
+ free(offset_table);
+ free(post_table);
+ free(loca_table);
+ free(glyf_table);
+ free(hmtx_table);
+}
+
+/* end of file */
diff --git a/contrib/python/matplotlib/py2/extern/ttconv/pprdrv_tt2.cpp b/contrib/python/matplotlib/py2/extern/ttconv/pprdrv_tt2.cpp
new file mode 100644
index 00000000000..058bc005348
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/ttconv/pprdrv_tt2.cpp
@@ -0,0 +1,736 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+/*
+ * Modified for use within matplotlib
+ * 5 July 2007
+ * Michael Droettboom
+ */
+
+/*
+** ~ppr/src/pprdrv/pprdrv_tt2.c
+** Copyright 1995, Trinity College Computing Center.
+** Written by David Chappell.
+**
+** Permission to use, copy, modify, and distribute this software and its
+** documentation for any purpose and without fee is hereby granted, provided
+** that the above copyright notice appear in all copies and that both that
+** copyright notice and this permission notice appear in supporting
+** documentation. This software is provided "as is" without express or
+** implied warranty.
+**
+** TrueType font support. These functions allow PPR to generate
+** PostScript fonts from Microsoft compatible TrueType font files.
+**
+** The functions in this file do most of the work to convert a
+** TrueType font to a type 3 PostScript font.
+**
+** Most of the material in this file is derived from a program called
+** "ttf2ps" which L. S. Ng posted to the usenet news group
+** "comp.sources.postscript". The author did not provide a copyright
+** notice or indicate any restrictions on use.
+**
+** Last revised 11 July 1995.
+*/
+
+#include <cstdlib>
+#include <cmath>
+#include <cstring>
+#include <memory>
+#include "pprdrv.h"
+#include "truetype.h"
+#include <algorithm>
+#include <stack>
+#include <list>
+
+class GlyphToType3
+{
+private:
+ GlyphToType3& operator=(const GlyphToType3& other);
+ GlyphToType3(const GlyphToType3& other);
+
+ /* The PostScript bounding box. */
+ int llx,lly,urx,ury;
+ int advance_width;
+
+ /* Variables to hold the character data. */
+ int *epts_ctr; /* array of contour endpoints */
+ int num_pts, num_ctr; /* number of points, number of contours */
+ FWord *xcoor, *ycoor; /* arrays of x and y coordinates */
+ BYTE *tt_flags; /* array of TrueType flags */
+
+ int stack_depth; /* A book-keeping variable for keeping track of the depth of the PS stack */
+
+ bool pdf_mode;
+
+ void load_char(TTFONT* font, BYTE *glyph);
+ void stack(TTStreamWriter& stream, int new_elem);
+ void stack_end(TTStreamWriter& stream);
+ void PSConvert(TTStreamWriter& stream);
+ void PSCurveto(TTStreamWriter& stream,
+ FWord x0, FWord y0,
+ FWord x1, FWord y1,
+ FWord x2, FWord y2);
+ void PSMoveto(TTStreamWriter& stream, int x, int y);
+ void PSLineto(TTStreamWriter& stream, int x, int y);
+ void do_composite(TTStreamWriter& stream, struct TTFONT *font, BYTE *glyph);
+
+public:
+ GlyphToType3(TTStreamWriter& stream, struct TTFONT *font, int charindex, bool embedded = false);
+ ~GlyphToType3();
+};
+
+// Each point on a TrueType contour is either on the path or off it (a
+// control point); here's a simple representation for building such
+// contours. Added by Jouni Seppänen 2012-05-27.
+enum Flag { ON_PATH, OFF_PATH };
+struct FlaggedPoint
+{
+ enum Flag flag;
+ FWord x;
+ FWord y;
+ FlaggedPoint(Flag flag_, FWord x_, FWord y_): flag(flag_), x(x_), y(y_) {};
+};
+
+double area(FWord *x, FWord *y, int n);
+#define sqr(x) ((x)*(x))
+
+#define NOMOREINCTR -1
+#define NOMOREOUTCTR -1
+
+/*
+** This routine is used to break the character
+** procedure up into a number of smaller
+** procedures. This is necessary so as not to
+** overflow the stack on certain level 1 interpreters.
+**
+** Prepare to push another item onto the stack,
+** starting a new procedure if necessary.
+**
+** Not all the stack depth calculations in this routine
+** are perfectly accurate, but they do the job.
+*/
+void GlyphToType3::stack(TTStreamWriter& stream, int new_elem)
+{
+ if ( !pdf_mode && num_pts > 25 ) /* Only do something if we will */
+ {
+ /* have a lot of points. */
+ if (stack_depth == 0)
+ {
+ stream.put_char('{');
+ stack_depth=1;
+ }
+
+ stack_depth += new_elem; /* Account for what we propose to add */
+
+ if (stack_depth > 100)
+ {
+ stream.puts("}_e{");
+ stack_depth = 3 + new_elem; /* A rough estimate */
+ }
+ }
+} /* end of stack() */
+
+void GlyphToType3::stack_end(TTStreamWriter& stream) /* called at end */
+{
+ if ( !pdf_mode && stack_depth )
+ {
+ stream.puts("}_e");
+ stack_depth=0;
+ }
+} /* end of stack_end() */
+
+/*
+** We call this routine to emit the PostScript code
+** for the character we have loaded with load_char().
+*/
+void GlyphToType3::PSConvert(TTStreamWriter& stream)
+{
+ int j, k;
+
+ /* Step thru the contours.
+ * j = index to xcoor, ycoor, tt_flags (point data)
+ * k = index to epts_ctr (which points belong to the same contour) */
+ for(j = k = 0; k < num_ctr; k++)
+ {
+ // A TrueType contour consists of on-path and off-path points.
+ // Two consecutive on-path points are to be joined with a
+ // line; off-path points between on-path points indicate a
+ // quadratic spline, where the off-path point is the control
+ // point. Two consecutive off-path points have an implicit
+ // on-path point midway between them.
+ std::list<FlaggedPoint> points;
+
+ // Represent flags and x/y coordinates as a C++ list
+ for (; j <= epts_ctr[k]; j++)
+ {
+ if (!(tt_flags[j] & 1)) {
+ points.push_back(FlaggedPoint(OFF_PATH, xcoor[j], ycoor[j]));
+ } else {
+ points.push_back(FlaggedPoint(ON_PATH, xcoor[j], ycoor[j]));
+ }
+ }
+
+ if (points.size() == 0) {
+ // Don't try to access the last element of an empty list
+ continue;
+ }
+
+ // For any two consecutive off-path points, insert the implied
+ // on-path point.
+ FlaggedPoint prev = points.back();
+ for (std::list<FlaggedPoint>::iterator it = points.begin();
+ it != points.end();
+ it++)
+ {
+ if (prev.flag == OFF_PATH && it->flag == OFF_PATH)
+ {
+ points.insert(it,
+ FlaggedPoint(ON_PATH,
+ (prev.x + it->x) / 2,
+ (prev.y + it->y) / 2));
+ }
+ prev = *it;
+ }
+ // Handle the wrap-around: insert a point either at the beginning
+ // or at the end that has the same coordinates as the opposite point.
+ // This also ensures that the initial point is ON_PATH.
+ if (points.front().flag == OFF_PATH)
+ {
+ assert(points.back().flag == ON_PATH);
+ points.insert(points.begin(), points.back());
+ }
+ else
+ {
+ assert(points.front().flag == ON_PATH);
+ points.push_back(points.front());
+ }
+
+ // The first point
+ stack(stream, 3);
+ PSMoveto(stream, points.front().x, points.front().y);
+
+ // Step through the remaining points
+ std::list<FlaggedPoint>::const_iterator it = points.begin();
+ for (it++; it != points.end(); /* incremented inside */)
+ {
+ const FlaggedPoint& point = *it;
+ if (point.flag == ON_PATH)
+ {
+ stack(stream, 3);
+ PSLineto(stream, point.x, point.y);
+ it++;
+ } else {
+ std::list<FlaggedPoint>::const_iterator prev = it, next = it;
+ prev--;
+ next++;
+ assert(prev->flag == ON_PATH);
+ assert(next->flag == ON_PATH);
+ stack(stream, 7);
+ PSCurveto(stream,
+ prev->x, prev->y,
+ point.x, point.y,
+ next->x, next->y);
+ it++;
+ it++;
+ }
+ }
+ }
+
+ /* Now, we can fill the whole thing. */
+ stack(stream, 1);
+ stream.puts( pdf_mode ? "f" : "_cl" );
+} /* end of PSConvert() */
+
+void GlyphToType3::PSMoveto(TTStreamWriter& stream, int x, int y)
+{
+ stream.printf(pdf_mode ? "%d %d m\n" : "%d %d _m\n",
+ x, y);
+}
+
+void GlyphToType3::PSLineto(TTStreamWriter& stream, int x, int y)
+{
+ stream.printf(pdf_mode ? "%d %d l\n" : "%d %d _l\n",
+ x, y);
+}
+
+/*
+** Emit a PostScript "curveto" command, assuming the current point
+** is (x0, y0), the control point of a quadratic spline is (x1, y1),
+** and the endpoint is (x2, y2). Note that this requires a conversion,
+** since PostScript splines are cubic.
+*/
+void GlyphToType3::PSCurveto(TTStreamWriter& stream,
+ FWord x0, FWord y0,
+ FWord x1, FWord y1,
+ FWord x2, FWord y2)
+{
+ double sx[3], sy[3], cx[3], cy[3];
+
+ sx[0] = x0;
+ sy[0] = y0;
+ sx[1] = x1;
+ sy[1] = y1;
+ sx[2] = x2;
+ sy[2] = y2;
+ cx[0] = (2*sx[1]+sx[0])/3;
+ cy[0] = (2*sy[1]+sy[0])/3;
+ cx[1] = (sx[2]+2*sx[1])/3;
+ cy[1] = (sy[2]+2*sy[1])/3;
+ cx[2] = sx[2];
+ cy[2] = sy[2];
+ stream.printf("%d %d %d %d %d %d %s\n",
+ (int)cx[0], (int)cy[0], (int)cx[1], (int)cy[1],
+ (int)cx[2], (int)cy[2], pdf_mode ? "c" : "_c");
+}
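+
+/*
+** Illustrative only: the control points computed above are the standard
+** quadratic-to-cubic elevation
+**     C1 = (2*Q + P0) / 3,   C2 = (2*Q + P2) / 3
+** where P0 = (x0,y0) is the current point, Q = (x1,y1) the quadratic
+** control point, and P2 = (x2,y2) the endpoint of the segment.
+*/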
+
+/*
+** Deallocate the structures which stored
+** the data for the last simple glyph.
+*/
+GlyphToType3::~GlyphToType3()
+{
+ free(tt_flags); /* The flags array */
+ free(xcoor); /* The X coordinates */
+ free(ycoor); /* The Y coordinates */
+ free(epts_ctr); /* The array of contour endpoints */
+}
+
+/*
+** Load the simple glyph data pointed to by glyph.
+** The pointer "glyph" should point 10 bytes into
+** the glyph data.
+*/
+void GlyphToType3::load_char(TTFONT* font, BYTE *glyph)
+{
+ int x;
+ BYTE c, ct;
+
+ /* Read the contour endpoints list. */
+ epts_ctr = (int *)calloc(num_ctr,sizeof(int));
+ for (x = 0; x < num_ctr; x++)
+ {
+ epts_ctr[x] = getUSHORT(glyph);
+ glyph += 2;
+ }
+
+ /* From the endpoint of the last contour, we can */
+ /* determine the number of points. */
+ num_pts = epts_ctr[num_ctr-1]+1;
+#ifdef DEBUG_TRUETYPE
+ debug("num_pts=%d",num_pts);
+ stream.printf("%% num_pts=%d\n",num_pts);
+#endif
+
+ /* Skip the instructions. */
+ x = getUSHORT(glyph);
+ glyph += 2;
+ glyph += x;
+
+ /* Allocate space to hold the data. */
+ tt_flags = (BYTE *)calloc(num_pts,sizeof(BYTE));
+ xcoor = (FWord *)calloc(num_pts,sizeof(FWord));
+ ycoor = (FWord *)calloc(num_pts,sizeof(FWord));
+
+ /* Read the flags array, uncompressing it as we go. */
+ /* There is danger of overflow here. */
+ for (x = 0; x < num_pts; )
+ {
+ tt_flags[x++] = c = *(glyph++);
+
+ if (c&8) /* If next byte is repeat count, */
+ {
+ ct = *(glyph++);
+
+ if ( (x + ct) > num_pts )
+ {
+ throw TTException("Error in TT flags");
+ }
+
+ while (ct--)
+ {
+ tt_flags[x++] = c;
+ }
+ }
+ }
+
+ /* Read the x coordinates */
+ for (x = 0; x < num_pts; x++)
+ {
+ if (tt_flags[x] & 2) /* one byte value with */
+ {
+ /* external sign */
+ c = *(glyph++);
+ xcoor[x] = (tt_flags[x] & 0x10) ? c : (-1 * (int)c);
+ }
+ else if (tt_flags[x] & 0x10) /* repeat last */
+ {
+ xcoor[x] = 0;
+ }
+ else /* two byte signed value */
+ {
+ xcoor[x] = getFWord(glyph);
+ glyph+=2;
+ }
+ }
+
+ /* Read the y coordinates */
+ for (x = 0; x < num_pts; x++)
+ {
+ if (tt_flags[x] & 4) /* one byte value with */
+ {
+ /* external sign */
+ c = *(glyph++);
+ ycoor[x] = (tt_flags[x] & 0x20) ? c : (-1 * (int)c);
+ }
+ else if (tt_flags[x] & 0x20) /* repeat last value */
+ {
+ ycoor[x] = 0;
+ }
+ else /* two byte signed value */
+ {
+ ycoor[x] = getFWord(glyph);
+ glyph+=2;
+ }
+ }
+
+ /* Convert delta values to absolute values. */
+ for (x = 1; x < num_pts; x++)
+ {
+ xcoor[x] += xcoor[x-1];
+ ycoor[x] += ycoor[x-1];
+ }
+
+ for (x=0; x < num_pts; x++)
+ {
+ xcoor[x] = topost(xcoor[x]);
+ ycoor[x] = topost(ycoor[x]);
+ }
+
+} /* end of load_char() */
+
+/*
+** Emit PostScript code for a composite character.
+*/
+void GlyphToType3::do_composite(TTStreamWriter& stream, struct TTFONT *font, BYTE *glyph)
+{
+ USHORT flags;
+ USHORT glyphIndex;
+ int arg1;
+ int arg2;
+
+ /* Once around this loop for each component. */
+ do
+ {
+ flags = getUSHORT(glyph); /* read the flags word */
+ glyph += 2;
+
+ glyphIndex = getUSHORT(glyph); /* read the glyphindex word */
+ glyph += 2;
+
+ if (flags & ARG_1_AND_2_ARE_WORDS)
+ {
+ /* The tt spec. seems to say these are signed. */
+ arg1 = getSHORT(glyph);
+ glyph += 2;
+ arg2 = getSHORT(glyph);
+ glyph += 2;
+ }
+ else /* The tt spec. does not clearly indicate */
+ {
+ /* whether these values are signed or not. */
+ arg1 = *(signed char *)(glyph++);
+ arg2 = *(signed char *)(glyph++);
+ }
+
+ if (flags & WE_HAVE_A_SCALE)
+ {
+ glyph += 2;
+ }
+ else if (flags & WE_HAVE_AN_X_AND_Y_SCALE)
+ {
+ glyph += 4;
+ }
+ else if (flags & WE_HAVE_A_TWO_BY_TWO)
+ {
+ glyph += 8;
+ }
+ else
+ {
+ }
+
+ /* Debugging */
+#ifdef DEBUG_TRUETYPE
+ stream.printf("%% flags=%d, arg1=%d, arg2=%d\n",
+ (int)flags,arg1,arg2);
+#endif
+
+ if (pdf_mode)
+ {
+ if ( flags & ARGS_ARE_XY_VALUES )
+ {
+ /* We should have been able to use 'Do' to reference the
+ subglyph here. However, that doesn't seem to work with
+ xpdf or gs (only acrobat), so instead, this just includes
+ the subglyph here inline. */
+ stream.printf("q 1 0 0 1 %d %d cm\n", topost(arg1), topost(arg2));
+ }
+ else
+ {
+ stream.printf("%% unimplemented shift, arg1=%d, arg2=%d\n",arg1,arg2);
+ }
+ GlyphToType3(stream, font, glyphIndex, true);
+ if ( flags & ARGS_ARE_XY_VALUES )
+ {
+ stream.printf("\nQ\n");
+ }
+ }
+ else
+ {
+ /* If we have an (X,Y) shift and it is non-zero, */
+ /* translate the coordinate system. */
+ if ( flags & ARGS_ARE_XY_VALUES )
+ {
+ if ( arg1 != 0 || arg2 != 0 )
+ stream.printf("gsave %d %d translate\n", topost(arg1), topost(arg2) );
+ }
+ else
+ {
+ stream.printf("%% unimplemented shift, arg1=%d, arg2=%d\n",arg1,arg2);
+ }
+
+ /* Invoke the CharStrings procedure to print the component. */
+ stream.printf("false CharStrings /%s get exec\n",
+ ttfont_CharStrings_getname(font,glyphIndex));
+
+ /* If we translated the coordinate system, */
+ /* put it back the way it was. */
+ if ( flags & ARGS_ARE_XY_VALUES && (arg1 != 0 || arg2 != 0) )
+ {
+ stream.puts("grestore ");
+ }
+ }
+
+ }
+ while (flags & MORE_COMPONENTS);
+
+} /* end of do_composite() */
+
+/*
+** Return a pointer to a specific glyph's data.
+*/
+BYTE *find_glyph_data(struct TTFONT *font, int charindex)
+{
+ ULONG off;
+ ULONG length;
+
+ /* Read the glyph offset from the index to location table. */
+ if (font->indexToLocFormat == 0)
+ {
+ off = getUSHORT( font->loca_table + (charindex * 2) );
+ off *= 2;
+ length = getUSHORT( font->loca_table + ((charindex+1) * 2) );
+ length *= 2;
+ length -= off;
+ }
+ else
+ {
+ off = getULONG( font->loca_table + (charindex * 4) );
+ length = getULONG( font->loca_table + ((charindex+1) * 4) );
+ length -= off;
+ }
+
+ if (length > 0)
+ {
+ return font->glyf_table + off;
+ }
+ else
+ {
+ return (BYTE*)NULL;
+ }
+
+} /* end of find_glyph_data() */
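+
+/*
+** Illustrative only: with indexToLocFormat == 0 the 'loca' table stores
+** offsets divided by two as USHORTs (hence the multiplication by 2 above);
+** with format 1 it stores full 32-bit offsets.
+*/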
+
+GlyphToType3::GlyphToType3(TTStreamWriter& stream, struct TTFONT *font, int charindex, bool embedded /* = false */)
+{
+ BYTE *glyph;
+
+ tt_flags = NULL;
+ xcoor = NULL;
+ ycoor = NULL;
+ epts_ctr = NULL;
+ stack_depth = 0;
+ pdf_mode = font->target_type < 0;
+
+ /* Get a pointer to the data. */
+ glyph = find_glyph_data( font, charindex );
+
+ /* If the character is blank, it has no bounding box, */
+ /* otherwise read the bounding box. */
+ if ( glyph == (BYTE*)NULL )
+ {
+ llx=lly=urx=ury=0; /* A blank char has an all zero BoundingBox */
+ num_ctr=0; /* Set this for later if()s */
+ }
+ else
+ {
+ /* Read the number of contours. */
+ num_ctr = getSHORT(glyph);
+
+ /* Read PostScript bounding box. */
+ llx = getFWord(glyph + 2);
+ lly = getFWord(glyph + 4);
+ urx = getFWord(glyph + 6);
+ ury = getFWord(glyph + 8);
+
+ /* Advance the pointer. */
+ glyph += 10;
+ }
+
+ /* If it is a simple character, load its data. */
+ if (num_ctr > 0)
+ {
+ load_char(font, glyph);
+ }
+ else
+ {
+ num_pts=0;
+ }
+
+ /* Consult the horizontal metrics table to determine */
+ /* the character width. */
+ if ( charindex < font->numberOfHMetrics )
+ {
+ advance_width = getuFWord( font->hmtx_table + (charindex * 4) );
+ }
+ else
+ {
+ advance_width = getuFWord( font->hmtx_table + ((font->numberOfHMetrics-1) * 4) );
+ }
+
+ /* Execute setcachedevice in order to inform the font machinery */
+ /* of the character bounding box and advance width. */
+ stack(stream, 7);
+ if (pdf_mode)
+ {
+ if (!embedded) {
+ stream.printf("%d 0 %d %d %d %d d1\n",
+ topost(advance_width),
+ topost(llx), topost(lly), topost(urx), topost(ury) );
+ }
+ }
+ else if (font->target_type == PS_TYPE_42_3_HYBRID)
+ {
+ stream.printf("pop gsave .001 .001 scale %d 0 %d %d %d %d setcachedevice\n",
+ topost(advance_width),
+ topost(llx), topost(lly), topost(urx), topost(ury) );
+ }
+ else
+ {
+ stream.printf("%d 0 %d %d %d %d _sc\n",
+ topost(advance_width),
+ topost(llx), topost(lly), topost(urx), topost(ury) );
+ }
+
+ /* If it is a simple glyph, convert it, */
+ /* otherwise, close the stack business. */
+ if ( num_ctr > 0 ) /* simple */
+ {
+ PSConvert(stream);
+ }
+ else if ( num_ctr < 0 ) /* composite */
+ {
+ do_composite(stream, font, glyph);
+ }
+
+ if (font->target_type == PS_TYPE_42_3_HYBRID)
+ {
+ stream.printf("\ngrestore\n");
+ }
+
+ stack_end(stream);
+}
+
+/*
+** This is the routine which is called from pprdrv_tt.c.
+*/
+void tt_type3_charproc(TTStreamWriter& stream, struct TTFONT *font, int charindex)
+{
+ GlyphToType3 glyph(stream, font, charindex);
+} /* end of tt_type3_charproc() */
+
+/*
+** Some of the given glyph ids may refer to composite glyphs.
+** This function adds all of the dependencies of those composite
+** glyphs to the glyph id vector. Michael Droettboom [06-07-07]
+*/
+void ttfont_add_glyph_dependencies(struct TTFONT *font, std::vector<int>& glyph_ids)
+{
+ std::sort(glyph_ids.begin(), glyph_ids.end());
+
+ std::stack<int> glyph_stack;
+ for (std::vector<int>::iterator i = glyph_ids.begin();
+ i != glyph_ids.end(); ++i)
+ {
+ glyph_stack.push(*i);
+ }
+
+ while (glyph_stack.size())
+ {
+ int gind = glyph_stack.top();
+ glyph_stack.pop();
+
+ BYTE* glyph = find_glyph_data( font, gind );
+ if (glyph != (BYTE*)NULL)
+ {
+
+ int num_ctr = getSHORT(glyph);
+ if (num_ctr <= 0) // This is a composite glyph
+ {
+
+ glyph += 10;
+ USHORT flags = 0;
+
+ do
+ {
+ flags = getUSHORT(glyph);
+ glyph += 2;
+ gind = (int)getUSHORT(glyph);
+ glyph += 2;
+
+ std::vector<int>::iterator insertion =
+ std::lower_bound(glyph_ids.begin(), glyph_ids.end(), gind);
+ if (insertion == glyph_ids.end() || *insertion != gind)
+ {
+ glyph_ids.insert(insertion, gind);
+ glyph_stack.push(gind);
+ }
+
+ if (flags & ARG_1_AND_2_ARE_WORDS)
+ {
+ glyph += 4;
+ }
+ else
+ {
+ glyph += 2;
+ }
+
+ if (flags & WE_HAVE_A_SCALE)
+ {
+ glyph += 2;
+ }
+ else if (flags & WE_HAVE_AN_X_AND_Y_SCALE)
+ {
+ glyph += 4;
+ }
+ else if (flags & WE_HAVE_A_TWO_BY_TWO)
+ {
+ glyph += 8;
+ }
+ }
+ while (flags & MORE_COMPONENTS);
+ }
+ }
+ }
+}
+
+/* end of file */
diff --git a/contrib/python/matplotlib/py2/extern/ttconv/truetype.h b/contrib/python/matplotlib/py2/extern/ttconv/truetype.h
new file mode 100644
index 00000000000..86be14fe370
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/ttconv/truetype.h
@@ -0,0 +1,129 @@
+/* -*- mode: c; c-basic-offset: 4 -*- */
+
+/*
+ * Modified for use within matplotlib
+ * 5 July 2007
+ * Michael Droettboom
+ */
+
+#include <stdio.h>
+
+/*
+** ~ppr/src/include/typetype.h
+**
+** Permission to use, copy, modify, and distribute this software and its
+** documentation for any purpose and without fee is hereby granted, provided
+** that the above copyright notice appear in all copies and that both that
+** copyright notice and this permission notice appear in supporting
+** documentation. This software is provided "as is" without express or
+** implied warranty.
+**
+** This include file is shared by the source files
+** "pprdrv/pprdrv_tt.c" and "pprdrv/pprdrv_tt2.c".
+**
+** Last modified 19 April 1995.
+*/
+
+/* Types used in TrueType font files. */
+#define BYTE unsigned char
+#define USHORT unsigned short int
+#define SHORT short signed int
+#define ULONG unsigned int
+#define FIXED long signed int
+#define FWord short signed int
+#define uFWord short unsigned int
+
+/* This structure stores a 16.16 bit fixed */
+/* point number. */
+typedef struct
+ {
+ short int whole;
+ unsigned short int fraction;
+ } Fixed;
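+/* For example, a TrueType version number of 1.0 is stored as whole=1,      */
+/* fraction=0; a value of 1.5 would be whole=1, fraction=0x8000, since      */
+/* 0x8000/65536 == 0.5.                                                     */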
+
+/* This structure tells what we have found out about */
+/* the current font. */
+struct TTFONT
+ {
+ // A quick-and-dirty way to create a minimum level of exception safety
+ // Added by Michael Droettboom
+ TTFONT();
+ ~TTFONT();
+
+ const char *filename; /* Name of TT file */
+ FILE *file; /* the open TT file */
+ font_type_enum target_type; /* 42 or 3 for PS, or -3 for PDF */
+
+ ULONG numTables; /* number of tables present */
+ char *PostName; /* Font's PostScript name */
+ char *FullName; /* Font's full name */
+ char *FamilyName; /* Font's family name */
+ char *Style; /* Font's style string */
+ char *Copyright; /* Font's copyright string */
+ char *Version; /* Font's version string */
+ char *Trademark; /* Font's trademark string */
+ int llx,lly,urx,ury; /* bounding box */
+
+ Fixed TTVersion; /* Truetype version number from offset table */
+ Fixed MfrRevision; /* Revision number of this font */
+
+ BYTE *offset_table; /* Offset table in memory */
+ BYTE *post_table; /* 'post' table in memory */
+
+ BYTE *loca_table; /* 'loca' table in memory */
+ BYTE *glyf_table; /* 'glyf' table in memory */
+ BYTE *hmtx_table; /* 'hmtx' table in memory */
+
+ USHORT numberOfHMetrics;
+ int unitsPerEm; /* unitsPerEm converted to int */
+ int HUPM; /* half of above */
+
+ int numGlyphs; /* from 'post' table */
+
+ int indexToLocFormat; /* short or long offsets */
+};
+
+ULONG getULONG(BYTE *p);
+USHORT getUSHORT(BYTE *p);
+Fixed getFixed(BYTE *p);
+
+/*
+** Get an FWord (font units) value.
+** Since it is 16 bits long, we can
+** use getUSHORT() to do the real work.
+*/
+#define getFWord(x) (FWord)getUSHORT(x)
+#define getuFWord(x) (uFWord)getUSHORT(x)
+
+/*
+** We can get a SHORT by making USHORT signed.
+*/
+#define getSHORT(x) (SHORT)getUSHORT(x)
+
+/* This is the one routine in pprdrv_tt.c that is */
+/* called from pprdrv_tt2.c. */
+const char *ttfont_CharStrings_getname(struct TTFONT *font, int charindex);
+
+void tt_type3_charproc(TTStreamWriter& stream, struct TTFONT *font, int charindex);
+
+/* Added 06-07-07 Michael Droettboom */
+void ttfont_add_glyph_dependencies(struct TTFONT *font, std::vector<int>& glyph_ids);
+
+/* This routine converts a number in the font's character coordinate */
+/* system to a number in a 1000 unit character system. */
+#define topost(x) (int)( ((int)(x) * 1000 + font->HUPM) / font->unitsPerEm )
+#define topost2(x) (int)( ((int)(x) * 1000 + font.HUPM) / font.unitsPerEm )
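+/* For example, with unitsPerEm = 2048 (so HUPM = 1024), an advance width   */
+/* of 1024 font units becomes topost(1024) = (1024*1000 + 1024)/2048 = 500  */
+/* after the integer division truncates.                                    */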
+
+/* Composite glyph values. */
+#define ARG_1_AND_2_ARE_WORDS 1
+#define ARGS_ARE_XY_VALUES 2
+#define ROUND_XY_TO_GRID 4
+#define WE_HAVE_A_SCALE 8
+/* RESERVED 16 */
+#define MORE_COMPONENTS 32
+#define WE_HAVE_AN_X_AND_Y_SCALE 64
+#define WE_HAVE_A_TWO_BY_TWO 128
+#define WE_HAVE_INSTRUCTIONS 256
+#define USE_MY_METRICS 512
+
+/* end of file */
diff --git a/contrib/python/matplotlib/py2/extern/ttconv/ttutil.cpp b/contrib/python/matplotlib/py2/extern/ttconv/ttutil.cpp
new file mode 100644
index 00000000000..85e7e23c4f0
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/ttconv/ttutil.cpp
@@ -0,0 +1,82 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+/*
+ * Modified for use within matplotlib
+ * 5 July 2007
+ * Michael Droettboom
+ */
+
+/* Very simple interface to the ppr TT routines */
+/* (c) Frank Siegert 1996 */
+
+#include <cstdio>
+#include <cstdarg>
+#include <cstdlib>
+#include "pprdrv.h"
+
+#if DEBUG_TRUETYPE
+void debug(const char *format, ... )
+{
+ va_list arg_list;
+ va_start(arg_list, format);
+
+    /* vprintf forwards the va_list; plain printf would treat the va_list
+       itself as the first variadic argument. */
+    vprintf(format, arg_list);
+
+ va_end(arg_list);
+}
+#endif
+
+#define PRINTF_BUFFER_SIZE 512
+void TTStreamWriter::printf(const char* format, ...)
+{
+ va_list arg_list;
+ va_start(arg_list, format);
+ char buffer[PRINTF_BUFFER_SIZE];
+
+#if defined(WIN32) || defined(_MSC_VER)
+ int size = _vsnprintf(buffer, PRINTF_BUFFER_SIZE, format, arg_list);
+#else
+ int size = vsnprintf(buffer, PRINTF_BUFFER_SIZE, format, arg_list);
+#endif
+    if (size >= PRINTF_BUFFER_SIZE) {
+        /* The stack buffer was too small: restart the va_list (it was
+           consumed above), retry into a heap buffer large enough for the
+           formatted text plus the terminating NUL, and write the result. */
+        char* buffer2 = (char*)malloc(size + 1);
+        va_end(arg_list);
+        va_start(arg_list, format);
+#if defined(WIN32) || defined(_MSC_VER)
+        _vsnprintf(buffer2, size + 1, format, arg_list);
+#else
+        vsnprintf(buffer2, size + 1, format, arg_list);
+#endif
+        this->write(buffer2);
+        free(buffer2);
+    } else {
+        this->write(buffer);
+    }
+
+ va_end(arg_list);
+}
+
+void TTStreamWriter::put_char(int val)
+{
+ char c[2];
+ c[0] = (char)val;
+ c[1] = 0;
+ this->write(c);
+}
+
+void TTStreamWriter::puts(const char *a)
+{
+ this->write(a);
+}
+
+void TTStreamWriter::putline(const char *a)
+{
+ this->write(a);
+ this->write("\n");
+}
+
+void replace_newlines_with_spaces(char *a) {
+ char* i = a;
+ while (*i != 0) {
+ if (*i == '\r' || *i == '\n')
+ *i = ' ';
+ i++;
+ }
+}
diff --git a/contrib/python/matplotlib/py2/extern/ttconv/ya.make b/contrib/python/matplotlib/py2/extern/ttconv/ya.make
new file mode 100644
index 00000000000..847ab2038db
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/ttconv/ya.make
@@ -0,0 +1,13 @@
+PY23_LIBRARY()
+
+LICENSE(PSF-2.0)
+
+NO_WSHADOW()
+
+SRCS(
+ pprdrv_tt.cpp
+ pprdrv_tt2.cpp
+ ttutil.cpp
+)
+
+END()
diff --git a/contrib/python/matplotlib/py2/extern/ya.make b/contrib/python/matplotlib/py2/extern/ya.make
new file mode 100644
index 00000000000..ef8acb58eaa
--- /dev/null
+++ b/contrib/python/matplotlib/py2/extern/ya.make
@@ -0,0 +1,4 @@
+RECURSE(
+ agg24-svn
+ ttconv
+)
diff --git a/contrib/python/matplotlib/py2/matplotlib/__init__.py b/contrib/python/matplotlib/py2/matplotlib/__init__.py
new file mode 100644
index 00000000000..f561dfd012c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/__init__.py
@@ -0,0 +1,1925 @@
+"""
+This is an object-oriented plotting library.
+
+A procedural interface is provided by the companion pyplot module,
+which may be imported directly, e.g.::
+
+ import matplotlib.pyplot as plt
+
+or using ipython::
+
+ ipython
+
+at your terminal, followed by::
+
+ In [1]: %matplotlib
+ In [2]: import matplotlib.pyplot as plt
+
+at the ipython shell prompt.
+
+For the most part, direct use of the object-oriented library is
+encouraged when programming; pyplot is primarily for working
+interactively. The
+exceptions are the pyplot commands :func:`~matplotlib.pyplot.figure`,
+:func:`~matplotlib.pyplot.subplot`,
+:func:`~matplotlib.pyplot.subplots`, and
+:func:`~matplotlib.pyplot.savefig`, which can greatly simplify scripting.
+
+Modules include:
+
+ :mod:`matplotlib.axes`
+ defines the :class:`~matplotlib.axes.Axes` class. Most pylab
+ commands are wrappers for :class:`~matplotlib.axes.Axes`
+ methods. The axes module is the highest level of OO access to
+ the library.
+
+ :mod:`matplotlib.figure`
+ defines the :class:`~matplotlib.figure.Figure` class.
+
+ :mod:`matplotlib.artist`
+ defines the :class:`~matplotlib.artist.Artist` base class for
+ all classes that draw things.
+
+ :mod:`matplotlib.lines`
+ defines the :class:`~matplotlib.lines.Line2D` class for
+ drawing lines and markers
+
+ :mod:`matplotlib.patches`
+ defines classes for drawing polygons
+
+ :mod:`matplotlib.text`
+ defines the :class:`~matplotlib.text.Text`,
+ :class:`~matplotlib.text.TextWithDash`, and
+        :class:`~matplotlib.text.Annotation` classes
+
+ :mod:`matplotlib.image`
+ defines the :class:`~matplotlib.image.AxesImage` and
+ :class:`~matplotlib.image.FigureImage` classes
+
+ :mod:`matplotlib.collections`
+ classes for efficient drawing of groups of lines or polygons
+
+ :mod:`matplotlib.colors`
+ classes for interpreting color specifications and for making
+ colormaps
+
+ :mod:`matplotlib.cm`
+        colormaps and the :class:`~matplotlib.cm.ScalarMappable`
+ mixin class for providing color mapping functionality to other
+ classes
+
+ :mod:`matplotlib.ticker`
+ classes for calculating tick mark locations and for formatting
+ tick labels
+
+ :mod:`matplotlib.backends`
+ a subpackage with modules for various gui libraries and output
+ formats
+
+The base matplotlib namespace includes:
+
+ :data:`~matplotlib.rcParams`
+ a global dictionary of default configuration settings. It is
+ initialized by code which may be overridden by a matplotlibrc
+ file.
+
+ :func:`~matplotlib.rc`
+ a function for setting groups of rcParams values
+
+ :func:`~matplotlib.use`
+ a function for setting the matplotlib backend. If used, this
+ function must be called immediately after importing matplotlib
+ for the first time. In particular, it must be called
+ **before** importing pylab (if pylab is imported).
+
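+A minimal sketch of the object-oriented style described above (the data and
+file name are only illustrative)::
+
+    import matplotlib.pyplot as plt
+    from matplotlib import rc
+
+    rc('lines', linewidth=2)          # tweak an rcParams group
+    fig, ax = plt.subplots()          # Figure and Axes objects
+    ax.plot([0, 1, 2], [0, 1, 4])
+    fig.savefig('example.png')
+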
+matplotlib was initially written by John D. Hunter (1968-2012) and is now
+developed and maintained by a host of others.
+
+Occasionally the internal documentation (python docstrings) will refer
+to MATLAB&reg;, a registered trademark of The MathWorks, Inc.
+
+"""
+from __future__ import absolute_import, division, print_function
+
+import six
+
+import atexit
+from collections import MutableMapping
+import contextlib
+import distutils.version
+import functools
+import io
+import inspect
+import itertools
+import locale
+import logging
+import os
+import re
+import shutil
+import stat
+import sys
+import tempfile
+import warnings
+
+# cbook must import matplotlib only within function
+# definitions, so it is safe to import from it here.
+from . import cbook
+from matplotlib.cbook import (
+ _backports, mplDeprecation, dedent, get_label, sanitize_sequence)
+from matplotlib.compat import subprocess
+from matplotlib.rcsetup import defaultParams, validate_backend, cycler
+
+import numpy
+from six.moves.urllib.request import urlopen
+from six.moves import reload_module as reload
+
+# Get the version from the _version.py versioneer file. For a git checkout,
+# this is computed based on the number of commits since the last tag.
+from ._version import get_versions
+__version__ = str(get_versions()['version'])
+del get_versions
+
+_log = logging.getLogger(__name__)
+
+__version__numpy__ = str('1.7.1') # minimum required numpy version
+
+__bibtex__ = r"""@Article{Hunter:2007,
+ Author = {Hunter, J. D.},
+ Title = {Matplotlib: A 2D graphics environment},
+ Journal = {Computing In Science \& Engineering},
+ Volume = {9},
+ Number = {3},
+ Pages = {90--95},
+ abstract = {Matplotlib is a 2D graphics package used for Python
+ for application development, interactive scripting, and
+ publication-quality image generation across user
+ interfaces and operating systems.},
+ publisher = {IEEE COMPUTER SOC},
+ year = 2007
+}"""
+
+
+_python27 = (sys.version_info.major == 2 and sys.version_info.minor >= 7)
+_python34 = (sys.version_info.major == 3 and sys.version_info.minor >= 4)
+if not (_python27 or _python34):
+ raise ImportError("Matplotlib requires Python 2.7 or 3.4 or later")
+
+if _python27:
+ _log.addHandler(logging.NullHandler())
+
+
+def compare_versions(a, b):
+ "return True if a is greater than or equal to b"
+ if a:
+ if six.PY3:
+ if isinstance(a, bytes):
+ a = a.decode('ascii')
+ if isinstance(b, bytes):
+ b = b.decode('ascii')
+ a = distutils.version.LooseVersion(a)
+ b = distutils.version.LooseVersion(b)
+ return a >= b
+ else:
+ return False
+
+
+try:
+ import dateutil
+except ImportError:
+ raise ImportError("Matplotlib requires dateutil")
+
+
+if not compare_versions(six.__version__, '1.10'):
+ raise ImportError(
+ "Matplotlib requires six>=1.10; you have %s" % six.__version__)
+
+
+try:
+ import pyparsing
+except ImportError:
+ raise ImportError("Matplotlib requires pyparsing")
+else:
+ if not compare_versions(pyparsing.__version__, '2.0.1'):
+ raise ImportError(
+ "Matplotlib requires pyparsing>=2.0.1; you have %s"
+ % pyparsing.__version__)
+
+
+if not compare_versions(numpy.__version__, __version__numpy__):
+ raise ImportError(
+ "Matplotlib requires numpy>=%s; you have %s" % (
+ __version__numpy__, numpy.__version__))
+
+
+if not hasattr(sys, 'argv'): # for modpython
+ sys.argv = [str('modpython')]
+
+
+def _is_writable_dir(p):
+ """
+    p is a string pointing to a putative writable dir -- return True if p
+    is such a string, else False
+ """
+ return os.access(p, os.W_OK) and os.path.isdir(p)
+
+_verbose_msg = """\
+matplotlib.verbose is deprecated;
+Command line argument --verbose-LEVEL is deprecated.
+This functionality is now provided by the standard
+python logging library. To get more (or less) logging output:
+ import logging
+ logger = logging.getLogger('matplotlib')
+    logger.setLevel(logging.INFO)"""
+
+
+def _set_logger_verbose_level(level_str='silent', file_str='sys.stdout'):
+ """
+    Use a --verbose-LEVEL string to set the logging level.
+ """
+ levelmap = {'silent': logging.WARNING, 'helpful': logging.INFO,
+ 'debug': logging.DEBUG, 'debug-annoying': logging.DEBUG,
+ 'info': logging.INFO, 'warning': logging.WARNING}
+ # Check that current state of logger isn't already more verbose
+ # than the requested level. If it is more verbose, then leave more
+ # verbose.
+ newlev = levelmap[level_str]
+ oldlev = _log.getEffectiveLevel()
+ if newlev < oldlev:
+ _log.setLevel(newlev)
+ std = {
+ 'sys.stdout': sys.stdout,
+ 'sys.stderr': sys.stderr,
+ }
+ if file_str in std:
+ fileo = std[file_str]
+ else:
+ fileo = sys.stdout
+ try:
+ fileo = open(file_str, 'w')
+ # if this fails, we will just write to stdout
+ except IOError:
+ warnings.warn('could not open log file "{0}"'
+                      ' for writing. Check your '
+ 'matplotlibrc'.format(file_str))
+ console = logging.StreamHandler(fileo)
+ console.setLevel(newlev)
+ _log.addHandler(console)
+
+
+def _parse_commandline():
+ """
+ Check for --verbose-LEVEL type command line arguments and
+ set logging level appropriately.
+ """
+
+ levels = ('silent', 'helpful', 'debug', 'debug-annoying',
+ 'info', 'warning')
+
+ for arg in sys.argv[1:]:
+ if arg.startswith('--verbose-'):
+ level_str = arg[10:]
+ # If it doesn't match one of ours, then don't even
+ # bother noting it, we are just a 3rd-party library
+ # to somebody else's script.
+ if level_str in levels:
+ _set_logger_verbose_level(level_str)
+
+_parse_commandline()
+
+
+class Verbose(object):
+ """
+ A class to handle reporting. Set the fileo attribute to any file
+ instance to handle the output. Default is sys.stdout
+ """
+ levels = ('silent', 'helpful', 'debug', 'debug-annoying')
+ vald = {level: i for i, level in enumerate(levels)}
+
+ # parse the verbosity from the command line; flags look like
+ # --verbose-silent or --verbose-helpful
+ _commandLineVerbose = None
+
+ for arg in sys.argv[1:]:
+ if not arg.startswith('--verbose-'):
+ continue
+ level_str = arg[10:]
+ # If it doesn't match one of ours, then don't even
+ # bother noting it, we are just a 3rd-party library
+ # to somebody else's script.
+ if level_str in levels:
+ _commandLineVerbose = level_str
+
+ @cbook.deprecated("2.2", message=_verbose_msg)
+ def __init__(self):
+ self.set_level('silent')
+ self.fileo = sys.stdout
+
+ @cbook.deprecated("2.2", message=_verbose_msg)
+ def set_level(self, level):
+ 'set the verbosity to one of the Verbose.levels strings'
+
+ if self._commandLineVerbose is not None:
+ level = self._commandLineVerbose
+ if level not in self.levels:
+ warnings.warn('matplotlib: unrecognized --verbose-* string "%s".'
+ ' Legal values are %s' % (level, self.levels))
+ else:
+ self.level = level
+
+ @cbook.deprecated("2.2", message=_verbose_msg)
+ def set_fileo(self, fname):
+ std = {
+ 'sys.stdout': sys.stdout,
+ 'sys.stderr': sys.stderr,
+ }
+ if fname in std:
+ self.fileo = std[fname]
+ else:
+ try:
+ fileo = open(fname, 'w')
+ except IOError:
+ raise ValueError('Verbose object could not open log file "{0}"'
+ ' for writing.\nCheck your matplotlibrc '
+ 'verbose.fileo setting'.format(fname))
+ else:
+ self.fileo = fileo
+
+ @cbook.deprecated("2.2", message=_verbose_msg)
+ def report(self, s, level='helpful'):
+ """
+ print message s to self.fileo if self.level>=level. Return
+ value indicates whether a message was issued
+
+ """
+ if self.ge(level):
+ print(s, file=self.fileo)
+ return True
+ return False
+
+ @cbook.deprecated("2.2", message=_verbose_msg)
+ def wrap(self, fmt, func, level='helpful', always=True):
+ """
+        return a callable function that wraps func and reports its
+ output through the verbose handler if current verbosity level
+ is higher than level
+
+ if always is True, the report will occur on every function
+ call; otherwise only on the first time the function is called
+ """
+ assert callable(func)
+
+ def wrapper(*args, **kwargs):
+ ret = func(*args, **kwargs)
+
+ if (always or not wrapper._spoke):
+ spoke = self.report(fmt % ret, level)
+ if not wrapper._spoke:
+ wrapper._spoke = spoke
+ return ret
+ wrapper._spoke = False
+ wrapper.__doc__ = func.__doc__
+ return wrapper
+
+ @cbook.deprecated("2.2", message=_verbose_msg)
+ def ge(self, level):
+ 'return true if self.level is >= level'
+ return self.vald[self.level] >= self.vald[level]
+
+
+with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ verbose = Verbose()
+
+
+def _wrap(fmt, func, level=logging.DEBUG, always=True):
+ """
+ return a callable function that wraps func and reports its
+ output through logger
+
+ if always is True, the report will occur on every function
+ call; otherwise only on the first time the function is called
+ """
+ assert callable(func)
+
+ def wrapper(*args, **kwargs):
+ ret = func(*args, **kwargs)
+
+ if (always or not wrapper._spoke):
+ _log.log(level, fmt % ret)
+ spoke = True
+ if not wrapper._spoke:
+ wrapper._spoke = spoke
+ return ret
+ wrapper._spoke = False
+ wrapper.__doc__ = func.__doc__
+ return wrapper
+
+
+def checkdep_dvipng():
+ try:
+ s = subprocess.Popen([str('dvipng'), '-version'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = s.communicate()
+ line = stdout.decode('ascii').split('\n')[1]
+ v = line.split()[-1]
+ return v
+ except (IndexError, ValueError, OSError):
+ return None
+
+
+def checkdep_ghostscript():
+ if checkdep_ghostscript.executable is None:
+ if sys.platform == 'win32':
+ # mgs is the name in miktex
+ gs_execs = ['gswin32c', 'gswin64c', 'mgs', 'gs']
+ else:
+ gs_execs = ['gs']
+ for gs_exec in gs_execs:
+ try:
+ s = subprocess.Popen(
+ [str(gs_exec), '--version'], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = s.communicate()
+ if s.returncode == 0:
+ v = stdout[:-1].decode('ascii')
+ checkdep_ghostscript.executable = gs_exec
+ checkdep_ghostscript.version = v
+ except (IndexError, ValueError, OSError):
+ pass
+ return checkdep_ghostscript.executable, checkdep_ghostscript.version
+checkdep_ghostscript.executable = None
+checkdep_ghostscript.version = None
+
+
+# Deprecated, as it is unneeded and some distributions (e.g. MiKTeX 2.9.6350)
+# do not actually report the TeX version.
+@cbook.deprecated("2.1")
+def checkdep_tex():
+ try:
+ s = subprocess.Popen([str('tex'), '-version'], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = s.communicate()
+ line = stdout.decode('ascii').split('\n')[0]
+ pattern = r'3\.1\d+'
+ match = re.search(pattern, line)
+ v = match.group(0)
+ return v
+ except (IndexError, ValueError, AttributeError, OSError):
+ return None
+
+
+def checkdep_pdftops():
+ try:
+ s = subprocess.Popen([str('pdftops'), '-v'], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = s.communicate()
+ lines = stderr.decode('ascii').split('\n')
+ for line in lines:
+ if 'version' in line:
+ v = line.split()[-1]
+ return v
+ except (IndexError, ValueError, UnboundLocalError, OSError):
+ return None
+
+
+def checkdep_inkscape():
+ if checkdep_inkscape.version is None:
+ try:
+ s = subprocess.Popen([str('inkscape'), '-V'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = s.communicate()
+ lines = stdout.decode('ascii').split('\n')
+ for line in lines:
+ if 'Inkscape' in line:
+ v = line.split()[1]
+ break
+ checkdep_inkscape.version = v
+ except (IndexError, ValueError, UnboundLocalError, OSError):
+ pass
+ return checkdep_inkscape.version
+checkdep_inkscape.version = None
+
+
+@cbook.deprecated("2.1")
+def checkdep_xmllint():
+ try:
+ s = subprocess.Popen([str('xmllint'), '--version'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = s.communicate()
+ lines = stderr.decode('ascii').split('\n')
+ for line in lines:
+ if 'version' in line:
+ v = line.split()[-1]
+ break
+ return v
+ except (IndexError, ValueError, UnboundLocalError, OSError):
+ return None
+
+
+def checkdep_ps_distiller(s):
+ if not s:
+ return False
+
+ flag = True
+ gs_req = '8.60'
+ gs_exec, gs_v = checkdep_ghostscript()
+ if not compare_versions(gs_v, gs_req):
+ flag = False
+ warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
+ 'unless ghostscript-%s or later is installed on your '
+ 'system') % gs_req)
+
+ if s == 'xpdf':
+ pdftops_req = '3.0'
+ pdftops_req_alt = '0.9' # poppler version numbers, ugh
+ pdftops_v = checkdep_pdftops()
+ if compare_versions(pdftops_v, pdftops_req):
+ pass
+ elif (compare_versions(pdftops_v, pdftops_req_alt) and not
+ compare_versions(pdftops_v, '1.0')):
+ pass
+ else:
+ flag = False
+ warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
+ 'xpdf unless xpdf-%s or later is installed on '
+ 'your system') % pdftops_req)
+
+ if flag:
+ return s
+ else:
+ return False
+
+
+def checkdep_usetex(s):
+ if not s:
+ return False
+
+ gs_req = '8.60'
+ dvipng_req = '1.6'
+ flag = True
+
+ if _backports.which("tex") is None:
+ flag = False
+ warnings.warn('matplotlibrc text.usetex option can not be used unless '
+ 'TeX is installed on your system')
+
+ dvipng_v = checkdep_dvipng()
+ if not compare_versions(dvipng_v, dvipng_req):
+ flag = False
+ warnings.warn('matplotlibrc text.usetex can not be used with *Agg '
+ 'backend unless dvipng-%s or later is installed on '
+ 'your system' % dvipng_req)
+
+ gs_exec, gs_v = checkdep_ghostscript()
+ if not compare_versions(gs_v, gs_req):
+ flag = False
+ warnings.warn('matplotlibrc text.usetex can not be used unless '
+ 'ghostscript-%s or later is installed on your system'
+ % gs_req)
+
+ return flag
+
+
+def _get_home():
+ """Find user's home directory if possible.
+ Otherwise, returns None.
+
+ :see:
+ http://mail.python.org/pipermail/python-list/2005-February/325395.html
+ """
+ if six.PY2 and sys.platform == 'win32':
+ path = os.path.expanduser(b"~").decode(sys.getfilesystemencoding())
+ else:
+ path = os.path.expanduser("~")
+ if os.path.isdir(path):
+ return path
+ for evar in ('HOME', 'USERPROFILE', 'TMP'):
+ path = os.environ.get(evar)
+ if path is not None and os.path.isdir(path):
+ return path
+ return None
+
+
+def _create_tmp_config_dir():
+ """
+ If the config directory can not be created, create a temporary
+ directory.
+ """
+ configdir = os.environ['MPLCONFIGDIR'] = (
+ tempfile.mkdtemp(prefix='matplotlib-'))
+ atexit.register(shutil.rmtree, configdir)
+ return configdir
+
+
+get_home = _wrap('$HOME=%s', _get_home, always=False)
+
+
+def _get_xdg_config_dir():
+ """
+ Returns the XDG configuration directory, according to the `XDG
+ base directory spec
+ <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
+ """
+ path = os.environ.get('XDG_CONFIG_HOME')
+ if path is None:
+ path = get_home()
+ if path is not None:
+ path = os.path.join(path, '.config')
+ return path
+
+
+def _get_xdg_cache_dir():
+ """
+ Returns the XDG cache directory, according to the `XDG
+ base directory spec
+ <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
+ """
+ path = os.environ.get('XDG_CACHE_HOME')
+ if path is None:
+ path = get_home()
+ if path is not None:
+ path = os.path.join(path, '.cache')
+ return path
+
+
+def _get_config_or_cache_dir(xdg_base):
+ from matplotlib.cbook import mkdirs
+
+ configdir = os.environ.get('MPLCONFIGDIR')
+ if configdir is not None:
+ configdir = os.path.abspath(configdir)
+ if not os.path.exists(configdir):
+ mkdirs(configdir)
+
+ if not _is_writable_dir(configdir):
+ return _create_tmp_config_dir()
+ return configdir
+
+ p = None
+ h = get_home()
+ if h is not None:
+ p = os.path.join(h, '.matplotlib')
+ if sys.platform.startswith(('linux', 'freebsd')):
+ p = None
+ if xdg_base is not None:
+ p = os.path.join(xdg_base, 'matplotlib')
+
+ if p is not None:
+ if os.path.exists(p):
+ if _is_writable_dir(p):
+ return p
+ else:
+ try:
+ mkdirs(p)
+ except OSError:
+ pass
+ else:
+ return p
+
+ return _create_tmp_config_dir()
+
+
+def _get_configdir():
+ """
+ Return the string representing the configuration directory.
+
+ The directory is chosen as follows:
+
+ 1. If the MPLCONFIGDIR environment variable is supplied, choose that.
+
+ 2a. On Linux, follow the XDG specification and look first in
+ `$XDG_CONFIG_HOME`, if defined, or `$HOME/.config`.
+
+ 2b. On other platforms, choose `$HOME/.matplotlib`.
+
+ 3. If the chosen directory exists and is writable, use that as the
+ configuration directory.
+ 4. If possible, create a temporary directory, and use it as the
+ configuration directory.
+ 5. A writable directory could not be found or created; return None.
+ """
+ return _get_config_or_cache_dir(_get_xdg_config_dir())
+
+get_configdir = _wrap('CONFIGDIR=%s', _get_configdir, always=False)
+
+
+def _get_cachedir():
+ """
+ Return the location of the cache directory.
+
+ The procedure used to find the directory is the same as for
+    _get_configdir, except using `$XDG_CACHE_HOME`/`~/.cache` instead.
+ """
+ return _get_config_or_cache_dir(_get_xdg_cache_dir())
+
+get_cachedir = _wrap('CACHEDIR=%s', _get_cachedir, always=False)
+
+
+def _decode_filesystem_path(path):
+ if not isinstance(path, str):
+ return path.decode(sys.getfilesystemencoding())
+ else:
+ return path
+
+
+def _get_internal_mpl_data():
+ import tempfile
+ import hashlib
+ import errno
+
+ def mpl_archive():
+ import __res
+ res_name = '/mpl-data/mpl-data.tar.gz'
+ return __res.find(res_name)
+
+ def extract_to(data, path):
+ import tarfile
+ string = six.BytesIO(data)
+ tar = tarfile.open(fileobj=string)
+ try:
+ tar.extractall(path)
+ finally:
+ tar.close()
+
+ data = mpl_archive()
+ if not data:
+ return None
+
+ mpl_path = os.path.join(tempfile.gettempdir(), 'mpl-data_{}_{}'.format(hashlib.md5(data).hexdigest(), os.geteuid()))
+ if os.path.exists(mpl_path):
+ return mpl_path
+
+ tmp_dir = tempfile.mkdtemp(prefix='mpl-temp', dir=tempfile.gettempdir())
+ extract_to(data, tmp_dir)
+
+ try:
+ os.rename(tmp_dir, mpl_path)
+ except OSError as e:
+ if e.errno not in [errno.EEXIST, errno.ENOTEMPTY]:
+ raise
+
+ return mpl_path
+
+
+def _get_data_path():
+ 'get the path to matplotlib data'
+
+ if 'MATPLOTLIBDATA' in os.environ:
+ path = os.environ['MATPLOTLIBDATA']
+ if not os.path.isdir(path):
+ raise RuntimeError('Path in environment MATPLOTLIBDATA not a '
+ 'directory')
+ return path
+
+ _file = _decode_filesystem_path(__file__)
+ path = os.sep.join([os.path.dirname(_file), 'mpl-data'])
+ if os.path.isdir(path):
+ return path
+
+ # setuptools' namespace_packages may highjack this init file
+ # so need to try something known to be in matplotlib, not basemap
+ import matplotlib.afm
+ _file = _decode_filesystem_path(matplotlib.afm.__file__)
+ path = os.sep.join([os.path.dirname(_file), 'mpl-data'])
+ if os.path.isdir(path):
+ return path
+
+ # py2exe zips pure python, so still need special check
+ if getattr(sys, 'frozen', None):
+ exe_path = os.path.dirname(_decode_filesystem_path(sys.executable))
+ path = os.path.join(exe_path, 'mpl-data')
+ if os.path.isdir(path):
+ return path
+
+ # Try again assuming we need to step up one more directory
+ path = os.path.join(os.path.split(exe_path)[0], 'mpl-data')
+ if os.path.isdir(path):
+ return path
+
+    # Try again assuming sys.path[0] is a dir not an exe
+ path = os.path.join(sys.path[0], 'mpl-data')
+ if os.path.isdir(path):
+ return path
+
+ path = _get_internal_mpl_data()
+ if path:
+ return path
+
+ raise RuntimeError('Could not find the matplotlib data files')
+
+
+def _get_data_path_cached():
+ if defaultParams['datapath'][0] is None:
+ defaultParams['datapath'][0] = _get_data_path()
+ return defaultParams['datapath'][0]
+
+get_data_path = _wrap('matplotlib data path %s', _get_data_path_cached,
+ always=False)
+
+
+def get_py2exe_datafiles():
+ datapath = get_data_path()
+ _, tail = os.path.split(datapath)
+ d = {}
+ for root, _, files in os.walk(datapath):
+ # Need to explicitly remove cocoa_agg files or py2exe complains
+ # NOTE I don't know why, but do as previous version
+ if 'Matplotlib.nib' in files:
+ files.remove('Matplotlib.nib')
+ files = [os.path.join(root, filename) for filename in files]
+ root = root.replace(tail, 'mpl-data')
+ root = root[root.index('mpl-data'):]
+ d[root] = files
+ return list(d.items())
+
+
+def matplotlib_fname():
+ """
+ Get the location of the config file.
+
+ The file location is determined in the following order
+
+ - `$PWD/matplotlibrc`
+
+ - `$MATPLOTLIBRC` if it is a file (or a named pipe, which can be created
+ e.g. by process substitution)
+
+ - `$MATPLOTLIBRC/matplotlibrc`
+
+ - `$MPLCONFIGDIR/matplotlibrc`
+
+ - On Linux,
+
+ - `$XDG_CONFIG_HOME/matplotlib/matplotlibrc` (if
+ $XDG_CONFIG_HOME is defined)
+
+ - or `$HOME/.config/matplotlib/matplotlibrc` (if
+ $XDG_CONFIG_HOME is not defined)
+
+ - On other platforms,
+
+ - `$HOME/.matplotlib/matplotlibrc` if `$HOME` is defined.
+
+ - Lastly, it looks in `$MATPLOTLIBDATA/matplotlibrc` for a
+ system-defined copy.
+ """
+
+ def gen_candidates():
+ yield os.path.join(six.moves.getcwd(), 'matplotlibrc')
+ try:
+ matplotlibrc = os.environ['MATPLOTLIBRC']
+ except KeyError:
+ pass
+ else:
+ yield matplotlibrc
+ yield os.path.join(matplotlibrc, 'matplotlibrc')
+ yield os.path.join(_get_configdir(), 'matplotlibrc')
+ yield os.path.join(get_data_path(), 'matplotlibrc')
+
+ for fname in gen_candidates():
+ if os.path.exists(fname):
+ st_mode = os.stat(fname).st_mode
+ if stat.S_ISREG(st_mode) or stat.S_ISFIFO(st_mode):
+ break
+ # Return first candidate that is a file, or last candidate if none is
+ # valid (in that case, a warning is raised at startup by `rc_params`).
+ return fname
+
+
+# names of keys to deprecate
+# the values are a tuple of (new_name, f_old_2_new, f_new_2_old)
+# the inverse function may be `None`
+_deprecated_map = {}
+
+_deprecated_ignore_map = {'nbagg.transparent': 'figure.facecolor'}
+
+_obsolete_set = {'plugins.directory', 'text.dvipnghack'}
+
+# The following may use a value of None to suppress the warning.
+# do NOT include in _all_deprecated
+_deprecated_set = {'axes.hold',
+ 'backend.qt4',
+ 'backend.qt5'}
+
+_all_deprecated = set(itertools.chain(
+ _deprecated_ignore_map, _deprecated_map, _obsolete_set))
+
+
+class RcParams(MutableMapping, dict):
+
+ """
+ A dictionary object including validation
+
+ validating functions are defined and associated with rc parameters in
+ :mod:`matplotlib.rcsetup`
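+
+    A small illustrative sketch (the keys shown are standard rc parameters)::
+
+        rcParams['lines.linewidth'] = 2       # validated and converted to float
+        rcParams['lines.linewidth'] = 'oops'  # raises ValueError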
+ """
+
+ validate = dict((key, converter) for key, (default, converter) in
+ six.iteritems(defaultParams)
+ if key not in _all_deprecated)
+ msg_depr = "%s is deprecated and replaced with %s; please use the latter."
+ msg_depr_set = ("%s is deprecated. Please remove it from your "
+ "matplotlibrc and/or style files.")
+ msg_depr_ignore = "%s is deprecated and ignored. Use %s instead."
+ msg_obsolete = ("%s is obsolete. Please remove it from your matplotlibrc "
+ "and/or style files.")
+ msg_backend_obsolete = ("The {} rcParam was deprecated in version 2.2. In"
+ " order to force the use of a specific Qt binding,"
+ " either import that binding first, or set the "
+ "QT_API environment variable.")
+
+ # validate values on the way in
+ def __init__(self, *args, **kwargs):
+ self.update(*args, **kwargs)
+
+ def __setitem__(self, key, val):
+ try:
+ if key in _deprecated_map:
+ alt_key, alt_val, inverse_alt = _deprecated_map[key]
+ warnings.warn(self.msg_depr % (key, alt_key),
+ mplDeprecation)
+ key = alt_key
+ val = alt_val(val)
+ elif key in _deprecated_set and val is not None:
+ if key.startswith('backend'):
+ warnings.warn(self.msg_backend_obsolete.format(key),
+ mplDeprecation)
+ else:
+ warnings.warn(self.msg_depr_set % key,
+ mplDeprecation)
+ elif key in _deprecated_ignore_map:
+ alt = _deprecated_ignore_map[key]
+ warnings.warn(self.msg_depr_ignore % (key, alt),
+ mplDeprecation)
+ return
+ elif key in _obsolete_set:
+ warnings.warn(self.msg_obsolete % (key, ),
+ mplDeprecation)
+ return
+ try:
+ cval = self.validate[key](val)
+ except ValueError as ve:
+ raise ValueError("Key %s: %s" % (key, str(ve)))
+ dict.__setitem__(self, key, cval)
+ except KeyError:
+ raise KeyError(
+ '%s is not a valid rc parameter. See rcParams.keys() for a '
+ 'list of valid parameters.' % (key,))
+
+ def __getitem__(self, key):
+ inverse_alt = None
+ if key in _deprecated_map:
+ alt_key, alt_val, inverse_alt = _deprecated_map[key]
+ warnings.warn(self.msg_depr % (key, alt_key),
+ mplDeprecation)
+ key = alt_key
+
+ elif key in _deprecated_ignore_map:
+ alt = _deprecated_ignore_map[key]
+ warnings.warn(self.msg_depr_ignore % (key, alt),
+ mplDeprecation)
+ key = alt
+
+ elif key in _obsolete_set:
+ warnings.warn(self.msg_obsolete % (key, ),
+ mplDeprecation)
+ return None
+
+ val = dict.__getitem__(self, key)
+ if inverse_alt is not None:
+ return inverse_alt(val)
+ else:
+ return val
+
+ def __repr__(self):
+ import pprint
+ class_name = self.__class__.__name__
+ indent = len(class_name) + 1
+ repr_split = pprint.pformat(dict(self), indent=1,
+ width=80 - indent).split('\n')
+ repr_indented = ('\n' + ' ' * indent).join(repr_split)
+ return '{0}({1})'.format(class_name, repr_indented)
+
+ def __str__(self):
+ return '\n'.join('{0}: {1}'.format(k, v)
+ for k, v in sorted(self.items()))
+
+ def __iter__(self):
+ """
+ Yield sorted list of keys.
+ """
+ for k in sorted(dict.__iter__(self)):
+ yield k
+
+ def find_all(self, pattern):
+ """
+ Return the subset of this RcParams dictionary whose keys match,
+ using :func:`re.search`, the given ``pattern``.
+
+ .. note::
+
+ Changes to the returned dictionary are *not* propagated to
+ the parent RcParams dictionary.
+
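+        For example (an illustrative call)::
+
+            tick_params = rcParams.find_all(r'^[xy]tick')
+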
+ """
+ pattern_re = re.compile(pattern)
+ return RcParams((key, value)
+ for key, value in self.items()
+ if pattern_re.search(key))
+
+
+def rc_params(fail_on_error=False):
+ """Return a :class:`matplotlib.RcParams` instance from the
+ default matplotlib rc file.
+ """
+ fname = matplotlib_fname()
+ if not os.path.exists(fname):
+ # this should never happen, default in mpl-data should always be found
+ message = 'could not find rc file; returning defaults'
+ ret = RcParams([(key, default) for key, (default, _) in
+ six.iteritems(defaultParams)
+ if key not in _all_deprecated])
+ warnings.warn(message)
+ return ret
+
+ return rc_params_from_file(fname, fail_on_error)
+
+
+URL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\')
+
+
+def is_url(filename):
+ """Return True if string is an http, ftp, or file URL path."""
+ return URL_REGEX.match(filename) is not None
+
+
+def _url_lines(f):
+ # Compatibility for urlopen in python 3, which yields bytes.
+ for line in f:
+ yield line.decode('utf8')
+
+
+@contextlib.contextmanager
+def _open_file_or_url(fname):
+ if is_url(fname):
+ f = urlopen(fname)
+ yield _url_lines(f)
+ f.close()
+ else:
+ fname = os.path.expanduser(fname)
+ encoding = locale.getpreferredencoding(do_setlocale=False)
+ if encoding is None:
+ encoding = "utf-8"
+ with io.open(fname, encoding=encoding) as f:
+ yield f
+
+
+_error_details_fmt = 'line #%d\n\t"%s"\n\tin file "%s"'
+
+
+def _rc_params_in_file(fname, fail_on_error=False):
+ """Return :class:`matplotlib.RcParams` from the contents of the given file.
+
+ Unlike `rc_params_from_file`, the configuration class only contains the
+ parameters specified in the file (i.e. default values are not filled in).
+ """
+ cnt = 0
+ rc_temp = {}
+ with _open_file_or_url(fname) as fd:
+ try:
+ for line in fd:
+ cnt += 1
+ strippedline = line.split('#', 1)[0].strip()
+ if not strippedline:
+ continue
+ tup = strippedline.split(':', 1)
+ if len(tup) != 2:
+ error_details = _error_details_fmt % (cnt, line, fname)
+ warnings.warn('Illegal %s' % error_details)
+ continue
+ key, val = tup
+ key = key.strip()
+ val = val.strip()
+ if key in rc_temp:
+ warnings.warn('Duplicate key in file "%s", line #%d' %
+ (fname, cnt))
+ rc_temp[key] = (val, line, cnt)
+ except UnicodeDecodeError:
+ warnings.warn(
+ ('Cannot decode configuration file %s with '
+ 'encoding %s, check LANG and LC_* variables')
+ % (fname, locale.getpreferredencoding(do_setlocale=False) or
+ 'utf-8 (default)'))
+ raise
+
+ config = RcParams()
+
+ for key in ('verbose.level', 'verbose.fileo'):
+ if key in rc_temp:
+ val, line, cnt = rc_temp.pop(key)
+ if fail_on_error:
+ config[key] = val # try to convert to proper type or raise
+ else:
+ try:
+ config[key] = val # try to convert to proper type or skip
+ except Exception as msg:
+ error_details = _error_details_fmt % (cnt, line, fname)
+ warnings.warn('Bad val "%s" on %s\n\t%s' %
+ (val, error_details, msg))
+
+ for key, (val, line, cnt) in six.iteritems(rc_temp):
+ if key in defaultParams:
+ if fail_on_error:
+ config[key] = val # try to convert to proper type or raise
+ else:
+ try:
+ config[key] = val # try to convert to proper type or skip
+ except Exception as msg:
+ error_details = _error_details_fmt % (cnt, line, fname)
+ warnings.warn('Bad val "%s" on %s\n\t%s' %
+ (val, error_details, msg))
+ elif key in _deprecated_ignore_map:
+ warnings.warn('%s is deprecated. Update your matplotlibrc to use '
+ '%s instead.' % (key, _deprecated_ignore_map[key]),
+ mplDeprecation)
+
+ else:
+ print("""
+Bad key "%s" on line %d in
+%s.
+You probably need to get an updated matplotlibrc file from
+http://github.com/matplotlib/matplotlib/blob/master/matplotlibrc.template
+or from the matplotlib source distribution""" % (key, cnt, fname),
+ file=sys.stderr)
+
+ return config
+
+
+def rc_params_from_file(fname, fail_on_error=False, use_default_template=True):
+ """Return :class:`matplotlib.RcParams` from the contents of the given file.
+
+ Parameters
+ ----------
+ fname : str
+ Name of file parsed for matplotlib settings.
+ fail_on_error : bool
+ If True, raise an error when the parser fails to convert a parameter.
+ use_default_template : bool
+ If True, initialize with default parameters before updating with those
+ in the given file. If False, the configuration class only contains the
+ parameters specified in the file. (Useful for updating dicts.)
+ """
+ config_from_file = _rc_params_in_file(fname, fail_on_error)
+
+ if not use_default_template:
+ return config_from_file
+
+ iter_params = six.iteritems(defaultParams)
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", mplDeprecation)
+ config = RcParams([(key, default) for key, (default, _) in iter_params
+ if key not in _all_deprecated])
+ config.update(config_from_file)
+
+ if config['datapath'] is None:
+ config['datapath'] = get_data_path()
+
+ if "".join(config['text.latex.preamble']):
+ _log.info("""
+*****************************************************************
+You have the following UNSUPPORTED LaTeX preamble customizations:
+%s
+Please do not ask for support with these customizations active.
+*****************************************************************
+""", '\n'.join(config['text.latex.preamble']))
+ _log.debug('loaded rc file %s', fname)
+
+ return config
+
+
+# this is the instance used by the matplotlib classes
+rcParams = rc_params()
+
+if rcParams['examples.directory']:
+ # paths that are intended to be relative to matplotlib_fname()
+ # are allowed for the examples.directory parameter.
+ # However, we will need to fully qualify the path because
+ # Sphinx requires absolute paths.
+ if not os.path.isabs(rcParams['examples.directory']):
+ _basedir, _fname = os.path.split(matplotlib_fname())
+ # Sometimes matplotlib_fname() can return relative paths,
+ # Also, using realpath() guarantees that Sphinx will use
+ # the same path that matplotlib sees (in case of weird symlinks).
+ _basedir = os.path.realpath(_basedir)
+ _fullpath = os.path.join(_basedir, rcParams['examples.directory'])
+ rcParams['examples.directory'] = _fullpath
+
+rcParamsOrig = rcParams.copy()
+
+with warnings.catch_warnings():
+ warnings.simplefilter("ignore", mplDeprecation)
+ rcParamsDefault = RcParams([(key, default) for key, (default, converter) in
+ six.iteritems(defaultParams)
+ if key not in _all_deprecated])
+
+rcParams['ps.usedistiller'] = checkdep_ps_distiller(
+ rcParams['ps.usedistiller'])
+
+rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
+
+if rcParams['axes.formatter.use_locale']:
+ locale.setlocale(locale.LC_ALL, '')
+
+
+def rc(group, **kwargs):
+ """
+ Set the current rc params. Group is the grouping for the rc, e.g.,
+ for ``lines.linewidth`` the group is ``lines``, for
+ ``axes.facecolor``, the group is ``axes``, and so on. Group may
+ also be a list or tuple of group names, e.g., (*xtick*, *ytick*).
+    *kwargs* is a dictionary of attribute name/value pairs, e.g.,::
+
+ rc('lines', linewidth=2, color='r')
+
+ sets the current rc params and is equivalent to::
+
+ rcParams['lines.linewidth'] = 2
+ rcParams['lines.color'] = 'r'
+
+ The following aliases are available to save typing for interactive
+ users:
+
+ ===== =================
+ Alias Property
+ ===== =================
+ 'lw' 'linewidth'
+ 'ls' 'linestyle'
+ 'c' 'color'
+ 'fc' 'facecolor'
+ 'ec' 'edgecolor'
+ 'mew' 'markeredgewidth'
+ 'aa' 'antialiased'
+ ===== =================
+
+ Thus you could abbreviate the above rc command as::
+
+ rc('lines', lw=2, c='r')
+
+
+ Note you can use python's kwargs dictionary facility to store
+ dictionaries of default parameters. e.g., you can customize the
+ font rc as follows::
+
+ font = {'family' : 'monospace',
+ 'weight' : 'bold',
+ 'size' : 'larger'}
+
+ rc('font', **font) # pass in the font dict as kwargs
+
+ This enables you to easily switch between several configurations. Use
+ ``matplotlib.style.use('default')`` or :func:`~matplotlib.rcdefaults` to
+ restore the default rc params after changes.
+ """
+
+ aliases = {
+ 'lw': 'linewidth',
+ 'ls': 'linestyle',
+ 'c': 'color',
+ 'fc': 'facecolor',
+ 'ec': 'edgecolor',
+ 'mew': 'markeredgewidth',
+ 'aa': 'antialiased',
+ }
+
+ if isinstance(group, six.string_types):
+ group = (group,)
+ for g in group:
+ for k, v in six.iteritems(kwargs):
+ name = aliases.get(k) or k
+ key = '%s.%s' % (g, name)
+ try:
+ rcParams[key] = v
+ except KeyError:
+ raise KeyError(('Unrecognized key "%s" for group "%s" and '
+ 'name "%s"') % (key, g, name))
+
+
+def rcdefaults():
+ """Restore the rc params from Matplotlib's internal defaults.
+
+ See Also
+ --------
+ rc_file_defaults :
+ Restore the rc params from the rc file originally loaded by Matplotlib.
+ matplotlib.style.use :
+ Use a specific style file. Call ``style.use('default')`` to restore
+ the default style.
+ """
+ rcParams.clear()
+ rcParams.update(rcParamsDefault)
+
+
+def rc_file_defaults():
+ """Restore the rc params from the original rc file loaded by Matplotlib.
+ """
+ rcParams.update(rcParamsOrig)
+
+
+def rc_file(fname):
+ """
+ Update rc params from file.
+ """
+ rcParams.update(rc_params_from_file(fname))
+
+
+class rc_context(object):
+ """
+ Return a context manager for managing rc settings.
+
+ This allows one to do::
+
+ with mpl.rc_context(fname='screen.rc'):
+ plt.plot(x, a)
+ with mpl.rc_context(fname='print.rc'):
+ plt.plot(x, b)
+ plt.plot(x, c)
+
+ The 'a' vs 'x' and 'c' vs 'x' plots would have settings from
+ 'screen.rc', while the 'b' vs 'x' plot would have settings from
+ 'print.rc'.
+
+ A dictionary can also be passed to the context manager::
+
+ with mpl.rc_context(rc={'text.usetex': True}, fname='screen.rc'):
+ plt.plot(x, a)
+
+ The 'rc' dictionary takes precedence over the settings loaded from
+ 'fname'. Passing a dictionary only is also valid. For example a
+ common usage is::
+
+ with mpl.rc_context(rc={'interactive': False}):
+ fig, ax = plt.subplots()
+ ax.plot(range(3), range(3))
+ fig.savefig('A.png', format='png')
+ plt.close(fig)
+ """
+ # While it may seem natural to implement rc_context using
+ # contextlib.contextmanager, that would entail always calling the finally:
+ # clause of the contextmanager (which restores the original rcs) including
+ # during garbage collection; as a result, something like `plt.xkcd();
+ # gc.collect()` would result in the style being lost (as `xkcd()` is
+    # implemented on top of rc_context, and nothing is holding onto the
+    # context manager except possibly circular references).
+
+ def __init__(self, rc=None, fname=None):
+ self._orig = rcParams.copy()
+ try:
+ if fname:
+ rc_file(fname)
+ if rc:
+ rcParams.update(rc)
+ except Exception:
+ # If anything goes wrong, revert to the original rcs.
+ dict.update(rcParams, self._orig)
+ raise
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ # No need to revalidate the original values.
+ dict.update(rcParams, self._orig)
+
+
+_use_error_msg = """
+This call to matplotlib.use() has no effect because the backend has already
+been chosen; matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
+or matplotlib.backends is imported for the first time.
+
+The backend was *originally* set to {backend!r} by the following code:
+{tb}
+"""
+
+
+def use(arg, warn=True, force=False):
+ """
+ Set the matplotlib backend to one of the known backends.
+
+ The argument is case-insensitive. *warn* specifies whether a
+ warning should be issued if a backend has already been set up.
+ *force* is an **experimental** flag that tells matplotlib to
+ attempt to initialize a new backend by reloading the backend
+ module.
+
+ .. note::
+
+ This function must be called *before* importing pyplot for
+ the first time; or, if you are not using pyplot, it must be called
+ before importing matplotlib.backends. If warn is True, a warning
+ is issued if you try and call this after pylab or pyplot have been
+ loaded. In certain black magic use cases, e.g.
+ :func:`pyplot.switch_backend`, we are doing the reloading necessary to
+ make the backend switch work (in some cases, e.g., pure image
+ backends) so one can set warn=False to suppress the warnings.
+
+ To find out which backend is currently set, see
+ :func:`matplotlib.get_backend`.
+
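+    A short sketch (the backend name here is only illustrative)::
+
+        import matplotlib
+        matplotlib.use('PS')   # must happen before pyplot is imported
+        import matplotlib.pyplot as plt
+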
+ """
+ # Lets determine the proper backend name first
+ if arg.startswith('module://'):
+ name = arg
+ else:
+ # Lowercase only non-module backend names (modules are case-sensitive)
+ arg = arg.lower()
+ name = validate_backend(arg)
+
+ # Check if we've already set up a backend
+ if 'matplotlib.backends' in sys.modules:
+ # Warn only if called with a different name
+ if (rcParams['backend'] != name) and warn:
+ import matplotlib.backends
+ warnings.warn(
+ _use_error_msg.format(
+ backend=rcParams['backend'],
+ tb=matplotlib.backends._backend_loading_tb),
+ stacklevel=2)
+
+ # Unless we've been told to force it, just return
+ if not force:
+ return
+ need_reload = True
+ else:
+ need_reload = False
+
+ # Store the backend name
+ rcParams['backend'] = name
+
+ # If needed we reload here because a lot of setup code is triggered on
+ # module import. See backends/__init__.py for more detail.
+ if need_reload:
+ reload(sys.modules['matplotlib.backends'])
+
+
+try:
+ use(os.environ['MPLBACKEND'])
+except KeyError:
+ pass
+
+
+def get_backend():
+ """Return the name of the current backend."""
+ return rcParams['backend']
+
+
+def interactive(b):
+ """
+ Set interactive mode to boolean b.
+
+ If b is True, then draw after every plotting command, e.g., after xlabel
+ """
+ rcParams['interactive'] = b
+
+
+def is_interactive():
+ 'Return true if plot mode is interactive'
+ return rcParams['interactive']
+
+
+def tk_window_focus():
+ """Return true if focus maintenance under TkAgg on win32 is on.
+ This currently works only for python.exe and IPython.exe.
+ Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
+ if rcParams['backend'] != 'TkAgg':
+ return False
+ return rcParams['tk.window_focus']
+
+
+default_test_modules = [
+ 'matplotlib.tests',
+ 'matplotlib.sphinxext.tests',
+ 'mpl_toolkits.tests',
+]
+
+
+def _init_tests():
+ try:
+ import faulthandler
+ except ImportError:
+ pass
+ else:
+ # CPython's faulthandler since v3.6 handles exceptions on Windows
+ # https://bugs.python.org/issue23848 but until v3.6.4 it was
+ # printing non-fatal exceptions https://bugs.python.org/issue30557
+ import platform
+ if not (sys.platform == 'win32' and
+ (3, 6) < sys.version_info < (3, 6, 4) and
+ platform.python_implementation() == 'CPython'):
+ faulthandler.enable()
+
+ # The version of FreeType to install locally for running the
+ # tests. This must match the value in `setupext.py`
+ LOCAL_FREETYPE_VERSION = '2.6.1'
+
+ from matplotlib import ft2font
+ if (ft2font.__freetype_version__ != LOCAL_FREETYPE_VERSION or
+ ft2font.__freetype_build_type__ != 'local'):
+ warnings.warn(
+ "Matplotlib is not built with the correct FreeType version to run "
+ "tests. Set local_freetype=True in setup.cfg and rebuild. "
+ "Expect many image comparison failures below. "
+ "Expected freetype version {0}. "
+ "Found freetype version {1}. "
+ "Freetype build type is {2}local".format(
+ LOCAL_FREETYPE_VERSION,
+ ft2font.__freetype_version__,
+ "" if ft2font.__freetype_build_type__ == 'local' else "not "
+ )
+ )
+
+ try:
+ import pytest
+ try:
+ from unittest import mock
+ except ImportError:
+ import mock
+ except ImportError:
+ print("matplotlib.test requires pytest and mock to run.")
+ raise
+
+
+def test(verbosity=None, coverage=False, switch_backend_warn=True,
+ recursionlimit=0, **kwargs):
+ """run the matplotlib test suite"""
+ _init_tests()
+ if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'tests')):
+ raise ImportError("Matplotlib test data is not installed")
+
+ old_backend = get_backend()
+ old_recursionlimit = sys.getrecursionlimit()
+ try:
+ use('agg')
+ if recursionlimit:
+ sys.setrecursionlimit(recursionlimit)
+ import pytest
+
+ args = kwargs.pop('argv', [])
+ provide_default_modules = True
+ use_pyargs = True
+ for arg in args:
+ if any(arg.startswith(module_path)
+ for module_path in default_test_modules):
+ provide_default_modules = False
+ break
+ if os.path.exists(arg):
+ provide_default_modules = False
+ use_pyargs = False
+ break
+ if use_pyargs:
+ args += ['--pyargs']
+ if provide_default_modules:
+ args += default_test_modules
+
+ if coverage:
+ args += ['--cov']
+
+ if verbosity:
+ args += ['-' + 'v' * verbosity]
+
+ retcode = pytest.main(args, **kwargs)
+ finally:
+ if old_backend.lower() != 'agg':
+ use(old_backend, warn=switch_backend_warn)
+ if recursionlimit:
+ sys.setrecursionlimit(old_recursionlimit)
+
+ return retcode
+
+
+test.__test__ = False # pytest: this function is not a test
+
+
+def _replacer(data, key):
+ """Either returns data[key] or passes data back. Also
+ converts input data to a sequence as needed.
+ """
+ # if key isn't a string don't bother
+ if not isinstance(key, six.string_types):
+ return (key)
+ # try to use __getitem__
+ try:
+ return sanitize_sequence(data[key])
+ # key does not exist, silently fall back to key
+ except KeyError:
+ return key
+
+
+_DATA_DOC_APPENDIX = """
+
+.. note::
+ In addition to the above described arguments, this function can take a
+ **data** keyword argument. If such a **data** argument is given, the
+ following arguments are replaced by **data[<arg>]**:
+
+ {replaced}
+"""
+
+
+def _add_data_doc(docstring, replace_names, replace_all_args):
+ """Add documentation for a *data* field to the given docstring.
+
+ Parameters
+ ----------
+ docstring : str
+ The input docstring.
+ replace_names : list of strings or None
+        The list of parameter names whose arguments should be replaced by
+ `data[name]`. If None, all arguments are replaced if they are
+ included in `data`.
+ replace_all_args : bool
+ If True, all arguments in *args get replaced, even if they are not
+ in replace_names.
+
+ Returns
+ -------
+ The augmented docstring.
+ """
+ if docstring is None:
+ docstring = ''
+ else:
+ docstring = dedent(docstring)
+ _repl = ""
+ if replace_names is None:
+ _repl = "* All positional and all keyword arguments."
+ else:
+ if len(replace_names) != 0:
+ _repl = "* All arguments with the following names: '{names}'."
+ if replace_all_args:
+ _repl += "\n * All positional arguments."
+ _repl = _repl.format(names="', '".join(sorted(replace_names)))
+ return docstring + _DATA_DOC_APPENDIX.format(replaced=_repl)
+
+
+def _preprocess_data(replace_names=None, replace_all_args=False,
+ label_namer=None, positional_parameter_names=None):
+ """
+    A decorator to add a 'data' kwarg to any function. The signature
+ of the input function must include the ax argument at the first position ::
+
+ def foo(ax, *args, **kwargs)
+
+ so this is suitable for use with Axes methods.
+
+ Parameters
+ ----------
+ replace_names : list of strings, optional, default: None
+        The list of parameter names whose arguments should be replaced by
+ `data[name]`. If None, all arguments are replaced if they are
+ included in `data`.
+ replace_all_args : bool, default: False
+ If True, all arguments in *args get replaced, even if they are not
+ in replace_names.
+ label_namer : string, optional, default: None
+ The name of the parameter which argument should be used as label, if
+ label is not set. If None, the label keyword argument is not set.
+ positional_parameter_names : list of strings or callable, optional
+        The full list of positional parameter names (excluding an explicit
+        `ax`/'self' argument at the first place and including all possible
+        positional parameters in `*args`), in the right order. Can also
+        include all other keyword parameters. Only needed if the wrapped
+        function contains `*args` and (replace_names is not None or
+        replace_all_args is False). If it is a callable, it will be called
+        with the actual tuple of *args and the data and should return a list
+        like the one described above.
+        NOTE: callables should only be used when the names and order of *args
+        can only be determined at runtime. Please use a list of names when
+        the order and names of *args are clear before runtime!
+
+ .. note:: decorator also converts MappingView input data to list.
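+
+    A sketch of typical use (the decorated function here is hypothetical)::
+
+        @_preprocess_data(replace_names=["x", "y"], label_namer="y")
+        def plot_xy(ax, x, y, **kwargs):
+            return ax.plot(x, y, **kwargs)
+
+        # plot_xy(ax, "time", "temp", data=table) looks up table["time"] and
+        # table["temp"], and uses "temp" as the default label.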
+ """
+ if replace_names is not None:
+ replace_names = set(replace_names)
+
+ def param(func):
+ new_sig = None
+ # signature is since 3.3 and wrapped since 3.2, but we support 3.4+.
+ python_has_signature = python_has_wrapped = six.PY3
+
+ # if in a legacy version of python and IPython is already imported
+ # try to use their back-ported signature
+ if not python_has_signature and 'IPython' in sys.modules:
+ try:
+ import IPython.utils.signatures
+ signature = IPython.utils.signatures.signature
+ Parameter = IPython.utils.signatures.Parameter
+ except ImportError:
+ pass
+ else:
+ python_has_signature = True
+ else:
+ if python_has_signature:
+ signature = inspect.signature
+ Parameter = inspect.Parameter
+
+ if not python_has_signature:
+ arg_spec = inspect.getargspec(func)
+ _arg_names = arg_spec.args
+ _has_varargs = arg_spec.varargs is not None
+ _has_varkwargs = arg_spec.keywords is not None
+ else:
+ sig = signature(func)
+ _has_varargs = False
+ _has_varkwargs = False
+ _arg_names = []
+ params = list(sig.parameters.values())
+ for p in params:
+ if p.kind is Parameter.VAR_POSITIONAL:
+ _has_varargs = True
+ elif p.kind is Parameter.VAR_KEYWORD:
+ _has_varkwargs = True
+ else:
+ _arg_names.append(p.name)
+ data_param = Parameter('data',
+ Parameter.KEYWORD_ONLY,
+ default=None)
+ if _has_varkwargs:
+ params.insert(-1, data_param)
+ else:
+ params.append(data_param)
+ new_sig = sig.replace(parameters=params)
+ # Import-time check: do we have enough information to replace *args?
+ arg_names_at_runtime = False
+ # there can't be any positional arguments behind *args and no
+ # positional args can end up in **kwargs, so only *varargs make
+ # problems.
+ # http://stupidpythonideas.blogspot.de/2013/08/arguments-and-parameters.html
+ if not _has_varargs:
+ # all args are "named", so no problem
+ # remove the first "ax" / self arg
+ arg_names = _arg_names[1:]
+ else:
+ # Here we have "unnamed" variables and we need a way to determine
+            # whether to replace an arg or not
+ if replace_names is None:
+ # all argnames should be replaced
+ arg_names = None
+ elif len(replace_names) == 0:
+ # No argnames should be replaced
+ arg_names = []
+ elif len(_arg_names) > 1 and (positional_parameter_names is None):
+ # we got no manual parameter names but more than an 'ax' ...
+ if len(replace_names - set(_arg_names[1:])) == 0:
+                    # all arguments to be replaced are in the list
+ arg_names = _arg_names[1:]
+ else:
+ raise AssertionError(
+ "Got unknown 'replace_names' and wrapped function "
+ "{!r} uses '*args', need 'positional_parameter_names'"
+ .format(func.__name__))
+ else:
+ if positional_parameter_names is not None:
+ if callable(positional_parameter_names):
+ # determined by the function at runtime
+ arg_names_at_runtime = True
+ # so that we don't compute the label_pos at import time
+ arg_names = []
+ else:
+ arg_names = positional_parameter_names
+ else:
+ if replace_all_args:
+ arg_names = []
+ else:
+ raise AssertionError(
+ "Got 'replace_names' and wrapped function {!r} "
+ "uses *args, need 'positional_parameter_names' or "
+ "'replace_all_args'".format(func.__name__))
+
+ # compute the possible label_namer and label position in positional
+ # arguments
+ label_pos = 9999 # bigger than all "possible" argument lists
+ label_namer_pos = 9999 # bigger than all "possible" argument lists
+ if (label_namer and # we actually want a label here ...
+ arg_names and # and we can determine a label in *args ...
+ (label_namer in arg_names)): # and it is in *args
+ label_namer_pos = arg_names.index(label_namer)
+ if "label" in arg_names:
+ label_pos = arg_names.index("label")
+
+ # Check the case where we know a label_namer but can't find it in
+ # arg_names... Unfortunately the label_namer can be in **kwargs,
+ # which we can't detect here; that results in an unset label, which
+ # might surprise the user :-(
+ if label_namer and not arg_names_at_runtime and not _has_varkwargs:
+ if not arg_names:
+ raise AssertionError(
+ "label_namer {!r} can't be found as the parameter without "
+ "'positional_parameter_names'".format(label_namer))
+ elif label_namer not in arg_names:
+ raise AssertionError(
+ "label_namer {!r} can't be found in the parameter names "
+ "(known argnames: %s).".format(label_namer, arg_names))
+ else:
+ # this is the case when the name is in arg_names
+ pass
+
+ @functools.wraps(func)
+ def inner(ax, *args, **kwargs):
+ # this is needed because we want to change these values if
+ # arg_names_at_runtime==True, but python does not allow assigning
+ # to a variable in an outer scope. So use new local ones and
+ # set them to the already computed values.
+ _label_pos = label_pos
+ _label_namer_pos = label_namer_pos
+ _arg_names = arg_names
+
+ label = None
+
+ data = kwargs.pop('data', None)
+
+ if data is None: # data validation
+ args = tuple(sanitize_sequence(a) for a in args)
+ else:
+ if arg_names_at_runtime:
+ # update the information about replace names and
+ # label position
+ _arg_names = positional_parameter_names(args, data)
+ if (label_namer and # we actually want a label here ...
+ _arg_names and # and we can find a label in *args
+ (label_namer in _arg_names)): # and it is in *args
+ _label_namer_pos = _arg_names.index(label_namer)
+ if "label" in _arg_names:
+ _label_pos = arg_names.index("label")
+
+ # save the current label_namer value so that it can be used as
+ # a label
+ if _label_namer_pos < len(args):
+ label = args[_label_namer_pos]
+ else:
+ label = kwargs.get(label_namer, None)
+ # ensure a string, as label can't be anything else
+ if not isinstance(label, six.string_types):
+ label = None
+
+ if (replace_names is None) or (replace_all_args is True):
+ # all should be replaced
+ args = tuple(_replacer(data, a) for
+ j, a in enumerate(args))
+ else:
+ # An arg is replaced if the arg_name of that position is
+ # in replace_names ...
+ if len(_arg_names) < len(args):
+ raise RuntimeError(
+ "Got more args than function expects")
+ args = tuple(_replacer(data, a)
+ if _arg_names[j] in replace_names else a
+ for j, a in enumerate(args))
+
+ if replace_names is None:
+ # replace all kwargs ...
+ kwargs = dict((k, _replacer(data, v))
+ for k, v in six.iteritems(kwargs))
+ else:
+ # ... or only if a kwarg of that name is in replace_names
+ kwargs = dict((k, _replacer(data, v)
+ if k in replace_names else v)
+ for k, v in six.iteritems(kwargs))
+
+ # replace the label if this func "wants" a label arg and the user
+ # didn't set one. Note: if the user puts in "label=None", it does
+ # *NOT* get replaced!
+ user_supplied_label = (
+ (len(args) >= _label_pos) or # label is included in args
+ ('label' in kwargs) # ... or in kwargs
+ )
+ if (label_namer and not user_supplied_label):
+ if _label_namer_pos < len(args):
+ kwargs['label'] = get_label(args[_label_namer_pos], label)
+ elif label_namer in kwargs:
+ kwargs['label'] = get_label(kwargs[label_namer], label)
+ else:
+ warnings.warn(
+ "Tried to set a label via parameter %r in func %r but "
+ "couldn't find such an argument.\n"
+ "(This is a programming error, please report to "
+ "the Matplotlib list!)" % (label_namer, func.__name__),
+ RuntimeWarning, stacklevel=2)
+ return func(ax, *args, **kwargs)
+
+ inner.__doc__ = _add_data_doc(inner.__doc__,
+ replace_names, replace_all_args)
+ if not python_has_wrapped:
+ inner.__wrapped__ = func
+ if new_sig is not None:
+ inner.__signature__ = new_sig
+ return inner
+
+ return param
+
+_log.debug('matplotlib version %s', __version__)
+_log.debug('interactive is %s', is_interactive())
+_log.debug('platform is %s', sys.platform)
+_log.debug('loaded modules: %s', list(sys.modules))
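For context, a minimal usage sketch of the data-replacement decorator defined above (illustrative only, not part of this patch): a decorated Axes method can be called with string keys plus a `data` container, the keys are replaced by `data[key]`, and the string passed for the label-naming parameter is used as the default artist label.

    import numpy as np
    import matplotlib.pyplot as plt

    d = {'t': np.arange(10), 'v': np.arange(10) ** 2}
    fig, ax = plt.subplots()
    # 't' and 'v' are looked up in d; the y-argument string also
    # becomes the line's default label.
    ax.plot('t', 'v', data=d)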
diff --git a/contrib/python/matplotlib/py2/matplotlib/_animation_data.py b/contrib/python/matplotlib/py2/matplotlib/_animation_data.py
new file mode 100644
index 00000000000..4c3f2c75b65
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/_animation_data.py
@@ -0,0 +1,210 @@
+# Javascript template for HTMLWriter
+JS_INCLUDE = """
+<link rel="stylesheet"
+href="https://maxcdn.bootstrapcdn.com/font-awesome/4.4.0/
+css/font-awesome.min.css">
+<script language="javascript">
+ /* Define the Animation class */
+ function Animation(frames, img_id, slider_id, interval, loop_select_id){
+ this.img_id = img_id;
+ this.slider_id = slider_id;
+ this.loop_select_id = loop_select_id;
+ this.interval = interval;
+ this.current_frame = 0;
+ this.direction = 0;
+ this.timer = null;
+ this.frames = new Array(frames.length);
+
+ for (var i=0; i<frames.length; i++)
+ {
+ this.frames[i] = new Image();
+ this.frames[i].src = frames[i];
+ }
+ document.getElementById(this.slider_id).max = this.frames.length - 1;
+ this.set_frame(this.current_frame);
+ }
+
+ Animation.prototype.get_loop_state = function(){
+ var button_group = document[this.loop_select_id].state;
+ for (var i = 0; i < button_group.length; i++) {
+ var button = button_group[i];
+ if (button.checked) {
+ return button.value;
+ }
+ }
+ return undefined;
+ }
+
+ Animation.prototype.set_frame = function(frame){
+ this.current_frame = frame;
+ document.getElementById(this.img_id).src =
+ this.frames[this.current_frame].src;
+ document.getElementById(this.slider_id).value = this.current_frame;
+ }
+
+ Animation.prototype.next_frame = function()
+ {
+ this.set_frame(Math.min(this.frames.length - 1, this.current_frame + 1));
+ }
+
+ Animation.prototype.previous_frame = function()
+ {
+ this.set_frame(Math.max(0, this.current_frame - 1));
+ }
+
+ Animation.prototype.first_frame = function()
+ {
+ this.set_frame(0);
+ }
+
+ Animation.prototype.last_frame = function()
+ {
+ this.set_frame(this.frames.length - 1);
+ }
+
+ Animation.prototype.slower = function()
+ {
+ this.interval /= 0.7;
+ if(this.direction > 0){this.play_animation();}
+ else if(this.direction < 0){this.reverse_animation();}
+ }
+
+ Animation.prototype.faster = function()
+ {
+ this.interval *= 0.7;
+ if(this.direction > 0){this.play_animation();}
+ else if(this.direction < 0){this.reverse_animation();}
+ }
+
+ Animation.prototype.anim_step_forward = function()
+ {
+ this.current_frame += 1;
+ if(this.current_frame < this.frames.length){
+ this.set_frame(this.current_frame);
+ }else{
+ var loop_state = this.get_loop_state();
+ if(loop_state == "loop"){
+ this.first_frame();
+ }else if(loop_state == "reflect"){
+ this.last_frame();
+ this.reverse_animation();
+ }else{
+ this.pause_animation();
+ this.last_frame();
+ }
+ }
+ }
+
+ Animation.prototype.anim_step_reverse = function()
+ {
+ this.current_frame -= 1;
+ if(this.current_frame >= 0){
+ this.set_frame(this.current_frame);
+ }else{
+ var loop_state = this.get_loop_state();
+ if(loop_state == "loop"){
+ this.last_frame();
+ }else if(loop_state == "reflect"){
+ this.first_frame();
+ this.play_animation();
+ }else{
+ this.pause_animation();
+ this.first_frame();
+ }
+ }
+ }
+
+ Animation.prototype.pause_animation = function()
+ {
+ this.direction = 0;
+ if (this.timer){
+ clearInterval(this.timer);
+ this.timer = null;
+ }
+ }
+
+ Animation.prototype.play_animation = function()
+ {
+ this.pause_animation();
+ this.direction = 1;
+ var t = this;
+ if (!this.timer) this.timer = setInterval(function() {
+ t.anim_step_forward();
+ }, this.interval);
+ }
+
+ Animation.prototype.reverse_animation = function()
+ {
+ this.pause_animation();
+ this.direction = -1;
+ var t = this;
+ if (!this.timer) this.timer = setInterval(function() {
+ t.anim_step_reverse();
+ }, this.interval);
+ }
+</script>
+"""
+
+
+# HTML template for HTMLWriter
+DISPLAY_TEMPLATE = """
+<div class="animation" align="center">
+ <img id="_anim_img{id}">
+ <br>
+ <input id="_anim_slider{id}" type="range" style="width:350px"
+ name="points" min="0" max="1" step="1" value="0"
+ onchange="anim{id}.set_frame(parseInt(this.value));"></input>
+ <br>
+ <button onclick="anim{id}.slower()"><i class="fa fa-minus"></i></button>
+ <button onclick="anim{id}.first_frame()"><i class="fa fa-fast-backward">
+ </i></button>
+ <button onclick="anim{id}.previous_frame()">
+ <i class="fa fa-step-backward"></i></button>
+ <button onclick="anim{id}.reverse_animation()">
+ <i class="fa fa-play fa-flip-horizontal"></i></button>
+ <button onclick="anim{id}.pause_animation()"><i class="fa fa-pause">
+ </i></button>
+ <button onclick="anim{id}.play_animation()"><i class="fa fa-play"></i>
+ </button>
+ <button onclick="anim{id}.next_frame()"><i class="fa fa-step-forward">
+ </i></button>
+ <button onclick="anim{id}.last_frame()"><i class="fa fa-fast-forward">
+ </i></button>
+ <button onclick="anim{id}.faster()"><i class="fa fa-plus"></i></button>
+ <form action="#n" name="_anim_loop_select{id}" class="anim_control">
+ <input type="radio" name="state"
+ value="once" {once_checked}> Once </input>
+ <input type="radio" name="state"
+ value="loop" {loop_checked}> Loop </input>
+ <input type="radio" name="state"
+ value="reflect" {reflect_checked}> Reflect </input>
+ </form>
+</div>
+
+
+<script language="javascript">
+ /* Instantiate the Animation class. */
+ /* The IDs given should match those used in the template above. */
+ (function() {{
+ var img_id = "_anim_img{id}";
+ var slider_id = "_anim_slider{id}";
+ var loop_select_id = "_anim_loop_select{id}";
+ var frames = new Array({Nframes});
+ {fill_frames}
+
+ /* set a timeout to make sure all the above elements are created before
+ the object is initialized. */
+ setTimeout(function() {{
+ anim{id} = new Animation(frames, img_id, slider_id, {interval},
+ loop_select_id);
+ }}, 0);
+ }})()
+</script>
+"""
+
+INCLUDED_FRAMES = """
+ for (var i=0; i<{Nframes}; i++){{
+ frames[i] = "{frame_dir}/frame" + ("0000000" + i).slice(-7) +
+ ".{frame_format}";
+ }}
+"""
diff --git a/contrib/python/matplotlib/py2/matplotlib/_cm.py b/contrib/python/matplotlib/py2/matplotlib/_cm.py
new file mode 100644
index 00000000000..a32229cb63b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/_cm.py
@@ -0,0 +1,1445 @@
+"""
+Nothing here but dictionaries for generating LinearSegmentedColormaps,
+and a dictionary of these dictionaries.
+
+Documentation for each is in pyplot.colormaps(). Please update this
+with the purpose and type of your colormap if you add data for one here.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import numpy as np
+
+_binary_data = {
+ 'red': ((0., 1., 1.), (1., 0., 0.)),
+ 'green': ((0., 1., 1.), (1., 0., 0.)),
+ 'blue': ((0., 1., 1.), (1., 0., 0.))
+ }
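As a reading aid for the segment-data format used throughout this module: each channel is a sequence of (x, y0, y1) rows, where x is an anchor in [0, 1], y1 is the channel value used for the segment starting at x, and y0 the value used for the segment ending at x (y0 != y1 yields a discontinuity). A small sketch, assuming matplotlib is importable:

    from matplotlib.colors import LinearSegmentedColormap

    binary = LinearSegmentedColormap('binary', _binary_data)
    binary(0.0)  # (1.0, 1.0, 1.0, 1.0) -- white at the low end
    binary(1.0)  # (0.0, 0.0, 0.0, 1.0) -- black at the high end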
+
+_autumn_data = {'red': ((0., 1.0, 1.0), (1.0, 1.0, 1.0)),
+ 'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
+ 'blue': ((0., 0., 0.), (1.0, 0., 0.))}
+
+_bone_data = {'red': ((0., 0., 0.),
+ (0.746032, 0.652778, 0.652778),
+ (1.0, 1.0, 1.0)),
+ 'green': ((0., 0., 0.),
+ (0.365079, 0.319444, 0.319444),
+ (0.746032, 0.777778, 0.777778),
+ (1.0, 1.0, 1.0)),
+ 'blue': ((0., 0., 0.),
+ (0.365079, 0.444444, 0.444444),
+ (1.0, 1.0, 1.0))}
+
+_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
+ 'green': ((0., 1., 1.), (1.0, 0., 0.)),
+ 'blue': ((0., 1., 1.), (1.0, 1., 1.))}
+
+_copper_data = {'red': ((0., 0., 0.),
+ (0.809524, 1.000000, 1.000000),
+ (1.0, 1.0, 1.0)),
+ 'green': ((0., 0., 0.),
+ (1.0, 0.7812, 0.7812)),
+ 'blue': ((0., 0., 0.),
+ (1.0, 0.4975, 0.4975))}
+
+_flag_data = {
+ 'red': lambda x: 0.75 * np.sin((x * 31.5 + 0.25) * np.pi) + 0.5,
+ 'green': lambda x: np.sin(x * 31.5 * np.pi),
+ 'blue': lambda x: 0.75 * np.sin((x * 31.5 - 0.25) * np.pi) + 0.5,
+}
+
+_prism_data = {
+ 'red': lambda x: 0.75 * np.sin((x * 20.9 + 0.25) * np.pi) + 0.67,
+ 'green': lambda x: 0.75 * np.sin((x * 20.9 - 0.25) * np.pi) + 0.33,
+ 'blue': lambda x: -1.1 * np.sin((x * 20.9) * np.pi),
+}
+
+
+def cubehelix(gamma=1.0, s=0.5, r=-1.5, h=1.0):
+ """Return custom data dictionary of (r,g,b) conversion functions, which
+ can be used with :func:`register_cmap`, for the cubehelix color scheme.
+
+ Unlike most other color schemes, cubehelix was designed by D.A. Green to
+ be monotonically increasing in terms of perceived brightness.
+ Also, when printed on a black and white postscript printer, the scheme
+ results in a greyscale with monotonically increasing brightness.
+ This color scheme is named cubehelix because the r,g,b values produced
+ can be visualised as a squashed helix around the diagonal in the
+ r,g,b color cube.
+
+ For a unit color cube (i.e. 3-D coordinates for r,g,b each in the
+ range 0 to 1) the color scheme starts at (r,g,b) = (0,0,0), i.e. black,
+ and finishes at (r,g,b) = (1,1,1), i.e. white. For some fraction *x*,
+ between 0 and 1, the color is the corresponding grey value at that
+ fraction along the black to white diagonal (x,x,x) plus a color
+ element. This color element is calculated in a plane of constant
+ perceived intensity and controlled by the following parameters.
+
+ Optional keyword arguments:
+
+ ========= =======================================================
+ Keyword Description
+ ========= =======================================================
+ gamma gamma factor to emphasise either low intensity values
+ (gamma < 1), or high intensity values (gamma > 1);
+ defaults to 1.0.
+ s the start color; defaults to 0.5 (i.e. purple).
+ r the number of r,g,b rotations in color that are made
+ from the start to the end of the color scheme; defaults
+ to -1.5 (i.e. -> B -> G -> R -> B).
+ h the hue parameter which controls how saturated the
+ colors are. If this parameter is zero then the color
+ scheme is purely a greyscale; defaults to 1.0.
+ ========= =======================================================
+
+ """
+
+ def get_color_function(p0, p1):
+
+ def color(x):
+ # Apply gamma factor to emphasise low or high intensity values
+ xg = x ** gamma
+
+ # Calculate amplitude and angle of deviation from the black
+ # to white diagonal in the plane of constant
+ # perceived intensity.
+ a = h * xg * (1 - xg) / 2
+
+ phi = 2 * np.pi * (s / 3 + r * x)
+
+ return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))
+ return color
+
+ return {
+ 'red': get_color_function(-0.14861, 1.78277),
+ 'green': get_color_function(-0.29227, -0.90649),
+ 'blue': get_color_function(1.97294, 0.0),
+ }
+
+_cubehelix_data = cubehelix()
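As the docstring notes, the returned dictionary of callables can be fed to a LinearSegmentedColormap and registered; a short sketch with arbitrary parameter values ('my_cubehelix' is only an example name):

    from matplotlib.cm import register_cmap
    from matplotlib.colors import LinearSegmentedColormap

    helix = LinearSegmentedColormap('my_cubehelix',
                                    cubehelix(gamma=0.8, s=0.3, r=-2.0, h=1.2))
    register_cmap(cmap=helix)  # afterwards usable as cmap='my_cubehelix'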
+
+_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))
+_brg_data = ((0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))
+
+# Gnuplot palette functions
+gfunc = {
+ 0: lambda x: 0,
+ 1: lambda x: 0.5,
+ 2: lambda x: 1,
+ 3: lambda x: x,
+ 4: lambda x: x ** 2,
+ 5: lambda x: x ** 3,
+ 6: lambda x: x ** 4,
+ 7: lambda x: np.sqrt(x),
+ 8: lambda x: np.sqrt(np.sqrt(x)),
+ 9: lambda x: np.sin(x * np.pi / 2),
+ 10: lambda x: np.cos(x * np.pi / 2),
+ 11: lambda x: np.abs(x - 0.5),
+ 12: lambda x: (2 * x - 1) ** 2,
+ 13: lambda x: np.sin(x * np.pi),
+ 14: lambda x: np.abs(np.cos(x * np.pi)),
+ 15: lambda x: np.sin(x * 2 * np.pi),
+ 16: lambda x: np.cos(x * 2 * np.pi),
+ 17: lambda x: np.abs(np.sin(x * 2 * np.pi)),
+ 18: lambda x: np.abs(np.cos(x * 2 * np.pi)),
+ 19: lambda x: np.abs(np.sin(x * 4 * np.pi)),
+ 20: lambda x: np.abs(np.cos(x * 4 * np.pi)),
+ 21: lambda x: 3 * x,
+ 22: lambda x: 3 * x - 1,
+ 23: lambda x: 3 * x - 2,
+ 24: lambda x: np.abs(3 * x - 1),
+ 25: lambda x: np.abs(3 * x - 2),
+ 26: lambda x: (3 * x - 1) / 2,
+ 27: lambda x: (3 * x - 2) / 2,
+ 28: lambda x: np.abs((3 * x - 1) / 2),
+ 29: lambda x: np.abs((3 * x - 2) / 2),
+ 30: lambda x: x / 0.32 - 0.78125,
+ 31: lambda x: 2 * x - 0.84,
+ 32: lambda x: gfunc32(x),
+ 33: lambda x: np.abs(2 * x - 0.5),
+ 34: lambda x: 2 * x,
+ 35: lambda x: 2 * x - 0.5,
+ 36: lambda x: 2 * x - 1.
+}
+
+
+def gfunc32(x):
+ ret = np.zeros(len(x))
+ m = (x < 0.25)
+ ret[m] = 4 * x[m]
+ m = (x >= 0.25) & (x < 0.92)
+ ret[m] = -2 * x[m] + 1.84
+ m = (x >= 0.92)
+ ret[m] = x[m] / 0.08 - 11.5
+ return ret
+
+_gnuplot_data = {
+ 'red': gfunc[7],
+ 'green': gfunc[5],
+ 'blue': gfunc[15],
+}
+
+_gnuplot2_data = {
+ 'red': gfunc[30],
+ 'green': gfunc[31],
+ 'blue': gfunc[32],
+}
+
+_ocean_data = {
+ 'red': gfunc[23],
+ 'green': gfunc[28],
+ 'blue': gfunc[3],
+}
+
+_afmhot_data = {
+ 'red': gfunc[34],
+ 'green': gfunc[35],
+ 'blue': gfunc[36],
+}
+
+_rainbow_data = {
+ 'red': gfunc[33],
+ 'green': gfunc[13],
+ 'blue': gfunc[10],
+}
+
+_seismic_data = (
+ (0.0, 0.0, 0.3), (0.0, 0.0, 1.0),
+ (1.0, 1.0, 1.0), (1.0, 0.0, 0.0),
+ (0.5, 0.0, 0.0))
+
+_terrain_data = (
+ (0.00, (0.2, 0.2, 0.6)),
+ (0.15, (0.0, 0.6, 1.0)),
+ (0.25, (0.0, 0.8, 0.4)),
+ (0.50, (1.0, 1.0, 0.6)),
+ (0.75, (0.5, 0.36, 0.33)),
+ (1.00, (1.0, 1.0, 1.0)))
+
+_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
+ 'green': ((0., 0, 0), (1., 1, 1)),
+ 'blue': ((0., 0, 0), (1., 1, 1))}
+
+_hot_data = {'red': ((0., 0.0416, 0.0416),
+ (0.365079, 1.000000, 1.000000),
+ (1.0, 1.0, 1.0)),
+ 'green': ((0., 0., 0.),
+ (0.365079, 0.000000, 0.000000),
+ (0.746032, 1.000000, 1.000000),
+ (1.0, 1.0, 1.0)),
+ 'blue': ((0., 0., 0.),
+ (0.746032, 0.000000, 0.000000),
+ (1.0, 1.0, 1.0))}
+
+_hsv_data = {'red': ((0., 1., 1.),
+ (0.158730, 1.000000, 1.000000),
+ (0.174603, 0.968750, 0.968750),
+ (0.333333, 0.031250, 0.031250),
+ (0.349206, 0.000000, 0.000000),
+ (0.666667, 0.000000, 0.000000),
+ (0.682540, 0.031250, 0.031250),
+ (0.841270, 0.968750, 0.968750),
+ (0.857143, 1.000000, 1.000000),
+ (1.0, 1.0, 1.0)),
+ 'green': ((0., 0., 0.),
+ (0.158730, 0.937500, 0.937500),
+ (0.174603, 1.000000, 1.000000),
+ (0.507937, 1.000000, 1.000000),
+ (0.666667, 0.062500, 0.062500),
+ (0.682540, 0.000000, 0.000000),
+ (1.0, 0., 0.)),
+ 'blue': ((0., 0., 0.),
+ (0.333333, 0.000000, 0.000000),
+ (0.349206, 0.062500, 0.062500),
+ (0.507937, 1.000000, 1.000000),
+ (0.841270, 1.000000, 1.000000),
+ (0.857143, 0.937500, 0.937500),
+ (1.0, 0.09375, 0.09375))}
+
+_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89, 1, 1),
+ (1, 0.5, 0.5)),
+ 'green': ((0., 0, 0), (0.125, 0, 0), (0.375, 1, 1), (0.64, 1, 1),
+ (0.91, 0, 0), (1, 0, 0)),
+ 'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1),
+ (0.65, 0, 0), (1, 0, 0))}
+
+_pink_data = {'red': ((0., 0.1178, 0.1178), (0.015873, 0.195857, 0.195857),
+ (0.031746, 0.250661, 0.250661),
+ (0.047619, 0.295468, 0.295468),
+ (0.063492, 0.334324, 0.334324),
+ (0.079365, 0.369112, 0.369112),
+ (0.095238, 0.400892, 0.400892),
+ (0.111111, 0.430331, 0.430331),
+ (0.126984, 0.457882, 0.457882),
+ (0.142857, 0.483867, 0.483867),
+ (0.158730, 0.508525, 0.508525),
+ (0.174603, 0.532042, 0.532042),
+ (0.190476, 0.554563, 0.554563),
+ (0.206349, 0.576204, 0.576204),
+ (0.222222, 0.597061, 0.597061),
+ (0.238095, 0.617213, 0.617213),
+ (0.253968, 0.636729, 0.636729),
+ (0.269841, 0.655663, 0.655663),
+ (0.285714, 0.674066, 0.674066),
+ (0.301587, 0.691980, 0.691980),
+ (0.317460, 0.709441, 0.709441),
+ (0.333333, 0.726483, 0.726483),
+ (0.349206, 0.743134, 0.743134),
+ (0.365079, 0.759421, 0.759421),
+ (0.380952, 0.766356, 0.766356),
+ (0.396825, 0.773229, 0.773229),
+ (0.412698, 0.780042, 0.780042),
+ (0.428571, 0.786796, 0.786796),
+ (0.444444, 0.793492, 0.793492),
+ (0.460317, 0.800132, 0.800132),
+ (0.476190, 0.806718, 0.806718),
+ (0.492063, 0.813250, 0.813250),
+ (0.507937, 0.819730, 0.819730),
+ (0.523810, 0.826160, 0.826160),
+ (0.539683, 0.832539, 0.832539),
+ (0.555556, 0.838870, 0.838870),
+ (0.571429, 0.845154, 0.845154),
+ (0.587302, 0.851392, 0.851392),
+ (0.603175, 0.857584, 0.857584),
+ (0.619048, 0.863731, 0.863731),
+ (0.634921, 0.869835, 0.869835),
+ (0.650794, 0.875897, 0.875897),
+ (0.666667, 0.881917, 0.881917),
+ (0.682540, 0.887896, 0.887896),
+ (0.698413, 0.893835, 0.893835),
+ (0.714286, 0.899735, 0.899735),
+ (0.730159, 0.905597, 0.905597),
+ (0.746032, 0.911421, 0.911421),
+ (0.761905, 0.917208, 0.917208),
+ (0.777778, 0.922958, 0.922958),
+ (0.793651, 0.928673, 0.928673),
+ (0.809524, 0.934353, 0.934353),
+ (0.825397, 0.939999, 0.939999),
+ (0.841270, 0.945611, 0.945611),
+ (0.857143, 0.951190, 0.951190),
+ (0.873016, 0.956736, 0.956736),
+ (0.888889, 0.962250, 0.962250),
+ (0.904762, 0.967733, 0.967733),
+ (0.920635, 0.973185, 0.973185),
+ (0.936508, 0.978607, 0.978607),
+ (0.952381, 0.983999, 0.983999),
+ (0.968254, 0.989361, 0.989361),
+ (0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)),
+ 'green': ((0., 0., 0.), (0.015873, 0.102869, 0.102869),
+ (0.031746, 0.145479, 0.145479),
+ (0.047619, 0.178174, 0.178174),
+ (0.063492, 0.205738, 0.205738),
+ (0.079365, 0.230022, 0.230022),
+ (0.095238, 0.251976, 0.251976),
+ (0.111111, 0.272166, 0.272166),
+ (0.126984, 0.290957, 0.290957),
+ (0.142857, 0.308607, 0.308607),
+ (0.158730, 0.325300, 0.325300),
+ (0.174603, 0.341178, 0.341178),
+ (0.190476, 0.356348, 0.356348),
+ (0.206349, 0.370899, 0.370899),
+ (0.222222, 0.384900, 0.384900),
+ (0.238095, 0.398410, 0.398410),
+ (0.253968, 0.411476, 0.411476),
+ (0.269841, 0.424139, 0.424139),
+ (0.285714, 0.436436, 0.436436),
+ (0.301587, 0.448395, 0.448395),
+ (0.317460, 0.460044, 0.460044),
+ (0.333333, 0.471405, 0.471405),
+ (0.349206, 0.482498, 0.482498),
+ (0.365079, 0.493342, 0.493342),
+ (0.380952, 0.517549, 0.517549),
+ (0.396825, 0.540674, 0.540674),
+ (0.412698, 0.562849, 0.562849),
+ (0.428571, 0.584183, 0.584183),
+ (0.444444, 0.604765, 0.604765),
+ (0.460317, 0.624669, 0.624669),
+ (0.476190, 0.643958, 0.643958),
+ (0.492063, 0.662687, 0.662687),
+ (0.507937, 0.680900, 0.680900),
+ (0.523810, 0.698638, 0.698638),
+ (0.539683, 0.715937, 0.715937),
+ (0.555556, 0.732828, 0.732828),
+ (0.571429, 0.749338, 0.749338),
+ (0.587302, 0.765493, 0.765493),
+ (0.603175, 0.781313, 0.781313),
+ (0.619048, 0.796819, 0.796819),
+ (0.634921, 0.812029, 0.812029),
+ (0.650794, 0.826960, 0.826960),
+ (0.666667, 0.841625, 0.841625),
+ (0.682540, 0.856040, 0.856040),
+ (0.698413, 0.870216, 0.870216),
+ (0.714286, 0.884164, 0.884164),
+ (0.730159, 0.897896, 0.897896),
+ (0.746032, 0.911421, 0.911421),
+ (0.761905, 0.917208, 0.917208),
+ (0.777778, 0.922958, 0.922958),
+ (0.793651, 0.928673, 0.928673),
+ (0.809524, 0.934353, 0.934353),
+ (0.825397, 0.939999, 0.939999),
+ (0.841270, 0.945611, 0.945611),
+ (0.857143, 0.951190, 0.951190),
+ (0.873016, 0.956736, 0.956736),
+ (0.888889, 0.962250, 0.962250),
+ (0.904762, 0.967733, 0.967733),
+ (0.920635, 0.973185, 0.973185),
+ (0.936508, 0.978607, 0.978607),
+ (0.952381, 0.983999, 0.983999),
+ (0.968254, 0.989361, 0.989361),
+ (0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)),
+ 'blue': ((0., 0., 0.), (0.015873, 0.102869, 0.102869),
+ (0.031746, 0.145479, 0.145479),
+ (0.047619, 0.178174, 0.178174),
+ (0.063492, 0.205738, 0.205738),
+ (0.079365, 0.230022, 0.230022),
+ (0.095238, 0.251976, 0.251976),
+ (0.111111, 0.272166, 0.272166),
+ (0.126984, 0.290957, 0.290957),
+ (0.142857, 0.308607, 0.308607),
+ (0.158730, 0.325300, 0.325300),
+ (0.174603, 0.341178, 0.341178),
+ (0.190476, 0.356348, 0.356348),
+ (0.206349, 0.370899, 0.370899),
+ (0.222222, 0.384900, 0.384900),
+ (0.238095, 0.398410, 0.398410),
+ (0.253968, 0.411476, 0.411476),
+ (0.269841, 0.424139, 0.424139),
+ (0.285714, 0.436436, 0.436436),
+ (0.301587, 0.448395, 0.448395),
+ (0.317460, 0.460044, 0.460044),
+ (0.333333, 0.471405, 0.471405),
+ (0.349206, 0.482498, 0.482498),
+ (0.365079, 0.493342, 0.493342),
+ (0.380952, 0.503953, 0.503953),
+ (0.396825, 0.514344, 0.514344),
+ (0.412698, 0.524531, 0.524531),
+ (0.428571, 0.534522, 0.534522),
+ (0.444444, 0.544331, 0.544331),
+ (0.460317, 0.553966, 0.553966),
+ (0.476190, 0.563436, 0.563436),
+ (0.492063, 0.572750, 0.572750),
+ (0.507937, 0.581914, 0.581914),
+ (0.523810, 0.590937, 0.590937),
+ (0.539683, 0.599824, 0.599824),
+ (0.555556, 0.608581, 0.608581),
+ (0.571429, 0.617213, 0.617213),
+ (0.587302, 0.625727, 0.625727),
+ (0.603175, 0.634126, 0.634126),
+ (0.619048, 0.642416, 0.642416),
+ (0.634921, 0.650600, 0.650600),
+ (0.650794, 0.658682, 0.658682),
+ (0.666667, 0.666667, 0.666667),
+ (0.682540, 0.674556, 0.674556),
+ (0.698413, 0.682355, 0.682355),
+ (0.714286, 0.690066, 0.690066),
+ (0.730159, 0.697691, 0.697691),
+ (0.746032, 0.705234, 0.705234),
+ (0.761905, 0.727166, 0.727166),
+ (0.777778, 0.748455, 0.748455),
+ (0.793651, 0.769156, 0.769156),
+ (0.809524, 0.789314, 0.789314),
+ (0.825397, 0.808969, 0.808969),
+ (0.841270, 0.828159, 0.828159),
+ (0.857143, 0.846913, 0.846913),
+ (0.873016, 0.865261, 0.865261),
+ (0.888889, 0.883229, 0.883229),
+ (0.904762, 0.900837, 0.900837),
+ (0.920635, 0.918109, 0.918109),
+ (0.936508, 0.935061, 0.935061),
+ (0.952381, 0.951711, 0.951711),
+ (0.968254, 0.968075, 0.968075),
+ (0.984127, 0.984167, 0.984167), (1.0, 1.0, 1.0))}
+
+_spring_data = {'red': ((0., 1., 1.), (1.0, 1.0, 1.0)),
+ 'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
+ 'blue': ((0., 1., 1.), (1.0, 0.0, 0.0))}
+
+
+_summer_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
+ 'green': ((0., 0.5, 0.5), (1.0, 1.0, 1.0)),
+ 'blue': ((0., 0.4, 0.4), (1.0, 0.4, 0.4))}
+
+
+_winter_data = {'red': ((0., 0., 0.), (1.0, 0.0, 0.0)),
+ 'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
+ 'blue': ((0., 1., 1.), (1.0, 0.5, 0.5))}
+
+_nipy_spectral_data = {
+ 'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
+ (0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
+ (0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
+ (0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
+ (0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
+ (0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
+ (0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
+ (0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
+ (0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
+ (0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
+ (1.0, 0.80, 0.80)],
+ 'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
+ (0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
+ (0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
+ (0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
+ (0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
+ (0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
+ (0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
+ (0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
+ (0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
+ (0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
+ (1.0, 0.80, 0.80)],
+ 'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
+ (0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
+ (0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
+ (0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
+ (0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
+ (0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
+ (0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
+ (0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
+ (0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
+ (0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
+ (1.0, 0.80, 0.80)],
+}
+
+
+# 34 colormaps based on color specifications and designs
+# developed by Cynthia Brewer (http://colorbrewer.org).
+# The ColorBrewer palettes have been included under the terms
+# of an Apache-style license (for details, see the file
+# LICENSE_COLORBREWER in the license directory of the matplotlib
+# source distribution).
+
+# RGB values taken from Brewer's Excel sheet, divided by 255
+
+_Blues_data = (
+ (0.96862745098039216, 0.98431372549019602, 1.0 ),
+ (0.87058823529411766, 0.92156862745098034, 0.96862745098039216),
+ (0.77647058823529413, 0.85882352941176465, 0.93725490196078431),
+ (0.61960784313725492, 0.792156862745098 , 0.88235294117647056),
+ (0.41960784313725491, 0.68235294117647061, 0.83921568627450982),
+ (0.25882352941176473, 0.5725490196078431 , 0.77647058823529413),
+ (0.12941176470588237, 0.44313725490196076, 0.70980392156862748),
+ (0.03137254901960784, 0.31764705882352939, 0.61176470588235299),
+ (0.03137254901960784, 0.18823529411764706, 0.41960784313725491)
+ )
+
+_BrBG_data = (
+ (0.32941176470588235, 0.18823529411764706, 0.0196078431372549 ),
+ (0.5490196078431373 , 0.31764705882352939, 0.0392156862745098 ),
+ (0.74901960784313726, 0.50588235294117645, 0.17647058823529413),
+ (0.87450980392156863, 0.76078431372549016, 0.49019607843137253),
+ (0.96470588235294119, 0.90980392156862744, 0.76470588235294112),
+ (0.96078431372549022, 0.96078431372549022, 0.96078431372549022),
+ (0.7803921568627451 , 0.91764705882352937, 0.89803921568627454),
+ (0.50196078431372548, 0.80392156862745101, 0.75686274509803919),
+ (0.20784313725490197, 0.59215686274509804, 0.5607843137254902 ),
+ (0.00392156862745098, 0.4 , 0.36862745098039218),
+ (0.0 , 0.23529411764705882, 0.18823529411764706)
+ )
+
+_BuGn_data = (
+ (0.96862745098039216, 0.9882352941176471 , 0.99215686274509807),
+ (0.89803921568627454, 0.96078431372549022, 0.97647058823529409),
+ (0.8 , 0.92549019607843142, 0.90196078431372551),
+ (0.6 , 0.84705882352941175, 0.78823529411764703),
+ (0.4 , 0.76078431372549016, 0.64313725490196083),
+ (0.25490196078431371, 0.68235294117647061, 0.46274509803921571),
+ (0.13725490196078433, 0.54509803921568623, 0.27058823529411763),
+ (0.0 , 0.42745098039215684, 0.17254901960784313),
+ (0.0 , 0.26666666666666666, 0.10588235294117647)
+ )
+
+_BuPu_data = (
+ (0.96862745098039216, 0.9882352941176471 , 0.99215686274509807),
+ (0.8784313725490196 , 0.92549019607843142, 0.95686274509803926),
+ (0.74901960784313726, 0.82745098039215681, 0.90196078431372551),
+ (0.61960784313725492, 0.73725490196078436, 0.85490196078431369),
+ (0.5490196078431373 , 0.58823529411764708, 0.77647058823529413),
+ (0.5490196078431373 , 0.41960784313725491, 0.69411764705882351),
+ (0.53333333333333333, 0.25490196078431371, 0.61568627450980395),
+ (0.50588235294117645, 0.05882352941176471, 0.48627450980392156),
+ (0.30196078431372547, 0.0 , 0.29411764705882354)
+ )
+
+_GnBu_data = (
+ (0.96862745098039216, 0.9882352941176471 , 0.94117647058823528),
+ (0.8784313725490196 , 0.95294117647058818, 0.85882352941176465),
+ (0.8 , 0.92156862745098034, 0.77254901960784317),
+ (0.6588235294117647 , 0.8666666666666667 , 0.70980392156862748),
+ (0.4823529411764706 , 0.8 , 0.7686274509803922 ),
+ (0.30588235294117649, 0.70196078431372544, 0.82745098039215681),
+ (0.16862745098039217, 0.5490196078431373 , 0.74509803921568629),
+ (0.03137254901960784, 0.40784313725490196, 0.67450980392156867),
+ (0.03137254901960784, 0.25098039215686274, 0.50588235294117645)
+ )
+
+_Greens_data = (
+ (0.96862745098039216, 0.9882352941176471 , 0.96078431372549022),
+ (0.89803921568627454, 0.96078431372549022, 0.8784313725490196 ),
+ (0.7803921568627451 , 0.9137254901960784 , 0.75294117647058822),
+ (0.63137254901960782, 0.85098039215686272, 0.60784313725490191),
+ (0.45490196078431372, 0.7686274509803922 , 0.46274509803921571),
+ (0.25490196078431371, 0.6705882352941176 , 0.36470588235294116),
+ (0.13725490196078433, 0.54509803921568623, 0.27058823529411763),
+ (0.0 , 0.42745098039215684, 0.17254901960784313),
+ (0.0 , 0.26666666666666666, 0.10588235294117647)
+ )
+
+_Greys_data = (
+ (1.0 , 1.0 , 1.0 ),
+ (0.94117647058823528, 0.94117647058823528, 0.94117647058823528),
+ (0.85098039215686272, 0.85098039215686272, 0.85098039215686272),
+ (0.74117647058823533, 0.74117647058823533, 0.74117647058823533),
+ (0.58823529411764708, 0.58823529411764708, 0.58823529411764708),
+ (0.45098039215686275, 0.45098039215686275, 0.45098039215686275),
+ (0.32156862745098042, 0.32156862745098042, 0.32156862745098042),
+ (0.14509803921568629, 0.14509803921568629, 0.14509803921568629),
+ (0.0 , 0.0 , 0.0 )
+ )
+
+_Oranges_data = (
+ (1.0 , 0.96078431372549022, 0.92156862745098034),
+ (0.99607843137254903, 0.90196078431372551, 0.80784313725490198),
+ (0.99215686274509807, 0.81568627450980391, 0.63529411764705879),
+ (0.99215686274509807, 0.68235294117647061, 0.41960784313725491),
+ (0.99215686274509807, 0.55294117647058827, 0.23529411764705882),
+ (0.94509803921568625, 0.41176470588235292, 0.07450980392156863),
+ (0.85098039215686272, 0.28235294117647058, 0.00392156862745098),
+ (0.65098039215686276, 0.21176470588235294, 0.01176470588235294),
+ (0.49803921568627452, 0.15294117647058825, 0.01568627450980392)
+ )
+
+_OrRd_data = (
+ (1.0 , 0.96862745098039216, 0.92549019607843142),
+ (0.99607843137254903, 0.90980392156862744, 0.78431372549019607),
+ (0.99215686274509807, 0.83137254901960789, 0.61960784313725492),
+ (0.99215686274509807, 0.73333333333333328, 0.51764705882352946),
+ (0.9882352941176471 , 0.55294117647058827, 0.34901960784313724),
+ (0.93725490196078431, 0.396078431372549 , 0.28235294117647058),
+ (0.84313725490196079, 0.18823529411764706, 0.12156862745098039),
+ (0.70196078431372544, 0.0 , 0.0 ),
+ (0.49803921568627452, 0.0 , 0.0 )
+ )
+
+_PiYG_data = (
+ (0.55686274509803924, 0.00392156862745098, 0.32156862745098042),
+ (0.77254901960784317, 0.10588235294117647, 0.49019607843137253),
+ (0.87058823529411766, 0.46666666666666667, 0.68235294117647061),
+ (0.94509803921568625, 0.71372549019607845, 0.85490196078431369),
+ (0.99215686274509807, 0.8784313725490196 , 0.93725490196078431),
+ (0.96862745098039216, 0.96862745098039216, 0.96862745098039216),
+ (0.90196078431372551, 0.96078431372549022, 0.81568627450980391),
+ (0.72156862745098038, 0.88235294117647056, 0.52549019607843139),
+ (0.49803921568627452, 0.73725490196078436, 0.25490196078431371),
+ (0.30196078431372547, 0.5725490196078431 , 0.12941176470588237),
+ (0.15294117647058825, 0.39215686274509803, 0.09803921568627451)
+ )
+
+_PRGn_data = (
+ (0.25098039215686274, 0.0 , 0.29411764705882354),
+ (0.46274509803921571, 0.16470588235294117, 0.51372549019607838),
+ (0.6 , 0.4392156862745098 , 0.6705882352941176 ),
+ (0.76078431372549016, 0.6470588235294118 , 0.81176470588235294),
+ (0.90588235294117647, 0.83137254901960789, 0.90980392156862744),
+ (0.96862745098039216, 0.96862745098039216, 0.96862745098039216),
+ (0.85098039215686272, 0.94117647058823528, 0.82745098039215681),
+ (0.65098039215686276, 0.85882352941176465, 0.62745098039215685),
+ (0.35294117647058826, 0.68235294117647061, 0.38039215686274508),
+ (0.10588235294117647, 0.47058823529411764, 0.21568627450980393),
+ (0.0 , 0.26666666666666666, 0.10588235294117647)
+ )
+
+_PuBu_data = (
+ (1.0 , 0.96862745098039216, 0.98431372549019602),
+ (0.92549019607843142, 0.90588235294117647, 0.94901960784313721),
+ (0.81568627450980391, 0.81960784313725488, 0.90196078431372551),
+ (0.65098039215686276, 0.74117647058823533, 0.85882352941176465),
+ (0.45490196078431372, 0.66274509803921566, 0.81176470588235294),
+ (0.21176470588235294, 0.56470588235294117, 0.75294117647058822),
+ (0.0196078431372549 , 0.4392156862745098 , 0.69019607843137254),
+ (0.01568627450980392, 0.35294117647058826, 0.55294117647058827),
+ (0.00784313725490196, 0.2196078431372549 , 0.34509803921568627)
+ )
+
+_PuBuGn_data = (
+ (1.0 , 0.96862745098039216, 0.98431372549019602),
+ (0.92549019607843142, 0.88627450980392153, 0.94117647058823528),
+ (0.81568627450980391, 0.81960784313725488, 0.90196078431372551),
+ (0.65098039215686276, 0.74117647058823533, 0.85882352941176465),
+ (0.40392156862745099, 0.66274509803921566, 0.81176470588235294),
+ (0.21176470588235294, 0.56470588235294117, 0.75294117647058822),
+ (0.00784313725490196, 0.50588235294117645, 0.54117647058823526),
+ (0.00392156862745098, 0.42352941176470588, 0.34901960784313724),
+ (0.00392156862745098, 0.27450980392156865, 0.21176470588235294)
+ )
+
+_PuOr_data = (
+ (0.49803921568627452, 0.23137254901960785, 0.03137254901960784),
+ (0.70196078431372544, 0.34509803921568627, 0.02352941176470588),
+ (0.8784313725490196 , 0.50980392156862742, 0.07843137254901961),
+ (0.99215686274509807, 0.72156862745098038, 0.38823529411764707),
+ (0.99607843137254903, 0.8784313725490196 , 0.71372549019607845),
+ (0.96862745098039216, 0.96862745098039216, 0.96862745098039216),
+ (0.84705882352941175, 0.85490196078431369, 0.92156862745098034),
+ (0.69803921568627447, 0.6705882352941176 , 0.82352941176470584),
+ (0.50196078431372548, 0.45098039215686275, 0.67450980392156867),
+ (0.32941176470588235, 0.15294117647058825, 0.53333333333333333),
+ (0.17647058823529413, 0.0 , 0.29411764705882354)
+ )
+
+_PuRd_data = (
+ (0.96862745098039216, 0.95686274509803926, 0.97647058823529409),
+ (0.90588235294117647, 0.88235294117647056, 0.93725490196078431),
+ (0.83137254901960789, 0.72549019607843135, 0.85490196078431369),
+ (0.78823529411764703, 0.58039215686274515, 0.7803921568627451 ),
+ (0.87450980392156863, 0.396078431372549 , 0.69019607843137254),
+ (0.90588235294117647, 0.16078431372549021, 0.54117647058823526),
+ (0.80784313725490198, 0.07058823529411765, 0.33725490196078434),
+ (0.59607843137254901, 0.0 , 0.2627450980392157 ),
+ (0.40392156862745099, 0.0 , 0.12156862745098039)
+ )
+
+_Purples_data = (
+ (0.9882352941176471 , 0.98431372549019602, 0.99215686274509807),
+ (0.93725490196078431, 0.92941176470588238, 0.96078431372549022),
+ (0.85490196078431369, 0.85490196078431369, 0.92156862745098034),
+ (0.73725490196078436, 0.74117647058823533, 0.86274509803921573),
+ (0.61960784313725492, 0.60392156862745094, 0.78431372549019607),
+ (0.50196078431372548, 0.49019607843137253, 0.72941176470588232),
+ (0.41568627450980394, 0.31764705882352939, 0.63921568627450975),
+ (0.32941176470588235, 0.15294117647058825, 0.5607843137254902 ),
+ (0.24705882352941178, 0.0 , 0.49019607843137253)
+ )
+
+_RdBu_data = (
+ (0.40392156862745099, 0.0 , 0.12156862745098039),
+ (0.69803921568627447, 0.09411764705882353, 0.16862745098039217),
+ (0.83921568627450982, 0.37647058823529411, 0.30196078431372547),
+ (0.95686274509803926, 0.6470588235294118 , 0.50980392156862742),
+ (0.99215686274509807, 0.85882352941176465, 0.7803921568627451 ),
+ (0.96862745098039216, 0.96862745098039216, 0.96862745098039216),
+ (0.81960784313725488, 0.89803921568627454, 0.94117647058823528),
+ (0.5725490196078431 , 0.77254901960784317, 0.87058823529411766),
+ (0.2627450980392157 , 0.57647058823529407, 0.76470588235294112),
+ (0.12941176470588237, 0.4 , 0.67450980392156867),
+ (0.0196078431372549 , 0.18823529411764706, 0.38039215686274508)
+ )
+
+_RdGy_data = (
+ (0.40392156862745099, 0.0 , 0.12156862745098039),
+ (0.69803921568627447, 0.09411764705882353, 0.16862745098039217),
+ (0.83921568627450982, 0.37647058823529411, 0.30196078431372547),
+ (0.95686274509803926, 0.6470588235294118 , 0.50980392156862742),
+ (0.99215686274509807, 0.85882352941176465, 0.7803921568627451 ),
+ (1.0 , 1.0 , 1.0 ),
+ (0.8784313725490196 , 0.8784313725490196 , 0.8784313725490196 ),
+ (0.72941176470588232, 0.72941176470588232, 0.72941176470588232),
+ (0.52941176470588236, 0.52941176470588236, 0.52941176470588236),
+ (0.30196078431372547, 0.30196078431372547, 0.30196078431372547),
+ (0.10196078431372549, 0.10196078431372549, 0.10196078431372549)
+ )
+
+_RdPu_data = (
+ (1.0 , 0.96862745098039216, 0.95294117647058818),
+ (0.99215686274509807, 0.8784313725490196 , 0.86666666666666667),
+ (0.9882352941176471 , 0.77254901960784317, 0.75294117647058822),
+ (0.98039215686274506, 0.62352941176470589, 0.70980392156862748),
+ (0.96862745098039216, 0.40784313725490196, 0.63137254901960782),
+ (0.86666666666666667, 0.20392156862745098, 0.59215686274509804),
+ (0.68235294117647061, 0.00392156862745098, 0.49411764705882355),
+ (0.47843137254901963, 0.00392156862745098, 0.46666666666666667),
+ (0.28627450980392155, 0.0 , 0.41568627450980394)
+ )
+
+_RdYlBu_data = (
+ (0.6470588235294118 , 0.0 , 0.14901960784313725),
+ (0.84313725490196079, 0.18823529411764706 , 0.15294117647058825),
+ (0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ),
+ (0.99215686274509807, 0.68235294117647061 , 0.38039215686274508),
+ (0.99607843137254903, 0.8784313725490196 , 0.56470588235294117),
+ (1.0 , 1.0 , 0.74901960784313726),
+ (0.8784313725490196 , 0.95294117647058818 , 0.97254901960784312),
+ (0.6705882352941176 , 0.85098039215686272 , 0.9137254901960784 ),
+ (0.45490196078431372, 0.67843137254901964 , 0.81960784313725488),
+ (0.27058823529411763, 0.45882352941176469 , 0.70588235294117652),
+ (0.19215686274509805, 0.21176470588235294 , 0.58431372549019611)
+ )
+
+_RdYlGn_data = (
+ (0.6470588235294118 , 0.0 , 0.14901960784313725),
+ (0.84313725490196079, 0.18823529411764706 , 0.15294117647058825),
+ (0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ),
+ (0.99215686274509807, 0.68235294117647061 , 0.38039215686274508),
+ (0.99607843137254903, 0.8784313725490196 , 0.54509803921568623),
+ (1.0 , 1.0 , 0.74901960784313726),
+ (0.85098039215686272, 0.93725490196078431 , 0.54509803921568623),
+ (0.65098039215686276, 0.85098039215686272 , 0.41568627450980394),
+ (0.4 , 0.74117647058823533 , 0.38823529411764707),
+ (0.10196078431372549, 0.59607843137254901 , 0.31372549019607843),
+ (0.0 , 0.40784313725490196 , 0.21568627450980393)
+ )
+
+_Reds_data = (
+ (1.0 , 0.96078431372549022 , 0.94117647058823528),
+ (0.99607843137254903, 0.8784313725490196 , 0.82352941176470584),
+ (0.9882352941176471 , 0.73333333333333328 , 0.63137254901960782),
+ (0.9882352941176471 , 0.5725490196078431 , 0.44705882352941179),
+ (0.98431372549019602, 0.41568627450980394 , 0.29019607843137257),
+ (0.93725490196078431, 0.23137254901960785 , 0.17254901960784313),
+ (0.79607843137254897, 0.094117647058823528, 0.11372549019607843),
+ (0.6470588235294118 , 0.058823529411764705, 0.08235294117647058),
+ (0.40392156862745099, 0.0 , 0.05098039215686274)
+ )
+
+_Spectral_data = (
+ (0.61960784313725492, 0.003921568627450980, 0.25882352941176473),
+ (0.83529411764705885, 0.24313725490196078 , 0.30980392156862746),
+ (0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ),
+ (0.99215686274509807, 0.68235294117647061 , 0.38039215686274508),
+ (0.99607843137254903, 0.8784313725490196 , 0.54509803921568623),
+ (1.0 , 1.0 , 0.74901960784313726),
+ (0.90196078431372551, 0.96078431372549022 , 0.59607843137254901),
+ (0.6705882352941176 , 0.8666666666666667 , 0.64313725490196083),
+ (0.4 , 0.76078431372549016 , 0.6470588235294118 ),
+ (0.19607843137254902, 0.53333333333333333 , 0.74117647058823533),
+ (0.36862745098039218, 0.30980392156862746 , 0.63529411764705879)
+ )
+
+_YlGn_data = (
+ (1.0 , 1.0 , 0.89803921568627454),
+ (0.96862745098039216, 0.9882352941176471 , 0.72549019607843135),
+ (0.85098039215686272, 0.94117647058823528 , 0.63921568627450975),
+ (0.67843137254901964, 0.8666666666666667 , 0.55686274509803924),
+ (0.47058823529411764, 0.77647058823529413 , 0.47450980392156861),
+ (0.25490196078431371, 0.6705882352941176 , 0.36470588235294116),
+ (0.13725490196078433, 0.51764705882352946 , 0.2627450980392157 ),
+ (0.0 , 0.40784313725490196 , 0.21568627450980393),
+ (0.0 , 0.27058823529411763 , 0.16078431372549021)
+ )
+
+_YlGnBu_data = (
+ (1.0 , 1.0 , 0.85098039215686272),
+ (0.92941176470588238, 0.97254901960784312 , 0.69411764705882351),
+ (0.7803921568627451 , 0.9137254901960784 , 0.70588235294117652),
+ (0.49803921568627452, 0.80392156862745101 , 0.73333333333333328),
+ (0.25490196078431371, 0.71372549019607845 , 0.7686274509803922 ),
+ (0.11372549019607843, 0.56862745098039214 , 0.75294117647058822),
+ (0.13333333333333333, 0.36862745098039218 , 0.6588235294117647 ),
+ (0.14509803921568629, 0.20392156862745098 , 0.58039215686274515),
+ (0.03137254901960784, 0.11372549019607843 , 0.34509803921568627)
+ )
+
+_YlOrBr_data = (
+ (1.0 , 1.0 , 0.89803921568627454),
+ (1.0 , 0.96862745098039216 , 0.73725490196078436),
+ (0.99607843137254903, 0.8901960784313725 , 0.56862745098039214),
+ (0.99607843137254903, 0.7686274509803922 , 0.30980392156862746),
+ (0.99607843137254903, 0.6 , 0.16078431372549021),
+ (0.92549019607843142, 0.4392156862745098 , 0.07843137254901961),
+ (0.8 , 0.29803921568627451 , 0.00784313725490196),
+ (0.6 , 0.20392156862745098 , 0.01568627450980392),
+ (0.4 , 0.14509803921568629 , 0.02352941176470588)
+ )
+
+_YlOrRd_data = (
+ (1.0 , 1.0 , 0.8 ),
+ (1.0 , 0.92941176470588238 , 0.62745098039215685),
+ (0.99607843137254903, 0.85098039215686272 , 0.46274509803921571),
+ (0.99607843137254903, 0.69803921568627447 , 0.29803921568627451),
+ (0.99215686274509807, 0.55294117647058827 , 0.23529411764705882),
+ (0.9882352941176471 , 0.30588235294117649 , 0.16470588235294117),
+ (0.8901960784313725 , 0.10196078431372549 , 0.10980392156862745),
+ (0.74117647058823533, 0.0 , 0.14901960784313725),
+ (0.50196078431372548, 0.0 , 0.14901960784313725)
+ )
+
+
+# ColorBrewer's qualitative maps, implemented using ListedColormap
+# for use with mpl.colors.NoNorm
+
+_Accent_data = (
+ (0.49803921568627452, 0.78823529411764703, 0.49803921568627452),
+ (0.74509803921568629, 0.68235294117647061, 0.83137254901960789),
+ (0.99215686274509807, 0.75294117647058822, 0.52549019607843139),
+ (1.0, 1.0, 0.6 ),
+ (0.2196078431372549, 0.42352941176470588, 0.69019607843137254),
+ (0.94117647058823528, 0.00784313725490196, 0.49803921568627452),
+ (0.74901960784313726, 0.35686274509803922, 0.09019607843137254),
+ (0.4, 0.4, 0.4 ),
+ )
+
+_Dark2_data = (
+ (0.10588235294117647, 0.61960784313725492, 0.46666666666666667),
+ (0.85098039215686272, 0.37254901960784315, 0.00784313725490196),
+ (0.45882352941176469, 0.4392156862745098, 0.70196078431372544),
+ (0.90588235294117647, 0.16078431372549021, 0.54117647058823526),
+ (0.4, 0.65098039215686276, 0.11764705882352941),
+ (0.90196078431372551, 0.6705882352941176, 0.00784313725490196),
+ (0.65098039215686276, 0.46274509803921571, 0.11372549019607843),
+ (0.4, 0.4, 0.4 ),
+ )
+
+_Paired_data = (
+ (0.65098039215686276, 0.80784313725490198, 0.8901960784313725 ),
+ (0.12156862745098039, 0.47058823529411764, 0.70588235294117652),
+ (0.69803921568627447, 0.87450980392156863, 0.54117647058823526),
+ (0.2, 0.62745098039215685, 0.17254901960784313),
+ (0.98431372549019602, 0.60392156862745094, 0.6 ),
+ (0.8901960784313725, 0.10196078431372549, 0.10980392156862745),
+ (0.99215686274509807, 0.74901960784313726, 0.43529411764705883),
+ (1.0, 0.49803921568627452, 0.0 ),
+ (0.792156862745098, 0.69803921568627447, 0.83921568627450982),
+ (0.41568627450980394, 0.23921568627450981, 0.60392156862745094),
+ (1.0, 1.0, 0.6 ),
+ (0.69411764705882351, 0.34901960784313724, 0.15686274509803921),
+ )
+
+_Pastel1_data = (
+ (0.98431372549019602, 0.70588235294117652, 0.68235294117647061),
+ (0.70196078431372544, 0.80392156862745101, 0.8901960784313725 ),
+ (0.8, 0.92156862745098034, 0.77254901960784317),
+ (0.87058823529411766, 0.79607843137254897, 0.89411764705882357),
+ (0.99607843137254903, 0.85098039215686272, 0.65098039215686276),
+ (1.0, 1.0, 0.8 ),
+ (0.89803921568627454, 0.84705882352941175, 0.74117647058823533),
+ (0.99215686274509807, 0.85490196078431369, 0.92549019607843142),
+ (0.94901960784313721, 0.94901960784313721, 0.94901960784313721),
+ )
+
+_Pastel2_data = (
+ (0.70196078431372544, 0.88627450980392153, 0.80392156862745101),
+ (0.99215686274509807, 0.80392156862745101, 0.67450980392156867),
+ (0.79607843137254897, 0.83529411764705885, 0.90980392156862744),
+ (0.95686274509803926, 0.792156862745098, 0.89411764705882357),
+ (0.90196078431372551, 0.96078431372549022, 0.78823529411764703),
+ (1.0, 0.94901960784313721, 0.68235294117647061),
+ (0.94509803921568625, 0.88627450980392153, 0.8 ),
+ (0.8, 0.8, 0.8 ),
+ )
+
+_Set1_data = (
+ (0.89411764705882357, 0.10196078431372549, 0.10980392156862745),
+ (0.21568627450980393, 0.49411764705882355, 0.72156862745098038),
+ (0.30196078431372547, 0.68627450980392157, 0.29019607843137257),
+ (0.59607843137254901, 0.30588235294117649, 0.63921568627450975),
+ (1.0, 0.49803921568627452, 0.0 ),
+ (1.0, 1.0, 0.2 ),
+ (0.65098039215686276, 0.33725490196078434, 0.15686274509803921),
+ (0.96862745098039216, 0.50588235294117645, 0.74901960784313726),
+ (0.6, 0.6, 0.6),
+ )
+
+_Set2_data = (
+ (0.4, 0.76078431372549016, 0.6470588235294118 ),
+ (0.9882352941176471, 0.55294117647058827, 0.3843137254901961 ),
+ (0.55294117647058827, 0.62745098039215685, 0.79607843137254897),
+ (0.90588235294117647, 0.54117647058823526, 0.76470588235294112),
+ (0.65098039215686276, 0.84705882352941175, 0.32941176470588235),
+ (1.0, 0.85098039215686272, 0.18431372549019609),
+ (0.89803921568627454, 0.7686274509803922, 0.58039215686274515),
+ (0.70196078431372544, 0.70196078431372544, 0.70196078431372544),
+ )
+
+_Set3_data = (
+ (0.55294117647058827, 0.82745098039215681, 0.7803921568627451 ),
+ (1.0, 1.0, 0.70196078431372544),
+ (0.74509803921568629, 0.72941176470588232, 0.85490196078431369),
+ (0.98431372549019602, 0.50196078431372548, 0.44705882352941179),
+ (0.50196078431372548, 0.69411764705882351, 0.82745098039215681),
+ (0.99215686274509807, 0.70588235294117652, 0.3843137254901961 ),
+ (0.70196078431372544, 0.87058823529411766, 0.41176470588235292),
+ (0.9882352941176471, 0.80392156862745101, 0.89803921568627454),
+ (0.85098039215686272, 0.85098039215686272, 0.85098039215686272),
+ (0.73725490196078436, 0.50196078431372548, 0.74117647058823533),
+ (0.8, 0.92156862745098034, 0.77254901960784317),
+ (1.0, 0.92941176470588238, 0.43529411764705883),
+ )
+
+
+# The next 7 palettes are from the Yorick scientific visualisation package,
+# an evolution of the GIST package, both by David H. Munro.
+# They are released under a BSD-like license (see LICENSE_YORICK in
+# the license directory of the matplotlib source distribution).
+#
+# Most palette functions have been reduced to simple function descriptions
+# by Reinier Heeres, since the rgb components were mostly straight lines.
+# gist_earth_data and gist_ncar_data were simplified by a script and some
+# manual effort.
+
+_gist_earth_data = \
+{'red': (
+(0.0, 0.0, 0.0000),
+(0.2824, 0.1882, 0.1882),
+(0.4588, 0.2714, 0.2714),
+(0.5490, 0.4719, 0.4719),
+(0.6980, 0.7176, 0.7176),
+(0.7882, 0.7553, 0.7553),
+(1.0000, 0.9922, 0.9922),
+), 'green': (
+(0.0, 0.0, 0.0000),
+(0.0275, 0.0000, 0.0000),
+(0.1098, 0.1893, 0.1893),
+(0.1647, 0.3035, 0.3035),
+(0.2078, 0.3841, 0.3841),
+(0.2824, 0.5020, 0.5020),
+(0.5216, 0.6397, 0.6397),
+(0.6980, 0.7171, 0.7171),
+(0.7882, 0.6392, 0.6392),
+(0.7922, 0.6413, 0.6413),
+(0.8000, 0.6447, 0.6447),
+(0.8078, 0.6481, 0.6481),
+(0.8157, 0.6549, 0.6549),
+(0.8667, 0.6991, 0.6991),
+(0.8745, 0.7103, 0.7103),
+(0.8824, 0.7216, 0.7216),
+(0.8902, 0.7323, 0.7323),
+(0.8980, 0.7430, 0.7430),
+(0.9412, 0.8275, 0.8275),
+(0.9569, 0.8635, 0.8635),
+(0.9647, 0.8816, 0.8816),
+(0.9961, 0.9733, 0.9733),
+(1.0000, 0.9843, 0.9843),
+), 'blue': (
+(0.0, 0.0, 0.0000),
+(0.0039, 0.1684, 0.1684),
+(0.0078, 0.2212, 0.2212),
+(0.0275, 0.4329, 0.4329),
+(0.0314, 0.4549, 0.4549),
+(0.2824, 0.5004, 0.5004),
+(0.4667, 0.2748, 0.2748),
+(0.5451, 0.3205, 0.3205),
+(0.7843, 0.3961, 0.3961),
+(0.8941, 0.6651, 0.6651),
+(1.0000, 0.9843, 0.9843),
+)}
+
+_gist_gray_data = {
+ 'red': gfunc[3],
+ 'green': gfunc[3],
+ 'blue': gfunc[3],
+}
+
+_gist_heat_data = {
+ 'red': lambda x: 1.5 * x,
+ 'green': lambda x: 2 * x - 1,
+ 'blue': lambda x: 4 * x - 3,
+}
+
+_gist_ncar_data = \
+{'red': (
+(0.0, 0.0, 0.0000),
+(0.3098, 0.0000, 0.0000),
+(0.3725, 0.3993, 0.3993),
+(0.4235, 0.5003, 0.5003),
+(0.5333, 1.0000, 1.0000),
+(0.7922, 1.0000, 1.0000),
+(0.8471, 0.6218, 0.6218),
+(0.8980, 0.9235, 0.9235),
+(1.0000, 0.9961, 0.9961),
+), 'green': (
+(0.0, 0.0, 0.0000),
+(0.0510, 0.3722, 0.3722),
+(0.1059, 0.0000, 0.0000),
+(0.1569, 0.7202, 0.7202),
+(0.1608, 0.7537, 0.7537),
+(0.1647, 0.7752, 0.7752),
+(0.2157, 1.0000, 1.0000),
+(0.2588, 0.9804, 0.9804),
+(0.2706, 0.9804, 0.9804),
+(0.3176, 1.0000, 1.0000),
+(0.3686, 0.8081, 0.8081),
+(0.4275, 1.0000, 1.0000),
+(0.5216, 1.0000, 1.0000),
+(0.6314, 0.7292, 0.7292),
+(0.6863, 0.2796, 0.2796),
+(0.7451, 0.0000, 0.0000),
+(0.7922, 0.0000, 0.0000),
+(0.8431, 0.1753, 0.1753),
+(0.8980, 0.5000, 0.5000),
+(1.0000, 0.9725, 0.9725),
+), 'blue': (
+(0.0, 0.5020, 0.5020),
+(0.0510, 0.0222, 0.0222),
+(0.1098, 1.0000, 1.0000),
+(0.2039, 1.0000, 1.0000),
+(0.2627, 0.6145, 0.6145),
+(0.3216, 0.0000, 0.0000),
+(0.4157, 0.0000, 0.0000),
+(0.4745, 0.2342, 0.2342),
+(0.5333, 0.0000, 0.0000),
+(0.5804, 0.0000, 0.0000),
+(0.6314, 0.0549, 0.0549),
+(0.6902, 0.0000, 0.0000),
+(0.7373, 0.0000, 0.0000),
+(0.7922, 0.9738, 0.9738),
+(0.8000, 1.0000, 1.0000),
+(0.8431, 1.0000, 1.0000),
+(0.8980, 0.9341, 0.9341),
+(1.0000, 0.9961, 0.9961),
+)}
+
+_gist_rainbow_data = (
+ (0.000, (1.00, 0.00, 0.16)),
+ (0.030, (1.00, 0.00, 0.00)),
+ (0.215, (1.00, 1.00, 0.00)),
+ (0.400, (0.00, 1.00, 0.00)),
+ (0.586, (0.00, 1.00, 1.00)),
+ (0.770, (0.00, 0.00, 1.00)),
+ (0.954, (1.00, 0.00, 1.00)),
+ (1.000, (1.00, 0.00, 0.75))
+)
+
+_gist_stern_data = {
+ 'red': (
+ (0.000, 0.000, 0.000), (0.0547, 1.000, 1.000),
+ (0.250, 0.027, 0.250), # (0.2500, 0.250, 0.250),
+ (1.000, 1.000, 1.000)),
+ 'green': ((0, 0, 0), (1, 1, 1)),
+ 'blue': (
+ (0.000, 0.000, 0.000), (0.500, 1.000, 1.000),
+ (0.735, 0.000, 0.000), (1.000, 1.000, 1.000))
+}
+
+_gist_yarg_data = {
+ 'red': lambda x: 1 - x,
+ 'green': lambda x: 1 - x,
+ 'blue': lambda x: 1 - x,
+}
+
+# This bipolar color map was generated from CoolWarmFloat33.csv of
+# "Diverging Color Maps for Scientific Visualization" by Kenneth Moreland.
+# <http://www.kennethmoreland.com/color-maps/>
+_coolwarm_data = {
+ 'red': [
+ (0.0, 0.2298057, 0.2298057),
+ (0.03125, 0.26623388, 0.26623388),
+ (0.0625, 0.30386891, 0.30386891),
+ (0.09375, 0.342804478, 0.342804478),
+ (0.125, 0.38301334, 0.38301334),
+ (0.15625, 0.424369608, 0.424369608),
+ (0.1875, 0.46666708, 0.46666708),
+ (0.21875, 0.509635204, 0.509635204),
+ (0.25, 0.552953156, 0.552953156),
+ (0.28125, 0.596262162, 0.596262162),
+ (0.3125, 0.639176211, 0.639176211),
+ (0.34375, 0.681291281, 0.681291281),
+ (0.375, 0.722193294, 0.722193294),
+ (0.40625, 0.761464949, 0.761464949),
+ (0.4375, 0.798691636, 0.798691636),
+ (0.46875, 0.833466556, 0.833466556),
+ (0.5, 0.865395197, 0.865395197),
+ (0.53125, 0.897787179, 0.897787179),
+ (0.5625, 0.924127593, 0.924127593),
+ (0.59375, 0.944468518, 0.944468518),
+ (0.625, 0.958852946, 0.958852946),
+ (0.65625, 0.96732803, 0.96732803),
+ (0.6875, 0.969954137, 0.969954137),
+ (0.71875, 0.966811177, 0.966811177),
+ (0.75, 0.958003065, 0.958003065),
+ (0.78125, 0.943660866, 0.943660866),
+ (0.8125, 0.923944917, 0.923944917),
+ (0.84375, 0.89904617, 0.89904617),
+ (0.875, 0.869186849, 0.869186849),
+ (0.90625, 0.834620542, 0.834620542),
+ (0.9375, 0.795631745, 0.795631745),
+ (0.96875, 0.752534934, 0.752534934),
+ (1.0, 0.705673158, 0.705673158)],
+ 'green': [
+ (0.0, 0.298717966, 0.298717966),
+ (0.03125, 0.353094838, 0.353094838),
+ (0.0625, 0.406535296, 0.406535296),
+ (0.09375, 0.458757618, 0.458757618),
+ (0.125, 0.50941904, 0.50941904),
+ (0.15625, 0.558148092, 0.558148092),
+ (0.1875, 0.604562568, 0.604562568),
+ (0.21875, 0.648280772, 0.648280772),
+ (0.25, 0.688929332, 0.688929332),
+ (0.28125, 0.726149107, 0.726149107),
+ (0.3125, 0.759599947, 0.759599947),
+ (0.34375, 0.788964712, 0.788964712),
+ (0.375, 0.813952739, 0.813952739),
+ (0.40625, 0.834302879, 0.834302879),
+ (0.4375, 0.849786142, 0.849786142),
+ (0.46875, 0.860207984, 0.860207984),
+ (0.5, 0.86541021, 0.86541021),
+ (0.53125, 0.848937047, 0.848937047),
+ (0.5625, 0.827384882, 0.827384882),
+ (0.59375, 0.800927443, 0.800927443),
+ (0.625, 0.769767752, 0.769767752),
+ (0.65625, 0.734132809, 0.734132809),
+ (0.6875, 0.694266682, 0.694266682),
+ (0.71875, 0.650421156, 0.650421156),
+ (0.75, 0.602842431, 0.602842431),
+ (0.78125, 0.551750968, 0.551750968),
+ (0.8125, 0.49730856, 0.49730856),
+ (0.84375, 0.439559467, 0.439559467),
+ (0.875, 0.378313092, 0.378313092),
+ (0.90625, 0.312874446, 0.312874446),
+ (0.9375, 0.24128379, 0.24128379),
+ (0.96875, 0.157246067, 0.157246067),
+ (1.0, 0.01555616, 0.01555616)],
+ 'blue': [
+ (0.0, 0.753683153, 0.753683153),
+ (0.03125, 0.801466763, 0.801466763),
+ (0.0625, 0.84495867, 0.84495867),
+ (0.09375, 0.883725899, 0.883725899),
+ (0.125, 0.917387822, 0.917387822),
+ (0.15625, 0.945619588, 0.945619588),
+ (0.1875, 0.968154911, 0.968154911),
+ (0.21875, 0.98478814, 0.98478814),
+ (0.25, 0.995375608, 0.995375608),
+ (0.28125, 0.999836203, 0.999836203),
+ (0.3125, 0.998151185, 0.998151185),
+ (0.34375, 0.990363227, 0.990363227),
+ (0.375, 0.976574709, 0.976574709),
+ (0.40625, 0.956945269, 0.956945269),
+ (0.4375, 0.931688648, 0.931688648),
+ (0.46875, 0.901068838, 0.901068838),
+ (0.5, 0.865395561, 0.865395561),
+ (0.53125, 0.820880546, 0.820880546),
+ (0.5625, 0.774508472, 0.774508472),
+ (0.59375, 0.726736146, 0.726736146),
+ (0.625, 0.678007945, 0.678007945),
+ (0.65625, 0.628751763, 0.628751763),
+ (0.6875, 0.579375448, 0.579375448),
+ (0.71875, 0.530263762, 0.530263762),
+ (0.75, 0.481775914, 0.481775914),
+ (0.78125, 0.434243684, 0.434243684),
+ (0.8125, 0.387970225, 0.387970225),
+ (0.84375, 0.343229596, 0.343229596),
+ (0.875, 0.300267182, 0.300267182),
+ (0.90625, 0.259301199, 0.259301199),
+ (0.9375, 0.220525627, 0.220525627),
+ (0.96875, 0.184115123, 0.184115123),
+ (1.0, 0.150232812, 0.150232812)]
+ }
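+
+# Editorial sketch (not part of the original source): each channel above is a
+# list of (x, y0, y1) anchors on [0, 1]; since y0 == y1 at every anchor here,
+# LinearSegmentedColormap simply interpolates linearly between them, e.g.:
+#
+# >>> from matplotlib.colors import LinearSegmentedColormap
+# >>> cmap = LinearSegmentedColormap('coolwarm', _coolwarm_data)
+# >>> cmap(0.0)  # roughly (0.230, 0.299, 0.754, 1.0) -- the x = 0 anchors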
+
+# Implementation of Carey Rappaport's CMRmap.
+# See `A Color Map for Effective Black-and-White Rendering of Color-Scale
+# Images' by Carey Rappaport
+# http://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m
+_CMRmap_data = {'red': ((0.000, 0.00, 0.00),
+ (0.125, 0.15, 0.15),
+ (0.250, 0.30, 0.30),
+ (0.375, 0.60, 0.60),
+ (0.500, 1.00, 1.00),
+ (0.625, 0.90, 0.90),
+ (0.750, 0.90, 0.90),
+ (0.875, 0.90, 0.90),
+ (1.000, 1.00, 1.00)),
+ 'green': ((0.000, 0.00, 0.00),
+ (0.125, 0.15, 0.15),
+ (0.250, 0.15, 0.15),
+ (0.375, 0.20, 0.20),
+ (0.500, 0.25, 0.25),
+ (0.625, 0.50, 0.50),
+ (0.750, 0.75, 0.75),
+ (0.875, 0.90, 0.90),
+ (1.000, 1.00, 1.00)),
+ 'blue': ((0.000, 0.00, 0.00),
+ (0.125, 0.50, 0.50),
+ (0.250, 0.75, 0.75),
+ (0.375, 0.50, 0.50),
+ (0.500, 0.15, 0.15),
+ (0.625, 0.00, 0.00),
+ (0.750, 0.10, 0.10),
+ (0.875, 0.50, 0.50),
+ (1.000, 1.00, 1.00))}
+
+
+# An MIT licensed, colorblind-friendly heatmap from Wistia:
+# https://github.com/wistia/heatmap-palette
+# http://wistia.com/blog/heatmaps-for-colorblindness
+#
+# >>> import matplotlib.colors as c
+# >>> colors = ["#e4ff7a", "#ffe81a", "#ffbd00", "#ffa000", "#fc7f00"]
+# >>> cm = c.LinearSegmentedColormap.from_list('wistia', colors)
+# >>> _wistia_data = cm._segmentdata
+# >>> del _wistia_data['alpha']
+#
+_wistia_data = {
+ 'red': [(0.0, 0.8941176470588236, 0.8941176470588236),
+ (0.25, 1.0, 1.0),
+ (0.5, 1.0, 1.0),
+ (0.75, 1.0, 1.0),
+ (1.0, 0.9882352941176471, 0.9882352941176471)],
+ 'green': [(0.0, 1.0, 1.0),
+ (0.25, 0.9098039215686274, 0.9098039215686274),
+ (0.5, 0.7411764705882353, 0.7411764705882353),
+ (0.75, 0.6274509803921569, 0.6274509803921569),
+ (1.0, 0.4980392156862745, 0.4980392156862745)],
+ 'blue': [(0.0, 0.47843137254901963, 0.47843137254901963),
+ (0.25, 0.10196078431372549, 0.10196078431372549),
+ (0.5, 0.0, 0.0),
+ (0.75, 0.0, 0.0),
+ (1.0, 0.0, 0.0)],
+}
+
+
+# Categorical palettes from Vega:
+# https://github.com/vega/vega/wiki/Scales
+# (divided by 255)
+#
+
+_tab10_data = (
+ (0.12156862745098039, 0.4666666666666667, 0.7058823529411765 ), # 1f77b4
+ (1.0, 0.4980392156862745, 0.054901960784313725), # ff7f0e
+ (0.17254901960784313, 0.6274509803921569, 0.17254901960784313 ), # 2ca02c
+ (0.8392156862745098, 0.15294117647058825, 0.1568627450980392 ), # d62728
+ (0.5803921568627451, 0.403921568627451, 0.7411764705882353 ), # 9467bd
+ (0.5490196078431373, 0.33725490196078434, 0.29411764705882354 ), # 8c564b
+ (0.8901960784313725, 0.4666666666666667, 0.7607843137254902 ), # e377c2
+ (0.4980392156862745, 0.4980392156862745, 0.4980392156862745 ), # 7f7f7f
+ (0.7372549019607844, 0.7411764705882353, 0.13333333333333333 ), # bcbd22
+ (0.09019607843137255, 0.7450980392156863, 0.8117647058823529), # 17becf
+)
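+
+# Editorial note (illustrative, not in the vendored file): each tuple above is
+# the hex color from its trailing comment with every channel divided by 255:
+#
+# >>> [int(round(v * 255)) for v in _tab10_data[0]]
+# [31, 119, 180]
+# # i.e. 0x1f, 0x77, 0xb4 -> "1f77b4"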
+
+_tab20_data = (
+ (0.12156862745098039, 0.4666666666666667, 0.7058823529411765 ), # 1f77b4
+ (0.6823529411764706, 0.7803921568627451, 0.9098039215686274 ), # aec7e8
+ (1.0, 0.4980392156862745, 0.054901960784313725), # ff7f0e
+ (1.0, 0.7333333333333333, 0.47058823529411764 ), # ffbb78
+ (0.17254901960784313, 0.6274509803921569, 0.17254901960784313 ), # 2ca02c
+ (0.596078431372549, 0.8745098039215686, 0.5411764705882353 ), # 98df8a
+ (0.8392156862745098, 0.15294117647058825, 0.1568627450980392 ), # d62728
+ (1.0, 0.596078431372549, 0.5882352941176471 ), # ff9896
+ (0.5803921568627451, 0.403921568627451, 0.7411764705882353 ), # 9467bd
+ (0.7725490196078432, 0.6901960784313725, 0.8352941176470589 ), # c5b0d5
+ (0.5490196078431373, 0.33725490196078434, 0.29411764705882354 ), # 8c564b
+ (0.7686274509803922, 0.611764705882353, 0.5803921568627451 ), # c49c94
+ (0.8901960784313725, 0.4666666666666667, 0.7607843137254902 ), # e377c2
+ (0.9686274509803922, 0.7137254901960784, 0.8235294117647058 ), # f7b6d2
+ (0.4980392156862745, 0.4980392156862745, 0.4980392156862745 ), # 7f7f7f
+ (0.7803921568627451, 0.7803921568627451, 0.7803921568627451 ), # c7c7c7
+ (0.7372549019607844, 0.7411764705882353, 0.13333333333333333 ), # bcbd22
+ (0.8588235294117647, 0.8588235294117647, 0.5529411764705883 ), # dbdb8d
+ (0.09019607843137255, 0.7450980392156863, 0.8117647058823529 ), # 17becf
+ (0.6196078431372549, 0.8549019607843137, 0.8980392156862745), # 9edae5
+)
+
+_tab20b_data = (
+ (0.2235294117647059, 0.23137254901960785, 0.4745098039215686 ), # 393b79
+ (0.3215686274509804, 0.32941176470588235, 0.6392156862745098 ), # 5254a3
+ (0.4196078431372549, 0.43137254901960786, 0.8117647058823529 ), # 6b6ecf
+ (0.611764705882353, 0.6196078431372549, 0.8705882352941177 ), # 9c9ede
+ (0.38823529411764707, 0.4745098039215686, 0.2235294117647059 ), # 637939
+ (0.5490196078431373, 0.6352941176470588, 0.3215686274509804 ), # 8ca252
+ (0.7098039215686275, 0.8117647058823529, 0.4196078431372549 ), # b5cf6b
+ (0.807843137254902, 0.8588235294117647, 0.611764705882353 ), # cedb9c
+ (0.5490196078431373, 0.42745098039215684, 0.19215686274509805), # 8c6d31
+ (0.7411764705882353, 0.6196078431372549, 0.2235294117647059 ), # bd9e39
+ (0.9058823529411765, 0.7294117647058823, 0.3215686274509804 ), # e7ba52
+ (0.9058823529411765, 0.796078431372549, 0.5803921568627451 ), # e7cb94
+ (0.5176470588235295, 0.23529411764705882, 0.2235294117647059 ), # 843c39
+ (0.6784313725490196, 0.28627450980392155, 0.2901960784313726 ), # ad494a
+ (0.8392156862745098, 0.3803921568627451, 0.4196078431372549 ), # d6616b
+ (0.9058823529411765, 0.5882352941176471, 0.611764705882353 ), # e7969c
+ (0.4823529411764706, 0.2549019607843137, 0.45098039215686275), # 7b4173
+ (0.6470588235294118, 0.3176470588235294, 0.5803921568627451 ), # a55194
+ (0.807843137254902, 0.42745098039215684, 0.7411764705882353 ), # ce6dbd
+ (0.8705882352941177, 0.6196078431372549, 0.8392156862745098 ), # de9ed6
+)
+
+_tab20c_data = (
+ (0.19215686274509805, 0.5098039215686274, 0.7411764705882353 ), # 3182bd
+ (0.4196078431372549, 0.6823529411764706, 0.8392156862745098 ), # 6baed6
+ (0.6196078431372549, 0.792156862745098, 0.8823529411764706 ), # 9ecae1
+ (0.7764705882352941, 0.8588235294117647, 0.9372549019607843 ), # c6dbef
+ (0.9019607843137255, 0.3333333333333333, 0.050980392156862744), # e6550d
+ (0.9921568627450981, 0.5529411764705883, 0.23529411764705882 ), # fd8d3c
+ (0.9921568627450981, 0.6823529411764706, 0.4196078431372549 ), # fdae6b
+ (0.9921568627450981, 0.8156862745098039, 0.6352941176470588 ), # fdd0a2
+ (0.19215686274509805, 0.6392156862745098, 0.32941176470588235 ), # 31a354
+ (0.4549019607843137, 0.7686274509803922, 0.4627450980392157 ), # 74c476
+ (0.6313725490196078, 0.8509803921568627, 0.6078431372549019 ), # a1d99b
+ (0.7803921568627451, 0.9137254901960784, 0.7529411764705882 ), # c7e9c0
+ (0.4588235294117647, 0.4196078431372549, 0.6941176470588235 ), # 756bb1
+ (0.6196078431372549, 0.6039215686274509, 0.7843137254901961 ), # 9e9ac8
+ (0.7372549019607844, 0.7411764705882353, 0.8627450980392157 ), # bcbddc
+ (0.8549019607843137, 0.8549019607843137, 0.9215686274509803 ), # dadaeb
+ (0.38823529411764707, 0.38823529411764707, 0.38823529411764707 ), # 636363
+ (0.5882352941176471, 0.5882352941176471, 0.5882352941176471 ), # 969696
+ (0.7411764705882353, 0.7411764705882353, 0.7411764705882353 ), # bdbdbd
+ (0.8509803921568627, 0.8509803921568627, 0.8509803921568627 ), # d9d9d9
+)
+
+
+datad = {
+ 'Blues': _Blues_data,
+ 'BrBG': _BrBG_data,
+ 'BuGn': _BuGn_data,
+ 'BuPu': _BuPu_data,
+ 'CMRmap': _CMRmap_data,
+ 'GnBu': _GnBu_data,
+ 'Greens': _Greens_data,
+ 'Greys': _Greys_data,
+ 'OrRd': _OrRd_data,
+ 'Oranges': _Oranges_data,
+ 'PRGn': _PRGn_data,
+ 'PiYG': _PiYG_data,
+ 'PuBu': _PuBu_data,
+ 'PuBuGn': _PuBuGn_data,
+ 'PuOr': _PuOr_data,
+ 'PuRd': _PuRd_data,
+ 'Purples': _Purples_data,
+ 'RdBu': _RdBu_data,
+ 'RdGy': _RdGy_data,
+ 'RdPu': _RdPu_data,
+ 'RdYlBu': _RdYlBu_data,
+ 'RdYlGn': _RdYlGn_data,
+ 'Reds': _Reds_data,
+ 'Spectral': _Spectral_data,
+ 'Wistia': _wistia_data,
+ 'YlGn': _YlGn_data,
+ 'YlGnBu': _YlGnBu_data,
+ 'YlOrBr': _YlOrBr_data,
+ 'YlOrRd': _YlOrRd_data,
+ 'afmhot': _afmhot_data,
+ 'autumn': _autumn_data,
+ 'binary': _binary_data,
+ 'bone': _bone_data,
+ 'brg': _brg_data,
+ 'bwr': _bwr_data,
+ 'cool': _cool_data,
+ 'coolwarm': _coolwarm_data,
+ 'copper': _copper_data,
+ 'cubehelix': _cubehelix_data,
+ 'flag': _flag_data,
+ 'gist_earth': _gist_earth_data,
+ 'gist_gray': _gist_gray_data,
+ 'gist_heat': _gist_heat_data,
+ 'gist_ncar': _gist_ncar_data,
+ 'gist_rainbow': _gist_rainbow_data,
+ 'gist_stern': _gist_stern_data,
+ 'gist_yarg': _gist_yarg_data,
+ 'gnuplot': _gnuplot_data,
+ 'gnuplot2': _gnuplot2_data,
+ 'gray': _gray_data,
+ 'hot': _hot_data,
+ 'hsv': _hsv_data,
+ 'jet': _jet_data,
+ 'nipy_spectral': _nipy_spectral_data,
+ 'ocean': _ocean_data,
+ 'pink': _pink_data,
+ 'prism': _prism_data,
+ 'rainbow': _rainbow_data,
+ 'seismic': _seismic_data,
+ 'spring': _spring_data,
+ 'summer': _summer_data,
+ 'terrain': _terrain_data,
+ 'winter': _winter_data,
+ # Qualitative
+ 'Accent': {'listed': _Accent_data},
+ 'Dark2': {'listed': _Dark2_data},
+ 'Paired': {'listed': _Paired_data},
+ 'Pastel1': {'listed': _Pastel1_data},
+ 'Pastel2': {'listed': _Pastel2_data},
+ 'Set1': {'listed': _Set1_data},
+ 'Set2': {'listed': _Set2_data},
+ 'Set3': {'listed': _Set3_data},
+ 'tab10': {'listed': _tab10_data},
+ 'tab20': {'listed': _tab20_data},
+ 'tab20b': {'listed': _tab20b_data},
+ 'tab20c': {'listed': _tab20c_data},
+}
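+
+# Editorial sketch (not part of the upstream module): entries in datad are
+# either segment-data dicts (for LinearSegmentedColormap) or, for the
+# qualitative palettes, {'listed': <sequence of colors>} (for ListedColormap).
+# One hypothetical way to turn an entry into a colormap object:
+#
+# >>> from matplotlib.colors import LinearSegmentedColormap, ListedColormap
+# >>> def _from_datad(name, N=256):
+# ...     spec = datad[name]
+# ...     if 'listed' in spec:
+# ...         return ListedColormap(spec['listed'], name=name)
+# ...     return LinearSegmentedColormap(name, spec, N)
+# >>> _from_datad('coolwarm').N, _from_datad('tab10').N
+# (256, 10)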
diff --git a/contrib/python/matplotlib/py2/matplotlib/_cm_listed.py b/contrib/python/matplotlib/py2/matplotlib/_cm_listed.py
new file mode 100644
index 00000000000..c4a4e13e4d7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/_cm_listed.py
@@ -0,0 +1,1298 @@
+from .colors import ListedColormap
+
+_magma_data = [[0.001462, 0.000466, 0.013866],
+ [0.002258, 0.001295, 0.018331],
+ [0.003279, 0.002305, 0.023708],
+ [0.004512, 0.003490, 0.029965],
+ [0.005950, 0.004843, 0.037130],
+ [0.007588, 0.006356, 0.044973],
+ [0.009426, 0.008022, 0.052844],
+ [0.011465, 0.009828, 0.060750],
+ [0.013708, 0.011771, 0.068667],
+ [0.016156, 0.013840, 0.076603],
+ [0.018815, 0.016026, 0.084584],
+ [0.021692, 0.018320, 0.092610],
+ [0.024792, 0.020715, 0.100676],
+ [0.028123, 0.023201, 0.108787],
+ [0.031696, 0.025765, 0.116965],
+ [0.035520, 0.028397, 0.125209],
+ [0.039608, 0.031090, 0.133515],
+ [0.043830, 0.033830, 0.141886],
+ [0.048062, 0.036607, 0.150327],
+ [0.052320, 0.039407, 0.158841],
+ [0.056615, 0.042160, 0.167446],
+ [0.060949, 0.044794, 0.176129],
+ [0.065330, 0.047318, 0.184892],
+ [0.069764, 0.049726, 0.193735],
+ [0.074257, 0.052017, 0.202660],
+ [0.078815, 0.054184, 0.211667],
+ [0.083446, 0.056225, 0.220755],
+ [0.088155, 0.058133, 0.229922],
+ [0.092949, 0.059904, 0.239164],
+ [0.097833, 0.061531, 0.248477],
+ [0.102815, 0.063010, 0.257854],
+ [0.107899, 0.064335, 0.267289],
+ [0.113094, 0.065492, 0.276784],
+ [0.118405, 0.066479, 0.286321],
+ [0.123833, 0.067295, 0.295879],
+ [0.129380, 0.067935, 0.305443],
+ [0.135053, 0.068391, 0.315000],
+ [0.140858, 0.068654, 0.324538],
+ [0.146785, 0.068738, 0.334011],
+ [0.152839, 0.068637, 0.343404],
+ [0.159018, 0.068354, 0.352688],
+ [0.165308, 0.067911, 0.361816],
+ [0.171713, 0.067305, 0.370771],
+ [0.178212, 0.066576, 0.379497],
+ [0.184801, 0.065732, 0.387973],
+ [0.191460, 0.064818, 0.396152],
+ [0.198177, 0.063862, 0.404009],
+ [0.204935, 0.062907, 0.411514],
+ [0.211718, 0.061992, 0.418647],
+ [0.218512, 0.061158, 0.425392],
+ [0.225302, 0.060445, 0.431742],
+ [0.232077, 0.059889, 0.437695],
+ [0.238826, 0.059517, 0.443256],
+ [0.245543, 0.059352, 0.448436],
+ [0.252220, 0.059415, 0.453248],
+ [0.258857, 0.059706, 0.457710],
+ [0.265447, 0.060237, 0.461840],
+ [0.271994, 0.060994, 0.465660],
+ [0.278493, 0.061978, 0.469190],
+ [0.284951, 0.063168, 0.472451],
+ [0.291366, 0.064553, 0.475462],
+ [0.297740, 0.066117, 0.478243],
+ [0.304081, 0.067835, 0.480812],
+ [0.310382, 0.069702, 0.483186],
+ [0.316654, 0.071690, 0.485380],
+ [0.322899, 0.073782, 0.487408],
+ [0.329114, 0.075972, 0.489287],
+ [0.335308, 0.078236, 0.491024],
+ [0.341482, 0.080564, 0.492631],
+ [0.347636, 0.082946, 0.494121],
+ [0.353773, 0.085373, 0.495501],
+ [0.359898, 0.087831, 0.496778],
+ [0.366012, 0.090314, 0.497960],
+ [0.372116, 0.092816, 0.499053],
+ [0.378211, 0.095332, 0.500067],
+ [0.384299, 0.097855, 0.501002],
+ [0.390384, 0.100379, 0.501864],
+ [0.396467, 0.102902, 0.502658],
+ [0.402548, 0.105420, 0.503386],
+ [0.408629, 0.107930, 0.504052],
+ [0.414709, 0.110431, 0.504662],
+ [0.420791, 0.112920, 0.505215],
+ [0.426877, 0.115395, 0.505714],
+ [0.432967, 0.117855, 0.506160],
+ [0.439062, 0.120298, 0.506555],
+ [0.445163, 0.122724, 0.506901],
+ [0.451271, 0.125132, 0.507198],
+ [0.457386, 0.127522, 0.507448],
+ [0.463508, 0.129893, 0.507652],
+ [0.469640, 0.132245, 0.507809],
+ [0.475780, 0.134577, 0.507921],
+ [0.481929, 0.136891, 0.507989],
+ [0.488088, 0.139186, 0.508011],
+ [0.494258, 0.141462, 0.507988],
+ [0.500438, 0.143719, 0.507920],
+ [0.506629, 0.145958, 0.507806],
+ [0.512831, 0.148179, 0.507648],
+ [0.519045, 0.150383, 0.507443],
+ [0.525270, 0.152569, 0.507192],
+ [0.531507, 0.154739, 0.506895],
+ [0.537755, 0.156894, 0.506551],
+ [0.544015, 0.159033, 0.506159],
+ [0.550287, 0.161158, 0.505719],
+ [0.556571, 0.163269, 0.505230],
+ [0.562866, 0.165368, 0.504692],
+ [0.569172, 0.167454, 0.504105],
+ [0.575490, 0.169530, 0.503466],
+ [0.581819, 0.171596, 0.502777],
+ [0.588158, 0.173652, 0.502035],
+ [0.594508, 0.175701, 0.501241],
+ [0.600868, 0.177743, 0.500394],
+ [0.607238, 0.179779, 0.499492],
+ [0.613617, 0.181811, 0.498536],
+ [0.620005, 0.183840, 0.497524],
+ [0.626401, 0.185867, 0.496456],
+ [0.632805, 0.187893, 0.495332],
+ [0.639216, 0.189921, 0.494150],
+ [0.645633, 0.191952, 0.492910],
+ [0.652056, 0.193986, 0.491611],
+ [0.658483, 0.196027, 0.490253],
+ [0.664915, 0.198075, 0.488836],
+ [0.671349, 0.200133, 0.487358],
+ [0.677786, 0.202203, 0.485819],
+ [0.684224, 0.204286, 0.484219],
+ [0.690661, 0.206384, 0.482558],
+ [0.697098, 0.208501, 0.480835],
+ [0.703532, 0.210638, 0.479049],
+ [0.709962, 0.212797, 0.477201],
+ [0.716387, 0.214982, 0.475290],
+ [0.722805, 0.217194, 0.473316],
+ [0.729216, 0.219437, 0.471279],
+ [0.735616, 0.221713, 0.469180],
+ [0.742004, 0.224025, 0.467018],
+ [0.748378, 0.226377, 0.464794],
+ [0.754737, 0.228772, 0.462509],
+ [0.761077, 0.231214, 0.460162],
+ [0.767398, 0.233705, 0.457755],
+ [0.773695, 0.236249, 0.455289],
+ [0.779968, 0.238851, 0.452765],
+ [0.786212, 0.241514, 0.450184],
+ [0.792427, 0.244242, 0.447543],
+ [0.798608, 0.247040, 0.444848],
+ [0.804752, 0.249911, 0.442102],
+ [0.810855, 0.252861, 0.439305],
+ [0.816914, 0.255895, 0.436461],
+ [0.822926, 0.259016, 0.433573],
+ [0.828886, 0.262229, 0.430644],
+ [0.834791, 0.265540, 0.427671],
+ [0.840636, 0.268953, 0.424666],
+ [0.846416, 0.272473, 0.421631],
+ [0.852126, 0.276106, 0.418573],
+ [0.857763, 0.279857, 0.415496],
+ [0.863320, 0.283729, 0.412403],
+ [0.868793, 0.287728, 0.409303],
+ [0.874176, 0.291859, 0.406205],
+ [0.879464, 0.296125, 0.403118],
+ [0.884651, 0.300530, 0.400047],
+ [0.889731, 0.305079, 0.397002],
+ [0.894700, 0.309773, 0.393995],
+ [0.899552, 0.314616, 0.391037],
+ [0.904281, 0.319610, 0.388137],
+ [0.908884, 0.324755, 0.385308],
+ [0.913354, 0.330052, 0.382563],
+ [0.917689, 0.335500, 0.379915],
+ [0.921884, 0.341098, 0.377376],
+ [0.925937, 0.346844, 0.374959],
+ [0.929845, 0.352734, 0.372677],
+ [0.933606, 0.358764, 0.370541],
+ [0.937221, 0.364929, 0.368567],
+ [0.940687, 0.371224, 0.366762],
+ [0.944006, 0.377643, 0.365136],
+ [0.947180, 0.384178, 0.363701],
+ [0.950210, 0.390820, 0.362468],
+ [0.953099, 0.397563, 0.361438],
+ [0.955849, 0.404400, 0.360619],
+ [0.958464, 0.411324, 0.360014],
+ [0.960949, 0.418323, 0.359630],
+ [0.963310, 0.425390, 0.359469],
+ [0.965549, 0.432519, 0.359529],
+ [0.967671, 0.439703, 0.359810],
+ [0.969680, 0.446936, 0.360311],
+ [0.971582, 0.454210, 0.361030],
+ [0.973381, 0.461520, 0.361965],
+ [0.975082, 0.468861, 0.363111],
+ [0.976690, 0.476226, 0.364466],
+ [0.978210, 0.483612, 0.366025],
+ [0.979645, 0.491014, 0.367783],
+ [0.981000, 0.498428, 0.369734],
+ [0.982279, 0.505851, 0.371874],
+ [0.983485, 0.513280, 0.374198],
+ [0.984622, 0.520713, 0.376698],
+ [0.985693, 0.528148, 0.379371],
+ [0.986700, 0.535582, 0.382210],
+ [0.987646, 0.543015, 0.385210],
+ [0.988533, 0.550446, 0.388365],
+ [0.989363, 0.557873, 0.391671],
+ [0.990138, 0.565296, 0.395122],
+ [0.990871, 0.572706, 0.398714],
+ [0.991558, 0.580107, 0.402441],
+ [0.992196, 0.587502, 0.406299],
+ [0.992785, 0.594891, 0.410283],
+ [0.993326, 0.602275, 0.414390],
+ [0.993834, 0.609644, 0.418613],
+ [0.994309, 0.616999, 0.422950],
+ [0.994738, 0.624350, 0.427397],
+ [0.995122, 0.631696, 0.431951],
+ [0.995480, 0.639027, 0.436607],
+ [0.995810, 0.646344, 0.441361],
+ [0.996096, 0.653659, 0.446213],
+ [0.996341, 0.660969, 0.451160],
+ [0.996580, 0.668256, 0.456192],
+ [0.996775, 0.675541, 0.461314],
+ [0.996925, 0.682828, 0.466526],
+ [0.997077, 0.690088, 0.471811],
+ [0.997186, 0.697349, 0.477182],
+ [0.997254, 0.704611, 0.482635],
+ [0.997325, 0.711848, 0.488154],
+ [0.997351, 0.719089, 0.493755],
+ [0.997351, 0.726324, 0.499428],
+ [0.997341, 0.733545, 0.505167],
+ [0.997285, 0.740772, 0.510983],
+ [0.997228, 0.747981, 0.516859],
+ [0.997138, 0.755190, 0.522806],
+ [0.997019, 0.762398, 0.528821],
+ [0.996898, 0.769591, 0.534892],
+ [0.996727, 0.776795, 0.541039],
+ [0.996571, 0.783977, 0.547233],
+ [0.996369, 0.791167, 0.553499],
+ [0.996162, 0.798348, 0.559820],
+ [0.995932, 0.805527, 0.566202],
+ [0.995680, 0.812706, 0.572645],
+ [0.995424, 0.819875, 0.579140],
+ [0.995131, 0.827052, 0.585701],
+ [0.994851, 0.834213, 0.592307],
+ [0.994524, 0.841387, 0.598983],
+ [0.994222, 0.848540, 0.605696],
+ [0.993866, 0.855711, 0.612482],
+ [0.993545, 0.862859, 0.619299],
+ [0.993170, 0.870024, 0.626189],
+ [0.992831, 0.877168, 0.633109],
+ [0.992440, 0.884330, 0.640099],
+ [0.992089, 0.891470, 0.647116],
+ [0.991688, 0.898627, 0.654202],
+ [0.991332, 0.905763, 0.661309],
+ [0.990930, 0.912915, 0.668481],
+ [0.990570, 0.920049, 0.675675],
+ [0.990175, 0.927196, 0.682926],
+ [0.989815, 0.934329, 0.690198],
+ [0.989434, 0.941470, 0.697519],
+ [0.989077, 0.948604, 0.704863],
+ [0.988717, 0.955742, 0.712242],
+ [0.988367, 0.962878, 0.719649],
+ [0.988033, 0.970012, 0.727077],
+ [0.987691, 0.977154, 0.734536],
+ [0.987387, 0.984288, 0.742002],
+ [0.987053, 0.991438, 0.749504]]
+
+_inferno_data = [[0.001462, 0.000466, 0.013866],
+ [0.002267, 0.001270, 0.018570],
+ [0.003299, 0.002249, 0.024239],
+ [0.004547, 0.003392, 0.030909],
+ [0.006006, 0.004692, 0.038558],
+ [0.007676, 0.006136, 0.046836],
+ [0.009561, 0.007713, 0.055143],
+ [0.011663, 0.009417, 0.063460],
+ [0.013995, 0.011225, 0.071862],
+ [0.016561, 0.013136, 0.080282],
+ [0.019373, 0.015133, 0.088767],
+ [0.022447, 0.017199, 0.097327],
+ [0.025793, 0.019331, 0.105930],
+ [0.029432, 0.021503, 0.114621],
+ [0.033385, 0.023702, 0.123397],
+ [0.037668, 0.025921, 0.132232],
+ [0.042253, 0.028139, 0.141141],
+ [0.046915, 0.030324, 0.150164],
+ [0.051644, 0.032474, 0.159254],
+ [0.056449, 0.034569, 0.168414],
+ [0.061340, 0.036590, 0.177642],
+ [0.066331, 0.038504, 0.186962],
+ [0.071429, 0.040294, 0.196354],
+ [0.076637, 0.041905, 0.205799],
+ [0.081962, 0.043328, 0.215289],
+ [0.087411, 0.044556, 0.224813],
+ [0.092990, 0.045583, 0.234358],
+ [0.098702, 0.046402, 0.243904],
+ [0.104551, 0.047008, 0.253430],
+ [0.110536, 0.047399, 0.262912],
+ [0.116656, 0.047574, 0.272321],
+ [0.122908, 0.047536, 0.281624],
+ [0.129285, 0.047293, 0.290788],
+ [0.135778, 0.046856, 0.299776],
+ [0.142378, 0.046242, 0.308553],
+ [0.149073, 0.045468, 0.317085],
+ [0.155850, 0.044559, 0.325338],
+ [0.162689, 0.043554, 0.333277],
+ [0.169575, 0.042489, 0.340874],
+ [0.176493, 0.041402, 0.348111],
+ [0.183429, 0.040329, 0.354971],
+ [0.190367, 0.039309, 0.361447],
+ [0.197297, 0.038400, 0.367535],
+ [0.204209, 0.037632, 0.373238],
+ [0.211095, 0.037030, 0.378563],
+ [0.217949, 0.036615, 0.383522],
+ [0.224763, 0.036405, 0.388129],
+ [0.231538, 0.036405, 0.392400],
+ [0.238273, 0.036621, 0.396353],
+ [0.244967, 0.037055, 0.400007],
+ [0.251620, 0.037705, 0.403378],
+ [0.258234, 0.038571, 0.406485],
+ [0.264810, 0.039647, 0.409345],
+ [0.271347, 0.040922, 0.411976],
+ [0.277850, 0.042353, 0.414392],
+ [0.284321, 0.043933, 0.416608],
+ [0.290763, 0.045644, 0.418637],
+ [0.297178, 0.047470, 0.420491],
+ [0.303568, 0.049396, 0.422182],
+ [0.309935, 0.051407, 0.423721],
+ [0.316282, 0.053490, 0.425116],
+ [0.322610, 0.055634, 0.426377],
+ [0.328921, 0.057827, 0.427511],
+ [0.335217, 0.060060, 0.428524],
+ [0.341500, 0.062325, 0.429425],
+ [0.347771, 0.064616, 0.430217],
+ [0.354032, 0.066925, 0.430906],
+ [0.360284, 0.069247, 0.431497],
+ [0.366529, 0.071579, 0.431994],
+ [0.372768, 0.073915, 0.432400],
+ [0.379001, 0.076253, 0.432719],
+ [0.385228, 0.078591, 0.432955],
+ [0.391453, 0.080927, 0.433109],
+ [0.397674, 0.083257, 0.433183],
+ [0.403894, 0.085580, 0.433179],
+ [0.410113, 0.087896, 0.433098],
+ [0.416331, 0.090203, 0.432943],
+ [0.422549, 0.092501, 0.432714],
+ [0.428768, 0.094790, 0.432412],
+ [0.434987, 0.097069, 0.432039],
+ [0.441207, 0.099338, 0.431594],
+ [0.447428, 0.101597, 0.431080],
+ [0.453651, 0.103848, 0.430498],
+ [0.459875, 0.106089, 0.429846],
+ [0.466100, 0.108322, 0.429125],
+ [0.472328, 0.110547, 0.428334],
+ [0.478558, 0.112764, 0.427475],
+ [0.484789, 0.114974, 0.426548],
+ [0.491022, 0.117179, 0.425552],
+ [0.497257, 0.119379, 0.424488],
+ [0.503493, 0.121575, 0.423356],
+ [0.509730, 0.123769, 0.422156],
+ [0.515967, 0.125960, 0.420887],
+ [0.522206, 0.128150, 0.419549],
+ [0.528444, 0.130341, 0.418142],
+ [0.534683, 0.132534, 0.416667],
+ [0.540920, 0.134729, 0.415123],
+ [0.547157, 0.136929, 0.413511],
+ [0.553392, 0.139134, 0.411829],
+ [0.559624, 0.141346, 0.410078],
+ [0.565854, 0.143567, 0.408258],
+ [0.572081, 0.145797, 0.406369],
+ [0.578304, 0.148039, 0.404411],
+ [0.584521, 0.150294, 0.402385],
+ [0.590734, 0.152563, 0.400290],
+ [0.596940, 0.154848, 0.398125],
+ [0.603139, 0.157151, 0.395891],
+ [0.609330, 0.159474, 0.393589],
+ [0.615513, 0.161817, 0.391219],
+ [0.621685, 0.164184, 0.388781],
+ [0.627847, 0.166575, 0.386276],
+ [0.633998, 0.168992, 0.383704],
+ [0.640135, 0.171438, 0.381065],
+ [0.646260, 0.173914, 0.378359],
+ [0.652369, 0.176421, 0.375586],
+ [0.658463, 0.178962, 0.372748],
+ [0.664540, 0.181539, 0.369846],
+ [0.670599, 0.184153, 0.366879],
+ [0.676638, 0.186807, 0.363849],
+ [0.682656, 0.189501, 0.360757],
+ [0.688653, 0.192239, 0.357603],
+ [0.694627, 0.195021, 0.354388],
+ [0.700576, 0.197851, 0.351113],
+ [0.706500, 0.200728, 0.347777],
+ [0.712396, 0.203656, 0.344383],
+ [0.718264, 0.206636, 0.340931],
+ [0.724103, 0.209670, 0.337424],
+ [0.729909, 0.212759, 0.333861],
+ [0.735683, 0.215906, 0.330245],
+ [0.741423, 0.219112, 0.326576],
+ [0.747127, 0.222378, 0.322856],
+ [0.752794, 0.225706, 0.319085],
+ [0.758422, 0.229097, 0.315266],
+ [0.764010, 0.232554, 0.311399],
+ [0.769556, 0.236077, 0.307485],
+ [0.775059, 0.239667, 0.303526],
+ [0.780517, 0.243327, 0.299523],
+ [0.785929, 0.247056, 0.295477],
+ [0.791293, 0.250856, 0.291390],
+ [0.796607, 0.254728, 0.287264],
+ [0.801871, 0.258674, 0.283099],
+ [0.807082, 0.262692, 0.278898],
+ [0.812239, 0.266786, 0.274661],
+ [0.817341, 0.270954, 0.270390],
+ [0.822386, 0.275197, 0.266085],
+ [0.827372, 0.279517, 0.261750],
+ [0.832299, 0.283913, 0.257383],
+ [0.837165, 0.288385, 0.252988],
+ [0.841969, 0.292933, 0.248564],
+ [0.846709, 0.297559, 0.244113],
+ [0.851384, 0.302260, 0.239636],
+ [0.855992, 0.307038, 0.235133],
+ [0.860533, 0.311892, 0.230606],
+ [0.865006, 0.316822, 0.226055],
+ [0.869409, 0.321827, 0.221482],
+ [0.873741, 0.326906, 0.216886],
+ [0.878001, 0.332060, 0.212268],
+ [0.882188, 0.337287, 0.207628],
+ [0.886302, 0.342586, 0.202968],
+ [0.890341, 0.347957, 0.198286],
+ [0.894305, 0.353399, 0.193584],
+ [0.898192, 0.358911, 0.188860],
+ [0.902003, 0.364492, 0.184116],
+ [0.905735, 0.370140, 0.179350],
+ [0.909390, 0.375856, 0.174563],
+ [0.912966, 0.381636, 0.169755],
+ [0.916462, 0.387481, 0.164924],
+ [0.919879, 0.393389, 0.160070],
+ [0.923215, 0.399359, 0.155193],
+ [0.926470, 0.405389, 0.150292],
+ [0.929644, 0.411479, 0.145367],
+ [0.932737, 0.417627, 0.140417],
+ [0.935747, 0.423831, 0.135440],
+ [0.938675, 0.430091, 0.130438],
+ [0.941521, 0.436405, 0.125409],
+ [0.944285, 0.442772, 0.120354],
+ [0.946965, 0.449191, 0.115272],
+ [0.949562, 0.455660, 0.110164],
+ [0.952075, 0.462178, 0.105031],
+ [0.954506, 0.468744, 0.099874],
+ [0.956852, 0.475356, 0.094695],
+ [0.959114, 0.482014, 0.089499],
+ [0.961293, 0.488716, 0.084289],
+ [0.963387, 0.495462, 0.079073],
+ [0.965397, 0.502249, 0.073859],
+ [0.967322, 0.509078, 0.068659],
+ [0.969163, 0.515946, 0.063488],
+ [0.970919, 0.522853, 0.058367],
+ [0.972590, 0.529798, 0.053324],
+ [0.974176, 0.536780, 0.048392],
+ [0.975677, 0.543798, 0.043618],
+ [0.977092, 0.550850, 0.039050],
+ [0.978422, 0.557937, 0.034931],
+ [0.979666, 0.565057, 0.031409],
+ [0.980824, 0.572209, 0.028508],
+ [0.981895, 0.579392, 0.026250],
+ [0.982881, 0.586606, 0.024661],
+ [0.983779, 0.593849, 0.023770],
+ [0.984591, 0.601122, 0.023606],
+ [0.985315, 0.608422, 0.024202],
+ [0.985952, 0.615750, 0.025592],
+ [0.986502, 0.623105, 0.027814],
+ [0.986964, 0.630485, 0.030908],
+ [0.987337, 0.637890, 0.034916],
+ [0.987622, 0.645320, 0.039886],
+ [0.987819, 0.652773, 0.045581],
+ [0.987926, 0.660250, 0.051750],
+ [0.987945, 0.667748, 0.058329],
+ [0.987874, 0.675267, 0.065257],
+ [0.987714, 0.682807, 0.072489],
+ [0.987464, 0.690366, 0.079990],
+ [0.987124, 0.697944, 0.087731],
+ [0.986694, 0.705540, 0.095694],
+ [0.986175, 0.713153, 0.103863],
+ [0.985566, 0.720782, 0.112229],
+ [0.984865, 0.728427, 0.120785],
+ [0.984075, 0.736087, 0.129527],
+ [0.983196, 0.743758, 0.138453],
+ [0.982228, 0.751442, 0.147565],
+ [0.981173, 0.759135, 0.156863],
+ [0.980032, 0.766837, 0.166353],
+ [0.978806, 0.774545, 0.176037],
+ [0.977497, 0.782258, 0.185923],
+ [0.976108, 0.789974, 0.196018],
+ [0.974638, 0.797692, 0.206332],
+ [0.973088, 0.805409, 0.216877],
+ [0.971468, 0.813122, 0.227658],
+ [0.969783, 0.820825, 0.238686],
+ [0.968041, 0.828515, 0.249972],
+ [0.966243, 0.836191, 0.261534],
+ [0.964394, 0.843848, 0.273391],
+ [0.962517, 0.851476, 0.285546],
+ [0.960626, 0.859069, 0.298010],
+ [0.958720, 0.866624, 0.310820],
+ [0.956834, 0.874129, 0.323974],
+ [0.954997, 0.881569, 0.337475],
+ [0.953215, 0.888942, 0.351369],
+ [0.951546, 0.896226, 0.365627],
+ [0.950018, 0.903409, 0.380271],
+ [0.948683, 0.910473, 0.395289],
+ [0.947594, 0.917399, 0.410665],
+ [0.946809, 0.924168, 0.426373],
+ [0.946392, 0.930761, 0.442367],
+ [0.946403, 0.937159, 0.458592],
+ [0.946903, 0.943348, 0.474970],
+ [0.947937, 0.949318, 0.491426],
+ [0.949545, 0.955063, 0.507860],
+ [0.951740, 0.960587, 0.524203],
+ [0.954529, 0.965896, 0.540361],
+ [0.957896, 0.971003, 0.556275],
+ [0.961812, 0.975924, 0.571925],
+ [0.966249, 0.980678, 0.587206],
+ [0.971162, 0.985282, 0.602154],
+ [0.976511, 0.989753, 0.616760],
+ [0.982257, 0.994109, 0.631017],
+ [0.988362, 0.998364, 0.644924]]
+
+_plasma_data = [[0.050383, 0.029803, 0.527975],
+ [0.063536, 0.028426, 0.533124],
+ [0.075353, 0.027206, 0.538007],
+ [0.086222, 0.026125, 0.542658],
+ [0.096379, 0.025165, 0.547103],
+ [0.105980, 0.024309, 0.551368],
+ [0.115124, 0.023556, 0.555468],
+ [0.123903, 0.022878, 0.559423],
+ [0.132381, 0.022258, 0.563250],
+ [0.140603, 0.021687, 0.566959],
+ [0.148607, 0.021154, 0.570562],
+ [0.156421, 0.020651, 0.574065],
+ [0.164070, 0.020171, 0.577478],
+ [0.171574, 0.019706, 0.580806],
+ [0.178950, 0.019252, 0.584054],
+ [0.186213, 0.018803, 0.587228],
+ [0.193374, 0.018354, 0.590330],
+ [0.200445, 0.017902, 0.593364],
+ [0.207435, 0.017442, 0.596333],
+ [0.214350, 0.016973, 0.599239],
+ [0.221197, 0.016497, 0.602083],
+ [0.227983, 0.016007, 0.604867],
+ [0.234715, 0.015502, 0.607592],
+ [0.241396, 0.014979, 0.610259],
+ [0.248032, 0.014439, 0.612868],
+ [0.254627, 0.013882, 0.615419],
+ [0.261183, 0.013308, 0.617911],
+ [0.267703, 0.012716, 0.620346],
+ [0.274191, 0.012109, 0.622722],
+ [0.280648, 0.011488, 0.625038],
+ [0.287076, 0.010855, 0.627295],
+ [0.293478, 0.010213, 0.629490],
+ [0.299855, 0.009561, 0.631624],
+ [0.306210, 0.008902, 0.633694],
+ [0.312543, 0.008239, 0.635700],
+ [0.318856, 0.007576, 0.637640],
+ [0.325150, 0.006915, 0.639512],
+ [0.331426, 0.006261, 0.641316],
+ [0.337683, 0.005618, 0.643049],
+ [0.343925, 0.004991, 0.644710],
+ [0.350150, 0.004382, 0.646298],
+ [0.356359, 0.003798, 0.647810],
+ [0.362553, 0.003243, 0.649245],
+ [0.368733, 0.002724, 0.650601],
+ [0.374897, 0.002245, 0.651876],
+ [0.381047, 0.001814, 0.653068],
+ [0.387183, 0.001434, 0.654177],
+ [0.393304, 0.001114, 0.655199],
+ [0.399411, 0.000859, 0.656133],
+ [0.405503, 0.000678, 0.656977],
+ [0.411580, 0.000577, 0.657730],
+ [0.417642, 0.000564, 0.658390],
+ [0.423689, 0.000646, 0.658956],
+ [0.429719, 0.000831, 0.659425],
+ [0.435734, 0.001127, 0.659797],
+ [0.441732, 0.001540, 0.660069],
+ [0.447714, 0.002080, 0.660240],
+ [0.453677, 0.002755, 0.660310],
+ [0.459623, 0.003574, 0.660277],
+ [0.465550, 0.004545, 0.660139],
+ [0.471457, 0.005678, 0.659897],
+ [0.477344, 0.006980, 0.659549],
+ [0.483210, 0.008460, 0.659095],
+ [0.489055, 0.010127, 0.658534],
+ [0.494877, 0.011990, 0.657865],
+ [0.500678, 0.014055, 0.657088],
+ [0.506454, 0.016333, 0.656202],
+ [0.512206, 0.018833, 0.655209],
+ [0.517933, 0.021563, 0.654109],
+ [0.523633, 0.024532, 0.652901],
+ [0.529306, 0.027747, 0.651586],
+ [0.534952, 0.031217, 0.650165],
+ [0.540570, 0.034950, 0.648640],
+ [0.546157, 0.038954, 0.647010],
+ [0.551715, 0.043136, 0.645277],
+ [0.557243, 0.047331, 0.643443],
+ [0.562738, 0.051545, 0.641509],
+ [0.568201, 0.055778, 0.639477],
+ [0.573632, 0.060028, 0.637349],
+ [0.579029, 0.064296, 0.635126],
+ [0.584391, 0.068579, 0.632812],
+ [0.589719, 0.072878, 0.630408],
+ [0.595011, 0.077190, 0.627917],
+ [0.600266, 0.081516, 0.625342],
+ [0.605485, 0.085854, 0.622686],
+ [0.610667, 0.090204, 0.619951],
+ [0.615812, 0.094564, 0.617140],
+ [0.620919, 0.098934, 0.614257],
+ [0.625987, 0.103312, 0.611305],
+ [0.631017, 0.107699, 0.608287],
+ [0.636008, 0.112092, 0.605205],
+ [0.640959, 0.116492, 0.602065],
+ [0.645872, 0.120898, 0.598867],
+ [0.650746, 0.125309, 0.595617],
+ [0.655580, 0.129725, 0.592317],
+ [0.660374, 0.134144, 0.588971],
+ [0.665129, 0.138566, 0.585582],
+ [0.669845, 0.142992, 0.582154],
+ [0.674522, 0.147419, 0.578688],
+ [0.679160, 0.151848, 0.575189],
+ [0.683758, 0.156278, 0.571660],
+ [0.688318, 0.160709, 0.568103],
+ [0.692840, 0.165141, 0.564522],
+ [0.697324, 0.169573, 0.560919],
+ [0.701769, 0.174005, 0.557296],
+ [0.706178, 0.178437, 0.553657],
+ [0.710549, 0.182868, 0.550004],
+ [0.714883, 0.187299, 0.546338],
+ [0.719181, 0.191729, 0.542663],
+ [0.723444, 0.196158, 0.538981],
+ [0.727670, 0.200586, 0.535293],
+ [0.731862, 0.205013, 0.531601],
+ [0.736019, 0.209439, 0.527908],
+ [0.740143, 0.213864, 0.524216],
+ [0.744232, 0.218288, 0.520524],
+ [0.748289, 0.222711, 0.516834],
+ [0.752312, 0.227133, 0.513149],
+ [0.756304, 0.231555, 0.509468],
+ [0.760264, 0.235976, 0.505794],
+ [0.764193, 0.240396, 0.502126],
+ [0.768090, 0.244817, 0.498465],
+ [0.771958, 0.249237, 0.494813],
+ [0.775796, 0.253658, 0.491171],
+ [0.779604, 0.258078, 0.487539],
+ [0.783383, 0.262500, 0.483918],
+ [0.787133, 0.266922, 0.480307],
+ [0.790855, 0.271345, 0.476706],
+ [0.794549, 0.275770, 0.473117],
+ [0.798216, 0.280197, 0.469538],
+ [0.801855, 0.284626, 0.465971],
+ [0.805467, 0.289057, 0.462415],
+ [0.809052, 0.293491, 0.458870],
+ [0.812612, 0.297928, 0.455338],
+ [0.816144, 0.302368, 0.451816],
+ [0.819651, 0.306812, 0.448306],
+ [0.823132, 0.311261, 0.444806],
+ [0.826588, 0.315714, 0.441316],
+ [0.830018, 0.320172, 0.437836],
+ [0.833422, 0.324635, 0.434366],
+ [0.836801, 0.329105, 0.430905],
+ [0.840155, 0.333580, 0.427455],
+ [0.843484, 0.338062, 0.424013],
+ [0.846788, 0.342551, 0.420579],
+ [0.850066, 0.347048, 0.417153],
+ [0.853319, 0.351553, 0.413734],
+ [0.856547, 0.356066, 0.410322],
+ [0.859750, 0.360588, 0.406917],
+ [0.862927, 0.365119, 0.403519],
+ [0.866078, 0.369660, 0.400126],
+ [0.869203, 0.374212, 0.396738],
+ [0.872303, 0.378774, 0.393355],
+ [0.875376, 0.383347, 0.389976],
+ [0.878423, 0.387932, 0.386600],
+ [0.881443, 0.392529, 0.383229],
+ [0.884436, 0.397139, 0.379860],
+ [0.887402, 0.401762, 0.376494],
+ [0.890340, 0.406398, 0.373130],
+ [0.893250, 0.411048, 0.369768],
+ [0.896131, 0.415712, 0.366407],
+ [0.898984, 0.420392, 0.363047],
+ [0.901807, 0.425087, 0.359688],
+ [0.904601, 0.429797, 0.356329],
+ [0.907365, 0.434524, 0.352970],
+ [0.910098, 0.439268, 0.349610],
+ [0.912800, 0.444029, 0.346251],
+ [0.915471, 0.448807, 0.342890],
+ [0.918109, 0.453603, 0.339529],
+ [0.920714, 0.458417, 0.336166],
+ [0.923287, 0.463251, 0.332801],
+ [0.925825, 0.468103, 0.329435],
+ [0.928329, 0.472975, 0.326067],
+ [0.930798, 0.477867, 0.322697],
+ [0.933232, 0.482780, 0.319325],
+ [0.935630, 0.487712, 0.315952],
+ [0.937990, 0.492667, 0.312575],
+ [0.940313, 0.497642, 0.309197],
+ [0.942598, 0.502639, 0.305816],
+ [0.944844, 0.507658, 0.302433],
+ [0.947051, 0.512699, 0.299049],
+ [0.949217, 0.517763, 0.295662],
+ [0.951344, 0.522850, 0.292275],
+ [0.953428, 0.527960, 0.288883],
+ [0.955470, 0.533093, 0.285490],
+ [0.957469, 0.538250, 0.282096],
+ [0.959424, 0.543431, 0.278701],
+ [0.961336, 0.548636, 0.275305],
+ [0.963203, 0.553865, 0.271909],
+ [0.965024, 0.559118, 0.268513],
+ [0.966798, 0.564396, 0.265118],
+ [0.968526, 0.569700, 0.261721],
+ [0.970205, 0.575028, 0.258325],
+ [0.971835, 0.580382, 0.254931],
+ [0.973416, 0.585761, 0.251540],
+ [0.974947, 0.591165, 0.248151],
+ [0.976428, 0.596595, 0.244767],
+ [0.977856, 0.602051, 0.241387],
+ [0.979233, 0.607532, 0.238013],
+ [0.980556, 0.613039, 0.234646],
+ [0.981826, 0.618572, 0.231287],
+ [0.983041, 0.624131, 0.227937],
+ [0.984199, 0.629718, 0.224595],
+ [0.985301, 0.635330, 0.221265],
+ [0.986345, 0.640969, 0.217948],
+ [0.987332, 0.646633, 0.214648],
+ [0.988260, 0.652325, 0.211364],
+ [0.989128, 0.658043, 0.208100],
+ [0.989935, 0.663787, 0.204859],
+ [0.990681, 0.669558, 0.201642],
+ [0.991365, 0.675355, 0.198453],
+ [0.991985, 0.681179, 0.195295],
+ [0.992541, 0.687030, 0.192170],
+ [0.993032, 0.692907, 0.189084],
+ [0.993456, 0.698810, 0.186041],
+ [0.993814, 0.704741, 0.183043],
+ [0.994103, 0.710698, 0.180097],
+ [0.994324, 0.716681, 0.177208],
+ [0.994474, 0.722691, 0.174381],
+ [0.994553, 0.728728, 0.171622],
+ [0.994561, 0.734791, 0.168938],
+ [0.994495, 0.740880, 0.166335],
+ [0.994355, 0.746995, 0.163821],
+ [0.994141, 0.753137, 0.161404],
+ [0.993851, 0.759304, 0.159092],
+ [0.993482, 0.765499, 0.156891],
+ [0.993033, 0.771720, 0.154808],
+ [0.992505, 0.777967, 0.152855],
+ [0.991897, 0.784239, 0.151042],
+ [0.991209, 0.790537, 0.149377],
+ [0.990439, 0.796859, 0.147870],
+ [0.989587, 0.803205, 0.146529],
+ [0.988648, 0.809579, 0.145357],
+ [0.987621, 0.815978, 0.144363],
+ [0.986509, 0.822401, 0.143557],
+ [0.985314, 0.828846, 0.142945],
+ [0.984031, 0.835315, 0.142528],
+ [0.982653, 0.841812, 0.142303],
+ [0.981190, 0.848329, 0.142279],
+ [0.979644, 0.854866, 0.142453],
+ [0.977995, 0.861432, 0.142808],
+ [0.976265, 0.868016, 0.143351],
+ [0.974443, 0.874622, 0.144061],
+ [0.972530, 0.881250, 0.144923],
+ [0.970533, 0.887896, 0.145919],
+ [0.968443, 0.894564, 0.147014],
+ [0.966271, 0.901249, 0.148180],
+ [0.964021, 0.907950, 0.149370],
+ [0.961681, 0.914672, 0.150520],
+ [0.959276, 0.921407, 0.151566],
+ [0.956808, 0.928152, 0.152409],
+ [0.954287, 0.934908, 0.152921],
+ [0.951726, 0.941671, 0.152925],
+ [0.949151, 0.948435, 0.152178],
+ [0.946602, 0.955190, 0.150328],
+ [0.944152, 0.961916, 0.146861],
+ [0.941896, 0.968590, 0.140956],
+ [0.940015, 0.975158, 0.131326]]
+
+_viridis_data = [[0.267004, 0.004874, 0.329415],
+ [0.268510, 0.009605, 0.335427],
+ [0.269944, 0.014625, 0.341379],
+ [0.271305, 0.019942, 0.347269],
+ [0.272594, 0.025563, 0.353093],
+ [0.273809, 0.031497, 0.358853],
+ [0.274952, 0.037752, 0.364543],
+ [0.276022, 0.044167, 0.370164],
+ [0.277018, 0.050344, 0.375715],
+ [0.277941, 0.056324, 0.381191],
+ [0.278791, 0.062145, 0.386592],
+ [0.279566, 0.067836, 0.391917],
+ [0.280267, 0.073417, 0.397163],
+ [0.280894, 0.078907, 0.402329],
+ [0.281446, 0.084320, 0.407414],
+ [0.281924, 0.089666, 0.412415],
+ [0.282327, 0.094955, 0.417331],
+ [0.282656, 0.100196, 0.422160],
+ [0.282910, 0.105393, 0.426902],
+ [0.283091, 0.110553, 0.431554],
+ [0.283197, 0.115680, 0.436115],
+ [0.283229, 0.120777, 0.440584],
+ [0.283187, 0.125848, 0.444960],
+ [0.283072, 0.130895, 0.449241],
+ [0.282884, 0.135920, 0.453427],
+ [0.282623, 0.140926, 0.457517],
+ [0.282290, 0.145912, 0.461510],
+ [0.281887, 0.150881, 0.465405],
+ [0.281412, 0.155834, 0.469201],
+ [0.280868, 0.160771, 0.472899],
+ [0.280255, 0.165693, 0.476498],
+ [0.279574, 0.170599, 0.479997],
+ [0.278826, 0.175490, 0.483397],
+ [0.278012, 0.180367, 0.486697],
+ [0.277134, 0.185228, 0.489898],
+ [0.276194, 0.190074, 0.493001],
+ [0.275191, 0.194905, 0.496005],
+ [0.274128, 0.199721, 0.498911],
+ [0.273006, 0.204520, 0.501721],
+ [0.271828, 0.209303, 0.504434],
+ [0.270595, 0.214069, 0.507052],
+ [0.269308, 0.218818, 0.509577],
+ [0.267968, 0.223549, 0.512008],
+ [0.266580, 0.228262, 0.514349],
+ [0.265145, 0.232956, 0.516599],
+ [0.263663, 0.237631, 0.518762],
+ [0.262138, 0.242286, 0.520837],
+ [0.260571, 0.246922, 0.522828],
+ [0.258965, 0.251537, 0.524736],
+ [0.257322, 0.256130, 0.526563],
+ [0.255645, 0.260703, 0.528312],
+ [0.253935, 0.265254, 0.529983],
+ [0.252194, 0.269783, 0.531579],
+ [0.250425, 0.274290, 0.533103],
+ [0.248629, 0.278775, 0.534556],
+ [0.246811, 0.283237, 0.535941],
+ [0.244972, 0.287675, 0.537260],
+ [0.243113, 0.292092, 0.538516],
+ [0.241237, 0.296485, 0.539709],
+ [0.239346, 0.300855, 0.540844],
+ [0.237441, 0.305202, 0.541921],
+ [0.235526, 0.309527, 0.542944],
+ [0.233603, 0.313828, 0.543914],
+ [0.231674, 0.318106, 0.544834],
+ [0.229739, 0.322361, 0.545706],
+ [0.227802, 0.326594, 0.546532],
+ [0.225863, 0.330805, 0.547314],
+ [0.223925, 0.334994, 0.548053],
+ [0.221989, 0.339161, 0.548752],
+ [0.220057, 0.343307, 0.549413],
+ [0.218130, 0.347432, 0.550038],
+ [0.216210, 0.351535, 0.550627],
+ [0.214298, 0.355619, 0.551184],
+ [0.212395, 0.359683, 0.551710],
+ [0.210503, 0.363727, 0.552206],
+ [0.208623, 0.367752, 0.552675],
+ [0.206756, 0.371758, 0.553117],
+ [0.204903, 0.375746, 0.553533],
+ [0.203063, 0.379716, 0.553925],
+ [0.201239, 0.383670, 0.554294],
+ [0.199430, 0.387607, 0.554642],
+ [0.197636, 0.391528, 0.554969],
+ [0.195860, 0.395433, 0.555276],
+ [0.194100, 0.399323, 0.555565],
+ [0.192357, 0.403199, 0.555836],
+ [0.190631, 0.407061, 0.556089],
+ [0.188923, 0.410910, 0.556326],
+ [0.187231, 0.414746, 0.556547],
+ [0.185556, 0.418570, 0.556753],
+ [0.183898, 0.422383, 0.556944],
+ [0.182256, 0.426184, 0.557120],
+ [0.180629, 0.429975, 0.557282],
+ [0.179019, 0.433756, 0.557430],
+ [0.177423, 0.437527, 0.557565],
+ [0.175841, 0.441290, 0.557685],
+ [0.174274, 0.445044, 0.557792],
+ [0.172719, 0.448791, 0.557885],
+ [0.171176, 0.452530, 0.557965],
+ [0.169646, 0.456262, 0.558030],
+ [0.168126, 0.459988, 0.558082],
+ [0.166617, 0.463708, 0.558119],
+ [0.165117, 0.467423, 0.558141],
+ [0.163625, 0.471133, 0.558148],
+ [0.162142, 0.474838, 0.558140],
+ [0.160665, 0.478540, 0.558115],
+ [0.159194, 0.482237, 0.558073],
+ [0.157729, 0.485932, 0.558013],
+ [0.156270, 0.489624, 0.557936],
+ [0.154815, 0.493313, 0.557840],
+ [0.153364, 0.497000, 0.557724],
+ [0.151918, 0.500685, 0.557587],
+ [0.150476, 0.504369, 0.557430],
+ [0.149039, 0.508051, 0.557250],
+ [0.147607, 0.511733, 0.557049],
+ [0.146180, 0.515413, 0.556823],
+ [0.144759, 0.519093, 0.556572],
+ [0.143343, 0.522773, 0.556295],
+ [0.141935, 0.526453, 0.555991],
+ [0.140536, 0.530132, 0.555659],
+ [0.139147, 0.533812, 0.555298],
+ [0.137770, 0.537492, 0.554906],
+ [0.136408, 0.541173, 0.554483],
+ [0.135066, 0.544853, 0.554029],
+ [0.133743, 0.548535, 0.553541],
+ [0.132444, 0.552216, 0.553018],
+ [0.131172, 0.555899, 0.552459],
+ [0.129933, 0.559582, 0.551864],
+ [0.128729, 0.563265, 0.551229],
+ [0.127568, 0.566949, 0.550556],
+ [0.126453, 0.570633, 0.549841],
+ [0.125394, 0.574318, 0.549086],
+ [0.124395, 0.578002, 0.548287],
+ [0.123463, 0.581687, 0.547445],
+ [0.122606, 0.585371, 0.546557],
+ [0.121831, 0.589055, 0.545623],
+ [0.121148, 0.592739, 0.544641],
+ [0.120565, 0.596422, 0.543611],
+ [0.120092, 0.600104, 0.542530],
+ [0.119738, 0.603785, 0.541400],
+ [0.119512, 0.607464, 0.540218],
+ [0.119423, 0.611141, 0.538982],
+ [0.119483, 0.614817, 0.537692],
+ [0.119699, 0.618490, 0.536347],
+ [0.120081, 0.622161, 0.534946],
+ [0.120638, 0.625828, 0.533488],
+ [0.121380, 0.629492, 0.531973],
+ [0.122312, 0.633153, 0.530398],
+ [0.123444, 0.636809, 0.528763],
+ [0.124780, 0.640461, 0.527068],
+ [0.126326, 0.644107, 0.525311],
+ [0.128087, 0.647749, 0.523491],
+ [0.130067, 0.651384, 0.521608],
+ [0.132268, 0.655014, 0.519661],
+ [0.134692, 0.658636, 0.517649],
+ [0.137339, 0.662252, 0.515571],
+ [0.140210, 0.665859, 0.513427],
+ [0.143303, 0.669459, 0.511215],
+ [0.146616, 0.673050, 0.508936],
+ [0.150148, 0.676631, 0.506589],
+ [0.153894, 0.680203, 0.504172],
+ [0.157851, 0.683765, 0.501686],
+ [0.162016, 0.687316, 0.499129],
+ [0.166383, 0.690856, 0.496502],
+ [0.170948, 0.694384, 0.493803],
+ [0.175707, 0.697900, 0.491033],
+ [0.180653, 0.701402, 0.488189],
+ [0.185783, 0.704891, 0.485273],
+ [0.191090, 0.708366, 0.482284],
+ [0.196571, 0.711827, 0.479221],
+ [0.202219, 0.715272, 0.476084],
+ [0.208030, 0.718701, 0.472873],
+ [0.214000, 0.722114, 0.469588],
+ [0.220124, 0.725509, 0.466226],
+ [0.226397, 0.728888, 0.462789],
+ [0.232815, 0.732247, 0.459277],
+ [0.239374, 0.735588, 0.455688],
+ [0.246070, 0.738910, 0.452024],
+ [0.252899, 0.742211, 0.448284],
+ [0.259857, 0.745492, 0.444467],
+ [0.266941, 0.748751, 0.440573],
+ [0.274149, 0.751988, 0.436601],
+ [0.281477, 0.755203, 0.432552],
+ [0.288921, 0.758394, 0.428426],
+ [0.296479, 0.761561, 0.424223],
+ [0.304148, 0.764704, 0.419943],
+ [0.311925, 0.767822, 0.415586],
+ [0.319809, 0.770914, 0.411152],
+ [0.327796, 0.773980, 0.406640],
+ [0.335885, 0.777018, 0.402049],
+ [0.344074, 0.780029, 0.397381],
+ [0.352360, 0.783011, 0.392636],
+ [0.360741, 0.785964, 0.387814],
+ [0.369214, 0.788888, 0.382914],
+ [0.377779, 0.791781, 0.377939],
+ [0.386433, 0.794644, 0.372886],
+ [0.395174, 0.797475, 0.367757],
+ [0.404001, 0.800275, 0.362552],
+ [0.412913, 0.803041, 0.357269],
+ [0.421908, 0.805774, 0.351910],
+ [0.430983, 0.808473, 0.346476],
+ [0.440137, 0.811138, 0.340967],
+ [0.449368, 0.813768, 0.335384],
+ [0.458674, 0.816363, 0.329727],
+ [0.468053, 0.818921, 0.323998],
+ [0.477504, 0.821444, 0.318195],
+ [0.487026, 0.823929, 0.312321],
+ [0.496615, 0.826376, 0.306377],
+ [0.506271, 0.828786, 0.300362],
+ [0.515992, 0.831158, 0.294279],
+ [0.525776, 0.833491, 0.288127],
+ [0.535621, 0.835785, 0.281908],
+ [0.545524, 0.838039, 0.275626],
+ [0.555484, 0.840254, 0.269281],
+ [0.565498, 0.842430, 0.262877],
+ [0.575563, 0.844566, 0.256415],
+ [0.585678, 0.846661, 0.249897],
+ [0.595839, 0.848717, 0.243329],
+ [0.606045, 0.850733, 0.236712],
+ [0.616293, 0.852709, 0.230052],
+ [0.626579, 0.854645, 0.223353],
+ [0.636902, 0.856542, 0.216620],
+ [0.647257, 0.858400, 0.209861],
+ [0.657642, 0.860219, 0.203082],
+ [0.668054, 0.861999, 0.196293],
+ [0.678489, 0.863742, 0.189503],
+ [0.688944, 0.865448, 0.182725],
+ [0.699415, 0.867117, 0.175971],
+ [0.709898, 0.868751, 0.169257],
+ [0.720391, 0.870350, 0.162603],
+ [0.730889, 0.871916, 0.156029],
+ [0.741388, 0.873449, 0.149561],
+ [0.751884, 0.874951, 0.143228],
+ [0.762373, 0.876424, 0.137064],
+ [0.772852, 0.877868, 0.131109],
+ [0.783315, 0.879285, 0.125405],
+ [0.793760, 0.880678, 0.120005],
+ [0.804182, 0.882046, 0.114965],
+ [0.814576, 0.883393, 0.110347],
+ [0.824940, 0.884720, 0.106217],
+ [0.835270, 0.886029, 0.102646],
+ [0.845561, 0.887322, 0.099702],
+ [0.855810, 0.888601, 0.097452],
+ [0.866013, 0.889868, 0.095953],
+ [0.876168, 0.891125, 0.095250],
+ [0.886271, 0.892374, 0.095374],
+ [0.896320, 0.893616, 0.096335],
+ [0.906311, 0.894855, 0.098125],
+ [0.916242, 0.896091, 0.100717],
+ [0.926106, 0.897330, 0.104071],
+ [0.935904, 0.898570, 0.108131],
+ [0.945636, 0.899815, 0.112838],
+ [0.955300, 0.901065, 0.118128],
+ [0.964894, 0.902323, 0.123941],
+ [0.974417, 0.903590, 0.130215],
+ [0.983868, 0.904867, 0.136897],
+ [0.993248, 0.906157, 0.143936]]
+
+_cividis_data = [[0.000000, 0.135112, 0.304751],
+ [0.000000, 0.138068, 0.311105],
+ [0.000000, 0.141013, 0.317579],
+ [0.000000, 0.143951, 0.323982],
+ [0.000000, 0.146877, 0.330479],
+ [0.000000, 0.149791, 0.337065],
+ [0.000000, 0.152673, 0.343704],
+ [0.000000, 0.155377, 0.350500],
+ [0.000000, 0.157932, 0.357521],
+ [0.000000, 0.160495, 0.364534],
+ [0.000000, 0.163058, 0.371608],
+ [0.000000, 0.165621, 0.378769],
+ [0.000000, 0.168204, 0.385902],
+ [0.000000, 0.170800, 0.393100],
+ [0.000000, 0.173420, 0.400353],
+ [0.000000, 0.176082, 0.407577],
+ [0.000000, 0.178802, 0.414764],
+ [0.000000, 0.181610, 0.421859],
+ [0.000000, 0.184550, 0.428802],
+ [0.000000, 0.186915, 0.435532],
+ [0.000000, 0.188769, 0.439563],
+ [0.000000, 0.190950, 0.441085],
+ [0.000000, 0.193366, 0.441561],
+ [0.003602, 0.195911, 0.441564],
+ [0.017852, 0.198528, 0.441248],
+ [0.032110, 0.201199, 0.440785],
+ [0.046205, 0.203903, 0.440196],
+ [0.058378, 0.206629, 0.439531],
+ [0.068968, 0.209372, 0.438863],
+ [0.078624, 0.212122, 0.438105],
+ [0.087465, 0.214879, 0.437342],
+ [0.095645, 0.217643, 0.436593],
+ [0.103401, 0.220406, 0.435790],
+ [0.110658, 0.223170, 0.435067],
+ [0.117612, 0.225935, 0.434308],
+ [0.124291, 0.228697, 0.433547],
+ [0.130669, 0.231458, 0.432840],
+ [0.136830, 0.234216, 0.432148],
+ [0.142852, 0.236972, 0.431404],
+ [0.148638, 0.239724, 0.430752],
+ [0.154261, 0.242475, 0.430120],
+ [0.159733, 0.245221, 0.429528],
+ [0.165113, 0.247965, 0.428908],
+ [0.170362, 0.250707, 0.428325],
+ [0.175490, 0.253444, 0.427790],
+ [0.180503, 0.256180, 0.427299],
+ [0.185453, 0.258914, 0.426788],
+ [0.190303, 0.261644, 0.426329],
+ [0.195057, 0.264372, 0.425924],
+ [0.199764, 0.267099, 0.425497],
+ [0.204385, 0.269823, 0.425126],
+ [0.208926, 0.272546, 0.424809],
+ [0.213431, 0.275266, 0.424480],
+ [0.217863, 0.277985, 0.424206],
+ [0.222264, 0.280702, 0.423914],
+ [0.226598, 0.283419, 0.423678],
+ [0.230871, 0.286134, 0.423498],
+ [0.235120, 0.288848, 0.423304],
+ [0.239312, 0.291562, 0.423167],
+ [0.243485, 0.294274, 0.423014],
+ [0.247605, 0.296986, 0.422917],
+ [0.251675, 0.299698, 0.422873],
+ [0.255731, 0.302409, 0.422814],
+ [0.259740, 0.305120, 0.422810],
+ [0.263738, 0.307831, 0.422789],
+ [0.267693, 0.310542, 0.422821],
+ [0.271639, 0.313253, 0.422837],
+ [0.275513, 0.315965, 0.422979],
+ [0.279411, 0.318677, 0.423031],
+ [0.283240, 0.321390, 0.423211],
+ [0.287065, 0.324103, 0.423373],
+ [0.290884, 0.326816, 0.423517],
+ [0.294669, 0.329531, 0.423716],
+ [0.298421, 0.332247, 0.423973],
+ [0.302169, 0.334963, 0.424213],
+ [0.305886, 0.337681, 0.424512],
+ [0.309601, 0.340399, 0.424790],
+ [0.313287, 0.343120, 0.425120],
+ [0.316941, 0.345842, 0.425512],
+ [0.320595, 0.348565, 0.425889],
+ [0.324250, 0.351289, 0.426250],
+ [0.327875, 0.354016, 0.426670],
+ [0.331474, 0.356744, 0.427144],
+ [0.335073, 0.359474, 0.427605],
+ [0.338673, 0.362206, 0.428053],
+ [0.342246, 0.364939, 0.428559],
+ [0.345793, 0.367676, 0.429127],
+ [0.349341, 0.370414, 0.429685],
+ [0.352892, 0.373153, 0.430226],
+ [0.356418, 0.375896, 0.430823],
+ [0.359916, 0.378641, 0.431501],
+ [0.363446, 0.381388, 0.432075],
+ [0.366923, 0.384139, 0.432796],
+ [0.370430, 0.386890, 0.433428],
+ [0.373884, 0.389646, 0.434209],
+ [0.377371, 0.392404, 0.434890],
+ [0.380830, 0.395164, 0.435653],
+ [0.384268, 0.397928, 0.436475],
+ [0.387705, 0.400694, 0.437305],
+ [0.391151, 0.403464, 0.438096],
+ [0.394568, 0.406236, 0.438986],
+ [0.397991, 0.409011, 0.439848],
+ [0.401418, 0.411790, 0.440708],
+ [0.404820, 0.414572, 0.441642],
+ [0.408226, 0.417357, 0.442570],
+ [0.411607, 0.420145, 0.443577],
+ [0.414992, 0.422937, 0.444578],
+ [0.418383, 0.425733, 0.445560],
+ [0.421748, 0.428531, 0.446640],
+ [0.425120, 0.431334, 0.447692],
+ [0.428462, 0.434140, 0.448864],
+ [0.431817, 0.436950, 0.449982],
+ [0.435168, 0.439763, 0.451134],
+ [0.438504, 0.442580, 0.452341],
+ [0.441810, 0.445402, 0.453659],
+ [0.445148, 0.448226, 0.454885],
+ [0.448447, 0.451053, 0.456264],
+ [0.451759, 0.453887, 0.457582],
+ [0.455072, 0.456718, 0.458976],
+ [0.458366, 0.459552, 0.460457],
+ [0.461616, 0.462405, 0.461969],
+ [0.464947, 0.465241, 0.463395],
+ [0.468254, 0.468083, 0.464908],
+ [0.471501, 0.470960, 0.466357],
+ [0.474812, 0.473832, 0.467681],
+ [0.478186, 0.476699, 0.468845],
+ [0.481622, 0.479573, 0.469767],
+ [0.485141, 0.482451, 0.470384],
+ [0.488697, 0.485318, 0.471008],
+ [0.492278, 0.488198, 0.471453],
+ [0.495913, 0.491076, 0.471751],
+ [0.499552, 0.493960, 0.472032],
+ [0.503185, 0.496851, 0.472305],
+ [0.506866, 0.499743, 0.472432],
+ [0.510540, 0.502643, 0.472550],
+ [0.514226, 0.505546, 0.472640],
+ [0.517920, 0.508454, 0.472707],
+ [0.521643, 0.511367, 0.472639],
+ [0.525348, 0.514285, 0.472660],
+ [0.529086, 0.517207, 0.472543],
+ [0.532829, 0.520135, 0.472401],
+ [0.536553, 0.523067, 0.472352],
+ [0.540307, 0.526005, 0.472163],
+ [0.544069, 0.528948, 0.471947],
+ [0.547840, 0.531895, 0.471704],
+ [0.551612, 0.534849, 0.471439],
+ [0.555393, 0.537807, 0.471147],
+ [0.559181, 0.540771, 0.470829],
+ [0.562972, 0.543741, 0.470488],
+ [0.566802, 0.546715, 0.469988],
+ [0.570607, 0.549695, 0.469593],
+ [0.574417, 0.552682, 0.469172],
+ [0.578236, 0.555673, 0.468724],
+ [0.582087, 0.558670, 0.468118],
+ [0.585916, 0.561674, 0.467618],
+ [0.589753, 0.564682, 0.467090],
+ [0.593622, 0.567697, 0.466401],
+ [0.597469, 0.570718, 0.465821],
+ [0.601354, 0.573743, 0.465074],
+ [0.605211, 0.576777, 0.464441],
+ [0.609105, 0.579816, 0.463638],
+ [0.612977, 0.582861, 0.462950],
+ [0.616852, 0.585913, 0.462237],
+ [0.620765, 0.588970, 0.461351],
+ [0.624654, 0.592034, 0.460583],
+ [0.628576, 0.595104, 0.459641],
+ [0.632506, 0.598180, 0.458668],
+ [0.636412, 0.601264, 0.457818],
+ [0.640352, 0.604354, 0.456791],
+ [0.644270, 0.607450, 0.455886],
+ [0.648222, 0.610553, 0.454801],
+ [0.652178, 0.613664, 0.453689],
+ [0.656114, 0.616780, 0.452702],
+ [0.660082, 0.619904, 0.451534],
+ [0.664055, 0.623034, 0.450338],
+ [0.668008, 0.626171, 0.449270],
+ [0.671991, 0.629316, 0.448018],
+ [0.675981, 0.632468, 0.446736],
+ [0.679979, 0.635626, 0.445424],
+ [0.683950, 0.638793, 0.444251],
+ [0.687957, 0.641966, 0.442886],
+ [0.691971, 0.645145, 0.441491],
+ [0.695985, 0.648334, 0.440072],
+ [0.700008, 0.651529, 0.438624],
+ [0.704037, 0.654731, 0.437147],
+ [0.708067, 0.657942, 0.435647],
+ [0.712105, 0.661160, 0.434117],
+ [0.716177, 0.664384, 0.432386],
+ [0.720222, 0.667618, 0.430805],
+ [0.724274, 0.670859, 0.429194],
+ [0.728334, 0.674107, 0.427554],
+ [0.732422, 0.677364, 0.425717],
+ [0.736488, 0.680629, 0.424028],
+ [0.740589, 0.683900, 0.422131],
+ [0.744664, 0.687181, 0.420393],
+ [0.748772, 0.690470, 0.418448],
+ [0.752886, 0.693766, 0.416472],
+ [0.756975, 0.697071, 0.414659],
+ [0.761096, 0.700384, 0.412638],
+ [0.765223, 0.703705, 0.410587],
+ [0.769353, 0.707035, 0.408516],
+ [0.773486, 0.710373, 0.406422],
+ [0.777651, 0.713719, 0.404112],
+ [0.781795, 0.717074, 0.401966],
+ [0.785965, 0.720438, 0.399613],
+ [0.790116, 0.723810, 0.397423],
+ [0.794298, 0.727190, 0.395016],
+ [0.798480, 0.730580, 0.392597],
+ [0.802667, 0.733978, 0.390153],
+ [0.806859, 0.737385, 0.387684],
+ [0.811054, 0.740801, 0.385198],
+ [0.815274, 0.744226, 0.382504],
+ [0.819499, 0.747659, 0.379785],
+ [0.823729, 0.751101, 0.377043],
+ [0.827959, 0.754553, 0.374292],
+ [0.832192, 0.758014, 0.371529],
+ [0.836429, 0.761483, 0.368747],
+ [0.840693, 0.764962, 0.365746],
+ [0.844957, 0.768450, 0.362741],
+ [0.849223, 0.771947, 0.359729],
+ [0.853515, 0.775454, 0.356500],
+ [0.857809, 0.778969, 0.353259],
+ [0.862105, 0.782494, 0.350011],
+ [0.866421, 0.786028, 0.346571],
+ [0.870717, 0.789572, 0.343333],
+ [0.875057, 0.793125, 0.339685],
+ [0.879378, 0.796687, 0.336241],
+ [0.883720, 0.800258, 0.332599],
+ [0.888081, 0.803839, 0.328770],
+ [0.892440, 0.807430, 0.324968],
+ [0.896818, 0.811030, 0.320982],
+ [0.901195, 0.814639, 0.317021],
+ [0.905589, 0.818257, 0.312889],
+ [0.910000, 0.821885, 0.308594],
+ [0.914407, 0.825522, 0.304348],
+ [0.918828, 0.829168, 0.299960],
+ [0.923279, 0.832822, 0.295244],
+ [0.927724, 0.836486, 0.290611],
+ [0.932180, 0.840159, 0.285880],
+ [0.936660, 0.843841, 0.280876],
+ [0.941147, 0.847530, 0.275815],
+ [0.945654, 0.851228, 0.270532],
+ [0.950178, 0.854933, 0.265085],
+ [0.954725, 0.858646, 0.259365],
+ [0.959284, 0.862365, 0.253563],
+ [0.963872, 0.866089, 0.247445],
+ [0.968469, 0.869819, 0.241310],
+ [0.973114, 0.873550, 0.234677],
+ [0.977780, 0.877281, 0.227954],
+ [0.982497, 0.881008, 0.220878],
+ [0.987293, 0.884718, 0.213336],
+ [0.992218, 0.888385, 0.205468],
+ [0.994847, 0.892954, 0.203445],
+ [0.995249, 0.898384, 0.207561],
+ [0.995503, 0.903866, 0.212370],
+ [0.995737, 0.909344, 0.217772]]
+
+cmaps = {}
+for (name, data) in (('magma', _magma_data),
+ ('inferno', _inferno_data),
+ ('plasma', _plasma_data),
+ ('viridis', _viridis_data),
+ ('cividis', _cividis_data)):
+
+ cmaps[name] = ListedColormap(data, name=name)
+ # generate reversed colormap
+ name = name + '_r'
+ cmaps[name] = ListedColormap(list(reversed(data)), name=name)
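+
+# Usage sketch (editorial, not in the upstream file): after the loop above,
+# cmaps holds both orientations of each listed colormap, e.g.:
+#
+# >>> cmaps['viridis'].N
+# 256
+# >>> cmaps['viridis_r'](0.0) == cmaps['viridis'](1.0)
+# True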
diff --git a/contrib/python/matplotlib/py2/matplotlib/_color_data.py b/contrib/python/matplotlib/py2/matplotlib/_color_data.py
new file mode 100644
index 00000000000..774e251d72b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/_color_data.py
@@ -0,0 +1,1147 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+from collections import OrderedDict
+import six
+
+
+BASE_COLORS = {
+ 'b': (0, 0, 1),
+ 'g': (0, 0.5, 0),
+ 'r': (1, 0, 0),
+ 'c': (0, 0.75, 0.75),
+ 'm': (0.75, 0, 0.75),
+ 'y': (0.75, 0.75, 0),
+ 'k': (0, 0, 0),
+ 'w': (1, 1, 1)}
+
+
+# These colors are from Tableau
+TABLEAU_COLORS = (
+ ('blue', '#1f77b4'),
+ ('orange', '#ff7f0e'),
+ ('green', '#2ca02c'),
+ ('red', '#d62728'),
+ ('purple', '#9467bd'),
+ ('brown', '#8c564b'),
+ ('pink', '#e377c2'),
+ ('gray', '#7f7f7f'),
+ ('olive', '#bcbd22'),
+ ('cyan', '#17becf'),
+)
+
+# Normalize the names to "tab:<name>" to avoid name collisions.
+TABLEAU_COLORS = OrderedDict(
+ ('tab:' + name, value) for name, value in TABLEAU_COLORS)
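+
+# Illustrative example (editorial, not in the original file): after the
+# normalization above the palette is keyed by the prefixed names:
+#
+# >>> TABLEAU_COLORS['tab:blue'] == '#1f77b4'
+# True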
+
+# This mapping of color names -> hex values is taken from
+# a survey run by Randall Munroe; see:
+# http://blog.xkcd.com/2010/05/03/color-survey-results/
+# for more details. The results are hosted at
+# https://xkcd.com/color/rgb.txt
+#
+# License: http://creativecommons.org/publicdomain/zero/1.0/
+XKCD_COLORS = {
+ 'cloudy blue': '#acc2d9',
+ 'dark pastel green': '#56ae57',
+ 'dust': '#b2996e',
+ 'electric lime': '#a8ff04',
+ 'fresh green': '#69d84f',
+ 'light eggplant': '#894585',
+ 'nasty green': '#70b23f',
+ 'really light blue': '#d4ffff',
+ 'tea': '#65ab7c',
+ 'warm purple': '#952e8f',
+ 'yellowish tan': '#fcfc81',
+ 'cement': '#a5a391',
+ 'dark grass green': '#388004',
+ 'dusty teal': '#4c9085',
+ 'grey teal': '#5e9b8a',
+ 'macaroni and cheese': '#efb435',
+ 'pinkish tan': '#d99b82',
+ 'spruce': '#0a5f38',
+ 'strong blue': '#0c06f7',
+ 'toxic green': '#61de2a',
+ 'windows blue': '#3778bf',
+ 'blue blue': '#2242c7',
+ 'blue with a hint of purple': '#533cc6',
+ 'booger': '#9bb53c',
+ 'bright sea green': '#05ffa6',
+ 'dark green blue': '#1f6357',
+ 'deep turquoise': '#017374',
+ 'green teal': '#0cb577',
+ 'strong pink': '#ff0789',
+ 'bland': '#afa88b',
+ 'deep aqua': '#08787f',
+ 'lavender pink': '#dd85d7',
+ 'light moss green': '#a6c875',
+ 'light seafoam green': '#a7ffb5',
+ 'olive yellow': '#c2b709',
+ 'pig pink': '#e78ea5',
+ 'deep lilac': '#966ebd',
+ 'desert': '#ccad60',
+ 'dusty lavender': '#ac86a8',
+ 'purpley grey': '#947e94',
+ 'purply': '#983fb2',
+ 'candy pink': '#ff63e9',
+ 'light pastel green': '#b2fba5',
+ 'boring green': '#63b365',
+ 'kiwi green': '#8ee53f',
+ 'light grey green': '#b7e1a1',
+ 'orange pink': '#ff6f52',
+ 'tea green': '#bdf8a3',
+ 'very light brown': '#d3b683',
+ 'egg shell': '#fffcc4',
+ 'eggplant purple': '#430541',
+ 'powder pink': '#ffb2d0',
+ 'reddish grey': '#997570',
+ 'baby shit brown': '#ad900d',
+ 'liliac': '#c48efd',
+ 'stormy blue': '#507b9c',
+ 'ugly brown': '#7d7103',
+ 'custard': '#fffd78',
+ 'darkish pink': '#da467d',
+ 'deep brown': '#410200',
+ 'greenish beige': '#c9d179',
+ 'manilla': '#fffa86',
+ 'off blue': '#5684ae',
+ 'battleship grey': '#6b7c85',
+ 'browny green': '#6f6c0a',
+ 'bruise': '#7e4071',
+ 'kelley green': '#009337',
+ 'sickly yellow': '#d0e429',
+ 'sunny yellow': '#fff917',
+ 'azul': '#1d5dec',
+ 'darkgreen': '#054907',
+ 'green/yellow': '#b5ce08',
+ 'lichen': '#8fb67b',
+ 'light light green': '#c8ffb0',
+ 'pale gold': '#fdde6c',
+ 'sun yellow': '#ffdf22',
+ 'tan green': '#a9be70',
+ 'burple': '#6832e3',
+ 'butterscotch': '#fdb147',
+ 'toupe': '#c7ac7d',
+ 'dark cream': '#fff39a',
+ 'indian red': '#850e04',
+ 'light lavendar': '#efc0fe',
+ 'poison green': '#40fd14',
+ 'baby puke green': '#b6c406',
+ 'bright yellow green': '#9dff00',
+ 'charcoal grey': '#3c4142',
+ 'squash': '#f2ab15',
+ 'cinnamon': '#ac4f06',
+ 'light pea green': '#c4fe82',
+ 'radioactive green': '#2cfa1f',
+ 'raw sienna': '#9a6200',
+ 'baby purple': '#ca9bf7',
+ 'cocoa': '#875f42',
+ 'light royal blue': '#3a2efe',
+ 'orangeish': '#fd8d49',
+ 'rust brown': '#8b3103',
+ 'sand brown': '#cba560',
+ 'swamp': '#698339',
+ 'tealish green': '#0cdc73',
+ 'burnt siena': '#b75203',
+ 'camo': '#7f8f4e',
+ 'dusk blue': '#26538d',
+ 'fern': '#63a950',
+ 'old rose': '#c87f89',
+ 'pale light green': '#b1fc99',
+ 'peachy pink': '#ff9a8a',
+ 'rosy pink': '#f6688e',
+ 'light bluish green': '#76fda8',
+ 'light bright green': '#53fe5c',
+ 'light neon green': '#4efd54',
+ 'light seafoam': '#a0febf',
+ 'tiffany blue': '#7bf2da',
+ 'washed out green': '#bcf5a6',
+ 'browny orange': '#ca6b02',
+ 'nice blue': '#107ab0',
+ 'sapphire': '#2138ab',
+ 'greyish teal': '#719f91',
+ 'orangey yellow': '#fdb915',
+ 'parchment': '#fefcaf',
+ 'straw': '#fcf679',
+ 'very dark brown': '#1d0200',
+ 'terracota': '#cb6843',
+ 'ugly blue': '#31668a',
+ 'clear blue': '#247afd',
+ 'creme': '#ffffb6',
+ 'foam green': '#90fda9',
+ 'grey/green': '#86a17d',
+ 'light gold': '#fddc5c',
+ 'seafoam blue': '#78d1b6',
+ 'topaz': '#13bbaf',
+ 'violet pink': '#fb5ffc',
+ 'wintergreen': '#20f986',
+ 'yellow tan': '#ffe36e',
+ 'dark fuchsia': '#9d0759',
+ 'indigo blue': '#3a18b1',
+ 'light yellowish green': '#c2ff89',
+ 'pale magenta': '#d767ad',
+ 'rich purple': '#720058',
+ 'sunflower yellow': '#ffda03',
+ 'green/blue': '#01c08d',
+ 'leather': '#ac7434',
+ 'racing green': '#014600',
+ 'vivid purple': '#9900fa',
+ 'dark royal blue': '#02066f',
+ 'hazel': '#8e7618',
+ 'muted pink': '#d1768f',
+ 'booger green': '#96b403',
+ 'canary': '#fdff63',
+ 'cool grey': '#95a3a6',
+ 'dark taupe': '#7f684e',
+ 'darkish purple': '#751973',
+ 'true green': '#089404',
+ 'coral pink': '#ff6163',
+ 'dark sage': '#598556',
+ 'dark slate blue': '#214761',
+ 'flat blue': '#3c73a8',
+ 'mushroom': '#ba9e88',
+ 'rich blue': '#021bf9',
+ 'dirty purple': '#734a65',
+ 'greenblue': '#23c48b',
+ 'icky green': '#8fae22',
+ 'light khaki': '#e6f2a2',
+ 'warm blue': '#4b57db',
+ 'dark hot pink': '#d90166',
+ 'deep sea blue': '#015482',
+ 'carmine': '#9d0216',
+ 'dark yellow green': '#728f02',
+ 'pale peach': '#ffe5ad',
+ 'plum purple': '#4e0550',
+ 'golden rod': '#f9bc08',
+ 'neon red': '#ff073a',
+ 'old pink': '#c77986',
+ 'very pale blue': '#d6fffe',
+ 'blood orange': '#fe4b03',
+ 'grapefruit': '#fd5956',
+ 'sand yellow': '#fce166',
+ 'clay brown': '#b2713d',
+ 'dark blue grey': '#1f3b4d',
+ 'flat green': '#699d4c',
+ 'light green blue': '#56fca2',
+ 'warm pink': '#fb5581',
+ 'dodger blue': '#3e82fc',
+ 'gross green': '#a0bf16',
+ 'ice': '#d6fffa',
+ 'metallic blue': '#4f738e',
+ 'pale salmon': '#ffb19a',
+ 'sap green': '#5c8b15',
+ 'algae': '#54ac68',
+ 'bluey grey': '#89a0b0',
+ 'greeny grey': '#7ea07a',
+ 'highlighter green': '#1bfc06',
+ 'light light blue': '#cafffb',
+ 'light mint': '#b6ffbb',
+ 'raw umber': '#a75e09',
+ 'vivid blue': '#152eff',
+ 'deep lavender': '#8d5eb7',
+ 'dull teal': '#5f9e8f',
+ 'light greenish blue': '#63f7b4',
+ 'mud green': '#606602',
+ 'pinky': '#fc86aa',
+ 'red wine': '#8c0034',
+ 'shit green': '#758000',
+ 'tan brown': '#ab7e4c',
+ 'darkblue': '#030764',
+ 'rosa': '#fe86a4',
+ 'lipstick': '#d5174e',
+ 'pale mauve': '#fed0fc',
+ 'claret': '#680018',
+ 'dandelion': '#fedf08',
+ 'orangered': '#fe420f',
+ 'poop green': '#6f7c00',
+ 'ruby': '#ca0147',
+ 'dark': '#1b2431',
+ 'greenish turquoise': '#00fbb0',
+ 'pastel red': '#db5856',
+ 'piss yellow': '#ddd618',
+ 'bright cyan': '#41fdfe',
+ 'dark coral': '#cf524e',
+ 'algae green': '#21c36f',
+ 'darkish red': '#a90308',
+ 'reddy brown': '#6e1005',
+ 'blush pink': '#fe828c',
+ 'camouflage green': '#4b6113',
+ 'lawn green': '#4da409',
+ 'putty': '#beae8a',
+ 'vibrant blue': '#0339f8',
+ 'dark sand': '#a88f59',
+ 'purple/blue': '#5d21d0',
+ 'saffron': '#feb209',
+ 'twilight': '#4e518b',
+ 'warm brown': '#964e02',
+ 'bluegrey': '#85a3b2',
+ 'bubble gum pink': '#ff69af',
+ 'duck egg blue': '#c3fbf4',
+ 'greenish cyan': '#2afeb7',
+ 'petrol': '#005f6a',
+ 'royal': '#0c1793',
+ 'butter': '#ffff81',
+ 'dusty orange': '#f0833a',
+ 'off yellow': '#f1f33f',
+ 'pale olive green': '#b1d27b',
+ 'orangish': '#fc824a',
+ 'leaf': '#71aa34',
+ 'light blue grey': '#b7c9e2',
+ 'dried blood': '#4b0101',
+ 'lightish purple': '#a552e6',
+ 'rusty red': '#af2f0d',
+ 'lavender blue': '#8b88f8',
+ 'light grass green': '#9af764',
+ 'light mint green': '#a6fbb2',
+ 'sunflower': '#ffc512',
+ 'velvet': '#750851',
+ 'brick orange': '#c14a09',
+ 'lightish red': '#fe2f4a',
+ 'pure blue': '#0203e2',
+ 'twilight blue': '#0a437a',
+ 'violet red': '#a50055',
+ 'yellowy brown': '#ae8b0c',
+ 'carnation': '#fd798f',
+ 'muddy yellow': '#bfac05',
+ 'dark seafoam green': '#3eaf76',
+ 'deep rose': '#c74767',
+ 'dusty red': '#b9484e',
+ 'grey/blue': '#647d8e',
+ 'lemon lime': '#bffe28',
+ 'purple/pink': '#d725de',
+ 'brown yellow': '#b29705',
+ 'purple brown': '#673a3f',
+ 'wisteria': '#a87dc2',
+ 'banana yellow': '#fafe4b',
+ 'lipstick red': '#c0022f',
+ 'water blue': '#0e87cc',
+ 'brown grey': '#8d8468',
+ 'vibrant purple': '#ad03de',
+ 'baby green': '#8cff9e',
+ 'barf green': '#94ac02',
+ 'eggshell blue': '#c4fff7',
+ 'sandy yellow': '#fdee73',
+ 'cool green': '#33b864',
+ 'pale': '#fff9d0',
+ 'blue/grey': '#758da3',
+ 'hot magenta': '#f504c9',
+ 'greyblue': '#77a1b5',
+ 'purpley': '#8756e4',
+ 'baby shit green': '#889717',
+ 'brownish pink': '#c27e79',
+ 'dark aquamarine': '#017371',
+ 'diarrhea': '#9f8303',
+ 'light mustard': '#f7d560',
+ 'pale sky blue': '#bdf6fe',
+ 'turtle green': '#75b84f',
+ 'bright olive': '#9cbb04',
+ 'dark grey blue': '#29465b',
+ 'greeny brown': '#696006',
+ 'lemon green': '#adf802',
+ 'light periwinkle': '#c1c6fc',
+ 'seaweed green': '#35ad6b',
+ 'sunshine yellow': '#fffd37',
+ 'ugly purple': '#a442a0',
+ 'medium pink': '#f36196',
+ 'puke brown': '#947706',
+ 'very light pink': '#fff4f2',
+ 'viridian': '#1e9167',
+ 'bile': '#b5c306',
+ 'faded yellow': '#feff7f',
+ 'very pale green': '#cffdbc',
+ 'vibrant green': '#0add08',
+ 'bright lime': '#87fd05',
+ 'spearmint': '#1ef876',
+ 'light aquamarine': '#7bfdc7',
+ 'light sage': '#bcecac',
+ 'yellowgreen': '#bbf90f',
+ 'baby poo': '#ab9004',
+ 'dark seafoam': '#1fb57a',
+ 'deep teal': '#00555a',
+ 'heather': '#a484ac',
+ 'rust orange': '#c45508',
+ 'dirty blue': '#3f829d',
+ 'fern green': '#548d44',
+ 'bright lilac': '#c95efb',
+ 'weird green': '#3ae57f',
+ 'peacock blue': '#016795',
+ 'avocado green': '#87a922',
+ 'faded orange': '#f0944d',
+ 'grape purple': '#5d1451',
+ 'hot green': '#25ff29',
+ 'lime yellow': '#d0fe1d',
+ 'mango': '#ffa62b',
+ 'shamrock': '#01b44c',
+ 'bubblegum': '#ff6cb5',
+ 'purplish brown': '#6b4247',
+ 'vomit yellow': '#c7c10c',
+ 'pale cyan': '#b7fffa',
+ 'key lime': '#aeff6e',
+ 'tomato red': '#ec2d01',
+ 'lightgreen': '#76ff7b',
+ 'merlot': '#730039',
+ 'night blue': '#040348',
+ 'purpleish pink': '#df4ec8',
+ 'apple': '#6ecb3c',
+ 'baby poop green': '#8f9805',
+ 'green apple': '#5edc1f',
+ 'heliotrope': '#d94ff5',
+ 'yellow/green': '#c8fd3d',
+ 'almost black': '#070d0d',
+ 'cool blue': '#4984b8',
+ 'leafy green': '#51b73b',
+ 'mustard brown': '#ac7e04',
+ 'dusk': '#4e5481',
+ 'dull brown': '#876e4b',
+ 'frog green': '#58bc08',
+ 'vivid green': '#2fef10',
+ 'bright light green': '#2dfe54',
+ 'fluro green': '#0aff02',
+ 'kiwi': '#9cef43',
+ 'seaweed': '#18d17b',
+ 'navy green': '#35530a',
+ 'ultramarine blue': '#1805db',
+ 'iris': '#6258c4',
+ 'pastel orange': '#ff964f',
+ 'yellowish orange': '#ffab0f',
+ 'perrywinkle': '#8f8ce7',
+ 'tealish': '#24bca8',
+ 'dark plum': '#3f012c',
+ 'pear': '#cbf85f',
+ 'pinkish orange': '#ff724c',
+ 'midnight purple': '#280137',
+ 'light urple': '#b36ff6',
+ 'dark mint': '#48c072',
+ 'greenish tan': '#bccb7a',
+ 'light burgundy': '#a8415b',
+ 'turquoise blue': '#06b1c4',
+ 'ugly pink': '#cd7584',
+ 'sandy': '#f1da7a',
+ 'electric pink': '#ff0490',
+ 'muted purple': '#805b87',
+ 'mid green': '#50a747',
+ 'greyish': '#a8a495',
+ 'neon yellow': '#cfff04',
+ 'banana': '#ffff7e',
+ 'carnation pink': '#ff7fa7',
+ 'tomato': '#ef4026',
+ 'sea': '#3c9992',
+ 'muddy brown': '#886806',
+ 'turquoise green': '#04f489',
+ 'buff': '#fef69e',
+ 'fawn': '#cfaf7b',
+ 'muted blue': '#3b719f',
+ 'pale rose': '#fdc1c5',
+ 'dark mint green': '#20c073',
+ 'amethyst': '#9b5fc0',
+ 'blue/green': '#0f9b8e',
+ 'chestnut': '#742802',
+ 'sick green': '#9db92c',
+ 'pea': '#a4bf20',
+ 'rusty orange': '#cd5909',
+ 'stone': '#ada587',
+ 'rose red': '#be013c',
+ 'pale aqua': '#b8ffeb',
+ 'deep orange': '#dc4d01',
+ 'earth': '#a2653e',
+ 'mossy green': '#638b27',
+ 'grassy green': '#419c03',
+ 'pale lime green': '#b1ff65',
+ 'light grey blue': '#9dbcd4',
+ 'pale grey': '#fdfdfe',
+ 'asparagus': '#77ab56',
+ 'blueberry': '#464196',
+ 'purple red': '#990147',
+ 'pale lime': '#befd73',
+ 'greenish teal': '#32bf84',
+ 'caramel': '#af6f09',
+ 'deep magenta': '#a0025c',
+ 'light peach': '#ffd8b1',
+ 'milk chocolate': '#7f4e1e',
+ 'ocher': '#bf9b0c',
+ 'off green': '#6ba353',
+ 'purply pink': '#f075e6',
+ 'lightblue': '#7bc8f6',
+ 'dusky blue': '#475f94',
+ 'golden': '#f5bf03',
+ 'light beige': '#fffeb6',
+ 'butter yellow': '#fffd74',
+ 'dusky purple': '#895b7b',
+ 'french blue': '#436bad',
+ 'ugly yellow': '#d0c101',
+ 'greeny yellow': '#c6f808',
+ 'orangish red': '#f43605',
+ 'shamrock green': '#02c14d',
+ 'orangish brown': '#b25f03',
+ 'tree green': '#2a7e19',
+ 'deep violet': '#490648',
+ 'gunmetal': '#536267',
+ 'blue/purple': '#5a06ef',
+ 'cherry': '#cf0234',
+ 'sandy brown': '#c4a661',
+ 'warm grey': '#978a84',
+ 'dark indigo': '#1f0954',
+ 'midnight': '#03012d',
+ 'bluey green': '#2bb179',
+ 'grey pink': '#c3909b',
+ 'soft purple': '#a66fb5',
+ 'blood': '#770001',
+ 'brown red': '#922b05',
+ 'medium grey': '#7d7f7c',
+ 'berry': '#990f4b',
+ 'poo': '#8f7303',
+ 'purpley pink': '#c83cb9',
+ 'light salmon': '#fea993',
+ 'snot': '#acbb0d',
+ 'easter purple': '#c071fe',
+ 'light yellow green': '#ccfd7f',
+ 'dark navy blue': '#00022e',
+ 'drab': '#828344',
+ 'light rose': '#ffc5cb',
+ 'rouge': '#ab1239',
+ 'purplish red': '#b0054b',
+ 'slime green': '#99cc04',
+ 'baby poop': '#937c00',
+ 'irish green': '#019529',
+ 'pink/purple': '#ef1de7',
+ 'dark navy': '#000435',
+ 'greeny blue': '#42b395',
+ 'light plum': '#9d5783',
+ 'pinkish grey': '#c8aca9',
+ 'dirty orange': '#c87606',
+ 'rust red': '#aa2704',
+ 'pale lilac': '#e4cbff',
+ 'orangey red': '#fa4224',
+ 'primary blue': '#0804f9',
+ 'kermit green': '#5cb200',
+ 'brownish purple': '#76424e',
+ 'murky green': '#6c7a0e',
+ 'wheat': '#fbdd7e',
+ 'very dark purple': '#2a0134',
+ 'bottle green': '#044a05',
+ 'watermelon': '#fd4659',
+ 'deep sky blue': '#0d75f8',
+ 'fire engine red': '#fe0002',
+ 'yellow ochre': '#cb9d06',
+ 'pumpkin orange': '#fb7d07',
+ 'pale olive': '#b9cc81',
+ 'light lilac': '#edc8ff',
+ 'lightish green': '#61e160',
+ 'carolina blue': '#8ab8fe',
+ 'mulberry': '#920a4e',
+ 'shocking pink': '#fe02a2',
+ 'auburn': '#9a3001',
+ 'bright lime green': '#65fe08',
+ 'celadon': '#befdb7',
+ 'pinkish brown': '#b17261',
+ 'poo brown': '#885f01',
+ 'bright sky blue': '#02ccfe',
+ 'celery': '#c1fd95',
+ 'dirt brown': '#836539',
+ 'strawberry': '#fb2943',
+ 'dark lime': '#84b701',
+ 'copper': '#b66325',
+ 'medium brown': '#7f5112',
+ 'muted green': '#5fa052',
+ "robin's egg": '#6dedfd',
+ 'bright aqua': '#0bf9ea',
+ 'bright lavender': '#c760ff',
+ 'ivory': '#ffffcb',
+ 'very light purple': '#f6cefc',
+ 'light navy': '#155084',
+ 'pink red': '#f5054f',
+ 'olive brown': '#645403',
+ 'poop brown': '#7a5901',
+ 'mustard green': '#a8b504',
+ 'ocean green': '#3d9973',
+ 'very dark blue': '#000133',
+ 'dusty green': '#76a973',
+ 'light navy blue': '#2e5a88',
+ 'minty green': '#0bf77d',
+ 'adobe': '#bd6c48',
+ 'barney': '#ac1db8',
+ 'jade green': '#2baf6a',
+ 'bright light blue': '#26f7fd',
+ 'light lime': '#aefd6c',
+ 'dark khaki': '#9b8f55',
+ 'orange yellow': '#ffad01',
+ 'ocre': '#c69c04',
+ 'maize': '#f4d054',
+ 'faded pink': '#de9dac',
+ 'british racing green': '#05480d',
+ 'sandstone': '#c9ae74',
+ 'mud brown': '#60460f',
+ 'light sea green': '#98f6b0',
+ 'robin egg blue': '#8af1fe',
+ 'aqua marine': '#2ee8bb',
+ 'dark sea green': '#11875d',
+ 'soft pink': '#fdb0c0',
+ 'orangey brown': '#b16002',
+ 'cherry red': '#f7022a',
+ 'burnt yellow': '#d5ab09',
+ 'brownish grey': '#86775f',
+ 'camel': '#c69f59',
+ 'purplish grey': '#7a687f',
+ 'marine': '#042e60',
+ 'greyish pink': '#c88d94',
+ 'pale turquoise': '#a5fbd5',
+ 'pastel yellow': '#fffe71',
+ 'bluey purple': '#6241c7',
+ 'canary yellow': '#fffe40',
+ 'faded red': '#d3494e',
+ 'sepia': '#985e2b',
+ 'coffee': '#a6814c',
+ 'bright magenta': '#ff08e8',
+ 'mocha': '#9d7651',
+ 'ecru': '#feffca',
+ 'purpleish': '#98568d',
+ 'cranberry': '#9e003a',
+ 'darkish green': '#287c37',
+ 'brown orange': '#b96902',
+ 'dusky rose': '#ba6873',
+ 'melon': '#ff7855',
+ 'sickly green': '#94b21c',
+ 'silver': '#c5c9c7',
+ 'purply blue': '#661aee',
+ 'purpleish blue': '#6140ef',
+ 'hospital green': '#9be5aa',
+ 'shit brown': '#7b5804',
+ 'mid blue': '#276ab3',
+ 'amber': '#feb308',
+ 'easter green': '#8cfd7e',
+ 'soft blue': '#6488ea',
+ 'cerulean blue': '#056eee',
+ 'golden brown': '#b27a01',
+ 'bright turquoise': '#0ffef9',
+ 'red pink': '#fa2a55',
+ 'red purple': '#820747',
+ 'greyish brown': '#7a6a4f',
+ 'vermillion': '#f4320c',
+ 'russet': '#a13905',
+ 'steel grey': '#6f828a',
+ 'lighter purple': '#a55af4',
+ 'bright violet': '#ad0afd',
+ 'prussian blue': '#004577',
+ 'slate green': '#658d6d',
+ 'dirty pink': '#ca7b80',
+ 'dark blue green': '#005249',
+ 'pine': '#2b5d34',
+ 'yellowy green': '#bff128',
+ 'dark gold': '#b59410',
+ 'bluish': '#2976bb',
+ 'darkish blue': '#014182',
+ 'dull red': '#bb3f3f',
+ 'pinky red': '#fc2647',
+ 'bronze': '#a87900',
+ 'pale teal': '#82cbb2',
+ 'military green': '#667c3e',
+ 'barbie pink': '#fe46a5',
+ 'bubblegum pink': '#fe83cc',
+ 'pea soup green': '#94a617',
+ 'dark mustard': '#a88905',
+ 'shit': '#7f5f00',
+ 'medium purple': '#9e43a2',
+ 'very dark green': '#062e03',
+ 'dirt': '#8a6e45',
+ 'dusky pink': '#cc7a8b',
+ 'red violet': '#9e0168',
+ 'lemon yellow': '#fdff38',
+ 'pistachio': '#c0fa8b',
+ 'dull yellow': '#eedc5b',
+ 'dark lime green': '#7ebd01',
+ 'denim blue': '#3b5b92',
+ 'teal blue': '#01889f',
+ 'lightish blue': '#3d7afd',
+ 'purpley blue': '#5f34e7',
+ 'light indigo': '#6d5acf',
+ 'swamp green': '#748500',
+ 'brown green': '#706c11',
+ 'dark maroon': '#3c0008',
+ 'hot purple': '#cb00f5',
+ 'dark forest green': '#002d04',
+ 'faded blue': '#658cbb',
+ 'drab green': '#749551',
+ 'light lime green': '#b9ff66',
+ 'snot green': '#9dc100',
+ 'yellowish': '#faee66',
+ 'light blue green': '#7efbb3',
+ 'bordeaux': '#7b002c',
+ 'light mauve': '#c292a1',
+ 'ocean': '#017b92',
+ 'marigold': '#fcc006',
+ 'muddy green': '#657432',
+ 'dull orange': '#d8863b',
+ 'steel': '#738595',
+ 'electric purple': '#aa23ff',
+ 'fluorescent green': '#08ff08',
+ 'yellowish brown': '#9b7a01',
+ 'blush': '#f29e8e',
+ 'soft green': '#6fc276',
+ 'bright orange': '#ff5b00',
+ 'lemon': '#fdff52',
+ 'purple grey': '#866f85',
+ 'acid green': '#8ffe09',
+ 'pale lavender': '#eecffe',
+ 'violet blue': '#510ac9',
+ 'light forest green': '#4f9153',
+ 'burnt red': '#9f2305',
+ 'khaki green': '#728639',
+ 'cerise': '#de0c62',
+ 'faded purple': '#916e99',
+ 'apricot': '#ffb16d',
+ 'dark olive green': '#3c4d03',
+ 'grey brown': '#7f7053',
+ 'green grey': '#77926f',
+ 'true blue': '#010fcc',
+ 'pale violet': '#ceaefa',
+ 'periwinkle blue': '#8f99fb',
+ 'light sky blue': '#c6fcff',
+ 'blurple': '#5539cc',
+ 'green brown': '#544e03',
+ 'bluegreen': '#017a79',
+ 'bright teal': '#01f9c6',
+ 'brownish yellow': '#c9b003',
+ 'pea soup': '#929901',
+ 'forest': '#0b5509',
+ 'barney purple': '#a00498',
+ 'ultramarine': '#2000b1',
+ 'purplish': '#94568c',
+ 'puke yellow': '#c2be0e',
+ 'bluish grey': '#748b97',
+ 'dark periwinkle': '#665fd1',
+ 'dark lilac': '#9c6da5',
+ 'reddish': '#c44240',
+ 'light maroon': '#a24857',
+ 'dusty purple': '#825f87',
+ 'terra cotta': '#c9643b',
+ 'avocado': '#90b134',
+ 'marine blue': '#01386a',
+ 'teal green': '#25a36f',
+ 'slate grey': '#59656d',
+ 'lighter green': '#75fd63',
+ 'electric green': '#21fc0d',
+ 'dusty blue': '#5a86ad',
+ 'golden yellow': '#fec615',
+ 'bright yellow': '#fffd01',
+ 'light lavender': '#dfc5fe',
+ 'umber': '#b26400',
+ 'poop': '#7f5e00',
+ 'dark peach': '#de7e5d',
+ 'jungle green': '#048243',
+ 'eggshell': '#ffffd4',
+ 'denim': '#3b638c',
+ 'yellow brown': '#b79400',
+ 'dull purple': '#84597e',
+ 'chocolate brown': '#411900',
+ 'wine red': '#7b0323',
+ 'neon blue': '#04d9ff',
+ 'dirty green': '#667e2c',
+ 'light tan': '#fbeeac',
+ 'ice blue': '#d7fffe',
+ 'cadet blue': '#4e7496',
+ 'dark mauve': '#874c62',
+ 'very light blue': '#d5ffff',
+ 'grey purple': '#826d8c',
+ 'pastel pink': '#ffbacd',
+ 'very light green': '#d1ffbd',
+ 'dark sky blue': '#448ee4',
+ 'evergreen': '#05472a',
+ 'dull pink': '#d5869d',
+ 'aubergine': '#3d0734',
+ 'mahogany': '#4a0100',
+ 'reddish orange': '#f8481c',
+ 'deep green': '#02590f',
+ 'vomit green': '#89a203',
+ 'purple pink': '#e03fd8',
+ 'dusty pink': '#d58a94',
+ 'faded green': '#7bb274',
+ 'camo green': '#526525',
+ 'pinky purple': '#c94cbe',
+ 'pink purple': '#db4bda',
+ 'brownish red': '#9e3623',
+ 'dark rose': '#b5485d',
+ 'mud': '#735c12',
+ 'brownish': '#9c6d57',
+ 'emerald green': '#028f1e',
+ 'pale brown': '#b1916e',
+ 'dull blue': '#49759c',
+ 'burnt umber': '#a0450e',
+ 'medium green': '#39ad48',
+ 'clay': '#b66a50',
+ 'light aqua': '#8cffdb',
+ 'light olive green': '#a4be5c',
+ 'brownish orange': '#cb7723',
+ 'dark aqua': '#05696b',
+ 'purplish pink': '#ce5dae',
+ 'dark salmon': '#c85a53',
+ 'greenish grey': '#96ae8d',
+ 'jade': '#1fa774',
+ 'ugly green': '#7a9703',
+ 'dark beige': '#ac9362',
+ 'emerald': '#01a049',
+ 'pale red': '#d9544d',
+ 'light magenta': '#fa5ff7',
+ 'sky': '#82cafc',
+ 'light cyan': '#acfffc',
+ 'yellow orange': '#fcb001',
+ 'reddish purple': '#910951',
+ 'reddish pink': '#fe2c54',
+ 'orchid': '#c875c4',
+ 'dirty yellow': '#cdc50a',
+ 'orange red': '#fd411e',
+ 'deep red': '#9a0200',
+ 'orange brown': '#be6400',
+ 'cobalt blue': '#030aa7',
+ 'neon pink': '#fe019a',
+ 'rose pink': '#f7879a',
+ 'greyish purple': '#887191',
+ 'raspberry': '#b00149',
+ 'aqua green': '#12e193',
+ 'salmon pink': '#fe7b7c',
+ 'tangerine': '#ff9408',
+ 'brownish green': '#6a6e09',
+ 'red brown': '#8b2e16',
+ 'greenish brown': '#696112',
+ 'pumpkin': '#e17701',
+ 'pine green': '#0a481e',
+ 'charcoal': '#343837',
+ 'baby pink': '#ffb7ce',
+ 'cornflower': '#6a79f7',
+ 'blue violet': '#5d06e9',
+ 'chocolate': '#3d1c02',
+ 'greyish green': '#82a67d',
+ 'scarlet': '#be0119',
+ 'green yellow': '#c9ff27',
+ 'dark olive': '#373e02',
+ 'sienna': '#a9561e',
+ 'pastel purple': '#caa0ff',
+ 'terracotta': '#ca6641',
+ 'aqua blue': '#02d8e9',
+ 'sage green': '#88b378',
+ 'blood red': '#980002',
+ 'deep pink': '#cb0162',
+ 'grass': '#5cac2d',
+ 'moss': '#769958',
+ 'pastel blue': '#a2bffe',
+ 'bluish green': '#10a674',
+ 'green blue': '#06b48b',
+ 'dark tan': '#af884a',
+ 'greenish blue': '#0b8b87',
+ 'pale orange': '#ffa756',
+ 'vomit': '#a2a415',
+ 'forrest green': '#154406',
+ 'dark lavender': '#856798',
+ 'dark violet': '#34013f',
+ 'purple blue': '#632de9',
+ 'dark cyan': '#0a888a',
+ 'olive drab': '#6f7632',
+ 'pinkish': '#d46a7e',
+ 'cobalt': '#1e488f',
+ 'neon purple': '#bc13fe',
+ 'light turquoise': '#7ef4cc',
+ 'apple green': '#76cd26',
+ 'dull green': '#74a662',
+ 'wine': '#80013f',
+ 'powder blue': '#b1d1fc',
+ 'off white': '#ffffe4',
+ 'electric blue': '#0652ff',
+ 'dark turquoise': '#045c5a',
+ 'blue purple': '#5729ce',
+ 'azure': '#069af3',
+ 'bright red': '#ff000d',
+ 'pinkish red': '#f10c45',
+ 'cornflower blue': '#5170d7',
+ 'light olive': '#acbf69',
+ 'grape': '#6c3461',
+ 'greyish blue': '#5e819d',
+ 'purplish blue': '#601ef9',
+ 'yellowish green': '#b0dd16',
+ 'greenish yellow': '#cdfd02',
+ 'medium blue': '#2c6fbb',
+ 'dusty rose': '#c0737a',
+ 'light violet': '#d6b4fc',
+ 'midnight blue': '#020035',
+ 'bluish purple': '#703be7',
+ 'red orange': '#fd3c06',
+ 'dark magenta': '#960056',
+ 'greenish': '#40a368',
+ 'ocean blue': '#03719c',
+ 'coral': '#fc5a50',
+ 'cream': '#ffffc2',
+ 'reddish brown': '#7f2b0a',
+ 'burnt sienna': '#b04e0f',
+ 'brick': '#a03623',
+ 'sage': '#87ae73',
+ 'grey green': '#789b73',
+ 'white': '#ffffff',
+ "robin's egg blue": '#98eff9',
+ 'moss green': '#658b38',
+ 'steel blue': '#5a7d9a',
+ 'eggplant': '#380835',
+ 'light yellow': '#fffe7a',
+ 'leaf green': '#5ca904',
+ 'light grey': '#d8dcd6',
+ 'puke': '#a5a502',
+ 'pinkish purple': '#d648d7',
+ 'sea blue': '#047495',
+ 'pale purple': '#b790d4',
+ 'slate blue': '#5b7c99',
+ 'blue grey': '#607c8e',
+ 'hunter green': '#0b4008',
+ 'fuchsia': '#ed0dd9',
+ 'crimson': '#8c000f',
+ 'pale yellow': '#ffff84',
+ 'ochre': '#bf9005',
+ 'mustard yellow': '#d2bd0a',
+ 'light red': '#ff474c',
+ 'cerulean': '#0485d1',
+ 'pale pink': '#ffcfdc',
+ 'deep blue': '#040273',
+ 'rust': '#a83c09',
+ 'light teal': '#90e4c1',
+ 'slate': '#516572',
+ 'goldenrod': '#fac205',
+ 'dark yellow': '#d5b60a',
+ 'dark grey': '#363737',
+ 'army green': '#4b5d16',
+ 'grey blue': '#6b8ba4',
+ 'seafoam': '#80f9ad',
+ 'puce': '#a57e52',
+ 'spring green': '#a9f971',
+ 'dark orange': '#c65102',
+ 'sand': '#e2ca76',
+ 'pastel green': '#b0ff9d',
+ 'mint': '#9ffeb0',
+ 'light orange': '#fdaa48',
+ 'bright pink': '#fe01b1',
+ 'chartreuse': '#c1f80a',
+ 'deep purple': '#36013f',
+ 'dark brown': '#341c02',
+ 'taupe': '#b9a281',
+ 'pea green': '#8eab12',
+ 'puke green': '#9aae07',
+ 'kelly green': '#02ab2e',
+ 'seafoam green': '#7af9ab',
+ 'blue green': '#137e6d',
+ 'khaki': '#aaa662',
+ 'burgundy': '#610023',
+ 'dark teal': '#014d4e',
+ 'brick red': '#8f1402',
+ 'royal purple': '#4b006e',
+ 'plum': '#580f41',
+ 'mint green': '#8fff9f',
+ 'gold': '#dbb40c',
+ 'baby blue': '#a2cffe',
+ 'yellow green': '#c0fb2d',
+ 'bright purple': '#be03fd',
+ 'dark red': '#840000',
+ 'pale blue': '#d0fefe',
+ 'grass green': '#3f9b0b',
+ 'navy': '#01153e',
+ 'aquamarine': '#04d8b2',
+ 'burnt orange': '#c04e01',
+ 'neon green': '#0cff0c',
+ 'bright blue': '#0165fc',
+ 'rose': '#cf6275',
+ 'light pink': '#ffd1df',
+ 'mustard': '#ceb301',
+ 'indigo': '#380282',
+ 'lime': '#aaff32',
+ 'sea green': '#53fca1',
+ 'periwinkle': '#8e82fe',
+ 'dark pink': '#cb416b',
+ 'olive green': '#677a04',
+ 'peach': '#ffb07c',
+ 'pale green': '#c7fdb5',
+ 'light brown': '#ad8150',
+ 'hot pink': '#ff028d',
+ 'black': '#000000',
+ 'lilac': '#cea2fd',
+ 'navy blue': '#001146',
+ 'royal blue': '#0504aa',
+ 'beige': '#e6daa6',
+ 'salmon': '#ff796c',
+ 'olive': '#6e750e',
+ 'maroon': '#650021',
+ 'bright green': '#01ff07',
+ 'dark purple': '#35063e',
+ 'mauve': '#ae7181',
+ 'forest green': '#06470c',
+ 'aqua': '#13eac9',
+ 'cyan': '#00ffff',
+ 'tan': '#d1b26f',
+ 'dark blue': '#00035b',
+ 'lavender': '#c79fef',
+ 'turquoise': '#06c2ac',
+ 'dark green': '#033500',
+ 'violet': '#9a0eea',
+ 'light purple': '#bf77f6',
+ 'lime green': '#89fe05',
+ 'grey': '#929591',
+ 'sky blue': '#75bbfd',
+ 'yellow': '#ffff14',
+ 'magenta': '#c20078',
+ 'light green': '#96f97b',
+ 'orange': '#f97306',
+ 'teal': '#029386',
+ 'light blue': '#95d0fc',
+ 'red': '#e50000',
+ 'brown': '#653700',
+ 'pink': '#ff81c0',
+ 'blue': '#0343df',
+ 'green': '#15b01a',
+ 'purple': '#7e1e9c'}
+
+# Normalize name to "xkcd:<name>" to avoid name collisions.
+XKCD_COLORS = {'xkcd:' + name: value for name, value in XKCD_COLORS.items()}
+
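+# For example (illustrative only), both prefixed tables are then addressed
+# through their namespaces:
+#
+#     TABLEAU_COLORS['tab:blue']        # -> '#1f77b4'
+#     XKCD_COLORS['xkcd:cloudy blue']   # -> '#acc2d9'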
+
+# https://drafts.csswg.org/css-color-4/#named-colors
+CSS4_COLORS = {
+ 'aliceblue': '#F0F8FF',
+ 'antiquewhite': '#FAEBD7',
+ 'aqua': '#00FFFF',
+ 'aquamarine': '#7FFFD4',
+ 'azure': '#F0FFFF',
+ 'beige': '#F5F5DC',
+ 'bisque': '#FFE4C4',
+ 'black': '#000000',
+ 'blanchedalmond': '#FFEBCD',
+ 'blue': '#0000FF',
+ 'blueviolet': '#8A2BE2',
+ 'brown': '#A52A2A',
+ 'burlywood': '#DEB887',
+ 'cadetblue': '#5F9EA0',
+ 'chartreuse': '#7FFF00',
+ 'chocolate': '#D2691E',
+ 'coral': '#FF7F50',
+ 'cornflowerblue': '#6495ED',
+ 'cornsilk': '#FFF8DC',
+ 'crimson': '#DC143C',
+ 'cyan': '#00FFFF',
+ 'darkblue': '#00008B',
+ 'darkcyan': '#008B8B',
+ 'darkgoldenrod': '#B8860B',
+ 'darkgray': '#A9A9A9',
+ 'darkgreen': '#006400',
+ 'darkgrey': '#A9A9A9',
+ 'darkkhaki': '#BDB76B',
+ 'darkmagenta': '#8B008B',
+ 'darkolivegreen': '#556B2F',
+ 'darkorange': '#FF8C00',
+ 'darkorchid': '#9932CC',
+ 'darkred': '#8B0000',
+ 'darksalmon': '#E9967A',
+ 'darkseagreen': '#8FBC8F',
+ 'darkslateblue': '#483D8B',
+ 'darkslategray': '#2F4F4F',
+ 'darkslategrey': '#2F4F4F',
+ 'darkturquoise': '#00CED1',
+ 'darkviolet': '#9400D3',
+ 'deeppink': '#FF1493',
+ 'deepskyblue': '#00BFFF',
+ 'dimgray': '#696969',
+ 'dimgrey': '#696969',
+ 'dodgerblue': '#1E90FF',
+ 'firebrick': '#B22222',
+ 'floralwhite': '#FFFAF0',
+ 'forestgreen': '#228B22',
+ 'fuchsia': '#FF00FF',
+ 'gainsboro': '#DCDCDC',
+ 'ghostwhite': '#F8F8FF',
+ 'gold': '#FFD700',
+ 'goldenrod': '#DAA520',
+ 'gray': '#808080',
+ 'green': '#008000',
+ 'greenyellow': '#ADFF2F',
+ 'grey': '#808080',
+ 'honeydew': '#F0FFF0',
+ 'hotpink': '#FF69B4',
+ 'indianred': '#CD5C5C',
+ 'indigo': '#4B0082',
+ 'ivory': '#FFFFF0',
+ 'khaki': '#F0E68C',
+ 'lavender': '#E6E6FA',
+ 'lavenderblush': '#FFF0F5',
+ 'lawngreen': '#7CFC00',
+ 'lemonchiffon': '#FFFACD',
+ 'lightblue': '#ADD8E6',
+ 'lightcoral': '#F08080',
+ 'lightcyan': '#E0FFFF',
+ 'lightgoldenrodyellow': '#FAFAD2',
+ 'lightgray': '#D3D3D3',
+ 'lightgreen': '#90EE90',
+ 'lightgrey': '#D3D3D3',
+ 'lightpink': '#FFB6C1',
+ 'lightsalmon': '#FFA07A',
+ 'lightseagreen': '#20B2AA',
+ 'lightskyblue': '#87CEFA',
+ 'lightslategray': '#778899',
+ 'lightslategrey': '#778899',
+ 'lightsteelblue': '#B0C4DE',
+ 'lightyellow': '#FFFFE0',
+ 'lime': '#00FF00',
+ 'limegreen': '#32CD32',
+ 'linen': '#FAF0E6',
+ 'magenta': '#FF00FF',
+ 'maroon': '#800000',
+ 'mediumaquamarine': '#66CDAA',
+ 'mediumblue': '#0000CD',
+ 'mediumorchid': '#BA55D3',
+ 'mediumpurple': '#9370DB',
+ 'mediumseagreen': '#3CB371',
+ 'mediumslateblue': '#7B68EE',
+ 'mediumspringgreen': '#00FA9A',
+ 'mediumturquoise': '#48D1CC',
+ 'mediumvioletred': '#C71585',
+ 'midnightblue': '#191970',
+ 'mintcream': '#F5FFFA',
+ 'mistyrose': '#FFE4E1',
+ 'moccasin': '#FFE4B5',
+ 'navajowhite': '#FFDEAD',
+ 'navy': '#000080',
+ 'oldlace': '#FDF5E6',
+ 'olive': '#808000',
+ 'olivedrab': '#6B8E23',
+ 'orange': '#FFA500',
+ 'orangered': '#FF4500',
+ 'orchid': '#DA70D6',
+ 'palegoldenrod': '#EEE8AA',
+ 'palegreen': '#98FB98',
+ 'paleturquoise': '#AFEEEE',
+ 'palevioletred': '#DB7093',
+ 'papayawhip': '#FFEFD5',
+ 'peachpuff': '#FFDAB9',
+ 'peru': '#CD853F',
+ 'pink': '#FFC0CB',
+ 'plum': '#DDA0DD',
+ 'powderblue': '#B0E0E6',
+ 'purple': '#800080',
+ 'rebeccapurple': '#663399',
+ 'red': '#FF0000',
+ 'rosybrown': '#BC8F8F',
+ 'royalblue': '#4169E1',
+ 'saddlebrown': '#8B4513',
+ 'salmon': '#FA8072',
+ 'sandybrown': '#F4A460',
+ 'seagreen': '#2E8B57',
+ 'seashell': '#FFF5EE',
+ 'sienna': '#A0522D',
+ 'silver': '#C0C0C0',
+ 'skyblue': '#87CEEB',
+ 'slateblue': '#6A5ACD',
+ 'slategray': '#708090',
+ 'slategrey': '#708090',
+ 'snow': '#FFFAFA',
+ 'springgreen': '#00FF7F',
+ 'steelblue': '#4682B4',
+ 'tan': '#D2B48C',
+ 'teal': '#008080',
+ 'thistle': '#D8BFD8',
+ 'tomato': '#FF6347',
+ 'turquoise': '#40E0D0',
+ 'violet': '#EE82EE',
+ 'wheat': '#F5DEB3',
+ 'white': '#FFFFFF',
+ 'whitesmoke': '#F5F5F5',
+ 'yellow': '#FFFF00',
+ 'yellowgreen': '#9ACD32'}
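+
+# A rough consolidated lookup could be built from the tables above (sketch
+# only; matplotlib's colors module performs its own merging):
+#
+#     named = dict(CSS4_COLORS)
+#     named.update(XKCD_COLORS)
+#     named.update(TABLEAU_COLORS)
+#     named['rebeccapurple'], named['xkcd:teal'], named['tab:blue']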
diff --git a/contrib/python/matplotlib/py2/matplotlib/_constrained_layout.py b/contrib/python/matplotlib/py2/matplotlib/_constrained_layout.py
new file mode 100644
index 00000000000..b8325f5d636
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/_constrained_layout.py
@@ -0,0 +1,666 @@
+"""
+This module provides the routine to adjust subplot layouts so that there are
+no overlapping axes or axes decorations. All axes decorations are dealt with
+(labels, ticks, titles, ticklabels) and some dependent artists are also dealt
+with (colorbar, suptitle, legend).
+
+Layout is done via :mod:`~matplotlib.gridspec`, with one constraint per
+gridspec, so it is possible to have overlapping axes if the gridspecs
+overlap (i.e. using :class:`~matplotlib.gridspec.GridSpecFromSubplotSpec`).
+Axes placed using ``figure.subplots()`` or ``figure.add_subplot()`` will
+participate in the layout. Axes manually placed via ``figure.add_axes()``
+will not.
+
+See Tutorial: :doc:`/tutorials/intermediate/constrainedlayout_guide`
+
+"""
+
+# Development Notes:
+
+# What gets a layoutbox:
+# - figure
+# - gridspec
+# - subplotspec
+# EITHER:
+# - axes + pos for the axes (i.e. the total area taken by axis and
+# the actual "position" argument that needs to be sent to
+# ax.set_position.)
+# - The axes layout box will also encompass the legend, and that is
+# how legends get included (axes legends, not figure legends)
+# - colorbars are siblings of the axes if they are single-axes
+# colorbars
+# OR:
+# - a gridspec can be inside a subplotspec.
+# - subplotspec
+# EITHER:
+# - axes...
+# OR:
+# - gridspec... with arbitrary nesting...
+# - colorbars are siblings of the subplotspecs if they are multi-axes
+# colorbars.
+# - suptitle:
+# - right now suptitles are just stacked atop everything else in figure.
+# Could imagine suptitles being gridspec suptitles, but not implimented
+#
+# Todo: AnchoredOffsetbox connected to gridspecs or axes. This would
+# be a more general way to add extra-axes annotations.
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import numpy as np
+import logging
+import warnings
+
+from matplotlib.legend import Legend
+import matplotlib.transforms as transforms
+import matplotlib._layoutbox as layoutbox
+
+_log = logging.getLogger(__name__)
+
+
+def get_axall_tightbbox(ax, renderer):
+ '''
+ Get the tight_bbox of the axis ax, and any dependent decorations, like
+ a `Legend` instance.
+ '''
+
+ # main bbox of the axis....
+ bbox = ax.get_tightbbox(renderer=renderer)
+ # now add the possibility of the legend...
+ for child in ax.get_children():
+ if isinstance(child, Legend):
+ bboxn = child._legend_box.get_window_extent(renderer)
+ bbox = transforms.Bbox.union([bbox, bboxn])
+ # add other children here....
+ return bbox
+
+
+def in_same_column(colnum0min, colnum0max, colnumCmin, colnumCmax):
+ if colnum0min >= colnumCmin and colnum0min <= colnumCmax:
+ return True
+ if colnum0max >= colnumCmin and colnum0max <= colnumCmax:
+ return True
+ return False
+
+
+def in_same_row(rownum0min, rownum0max, rownumCmin, rownumCmax):
+ if rownum0min >= rownumCmin and rownum0min <= rownumCmax:
+ return True
+ if rownum0max >= rownumCmin and rownum0max <= rownumCmax:
+ return True
+ return False
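+
+# For example (inclusive gridspec indices; values are illustrative only):
+#
+#     in_same_column(0, 0, 0, 1)   # True  -- span [0, 0] lies inside [0, 1]
+#     in_same_column(2, 2, 0, 1)   # False -- the column spans do not overlap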
+
+
+######################################################
+def do_constrained_layout(fig, renderer, h_pad, w_pad,
+ hspace=None, wspace=None):
+
+ """
+ Do the constrained_layout. Called at draw time in
+ ``figure.constrained_layout()``
+
+ Parameters
+ ----------
+
+
+ fig: Figure
+ is the ``figure`` instance to do the layout in.
+
+ renderer: Renderer
+ the renderer to use.
+
+ h_pad, w_pad : float
+ are in figure-normalized units, and are a padding around the axes
+ elements.
+
+ hspace, wspace : float
+ are in fractions of the subplot sizes.
+
+ """
+
+ ''' Steps:
+
+ 1. get a list of unique gridspecs in this figure. Each gridspec will be
+ constrained separately.
+ 2. Check for gaps in the gridspecs. i.e. if not every axes slot in the
+ gridspec has been filled. If empty, add a ghost axis that is made so
+ that it cannot be seen (though visible=True). This is needed to make
+ a blank spot in the layout.
+ 3. Compare the tight_bbox of each axes to its `position`, and assume that
+ the difference is the space needed by the elements around the edge of
+ the axes (decorations) like the title, ticklabels, x-labels, etc. This
+ can include legends that spill over the axes boundaries.
+ 4. Constrain gridspec elements to line up:
+ a) if colnum0 neq colnumC, the two subplotspecs are stacked next to
+ each other, with the appropriate order.
+ b) if colnum0 == colnumC, line up the left or right side of the
+ _poslayoutbox (depending if it is the min or max num that is equal).
+ c) do the same for rows...
+ 5. The above doesn't constrain relative sizes of the _poslayoutboxes at
+ all, and indeed zero-size is a solution that the solver often finds more
+ convenient than expanding the sizes. Right now the solution is to compare
+ subplotspec sizes (i.e. drowsC and drows0) and constrain the larger
+ _poslayoutbox to be at least as large as the ratio of the sizes, i.e. if
+ drows0 > drowsC, then ax._poslayoutbox > axc._poslayoutbox * drows0 / drowsC. This
+ works fine *if* the decorations are similar between the axes. If the
+ larger subplotspec has much larger axes decorations, then the constraint
+ above is incorrect.
+
+ We need the greater than in the above, in general, rather than an equals
+ sign. Consider the case of the left column having 2 rows, and the right
+ column having 1 row. We want the top and bottom of the _poslayoutboxes to
+ line up. So that means that, if there are decorations on the left column
+ axes, those axes will end up smaller than half the size of the right hand axis.
+
+ This can break down if the decoration size for the right hand axis (the
+ margins) is very large. There must be a mathematical way to check for this case.
+
+ '''
+
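+ # Sketch of how this routine is typically reached (the exact entry point
+ # depends on the Figure implementation; the snippet is illustrative only):
+ #
+ #     fig, axs = plt.subplots(2, 2, constrained_layout=True)
+ #     fig.canvas.draw()   # drawing triggers the constrained-layout pass,
+ #                         # which ends up calling do_constrained_layout()
+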
+ invTransFig = fig.transFigure.inverted().transform_bbox
+
+ # list of unique gridspecs that contain child axes:
+ gss = set([])
+ for ax in fig.axes:
+ if hasattr(ax, 'get_subplotspec'):
+ gs = ax.get_subplotspec().get_gridspec()
+ if gs._layoutbox is not None:
+ gss.add(gs)
+ if len(gss) == 0:
+ warnings.warn('There are no gridspecs with layoutboxes. '
+ 'Possibly did not call parent GridSpec with the figure= '
+ 'keyword')
+
+ # check for unoccupied gridspec slots and make ghost axes for these
+ # slots... Do for each gs separately. This is a pretty big kludge
+ # but shouldn't have too much ill effect. The worst is that
+ # someone querying the figure will wonder why there are more
+ # axes than they thought.
+ if fig._layoutbox.constrained_layout_called < 1:
+ for gs in gss:
+ nrows, ncols = gs.get_geometry()
+ hassubplotspec = np.zeros(nrows * ncols, dtype=bool)
+ axs = []
+ for ax in fig.axes:
+ if (hasattr(ax, 'get_subplotspec')
+ and ax._layoutbox is not None
+ and ax.get_subplotspec().get_gridspec() == gs):
+ axs += [ax]
+ for ax in axs:
+ ss0 = ax.get_subplotspec()
+ if ss0.num2 is None:
+ ss0.num2 = ss0.num1
+ hassubplotspec[ss0.num1:(ss0.num2 + 1)] = True
+ for nn, hss in enumerate(hassubplotspec):
+ if not hss:
+ # this gridspec slot doesn't have an axis so we
+ # make a "ghost".
+ ax = fig.add_subplot(gs[nn])
+ ax.set_frame_on(False)
+ ax.set_xticks([])
+ ax.set_yticks([])
+ ax.set_facecolor((1, 0, 0, 0))
+
+ # for each axes, make a margin between the *pos* layoutbox and the
+ # *axes* layoutbox be a minimum size that can accommodate the
+ # decorations on the axis.
+ for ax in fig.axes:
+ _log.debug(ax._layoutbox)
+ if ax._layoutbox is not None:
+ pos = ax.get_position(original=True)
+ tightbbox = get_axall_tightbbox(ax, renderer)
+ bbox = invTransFig(tightbbox)
+ # use stored h_pad if it exists
+ h_padt = ax._poslayoutbox.h_pad
+ if h_padt is None:
+ h_padt = h_pad
+ w_padt = ax._poslayoutbox.w_pad
+ if w_padt is None:
+ w_padt = w_pad
+ ax._poslayoutbox.edit_left_margin_min(-bbox.x0 +
+ pos.x0 + w_padt)
+ ax._poslayoutbox.edit_right_margin_min(bbox.x1 -
+ pos.x1 + w_padt)
+ ax._poslayoutbox.edit_bottom_margin_min(
+ -bbox.y0 + pos.y0 + h_padt)
+ ax._poslayoutbox.edit_top_margin_min(bbox.y1-pos.y1+h_padt)
+ _log.debug('left %f', (-bbox.x0 + pos.x0 + w_pad))
+ _log.debug('right %f', (bbox.x1 - pos.x1 + w_pad))
+ _log.debug('bottom %f', (-bbox.y0 + pos.y0 + h_padt))
+ # Sometimes it's possible for the solver to collapse
+ # rather than expand axes, so they all have zero height
+ # or width. This stops that... It *should* have been
+ # taken into account w/ pref_width...
+ if fig._layoutbox.constrained_layout_called < 1:
+ ax._poslayoutbox.constrain_height_min(20, strength='weak')
+ ax._poslayoutbox.constrain_width_min(20, strength='weak')
+ ax._layoutbox.constrain_height_min(20, strength='weak')
+ ax._layoutbox.constrain_width_min(20, strength='weak')
+ ax._poslayoutbox.constrain_top_margin(0, strength='weak')
+ ax._poslayoutbox.constrain_bottom_margin(0,
+ strength='weak')
+ ax._poslayoutbox.constrain_right_margin(0, strength='weak')
+ ax._poslayoutbox.constrain_left_margin(0, strength='weak')
+
+ # do layout for suptitle.
+ if fig._suptitle is not None and fig._suptitle._layoutbox is not None:
+ sup = fig._suptitle
+ bbox = invTransFig(sup.get_window_extent(renderer=renderer))
+ height = bbox.y1 - bbox.y0
+ sup._layoutbox.edit_height(height+h_pad)
+
+ # OK, the above lines up ax._poslayoutbox with ax._layoutbox
+ # now we need to
+ # 1) arrange the subplotspecs. We do it at this level because
+ # the subplotspecs are meant to contain other dependent axes
+ # like colorbars or legends.
+ # 2) line up the right and left side of the ax._poslayoutbox
+ # that have the same subplotspec maxes.
+
+ if fig._layoutbox.constrained_layout_called < 1:
+
+ # arrange the subplotspecs... This is all done relative to each
+ # other. Some subplotspecs contain axes, and others contain gridspecs;
+ # the ones that contain gridspecs are a set proportion of their
+ # parent gridspec. The ones that contain axes are not so constrained.
+ figlb = fig._layoutbox
+ for child in figlb.children:
+ if child._is_gridspec_layoutbox():
+ # farm the gridspec layout out.
+ #
+ # This routine makes all the subplot spec containers
+ # have the correct arrangement. It just stacks the
+ # subplot layoutboxes in the correct order...
+ _arange_subplotspecs(child, hspace=hspace, wspace=wspace)
+
+ # - Align right/left and bottom/top spines of appropriate subplots.
+ # - Compare size of subplotspec including height and width ratios
+ # and make sure that the axes spines are at least as large
+ # as they should be.
+ for gs in gss:
+ # for each gridspec...
+ nrows, ncols = gs.get_geometry()
+ width_ratios = gs.get_width_ratios()
+ height_ratios = gs.get_height_ratios()
+ if width_ratios is None:
+ width_ratios = np.ones(ncols)
+ if height_ratios is None:
+ height_ratios = np.ones(nrows)
+
+ # get axes in this gridspec....
+ axs = []
+ for ax in fig.axes:
+ if (hasattr(ax, 'get_subplotspec')
+ and ax._layoutbox is not None):
+ if ax.get_subplotspec().get_gridspec() == gs:
+ axs += [ax]
+ rownummin = np.zeros(len(axs), dtype=np.int8)
+ rownummax = np.zeros(len(axs), dtype=np.int8)
+ colnummin = np.zeros(len(axs), dtype=np.int8)
+ colnummax = np.zeros(len(axs), dtype=np.int8)
+ width = np.zeros(len(axs))
+ height = np.zeros(len(axs))
+
+ for n, ax in enumerate(axs):
+ ss0 = ax.get_subplotspec()
+ if ss0.num2 is None:
+ ss0.num2 = ss0.num1
+ rownummin[n], colnummin[n] = divmod(ss0.num1, ncols)
+ rownummax[n], colnummax[n] = divmod(ss0.num2, ncols)
+ width[n] = np.sum(
+ width_ratios[colnummin[n]:(colnummax[n] + 1)])
+ height[n] = np.sum(
+ height_ratios[rownummin[n]:(rownummax[n] + 1)])
+
+ for nn, ax in enumerate(axs[:-1]):
+ ss0 = ax.get_subplotspec()
+
+ # now compare ax to all the axs:
+ #
+ # If the subplotspecs have the same colnumXmax, then line
+ # up their right sides. If they have the same min, then
+ # line up their left sides (and vertical equivalents).
+ rownum0min, colnum0min = rownummin[nn], colnummin[nn]
+ rownum0max, colnum0max = rownummax[nn], colnummax[nn]
+ width0, height0 = width[nn], height[nn]
+ alignleft = False
+ alignright = False
+ alignbot = False
+ aligntop = False
+ alignheight = False
+ alignwidth = False
+ for mm in range(nn+1, len(axs)):
+ axc = axs[mm]
+ rownumCmin, colnumCmin = rownummin[mm], colnummin[mm]
+ rownumCmax, colnumCmax = rownummax[mm], colnummax[mm]
+ widthC, heightC = width[mm], height[mm]
+ # Horizontally align axes spines if they have the
+ # same min or max:
+ if not alignleft and colnum0min == colnumCmin:
+ # we want the _poslayoutboxes to line up on left
+ # side of the axes spines...
+ layoutbox.align([ax._poslayoutbox,
+ axc._poslayoutbox],
+ 'left')
+ alignleft = True
+
+ if not alignright and colnum0max == colnumCmax:
+ # line up right sides of _poslayoutbox
+ layoutbox.align([ax._poslayoutbox,
+ axc._poslayoutbox],
+ 'right')
+ alignright = True
+ # Vertically align axes spines if they have the
+ # same min or max:
+ if not aligntop and rownum0min == rownumCmin:
+ # line up top of _poslayoutbox
+ _log.debug('rownum0min == rownumCmin')
+ layoutbox.align([ax._poslayoutbox, axc._poslayoutbox],
+ 'top')
+ aligntop = True
+
+ if not alignbot and rownum0max == rownumCmax:
+ # line up bottom of _poslayoutbox
+ _log.debug('rownum0max == rownumCmax')
+ layoutbox.align([ax._poslayoutbox, axc._poslayoutbox],
+ 'bottom')
+ alignbot = True
+ ###########
+ # Now we make the widths and heights of position boxes
+ # similar. (i.e the spine locations)
+ # This allows vertically stacked subplots to have
+ # different sizes if they occupy different amounts
+ # of the gridspec: i.e.
+ # gs = gridspec.GridSpec(3,1)
+ # ax1 = gs[0,:]
+ # ax2 = gs[1:,:]
+ # then drows0 = 1, and drowsC = 2, and ax2
+ # should be at least twice as large as ax1.
+ # But it can be more than twice as large because
+ # it needs less room for the labeling.
+ #
+ # For height, this only needs to be done if the
+ # subplots share a column. For width if they
+ # share a row.
+
+ drowsC = (rownumCmax - rownumCmin + 1)
+ drows0 = (rownum0max - rownum0min + 1)
+ dcolsC = (colnumCmax - colnumCmin + 1)
+ dcols0 = (colnum0max - colnum0min + 1)
+
+ if not alignheight and drows0 == drowsC:
+ ax._poslayoutbox.constrain_height(
+ axc._poslayoutbox.height * height0 / heightC)
+ alignheight = True
+ elif in_same_column(colnum0min, colnum0max,
+ colnumCmin, colnumCmax):
+ if height0 > heightC:
+ ax._poslayoutbox.constrain_height_min(
+ axc._poslayoutbox.height * height0 / heightC)
+ # these constraints stop the smaller axes from
+ # being allowed to go to zero height...
+ axc._poslayoutbox.constrain_height_min(
+ ax._poslayoutbox.height * heightC /
+ (height0*1.8))
+ elif height0 < heightC:
+ axc._poslayoutbox.constrain_height_min(
+ ax._poslayoutbox.height * heightC / height0)
+ ax._poslayoutbox.constrain_height_min(
+ ax._poslayoutbox.height * height0 /
+ (heightC*1.8))
+ # widths...
+ if not alignwidth and dcols0 == dcolsC:
+ ax._poslayoutbox.constrain_width(
+ axc._poslayoutbox.width * width0 / widthC)
+ alignwidth = True
+ elif in_same_row(rownum0min, rownum0max,
+ rownumCmin, rownumCmax):
+ if width0 > widthC:
+ ax._poslayoutbox.constrain_width_min(
+ axc._poslayoutbox.width * width0 / widthC)
+ axc._poslayoutbox.constrain_width_min(
+ ax._poslayoutbox.width * widthC /
+ (width0*1.8))
+ elif width0 < widthC:
+ axc._poslayoutbox.constrain_width_min(
+ ax._poslayoutbox.width * widthC / width0)
+ ax._poslayoutbox.constrain_width_min(
+ axc._poslayoutbox.width * width0 /
+ (widthC*1.8))
+
+ fig._layoutbox.constrained_layout_called += 1
+ fig._layoutbox.update_variables()
+ # Now set the position of the axes...
+ for ax in fig.axes:
+ if ax._layoutbox is not None:
+ newpos = ax._poslayoutbox.get_rect()
+ _log.debug('newpos %r', newpos)
+ # Now set the new position.
+ # ax.set_position will zero out the layout for
+ # this axis, allowing users to hard-code the position,
+ # so this does the same w/o zeroing layout.
+ ax._set_position(newpos, which='original')
+
+
+def _arange_subplotspecs(gs, hspace=0, wspace=0):
+ """
+ Arrange the subplotspec children of this gridspec, and then recursively
+ do the same for any gridspec children of those gridspecs...
+ """
+ sschildren = []
+ for child in gs.children:
+ if child._is_subplotspec_layoutbox():
+ for child2 in child.children:
+ # check for gridspec children...
+ if child2._is_gridspec_layoutbox():
+ _arange_subplotspecs(child2, hspace=hspace, wspace=wspace)
+ sschildren += [child]
+ # now arrange the subplots...
+ for child0 in sschildren:
+ ss0 = child0.artist
+ nrows, ncols = ss0.get_gridspec().get_geometry()
+ if ss0.num2 is None:
+ ss0.num2 = ss0.num1
+ rowNum0min, colNum0min = divmod(ss0.num1, ncols)
+ rowNum0max, colNum0max = divmod(ss0.num2, ncols)
+ sschildren = sschildren[1:]
+ for childc in sschildren:
+ ssc = childc.artist
+ rowNumCmin, colNumCmin = divmod(ssc.num1, ncols)
+ if ssc.num2 is None:
+ ssc.num2 = ssc.num1
+ rowNumCmax, colNumCmax = divmod(ssc.num2, ncols)
+ # OK, this tells us the relative layout of ax
+ # with axc
+ thepad = wspace / ncols
+ if colNum0max < colNumCmin:
+ layoutbox.hstack([ss0._layoutbox, ssc._layoutbox],
+ padding=thepad)
+ if colNumCmax < colNum0min:
+ layoutbox.hstack([ssc._layoutbox, ss0._layoutbox],
+ padding=thepad)
+
+ ####
+ # vertical alignment
+ thepad = hspace / nrows
+ if rowNum0max < rowNumCmin:
+ layoutbox.vstack([ss0._layoutbox,
+ ssc._layoutbox],
+ padding=thepad)
+ if rowNumCmax < rowNum0min:
+ layoutbox.vstack([ssc._layoutbox,
+ ss0._layoutbox],
+ padding=thepad)
+
+
+def layoutcolorbarsingle(ax, cax, shrink, aspect, location, pad=0.05):
+ """
+ Do the layout for a colorbar, so as not to overly pollute colorbar.py.
+
+ `pad` is given as a fraction of the original axis size.
+ """
+ axlb = ax._layoutbox
+ axpos = ax._poslayoutbox
+ axsslb = ax.get_subplotspec()._layoutbox
+ lb = layoutbox.LayoutBox(
+ parent=axsslb,
+ name=axsslb.name + '.cbar',
+ artist=cax)
+
+ if location in ('left', 'right'):
+ lbpos = layoutbox.LayoutBox(
+ parent=lb,
+ name=lb.name + '.pos',
+ tightwidth=False,
+ pos=True,
+ subplot=False,
+ artist=cax)
+
+ if location == 'right':
+ # arrange to right of parent axis
+ layoutbox.hstack([axlb, lb], padding=pad * axlb.width,
+ strength='strong')
+ else:
+ layoutbox.hstack([lb, axlb], padding=pad * axlb.width)
+ # constrain the height and center...
+ layoutbox.match_heights([axpos, lbpos], [1, shrink])
+ layoutbox.align([axpos, lbpos], 'v_center')
+ # set the width of the pos box
+ lbpos.constrain_width(shrink * axpos.height * (1/aspect),
+ strength='strong')
+ elif location in ('bottom', 'top'):
+ lbpos = layoutbox.LayoutBox(
+ parent=lb,
+ name=lb.name + '.pos',
+ tightheight=True,
+ pos=True,
+ subplot=False,
+ artist=cax)
+
+ if location == 'bottom':
+ layoutbox.vstack([axlb, lb], padding=pad * axlb.height)
+ else:
+ layoutbox.vstack([lb, axlb], padding=pad * axlb.height)
+ # constrain the height and center...
+ layoutbox.match_widths([axpos, lbpos],
+ [1, shrink], strength='strong')
+ layoutbox.align([axpos, lbpos], 'h_center')
+ # set the height of the pos box
+ lbpos.constrain_height(axpos.width * aspect * shrink,
+ strength='medium')
+
+ return lb, lbpos
+
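+# A call sketch (argument values are illustrative; in practice this is
+# invoked from matplotlib's colorbar machinery rather than by user code):
+#
+#     lb, lbpos = layoutcolorbarsingle(ax, cax, shrink=0.8, aspect=20,
+#                                      location='right')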
+
+def layoutcolorbargridspec(parents, cax, shrink, aspect, location, pad=0.05):
+ """
+ Do the layout for a colorbar, so as not to overly pollute colorbar.py.
+
+ `pad` is given as a fraction of the original axis size.
+ """
+
+ gs = parents[0].get_subplotspec().get_gridspec()
+ # parent layout box....
+ gslb = gs._layoutbox
+
+ lb = layoutbox.LayoutBox(parent=gslb.parent,
+ name=gslb.parent.name + '.cbar',
+ artist=cax)
+ if location in ('left', 'right'):
+ lbpos = layoutbox.LayoutBox(
+ parent=lb,
+ name=lb.name + '.pos',
+ tightwidth=False,
+ pos=True,
+ subplot=False,
+ artist=cax)
+
+ if location == 'right':
+ # arrange to the right of the gridspec sibling
+ layoutbox.hstack([gslb, lb], padding=pad * gslb.width,
+ strength='strong')
+ else:
+ layoutbox.hstack([lb, gslb], padding=pad * gslb.width)
+ # constrain the height and center...
+ # This isn't quite right. We'd like the colorbar
+ # pos to line up w/ the axes poss, not the size of the
+ # gs.
+ maxrow = -100000
+ minrow = 1000000
+ maxax = None
+ minax = None
+
+ for ax in parents:
+ subspec = ax.get_subplotspec()
+ nrows, ncols = subspec.get_gridspec().get_geometry()
+ for num in [subspec.num1, subspec.num2]:
+ rownum1, colnum1 = divmod(subspec.num1, ncols)
+ if rownum1 > maxrow:
+ maxrow = rownum1
+ maxax = ax
+ if rownum1 < minrow:
+ minrow = rownum1
+ minax = ax
+ # invert the order so these are bottom to top:
+ maxposlb = minax._poslayoutbox
+ minposlb = maxax._poslayoutbox
+ # now we want the height of the colorbar pos to be
+ # set by the top and bottom of these poss
+ # bottom top
+ # b t
+ # h = (top-bottom)*shrink
+ # b = bottom + (top-bottom - h) / 2.
+ lbpos.constrain_height(
+ (maxposlb.top - minposlb.bottom) *
+ shrink, strength='strong')
+ lbpos.constrain_bottom(
+ (maxposlb.top - minposlb.bottom) *
+ (1 - shrink)/2 + minposlb.bottom,
+ strength='strong')
+
+ # set the width of the pos box
+ lbpos.constrain_width(lbpos.height * (shrink / aspect),
+ strength='strong')
+ elif location in ('bottom', 'top'):
+ lbpos = layoutbox.LayoutBox(
+ parent=lb,
+ name=lb.name + '.pos',
+ tightheight=True,
+ pos=True,
+ subplot=False,
+ artist=cax)
+
+ if location == 'bottom':
+ layoutbox.vstack([gslb, lb], padding=pad * gslb.width)
+ else:
+ layoutbox.vstack([lb, gslb], padding=pad * gslb.width)
+
+ maxcol = -100000
+ mincol = 1000000
+ maxax = None
+ minax = None
+
+ for ax in parents:
+ subspec = ax.get_subplotspec()
+ nrows, ncols = subspec.get_gridspec().get_geometry()
+ for num in [subspec.num1, subspec.num2]:
+ rownum1, colnum1 = divmod(subspec.num1, ncols)
+ if colnum1 > maxcol:
+ maxcol = colnum1
+ maxax = ax
+ if colnum1 < mincol:
+ mincol = colnum1
+ minax = ax
+ maxposlb = maxax._poslayoutbox
+ minposlb = minax._poslayoutbox
+ lbpos.constrain_width((maxposlb.right - minposlb.left) *
+ shrink)
+ lbpos.constrain_left(
+ (maxposlb.right - minposlb.left) *
+ (1-shrink)/2 + minposlb.left)
+ # set the height of the pos box
+ lbpos.constrain_height(lbpos.width * shrink * aspect,
+ strength='medium')
+
+ return lb, lbpos
diff --git a/contrib/python/matplotlib/py2/matplotlib/_layoutbox.py b/contrib/python/matplotlib/py2/matplotlib/_layoutbox.py
new file mode 100644
index 00000000000..cb6f0315805
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/_layoutbox.py
@@ -0,0 +1,739 @@
+# -*- coding: utf-8 -*-
+"""
+
+Conventions:
+
+"constrain_x" means to constrain the variable with either
+another kiwisolver variable, or a float, e.g. `constrain_width(0.2)`
+will set a constraint that the width has to be 0.2 and this constraint is
+permanent - i.e. it will not be removed if it becomes obsolete.
+
+"edit_x" means to set x to a value (just a float), and that this value can
+change. So `edit_width(0.2)` will set width to be 0.2, but `edit_width(0.3)`
+will allow it to change to 0.3 later. Note that these values are still just
+"suggestions" in `kiwisolver` parlance, and could be over-ridden by
+other constrains.
+
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import itertools
+import kiwisolver as kiwi
+import logging
+import numpy as np
+import warnings
+
+import matplotlib
+
+_log = logging.getLogger(__name__)
+
+
+# renderers can be complicated
+def get_renderer(fig):
+ if fig._cachedRenderer:
+ renderer = fig._cachedRenderer
+ else:
+ canvas = fig.canvas
+ if canvas and hasattr(canvas, "get_renderer"):
+ renderer = canvas.get_renderer()
+ else:
+ # not sure if this can happen
+ # it seems to happen with the PDF backend...
+ _log.info("constrained_layout : falling back to Agg renderer")
+ from matplotlib.backends.backend_agg import FigureCanvasAgg
+ canvas = FigureCanvasAgg(fig)
+ renderer = canvas.get_renderer()
+
+ return renderer
+
+
+class LayoutBox(object):
+ """
+ Basic rectangle representation using kiwi solver variables
+ """
+
+ def __init__(self, parent=None, name='', tightwidth=False,
+ tightheight=False, artist=None,
+ lower_left=(0, 0), upper_right=(1, 1), pos=False,
+ subplot=False, h_pad=None, w_pad=None):
+ Variable = kiwi.Variable
+ self.parent = parent
+ self.name = name
+ sn = self.name + '_'
+ if parent is None:
+ self.solver = kiwi.Solver()
+ self.constrained_layout_called = 0
+ else:
+ self.solver = parent.solver
+ self.constrained_layout_called = None
+ # parent wants to know about this child!
+ parent.add_child(self)
+ # keep track of artist associated w/ this layout. Can be none
+ self.artist = artist
+ # keep track if this box is supposed to be a pos that is constrained
+ # by the parent.
+ self.pos = pos
+ # keep track of whether we need to match this subplot up with others.
+ self.subplot = subplot
+
+ # we need the str below for Py 2, which complains if the string is unicode
+ self.top = Variable(str(sn + 'top'))
+ self.bottom = Variable(str(sn + 'bottom'))
+ self.left = Variable(str(sn + 'left'))
+ self.right = Variable(str(sn + 'right'))
+
+ self.width = Variable(str(sn + 'width'))
+ self.height = Variable(str(sn + 'height'))
+ self.h_center = Variable(str(sn + 'h_center'))
+ self.v_center = Variable(str(sn + 'v_center'))
+
+ self.min_width = Variable(str(sn + 'min_width'))
+ self.min_height = Variable(str(sn + 'min_height'))
+ self.pref_width = Variable(str(sn + 'pref_width'))
+ self.pref_height = Variable(str(sn + 'pref_height'))
+ # margins are only used for axes-position layout boxes. maybe should
+ # be a separate subclass:
+ self.left_margin = Variable(str(sn + 'left_margin'))
+ self.right_margin = Variable(str(sn + 'right_margin'))
+ self.bottom_margin = Variable(str(sn + 'bottom_margin'))
+ self.top_margin = Variable(str(sn + 'top_margin'))
+ # mins
+ self.left_margin_min = Variable(str(sn + 'left_margin_min'))
+ self.right_margin_min = Variable(str(sn + 'right_margin_min'))
+ self.bottom_margin_min = Variable(str(sn + 'bottom_margin_min'))
+ self.top_margin_min = Variable(str(sn + 'top_margin_min'))
+
+ right, top = upper_right
+ left, bottom = lower_left
+ self.tightheight = tightheight
+ self.tightwidth = tightwidth
+ self.add_constraints()
+ self.children = []
+ self.subplotspec = None
+ if self.pos:
+ self.constrain_margins()
+ self.h_pad = h_pad
+ self.w_pad = w_pad
+
+ def constrain_margins(self):
+ """
+ Only do this for pos. This sets a variable distance
+ margin between the position of the axes and the outer edge of
+ the axes.
+
+ Margins are variable because they change with the figure size.
+
+ Margin minimums are set to make room for axes decorations. However,
+ the margins can be larger if we are matching the position size to
+ other axes.
+ """
+ sol = self.solver
+
+ # left
+ if not sol.hasEditVariable(self.left_margin_min):
+ sol.addEditVariable(self.left_margin_min, 'strong')
+ sol.suggestValue(self.left_margin_min, 0.0001)
+ c = (self.left_margin == self.left - self.parent.left)
+ self.solver.addConstraint(c | 'required')
+ c = (self.left_margin >= self.left_margin_min)
+ self.solver.addConstraint(c | 'strong')
+
+ # right
+ if not sol.hasEditVariable(self.right_margin_min):
+ sol.addEditVariable(self.right_margin_min, 'strong')
+ sol.suggestValue(self.right_margin_min, 0.0001)
+ c = (self.right_margin == self.parent.right - self.right)
+ self.solver.addConstraint(c | 'required')
+ c = (self.right_margin >= self.right_margin_min)
+ self.solver.addConstraint(c | 'required')
+ # bottom
+ if not sol.hasEditVariable(self.bottom_margin_min):
+ sol.addEditVariable(self.bottom_margin_min, 'strong')
+ sol.suggestValue(self.bottom_margin_min, 0.0001)
+ c = (self.bottom_margin == self.bottom - self.parent.bottom)
+ self.solver.addConstraint(c | 'required')
+ c = (self.bottom_margin >= self.bottom_margin_min)
+ self.solver.addConstraint(c | 'required')
+ # top
+ if not sol.hasEditVariable(self.top_margin_min):
+ sol.addEditVariable(self.top_margin_min, 'strong')
+ sol.suggestValue(self.top_margin_min, 0.0001)
+ c = (self.top_margin == self.parent.top - self.top)
+ self.solver.addConstraint(c | 'required')
+ c = (self.top_margin >= self.top_margin_min)
+ self.solver.addConstraint(c | 'required')
+
+ def add_child(self, child):
+ self.children += [child]
+
+ def remove_child(self, child):
+ try:
+ self.children.remove(child)
+ except ValueError:
+ _log.info("Tried to remove child that doesn't belong to parent")
+
+ def add_constraints(self):
+ sol = self.solver
+ # never let width and height go negative.
+ for i in [self.min_width, self.min_height]:
+ sol.addEditVariable(i, 1e9)
+ sol.suggestValue(i, 0.0)
+        # define relationships between things like width, right and left
+ self.hard_constraints()
+ # self.soft_constraints()
+ if self.parent:
+ self.parent_constrain()
+ # sol.updateVariables()
+
+ def parent_constrain(self):
+ parent = self.parent
+ hc = [self.left >= parent.left,
+ self.bottom >= parent.bottom,
+ self.top <= parent.top,
+ self.right <= parent.right]
+ for c in hc:
+ self.solver.addConstraint(c | 'required')
+
+ def hard_constraints(self):
+ hc = [self.width == self.right - self.left,
+ self.height == self.top - self.bottom,
+ self.h_center == (self.left + self.right) * 0.5,
+ self.v_center == (self.top + self.bottom) * 0.5,
+ self.width >= self.min_width,
+ self.height >= self.min_height]
+ for c in hc:
+ self.solver.addConstraint(c | 'required')
+
+ def soft_constraints(self):
+ sol = self.solver
+ if self.tightwidth:
+ suggest = 0.
+ else:
+ suggest = 20.
+ c = (self.pref_width == suggest)
+ for i in c:
+ sol.addConstraint(i | 'required')
+ if self.tightheight:
+ suggest = 0.
+ else:
+ suggest = 20.
+ c = (self.pref_height == suggest)
+ for i in c:
+ sol.addConstraint(i | 'required')
+
+ c = [(self.width >= suggest),
+ (self.height >= suggest)]
+ for i in c:
+ sol.addConstraint(i | 150000)
+
+ def set_parent(self, parent):
+        ''' Replace the parent of this layoutbox with the new parent.
+ '''
+ self.parent = parent
+ self.parent_constrain()
+
+ def constrain_geometry(self, left, bottom, right, top, strength='strong'):
+ hc = [self.left == left,
+ self.right == right,
+ self.bottom == bottom,
+ self.top == top]
+ for c in hc:
+ self.solver.addConstraint((c | strength))
+ # self.solver.updateVariables()
+
+ def constrain_same(self, other, strength='strong'):
+ """
+ Make the layoutbox have same position as other layoutbox
+ """
+ hc = [self.left == other.left,
+ self.right == other.right,
+ self.bottom == other.bottom,
+ self.top == other.top]
+ for c in hc:
+ self.solver.addConstraint((c | strength))
+
+ def constrain_left_margin(self, margin, strength='strong'):
+ c = (self.left == self.parent.left + margin)
+ self.solver.addConstraint(c | strength)
+
+ def edit_left_margin_min(self, margin):
+ self.solver.suggestValue(self.left_margin_min, margin)
+
+ def constrain_right_margin(self, margin, strength='strong'):
+ c = (self.right == self.parent.right - margin)
+ self.solver.addConstraint(c | strength)
+
+ def edit_right_margin_min(self, margin):
+ self.solver.suggestValue(self.right_margin_min, margin)
+
+ def constrain_bottom_margin(self, margin, strength='strong'):
+ c = (self.bottom == self.parent.bottom + margin)
+ self.solver.addConstraint(c | strength)
+
+ def edit_bottom_margin_min(self, margin):
+ self.solver.suggestValue(self.bottom_margin_min, margin)
+
+ def constrain_top_margin(self, margin, strength='strong'):
+ c = (self.top == self.parent.top - margin)
+ self.solver.addConstraint(c | strength)
+
+ def edit_top_margin_min(self, margin):
+ self.solver.suggestValue(self.top_margin_min, margin)
+
+ def get_rect(self):
+ return (self.left.value(), self.bottom.value(),
+ self.width.value(), self.height.value())
+
+ def update_variables(self):
+ '''
+ Update *all* the variables that are part of the solver this LayoutBox
+ is created with
+ '''
+ self.solver.updateVariables()
+
+ def edit_height(self, height, strength='strong'):
+ '''
+ Set the height of the layout box.
+
+ This is done as an editable variable so that the value can change
+ due to resizing.
+ '''
+ sol = self.solver
+ for i in [self.height]:
+ if not sol.hasEditVariable(i):
+ sol.addEditVariable(i, strength)
+ sol.suggestValue(self.height, height)
+
+ def constrain_height(self, height, strength='strong'):
+ '''
+        Constrain the height of the layout box. `height` is
+        either a float or a layoutbox.height.
+ '''
+ c = (self.height == height)
+ self.solver.addConstraint(c | strength)
+
+ def constrain_height_min(self, height, strength='strong'):
+ c = (self.height >= height)
+ self.solver.addConstraint(c | strength)
+
+ def edit_width(self, width, strength='strong'):
+ sol = self.solver
+ for i in [self.width]:
+ if not sol.hasEditVariable(i):
+ sol.addEditVariable(i, strength)
+ sol.suggestValue(self.width, width)
+
+ def constrain_width(self, width, strength='strong'):
+ '''
+ Constrain the width of the layout box. `width` is
+ either a float or a layoutbox.width.
+ '''
+ c = (self.width == width)
+ self.solver.addConstraint(c | strength)
+
+ def constrain_width_min(self, width, strength='strong'):
+ c = (self.width >= width)
+ self.solver.addConstraint(c | strength)
+
+ def constrain_left(self, left, strength='strong'):
+ c = (self.left == left)
+ self.solver.addConstraint(c | strength)
+
+ def constrain_bottom(self, bottom, strength='strong'):
+ c = (self.bottom == bottom)
+ self.solver.addConstraint(c | strength)
+
+ def constrain_right(self, right, strength='strong'):
+ c = (self.right == right)
+ self.solver.addConstraint(c | strength)
+
+ def constrain_top(self, top, strength='strong'):
+ c = (self.top == top)
+ self.solver.addConstraint(c | strength)
+
+ def _is_subplotspec_layoutbox(self):
+ '''
+ Helper to check if this layoutbox is the layoutbox of a
+ subplotspec
+ '''
+ name = (self.name).split('.')[-1]
+ return name[:2] == 'ss'
+
+ def _is_gridspec_layoutbox(self):
+ '''
+ Helper to check if this layoutbox is the layoutbox of a
+ gridspec
+ '''
+ name = (self.name).split('.')[-1]
+ return name[:8] == 'gridspec'
+
+ def find_child_subplots(self):
+ '''
+        Find children of this layout box that are subplots. We want to line
+        their positions up, and this is an easy way to find them all.
+ '''
+ if self.subplot:
+ subplots = [self]
+ else:
+ subplots = []
+ for child in self.children:
+ subplots += child.find_child_subplots()
+ return subplots
+
+ def layout_from_subplotspec(self, subspec,
+ name='', artist=None, pos=False):
+ ''' Make a layout box from a subplotspec. The layout box is
+ constrained to be a fraction of the width/height of the parent,
+ and be a fraction of the parent width/height from the left/bottom
+ of the parent. Therefore the parent can move around and the
+ layout for the subplot spec should move with it.
+
+        The parent is *usually* the gridspec that made the subplotspec.
+ '''
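+        # Illustrative sketch (hypothetical names): given a gridspec-level
+        # layoutbox ``gs_lb`` and a SubplotSpec ``ss``, the per-subplotspec
+        # box would typically be created as::
+        #
+        #     ss_lb = gs_lb.layout_from_subplotspec(ss,
+        #                                           name=gs_lb.name + '.ss0')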
+ lb = LayoutBox(parent=self, name=name, artist=artist, pos=pos)
+ gs = subspec.get_gridspec()
+ nrows, ncols = gs.get_geometry()
+ parent = self.parent
+
+        # OK, now we want to set the position of this subplotspec based on
+        # its subplotspec parameters; the new gridspec will inherit them.
+
+        # The following is adapted from gridspec; it should probably become
+        # a new method in gridspec.
+ left = 0.0
+ right = 1.0
+ bottom = 0.0
+ top = 1.0
+ totWidth = right-left
+ totHeight = top-bottom
+ hspace = 0.
+ wspace = 0.
+
+        # calculate accumulated heights of rows
+ cellH = totHeight / (nrows + hspace * (nrows - 1))
+ sepH = hspace*cellH
+
+ if gs._row_height_ratios is not None:
+ netHeight = cellH * nrows
+ tr = float(sum(gs._row_height_ratios))
+ cellHeights = [netHeight*r/tr for r in gs._row_height_ratios]
+ else:
+ cellHeights = [cellH] * nrows
+
+ sepHeights = [0] + ([sepH] * (nrows - 1))
+ cellHs = np.add.accumulate(np.ravel(
+ list(zip(sepHeights, cellHeights))))
+
+        # calculate accumulated widths of columns
+ cellW = totWidth/(ncols + wspace * (ncols - 1))
+ sepW = wspace*cellW
+
+ if gs._col_width_ratios is not None:
+ netWidth = cellW * ncols
+ tr = float(sum(gs._col_width_ratios))
+ cellWidths = [netWidth * r / tr for r in gs._col_width_ratios]
+ else:
+ cellWidths = [cellW] * ncols
+
+ sepWidths = [0] + ([sepW] * (ncols - 1))
+ cellWs = np.add.accumulate(np.ravel(list(zip(sepWidths, cellWidths))))
+
+ figTops = [top - cellHs[2 * rowNum] for rowNum in range(nrows)]
+ figBottoms = [top - cellHs[2 * rowNum + 1] for rowNum in range(nrows)]
+ figLefts = [left + cellWs[2 * colNum] for colNum in range(ncols)]
+ figRights = [left + cellWs[2 * colNum + 1] for colNum in range(ncols)]
+
+ rowNum, colNum = divmod(subspec.num1, ncols)
+ figBottom = figBottoms[rowNum]
+ figTop = figTops[rowNum]
+ figLeft = figLefts[colNum]
+ figRight = figRights[colNum]
+
+ if subspec.num2 is not None:
+
+ rowNum2, colNum2 = divmod(subspec.num2, ncols)
+ figBottom2 = figBottoms[rowNum2]
+ figTop2 = figTops[rowNum2]
+ figLeft2 = figLefts[colNum2]
+ figRight2 = figRights[colNum2]
+
+ figBottom = min(figBottom, figBottom2)
+ figLeft = min(figLeft, figLeft2)
+ figTop = max(figTop, figTop2)
+ figRight = max(figRight, figRight2)
+ # These are numbers relative to 0,0,1,1. Need to constrain
+ # relative to parent.
+
+ width = figRight - figLeft
+ height = figTop - figBottom
+ parent = self.parent
+ cs = [self.left == parent.left + parent.width * figLeft,
+ self.bottom == parent.bottom + parent.height * figBottom,
+ self.width == parent.width * width,
+ self.height == parent.height * height]
+ for c in cs:
+ self.solver.addConstraint((c | 'required'))
+
+ return lb
+
+ def __repr__(self):
+ args = (self.name, self.left.value(), self.bottom.value(),
+ self.right.value(), self.top.value())
+ return ('LayoutBox: %25s, (left: %1.3f) (bot: %1.3f) '
+ '(right: %1.3f) (top: %1.3f) ') % args
+
+
+# Utility functions that act on layoutboxes...
+def hstack(boxes, padding=0, strength='strong'):
+ '''
+ Stack LayoutBox instances from left to right.
+ `padding` is in figure-relative units.
+ '''
+
+ for i in range(1, len(boxes)):
+ c = (boxes[i-1].right + padding <= boxes[i].left)
+ boxes[i].solver.addConstraint(c | strength)
+
+
+def hpack(boxes, padding=0, strength='strong'):
+ '''
+    Stack LayoutBox instances from left to right, with each box exactly
+    `padding` from the previous one.
+ '''
+
+ for i in range(1, len(boxes)):
+ c = (boxes[i-1].right + padding == boxes[i].left)
+ boxes[i].solver.addConstraint(c | strength)
+
+
+def vstack(boxes, padding=0, strength='strong'):
+ '''
+ Stack LayoutBox instances from top to bottom
+ '''
+
+ for i in range(1, len(boxes)):
+ c = (boxes[i-1].bottom - padding >= boxes[i].top)
+ boxes[i].solver.addConstraint(c | strength)
+
+
+def vpack(boxes, padding=0, strength='strong'):
+ '''
+ Stack LayoutBox instances from top to bottom
+ '''
+
+ for i in range(1, len(boxes)):
+ c = (boxes[i-1].bottom - padding >= boxes[i].top)
+ boxes[i].solver.addConstraint(c | strength)
+
+
+def match_heights(boxes, height_ratios=None, strength='medium'):
+ '''
+    Constrain the heights of the boxes to be equal, optionally weighted
+    by `height_ratios`.
+ '''
+
+ if height_ratios is None:
+ height_ratios = np.ones(len(boxes))
+ for i in range(1, len(boxes)):
+ c = (boxes[i-1].height ==
+ boxes[i].height*height_ratios[i-1]/height_ratios[i])
+ boxes[i].solver.addConstraint(c | strength)
+
+
+def match_widths(boxes, width_ratios=None, strength='medium'):
+ '''
+    Constrain the widths of the boxes to be equal, optionally weighted
+    by `width_ratios`.
+ '''
+
+ if width_ratios is None:
+ width_ratios = np.ones(len(boxes))
+ for i in range(1, len(boxes)):
+ c = (boxes[i-1].width ==
+ boxes[i].width*width_ratios[i-1]/width_ratios[i])
+ boxes[i].solver.addConstraint(c | strength)
+
+
+def vstackeq(boxes, padding=0, height_ratios=None):
+ vstack(boxes, padding=padding)
+ match_heights(boxes, height_ratios=height_ratios)
+
+
+def hstackeq(boxes, padding=0, width_ratios=None):
+ hstack(boxes, padding=padding)
+ match_widths(boxes, width_ratios=width_ratios)
+
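+# Illustrative sketch (hypothetical boxes): the stacking and matching helpers
+# above compose, so a column of three equal-height boxes separated by a small
+# figure-relative pad would be::
+#
+#     vstackeq([lb0, lb1, lb2], padding=0.01)
+#     lb0.update_variables()
+#
+# ``lb0``..``lb2`` are assumed to be LayoutBoxes sharing the same solver
+# (i.e. the same parent tree).
+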
+
+def align(boxes, attr, strength='strong'):
+ cons = []
+ for box in boxes[1:]:
+ cons = (getattr(boxes[0], attr) == getattr(box, attr))
+ boxes[0].solver.addConstraint(cons | strength)
+
+
+def match_top_margins(boxes, levels=1):
+ box0 = boxes[0]
+ top0 = box0
+ for n in range(levels):
+ top0 = top0.parent
+ for box in boxes[1:]:
+ topb = box
+ for n in range(levels):
+ topb = topb.parent
+ c = (box0.top-top0.top == box.top-topb.top)
+ box0.solver.addConstraint(c | 'strong')
+
+
+def match_bottom_margins(boxes, levels=1):
+ box0 = boxes[0]
+ top0 = box0
+ for n in range(levels):
+ top0 = top0.parent
+ for box in boxes[1:]:
+ topb = box
+ for n in range(levels):
+ topb = topb.parent
+ c = (box0.bottom-top0.bottom == box.bottom-topb.bottom)
+ box0.solver.addConstraint(c | 'strong')
+
+
+def match_left_margins(boxes, levels=1):
+ box0 = boxes[0]
+ top0 = box0
+ for n in range(levels):
+ top0 = top0.parent
+ for box in boxes[1:]:
+ topb = box
+ for n in range(levels):
+ topb = topb.parent
+ c = (box0.left-top0.left == box.left-topb.left)
+ box0.solver.addConstraint(c | 'strong')
+
+
+def match_right_margins(boxes, levels=1):
+ box0 = boxes[0]
+ top0 = box0
+ for n in range(levels):
+ top0 = top0.parent
+ for box in boxes[1:]:
+ topb = box
+ for n in range(levels):
+ topb = topb.parent
+ c = (box0.right-top0.right == box.right-topb.right)
+ box0.solver.addConstraint(c | 'strong')
+
+
+def match_width_margins(boxes, levels=1):
+ match_left_margins(boxes, levels=levels)
+ match_right_margins(boxes, levels=levels)
+
+
+def match_height_margins(boxes, levels=1):
+ match_top_margins(boxes, levels=levels)
+ match_bottom_margins(boxes, levels=levels)
+
+
+def match_margins(boxes, levels=1):
+ match_width_margins(boxes, levels=levels)
+ match_height_margins(boxes, levels=levels)
+
+
+_layoutboxobjnum = itertools.count()
+
+
+def seq_id():
+ '''
+ Generate a short sequential id for layoutbox objects...
+ '''
+
+ global _layoutboxobjnum
+
+ return ('%06d' % (next(_layoutboxobjnum)))
+
+
+def print_children(lb):
+ '''
+ Print the children of the layoutbox
+ '''
+ print(lb)
+ for child in lb.children:
+ print_children(child)
+
+
+def nonetree(lb):
+ '''
+    Set the ``_layoutbox`` of every artist in this tree to None. This
+    signals that no further layout should be done.
+ '''
+ if lb is not None:
+ if lb.parent is None:
+ # Clear the solver. Hopefully this garbage collects.
+ lb.solver.reset()
+ nonechildren(lb)
+ else:
+ nonetree(lb.parent)
+
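+# Illustrative sketch (assumption: ``ax`` is an Axes whose ``_layoutbox`` has
+# been set up by the constrained-layout machinery); turning layout off for
+# the whole tree containing it would look like::
+#
+#     nonetree(ax._layoutbox)
+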
+
+def nonechildren(lb):
+ for child in lb.children:
+ nonechildren(child)
+ lb.artist._layoutbox = None
+ lb = None
+
+
+def print_tree(lb):
+ '''
+ Print the tree of layoutboxes
+ '''
+
+ if lb.parent is None:
+ print('LayoutBox Tree\n')
+ print('==============\n')
+ print_children(lb)
+ print('\n')
+ else:
+ print_tree(lb.parent)
+
+
+def plot_children(fig, box, level=0, printit=True):
+ '''
+ Simple plotting to show where boxes are
+ '''
+ import matplotlib
+ import matplotlib.pyplot as plt
+
+ if isinstance(fig, matplotlib.figure.Figure):
+ ax = fig.add_axes([0., 0., 1., 1.])
+ ax.set_facecolor([1., 1., 1., 0.7])
+ ax.set_alpha(0.3)
+ fig.draw(fig.canvas.get_renderer())
+ else:
+ ax = fig
+
+ import matplotlib.patches as patches
+ colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
+ if printit:
+ print("Level:", level)
+ for child in box.children:
+ rect = child.get_rect()
+ if printit:
+ print(child)
+ ax.add_patch(
+ patches.Rectangle(
+ (child.left.value(), child.bottom.value()), # (x,y)
+ child.width.value(), # width
+ child.height.value(), # height
+ fc='none',
+ alpha=0.8,
+ ec=colors[level]
+ )
+ )
+ if level > 0:
+ name = child.name.split('.')[-1]
+ if level % 2 == 0:
+ ax.text(child.left.value(), child.bottom.value(), name,
+ size=12-level, color=colors[level])
+ else:
+ ax.text(child.right.value(), child.top.value(), name,
+ ha='right', va='top', size=12-level,
+ color=colors[level])
+
+ plot_children(ax, child, level=level+1, printit=printit)
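+
+
+# Illustrative usage sketch (assumptions: a figure created with
+# ``constrained_layout=True``, so ``fig._layoutbox`` is populated by the
+# constrained-layout machinery)::
+#
+#     import matplotlib.pyplot as plt
+#     fig, axs = plt.subplots(2, 2, constrained_layout=True)
+#     fig.canvas.draw()                    # run the layout once
+#     plot_children(fig, fig._layoutbox)   # overlay the layoutbox tree
+#     plt.show()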
diff --git a/contrib/python/matplotlib/py2/matplotlib/_mathtext_data.py b/contrib/python/matplotlib/py2/matplotlib/_mathtext_data.py
new file mode 100644
index 00000000000..d042d25892d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/_mathtext_data.py
@@ -0,0 +1,2548 @@
+"""
+Font data tables for TrueType and AFM Computer Modern fonts.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+latex_to_bakoma = {
+ '\\__sqrt__' : ('cmex10', 0x70),
+ '\\bigcap' : ('cmex10', 0x5c),
+ '\\bigcup' : ('cmex10', 0x5b),
+ '\\bigodot' : ('cmex10', 0x4b),
+ '\\bigoplus' : ('cmex10', 0x4d),
+ '\\bigotimes' : ('cmex10', 0x4f),
+ '\\biguplus' : ('cmex10', 0x5d),
+ '\\bigvee' : ('cmex10', 0x5f),
+ '\\bigwedge' : ('cmex10', 0x5e),
+ '\\coprod' : ('cmex10', 0x61),
+ '\\int' : ('cmex10', 0x5a),
+ '\\langle' : ('cmex10', 0xad),
+ '\\leftangle' : ('cmex10', 0xad),
+ '\\leftbrace' : ('cmex10', 0xa9),
+ '\\oint' : ('cmex10', 0x49),
+ '\\prod' : ('cmex10', 0x59),
+ '\\rangle' : ('cmex10', 0xae),
+ '\\rightangle' : ('cmex10', 0xae),
+ '\\rightbrace' : ('cmex10', 0xaa),
+ '\\sum' : ('cmex10', 0x58),
+ '\\widehat' : ('cmex10', 0x62),
+ '\\widetilde' : ('cmex10', 0x65),
+ '\\{' : ('cmex10', 0xa9),
+ '\\}' : ('cmex10', 0xaa),
+ '{' : ('cmex10', 0xa9),
+ '}' : ('cmex10', 0xaa),
+
+ ',' : ('cmmi10', 0x3b),
+ '.' : ('cmmi10', 0x3a),
+ '/' : ('cmmi10', 0x3d),
+ '<' : ('cmmi10', 0x3c),
+ '>' : ('cmmi10', 0x3e),
+ '\\alpha' : ('cmmi10', 0xae),
+ '\\beta' : ('cmmi10', 0xaf),
+ '\\chi' : ('cmmi10', 0xc2),
+ '\\combiningrightarrowabove' : ('cmmi10', 0x7e),
+ '\\delta' : ('cmmi10', 0xb1),
+ '\\ell' : ('cmmi10', 0x60),
+ '\\epsilon' : ('cmmi10', 0xb2),
+ '\\eta' : ('cmmi10', 0xb4),
+ '\\flat' : ('cmmi10', 0x5b),
+ '\\frown' : ('cmmi10', 0x5f),
+ '\\gamma' : ('cmmi10', 0xb0),
+ '\\imath' : ('cmmi10', 0x7b),
+ '\\iota' : ('cmmi10', 0xb6),
+ '\\jmath' : ('cmmi10', 0x7c),
+ '\\kappa' : ('cmmi10', 0x2219),
+ '\\lambda' : ('cmmi10', 0xb8),
+ '\\leftharpoondown' : ('cmmi10', 0x29),
+ '\\leftharpoonup' : ('cmmi10', 0x28),
+ '\\mu' : ('cmmi10', 0xb9),
+ '\\natural' : ('cmmi10', 0x5c),
+ '\\nu' : ('cmmi10', 0xba),
+ '\\omega' : ('cmmi10', 0x21),
+ '\\phi' : ('cmmi10', 0xc1),
+ '\\pi' : ('cmmi10', 0xbc),
+ '\\psi' : ('cmmi10', 0xc3),
+ '\\rho' : ('cmmi10', 0xbd),
+ '\\rightharpoondown' : ('cmmi10', 0x2b),
+ '\\rightharpoonup' : ('cmmi10', 0x2a),
+ '\\sharp' : ('cmmi10', 0x5d),
+ '\\sigma' : ('cmmi10', 0xbe),
+ '\\smile' : ('cmmi10', 0x5e),
+ '\\tau' : ('cmmi10', 0xbf),
+ '\\theta' : ('cmmi10', 0xb5),
+ '\\triangleleft' : ('cmmi10', 0x2f),
+ '\\triangleright' : ('cmmi10', 0x2e),
+ '\\upsilon' : ('cmmi10', 0xc0),
+ '\\varepsilon' : ('cmmi10', 0x22),
+ '\\varphi' : ('cmmi10', 0x27),
+ '\\varrho' : ('cmmi10', 0x25),
+ '\\varsigma' : ('cmmi10', 0x26),
+ '\\vartheta' : ('cmmi10', 0x23),
+ '\\wp' : ('cmmi10', 0x7d),
+ '\\xi' : ('cmmi10', 0xbb),
+ '\\zeta' : ('cmmi10', 0xb3),
+
+ '!' : ('cmr10', 0x21),
+ '%' : ('cmr10', 0x25),
+ '&' : ('cmr10', 0x26),
+ '(' : ('cmr10', 0x28),
+ ')' : ('cmr10', 0x29),
+ '+' : ('cmr10', 0x2b),
+ '0' : ('cmr10', 0x30),
+ '1' : ('cmr10', 0x31),
+ '2' : ('cmr10', 0x32),
+ '3' : ('cmr10', 0x33),
+ '4' : ('cmr10', 0x34),
+ '5' : ('cmr10', 0x35),
+ '6' : ('cmr10', 0x36),
+ '7' : ('cmr10', 0x37),
+ '8' : ('cmr10', 0x38),
+ '9' : ('cmr10', 0x39),
+ ':' : ('cmr10', 0x3a),
+ ';' : ('cmr10', 0x3b),
+ '=' : ('cmr10', 0x3d),
+ '?' : ('cmr10', 0x3f),
+ '@' : ('cmr10', 0x40),
+ '[' : ('cmr10', 0x5b),
+ '\\#' : ('cmr10', 0x23),
+ '\\$' : ('cmr10', 0x24),
+ '\\%' : ('cmr10', 0x25),
+ '\\Delta' : ('cmr10', 0xa2),
+ '\\Gamma' : ('cmr10', 0xa1),
+ '\\Lambda' : ('cmr10', 0xa4),
+ '\\Omega' : ('cmr10', 0xad),
+ '\\Phi' : ('cmr10', 0xa9),
+ '\\Pi' : ('cmr10', 0xa6),
+ '\\Psi' : ('cmr10', 0xaa),
+ '\\Sigma' : ('cmr10', 0xa7),
+ '\\Theta' : ('cmr10', 0xa3),
+ '\\Upsilon' : ('cmr10', 0xa8),
+ '\\Xi' : ('cmr10', 0xa5),
+ '\\circumflexaccent' : ('cmr10', 0x5e),
+ '\\combiningacuteaccent' : ('cmr10', 0xb6),
+ '\\combiningbreve' : ('cmr10', 0xb8),
+ '\\combiningdiaeresis' : ('cmr10', 0xc4),
+ '\\combiningdotabove' : ('cmr10', 0x5f),
+ '\\combininggraveaccent' : ('cmr10', 0xb5),
+ '\\combiningoverline' : ('cmr10', 0xb9),
+ '\\combiningtilde' : ('cmr10', 0x7e),
+ '\\leftbracket' : ('cmr10', 0x5b),
+ '\\leftparen' : ('cmr10', 0x28),
+ '\\rightbracket' : ('cmr10', 0x5d),
+ '\\rightparen' : ('cmr10', 0x29),
+ '\\widebar' : ('cmr10', 0xb9),
+ ']' : ('cmr10', 0x5d),
+
+ '*' : ('cmsy10', 0xa4),
+ '-' : ('cmsy10', 0xa1),
+ '\\Downarrow' : ('cmsy10', 0x2b),
+ '\\Im' : ('cmsy10', 0x3d),
+ '\\Leftarrow' : ('cmsy10', 0x28),
+ '\\Leftrightarrow' : ('cmsy10', 0x2c),
+ '\\P' : ('cmsy10', 0x7b),
+ '\\Re' : ('cmsy10', 0x3c),
+ '\\Rightarrow' : ('cmsy10', 0x29),
+ '\\S' : ('cmsy10', 0x78),
+ '\\Uparrow' : ('cmsy10', 0x2a),
+ '\\Updownarrow' : ('cmsy10', 0x6d),
+ '\\Vert' : ('cmsy10', 0x6b),
+ '\\aleph' : ('cmsy10', 0x40),
+ '\\approx' : ('cmsy10', 0xbc),
+ '\\ast' : ('cmsy10', 0xa4),
+ '\\asymp' : ('cmsy10', 0xb3),
+ '\\backslash' : ('cmsy10', 0x6e),
+ '\\bigcirc' : ('cmsy10', 0xb0),
+ '\\bigtriangledown' : ('cmsy10', 0x35),
+ '\\bigtriangleup' : ('cmsy10', 0x34),
+ '\\bot' : ('cmsy10', 0x3f),
+ '\\bullet' : ('cmsy10', 0xb2),
+ '\\cap' : ('cmsy10', 0x5c),
+ '\\cdot' : ('cmsy10', 0xa2),
+ '\\circ' : ('cmsy10', 0xb1),
+ '\\clubsuit' : ('cmsy10', 0x7c),
+ '\\cup' : ('cmsy10', 0x5b),
+ '\\dag' : ('cmsy10', 0x79),
+ '\\dashv' : ('cmsy10', 0x61),
+ '\\ddag' : ('cmsy10', 0x7a),
+ '\\diamond' : ('cmsy10', 0xa6),
+ '\\diamondsuit' : ('cmsy10', 0x7d),
+ '\\div' : ('cmsy10', 0xa5),
+ '\\downarrow' : ('cmsy10', 0x23),
+ '\\emptyset' : ('cmsy10', 0x3b),
+ '\\equiv' : ('cmsy10', 0xb4),
+ '\\exists' : ('cmsy10', 0x39),
+ '\\forall' : ('cmsy10', 0x38),
+ '\\geq' : ('cmsy10', 0xb8),
+ '\\gg' : ('cmsy10', 0xc0),
+ '\\heartsuit' : ('cmsy10', 0x7e),
+ '\\in' : ('cmsy10', 0x32),
+ '\\infty' : ('cmsy10', 0x31),
+ '\\lbrace' : ('cmsy10', 0x66),
+ '\\lceil' : ('cmsy10', 0x64),
+ '\\leftarrow' : ('cmsy10', 0xc3),
+ '\\leftrightarrow' : ('cmsy10', 0x24),
+ '\\leq' : ('cmsy10', 0x2219),
+ '\\lfloor' : ('cmsy10', 0x62),
+ '\\ll' : ('cmsy10', 0xbf),
+ '\\mid' : ('cmsy10', 0x6a),
+ '\\mp' : ('cmsy10', 0xa8),
+ '\\nabla' : ('cmsy10', 0x72),
+ '\\nearrow' : ('cmsy10', 0x25),
+ '\\neg' : ('cmsy10', 0x3a),
+ '\\ni' : ('cmsy10', 0x33),
+ '\\nwarrow' : ('cmsy10', 0x2d),
+ '\\odot' : ('cmsy10', 0xaf),
+ '\\ominus' : ('cmsy10', 0xaa),
+ '\\oplus' : ('cmsy10', 0xa9),
+ '\\oslash' : ('cmsy10', 0xae),
+ '\\otimes' : ('cmsy10', 0xad),
+ '\\pm' : ('cmsy10', 0xa7),
+ '\\prec' : ('cmsy10', 0xc1),
+ '\\preceq' : ('cmsy10', 0xb9),
+ '\\prime' : ('cmsy10', 0x30),
+ '\\propto' : ('cmsy10', 0x2f),
+ '\\rbrace' : ('cmsy10', 0x67),
+ '\\rceil' : ('cmsy10', 0x65),
+ '\\rfloor' : ('cmsy10', 0x63),
+ '\\rightarrow' : ('cmsy10', 0x21),
+ '\\searrow' : ('cmsy10', 0x26),
+ '\\sim' : ('cmsy10', 0xbb),
+ '\\simeq' : ('cmsy10', 0x27),
+ '\\slash' : ('cmsy10', 0x36),
+ '\\spadesuit' : ('cmsy10', 0xc4),
+ '\\sqcap' : ('cmsy10', 0x75),
+ '\\sqcup' : ('cmsy10', 0x74),
+ '\\sqsubseteq' : ('cmsy10', 0x76),
+ '\\sqsupseteq' : ('cmsy10', 0x77),
+ '\\subset' : ('cmsy10', 0xbd),
+ '\\subseteq' : ('cmsy10', 0xb5),
+ '\\succ' : ('cmsy10', 0xc2),
+ '\\succeq' : ('cmsy10', 0xba),
+ '\\supset' : ('cmsy10', 0xbe),
+ '\\supseteq' : ('cmsy10', 0xb6),
+ '\\swarrow' : ('cmsy10', 0x2e),
+ '\\times' : ('cmsy10', 0xa3),
+ '\\to' : ('cmsy10', 0x21),
+ '\\top' : ('cmsy10', 0x3e),
+ '\\uparrow' : ('cmsy10', 0x22),
+ '\\updownarrow' : ('cmsy10', 0x6c),
+ '\\uplus' : ('cmsy10', 0x5d),
+ '\\vdash' : ('cmsy10', 0x60),
+ '\\vee' : ('cmsy10', 0x5f),
+ '\\vert' : ('cmsy10', 0x6a),
+ '\\wedge' : ('cmsy10', 0x5e),
+ '\\wr' : ('cmsy10', 0x6f),
+ '\\|' : ('cmsy10', 0x6b),
+ '|' : ('cmsy10', 0x6a),
+
+ '\\_' : ('cmtt10', 0x5f)
+}
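+
+# Illustrative sketch (not part of the original table): each entry maps a TeX
+# symbol to a (BaKoMa font name, glyph index) pair, e.g.::
+#
+#     assert latex_to_bakoma['\\sum'] == ('cmex10', 0x58)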
+
+latex_to_cmex = {
+ r'\__sqrt__' : 112,
+ r'\bigcap' : 92,
+ r'\bigcup' : 91,
+ r'\bigodot' : 75,
+ r'\bigoplus' : 77,
+ r'\bigotimes' : 79,
+ r'\biguplus' : 93,
+ r'\bigvee' : 95,
+ r'\bigwedge' : 94,
+ r'\coprod' : 97,
+ r'\int' : 90,
+ r'\leftangle' : 173,
+ r'\leftbrace' : 169,
+ r'\oint' : 73,
+ r'\prod' : 89,
+ r'\rightangle' : 174,
+ r'\rightbrace' : 170,
+ r'\sum' : 88,
+ r'\widehat' : 98,
+ r'\widetilde' : 101,
+}
+
+latex_to_standard = {
+ r'\cong' : ('psyr', 64),
+ r'\Delta' : ('psyr', 68),
+ r'\Phi' : ('psyr', 70),
+ r'\Gamma' : ('psyr', 89),
+ r'\alpha' : ('psyr', 97),
+ r'\beta' : ('psyr', 98),
+ r'\chi' : ('psyr', 99),
+ r'\delta' : ('psyr', 100),
+ r'\varepsilon' : ('psyr', 101),
+ r'\phi' : ('psyr', 102),
+ r'\gamma' : ('psyr', 103),
+ r'\eta' : ('psyr', 104),
+ r'\iota' : ('psyr', 105),
+ r'\varpsi' : ('psyr', 106),
+ r'\kappa' : ('psyr', 108),
+ r'\nu' : ('psyr', 110),
+ r'\pi' : ('psyr', 112),
+ r'\theta' : ('psyr', 113),
+ r'\rho' : ('psyr', 114),
+ r'\sigma' : ('psyr', 115),
+ r'\tau' : ('psyr', 116),
+ '\\upsilon' : ('psyr', 117),
+ r'\varpi' : ('psyr', 118),
+ r'\omega' : ('psyr', 119),
+ r'\xi' : ('psyr', 120),
+ r'\psi' : ('psyr', 121),
+ r'\zeta' : ('psyr', 122),
+ r'\sim' : ('psyr', 126),
+ r'\leq' : ('psyr', 163),
+ r'\infty' : ('psyr', 165),
+ r'\clubsuit' : ('psyr', 167),
+ r'\diamondsuit' : ('psyr', 168),
+ r'\heartsuit' : ('psyr', 169),
+ r'\spadesuit' : ('psyr', 170),
+ r'\leftrightarrow' : ('psyr', 171),
+ r'\leftarrow' : ('psyr', 172),
+ '\\uparrow' : ('psyr', 173),
+ r'\rightarrow' : ('psyr', 174),
+ r'\downarrow' : ('psyr', 175),
+ r'\pm' : ('psyr', 176),
+ r'\geq' : ('psyr', 179),
+ r'\times' : ('psyr', 180),
+ r'\propto' : ('psyr', 181),
+ r'\partial' : ('psyr', 182),
+ r'\bullet' : ('psyr', 183),
+ r'\div' : ('psyr', 184),
+ r'\neq' : ('psyr', 185),
+ r'\equiv' : ('psyr', 186),
+ r'\approx' : ('psyr', 187),
+ r'\ldots' : ('psyr', 188),
+ r'\aleph' : ('psyr', 192),
+ r'\Im' : ('psyr', 193),
+ r'\Re' : ('psyr', 194),
+ r'\wp' : ('psyr', 195),
+ r'\otimes' : ('psyr', 196),
+ r'\oplus' : ('psyr', 197),
+ r'\oslash' : ('psyr', 198),
+ r'\cap' : ('psyr', 199),
+ r'\cup' : ('psyr', 200),
+ r'\supset' : ('psyr', 201),
+ r'\supseteq' : ('psyr', 202),
+ r'\subset' : ('psyr', 204),
+ r'\subseteq' : ('psyr', 205),
+ r'\in' : ('psyr', 206),
+ r'\notin' : ('psyr', 207),
+ r'\angle' : ('psyr', 208),
+ r'\nabla' : ('psyr', 209),
+ r'\textregistered' : ('psyr', 210),
+ r'\copyright' : ('psyr', 211),
+ r'\texttrademark' : ('psyr', 212),
+ r'\Pi' : ('psyr', 213),
+ r'\prod' : ('psyr', 213),
+ r'\surd' : ('psyr', 214),
+ r'\__sqrt__' : ('psyr', 214),
+ r'\cdot' : ('psyr', 215),
+ '\\urcorner' : ('psyr', 216),
+ r'\vee' : ('psyr', 217),
+ r'\wedge' : ('psyr', 218),
+ r'\Leftrightarrow' : ('psyr', 219),
+ r'\Leftarrow' : ('psyr', 220),
+ '\\Uparrow' : ('psyr', 221),
+ r'\Rightarrow' : ('psyr', 222),
+ r'\Downarrow' : ('psyr', 223),
+ r'\Diamond' : ('psyr', 224),
+ r'\Sigma' : ('psyr', 229),
+ r'\sum' : ('psyr', 229),
+ r'\forall' : ('psyr', 34),
+ r'\exists' : ('psyr', 36),
+ r'\lceil' : ('psyr', 233),
+ r'\lbrace' : ('psyr', 123),
+ r'\Psi' : ('psyr', 89),
+ r'\bot' : ('psyr', 0o136),
+ r'\Omega' : ('psyr', 0o127),
+ r'\leftbracket' : ('psyr', 0o133),
+ r'\rightbracket' : ('psyr', 0o135),
+ r'\leftbrace' : ('psyr', 123),
+ r'\leftparen' : ('psyr', 0o50),
+ r'\prime' : ('psyr', 0o242),
+ r'\sharp' : ('psyr', 0o43),
+ r'\slash' : ('psyr', 0o57),
+ r'\Lamda' : ('psyr', 0o114),
+ r'\neg' : ('psyr', 0o330),
+ '\\Upsilon' : ('psyr', 0o241),
+ r'\rightbrace' : ('psyr', 0o175),
+ r'\rfloor' : ('psyr', 0o373),
+ r'\lambda' : ('psyr', 0o154),
+ r'\to' : ('psyr', 0o256),
+ r'\Xi' : ('psyr', 0o130),
+ r'\emptyset' : ('psyr', 0o306),
+ r'\lfloor' : ('psyr', 0o353),
+ r'\rightparen' : ('psyr', 0o51),
+ r'\rceil' : ('psyr', 0o371),
+ r'\ni' : ('psyr', 0o47),
+ r'\epsilon' : ('psyr', 0o145),
+ r'\Theta' : ('psyr', 0o121),
+ r'\langle' : ('psyr', 0o341),
+ r'\leftangle' : ('psyr', 0o341),
+ r'\rangle' : ('psyr', 0o361),
+ r'\rightangle' : ('psyr', 0o361),
+ r'\rbrace' : ('psyr', 0o175),
+ r'\circ' : ('psyr', 0o260),
+ r'\diamond' : ('psyr', 0o340),
+ r'\mu' : ('psyr', 0o155),
+ r'\mid' : ('psyr', 0o352),
+ r'\imath' : ('pncri8a', 105),
+ r'\%' : ('pncr8a', 37),
+ r'\$' : ('pncr8a', 36),
+ r'\{' : ('pncr8a', 123),
+ r'\}' : ('pncr8a', 125),
+ r'\backslash' : ('pncr8a', 92),
+ r'\ast' : ('pncr8a', 42),
+ r'\#' : ('pncr8a', 35),
+
+ r'\circumflexaccent' : ('pncri8a', 124), # for \hat
+ r'\combiningbreve' : ('pncri8a', 81), # for \breve
+ r'\combininggraveaccent' : ('pncri8a', 114), # for \grave
+    r'\combiningacuteaccent' : ('pncri8a', 63),      # for \acute
+ r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot
+ r'\combiningtilde' : ('pncri8a', 75), # for \tilde
+ r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec
+ r'\combiningdotabove' : ('pncri8a', 26), # for \dot
+}
+
+# Automatically generated.
+
+type12uni = {
+ 'uni24C8' : 9416,
+ 'aring' : 229,
+ 'uni22A0' : 8864,
+ 'uni2292' : 8850,
+ 'quotedblright' : 8221,
+ 'uni03D2' : 978,
+ 'uni2215' : 8725,
+ 'uni03D0' : 976,
+ 'V' : 86,
+ 'dollar' : 36,
+ 'uni301E' : 12318,
+ 'uni03D5' : 981,
+ 'four' : 52,
+ 'uni25A0' : 9632,
+ 'uni013C' : 316,
+ 'uni013B' : 315,
+ 'uni013E' : 318,
+ 'Yacute' : 221,
+ 'uni25DE' : 9694,
+ 'uni013F' : 319,
+ 'uni255A' : 9562,
+ 'uni2606' : 9734,
+ 'uni0180' : 384,
+ 'uni22B7' : 8887,
+ 'uni044F' : 1103,
+ 'uni22B5' : 8885,
+ 'uni22B4' : 8884,
+ 'uni22AE' : 8878,
+ 'uni22B2' : 8882,
+ 'uni22B1' : 8881,
+ 'uni22B0' : 8880,
+ 'uni25CD' : 9677,
+ 'uni03CE' : 974,
+ 'uni03CD' : 973,
+ 'uni03CC' : 972,
+ 'uni03CB' : 971,
+ 'uni03CA' : 970,
+ 'uni22B8' : 8888,
+ 'uni22C9' : 8905,
+ 'uni0449' : 1097,
+ 'uni20DD' : 8413,
+ 'uni20DC' : 8412,
+ 'uni20DB' : 8411,
+ 'uni2231' : 8753,
+ 'uni25CF' : 9679,
+ 'uni306E' : 12398,
+ 'uni03D1' : 977,
+ 'uni01A1' : 417,
+ 'uni20D7' : 8407,
+ 'uni03D6' : 982,
+ 'uni2233' : 8755,
+ 'uni20D2' : 8402,
+ 'uni20D1' : 8401,
+ 'uni20D0' : 8400,
+ 'P' : 80,
+ 'uni22BE' : 8894,
+ 'uni22BD' : 8893,
+ 'uni22BC' : 8892,
+ 'uni22BB' : 8891,
+ 'underscore' : 95,
+ 'uni03C8' : 968,
+ 'uni03C7' : 967,
+ 'uni0328' : 808,
+ 'uni03C5' : 965,
+ 'uni03C4' : 964,
+ 'uni03C3' : 963,
+ 'uni03C2' : 962,
+ 'uni03C1' : 961,
+ 'uni03C0' : 960,
+ 'uni2010' : 8208,
+ 'uni0130' : 304,
+ 'uni0133' : 307,
+ 'uni0132' : 306,
+ 'uni0135' : 309,
+ 'uni0134' : 308,
+ 'uni0137' : 311,
+ 'uni0136' : 310,
+ 'uni0139' : 313,
+ 'uni0138' : 312,
+ 'uni2244' : 8772,
+ 'uni229A' : 8858,
+ 'uni2571' : 9585,
+ 'uni0278' : 632,
+ 'uni2239' : 8761,
+ 'p' : 112,
+ 'uni3019' : 12313,
+ 'uni25CB' : 9675,
+ 'uni03DB' : 987,
+ 'uni03DC' : 988,
+ 'uni03DA' : 986,
+ 'uni03DF' : 991,
+ 'uni03DD' : 989,
+ 'uni013D' : 317,
+ 'uni220A' : 8714,
+ 'uni220C' : 8716,
+ 'uni220B' : 8715,
+ 'uni220E' : 8718,
+ 'uni220D' : 8717,
+ 'uni220F' : 8719,
+ 'uni22CC' : 8908,
+ 'Otilde' : 213,
+ 'uni25E5' : 9701,
+ 'uni2736' : 10038,
+ 'perthousand' : 8240,
+ 'zero' : 48,
+ 'uni279B' : 10139,
+ 'dotlessi' : 305,
+ 'uni2279' : 8825,
+ 'Scaron' : 352,
+ 'zcaron' : 382,
+ 'uni21D8' : 8664,
+ 'egrave' : 232,
+ 'uni0271' : 625,
+ 'uni01AA' : 426,
+ 'uni2332' : 9010,
+ 'section' : 167,
+ 'uni25E4' : 9700,
+ 'Icircumflex' : 206,
+ 'ntilde' : 241,
+ 'uni041E' : 1054,
+ 'ampersand' : 38,
+ 'uni041C' : 1052,
+ 'uni041A' : 1050,
+ 'uni22AB' : 8875,
+ 'uni21DB' : 8667,
+ 'dotaccent' : 729,
+ 'uni0416' : 1046,
+ 'uni0417' : 1047,
+ 'uni0414' : 1044,
+ 'uni0415' : 1045,
+ 'uni0412' : 1042,
+ 'uni0413' : 1043,
+ 'degree' : 176,
+ 'uni0411' : 1041,
+ 'K' : 75,
+ 'uni25EB' : 9707,
+ 'uni25EF' : 9711,
+ 'uni0418' : 1048,
+ 'uni0419' : 1049,
+ 'uni2263' : 8803,
+ 'uni226E' : 8814,
+ 'uni2251' : 8785,
+ 'uni02C8' : 712,
+ 'uni2262' : 8802,
+ 'acircumflex' : 226,
+ 'uni22B3' : 8883,
+ 'uni2261' : 8801,
+ 'uni2394' : 9108,
+ 'Aring' : 197,
+ 'uni2260' : 8800,
+ 'uni2254' : 8788,
+ 'uni0436' : 1078,
+ 'uni2267' : 8807,
+ 'k' : 107,
+ 'uni22C8' : 8904,
+ 'uni226A' : 8810,
+ 'uni231F' : 8991,
+ 'smalltilde' : 732,
+ 'uni2201' : 8705,
+ 'uni2200' : 8704,
+ 'uni2203' : 8707,
+ 'uni02BD' : 701,
+ 'uni2205' : 8709,
+ 'uni2204' : 8708,
+ 'Agrave' : 192,
+ 'uni2206' : 8710,
+ 'uni2209' : 8713,
+ 'uni2208' : 8712,
+ 'uni226D' : 8813,
+ 'uni2264' : 8804,
+ 'uni263D' : 9789,
+ 'uni2258' : 8792,
+ 'uni02D3' : 723,
+ 'uni02D2' : 722,
+ 'uni02D1' : 721,
+ 'uni02D0' : 720,
+ 'uni25E1' : 9697,
+ 'divide' : 247,
+ 'uni02D5' : 725,
+ 'uni02D4' : 724,
+ 'ocircumflex' : 244,
+ 'uni2524' : 9508,
+ 'uni043A' : 1082,
+ 'uni24CC' : 9420,
+ 'asciitilde' : 126,
+ 'uni22B9' : 8889,
+ 'uni24D2' : 9426,
+ 'uni211E' : 8478,
+ 'uni211D' : 8477,
+ 'uni24DD' : 9437,
+ 'uni211A' : 8474,
+ 'uni211C' : 8476,
+ 'uni211B' : 8475,
+ 'uni25C6' : 9670,
+ 'uni017F' : 383,
+ 'uni017A' : 378,
+ 'uni017C' : 380,
+ 'uni017B' : 379,
+ 'uni0346' : 838,
+ 'uni22F1' : 8945,
+ 'uni22F0' : 8944,
+ 'two' : 50,
+ 'uni2298' : 8856,
+ 'uni24D1' : 9425,
+ 'E' : 69,
+ 'uni025D' : 605,
+ 'scaron' : 353,
+ 'uni2322' : 8994,
+ 'uni25E3' : 9699,
+ 'uni22BF' : 8895,
+ 'F' : 70,
+ 'uni0440' : 1088,
+ 'uni255E' : 9566,
+ 'uni22BA' : 8890,
+ 'uni0175' : 373,
+ 'uni0174' : 372,
+ 'uni0177' : 375,
+ 'uni0176' : 374,
+ 'bracketleft' : 91,
+ 'uni0170' : 368,
+ 'uni0173' : 371,
+ 'uni0172' : 370,
+ 'asciicircum' : 94,
+ 'uni0179' : 377,
+ 'uni2590' : 9616,
+ 'uni25E2' : 9698,
+ 'uni2119' : 8473,
+ 'uni2118' : 8472,
+ 'uni25CC' : 9676,
+ 'f' : 102,
+ 'ordmasculine' : 186,
+ 'uni229B' : 8859,
+ 'uni22A1' : 8865,
+ 'uni2111' : 8465,
+ 'uni2110' : 8464,
+ 'uni2113' : 8467,
+ 'uni2112' : 8466,
+ 'mu' : 181,
+ 'uni2281' : 8833,
+ 'paragraph' : 182,
+ 'nine' : 57,
+ 'uni25EC' : 9708,
+ 'v' : 118,
+ 'uni040C' : 1036,
+ 'uni0113' : 275,
+ 'uni22D0' : 8912,
+ 'uni21CC' : 8652,
+ 'uni21CB' : 8651,
+ 'uni21CA' : 8650,
+ 'uni22A5' : 8869,
+ 'uni21CF' : 8655,
+ 'uni21CE' : 8654,
+ 'uni21CD' : 8653,
+ 'guilsinglleft' : 8249,
+ 'backslash' : 92,
+ 'uni2284' : 8836,
+ 'uni224E' : 8782,
+ 'uni224D' : 8781,
+ 'uni224F' : 8783,
+ 'uni224A' : 8778,
+ 'uni2287' : 8839,
+ 'uni224C' : 8780,
+ 'uni224B' : 8779,
+ 'uni21BD' : 8637,
+ 'uni2286' : 8838,
+ 'uni030F' : 783,
+ 'uni030D' : 781,
+ 'uni030E' : 782,
+ 'uni030B' : 779,
+ 'uni030C' : 780,
+ 'uni030A' : 778,
+ 'uni026E' : 622,
+ 'uni026D' : 621,
+ 'six' : 54,
+ 'uni026A' : 618,
+ 'uni026C' : 620,
+ 'uni25C1' : 9665,
+ 'uni20D6' : 8406,
+ 'uni045B' : 1115,
+ 'uni045C' : 1116,
+ 'uni256B' : 9579,
+ 'uni045A' : 1114,
+ 'uni045F' : 1119,
+ 'uni045E' : 1118,
+ 'A' : 65,
+ 'uni2569' : 9577,
+ 'uni0458' : 1112,
+ 'uni0459' : 1113,
+ 'uni0452' : 1106,
+ 'uni0453' : 1107,
+ 'uni2562' : 9570,
+ 'uni0451' : 1105,
+ 'uni0456' : 1110,
+ 'uni0457' : 1111,
+ 'uni0454' : 1108,
+ 'uni0455' : 1109,
+ 'icircumflex' : 238,
+ 'uni0307' : 775,
+ 'uni0304' : 772,
+ 'uni0305' : 773,
+ 'uni0269' : 617,
+ 'uni0268' : 616,
+ 'uni0300' : 768,
+ 'uni0301' : 769,
+ 'uni0265' : 613,
+ 'uni0264' : 612,
+ 'uni0267' : 615,
+ 'uni0266' : 614,
+ 'uni0261' : 609,
+ 'uni0260' : 608,
+ 'uni0263' : 611,
+ 'uni0262' : 610,
+ 'a' : 97,
+ 'uni2207' : 8711,
+ 'uni2247' : 8775,
+ 'uni2246' : 8774,
+ 'uni2241' : 8769,
+ 'uni2240' : 8768,
+ 'uni2243' : 8771,
+ 'uni2242' : 8770,
+ 'uni2312' : 8978,
+ 'ogonek' : 731,
+ 'uni2249' : 8777,
+ 'uni2248' : 8776,
+ 'uni3030' : 12336,
+ 'q' : 113,
+ 'uni21C2' : 8642,
+ 'uni21C1' : 8641,
+ 'uni21C0' : 8640,
+ 'uni21C7' : 8647,
+ 'uni21C6' : 8646,
+ 'uni21C5' : 8645,
+ 'uni21C4' : 8644,
+ 'uni225F' : 8799,
+ 'uni212C' : 8492,
+ 'uni21C8' : 8648,
+ 'uni2467' : 9319,
+ 'oacute' : 243,
+ 'uni028F' : 655,
+ 'uni028E' : 654,
+ 'uni026F' : 623,
+ 'uni028C' : 652,
+ 'uni028B' : 651,
+ 'uni028A' : 650,
+ 'uni2510' : 9488,
+ 'ograve' : 242,
+ 'edieresis' : 235,
+ 'uni22CE' : 8910,
+ 'uni22CF' : 8911,
+ 'uni219F' : 8607,
+ 'comma' : 44,
+ 'uni22CA' : 8906,
+ 'uni0429' : 1065,
+ 'uni03C6' : 966,
+ 'uni0427' : 1063,
+ 'uni0426' : 1062,
+ 'uni0425' : 1061,
+ 'uni0424' : 1060,
+ 'uni0423' : 1059,
+ 'uni0422' : 1058,
+ 'uni0421' : 1057,
+ 'uni0420' : 1056,
+ 'uni2465' : 9317,
+ 'uni24D0' : 9424,
+ 'uni2464' : 9316,
+ 'uni0430' : 1072,
+ 'otilde' : 245,
+ 'uni2661' : 9825,
+ 'uni24D6' : 9430,
+ 'uni2466' : 9318,
+ 'uni24D5' : 9429,
+ 'uni219A' : 8602,
+ 'uni2518' : 9496,
+ 'uni22B6' : 8886,
+ 'uni2461' : 9313,
+ 'uni24D4' : 9428,
+ 'uni2460' : 9312,
+ 'uni24EA' : 9450,
+ 'guillemotright' : 187,
+ 'ecircumflex' : 234,
+ 'greater' : 62,
+ 'uni2011' : 8209,
+ 'uacute' : 250,
+ 'uni2462' : 9314,
+ 'L' : 76,
+ 'bullet' : 8226,
+ 'uni02A4' : 676,
+ 'uni02A7' : 679,
+ 'cedilla' : 184,
+ 'uni02A2' : 674,
+ 'uni2015' : 8213,
+ 'uni22C4' : 8900,
+ 'uni22C5' : 8901,
+ 'uni22AD' : 8877,
+ 'uni22C7' : 8903,
+ 'uni22C0' : 8896,
+ 'uni2016' : 8214,
+ 'uni22C2' : 8898,
+ 'uni22C3' : 8899,
+ 'uni24CF' : 9423,
+ 'uni042F' : 1071,
+ 'uni042E' : 1070,
+ 'uni042D' : 1069,
+ 'ydieresis' : 255,
+ 'l' : 108,
+ 'logicalnot' : 172,
+ 'uni24CA' : 9418,
+ 'uni0287' : 647,
+ 'uni0286' : 646,
+ 'uni0285' : 645,
+ 'uni0284' : 644,
+ 'uni0283' : 643,
+ 'uni0282' : 642,
+ 'uni0281' : 641,
+ 'uni027C' : 636,
+ 'uni2664' : 9828,
+ 'exclamdown' : 161,
+ 'uni25C4' : 9668,
+ 'uni0289' : 649,
+ 'uni0288' : 648,
+ 'uni039A' : 922,
+ 'endash' : 8211,
+ 'uni2640' : 9792,
+ 'uni20E4' : 8420,
+ 'uni0473' : 1139,
+ 'uni20E1' : 8417,
+ 'uni2642' : 9794,
+ 'uni03B8' : 952,
+ 'uni03B9' : 953,
+ 'agrave' : 224,
+ 'uni03B4' : 948,
+ 'uni03B5' : 949,
+ 'uni03B6' : 950,
+ 'uni03B7' : 951,
+ 'uni03B0' : 944,
+ 'uni03B1' : 945,
+ 'uni03B2' : 946,
+ 'uni03B3' : 947,
+ 'uni2555' : 9557,
+ 'Adieresis' : 196,
+ 'germandbls' : 223,
+ 'Odieresis' : 214,
+ 'space' : 32,
+ 'uni0126' : 294,
+ 'uni0127' : 295,
+ 'uni0124' : 292,
+ 'uni0125' : 293,
+ 'uni0122' : 290,
+ 'uni0123' : 291,
+ 'uni0120' : 288,
+ 'uni0121' : 289,
+ 'quoteright' : 8217,
+ 'uni2560' : 9568,
+ 'uni2556' : 9558,
+ 'ucircumflex' : 251,
+ 'uni2561' : 9569,
+ 'uni2551' : 9553,
+ 'uni25B2' : 9650,
+ 'uni2550' : 9552,
+ 'uni2563' : 9571,
+ 'uni2553' : 9555,
+ 'G' : 71,
+ 'uni2564' : 9572,
+ 'uni2552' : 9554,
+ 'quoteleft' : 8216,
+ 'uni2565' : 9573,
+ 'uni2572' : 9586,
+ 'uni2568' : 9576,
+ 'uni2566' : 9574,
+ 'W' : 87,
+ 'uni214A' : 8522,
+ 'uni012F' : 303,
+ 'uni012D' : 301,
+ 'uni012E' : 302,
+ 'uni012B' : 299,
+ 'uni012C' : 300,
+ 'uni255C' : 9564,
+ 'uni012A' : 298,
+ 'uni2289' : 8841,
+ 'Q' : 81,
+ 'uni2320' : 8992,
+ 'uni2321' : 8993,
+ 'g' : 103,
+ 'uni03BD' : 957,
+ 'uni03BE' : 958,
+ 'uni03BF' : 959,
+ 'uni2282' : 8834,
+ 'uni2285' : 8837,
+ 'uni03BA' : 954,
+ 'uni03BB' : 955,
+ 'uni03BC' : 956,
+ 'uni2128' : 8488,
+ 'uni25B7' : 9655,
+ 'w' : 119,
+ 'uni0302' : 770,
+ 'uni03DE' : 990,
+ 'uni25DA' : 9690,
+ 'uni0303' : 771,
+ 'uni0463' : 1123,
+ 'uni0462' : 1122,
+ 'uni3018' : 12312,
+ 'uni2514' : 9492,
+ 'question' : 63,
+ 'uni25B3' : 9651,
+ 'uni24E1' : 9441,
+ 'one' : 49,
+ 'uni200A' : 8202,
+ 'uni2278' : 8824,
+ 'ring' : 730,
+ 'uni0195' : 405,
+ 'figuredash' : 8210,
+ 'uni22EC' : 8940,
+ 'uni0339' : 825,
+ 'uni0338' : 824,
+ 'uni0337' : 823,
+ 'uni0336' : 822,
+ 'uni0335' : 821,
+ 'uni0333' : 819,
+ 'uni0332' : 818,
+ 'uni0331' : 817,
+ 'uni0330' : 816,
+ 'uni01C1' : 449,
+ 'uni01C0' : 448,
+ 'uni01C3' : 451,
+ 'uni01C2' : 450,
+ 'uni2353' : 9043,
+ 'uni0308' : 776,
+ 'uni2218' : 8728,
+ 'uni2219' : 8729,
+ 'uni2216' : 8726,
+ 'uni2217' : 8727,
+ 'uni2214' : 8724,
+ 'uni0309' : 777,
+ 'uni2609' : 9737,
+ 'uni2213' : 8723,
+ 'uni2210' : 8720,
+ 'uni2211' : 8721,
+ 'uni2245' : 8773,
+ 'B' : 66,
+ 'uni25D6' : 9686,
+ 'iacute' : 237,
+ 'uni02E6' : 742,
+ 'uni02E7' : 743,
+ 'uni02E8' : 744,
+ 'uni02E9' : 745,
+ 'uni221D' : 8733,
+ 'uni221E' : 8734,
+ 'Ydieresis' : 376,
+ 'uni221C' : 8732,
+ 'uni22D7' : 8919,
+ 'uni221A' : 8730,
+ 'R' : 82,
+ 'uni24DC' : 9436,
+ 'uni033F' : 831,
+ 'uni033E' : 830,
+ 'uni033C' : 828,
+ 'uni033B' : 827,
+ 'uni033A' : 826,
+ 'b' : 98,
+ 'uni228A' : 8842,
+ 'uni22DB' : 8923,
+ 'uni2554' : 9556,
+ 'uni046B' : 1131,
+ 'uni046A' : 1130,
+ 'r' : 114,
+ 'uni24DB' : 9435,
+ 'Ccedilla' : 199,
+ 'minus' : 8722,
+ 'uni24DA' : 9434,
+ 'uni03F0' : 1008,
+ 'uni03F1' : 1009,
+ 'uni20AC' : 8364,
+ 'uni2276' : 8822,
+ 'uni24C0' : 9408,
+ 'uni0162' : 354,
+ 'uni0163' : 355,
+ 'uni011E' : 286,
+ 'uni011D' : 285,
+ 'uni011C' : 284,
+ 'uni011B' : 283,
+ 'uni0164' : 356,
+ 'uni0165' : 357,
+ 'Lslash' : 321,
+ 'uni0168' : 360,
+ 'uni0169' : 361,
+ 'uni25C9' : 9673,
+ 'uni02E5' : 741,
+ 'uni21C3' : 8643,
+ 'uni24C4' : 9412,
+ 'uni24E2' : 9442,
+ 'uni2277' : 8823,
+ 'uni013A' : 314,
+ 'uni2102' : 8450,
+ 'Uacute' : 218,
+ 'uni2317' : 8983,
+ 'uni2107' : 8455,
+ 'uni221F' : 8735,
+ 'yacute' : 253,
+ 'uni3012' : 12306,
+ 'Ucircumflex' : 219,
+ 'uni015D' : 349,
+ 'quotedbl' : 34,
+ 'uni25D9' : 9689,
+ 'uni2280' : 8832,
+ 'uni22AF' : 8879,
+ 'onehalf' : 189,
+ 'uni221B' : 8731,
+ 'Thorn' : 222,
+ 'uni2226' : 8742,
+ 'M' : 77,
+ 'uni25BA' : 9658,
+ 'uni2463' : 9315,
+ 'uni2336' : 9014,
+ 'eight' : 56,
+ 'uni2236' : 8758,
+ 'multiply' : 215,
+ 'uni210C' : 8460,
+ 'uni210A' : 8458,
+ 'uni21C9' : 8649,
+ 'grave' : 96,
+ 'uni210E' : 8462,
+ 'uni0117' : 279,
+ 'uni016C' : 364,
+ 'uni0115' : 277,
+ 'uni016A' : 362,
+ 'uni016F' : 367,
+ 'uni0112' : 274,
+ 'uni016D' : 365,
+ 'uni016E' : 366,
+ 'Ocircumflex' : 212,
+ 'uni2305' : 8965,
+ 'm' : 109,
+ 'uni24DF' : 9439,
+ 'uni0119' : 281,
+ 'uni0118' : 280,
+ 'uni20A3' : 8355,
+ 'uni20A4' : 8356,
+ 'uni20A7' : 8359,
+ 'uni2288' : 8840,
+ 'uni24C3' : 9411,
+ 'uni251C' : 9500,
+ 'uni228D' : 8845,
+ 'uni222F' : 8751,
+ 'uni222E' : 8750,
+ 'uni222D' : 8749,
+ 'uni222C' : 8748,
+ 'uni222B' : 8747,
+ 'uni222A' : 8746,
+ 'uni255B' : 9563,
+ 'Ugrave' : 217,
+ 'uni24DE' : 9438,
+ 'guilsinglright' : 8250,
+ 'uni250A' : 9482,
+ 'Ntilde' : 209,
+ 'uni0279' : 633,
+ 'questiondown' : 191,
+ 'uni256C' : 9580,
+ 'Atilde' : 195,
+ 'uni0272' : 626,
+ 'uni0273' : 627,
+ 'uni0270' : 624,
+ 'ccedilla' : 231,
+ 'uni0276' : 630,
+ 'uni0277' : 631,
+ 'uni0274' : 628,
+ 'uni0275' : 629,
+ 'uni2252' : 8786,
+ 'uni041F' : 1055,
+ 'uni2250' : 8784,
+ 'Z' : 90,
+ 'uni2256' : 8790,
+ 'uni2257' : 8791,
+ 'copyright' : 169,
+ 'uni2255' : 8789,
+ 'uni043D' : 1085,
+ 'uni043E' : 1086,
+ 'uni043F' : 1087,
+ 'yen' : 165,
+ 'uni041D' : 1053,
+ 'uni043B' : 1083,
+ 'uni043C' : 1084,
+ 'uni21B0' : 8624,
+ 'uni21B1' : 8625,
+ 'uni21B2' : 8626,
+ 'uni21B3' : 8627,
+ 'uni21B4' : 8628,
+ 'uni21B5' : 8629,
+ 'uni21B6' : 8630,
+ 'uni21B7' : 8631,
+ 'uni21B8' : 8632,
+ 'Eacute' : 201,
+ 'uni2311' : 8977,
+ 'uni2310' : 8976,
+ 'uni228F' : 8847,
+ 'uni25DB' : 9691,
+ 'uni21BA' : 8634,
+ 'uni21BB' : 8635,
+ 'uni21BC' : 8636,
+ 'uni2017' : 8215,
+ 'uni21BE' : 8638,
+ 'uni21BF' : 8639,
+ 'uni231C' : 8988,
+ 'H' : 72,
+ 'uni0293' : 659,
+ 'uni2202' : 8706,
+ 'uni22A4' : 8868,
+ 'uni231E' : 8990,
+ 'uni2232' : 8754,
+ 'uni225B' : 8795,
+ 'uni225C' : 8796,
+ 'uni24D9' : 9433,
+ 'uni225A' : 8794,
+ 'uni0438' : 1080,
+ 'uni0439' : 1081,
+ 'uni225D' : 8797,
+ 'uni225E' : 8798,
+ 'uni0434' : 1076,
+ 'X' : 88,
+ 'uni007F' : 127,
+ 'uni0437' : 1079,
+ 'Idieresis' : 207,
+ 'uni0431' : 1073,
+ 'uni0432' : 1074,
+ 'uni0433' : 1075,
+ 'uni22AC' : 8876,
+ 'uni22CD' : 8909,
+ 'uni25A3' : 9635,
+ 'bar' : 124,
+ 'uni24BB' : 9403,
+ 'uni037E' : 894,
+ 'uni027B' : 635,
+ 'h' : 104,
+ 'uni027A' : 634,
+ 'uni027F' : 639,
+ 'uni027D' : 637,
+ 'uni027E' : 638,
+ 'uni2227' : 8743,
+ 'uni2004' : 8196,
+ 'uni2225' : 8741,
+ 'uni2224' : 8740,
+ 'uni2223' : 8739,
+ 'uni2222' : 8738,
+ 'uni2221' : 8737,
+ 'uni2220' : 8736,
+ 'x' : 120,
+ 'uni2323' : 8995,
+ 'uni2559' : 9561,
+ 'uni2558' : 9560,
+ 'uni2229' : 8745,
+ 'uni2228' : 8744,
+ 'udieresis' : 252,
+ 'uni029D' : 669,
+ 'ordfeminine' : 170,
+ 'uni22CB' : 8907,
+ 'uni233D' : 9021,
+ 'uni0428' : 1064,
+ 'uni24C6' : 9414,
+ 'uni22DD' : 8925,
+ 'uni24C7' : 9415,
+ 'uni015C' : 348,
+ 'uni015B' : 347,
+ 'uni015A' : 346,
+ 'uni22AA' : 8874,
+ 'uni015F' : 351,
+ 'uni015E' : 350,
+ 'braceleft' : 123,
+ 'uni24C5' : 9413,
+ 'uni0410' : 1040,
+ 'uni03AA' : 938,
+ 'uni24C2' : 9410,
+ 'uni03AC' : 940,
+ 'uni03AB' : 939,
+ 'macron' : 175,
+ 'uni03AD' : 941,
+ 'uni03AF' : 943,
+ 'uni0294' : 660,
+ 'uni0295' : 661,
+ 'uni0296' : 662,
+ 'uni0297' : 663,
+ 'uni0290' : 656,
+ 'uni0291' : 657,
+ 'uni0292' : 658,
+ 'atilde' : 227,
+ 'Acircumflex' : 194,
+ 'uni2370' : 9072,
+ 'uni24C1' : 9409,
+ 'uni0298' : 664,
+ 'uni0299' : 665,
+ 'Oslash' : 216,
+ 'uni029E' : 670,
+ 'C' : 67,
+ 'quotedblleft' : 8220,
+ 'uni029B' : 667,
+ 'uni029C' : 668,
+ 'uni03A9' : 937,
+ 'uni03A8' : 936,
+ 'S' : 83,
+ 'uni24C9' : 9417,
+ 'uni03A1' : 929,
+ 'uni03A0' : 928,
+ 'exclam' : 33,
+ 'uni03A5' : 933,
+ 'uni03A4' : 932,
+ 'uni03A7' : 935,
+ 'Zcaron' : 381,
+ 'uni2133' : 8499,
+ 'uni2132' : 8498,
+ 'uni0159' : 345,
+ 'uni0158' : 344,
+ 'uni2137' : 8503,
+ 'uni2005' : 8197,
+ 'uni2135' : 8501,
+ 'uni2134' : 8500,
+ 'uni02BA' : 698,
+ 'uni2033' : 8243,
+ 'uni0151' : 337,
+ 'uni0150' : 336,
+ 'uni0157' : 343,
+ 'equal' : 61,
+ 'uni0155' : 341,
+ 'uni0154' : 340,
+ 's' : 115,
+ 'uni233F' : 9023,
+ 'eth' : 240,
+ 'uni24BE' : 9406,
+ 'uni21E9' : 8681,
+ 'uni2060' : 8288,
+ 'Egrave' : 200,
+ 'uni255D' : 9565,
+ 'uni24CD' : 9421,
+ 'uni21E1' : 8673,
+ 'uni21B9' : 8633,
+ 'hyphen' : 45,
+ 'uni01BE' : 446,
+ 'uni01BB' : 443,
+ 'period' : 46,
+ 'igrave' : 236,
+ 'uni01BA' : 442,
+ 'uni2296' : 8854,
+ 'uni2297' : 8855,
+ 'uni2294' : 8852,
+ 'uni2295' : 8853,
+ 'colon' : 58,
+ 'uni2293' : 8851,
+ 'uni2290' : 8848,
+ 'uni2291' : 8849,
+ 'uni032D' : 813,
+ 'uni032E' : 814,
+ 'uni032F' : 815,
+ 'uni032A' : 810,
+ 'uni032B' : 811,
+ 'uni032C' : 812,
+ 'uni231D' : 8989,
+ 'Ecircumflex' : 202,
+ 'uni24D7' : 9431,
+ 'uni25DD' : 9693,
+ 'trademark' : 8482,
+ 'Aacute' : 193,
+ 'cent' : 162,
+ 'uni0445' : 1093,
+ 'uni266E' : 9838,
+ 'uni266D' : 9837,
+ 'uni266B' : 9835,
+ 'uni03C9' : 969,
+ 'uni2003' : 8195,
+ 'uni2047' : 8263,
+ 'lslash' : 322,
+ 'uni03A6' : 934,
+ 'uni2043' : 8259,
+ 'uni250C' : 9484,
+ 'uni2040' : 8256,
+ 'uni255F' : 9567,
+ 'uni24CB' : 9419,
+ 'uni0472' : 1138,
+ 'uni0446' : 1094,
+ 'uni0474' : 1140,
+ 'uni0475' : 1141,
+ 'uni2508' : 9480,
+ 'uni2660' : 9824,
+ 'uni2506' : 9478,
+ 'uni2502' : 9474,
+ 'c' : 99,
+ 'uni2500' : 9472,
+ 'N' : 78,
+ 'uni22A6' : 8870,
+ 'uni21E7' : 8679,
+ 'uni2130' : 8496,
+ 'uni2002' : 8194,
+ 'breve' : 728,
+ 'uni0442' : 1090,
+ 'Oacute' : 211,
+ 'uni229F' : 8863,
+ 'uni25C7' : 9671,
+ 'uni229D' : 8861,
+ 'uni229E' : 8862,
+ 'guillemotleft' : 171,
+ 'uni0329' : 809,
+ 'uni24E5' : 9445,
+ 'uni011F' : 287,
+ 'uni0324' : 804,
+ 'uni0325' : 805,
+ 'uni0326' : 806,
+ 'uni0327' : 807,
+ 'uni0321' : 801,
+ 'uni0322' : 802,
+ 'n' : 110,
+ 'uni2032' : 8242,
+ 'uni2269' : 8809,
+ 'uni2268' : 8808,
+ 'uni0306' : 774,
+ 'uni226B' : 8811,
+ 'uni21EA' : 8682,
+ 'uni0166' : 358,
+ 'uni203B' : 8251,
+ 'uni01B5' : 437,
+ 'idieresis' : 239,
+ 'uni02BC' : 700,
+ 'uni01B0' : 432,
+ 'braceright' : 125,
+ 'seven' : 55,
+ 'uni02BB' : 699,
+ 'uni011A' : 282,
+ 'uni29FB' : 10747,
+ 'brokenbar' : 166,
+ 'uni2036' : 8246,
+ 'uni25C0' : 9664,
+ 'uni0156' : 342,
+ 'uni22D5' : 8917,
+ 'uni0258' : 600,
+ 'ugrave' : 249,
+ 'uni22D6' : 8918,
+ 'uni22D1' : 8913,
+ 'uni2034' : 8244,
+ 'uni22D3' : 8915,
+ 'uni22D2' : 8914,
+ 'uni203C' : 8252,
+ 'uni223E' : 8766,
+ 'uni02BF' : 703,
+ 'uni22D9' : 8921,
+ 'uni22D8' : 8920,
+ 'uni25BD' : 9661,
+ 'uni25BE' : 9662,
+ 'uni25BF' : 9663,
+ 'uni041B' : 1051,
+ 'periodcentered' : 183,
+ 'uni25BC' : 9660,
+ 'uni019E' : 414,
+ 'uni019B' : 411,
+ 'uni019A' : 410,
+ 'uni2007' : 8199,
+ 'uni0391' : 913,
+ 'uni0390' : 912,
+ 'uni0393' : 915,
+ 'uni0392' : 914,
+ 'uni0395' : 917,
+ 'uni0394' : 916,
+ 'uni0397' : 919,
+ 'uni0396' : 918,
+ 'uni0399' : 921,
+ 'uni0398' : 920,
+ 'uni25C8' : 9672,
+ 'uni2468' : 9320,
+ 'sterling' : 163,
+ 'uni22EB' : 8939,
+ 'uni039C' : 924,
+ 'uni039B' : 923,
+ 'uni039E' : 926,
+ 'uni039D' : 925,
+ 'uni039F' : 927,
+ 'I' : 73,
+ 'uni03E1' : 993,
+ 'uni03E0' : 992,
+ 'uni2319' : 8985,
+ 'uni228B' : 8843,
+ 'uni25B5' : 9653,
+ 'uni25B6' : 9654,
+ 'uni22EA' : 8938,
+ 'uni24B9' : 9401,
+ 'uni044E' : 1102,
+ 'uni0199' : 409,
+ 'uni2266' : 8806,
+ 'Y' : 89,
+ 'uni22A2' : 8866,
+ 'Eth' : 208,
+ 'uni266F' : 9839,
+ 'emdash' : 8212,
+ 'uni263B' : 9787,
+ 'uni24BD' : 9405,
+ 'uni22DE' : 8926,
+ 'uni0360' : 864,
+ 'uni2557' : 9559,
+ 'uni22DF' : 8927,
+ 'uni22DA' : 8922,
+ 'uni22DC' : 8924,
+ 'uni0361' : 865,
+ 'i' : 105,
+ 'uni24BF' : 9407,
+ 'uni0362' : 866,
+ 'uni263E' : 9790,
+ 'uni028D' : 653,
+ 'uni2259' : 8793,
+ 'uni0323' : 803,
+ 'uni2265' : 8805,
+ 'daggerdbl' : 8225,
+ 'y' : 121,
+ 'uni010A' : 266,
+ 'plusminus' : 177,
+ 'less' : 60,
+ 'uni21AE' : 8622,
+ 'uni0315' : 789,
+ 'uni230B' : 8971,
+ 'uni21AF' : 8623,
+ 'uni21AA' : 8618,
+ 'uni21AC' : 8620,
+ 'uni21AB' : 8619,
+ 'uni01FB' : 507,
+ 'uni01FC' : 508,
+ 'uni223A' : 8762,
+ 'uni01FA' : 506,
+ 'uni01FF' : 511,
+ 'uni01FD' : 509,
+ 'uni01FE' : 510,
+ 'uni2567' : 9575,
+ 'uni25E0' : 9696,
+ 'uni0104' : 260,
+ 'uni0105' : 261,
+ 'uni0106' : 262,
+ 'uni0107' : 263,
+ 'uni0100' : 256,
+ 'uni0101' : 257,
+ 'uni0102' : 258,
+ 'uni0103' : 259,
+ 'uni2038' : 8248,
+ 'uni2009' : 8201,
+ 'uni2008' : 8200,
+ 'uni0108' : 264,
+ 'uni0109' : 265,
+ 'uni02A1' : 673,
+ 'uni223B' : 8763,
+ 'uni226C' : 8812,
+ 'uni25AC' : 9644,
+ 'uni24D3' : 9427,
+ 'uni21E0' : 8672,
+ 'uni21E3' : 8675,
+ 'Udieresis' : 220,
+ 'uni21E2' : 8674,
+ 'D' : 68,
+ 'uni21E5' : 8677,
+ 'uni2621' : 9761,
+ 'uni21D1' : 8657,
+ 'uni203E' : 8254,
+ 'uni22C6' : 8902,
+ 'uni21E4' : 8676,
+ 'uni010D' : 269,
+ 'uni010E' : 270,
+ 'uni010F' : 271,
+ 'five' : 53,
+ 'T' : 84,
+ 'uni010B' : 267,
+ 'uni010C' : 268,
+ 'uni2605' : 9733,
+ 'uni2663' : 9827,
+ 'uni21E6' : 8678,
+ 'uni24B6' : 9398,
+ 'uni22C1' : 8897,
+ 'oslash' : 248,
+ 'acute' : 180,
+ 'uni01F0' : 496,
+ 'd' : 100,
+ 'OE' : 338,
+ 'uni22E3' : 8931,
+ 'Igrave' : 204,
+ 'uni2308' : 8968,
+ 'uni2309' : 8969,
+ 'uni21A9' : 8617,
+ 't' : 116,
+ 'uni2313' : 8979,
+ 'uni03A3' : 931,
+ 'uni21A4' : 8612,
+ 'uni21A7' : 8615,
+ 'uni21A6' : 8614,
+ 'uni21A1' : 8609,
+ 'uni21A0' : 8608,
+ 'uni21A3' : 8611,
+ 'uni21A2' : 8610,
+ 'parenright' : 41,
+ 'uni256A' : 9578,
+ 'uni25DC' : 9692,
+ 'uni24CE' : 9422,
+ 'uni042C' : 1068,
+ 'uni24E0' : 9440,
+ 'uni042B' : 1067,
+ 'uni0409' : 1033,
+ 'uni0408' : 1032,
+ 'uni24E7' : 9447,
+ 'uni25B4' : 9652,
+ 'uni042A' : 1066,
+ 'uni228E' : 8846,
+ 'uni0401' : 1025,
+ 'adieresis' : 228,
+ 'uni0403' : 1027,
+ 'quotesingle' : 39,
+ 'uni0405' : 1029,
+ 'uni0404' : 1028,
+ 'uni0407' : 1031,
+ 'uni0406' : 1030,
+ 'uni229C' : 8860,
+ 'uni2306' : 8966,
+ 'uni2253' : 8787,
+ 'twodotenleader' : 8229,
+ 'uni2131' : 8497,
+ 'uni21DA' : 8666,
+ 'uni2234' : 8756,
+ 'uni2235' : 8757,
+ 'uni01A5' : 421,
+ 'uni2237' : 8759,
+ 'uni2230' : 8752,
+ 'uni02CC' : 716,
+ 'slash' : 47,
+ 'uni01A0' : 416,
+ 'ellipsis' : 8230,
+ 'uni2299' : 8857,
+ 'uni2238' : 8760,
+ 'numbersign' : 35,
+ 'uni21A8' : 8616,
+ 'uni223D' : 8765,
+ 'uni01AF' : 431,
+ 'uni223F' : 8767,
+ 'uni01AD' : 429,
+ 'uni01AB' : 427,
+ 'odieresis' : 246,
+ 'uni223C' : 8764,
+ 'uni227D' : 8829,
+ 'uni0280' : 640,
+ 'O' : 79,
+ 'uni227E' : 8830,
+ 'uni21A5' : 8613,
+ 'uni22D4' : 8916,
+ 'uni25D4' : 9684,
+ 'uni227F' : 8831,
+ 'uni0435' : 1077,
+ 'uni2302' : 8962,
+ 'uni2669' : 9833,
+ 'uni24E3' : 9443,
+ 'uni2720' : 10016,
+ 'uni22A8' : 8872,
+ 'uni22A9' : 8873,
+ 'uni040A' : 1034,
+ 'uni22A7' : 8871,
+ 'oe' : 339,
+ 'uni040B' : 1035,
+ 'uni040E' : 1038,
+ 'uni22A3' : 8867,
+ 'o' : 111,
+ 'uni040F' : 1039,
+ 'Edieresis' : 203,
+ 'uni25D5' : 9685,
+ 'plus' : 43,
+ 'uni044D' : 1101,
+ 'uni263C' : 9788,
+ 'uni22E6' : 8934,
+ 'uni2283' : 8835,
+ 'uni258C' : 9612,
+ 'uni219E' : 8606,
+ 'uni24E4' : 9444,
+ 'uni2136' : 8502,
+ 'dagger' : 8224,
+ 'uni24B7' : 9399,
+ 'uni219B' : 8603,
+ 'uni22E5' : 8933,
+ 'three' : 51,
+ 'uni210B' : 8459,
+ 'uni2534' : 9524,
+ 'uni24B8' : 9400,
+ 'uni230A' : 8970,
+ 'hungarumlaut' : 733,
+ 'parenleft' : 40,
+ 'uni0148' : 328,
+ 'uni0149' : 329,
+ 'uni2124' : 8484,
+ 'uni2125' : 8485,
+ 'uni2126' : 8486,
+ 'uni2127' : 8487,
+ 'uni0140' : 320,
+ 'uni2129' : 8489,
+ 'uni25C5' : 9669,
+ 'uni0143' : 323,
+ 'uni0144' : 324,
+ 'uni0145' : 325,
+ 'uni0146' : 326,
+ 'uni0147' : 327,
+ 'uni210D' : 8461,
+ 'fraction' : 8260,
+ 'uni2031' : 8241,
+ 'uni2196' : 8598,
+ 'uni2035' : 8245,
+ 'uni24E6' : 9446,
+ 'uni016B' : 363,
+ 'uni24BA' : 9402,
+ 'uni266A' : 9834,
+ 'uni0116' : 278,
+ 'uni2115' : 8469,
+ 'registered' : 174,
+ 'J' : 74,
+ 'uni25DF' : 9695,
+ 'uni25CE' : 9678,
+ 'uni273D' : 10045,
+ 'dieresis' : 168,
+ 'uni212B' : 8491,
+ 'uni0114' : 276,
+ 'uni212D' : 8493,
+ 'uni212E' : 8494,
+ 'uni212F' : 8495,
+ 'uni014A' : 330,
+ 'uni014B' : 331,
+ 'uni014C' : 332,
+ 'uni014D' : 333,
+ 'uni014E' : 334,
+ 'uni014F' : 335,
+ 'uni025E' : 606,
+ 'uni24E8' : 9448,
+ 'uni0111' : 273,
+ 'uni24E9' : 9449,
+ 'Ograve' : 210,
+ 'j' : 106,
+ 'uni2195' : 8597,
+ 'uni2194' : 8596,
+ 'uni2197' : 8599,
+ 'uni2037' : 8247,
+ 'uni2191' : 8593,
+ 'uni2190' : 8592,
+ 'uni2193' : 8595,
+ 'uni2192' : 8594,
+ 'uni29FA' : 10746,
+ 'uni2713' : 10003,
+ 'z' : 122,
+ 'uni2199' : 8601,
+ 'uni2198' : 8600,
+ 'uni2667' : 9831,
+ 'ae' : 230,
+ 'uni0448' : 1096,
+ 'semicolon' : 59,
+ 'uni2666' : 9830,
+ 'uni038F' : 911,
+ 'uni0444' : 1092,
+ 'uni0447' : 1095,
+ 'uni038E' : 910,
+ 'uni0441' : 1089,
+ 'uni038C' : 908,
+ 'uni0443' : 1091,
+ 'uni038A' : 906,
+ 'uni0250' : 592,
+ 'uni0251' : 593,
+ 'uni0252' : 594,
+ 'uni0253' : 595,
+ 'uni0254' : 596,
+ 'at' : 64,
+ 'uni0256' : 598,
+ 'uni0257' : 599,
+ 'uni0167' : 359,
+ 'uni0259' : 601,
+ 'uni228C' : 8844,
+ 'uni2662' : 9826,
+ 'uni0319' : 793,
+ 'uni0318' : 792,
+ 'uni24BC' : 9404,
+ 'uni0402' : 1026,
+ 'uni22EF' : 8943,
+ 'Iacute' : 205,
+ 'uni22ED' : 8941,
+ 'uni22EE' : 8942,
+ 'uni0311' : 785,
+ 'uni0310' : 784,
+ 'uni21E8' : 8680,
+ 'uni0312' : 786,
+ 'percent' : 37,
+ 'uni0317' : 791,
+ 'uni0316' : 790,
+ 'uni21D6' : 8662,
+ 'uni21D7' : 8663,
+ 'uni21D4' : 8660,
+ 'uni21D5' : 8661,
+ 'uni21D2' : 8658,
+ 'uni21D3' : 8659,
+ 'uni21D0' : 8656,
+ 'uni2138' : 8504,
+ 'uni2270' : 8816,
+ 'uni2271' : 8817,
+ 'uni2272' : 8818,
+ 'uni2273' : 8819,
+ 'uni2274' : 8820,
+ 'uni2275' : 8821,
+ 'bracketright' : 93,
+ 'uni21D9' : 8665,
+ 'uni21DF' : 8671,
+ 'uni21DD' : 8669,
+ 'uni21DE' : 8670,
+ 'AE' : 198,
+ 'uni03AE' : 942,
+ 'uni227A' : 8826,
+ 'uni227B' : 8827,
+ 'uni227C' : 8828,
+ 'asterisk' : 42,
+ 'aacute' : 225,
+ 'uni226F' : 8815,
+ 'uni22E2' : 8930,
+ 'uni0386' : 902,
+ 'uni22E0' : 8928,
+ 'uni22E1' : 8929,
+ 'U' : 85,
+ 'uni22E7' : 8935,
+ 'uni22E4' : 8932,
+ 'uni0387' : 903,
+ 'uni031A' : 794,
+ 'eacute' : 233,
+ 'uni22E8' : 8936,
+ 'uni22E9' : 8937,
+ 'uni24D8' : 9432,
+ 'uni025A' : 602,
+ 'uni025B' : 603,
+ 'uni025C' : 604,
+ 'e' : 101,
+ 'uni0128' : 296,
+ 'uni025F' : 607,
+ 'uni2665' : 9829,
+ 'thorn' : 254,
+ 'uni0129' : 297,
+ 'uni253C' : 9532,
+ 'uni25D7' : 9687,
+ 'u' : 117,
+ 'uni0388' : 904,
+ 'uni0389' : 905,
+ 'uni0255' : 597,
+ 'uni0171' : 369,
+ 'uni0384' : 900,
+ 'uni0385' : 901,
+ 'uni044A' : 1098,
+ 'uni252C' : 9516,
+ 'uni044C' : 1100,
+ 'uni044B' : 1099
+}
+
+uni2type1 = dict(((v,k) for k,v in six.iteritems(type12uni)))
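+
+# Illustrative sketch (not part of the original tables): ``type12uni`` and
+# ``uni2type1`` are inverse maps between glyph names and Unicode code points,
+# e.g.::
+#
+#     assert type12uni['aring'] == 229     # LATIN SMALL LETTER A WITH RING
+#     assert uni2type1[229] == 'aring'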
+
+tex2uni = {
+ 'widehat' : 0x0302,
+ 'widetilde' : 0x0303,
+ 'widebar' : 0x0305,
+ 'langle' : 0x27e8,
+ 'rangle' : 0x27e9,
+ 'perp' : 0x27c2,
+ 'neq' : 0x2260,
+ 'Join' : 0x2a1d,
+ 'leqslant' : 0x2a7d,
+ 'geqslant' : 0x2a7e,
+ 'lessapprox' : 0x2a85,
+ 'gtrapprox' : 0x2a86,
+ 'lesseqqgtr' : 0x2a8b,
+ 'gtreqqless' : 0x2a8c,
+ 'triangleeq' : 0x225c,
+ 'eqslantless' : 0x2a95,
+ 'eqslantgtr' : 0x2a96,
+ 'backepsilon' : 0x03f6,
+ 'precapprox' : 0x2ab7,
+ 'succapprox' : 0x2ab8,
+ 'fallingdotseq' : 0x2252,
+ 'subseteqq' : 0x2ac5,
+ 'supseteqq' : 0x2ac6,
+ 'varpropto' : 0x221d,
+ 'precnapprox' : 0x2ab9,
+ 'succnapprox' : 0x2aba,
+ 'subsetneqq' : 0x2acb,
+ 'supsetneqq' : 0x2acc,
+ 'lnapprox' : 0x2ab9,
+ 'gnapprox' : 0x2aba,
+ 'longleftarrow' : 0x27f5,
+ 'longrightarrow' : 0x27f6,
+ 'longleftrightarrow' : 0x27f7,
+ 'Longleftarrow' : 0x27f8,
+ 'Longrightarrow' : 0x27f9,
+ 'Longleftrightarrow' : 0x27fa,
+ 'longmapsto' : 0x27fc,
+ 'leadsto' : 0x21dd,
+ 'dashleftarrow' : 0x290e,
+ 'dashrightarrow' : 0x290f,
+ 'circlearrowleft' : 0x21ba,
+ 'circlearrowright' : 0x21bb,
+ 'leftrightsquigarrow' : 0x21ad,
+ 'leftsquigarrow' : 0x219c,
+ 'rightsquigarrow' : 0x219d,
+ 'Game' : 0x2141,
+ 'hbar' : 0x0127,
+ 'hslash' : 0x210f,
+ 'ldots' : 0x2026,
+ 'vdots' : 0x22ee,
+ 'doteqdot' : 0x2251,
+ 'doteq' : 8784,
+ 'partial' : 8706,
+ 'gg' : 8811,
+ 'asymp' : 8781,
+ 'blacktriangledown' : 9662,
+ 'otimes' : 8855,
+ 'nearrow' : 8599,
+ 'varpi' : 982,
+ 'vee' : 8744,
+ 'vec' : 8407,
+ 'smile' : 8995,
+ 'succnsim' : 8937,
+ 'gimel' : 8503,
+ 'vert' : 124,
+ '|' : 124,
+ 'varrho' : 1009,
+ 'P' : 182,
+ 'approxident' : 8779,
+ 'Swarrow' : 8665,
+ 'textasciicircum' : 94,
+ 'imageof' : 8887,
+ 'ntriangleleft' : 8938,
+ 'nleq' : 8816,
+ 'div' : 247,
+ 'nparallel' : 8742,
+ 'Leftarrow' : 8656,
+ 'lll' : 8920,
+ 'oiint' : 8751,
+ 'ngeq' : 8817,
+ 'Theta' : 920,
+ 'origof' : 8886,
+ 'blacksquare' : 9632,
+ 'solbar' : 9023,
+ 'neg' : 172,
+ 'sum' : 8721,
+ 'Vdash' : 8873,
+ 'coloneq' : 8788,
+ 'degree' : 176,
+ 'bowtie' : 8904,
+ 'blacktriangleright' : 9654,
+ 'varsigma' : 962,
+ 'leq' : 8804,
+ 'ggg' : 8921,
+ 'lneqq' : 8808,
+ 'scurel' : 8881,
+ 'stareq' : 8795,
+ 'BbbN' : 8469,
+ 'nLeftarrow' : 8653,
+ 'nLeftrightarrow' : 8654,
+ 'k' : 808,
+ 'bot' : 8869,
+ 'BbbC' : 8450,
+ 'Lsh' : 8624,
+ 'leftleftarrows' : 8647,
+ 'BbbZ' : 8484,
+ 'digamma' : 989,
+ 'BbbR' : 8477,
+ 'BbbP' : 8473,
+ 'BbbQ' : 8474,
+ 'vartriangleright' : 8883,
+ 'succsim' : 8831,
+ 'wedge' : 8743,
+ 'lessgtr' : 8822,
+ 'veebar' : 8891,
+ 'mapsdown' : 8615,
+ 'Rsh' : 8625,
+ 'chi' : 967,
+ 'prec' : 8826,
+ 'nsubseteq' : 8840,
+ 'therefore' : 8756,
+ 'eqcirc' : 8790,
+ 'textexclamdown' : 161,
+ 'nRightarrow' : 8655,
+ 'flat' : 9837,
+ 'notin' : 8713,
+ 'llcorner' : 8990,
+ 'varepsilon' : 949,
+ 'bigtriangleup' : 9651,
+ 'aleph' : 8501,
+ 'dotminus' : 8760,
+ 'upsilon' : 965,
+ 'Lambda' : 923,
+ 'cap' : 8745,
+ 'barleftarrow' : 8676,
+ 'mu' : 956,
+ 'boxplus' : 8862,
+ 'mp' : 8723,
+ 'circledast' : 8859,
+ 'tau' : 964,
+ 'in' : 8712,
+ 'backslash' : 92,
+ 'varnothing' : 8709,
+ 'sharp' : 9839,
+ 'eqsim' : 8770,
+ 'gnsim' : 8935,
+ 'Searrow' : 8664,
+ 'updownarrows' : 8645,
+ 'heartsuit' : 9825,
+ 'trianglelefteq' : 8884,
+ 'ddag' : 8225,
+ 'sqsubseteq' : 8849,
+ 'mapsfrom' : 8612,
+ 'boxbar' : 9707,
+ 'sim' : 8764,
+ 'Nwarrow' : 8662,
+ 'nequiv' : 8802,
+ 'succ' : 8827,
+ 'vdash' : 8866,
+ 'Leftrightarrow' : 8660,
+ 'parallel' : 8741,
+ 'invnot' : 8976,
+ 'natural' : 9838,
+ 'ss' : 223,
+ 'uparrow' : 8593,
+ 'nsim' : 8769,
+ 'hookrightarrow' : 8618,
+ 'Equiv' : 8803,
+ 'approx' : 8776,
+ 'Vvdash' : 8874,
+ 'nsucc' : 8833,
+ 'leftrightharpoons' : 8651,
+ 'Re' : 8476,
+ 'boxminus' : 8863,
+ 'equiv' : 8801,
+ 'Lleftarrow' : 8666,
+ 'll' : 8810,
+ 'Cup' : 8915,
+ 'measeq' : 8798,
+ 'upharpoonleft' : 8639,
+ 'lq' : 8216,
+ 'Upsilon' : 933,
+ 'subsetneq' : 8842,
+ 'greater' : 62,
+ 'supsetneq' : 8843,
+ 'Cap' : 8914,
+ 'L' : 321,
+ 'spadesuit' : 9824,
+ 'lrcorner' : 8991,
+ 'not' : 824,
+ 'bar' : 772,
+ 'rightharpoonaccent' : 8401,
+ 'boxdot' : 8865,
+ 'l' : 322,
+ 'leftharpoondown' : 8637,
+ 'bigcup' : 8899,
+ 'iint' : 8748,
+ 'bigwedge' : 8896,
+ 'downharpoonleft' : 8643,
+ 'textasciitilde' : 126,
+ 'subset' : 8834,
+ 'leqq' : 8806,
+ 'mapsup' : 8613,
+ 'nvDash' : 8877,
+ 'looparrowleft' : 8619,
+ 'nless' : 8814,
+ 'rightarrowbar' : 8677,
+ 'Vert' : 8214,
+ 'downdownarrows' : 8650,
+ 'uplus' : 8846,
+ 'simeq' : 8771,
+ 'napprox' : 8777,
+ 'ast' : 8727,
+ 'twoheaduparrow' : 8607,
+ 'doublebarwedge' : 8966,
+ 'Sigma' : 931,
+ 'leftharpoonaccent' : 8400,
+ 'ntrianglelefteq' : 8940,
+ 'nexists' : 8708,
+ 'times' : 215,
+ 'measuredangle' : 8737,
+ 'bumpeq' : 8783,
+ 'carriagereturn' : 8629,
+ 'adots' : 8944,
+ 'checkmark' : 10003,
+ 'lambda' : 955,
+ 'xi' : 958,
+ 'rbrace' : 125,
+ 'rbrack' : 93,
+ 'Nearrow' : 8663,
+ 'maltese' : 10016,
+ 'clubsuit' : 9827,
+ 'top' : 8868,
+ 'overarc' : 785,
+ 'varphi' : 966,
+ 'Delta' : 916,
+ 'iota' : 953,
+ 'nleftarrow' : 8602,
+ 'candra' : 784,
+ 'supset' : 8835,
+ 'triangleleft' : 9665,
+ 'gtreqless' : 8923,
+ 'ntrianglerighteq' : 8941,
+ 'quad' : 8195,
+ 'Xi' : 926,
+ 'gtrdot' : 8919,
+ 'leftthreetimes' : 8907,
+ 'minus' : 8722,
+ 'preccurlyeq' : 8828,
+ 'nleftrightarrow' : 8622,
+ 'lambdabar' : 411,
+ 'blacktriangle' : 9652,
+ 'kernelcontraction' : 8763,
+ 'Phi' : 934,
+ 'angle' : 8736,
+ 'spadesuitopen' : 9828,
+ 'eqless' : 8924,
+ 'mid' : 8739,
+ 'varkappa' : 1008,
+ 'Ldsh' : 8626,
+ 'updownarrow' : 8597,
+ 'beta' : 946,
+ 'textquotedblleft' : 8220,
+ 'rho' : 961,
+ 'alpha' : 945,
+ 'intercal' : 8890,
+ 'beth' : 8502,
+ 'grave' : 768,
+ 'acwopencirclearrow' : 8634,
+ 'nmid' : 8740,
+ 'nsupset' : 8837,
+ 'sigma' : 963,
+ 'dot' : 775,
+ 'Rightarrow' : 8658,
+ 'turnednot' : 8985,
+ 'backsimeq' : 8909,
+ 'leftarrowtail' : 8610,
+ 'approxeq' : 8778,
+ 'curlyeqsucc' : 8927,
+ 'rightarrowtail' : 8611,
+ 'Psi' : 936,
+ 'copyright' : 169,
+ 'yen' : 165,
+ 'vartriangleleft' : 8882,
+ 'rasp' : 700,
+ 'triangleright' : 9655,
+ 'precsim' : 8830,
+ 'infty' : 8734,
+ 'geq' : 8805,
+ 'updownarrowbar' : 8616,
+ 'precnsim' : 8936,
+ 'H' : 779,
+ 'ulcorner' : 8988,
+ 'looparrowright' : 8620,
+ 'ncong' : 8775,
+ 'downarrow' : 8595,
+ 'circeq' : 8791,
+ 'subseteq' : 8838,
+ 'bigstar' : 9733,
+ 'prime' : 8242,
+ 'lceil' : 8968,
+ 'Rrightarrow' : 8667,
+ 'oiiint' : 8752,
+ 'curlywedge' : 8911,
+ 'vDash' : 8872,
+ 'lfloor' : 8970,
+ 'ddots' : 8945,
+ 'exists' : 8707,
+ 'underbar' : 817,
+ 'Pi' : 928,
+ 'leftrightarrows' : 8646,
+ 'sphericalangle' : 8738,
+ 'coprod' : 8720,
+ 'circledcirc' : 8858,
+ 'gtrsim' : 8819,
+ 'gneqq' : 8809,
+ 'between' : 8812,
+ 'theta' : 952,
+ 'complement' : 8705,
+ 'arceq' : 8792,
+ 'nVdash' : 8878,
+ 'S' : 167,
+ 'wr' : 8768,
+ 'wp' : 8472,
+ 'backcong' : 8780,
+ 'lasp' : 701,
+ 'c' : 807,
+ 'nabla' : 8711,
+ 'dotplus' : 8724,
+ 'eta' : 951,
+ 'forall' : 8704,
+ 'eth' : 240,
+ 'colon' : 58,
+ 'sqcup' : 8852,
+ 'rightrightarrows' : 8649,
+ 'sqsupset' : 8848,
+ 'mapsto' : 8614,
+ 'bigtriangledown' : 9661,
+ 'sqsupseteq' : 8850,
+ 'propto' : 8733,
+ 'pi' : 960,
+ 'pm' : 177,
+ 'dots' : 0x2026,
+ 'nrightarrow' : 8603,
+ 'textasciiacute' : 180,
+ 'Doteq' : 8785,
+ 'breve' : 774,
+ 'sqcap' : 8851,
+ 'twoheadrightarrow' : 8608,
+ 'kappa' : 954,
+ 'vartriangle' : 9653,
+ 'diamondsuit' : 9826,
+ 'pitchfork' : 8916,
+ 'blacktriangleleft' : 9664,
+ 'nprec' : 8832,
+ 'curvearrowright' : 8631,
+ 'barwedge' : 8892,
+ 'multimap' : 8888,
+ 'textquestiondown' : 191,
+ 'cong' : 8773,
+ 'rtimes' : 8906,
+ 'rightzigzagarrow' : 8669,
+ 'rightarrow' : 8594,
+ 'leftarrow' : 8592,
+ '__sqrt__' : 8730,
+ 'twoheaddownarrow' : 8609,
+ 'oint' : 8750,
+ 'bigvee' : 8897,
+ 'eqdef' : 8797,
+ 'sterling' : 163,
+ 'phi' : 981,
+ 'Updownarrow' : 8661,
+ 'backprime' : 8245,
+ 'emdash' : 8212,
+ 'Gamma' : 915,
+ 'i' : 305,
+ 'rceil' : 8969,
+ 'leftharpoonup' : 8636,
+ 'Im' : 8465,
+ 'curvearrowleft' : 8630,
+ 'wedgeq' : 8793,
+ 'curlyeqprec' : 8926,
+ 'questeq' : 8799,
+ 'less' : 60,
+ 'upuparrows' : 8648,
+ 'tilde' : 771,
+ 'textasciigrave' : 96,
+ 'smallsetminus' : 8726,
+ 'ell' : 8467,
+ 'cup' : 8746,
+ 'danger' : 9761,
+ 'nVDash' : 8879,
+ 'cdotp' : 183,
+ 'cdots' : 8943,
+ 'hat' : 770,
+ 'eqgtr' : 8925,
+ 'psi' : 968,
+ 'frown' : 8994,
+ 'acute' : 769,
+ 'downzigzagarrow' : 8623,
+ 'ntriangleright' : 8939,
+ 'cupdot' : 8845,
+ 'circleddash' : 8861,
+ 'oslash' : 8856,
+ 'mho' : 8487,
+ 'd' : 803,
+ 'sqsubset' : 8847,
+ 'cdot' : 8901,
+ 'Omega' : 937,
+ 'OE' : 338,
+ 'veeeq' : 8794,
+ 'Finv' : 8498,
+ 't' : 865,
+ 'leftrightarrow' : 8596,
+ 'swarrow' : 8601,
+ 'rightthreetimes' : 8908,
+ 'rightleftharpoons' : 8652,
+ 'lesssim' : 8818,
+ 'searrow' : 8600,
+ 'because' : 8757,
+ 'gtrless' : 8823,
+ 'star' : 8902,
+ 'nsubset' : 8836,
+ 'zeta' : 950,
+ 'dddot' : 8411,
+ 'bigcirc' : 9675,
+ 'Supset' : 8913,
+ 'circ' : 8728,
+ 'slash' : 8725,
+ 'ocirc' : 778,
+ 'prod' : 8719,
+ 'twoheadleftarrow' : 8606,
+ 'daleth' : 8504,
+ 'upharpoonright' : 8638,
+ 'odot' : 8857,
+ 'Uparrow' : 8657,
+ 'O' : 216,
+ 'hookleftarrow' : 8617,
+ 'trianglerighteq' : 8885,
+ 'nsime' : 8772,
+ 'oe' : 339,
+ 'nwarrow' : 8598,
+ 'o' : 248,
+ 'ddddot' : 8412,
+ 'downharpoonright' : 8642,
+ 'succcurlyeq' : 8829,
+ 'gamma' : 947,
+ 'scrR' : 8475,
+ 'dag' : 8224,
+ 'thickspace' : 8197,
+ 'frakZ' : 8488,
+ 'lessdot' : 8918,
+ 'triangledown' : 9663,
+ 'ltimes' : 8905,
+ 'scrB' : 8492,
+ 'endash' : 8211,
+ 'scrE' : 8496,
+ 'scrF' : 8497,
+ 'scrH' : 8459,
+ 'scrI' : 8464,
+ 'rightharpoondown' : 8641,
+ 'scrL' : 8466,
+ 'scrM' : 8499,
+ 'frakC' : 8493,
+ 'nsupseteq' : 8841,
+ 'circledR' : 174,
+ 'circledS' : 9416,
+ 'ngtr' : 8815,
+ 'bigcap' : 8898,
+ 'scre' : 8495,
+ 'Downarrow' : 8659,
+ 'scrg' : 8458,
+ 'overleftrightarrow' : 8417,
+ 'scro' : 8500,
+ 'lnsim' : 8934,
+ 'eqcolon' : 8789,
+ 'curlyvee' : 8910,
+ 'urcorner' : 8989,
+ 'lbrace' : 123,
+ 'Bumpeq' : 8782,
+ 'delta' : 948,
+ 'boxtimes' : 8864,
+ 'overleftarrow' : 8406,
+ 'prurel' : 8880,
+ 'clubsuitopen' : 9831,
+ 'cwopencirclearrow' : 8635,
+ 'geqq' : 8807,
+ 'rightleftarrows' : 8644,
+ 'ac' : 8766,
+ 'ae' : 230,
+ 'int' : 8747,
+ 'rfloor' : 8971,
+ 'risingdotseq' : 8787,
+ 'nvdash' : 8876,
+ 'diamond' : 8900,
+ 'ddot' : 776,
+ 'backsim' : 8765,
+ 'oplus' : 8853,
+ 'triangleq' : 8796,
+ 'check' : 780,
+ 'ni' : 8715,
+ 'iiint' : 8749,
+ 'ne' : 8800,
+ 'lesseqgtr' : 8922,
+ 'obar' : 9021,
+ 'supseteq' : 8839,
+ 'nu' : 957,
+ 'AA' : 197,
+ 'AE' : 198,
+ 'models' : 8871,
+ 'ominus' : 8854,
+ 'dashv' : 8867,
+ 'omega' : 969,
+ 'rq' : 8217,
+ 'Subset' : 8912,
+ 'rightharpoonup' : 8640,
+ 'Rdsh' : 8627,
+ 'bullet' : 8729,
+ 'divideontimes' : 8903,
+ 'lbrack' : 91,
+ 'textquotedblright' : 8221,
+ 'Colon' : 8759,
+ '%' : 37,
+ '$' : 36,
+ '{' : 123,
+ '}' : 125,
+ '_' : 95,
+ '#' : 35,
+ 'imath' : 0x131,
+ 'circumflexaccent' : 770,
+ 'combiningbreve' : 774,
+ 'combiningoverline' : 772,
+ 'combininggraveaccent' : 768,
+ 'combiningacuteaccent' : 769,
+ 'combiningdiaeresis' : 776,
+ 'combiningtilde' : 771,
+ 'combiningrightarrowabove' : 8407,
+ 'combiningdotabove' : 775,
+ 'to' : 8594,
+ 'succeq' : 8829,
+ 'emptyset' : 8709,
+ 'leftparen' : 40,
+ 'rightparen' : 41,
+ 'bigoplus' : 10753,
+ 'leftangle' : 10216,
+ 'rightangle' : 10217,
+ 'leftbrace' : 124,
+ 'rightbrace' : 125,
+ 'jmath' : 567,
+ 'bigodot' : 10752,
+ 'preceq' : 8828,
+ 'biguplus' : 10756,
+ 'epsilon' : 949,
+ 'vartheta' : 977,
+ 'bigotimes' : 10754,
+ 'guillemotleft' : 171,
+ 'ring' : 730,
+ 'Thorn' : 222,
+ 'guilsinglright' : 8250,
+ 'perthousand' : 8240,
+ 'macron' : 175,
+ 'cent' : 162,
+ 'guillemotright' : 187,
+ 'equal' : 61,
+ 'asterisk' : 42,
+ 'guilsinglleft' : 8249,
+ 'plus' : 43,
+ 'thorn' : 254,
+ 'dagger' : 8224
+}
+
+# Each element is a 4-tuple of the form:
+# src_start, src_end, dst_font, dst_start
+#
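+# For example, the entry (0x0041, 0x005a, 'it', 0xe22d) under 'cal' below maps
+# the source range 'A'-'Z' onto the 'it' (italic) font starting at 0xe22d,
+# i.e. each source character c is remapped to dst_start + (ord(c) - src_start).
+#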
+stix_virtual_fonts = {
+ 'bb':
+ {
+ 'rm':
+ [
+ (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
+ (0x0041, 0x0042, 'rm', 0x1d538), # A-B
+ (0x0043, 0x0043, 'rm', 0x2102), # C
+ (0x0044, 0x0047, 'rm', 0x1d53b), # D-G
+ (0x0048, 0x0048, 'rm', 0x210d), # H
+ (0x0049, 0x004d, 'rm', 0x1d540), # I-M
+ (0x004e, 0x004e, 'rm', 0x2115), # N
+ (0x004f, 0x004f, 'rm', 0x1d546), # O
+ (0x0050, 0x0051, 'rm', 0x2119), # P-Q
+ (0x0052, 0x0052, 'rm', 0x211d), # R
+ (0x0053, 0x0059, 'rm', 0x1d54a), # S-Y
+ (0x005a, 0x005a, 'rm', 0x2124), # Z
+ (0x0061, 0x007a, 'rm', 0x1d552), # a-z
+ (0x0393, 0x0393, 'rm', 0x213e), # \Gamma
+ (0x03a0, 0x03a0, 'rm', 0x213f), # \Pi
+ (0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma
+ (0x03b3, 0x03b3, 'rm', 0x213d), # \gamma
+ (0x03c0, 0x03c0, 'rm', 0x213c), # \pi
+ ],
+ 'it':
+ [
+ (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
+ (0x0041, 0x0042, 'it', 0xe154), # A-B
+ (0x0043, 0x0043, 'it', 0x2102), # C
+ (0x0044, 0x0044, 'it', 0x2145), # D
+ (0x0045, 0x0047, 'it', 0xe156), # E-G
+ (0x0048, 0x0048, 'it', 0x210d), # H
+ (0x0049, 0x004d, 'it', 0xe159), # I-M
+ (0x004e, 0x004e, 'it', 0x2115), # N
+ (0x004f, 0x004f, 'it', 0xe15e), # O
+ (0x0050, 0x0051, 'it', 0x2119), # P-Q
+ (0x0052, 0x0052, 'it', 0x211d), # R
+ (0x0053, 0x0059, 'it', 0xe15f), # S-Y
+ (0x005a, 0x005a, 'it', 0x2124), # Z
+ (0x0061, 0x0063, 'it', 0xe166), # a-c
+ (0x0064, 0x0065, 'it', 0x2146), # d-e
+ (0x0066, 0x0068, 'it', 0xe169), # f-h
+ (0x0069, 0x006a, 'it', 0x2148), # i-j
+ (0x006b, 0x007a, 'it', 0xe16c), # k-z
+ (0x0393, 0x0393, 'it', 0x213e), # \Gamma (not in beta STIX fonts)
+ (0x03a0, 0x03a0, 'it', 0x213f), # \Pi
+ (0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (not in beta STIX fonts)
+ (0x03b3, 0x03b3, 'it', 0x213d), # \gamma (not in beta STIX fonts)
+ (0x03c0, 0x03c0, 'it', 0x213c), # \pi
+ ],
+ 'bf':
+ [
+ (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
+ (0x0041, 0x0042, 'bf', 0xe38a), # A-B
+ (0x0043, 0x0043, 'bf', 0x2102), # C
+ (0x0044, 0x0044, 'bf', 0x2145), # D
+ (0x0045, 0x0047, 'bf', 0xe38d), # E-G
+ (0x0048, 0x0048, 'bf', 0x210d), # H
+ (0x0049, 0x004d, 'bf', 0xe390), # I-M
+ (0x004e, 0x004e, 'bf', 0x2115), # N
+ (0x004f, 0x004f, 'bf', 0xe395), # O
+ (0x0050, 0x0051, 'bf', 0x2119), # P-Q
+ (0x0052, 0x0052, 'bf', 0x211d), # R
+ (0x0053, 0x0059, 'bf', 0xe396), # S-Y
+ (0x005a, 0x005a, 'bf', 0x2124), # Z
+ (0x0061, 0x0063, 'bf', 0xe39d), # a-c
+ (0x0064, 0x0065, 'bf', 0x2146), # d-e
+ (0x0066, 0x0068, 'bf', 0xe3a2), # f-h
+ (0x0069, 0x006a, 'bf', 0x2148), # i-j
+ (0x006b, 0x007a, 'bf', 0xe3a7), # k-z
+ (0x0393, 0x0393, 'bf', 0x213e), # \Gamma
+ (0x03a0, 0x03a0, 'bf', 0x213f), # \Pi
+ (0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma
+ (0x03b3, 0x03b3, 'bf', 0x213d), # \gamma
+ (0x03c0, 0x03c0, 'bf', 0x213c), # \pi
+ ],
+ },
+ 'cal':
+ [
+ (0x0041, 0x005a, 'it', 0xe22d), # A-Z
+ ],
+ 'circled':
+ {
+ 'rm':
+ [
+ (0x0030, 0x0030, 'rm', 0x24ea), # 0
+ (0x0031, 0x0039, 'rm', 0x2460), # 1-9
+ (0x0041, 0x005a, 'rm', 0x24b6), # A-Z
+ (0x0061, 0x007a, 'rm', 0x24d0) # a-z
+ ],
+ 'it':
+ [
+ (0x0030, 0x0030, 'rm', 0x24ea), # 0
+ (0x0031, 0x0039, 'rm', 0x2460), # 1-9
+ (0x0041, 0x005a, 'it', 0x24b6), # A-Z
+ (0x0061, 0x007a, 'it', 0x24d0) # a-z
+ ],
+ 'bf':
+ [
+ (0x0030, 0x0030, 'bf', 0x24ea), # 0
+ (0x0031, 0x0039, 'bf', 0x2460), # 1-9
+ (0x0041, 0x005a, 'bf', 0x24b6), # A-Z
+ (0x0061, 0x007a, 'bf', 0x24d0) # a-z
+ ],
+ },
+ 'frak':
+ {
+ 'rm':
+ [
+ (0x0041, 0x0042, 'rm', 0x1d504), # A-B
+ (0x0043, 0x0043, 'rm', 0x212d), # C
+ (0x0044, 0x0047, 'rm', 0x1d507), # D-G
+ (0x0048, 0x0048, 'rm', 0x210c), # H
+ (0x0049, 0x0049, 'rm', 0x2111), # I
+ (0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
+ (0x0052, 0x0052, 'rm', 0x211c), # R
+ (0x0053, 0x0059, 'rm', 0x1d516), # S-Y
+ (0x005a, 0x005a, 'rm', 0x2128), # Z
+ (0x0061, 0x007a, 'rm', 0x1d51e), # a-z
+ ],
+ 'it':
+ [
+ (0x0041, 0x0042, 'rm', 0x1d504), # A-B
+ (0x0043, 0x0043, 'rm', 0x212d), # C
+ (0x0044, 0x0047, 'rm', 0x1d507), # D-G
+ (0x0048, 0x0048, 'rm', 0x210c), # H
+ (0x0049, 0x0049, 'rm', 0x2111), # I
+ (0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
+ (0x0052, 0x0052, 'rm', 0x211c), # R
+ (0x0053, 0x0059, 'rm', 0x1d516), # S-Y
+ (0x005a, 0x005a, 'rm', 0x2128), # Z
+ (0x0061, 0x007a, 'rm', 0x1d51e), # a-z
+ ],
+ 'bf':
+ [
+ (0x0041, 0x005a, 'bf', 0x1d56c), # A-Z
+ (0x0061, 0x007a, 'bf', 0x1d586), # a-z
+ ],
+ },
+ 'scr':
+ [
+ (0x0041, 0x0041, 'it', 0x1d49c), # A
+ (0x0042, 0x0042, 'it', 0x212c), # B
+ (0x0043, 0x0044, 'it', 0x1d49e), # C-D
+ (0x0045, 0x0046, 'it', 0x2130), # E-F
+ (0x0047, 0x0047, 'it', 0x1d4a2), # G
+ (0x0048, 0x0048, 'it', 0x210b), # H
+ (0x0049, 0x0049, 'it', 0x2110), # I
+ (0x004a, 0x004b, 'it', 0x1d4a5), # J-K
+ (0x004c, 0x004c, 'it', 0x2112), # L
+ (0x004d, 0x004d, 'it', 0x2133), # M
+ (0x004e, 0x0051, 'it', 0x1d4a9), # N-Q
+ (0x0052, 0x0052, 'it', 0x211b), # R
+ (0x0053, 0x005a, 'it', 0x1d4ae), # S-Z
+ (0x0061, 0x0064, 'it', 0x1d4b6), # a-d
+ (0x0065, 0x0065, 'it', 0x212f), # e
+ (0x0066, 0x0066, 'it', 0x1d4bb), # f
+ (0x0067, 0x0067, 'it', 0x210a), # g
+ (0x0068, 0x006e, 'it', 0x1d4bd), # h-n
+ (0x006f, 0x006f, 'it', 0x2134), # o
+ (0x0070, 0x007a, 'it', 0x1d4c5), # p-z
+ ],
+ 'sf':
+ {
+ 'rm':
+ [
+ (0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
+ (0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z
+ (0x0061, 0x007a, 'rm', 0x1d5ba), # a-z
+ (0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
+ (0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega
+ (0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant
+ (0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant
+ (0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant
+ (0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant
+ (0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon
+ (0x2202, 0x2202, 'rm', 0xe17c), # partial differential
+ ],
+ 'it':
+ [
+ # These numerals are actually upright. We don't actually
+ # want italic numerals ever.
+ (0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
+ (0x0041, 0x005a, 'it', 0x1d608), # A-Z
+ (0x0061, 0x007a, 'it', 0x1d622), # a-z
+ (0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
+ (0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega
+ (0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant
+ (0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant
+ (0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant
+ (0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant
+ (0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon
+ ],
+ 'bf':
+ [
+ (0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9
+ (0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z
+ (0x0061, 0x007a, 'bf', 0x1d5ee), # a-z
+ (0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega
+ (0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega
+ (0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant
+ (0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant
+ (0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant
+ (0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant
+ (0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant
+ (0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon
+ (0x2202, 0x2202, 'bf', 0x1d789), # partial differential
+ (0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla
+ ],
+ },
+ 'tt':
+ [
+ (0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9
+ (0x0041, 0x005a, 'rm', 0x1d670), # A-Z
+ (0x0061, 0x007a, 'rm', 0x1d68a) # a-z
+ ],
+ }
diff --git a/contrib/python/matplotlib/py2/matplotlib/_pylab_helpers.py b/contrib/python/matplotlib/py2/matplotlib/_pylab_helpers.py
new file mode 100644
index 00000000000..a1d37f21e20
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/_pylab_helpers.py
@@ -0,0 +1,138 @@
+"""
+Manage figures for pyplot interface.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import atexit
+import gc
+import sys
+
+
+class Gcf(object):
+ """
+ Singleton to manage a set of integer-numbered figures.
+
+ This class is never instantiated; it consists of two class
+ attributes (a list and a dictionary), and a set of class
+ methods that operate on those attributes, accessing them
+ directly as class attributes.
+
+ Attributes:
+
+ *figs*:
+ dictionary of the form {*num*: *manager*, ...}
+
+ *_activeQue*:
+ list of *managers*, with active one at the end
+
+ """
+ _activeQue = []
+ figs = {}
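+
+ # Illustrative state (with hypothetical manager objects m1, m2, m3) after
+ # three figures have been created, figure 3 most recently:
+ #   figs       == {1: m1, 2: m2, 3: m3}
+ #   _activeQue == [m1, m2, m3]
+ # get_fig_manager(2) would return m2 and move it to the end of _activeQue.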
+
+ @classmethod
+ def get_fig_manager(cls, num):
+ """
+ If figure manager *num* exists, make it the active
+ figure and return the manager; otherwise return *None*.
+ """
+ manager = cls.figs.get(num, None)
+ if manager is not None:
+ cls.set_active(manager)
+ return manager
+
+ @classmethod
+ def destroy(cls, num):
+ """
+ Try to remove all traces of figure *num*.
+
+ In the interactive backends, this is bound to the
+ window "destroy" and "delete" events.
+ """
+ if not cls.has_fignum(num):
+ return
+ manager = cls.figs[num]
+ manager.canvas.mpl_disconnect(manager._cidgcf)
+ cls._activeQue.remove(manager)
+ del cls.figs[num]
+ manager.destroy()
+ gc.collect(1)
+
+ @classmethod
+ def destroy_fig(cls, fig):
+ "*fig* is a Figure instance"
+ num = next((manager.num for manager in six.itervalues(cls.figs)
+ if manager.canvas.figure == fig), None)
+ if num is not None:
+ cls.destroy(num)
+
+ @classmethod
+ def destroy_all(cls):
+ # this is needed to ensure that gc is available in corner cases
+ # where modules are being torn down after install with easy_install
+ import gc # noqa
+ for manager in list(cls.figs.values()):
+ manager.canvas.mpl_disconnect(manager._cidgcf)
+ manager.destroy()
+
+ cls._activeQue = []
+ cls.figs.clear()
+ gc.collect(1)
+
+ @classmethod
+ def has_fignum(cls, num):
+ """
+ Return *True* if figure *num* exists.
+ """
+ return num in cls.figs
+
+ @classmethod
+ def get_all_fig_managers(cls):
+ """
+ Return a list of figure managers.
+ """
+ return list(cls.figs.values())
+
+ @classmethod
+ def get_num_fig_managers(cls):
+ """
+ Return the number of figures being managed.
+ """
+ return len(cls.figs)
+
+ @classmethod
+ def get_active(cls):
+ """
+ Return the manager of the active figure, or *None*.
+ """
+ if len(cls._activeQue) == 0:
+ return None
+ else:
+ return cls._activeQue[-1]
+
+ @classmethod
+ def set_active(cls, manager):
+ """
+ Make the figure corresponding to *manager* the active one.
+ """
+ oldQue = cls._activeQue[:]
+ cls._activeQue = []
+ for m in oldQue:
+ if m != manager:
+ cls._activeQue.append(m)
+ cls._activeQue.append(manager)
+ cls.figs[manager.num] = manager
+
+ @classmethod
+ def draw_all(cls, force=False):
+ """
+ Redraw all figures registered with the pyplot
+ state machine.
+ """
+ for f_mgr in cls.get_all_fig_managers():
+ if force or f_mgr.canvas.figure.stale:
+ f_mgr.canvas.draw_idle()
+
+atexit.register(Gcf.destroy_all)
diff --git a/contrib/python/matplotlib/py2/matplotlib/_version.py b/contrib/python/matplotlib/py2/matplotlib/_version.py
new file mode 100644
index 00000000000..73a3f6fbf38
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/_version.py
@@ -0,0 +1,460 @@
+
+# This file helps to compute a version number in source trees obtained from
+# git-archive tarball (such as those provided by github's download-from-tag
+# feature). Distribution tarballs (built by setup.py sdist) and build
+# directories (produced by setup.py build) will contain a much shorter file
+# that just contains the computed version number.
+
+# This file is released into the public domain. Generated by
+# versioneer-0.15 (https://github.com/warner/python-versioneer)
+
+import errno
+import os
+import re
+import subprocess
+import sys
+
+
+def get_keywords():
+ # these strings will be replaced by git during git-archive.
+ # setup.py/versioneer.py will grep for the variable names, so they must
+ # each be defined on a line of their own. _version.py will just call
+ # get_keywords().
+ git_refnames = " (tag: v2.2.4)"
+ git_full = "bff1e4b201baf34df3f3a6fbb408f0e2c1068dd9"
+ keywords = {"refnames": git_refnames, "full": git_full}
+ return keywords
+
+
+class VersioneerConfig:
+ pass
+
+
+def get_config():
+ # these strings are filled in when 'setup.py versioneer' creates
+ # _version.py
+ cfg = VersioneerConfig()
+ cfg.VCS = "git"
+ cfg.style = "pep440-post"
+ cfg.tag_prefix = "v"
+ cfg.parentdir_prefix = "matplotlib-"
+ cfg.versionfile_source = "lib/matplotlib/_version.py"
+ cfg.verbose = False
+ return cfg
+
+
+class NotThisMethod(Exception):
+ pass
+
+
+LONG_VERSION_PY = {}
+HANDLERS = {}
+
+
+def register_vcs_handler(vcs, method): # decorator
+ def decorate(f):
+ if vcs not in HANDLERS:
+ HANDLERS[vcs] = {}
+ HANDLERS[vcs][method] = f
+ return f
+ return decorate
+
+
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+ assert isinstance(commands, list)
+ p = None
+ for c in commands:
+ try:
+ dispcmd = str([c] + args)
+ # remember shell=False, so use git.cmd on windows, not just git
+ p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
+ stderr=(subprocess.PIPE if hide_stderr
+ else None))
+ break
+ except EnvironmentError:
+ e = sys.exc_info()[1]
+ if e.errno == errno.ENOENT:
+ continue
+ if verbose:
+ print("unable to run %s" % dispcmd)
+ print(e)
+ return None
+ else:
+ if verbose:
+ print("unable to find command, tried %s" % (commands,))
+ return None
+ stdout = p.communicate()[0].strip()
+ if sys.version_info[0] >= 3:
+ stdout = stdout.decode()
+ if p.returncode != 0:
+ if verbose:
+ print("unable to run %s (error)" % dispcmd)
+ return None
+ return stdout
+
+
+def versions_from_parentdir(parentdir_prefix, root, verbose):
+ # Source tarballs conventionally unpack into a directory that includes
+ # both the project name and a version string.
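+ # For example (hypothetical name): with parentdir_prefix "matplotlib-", a
+ # tree unpacked into "matplotlib-2.2.4/" yields {"version": "2.2.4", ...}.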
+ dirname = os.path.basename(root)
+ if not dirname.startswith(parentdir_prefix):
+ if verbose:
+ print("guessing rootdir is '%s', but '%s' doesn't start with "
+ "prefix '%s'" % (root, dirname, parentdir_prefix))
+ raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
+ return {"version": dirname[len(parentdir_prefix):],
+ "full-revisionid": None,
+ "dirty": False, "error": None}
+
+
+@register_vcs_handler("git", "get_keywords")
+def git_get_keywords(versionfile_abs):
+ # the code embedded in _version.py can just fetch the value of these
+ # keywords. When used from setup.py, we don't want to import _version.py,
+ # so we do it with a regexp instead. This function is not used from
+ # _version.py.
+ keywords = {}
+ try:
+ f = open(versionfile_abs, "r")
+ for line in f.readlines():
+ if line.strip().startswith("git_refnames ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["refnames"] = mo.group(1)
+ if line.strip().startswith("git_full ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["full"] = mo.group(1)
+ f.close()
+ except EnvironmentError:
+ pass
+ return keywords
+
+
+@register_vcs_handler("git", "keywords")
+def git_versions_from_keywords(keywords, tag_prefix, verbose):
+ if not keywords:
+ raise NotThisMethod("no keywords at all, weird")
+ refnames = keywords["refnames"].strip()
+ if refnames.startswith("$Format"):
+ if verbose:
+ print("keywords are unexpanded, not using")
+ raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
+ refs = set([r.strip() for r in refnames.strip("()").split(",")])
+ # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
+ # just "foo-1.0". If we see a "tag: " prefix, prefer those.
+ TAG = "tag: "
+ tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+ if not tags:
+ # Either we're using git < 1.8.3, or there really are no tags. We use
+ # a heuristic: assume all version tags have a digit. The old git %d
+ # expansion behaves like git log --decorate=short and strips out the
+ # refs/heads/ and refs/tags/ prefixes that would let us distinguish
+ # between branches and tags. By ignoring refnames without digits, we
+ # filter out many common branch names like "release" and
+ # "stabilization", as well as "HEAD" and "master".
+ tags = set([r for r in refs if re.search(r'\d', r)])
+ if verbose:
+ print("discarding '%s', no digits" % ",".join(refs-tags))
+ if verbose:
+ print("likely tags: %s" % ",".join(sorted(tags)))
+ for ref in sorted(tags):
+ # sorting will prefer e.g. "2.0" over "2.0rc1"
+ if ref.startswith(tag_prefix):
+ r = ref[len(tag_prefix):]
+ if verbose:
+ print("picking %s" % r)
+ return {"version": r,
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": None
+ }
+ # no suitable tags, so version is "0+unknown", but full hex is still there
+ if verbose:
+ print("no suitable tags, using unknown + full revision id")
+ return {"version": "0+unknown",
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": "no suitable tags"}
+
+
+@register_vcs_handler("git", "pieces_from_vcs")
+def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
+ # this runs 'git' from the root of the source tree. This only gets called
+ # if the git-archive 'subst' keywords were *not* expanded, and
+ # _version.py hasn't already been rewritten with a short version string,
+ # meaning we're inside a checked out source tree.
+
+ if not os.path.exists(os.path.join(root, ".git")):
+ if verbose:
+ print("no .git in %s" % root)
+ raise NotThisMethod("no .git directory")
+
+ GITS = ["git"]
+ if sys.platform == "win32":
+ GITS = ["git.cmd", "git.exe"]
+ # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
+ # if there are no tags, this yields HEX[-dirty] (no NUM)
+ describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
+ "--always", "--long"],
+ cwd=root)
+ # --long was added in git-1.5.5
+ if describe_out is None:
+ raise NotThisMethod("'git describe' failed")
+ describe_out = describe_out.strip()
+ full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+ if full_out is None:
+ raise NotThisMethod("'git rev-parse' failed")
+ full_out = full_out.strip()
+
+ pieces = {}
+ pieces["long"] = full_out
+ pieces["short"] = full_out[:7] # maybe improved later
+ pieces["error"] = None
+
+ # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
+ # TAG might have hyphens.
+ git_describe = describe_out
+
+ # look for -dirty suffix
+ dirty = git_describe.endswith("-dirty")
+ pieces["dirty"] = dirty
+ if dirty:
+ git_describe = git_describe[:git_describe.rindex("-dirty")]
+
+ # now we have TAG-NUM-gHEX or HEX
+
+ if "-" in git_describe:
+ # TAG-NUM-gHEX
+ mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+ if not mo:
+ # unparseable. Maybe git-describe is misbehaving?
+ pieces["error"] = ("unable to parse git-describe output: '%s'"
+ % describe_out)
+ return pieces
+
+ # tag
+ full_tag = mo.group(1)
+ if not full_tag.startswith(tag_prefix):
+ if verbose:
+ fmt = "tag '%s' doesn't start with prefix '%s'"
+ print(fmt % (full_tag, tag_prefix))
+ pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
+ % (full_tag, tag_prefix))
+ return pieces
+ pieces["closest-tag"] = full_tag[len(tag_prefix):]
+
+ # distance: number of commits since tag
+ pieces["distance"] = int(mo.group(2))
+
+ # commit: short hex revision ID
+ pieces["short"] = mo.group(3)
+
+ else:
+ # HEX: no tags
+ pieces["closest-tag"] = None
+ count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
+ cwd=root)
+ pieces["distance"] = int(count_out) # total number of commits
+
+ return pieces
+
+
+def plus_or_dot(pieces):
+ if "+" in pieces.get("closest-tag", ""):
+ return "."
+ return "+"
+
+
+def render_pep440(pieces):
+ # now build up version string, with post-release "local version
+ # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+ # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+
+ # exceptions:
+ # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
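+ #
+ # Worked example (hypothetical values): closest-tag "2.2.4", distance 3,
+ # short "abc1234", dirty tree -> "2.2.4+3.gabc1234.dirty";
+ # no tags at all -> "0+untagged.3.gabc1234.dirty".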
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += plus_or_dot(pieces)
+ rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ else:
+ # exception #1
+ rendered = "0+untagged.%d.g%s" % (pieces["distance"],
+ pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ return rendered
+
+
+def render_pep440_pre(pieces):
+ # TAG[.post.devDISTANCE] . No -dirty
+
+ # exceptions:
+ # 1: no tags. 0.post.devDISTANCE
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += ".post.dev%d" % pieces["distance"]
+ else:
+ # exception #1
+ rendered = "0.post.dev%d" % pieces["distance"]
+ return rendered
+
+
+def render_pep440_post(pieces):
+ # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
+ # .dev0 sorts backwards (a dirty tree will appear "older" than the
+ # corresponding clean one), but you shouldn't be releasing software with
+ # -dirty anyways.
+
+ # exceptions:
+ # 1: no tags. 0.postDISTANCE[.dev0]
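+ #
+ # Worked example (hypothetical values): closest-tag "2.2.4", distance 3,
+ # short "abc1234", clean tree -> "2.2.4.post3+gabc1234". This is the
+ # style selected by get_config() above (cfg.style == "pep440-post").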
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += plus_or_dot(pieces)
+ rendered += "g%s" % pieces["short"]
+ else:
+ # exception #1
+ rendered = "0.post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += "+g%s" % pieces["short"]
+ return rendered
+
+
+def render_pep440_old(pieces):
+ # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
+
+ # exceptions:
+ # 1: no tags. 0.postDISTANCE[.dev0]
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ else:
+ # exception #1
+ rendered = "0.post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ return rendered
+
+
+def render_git_describe(pieces):
+ # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
+ # --always'
+
+ # exceptions:
+ # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render_git_describe_long(pieces):
+ # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
+ # --always --long'. The distance/hash is unconditional.
+
+ # exceptions:
+ # 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render(pieces, style):
+ if pieces["error"]:
+ return {"version": "unknown",
+ "full-revisionid": pieces.get("long"),
+ "dirty": None,
+ "error": pieces["error"]}
+
+ if not style or style == "default":
+ style = "pep440" # the default
+
+ if style == "pep440":
+ rendered = render_pep440(pieces)
+ elif style == "pep440-pre":
+ rendered = render_pep440_pre(pieces)
+ elif style == "pep440-post":
+ rendered = render_pep440_post(pieces)
+ elif style == "pep440-old":
+ rendered = render_pep440_old(pieces)
+ elif style == "git-describe":
+ rendered = render_git_describe(pieces)
+ elif style == "git-describe-long":
+ rendered = render_git_describe_long(pieces)
+ else:
+ raise ValueError("unknown style '%s'" % style)
+
+ return {"version": rendered, "full-revisionid": pieces["long"],
+ "dirty": pieces["dirty"], "error": None}
+
+
+def get_versions():
+ # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
+ # __file__, we can work backwards from there to the root. Some
+ # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
+ # case we can only use expanded keywords.
+
+ cfg = get_config()
+ verbose = cfg.verbose
+
+ try:
+ return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
+ verbose)
+ except NotThisMethod:
+ pass
+
+ try:
+ root = os.path.realpath(__file__)
+ # versionfile_source is the relative path from the top of the source
+ # tree (where the .git directory might live) to this file. Invert
+ # this to find the root from __file__.
+ for i in cfg.versionfile_source.split('/'):
+ root = os.path.dirname(root)
+ except NameError:
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to find root of source tree"}
+
+ try:
+ pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
+ return render(pieces, cfg.style)
+ except NotThisMethod:
+ pass
+
+ try:
+ if cfg.parentdir_prefix:
+ return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
+ except NotThisMethod:
+ pass
+
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to compute version"}
diff --git a/contrib/python/matplotlib/py2/matplotlib/afm.py b/contrib/python/matplotlib/py2/matplotlib/afm.py
new file mode 100644
index 00000000000..1b5f4d5f6a0
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/afm.py
@@ -0,0 +1,547 @@
+"""
+This is a python interface to Adobe Font Metrics Files. Although a
+number of other python implementations exist, and may be more complete
+than this, it was decided not to go with them because they were
+either:
+
+ 1) copyrighted or used a non-BSD compatible license
+
+ 2) had too many dependencies and a free standing lib was needed
+
+ 3) did more than was needed, and it was easier to write afresh rather
+ than figure out how to get just what was needed.
+
+It is pretty easy to use, and requires only built-in python libs:
+
+ >>> from matplotlib import rcParams
+ >>> import os.path
+ >>> afm_fname = os.path.join(rcParams['datapath'],
+ ... 'fonts', 'afm', 'ptmr8a.afm')
+ >>>
+ >>> from matplotlib.afm import AFM
+ >>> with open(afm_fname, 'rb') as fh:
+ ... afm = AFM(fh)
+ >>> afm.string_width_height('What the heck?')
+ (6220.0, 694)
+ >>> afm.get_fontname()
+ 'Times-Roman'
+ >>> afm.get_kern_dist('A', 'f')
+ 0
+ >>> afm.get_kern_dist('A', 'y')
+ -92.0
+ >>> afm.get_bbox_char('!')
+ [130, -9, 238, 676]
+
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import map
+
+import sys
+import re
+from ._mathtext_data import uni2type1
+
+# Convert string to a python type
+
+# some afm files have floats where we are expecting ints -- there is
+# probably a better way to handle this (support floats, round rather
+# than truncate). But I don't know what the best approach is now and
+# this change to _to_int should at least prevent mpl from crashing on
+# these JDH (2009-11-06)
+
+
+def _to_int(x):
+ return int(float(x))
+
+
+_to_float = float
+
+
+def _to_str(x):
+ return x.decode('utf8')
+
+
+def _to_list_of_ints(s):
+ s = s.replace(b',', b' ')
+ return [_to_int(val) for val in s.split()]
+
+
+def _to_list_of_floats(s):
+ return [_to_float(val) for val in s.split()]
+
+
+def _to_bool(s):
+ if s.lower().strip() in (b'false', b'0', b'no'):
+ return False
+ else:
+ return True
+
+
+def _sanity_check(fh):
+ """
+ Check if the file at least looks like AFM.
+ If not, raise :exc:`RuntimeError`.
+ """
+
+ # Remember the file position in case the caller wants to
+ # do something else with the file.
+ pos = fh.tell()
+ try:
+ line = next(fh)
+ finally:
+ fh.seek(pos, 0)
+
+ # AFM spec, Section 4: The StartFontMetrics keyword [followed by a
+ # version number] must be the first line in the file, and the
+ # EndFontMetrics keyword must be the last non-empty line in the
+ # file. We just check the first line.
+ if not line.startswith(b'StartFontMetrics'):
+ raise RuntimeError('Not an AFM file')
+
+
+def _parse_header(fh):
+ """
+ Reads the font metrics header (up to the char metrics) and returns
+ a dictionary mapping *key* to *val*. *val* will be converted to the
+ appropriate python type as necessary; e.g.:
+
+ * 'False'->False
+ * '0'->0
+ * '-168 -218 1000 898'-> [-168, -218, 1000, 898]
+
+ Dictionary keys are
+
+ StartFontMetrics, FontName, FullName, FamilyName, Weight,
+ ItalicAngle, IsFixedPitch, FontBBox, UnderlinePosition,
+ UnderlineThickness, Version, Notice, EncodingScheme, CapHeight,
+ XHeight, Ascender, Descender, StartCharMetrics
+
+ """
+ headerConverters = {
+ b'StartFontMetrics': _to_float,
+ b'FontName': _to_str,
+ b'FullName': _to_str,
+ b'FamilyName': _to_str,
+ b'Weight': _to_str,
+ b'ItalicAngle': _to_float,
+ b'IsFixedPitch': _to_bool,
+ b'FontBBox': _to_list_of_ints,
+ b'UnderlinePosition': _to_int,
+ b'UnderlineThickness': _to_int,
+ b'Version': _to_str,
+ b'Notice': _to_str,
+ b'EncodingScheme': _to_str,
+ b'CapHeight': _to_float, # Is the second version a mistake, or
+ b'Capheight': _to_float, # do some AFM files contain 'Capheight'? -JKS
+ b'XHeight': _to_float,
+ b'Ascender': _to_float,
+ b'Descender': _to_float,
+ b'StdHW': _to_float,
+ b'StdVW': _to_float,
+ b'StartCharMetrics': _to_int,
+ b'CharacterSet': _to_str,
+ b'Characters': _to_int,
+ }
+
+ d = {}
+ for line in fh:
+ line = line.rstrip()
+ if line.startswith(b'Comment'):
+ continue
+ lst = line.split(b' ', 1)
+
+ key = lst[0]
+ if len(lst) == 2:
+ val = lst[1]
+ else:
+ val = b''
+
+ try:
+ d[key] = headerConverters[key](val)
+ except ValueError:
+ print('Value error parsing header in AFM:',
+ key, val, file=sys.stderr)
+ continue
+ except KeyError:
+ print('Found an unknown keyword in AFM header (was %r)' % key,
+ file=sys.stderr)
+ continue
+ if key == b'StartCharMetrics':
+ return d
+ raise RuntimeError('Bad parse')
+
+
+def _parse_char_metrics(fh):
+ """
+ Return a character metric dictionary. Keys are the ASCII num of
+ the character, values are a (*wx*, *name*, *bbox*) tuple, where
+ *wx* is the character width, *name* is the postscript language
+ name, and *bbox* is a (*llx*, *lly*, *urx*, *ury*) tuple.
+
+ This function is incomplete per the standard, but thus far parses
+ all the sample afm files tried.
+ """
+
+ ascii_d = {}
+ name_d = {}
+ for line in fh:
+ # We are defensively letting values be utf8. The spec requires
+ # ascii, but there are non-compliant fonts in circulation
+ line = _to_str(line.rstrip()) # Convert from byte-literal
+ if line.startswith('EndCharMetrics'):
+ return ascii_d, name_d
+ # Split the metric line into a dictionary, keyed by metric identifiers
+ vals = dict(s.strip().split(' ', 1) for s in line.split(';') if s)
+ # There may be other metrics present, but only these are needed
+ if not {'C', 'WX', 'N', 'B'}.issubset(vals):
+ raise RuntimeError('Bad char metrics line: %s' % line)
+ num = _to_int(vals['C'])
+ wx = _to_float(vals['WX'])
+ name = vals['N']
+ bbox = _to_list_of_floats(vals['B'])
+ bbox = list(map(int, bbox))
+ # Workaround: If the character name is 'Euro', give it the
+ # corresponding character code, according to WinAnsiEncoding (see PDF
+ # Reference).
+ if name == 'Euro':
+ num = 128
+ if num != -1:
+ ascii_d[num] = (wx, name, bbox)
+ name_d[name] = (wx, bbox)
+ raise RuntimeError('Bad parse')
+
+
+def _parse_kern_pairs(fh):
+ """
+ Return a kern pairs dictionary; keys are (*char1*, *char2*) tuples and
+ values are the kern pair value. For example, a kern pairs line like
+ ``KPX A y -50``
+
+ will be represented as::
+
+ d[ ('A', 'y') ] = -50
+
+ """
+
+ line = next(fh)
+ if not line.startswith(b'StartKernPairs'):
+ raise RuntimeError('Bad start of kern pairs data: %s' % line)
+
+ d = {}
+ for line in fh:
+ line = line.rstrip()
+ if not line:
+ continue
+ if line.startswith(b'EndKernPairs'):
+ next(fh) # EndKernData
+ return d
+ vals = line.split()
+ if len(vals) != 4 or vals[0] != b'KPX':
+ raise RuntimeError('Bad kern pairs line: %s' % line)
+ c1, c2, val = _to_str(vals[1]), _to_str(vals[2]), _to_float(vals[3])
+ d[(c1, c2)] = val
+ raise RuntimeError('Bad kern pairs parse')
+
+
+def _parse_composites(fh):
+ """
+ Return a composites dictionary. Keys are the names of the
+ composites. Values are a list of composite parts, with each element
+ being a (*name*, *dx*, *dy*) tuple. Thus a
+ composites line reading:
+
+ CC Aacute 2 ; PCC A 0 0 ; PCC acute 160 170 ;
+
+ will be represented as::
+
+ d['Aacute'] = [ ('A', 0, 0), ('acute', 160, 170) ]
+
+ """
+ d = {}
+ for line in fh:
+ line = line.rstrip()
+ if not line:
+ continue
+ if line.startswith(b'EndComposites'):
+ return d
+ vals = line.split(b';')
+ cc = vals[0].split()
+ name, numParts = cc[1], _to_int(cc[2])
+ pccParts = []
+ for s in vals[1:-1]:
+ pcc = s.split()
+ name, dx, dy = pcc[1], _to_float(pcc[2]), _to_float(pcc[3])
+ pccParts.append((name, dx, dy))
+ d[name] = pccParts
+
+ raise RuntimeError('Bad composites parse')
+
+
+def _parse_optional(fh):
+ """
+ Parse the optional fields for kern pair data and composites
+
+ return value is a (*kernDict*, *compositeDict*) which are the
+ return values from :func:`_parse_kern_pairs`, and
+ :func:`_parse_composites` if the data exists, or empty dicts
+ otherwise
+ """
+ optional = {
+ b'StartKernData': _parse_kern_pairs,
+ b'StartComposites': _parse_composites,
+ }
+
+ d = {b'StartKernData': {}, b'StartComposites': {}}
+ for line in fh:
+ line = line.rstrip()
+ if not line:
+ continue
+ key = line.split()[0]
+
+ if key in optional:
+ d[key] = optional[key](fh)
+
+ l = (d[b'StartKernData'], d[b'StartComposites'])
+ return l
+
+
+def parse_afm(fh):
+ """
+ Parse the Adobe Font Metrics file in file handle *fh*. Return value
+ is a (*dhead*, *dcmetrics_ascii*, *dmetrics_name*, *dkernpairs*,
+ *dcomposite*) tuple where
+ *dhead* is a :func:`_parse_header` dict,
+ *dcmetrics_ascii* and *dcmetrics_name* are the two resulting dicts
+ from :func:`_parse_char_metrics`,
+ *dkernpairs* is a :func:`_parse_kern_pairs` dict (possibly {}) and
+ *dcomposite* is a :func:`_parse_composites` dict (possibly {})
+ """
+ _sanity_check(fh)
+ dhead = _parse_header(fh)
+ dcmetrics_ascii, dcmetrics_name = _parse_char_metrics(fh)
+ doptional = _parse_optional(fh)
+ return dhead, dcmetrics_ascii, dcmetrics_name, doptional[0], doptional[1]
+
+
+class AFM(object):
+
+ def __init__(self, fh):
+ """
+ Parse the AFM file in file object *fh*
+ """
+ (dhead, dcmetrics_ascii, dcmetrics_name, dkernpairs, dcomposite) = \
+ parse_afm(fh)
+ self._header = dhead
+ self._kern = dkernpairs
+ self._metrics = dcmetrics_ascii
+ self._metrics_by_name = dcmetrics_name
+ self._composite = dcomposite
+
+ def get_bbox_char(self, c, isord=False):
+ if not isord:
+ c = ord(c)
+ wx, name, bbox = self._metrics[c]
+ return bbox
+
+ def string_width_height(self, s):
+ """
+ Return the string width (including kerning) and string height
+ as a (*w*, *h*) tuple.
+ """
+ if not len(s):
+ return 0, 0
+ totalw = 0
+ namelast = None
+ miny = 1e9
+ maxy = 0
+ for c in s:
+ if c == '\n':
+ continue
+ wx, name, bbox = self._metrics[ord(c)]
+ l, b, w, h = bbox
+
+ # find the width with kerning
+ try:
+ kp = self._kern[(namelast, name)]
+ except KeyError:
+ kp = 0
+ totalw += wx + kp
+
+ # find the max y
+ thismax = b + h
+ if thismax > maxy:
+ maxy = thismax
+
+ # find the min y
+ thismin = b
+ if thismin < miny:
+ miny = thismin
+ namelast = name
+
+ return totalw, maxy - miny
+
+ def get_str_bbox_and_descent(self, s):
+ """
+ Return the string bounding box and the maximal descent
+ """
+ if not len(s):
+ return 0, 0, 0, 0, 0
+ totalw = 0
+ namelast = None
+ miny = 1e9
+ maxy = 0
+ left = 0
+ if not isinstance(s, six.text_type):
+ s = _to_str(s)
+ for c in s:
+ if c == '\n':
+ continue
+ name = uni2type1.get(ord(c), 'question')
+ try:
+ wx, bbox = self._metrics_by_name[name]
+ except KeyError:
+ name = 'question'
+ wx, bbox = self._metrics_by_name[name]
+ l, b, w, h = bbox
+ if l < left:
+ left = l
+ # find the width with kerning
+ try:
+ kp = self._kern[(namelast, name)]
+ except KeyError:
+ kp = 0
+ totalw += wx + kp
+
+ # find the max y
+ thismax = b + h
+ if thismax > maxy:
+ maxy = thismax
+
+ # find the min y
+ thismin = b
+ if thismin < miny:
+ miny = thismin
+ namelast = name
+
+ return left, miny, totalw, maxy - miny, -miny
+
+ def get_str_bbox(self, s):
+ """
+ Return the string bounding box
+ """
+ return self.get_str_bbox_and_descent(s)[:4]
+
+ def get_name_char(self, c, isord=False):
+ """
+ Get the name of the character, i.e., ';' is 'semicolon'
+ """
+ if not isord:
+ c = ord(c)
+ wx, name, bbox = self._metrics[c]
+ return name
+
+ def get_width_char(self, c, isord=False):
+ """
+ Get the width of the character from the character metric WX
+ field
+ """
+ if not isord:
+ c = ord(c)
+ wx, name, bbox = self._metrics[c]
+ return wx
+
+ def get_width_from_char_name(self, name):
+ """
+ Get the width of the character from a type1 character name
+ """
+ wx, bbox = self._metrics_by_name[name]
+ return wx
+
+ def get_height_char(self, c, isord=False):
+ """
+ Get the height of character *c* from the bounding box. This
+ is the ink height (space is 0)
+ """
+ if not isord:
+ c = ord(c)
+ wx, name, bbox = self._metrics[c]
+ return bbox[-1]
+
+ def get_kern_dist(self, c1, c2):
+ """
+ Return the kerning pair distance (possibly 0) for chars *c1*
+ and *c2*
+ """
+ name1, name2 = self.get_name_char(c1), self.get_name_char(c2)
+ return self.get_kern_dist_from_name(name1, name2)
+
+ def get_kern_dist_from_name(self, name1, name2):
+ """
+ Return the kerning pair distance (possibly 0) for chars
+ *name1* and *name2*
+ """
+ return self._kern.get((name1, name2), 0)
+
+ def get_fontname(self):
+ "Return the font name, e.g., 'Times-Roman'"
+ return self._header[b'FontName']
+
+ def get_fullname(self):
+ "Return the font full name, e.g., 'Times-Roman'"
+ name = self._header.get(b'FullName')
+ if name is None: # use FontName as a substitute
+ name = self._header[b'FontName']
+ return name
+
+ def get_familyname(self):
+ "Return the font family name, e.g., 'Times'"
+ name = self._header.get(b'FamilyName')
+ if name is not None:
+ return name
+
+ # FamilyName not specified so we'll make a guess
+ name = self.get_fullname()
+ extras = (r'(?i)([ -](regular|plain|italic|oblique|bold|semibold|'
+ r'light|ultralight|extra|condensed))+$')
+ return re.sub(extras, '', name)
+
+ @property
+ def family_name(self):
+ return self.get_familyname()
+
+ def get_weight(self):
+ "Return the font weight, e.g., 'Bold' or 'Roman'"
+ return self._header[b'Weight']
+
+ def get_angle(self):
+ "Return the fontangle as float"
+ return self._header[b'ItalicAngle']
+
+ def get_capheight(self):
+ "Return the cap height as float"
+ return self._header[b'CapHeight']
+
+ def get_xheight(self):
+ "Return the xheight as float"
+ return self._header[b'XHeight']
+
+ def get_underline_thickness(self):
+ "Return the underline thickness as float"
+ return self._header[b'UnderlineThickness']
+
+ def get_horizontal_stem_width(self):
+ """
+ Return the standard horizontal stem width as float, or *None* if
+ not specified in AFM file.
+ """
+ return self._header.get(b'StdHW', None)
+
+ def get_vertical_stem_width(self):
+ """
+ Return the standard vertical stem width as float, or *None* if
+ not specified in AFM file.
+ """
+ return self._header.get(b'StdVW', None)
diff --git a/contrib/python/matplotlib/py2/matplotlib/animation.py b/contrib/python/matplotlib/py2/matplotlib/animation.py
new file mode 100644
index 00000000000..e2e6f51e706
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/animation.py
@@ -0,0 +1,1778 @@
+# TODO:
+# * Loop Delay is broken on GTKAgg. This is because source_remove() is not
+# working as we want. PyGTK bug?
+# * Documentation -- this will need a new section of the User's Guide.
+# Both for Animations and just timers.
+# - Also need to update http://www.scipy.org/Cookbook/Matplotlib/Animations
+# * Blit
+# * Currently broken with Qt4 for widgets that don't start on screen
+# * Still a few edge cases that aren't working correctly
+# * Can this integrate better with existing matplotlib animation artist flag?
+# - If animated removes from default draw(), perhaps we could use this to
+# simplify initial draw.
+# * Example
+# * Frameless animation - pure procedural with no loop
+# * Need example that uses something like inotify or subprocess
+# * Complex syncing examples
+# * Movies
+# * Can blit be enabled for movies?
+# * Need to consider event sources to allow clicking through multiple figures
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange, zip
+
+import abc
+import contextlib
+from io import BytesIO
+import itertools
+import logging
+import os
+import platform
+import sys
+import tempfile
+import uuid
+
+import numpy as np
+
+from matplotlib._animation_data import (DISPLAY_TEMPLATE, INCLUDED_FRAMES,
+ JS_INCLUDE)
+from matplotlib.compat import subprocess
+from matplotlib import cbook, rcParams, rcParamsDefault, rc_context
+
+if six.PY2:
+ from base64 import encodestring as encodebytes
+else:
+ from base64 import encodebytes
+
+
+_log = logging.getLogger(__name__)
+
+# Process creation flag for subprocess to prevent it raising a terminal
+# window. See for example:
+# https://stackoverflow.com/questions/24130623/using-python-subprocess-popen-cant-prevent-exe-stopped-working-prompt
+if platform.system() == 'Windows':
+ subprocess_creation_flags = CREATE_NO_WINDOW = 0x08000000
+else:
+ # Apparently None won't work here
+ subprocess_creation_flags = 0
+
+# Other potential writing methods:
+# * http://pymedia.org/
+# * libming (produces swf) python wrappers: https://github.com/libming/libming
+# * Wrap x264 API:
+
+# (http://stackoverflow.com/questions/2940671/
+# how-to-encode-series-of-images-into-h264-using-x264-api-c-c )
+
+
+def adjusted_figsize(w, h, dpi, n):
+ '''Compute figure size so that pixels are a multiple of n
+
+ Parameters
+ ----------
+ w, h : float
+ Size in inches
+
+ dpi : float
+ The dpi
+
+ n : int
+ The target multiple
+
+ Returns
+ -------
+ wnew, hnew : float
+ The new figure size in inches.
+ '''
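+ # Illustrative example (hypothetical numbers): with dpi=100 and n=2, a
+ # requested width of 5.13 in (513 px) is nudged down to 5.12 in (512 px),
+ # so that e.g. the h264 encoder (see _adjust_frame_size below) always gets
+ # even pixel dimensions.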
+
+ # this may be simplified if / when we adopt consistent rounding for
+ # pixel size across the whole library
+ def correct_roundoff(x, dpi, n):
+ if int(x*dpi) % n != 0:
+ if int(np.nextafter(x, np.inf)*dpi) % n == 0:
+ x = np.nextafter(x, np.inf)
+ elif int(np.nextafter(x, -np.inf)*dpi) % n == 0:
+ x = np.nextafter(x, -np.inf)
+ return x
+
+ wnew = int(w * dpi / n) * n / dpi
+ hnew = int(h * dpi / n) * n / dpi
+ return (correct_roundoff(wnew, dpi, n), correct_roundoff(hnew, dpi, n))
+
+
+# A registry for available MovieWriter classes
+class MovieWriterRegistry(object):
+ '''Registry of available writer classes by human readable name.'''
+ def __init__(self):
+ self.avail = dict()
+ self._registered = dict()
+ self._dirty = False
+
+ def set_dirty(self):
+ """Sets a flag to re-setup the writers."""
+ self._dirty = True
+
+ def register(self, name):
+ """Decorator for registering a class under a name.
+
+ Example use::
+
+ @registry.register(name)
+ class Foo:
+ pass
+ """
+ def wrapper(writerClass):
+ self._registered[name] = writerClass
+ if writerClass.isAvailable():
+ self.avail[name] = writerClass
+ return writerClass
+ return wrapper
+
+ def ensure_not_dirty(self):
+ """If dirty, reasks the writers if they are available"""
+ if self._dirty:
+ self.reset_available_writers()
+
+ def reset_available_writers(self):
+ """Reset the available state of all registered writers"""
+ self.avail = {}
+ for name, writerClass in self._registered.items():
+ if writerClass.isAvailable():
+ self.avail[name] = writerClass
+ self._dirty = False
+
+ def list(self):
+ '''Get a list of available MovieWriters.'''
+ self.ensure_not_dirty()
+ return list(self.avail)
+
+ def is_available(self, name):
+ '''Check if given writer is available by name.
+
+ Parameters
+ ----------
+ name : str
+
+ Returns
+ -------
+ available : bool
+ '''
+ self.ensure_not_dirty()
+ return name in self.avail
+
+ def __getitem__(self, name):
+ self.ensure_not_dirty()
+ if not self.avail:
+ raise RuntimeError("No MovieWriters available!")
+ try:
+ return self.avail[name]
+ except KeyError:
+ raise RuntimeError(
+ 'Requested MovieWriter ({}) not available'.format(name))
+
+
+writers = MovieWriterRegistry()
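+# Illustrative (hypothetical) use of the registry; 'ffmpeg' is only an example
+# name for a writer class registered via @writers.register(...):
+#   if writers.is_available('ffmpeg'):
+#       Writer = writers['ffmpeg']
+#       moviewriter = Writer(fps=15)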
+
+
+class AbstractMovieWriter(six.with_metaclass(abc.ABCMeta)):
+ '''
+ Abstract base class for writing movies. Fundamentally, what a MovieWriter
+ does is provide a way to grab frames by calling grab_frame().
+
+ setup() is called to start the process and finish() is called afterwards.
+
+ This class is set up to provide for writing movie frame data to a pipe.
+ saving() is provided as a context manager to facilitate this process as::
+
+ with moviewriter.saving(fig, outfile='myfile.mp4', dpi=100):
+ # Iterate over frames
+ moviewriter.grab_frame(**savefig_kwargs)
+
+ The use of the context manager ensures that setup() and finish() are
+ performed as necessary.
+
+ An instance of a concrete subclass of this class can be given as the
+ ``writer`` argument of `Animation.save()`.
+ '''
+
+ @abc.abstractmethod
+ def setup(self, fig, outfile, dpi=None):
+ '''
+ Perform setup for writing the movie file.
+
+ Parameters
+ ----------
+ fig: `matplotlib.figure.Figure` instance
+ The figure object that contains the information for frames
+ outfile: string
+ The filename of the resulting movie file
+ dpi: int, optional
+ The DPI (or resolution) for the file. This controls the size
+ in pixels of the resulting movie file. Default is ``fig.dpi``.
+ '''
+
+ @abc.abstractmethod
+ def grab_frame(self, **savefig_kwargs):
+ '''
+ Grab the image information from the figure and save as a movie frame.
+
+ All keyword arguments in savefig_kwargs are passed on to the `savefig`
+ command that saves the figure.
+ '''
+
+ @abc.abstractmethod
+ def finish(self):
+ '''Finish any processing for writing the movie.'''
+
+ @contextlib.contextmanager
+ def saving(self, fig, outfile, dpi, *args, **kwargs):
+ '''
+ Context manager to facilitate writing the movie file.
+
+ ``*args, **kw`` are any parameters that should be passed to `setup`.
+ '''
+ # This particular sequence is what contextlib.contextmanager wants
+ self.setup(fig, outfile, dpi, *args, **kwargs)
+ try:
+ yield self
+ finally:
+ self.finish()
+
+
+class MovieWriter(AbstractMovieWriter):
+ '''Base class for writing movies.
+
+ This class is set up to provide for writing movie frame data to a pipe.
+ See examples for how to use these classes.
+
+ Attributes
+ ----------
+ frame_format : str
+ The format used in writing frame data, defaults to 'rgba'
+ fig : `~matplotlib.figure.Figure`
+ The figure to capture data from.
+ This must be provided by the sub-classes.
+
+ '''
+
+ def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None,
+ metadata=None):
+ '''MovieWriter
+
+ Parameters
+ ----------
+ fps: int
+ Framerate for movie.
+ codec: string or None, optional
+ The codec to use. If ``None`` (the default) the ``animation.codec``
+ rcParam is used.
+ bitrate: int or None, optional
+ The bitrate for the saved movie file, which is one way to control
+ the output file size and quality. The default value is ``None``,
+ which uses the ``animation.bitrate`` rcParam. A value of -1
+ implies that the bitrate should be determined automatically by the
+ underlying utility.
+ extra_args: list of strings or None, optional
+ A list of extra string arguments to be passed to the underlying
+ movie utility. The default is ``None``, which passes the additional
+ arguments in the ``animation.extra_args`` rcParam.
+ metadata: Dict[str, str] or None
+ A dictionary of keys and values for metadata to include in the
+ output file. Some keys that may be of use include:
+ title, artist, genre, subject, copyright, srcform, comment.
+ '''
+ self.fps = fps
+ self.frame_format = 'rgba'
+
+ if codec is None:
+ self.codec = rcParams['animation.codec']
+ else:
+ self.codec = codec
+
+ if bitrate is None:
+ self.bitrate = rcParams['animation.bitrate']
+ else:
+ self.bitrate = bitrate
+
+ if extra_args is None:
+ self.extra_args = list(rcParams[self.args_key])
+ else:
+ self.extra_args = extra_args
+
+ if metadata is None:
+ self.metadata = dict()
+ else:
+ self.metadata = metadata
+
+ @property
+ def frame_size(self):
+ '''A tuple ``(width, height)`` in pixels of a movie frame.'''
+ w, h = self.fig.get_size_inches()
+ return int(w * self.dpi), int(h * self.dpi)
+
+ def _adjust_frame_size(self):
+ if self.codec == 'h264':
+ wo, ho = self.fig.get_size_inches()
+ w, h = adjusted_figsize(wo, ho, self.dpi, 2)
+ if not (wo, ho) == (w, h):
+ self.fig.set_size_inches(w, h, forward=True)
+ _log.info('figure size (inches) has been adjusted '
+ 'from %s x %s to %s x %s', wo, ho, w, h)
+ else:
+ w, h = self.fig.get_size_inches()
+ _log.debug('frame size in pixels is %s x %s', *self.frame_size)
+ return w, h
+
+ def setup(self, fig, outfile, dpi=None):
+ '''
+ Perform setup for writing the movie file.
+
+ Parameters
+ ----------
+ fig : matplotlib.figure.Figure
+ The figure object that contains the information for frames
+ outfile : string
+ The filename of the resulting movie file
+ dpi : int, optional
+ The DPI (or resolution) for the file. This controls the size
+ in pixels of the resulting movie file. Default is fig.dpi.
+ '''
+ self.outfile = outfile
+ self.fig = fig
+ if dpi is None:
+ dpi = self.fig.dpi
+ self.dpi = dpi
+ self._w, self._h = self._adjust_frame_size()
+
+ # Run here so that grab_frame() can write the data to a pipe. This
+ # eliminates the need for temp files.
+ self._run()
+
+ def _run(self):
+ # Uses subprocess to call the program for assembling frames into a
+ # movie file. *args* returns the sequence of command line arguments
+ # from a few configuration options.
+ command = self._args()
+ output = subprocess.PIPE
+ _log.info('MovieWriter.run: running command: %s', command)
+ self._proc = subprocess.Popen(command, shell=False,
+ stdout=output, stderr=output,
+ stdin=subprocess.PIPE,
+ creationflags=subprocess_creation_flags)
+
+ def finish(self):
+ '''Finish any processing for writing the movie.'''
+ self.cleanup()
+
+ def grab_frame(self, **savefig_kwargs):
+ '''
+ Grab the image information from the figure and save as a movie frame.
+
+ All keyword arguments in savefig_kwargs are passed on to the `savefig`
+ command that saves the figure.
+ '''
+ _log.debug('MovieWriter.grab_frame: Grabbing frame.')
+ try:
+ # re-adjust the figure size in case it has been changed by the
+ # user. We must ensure that every frame is the same size or
+ # the movie will not save correctly.
+ self.fig.set_size_inches(self._w, self._h)
+ # Tell the figure to save its data to the sink, using the
+ # frame format and dpi.
+ self.fig.savefig(self._frame_sink(), format=self.frame_format,
+ dpi=self.dpi, **savefig_kwargs)
+ except (RuntimeError, IOError) as e:
+ out, err = self._proc.communicate()
+ _log.info('MovieWriter -- Error '
+ 'running proc:\n%s\n%s' % (out, err))
+ raise IOError('Error saving animation to file (cause: {0}) '
+ 'Stdout: {1} StdError: {2}. It may help to re-run '
+ 'with logging level set to '
+ 'DEBUG.'.format(e, out, err))
+
+ def _frame_sink(self):
+ '''Returns the place to which frames should be written.'''
+ return self._proc.stdin
+
+ def _args(self):
+ '''Assemble list of utility-specific command-line arguments.'''
+        raise NotImplementedError("args needs to be implemented by subclass.")
+
+ def cleanup(self):
+ '''Clean-up and collect the process used to write the movie file.'''
+ out, err = self._proc.communicate()
+ self._frame_sink().close()
+ _log.debug('MovieWriter -- Command stdout:\n%s', out)
+ _log.debug('MovieWriter -- Command stderr:\n%s', err)
+
+ @classmethod
+ def bin_path(cls):
+ '''
+ Returns the binary path to the commandline tool used by a specific
+ subclass. This is a class method so that the tool can be looked for
+ before making a particular MovieWriter subclass available.
+ '''
+ return str(rcParams[cls.exec_key])
+
+ @classmethod
+ def isAvailable(cls):
+ '''
+ Check to see if a MovieWriter subclass is actually available by
+ running the commandline tool.
+ '''
+ bin_path = cls.bin_path()
+ if not bin_path:
+ return False
+ try:
+ p = subprocess.Popen(
+ bin_path,
+ shell=False,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ creationflags=subprocess_creation_flags)
+ return cls._handle_subprocess(p)
+ except OSError:
+ return False
+
+ @classmethod
+ def _handle_subprocess(cls, process):
+ process.communicate()
+ return True
+
+
+class FileMovieWriter(MovieWriter):
+ '''`MovieWriter` for writing to individual files and stitching at the end.
+
+ This must be sub-classed to be useful.
+ '''
+ def __init__(self, *args, **kwargs):
+ MovieWriter.__init__(self, *args, **kwargs)
+ self.frame_format = rcParams['animation.frame_format']
+
+ def setup(self, fig, outfile, dpi=None, frame_prefix='_tmp',
+ clear_temp=True):
+ '''Perform setup for writing the movie file.
+
+ Parameters
+ ----------
+ fig : matplotlib.figure.Figure
+ The figure to grab the rendered frames from.
+ outfile : str
+ The filename of the resulting movie file.
+ dpi : number, optional
+ The dpi of the output file. This, with the figure size,
+ controls the size in pixels of the resulting movie file.
+ Default is fig.dpi.
+ frame_prefix : str, optional
+ The filename prefix to use for temporary files. Defaults to
+ ``'_tmp'``.
+ clear_temp : bool, optional
+ If the temporary files should be deleted after stitching
+ the final result. Setting this to ``False`` can be useful for
+ debugging. Defaults to ``True``.
+
+ '''
+ self.fig = fig
+ self.outfile = outfile
+ if dpi is None:
+ dpi = self.fig.dpi
+ self.dpi = dpi
+ self._adjust_frame_size()
+
+ self.clear_temp = clear_temp
+ self.temp_prefix = frame_prefix
+ self._frame_counter = 0 # used for generating sequential file names
+ self._temp_names = list()
+ self.fname_format_str = '%s%%07d.%s'
+
+ @property
+ def frame_format(self):
+ '''
+ Format (png, jpeg, etc.) to use for saving the frames, which can be
+ decided by the individual subclasses.
+ '''
+ return self._frame_format
+
+ @frame_format.setter
+ def frame_format(self, frame_format):
+ if frame_format in self.supported_formats:
+ self._frame_format = frame_format
+ else:
+ self._frame_format = self.supported_formats[0]
+
+ def _base_temp_name(self):
+ # Generates a template name (without number) given the frame format
+ # for extension and the prefix.
+ return self.fname_format_str % (self.temp_prefix, self.frame_format)
+
+ def _frame_sink(self):
+ # Creates a filename for saving using the basename and the current
+ # counter.
+ fname = self._base_temp_name() % self._frame_counter
+
+ # Save the filename so we can delete it later if necessary
+ self._temp_names.append(fname)
+ _log.debug('FileMovieWriter.frame_sink: saving frame %d to fname=%s',
+ self._frame_counter, fname)
+ self._frame_counter += 1 # Ensures each created name is 'unique'
+
+        # The file object returned here will be closed once it's used by
+        # savefig() because it will no longer be referenced and will be
+        # gc-ed.
+ return open(fname, 'wb')
+
+ def grab_frame(self, **savefig_kwargs):
+ '''
+ Grab the image information from the figure and save as a movie frame.
+ All keyword arguments in savefig_kwargs are passed on to the `savefig`
+ command that saves the figure.
+ '''
+ # Overloaded to explicitly close temp file.
+ _log.debug('MovieWriter.grab_frame: Grabbing frame.')
+ try:
+ # Tell the figure to save its data to the sink, using the
+ # frame format and dpi.
+ with self._frame_sink() as myframesink:
+ self.fig.savefig(myframesink, format=self.frame_format,
+ dpi=self.dpi, **savefig_kwargs)
+
+ except RuntimeError:
+ out, err = self._proc.communicate()
+ _log.info('MovieWriter -- Error '
+ 'running proc:\n%s\n%s' % (out, err))
+ raise
+
+ def finish(self):
+ # Call run here now that all frame grabbing is done. All temp files
+ # are available to be assembled.
+ self._run()
+ MovieWriter.finish(self) # Will call clean-up
+
+ # Check error code for creating file here, since we just run
+ # the process here, rather than having an open pipe.
+ if self._proc.returncode:
+ try:
+ stdout = [s.decode() for s in self._proc._stdout_buff]
+ stderr = [s.decode() for s in self._proc._stderr_buff]
+ _log.info("MovieWriter.finish: stdout: %s", stdout)
+ _log.info("MovieWriter.finish: stderr: %s", stderr)
+            except Exception:
+ pass
+ raise RuntimeError('Error creating movie, return code: {}'
+ .format(self._proc.returncode))
+
+ def cleanup(self):
+ MovieWriter.cleanup(self)
+
+ # Delete temporary files
+ if self.clear_temp:
+ _log.debug('MovieWriter: clearing temporary fnames=%s',
+ self._temp_names)
+ for fname in self._temp_names:
+ os.remove(fname)
+
+
+@writers.register('pillow')
+class PillowWriter(MovieWriter):
+ @classmethod
+ def isAvailable(cls):
+ try:
+ import PIL
+ except ImportError:
+ return False
+ return True
+
+ def __init__(self, *args, **kwargs):
+ if kwargs.get("extra_args") is None:
+ kwargs["extra_args"] = ()
+ super(PillowWriter, self).__init__(*args, **kwargs)
+
+ def setup(self, fig, outfile, dpi=None):
+ self._frames = []
+ self._outfile = outfile
+ self._dpi = dpi
+ self._fig = fig
+
+ def grab_frame(self, **savefig_kwargs):
+ from PIL import Image
+ buf = BytesIO()
+ self._fig.savefig(buf, **dict(savefig_kwargs, format="rgba"))
+ renderer = self._fig.canvas.get_renderer()
+ # Using frombuffer / getbuffer may be slightly more efficient, but
+ # Py3-only.
+ self._frames.append(Image.frombytes(
+ "RGBA",
+ (int(renderer.width), int(renderer.height)),
+ buf.getvalue()))
+
+ def finish(self):
+ self._frames[0].save(
+ self._outfile, save_all=True, append_images=self._frames[1:],
+ duration=int(1000 / self.fps))
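+    # Illustrative sketch (assumes Pillow is installed and ``anim`` is an
+    # existing Animation instance):
+    #
+    #     anim.save('movie.gif', writer=PillowWriter(fps=20))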
+
+
+# Base class of ffmpeg information. Has the config keys and the common set
+# of arguments that controls the *output* side of things.
+class FFMpegBase(object):
+ '''Mixin class for FFMpeg output.
+
+ To be useful this must be multiply-inherited from with a
+ `MovieWriterBase` sub-class.
+ '''
+
+ exec_key = 'animation.ffmpeg_path'
+ args_key = 'animation.ffmpeg_args'
+
+ @property
+ def output_args(self):
+ args = ['-vcodec', self.codec]
+ # For h264, the default format is yuv444p, which is not compatible
+ # with quicktime (and others). Specifying yuv420p fixes playback on
+        # iOS, as well as HTML5 video in Firefox and Safari (on both Win and
+        # OSX). Also fixes Internet Explorer. This is as of 2015/10/29.
+ if self.codec == 'h264' and '-pix_fmt' not in self.extra_args:
+ args.extend(['-pix_fmt', 'yuv420p'])
+ # The %dk adds 'k' as a suffix so that ffmpeg treats our bitrate as in
+ # kbps
+ if self.bitrate > 0:
+ args.extend(['-b', '%dk' % self.bitrate])
+ if self.extra_args:
+ args.extend(self.extra_args)
+ for k, v in six.iteritems(self.metadata):
+ args.extend(['-metadata', '%s=%s' % (k, v)])
+
+ return args + ['-y', self.outfile]
+
+ @classmethod
+ def _handle_subprocess(cls, process):
+ _, err = process.communicate()
+ # Ubuntu 12.04 ships a broken ffmpeg binary which we shouldn't use
+ # NOTE : when removed, remove the same method in AVConvBase.
+ if 'Libav' in err.decode():
+ return False
+ return True
+
+
+# Combine FFMpeg options with pipe-based writing
+@writers.register('ffmpeg')
+class FFMpegWriter(FFMpegBase, MovieWriter):
+ '''Pipe-based ffmpeg writer.
+
+ Frames are streamed directly to ffmpeg via a pipe and written in a single
+ pass.
+ '''
+ def _args(self):
+ # Returns the command line parameters for subprocess to use
+ # ffmpeg to create a movie using a pipe.
+ args = [self.bin_path(), '-f', 'rawvideo', '-vcodec', 'rawvideo',
+ '-s', '%dx%d' % self.frame_size, '-pix_fmt', self.frame_format,
+ '-r', str(self.fps)]
+ # Logging is quieted because subprocess.PIPE has limited buffer size.
+ # If you have a lot of frames in your animation and set logging to
+ # DEBUG, you will have a buffer overrun.
+ if (_log.getEffectiveLevel() > logging.DEBUG):
+ args += ['-loglevel', 'quiet']
+ args += ['-i', 'pipe:'] + self.output_args
+ return args
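+    # For illustration, with default rcParams this assembles a command
+    # roughly like the following (actual values depend on figure size, dpi,
+    # fps and the configured output file):
+    #
+    #     ffmpeg -f rawvideo -vcodec rawvideo -s 640x480 -pix_fmt rgba \
+    #         -r 5 -loglevel quiet -i pipe: -vcodec h264 -pix_fmt yuv420p \
+    #         -y out.mp4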
+
+
+# Combine FFMpeg options with temp file-based writing
+@writers.register('ffmpeg_file')
+class FFMpegFileWriter(FFMpegBase, FileMovieWriter):
+ '''File-based ffmpeg writer.
+
+ Frames are written to temporary files on disk and then stitched
+ together at the end.
+
+ '''
+ supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
+ 'pbm', 'raw', 'rgba']
+
+ def _args(self):
+ # Returns the command line parameters for subprocess to use
+ # ffmpeg to create a movie using a collection of temp images
+ return [self.bin_path(), '-r', str(self.fps),
+ '-i', self._base_temp_name(),
+ '-vframes', str(self._frame_counter)] + self.output_args
+
+
+# Base class of avconv information. AVConv has identical arguments to
+# FFMpeg
+class AVConvBase(FFMpegBase):
+ '''Mixin class for avconv output.
+
+ To be useful this must be multiply-inherited from with a
+ `MovieWriterBase` sub-class.
+ '''
+
+ exec_key = 'animation.avconv_path'
+ args_key = 'animation.avconv_args'
+
+ # NOTE : should be removed when the same method is removed in FFMpegBase.
+ @classmethod
+ def _handle_subprocess(cls, process):
+ return MovieWriter._handle_subprocess(process)
+
+
+# Combine AVConv options with pipe-based writing
+@writers.register('avconv')
+class AVConvWriter(AVConvBase, FFMpegWriter):
+ '''Pipe-based avconv writer.
+
+ Frames are streamed directly to avconv via a pipe and written in a single
+ pass.
+ '''
+
+
+# Combine AVConv options with file-based writing
+@writers.register('avconv_file')
+class AVConvFileWriter(AVConvBase, FFMpegFileWriter):
+ '''File-based avconv writer.
+
+ Frames are written to temporary files on disk and then stitched
+ together at the end.
+ '''
+
+
+# Base class for animated GIFs with convert utility
+class ImageMagickBase(object):
+ '''Mixin class for ImageMagick output.
+
+ To be useful this must be multiply-inherited from with a
+ `MovieWriterBase` sub-class.
+ '''
+
+ exec_key = 'animation.convert_path'
+ args_key = 'animation.convert_args'
+
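+    # ImageMagick's ``-delay`` option is expressed in ticks, which default to
+    # 1/100 of a second, hence the factor of 100 below.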
+ @property
+ def delay(self):
+ return 100. / self.fps
+
+ @property
+ def output_args(self):
+ return [self.outfile]
+
+ @classmethod
+ def _init_from_registry(cls):
+ if sys.platform != 'win32' or rcParams[cls.exec_key] != 'convert':
+ return
+ from six.moves import winreg
+ for flag in (0, winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY):
+ try:
+ hkey = winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE,
+ 'Software\\Imagemagick\\Current',
+ 0, winreg.KEY_QUERY_VALUE | flag)
+ binpath = winreg.QueryValueEx(hkey, 'BinPath')[0]
+ winreg.CloseKey(hkey)
+ binpath += '\\convert.exe'
+ break
+ except Exception:
+ binpath = ''
+ rcParams[cls.exec_key] = rcParamsDefault[cls.exec_key] = binpath
+
+ @classmethod
+ def isAvailable(cls):
+ '''
+        Check to see if an ImageMagickWriter is actually available.
+
+        Done by first checking the Windows registry (if applicable) and then
+ running the commandline tool.
+ '''
+ bin_path = cls.bin_path()
+ if bin_path == "convert":
+ cls._init_from_registry()
+ return super(ImageMagickBase, cls).isAvailable()
+
+ImageMagickBase._init_from_registry()
+
+
+# Note: the base classes need to be in that order to get
+# isAvailable() from ImageMagickBase called and not the
+# one from MovieWriter. The latter is then called by the
+# former.
+@writers.register('imagemagick')
+class ImageMagickWriter(ImageMagickBase, MovieWriter):
+ '''Pipe-based animated gif.
+
+ Frames are streamed directly to ImageMagick via a pipe and written
+ in a single pass.
+
+ '''
+ def _args(self):
+ return ([self.bin_path(),
+ '-size', '%ix%i' % self.frame_size, '-depth', '8',
+ '-delay', str(self.delay), '-loop', '0',
+ '%s:-' % self.frame_format]
+ + self.output_args)
+
+
+# Note: the base classes need to be in that order to get
+# isAvailable() from ImageMagickBase called and not the
+# one from MovieWriter. The latter is then called by the
+# former.
+@writers.register('imagemagick_file')
+class ImageMagickFileWriter(ImageMagickBase, FileMovieWriter):
+ '''File-based animated gif writer.
+
+ Frames are written to temporary files on disk and then stitched
+ together at the end.
+
+ '''
+
+ supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
+ 'pbm', 'raw', 'rgba']
+
+ def _args(self):
+ return ([self.bin_path(), '-delay', str(self.delay), '-loop', '0',
+ '%s*.%s' % (self.temp_prefix, self.frame_format)]
+ + self.output_args)
+
+
+# Taken directly from jakevdp's JSAnimation package at
+# http://github.com/jakevdp/JSAnimation
+def _included_frames(frame_list, frame_format):
+ """frame_list should be a list of filenames"""
+ return INCLUDED_FRAMES.format(Nframes=len(frame_list),
+ frame_dir=os.path.dirname(frame_list[0]),
+ frame_format=frame_format)
+
+
+def _embedded_frames(frame_list, frame_format):
+ """frame_list should be a list of base64-encoded png files"""
+ template = ' frames[{0}] = "data:image/{1};base64,{2}"\n'
+ return "\n" + "".join(
+ template.format(i, frame_format, frame_data.replace('\n', '\\\n'))
+ for i, frame_data in enumerate(frame_list))
+
+
+@writers.register('html')
+class HTMLWriter(FileMovieWriter):
+ supported_formats = ['png', 'jpeg', 'tiff', 'svg']
+ args_key = 'animation.html_args'
+
+ @classmethod
+ def isAvailable(cls):
+ return True
+
+ def __init__(self, fps=30, codec=None, bitrate=None, extra_args=None,
+ metadata=None, embed_frames=False, default_mode='loop',
+ embed_limit=None):
+ self.embed_frames = embed_frames
+ self.default_mode = default_mode.lower()
+
+ # Save embed limit, which is given in MB
+ if embed_limit is None:
+ self._bytes_limit = rcParams['animation.embed_limit']
+ else:
+ self._bytes_limit = embed_limit
+
+ # Convert from MB to bytes
+ self._bytes_limit *= 1024 * 1024
+
+ if self.default_mode not in ['loop', 'once', 'reflect']:
+ self.default_mode = 'loop'
+ _log.warning("unrecognized default_mode: using 'loop'")
+
+ self._saved_frames = []
+ self._total_bytes = 0
+ self._hit_limit = False
+ super(HTMLWriter, self).__init__(fps, codec, bitrate,
+ extra_args, metadata)
+
+ def setup(self, fig, outfile, dpi, frame_dir=None):
+ root, ext = os.path.splitext(outfile)
+ if ext not in ['.html', '.htm']:
+ raise ValueError("outfile must be *.htm or *.html")
+
+ if not self.embed_frames:
+ if frame_dir is None:
+ frame_dir = root + '_frames'
+ if not os.path.exists(frame_dir):
+ os.makedirs(frame_dir)
+ frame_prefix = os.path.join(frame_dir, 'frame')
+ else:
+ frame_prefix = None
+
+ super(HTMLWriter, self).setup(fig, outfile, dpi,
+ frame_prefix, clear_temp=False)
+
+ def grab_frame(self, **savefig_kwargs):
+ if self.embed_frames:
+ # Just stop processing if we hit the limit
+ if self._hit_limit:
+ return
+ suffix = '.' + self.frame_format
+ f = BytesIO()
+ self.fig.savefig(f, format=self.frame_format,
+ dpi=self.dpi, **savefig_kwargs)
+ imgdata64 = encodebytes(f.getvalue()).decode('ascii')
+ self._total_bytes += len(imgdata64)
+ if self._total_bytes >= self._bytes_limit:
+ _log.warning(
+ "Animation size has reached %s bytes, exceeding the limit "
+ "of %s. If you're sure you want a larger animation "
+ "embedded, set the animation.embed_limit rc parameter to "
+ "a larger value (in MB). This and further frames will be "
+ "dropped.", self._total_bytes, self._bytes_limit)
+ self._hit_limit = True
+ else:
+ self._saved_frames.append(imgdata64)
+ else:
+ return super(HTMLWriter, self).grab_frame(**savefig_kwargs)
+
+ def _run(self):
+ # make a duck-typed subprocess stand in
+ # this is called by the MovieWriter base class, but not used here.
+ class ProcessStandin(object):
+ returncode = 0
+
+ def communicate(self):
+ return '', ''
+
+ self._proc = ProcessStandin()
+
+ # save the frames to an html file
+ if self.embed_frames:
+ fill_frames = _embedded_frames(self._saved_frames,
+ self.frame_format)
+ else:
+ # temp names is filled by FileMovieWriter
+ fill_frames = _included_frames(self._temp_names,
+ self.frame_format)
+
+ mode_dict = dict(once_checked='',
+ loop_checked='',
+ reflect_checked='')
+ mode_dict[self.default_mode + '_checked'] = 'checked'
+
+ interval = 1000 // self.fps
+
+ with open(self.outfile, 'w') as of:
+ of.write(JS_INCLUDE)
+ of.write(DISPLAY_TEMPLATE.format(id=uuid.uuid4().hex,
+ Nframes=len(self._temp_names),
+ fill_frames=fill_frames,
+ interval=interval,
+ **mode_dict))
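+    # Illustrative sketch (assumes ``anim`` is an existing Animation). The
+    # writer is registered as 'html', so either form below should work:
+    #
+    #     anim.save('animation.html', writer='html')
+    #     anim.save('animation.html',
+    #               writer=HTMLWriter(embed_frames=True, default_mode='loop'))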
+
+
+class Animation(object):
+ '''This class wraps the creation of an animation using matplotlib.
+
+ It is only a base class which should be subclassed to provide
+ needed behavior.
+
+ This class is not typically used directly.
+
+ Parameters
+ ----------
+ fig : matplotlib.figure.Figure
+ The figure object that is used to get draw, resize, and any
+ other needed events.
+
+ event_source : object, optional
+ A class that can run a callback when desired events
+ are generated, as well as be stopped and started.
+
+ Examples include timers (see :class:`TimedAnimation`) and file
+ system notifications.
+
+ blit : bool, optional
+ controls whether blitting is used to optimize drawing. Defaults
+ to ``False``.
+
+ See Also
+ --------
+ FuncAnimation, ArtistAnimation
+
+ '''
+ def __init__(self, fig, event_source=None, blit=False):
+ self._fig = fig
+ # Disables blitting for backends that don't support it. This
+ # allows users to request it if available, but still have a
+ # fallback that works if it is not.
+ self._blit = blit and fig.canvas.supports_blit
+
+ # These are the basics of the animation. The frame sequence represents
+ # information for each frame of the animation and depends on how the
+ # drawing is handled by the subclasses. The event source fires events
+ # that cause the frame sequence to be iterated.
+ self.frame_seq = self.new_frame_seq()
+ self.event_source = event_source
+
+ # Instead of starting the event source now, we connect to the figure's
+ # draw_event, so that we only start once the figure has been drawn.
+ self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start)
+
+ # Connect to the figure's close_event so that we don't continue to
+ # fire events and try to draw to a deleted figure.
+ self._close_id = self._fig.canvas.mpl_connect('close_event',
+ self._stop)
+ if self._blit:
+ self._setup_blit()
+
+ def _start(self, *args):
+ '''
+ Starts interactive animation. Adds the draw frame command to the GUI
+ handler, calls show to start the event loop.
+ '''
+ # First disconnect our draw event handler
+ self._fig.canvas.mpl_disconnect(self._first_draw_id)
+ self._first_draw_id = None # So we can check on save
+
+ # Now do any initial draw
+ self._init_draw()
+
+ # Add our callback for stepping the animation and
+ # actually start the event_source.
+ self.event_source.add_callback(self._step)
+ self.event_source.start()
+
+ def _stop(self, *args):
+ # On stop we disconnect all of our events.
+ if self._blit:
+ self._fig.canvas.mpl_disconnect(self._resize_id)
+ self._fig.canvas.mpl_disconnect(self._close_id)
+ self.event_source.remove_callback(self._step)
+ self.event_source = None
+
+ def save(self, filename, writer=None, fps=None, dpi=None, codec=None,
+ bitrate=None, extra_args=None, metadata=None, extra_anim=None,
+ savefig_kwargs=None):
+ '''Saves a movie file by drawing every frame.
+
+ Parameters
+ ----------
+
+ filename : str
+ The output filename, e.g., :file:`mymovie.mp4`.
+
+ writer : :class:`MovieWriter` or str, optional
+ A `MovieWriter` instance to use or a key that identifies a
+ class to use, such as 'ffmpeg'. If ``None``, defaults to
+ :rc:`animation.writer`.
+
+ fps : number, optional
+ Frames per second in the movie. Defaults to ``None``, which will use
+ the animation's specified interval to set the frames per second.
+
+ dpi : number, optional
+ Controls the dots per inch for the movie frames. This combined with
+ the figure's size in inches controls the size of the movie. If
+ ``None``, defaults to :rc:`savefig.dpi`.
+
+ codec : str, optional
+ The video codec to be used. Not all codecs are supported
+ by a given :class:`MovieWriter`. If ``None``, default to
+ :rc:`animation.codec`.
+
+ bitrate : number, optional
+ Specifies the number of bits used per second in the compressed
+ movie, in kilobits per second. A higher number means a higher
+ quality movie, but at the cost of increased file size. If ``None``,
+ defaults to :rc:`animation.bitrate`.
+
+ extra_args : list, optional
+ List of extra string arguments to be passed to the underlying movie
+ utility. If ``None``, defaults to :rc:`animation.extra_args`.
+
+ metadata : Dict[str, str], optional
+ Dictionary of keys and values for metadata to include in
+ the output file. Some keys that may be of use include:
+ title, artist, genre, subject, copyright, srcform, comment.
+
+ extra_anim : list, optional
+ Additional `Animation` objects that should be included
+ in the saved movie file. These need to be from the same
+ `matplotlib.figure.Figure` instance. Also, animation frames will
+ just be simply combined, so there should be a 1:1 correspondence
+            simply be combined, so there should be a 1:1 correspondence
+
+ savefig_kwargs : dict, optional
+            A dictionary of keyword arguments to be passed on to the
+            `savefig` command which is called repeatedly to
+ save the individual frames.
+
+ Notes
+ -----
+ fps, codec, bitrate, extra_args, metadata are used to
+ construct a :class:`MovieWriter` instance and can only be
+ passed if `writer` is a string. If they are passed as
+ non-`None` and ``writer`` is a :class:`MovieWriter`, a
+ `RuntimeError` will be raised.
+
+ '''
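+        # Illustrative sketch (assumes ffmpeg is installed and ``anim`` is
+        # this animation):
+        #
+        #     anim.save('movie.mp4', writer='ffmpeg', fps=30,
+        #               extra_args=['-vcodec', 'libx264'])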
+ # If the writer is None, use the rc param to find the name of the one
+ # to use
+ if writer is None:
+ writer = rcParams['animation.writer']
+ elif (not isinstance(writer, six.string_types) and
+ any(arg is not None
+ for arg in (fps, codec, bitrate, extra_args, metadata))):
+ raise RuntimeError('Passing in values for arguments '
+ 'fps, codec, bitrate, extra_args, or metadata '
+ 'is not supported when writer is an existing '
+ 'MovieWriter instance. These should instead be '
+ 'passed as arguments when creating the '
+ 'MovieWriter instance.')
+
+ if savefig_kwargs is None:
+ savefig_kwargs = {}
+
+ # Need to disconnect the first draw callback, since we'll be doing
+ # draws. Otherwise, we'll end up starting the animation.
+ if self._first_draw_id is not None:
+ self._fig.canvas.mpl_disconnect(self._first_draw_id)
+ reconnect_first_draw = True
+ else:
+ reconnect_first_draw = False
+
+ if fps is None and hasattr(self, '_interval'):
+ # Convert interval in ms to frames per second
+ fps = 1000. / self._interval
+
+ # Re-use the savefig DPI for ours if none is given
+ if dpi is None:
+ dpi = rcParams['savefig.dpi']
+ if dpi == 'figure':
+ dpi = self._fig.dpi
+
+ if codec is None:
+ codec = rcParams['animation.codec']
+
+ if bitrate is None:
+ bitrate = rcParams['animation.bitrate']
+
+ all_anim = [self]
+ if extra_anim is not None:
+ all_anim.extend(anim
+ for anim
+ in extra_anim if anim._fig is self._fig)
+
+ # If we have the name of a writer, instantiate an instance of the
+ # registered class.
+ if isinstance(writer, six.string_types):
+ if writer in writers.avail:
+ writer = writers[writer](fps, codec, bitrate,
+ extra_args=extra_args,
+ metadata=metadata)
+ else:
+ _log.warning("MovieWriter %s unavailable.", writer)
+
+ try:
+ writer = writers[writers.list()[0]](fps, codec, bitrate,
+ extra_args=extra_args,
+ metadata=metadata)
+ except IndexError:
+ raise ValueError("Cannot save animation: no writers are "
+ "available. Please install ffmpeg to "
+ "save animations.")
+ _log.info('Animation.save using %s', type(writer))
+
+ if 'bbox_inches' in savefig_kwargs:
+ _log.warning("Warning: discarding the 'bbox_inches' argument in "
+ "'savefig_kwargs' as it may cause frame size "
+ "to vary, which is inappropriate for animation.")
+ savefig_kwargs.pop('bbox_inches')
+
+ # Create a new sequence of frames for saved data. This is different
+ # from new_frame_seq() to give the ability to save 'live' generated
+ # frame information to be saved later.
+ # TODO: Right now, after closing the figure, saving a movie won't work
+ # since GUI widgets are gone. Either need to remove extra code to
+ # allow for this non-existent use case or find a way to make it work.
+ with rc_context():
+ if rcParams['savefig.bbox'] == 'tight':
+ _log.info("Disabling savefig.bbox = 'tight', as it may cause "
+ "frame size to vary, which is inappropriate for "
+ "animation.")
+ rcParams['savefig.bbox'] = None
+ with writer.saving(self._fig, filename, dpi):
+ for anim in all_anim:
+ # Clear the initial frame
+ anim._init_draw()
+ for data in zip(*[a.new_saved_frame_seq() for a in all_anim]):
+ for anim, d in zip(all_anim, data):
+ # TODO: See if turning off blit is really necessary
+ anim._draw_next_frame(d, blit=False)
+ writer.grab_frame(**savefig_kwargs)
+
+ # Reconnect signal for first draw if necessary
+ if reconnect_first_draw:
+ self._first_draw_id = self._fig.canvas.mpl_connect('draw_event',
+ self._start)
+
+ def _step(self, *args):
+ '''
+ Handler for getting events. By default, gets the next frame in the
+ sequence and hands the data off to be drawn.
+ '''
+ # Returns True to indicate that the event source should continue to
+ # call _step, until the frame sequence reaches the end of iteration,
+ # at which point False will be returned.
+ try:
+ framedata = next(self.frame_seq)
+ self._draw_next_frame(framedata, self._blit)
+ return True
+ except StopIteration:
+ return False
+
+ def new_frame_seq(self):
+ '''Creates a new sequence of frame information.'''
+ # Default implementation is just an iterator over self._framedata
+ return iter(self._framedata)
+
+ def new_saved_frame_seq(self):
+ '''Creates a new sequence of saved/cached frame information.'''
+ # Default is the same as the regular frame sequence
+ return self.new_frame_seq()
+
+ def _draw_next_frame(self, framedata, blit):
+ # Breaks down the drawing of the next frame into steps of pre- and
+ # post- draw, as well as the drawing of the frame itself.
+ self._pre_draw(framedata, blit)
+ self._draw_frame(framedata)
+ self._post_draw(framedata, blit)
+
+ def _init_draw(self):
+ # Initial draw to clear the frame. Also used by the blitting code
+ # when a clean base is required.
+ pass
+
+ def _pre_draw(self, framedata, blit):
+ # Perform any cleaning or whatnot before the drawing of the frame.
+ # This default implementation allows blit to clear the frame.
+ if blit:
+ self._blit_clear(self._drawn_artists, self._blit_cache)
+
+ def _draw_frame(self, framedata):
+ # Performs actual drawing of the frame.
+ raise NotImplementedError('Needs to be implemented by subclasses to'
+ ' actually make an animation.')
+
+ def _post_draw(self, framedata, blit):
+ # After the frame is rendered, this handles the actual flushing of
+ # the draw, which can be a direct draw_idle() or make use of the
+ # blitting.
+ if blit and self._drawn_artists:
+ self._blit_draw(self._drawn_artists, self._blit_cache)
+ else:
+ self._fig.canvas.draw_idle()
+
+ # The rest of the code in this class is to facilitate easy blitting
+ def _blit_draw(self, artists, bg_cache):
+ # Handles blitted drawing, which renders only the artists given instead
+ # of the entire figure.
+ updated_ax = []
+ for a in artists:
+ # If we haven't cached the background for this axes object, do
+ # so now. This might not always be reliable, but it's an attempt
+ # to automate the process.
+ if a.axes not in bg_cache:
+ bg_cache[a.axes] = a.figure.canvas.copy_from_bbox(a.axes.bbox)
+ a.axes.draw_artist(a)
+ updated_ax.append(a.axes)
+
+ # After rendering all the needed artists, blit each axes individually.
+ for ax in set(updated_ax):
+ ax.figure.canvas.blit(ax.bbox)
+
+ def _blit_clear(self, artists, bg_cache):
+ # Get a list of the axes that need clearing from the artists that
+ # have been drawn. Grab the appropriate saved background from the
+ # cache and restore.
+ axes = set(a.axes for a in artists)
+ for a in axes:
+ if a in bg_cache:
+ a.figure.canvas.restore_region(bg_cache[a])
+
+ def _setup_blit(self):
+ # Setting up the blit requires: a cache of the background for the
+ # axes
+ self._blit_cache = dict()
+ self._drawn_artists = []
+ self._resize_id = self._fig.canvas.mpl_connect('resize_event',
+ self._handle_resize)
+ self._post_draw(None, self._blit)
+
+ def _handle_resize(self, *args):
+ # On resize, we need to disable the resize event handling so we don't
+ # get too many events. Also stop the animation events, so that
+ # we're paused. Reset the cache and re-init. Set up an event handler
+ # to catch once the draw has actually taken place.
+ self._fig.canvas.mpl_disconnect(self._resize_id)
+ self.event_source.stop()
+ self._blit_cache.clear()
+ self._init_draw()
+ self._resize_id = self._fig.canvas.mpl_connect('draw_event',
+ self._end_redraw)
+
+ def _end_redraw(self, evt):
+ # Now that the redraw has happened, do the post draw flushing and
+ # blit handling. Then re-enable all of the original events.
+ self._post_draw(None, False)
+ self.event_source.start()
+ self._fig.canvas.mpl_disconnect(self._resize_id)
+ self._resize_id = self._fig.canvas.mpl_connect('resize_event',
+ self._handle_resize)
+
+ def to_html5_video(self, embed_limit=None):
+ '''Returns animation as an HTML5 video tag.
+
+ This saves the animation as an h264 video, encoded in base64
+ directly into the HTML5 video tag. This respects the rc parameters
+ for the writer as well as the bitrate. This also makes use of the
+ ``interval`` to control the speed, and uses the ``repeat``
+ parameter to decide whether to loop.
+ '''
+ VIDEO_TAG = r'''<video {size} {options}>
+ <source type="video/mp4" src="data:video/mp4;base64,{video}">
+ Your browser does not support the video tag.
+</video>'''
+ # Cache the rendering of the video as HTML
+ if not hasattr(self, '_base64_video'):
+ # Save embed limit, which is given in MB
+ if embed_limit is None:
+ embed_limit = rcParams['animation.embed_limit']
+
+ # Convert from MB to bytes
+ embed_limit *= 1024 * 1024
+
+ # First write the video to a tempfile. Set delete to False
+ # so we can re-open to read binary data.
+ with tempfile.NamedTemporaryFile(suffix='.m4v',
+ delete=False) as f:
+ # We create a writer manually so that we can get the
+ # appropriate size for the tag
+ Writer = writers[rcParams['animation.writer']]
+ writer = Writer(codec='h264',
+ bitrate=rcParams['animation.bitrate'],
+ fps=1000. / self._interval)
+ self.save(f.name, writer=writer)
+
+ # Now open and base64 encode
+ with open(f.name, 'rb') as video:
+ vid64 = encodebytes(video.read())
+ vid_len = len(vid64)
+ if vid_len >= embed_limit:
+ _log.warning(
+ "Animation movie is %s bytes, exceeding the limit of "
+ "%s. If you're sure you want a large animation "
+ "embedded, set the animation.embed_limit rc parameter "
+ "to a larger value (in MB).", vid_len, embed_limit)
+ else:
+ self._base64_video = vid64.decode('ascii')
+ self._video_size = 'width="{}" height="{}"'.format(
+ *writer.frame_size)
+
+ # Now we can remove
+ os.remove(f.name)
+
+ # If we exceeded the size, this attribute won't exist
+ if hasattr(self, '_base64_video'):
+ # Default HTML5 options are to autoplay and display video controls
+ options = ['controls', 'autoplay']
+
+ # If we're set to repeat, make it loop
+ if hasattr(self, 'repeat') and self.repeat:
+ options.append('loop')
+
+ return VIDEO_TAG.format(video=self._base64_video,
+ size=self._video_size,
+ options=' '.join(options))
+ else:
+ return 'Video too large to embed.'
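+    # Illustrative sketch: in a Jupyter notebook the returned markup can be
+    # displayed with IPython (assumes ``anim`` is an existing Animation):
+    #
+    #     from IPython.display import HTML
+    #     HTML(anim.to_html5_video())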
+
+ def to_jshtml(self, fps=None, embed_frames=True, default_mode=None):
+ """Generate HTML representation of the animation"""
+ if fps is None and hasattr(self, '_interval'):
+ # Convert interval in ms to frames per second
+ fps = 1000 / self._interval
+
+        # If we're not given a default mode, choose one based on the value of
+ # the repeat attribute
+ if default_mode is None:
+ default_mode = 'loop' if self.repeat else 'once'
+
+ if hasattr(self, "_html_representation"):
+ return self._html_representation
+ else:
+            # On Windows the file can't be opened a second time while it is
+            # still open, so avoid deleting it on close and delete it
+            # manually later.
+ with tempfile.NamedTemporaryFile(suffix='.html',
+ delete=False) as f:
+ self.save(f.name, writer=HTMLWriter(fps=fps,
+ embed_frames=embed_frames,
+ default_mode=default_mode))
+ # Re-open and get content
+ with open(f.name) as fobj:
+ html = fobj.read()
+
+ # Now we can delete
+ os.remove(f.name)
+
+ self._html_representation = html
+ return html
+
+ def _repr_html_(self):
+ '''IPython display hook for rendering.'''
+ fmt = rcParams['animation.html']
+ if fmt == 'html5':
+ return self.to_html5_video()
+ elif fmt == 'jshtml':
+ return self.to_jshtml()
+
+
+class TimedAnimation(Animation):
+ ''':class:`Animation` subclass for time-based animation.
+
+ A new frame is drawn every *interval* milliseconds.
+
+ Parameters
+ ----------
+ fig : matplotlib.figure.Figure
+ The figure object that is used to get draw, resize, and any
+ other needed events.
+
+ interval : number, optional
+ Delay between frames in milliseconds. Defaults to 200.
+
+ repeat_delay : number, optional
+        If the animation is repeated, adds a delay in milliseconds
+ before repeating the animation. Defaults to ``None``.
+
+ repeat : bool, optional
+ Controls whether the animation should repeat when the sequence
+ of frames is completed. Defaults to ``True``.
+
+ blit : bool, optional
+ Controls whether blitting is used to optimize drawing. Defaults
+ to ``False``.
+
+ '''
+ def __init__(self, fig, interval=200, repeat_delay=None, repeat=True,
+ event_source=None, *args, **kwargs):
+ # Store the timing information
+ self._interval = interval
+ self._repeat_delay = repeat_delay
+ self.repeat = repeat
+
+ # If we're not given an event source, create a new timer. This permits
+ # sharing timers between animation objects for syncing animations.
+ if event_source is None:
+ event_source = fig.canvas.new_timer()
+ event_source.interval = self._interval
+
+ Animation.__init__(self, fig, event_source=event_source,
+ *args, **kwargs)
+
+ def _step(self, *args):
+ '''
+ Handler for getting events.
+ '''
+ # Extends the _step() method for the Animation class. If
+ # Animation._step signals that it reached the end and we want to
+ # repeat, we refresh the frame sequence and return True. If
+ # _repeat_delay is set, change the event_source's interval to our loop
+ # delay and set the callback to one which will then set the interval
+ # back.
+ still_going = Animation._step(self, *args)
+ if not still_going and self.repeat:
+ self._init_draw()
+ self.frame_seq = self.new_frame_seq()
+ if self._repeat_delay:
+ self.event_source.remove_callback(self._step)
+ self.event_source.add_callback(self._loop_delay)
+ self.event_source.interval = self._repeat_delay
+ return True
+ else:
+ return Animation._step(self, *args)
+ else:
+ return still_going
+
+ def _stop(self, *args):
+        # If we stop in the middle of a loop delay (which is relatively likely
+        # given the potential pause here), remove the loop_delay callback as
+        # well.
+ self.event_source.remove_callback(self._loop_delay)
+ Animation._stop(self)
+
+ def _loop_delay(self, *args):
+ # Reset the interval and change callbacks after the delay.
+ self.event_source.remove_callback(self._loop_delay)
+ self.event_source.interval = self._interval
+ self.event_source.add_callback(self._step)
+ Animation._step(self)
+
+
+class ArtistAnimation(TimedAnimation):
+ '''Animation using a fixed set of `Artist` objects.
+
+ Before creating an instance, all plotting should have taken place
+ and the relevant artists saved.
+
+ Parameters
+ ----------
+ fig : matplotlib.figure.Figure
+ The figure object that is used to get draw, resize, and any
+ other needed events.
+
+ artists : list
+        Each list entry is a collection of artists that represent what
+ needs to be enabled on each frame. These will be disabled for
+ other frames.
+
+ interval : number, optional
+ Delay between frames in milliseconds. Defaults to 200.
+
+ repeat_delay : number, optional
+        If the animation is repeated, adds a delay in milliseconds
+ before repeating the animation. Defaults to ``None``.
+
+ repeat : bool, optional
+ Controls whether the animation should repeat when the sequence
+ of frames is completed. Defaults to ``True``.
+
+ blit : bool, optional
+ Controls whether blitting is used to optimize drawing. Defaults
+ to ``False``.
+
+ '''
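+    # Illustrative sketch (assumes ``fig`` and ``ax`` already exist;
+    # ``make_frame`` is a hypothetical data-producing function):
+    #
+    #     ims = []
+    #     for i in range(10):
+    #         im = ax.imshow(make_frame(i), animated=True)
+    #         ims.append([im])
+    #     anim = ArtistAnimation(fig, ims, interval=50, blit=True)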
+ def __init__(self, fig, artists, *args, **kwargs):
+ # Internal list of artists drawn in the most recent frame.
+ self._drawn_artists = []
+
+ # Use the list of artists as the framedata, which will be iterated
+ # over by the machinery.
+ self._framedata = artists
+ TimedAnimation.__init__(self, fig, *args, **kwargs)
+
+ def _init_draw(self):
+ # Make all the artists involved in *any* frame invisible
+ figs = set()
+ for f in self.new_frame_seq():
+ for artist in f:
+ artist.set_visible(False)
+ artist.set_animated(self._blit)
+ # Assemble a list of unique figures that need flushing
+ if artist.get_figure() not in figs:
+ figs.add(artist.get_figure())
+
+ # Flush the needed figures
+ for fig in figs:
+ fig.canvas.draw_idle()
+
+ def _pre_draw(self, framedata, blit):
+ '''
+ Clears artists from the last frame.
+ '''
+ if blit:
+ # Let blit handle clearing
+ self._blit_clear(self._drawn_artists, self._blit_cache)
+ else:
+ # Otherwise, make all the artists from the previous frame invisible
+ for artist in self._drawn_artists:
+ artist.set_visible(False)
+
+ def _draw_frame(self, artists):
+ # Save the artists that were passed in as framedata for the other
+ # steps (esp. blitting) to use.
+ self._drawn_artists = artists
+
+ # Make all the artists from the current frame visible
+ for artist in artists:
+ artist.set_visible(True)
+
+
+class FuncAnimation(TimedAnimation):
+ '''
+ Makes an animation by repeatedly calling a function ``func``.
+
+ Parameters
+ ----------
+ fig : matplotlib.figure.Figure
+ The figure object that is used to get draw, resize, and any
+ other needed events.
+
+ func : callable
+ The function to call at each frame. The first argument will
+ be the next value in ``frames``. Any additional positional
+ arguments can be supplied via the ``fargs`` parameter.
+
+ The required signature is::
+
+ def func(frame, *fargs) -> iterable_of_artists:
+
+ frames : iterable, int, generator function, or None, optional
+        Source of data to pass to ``func`` for each frame of the animation.
+
+ If an iterable, then simply use the values provided. If the
+ iterable has a length, it will override the ``save_count`` kwarg.
+
+ If an integer, then equivalent to passing ``range(frames)``
+
+ If a generator function, then must have the signature::
+
+ def gen_function() -> obj:
+
+ If ``None``, then equivalent to passing ``itertools.count``.
+
+        In all of these cases, the values in *frames* are simply passed through
+ to the user-supplied *func* and thus can be of any type.
+
+ init_func : callable, optional
+ A function used to draw a clear frame. If not given, the
+ results of drawing from the first item in the frames sequence
+ will be used. This function will be called once before the
+ first frame.
+
+ If ``blit == True``, ``init_func`` must return an iterable of artists
+ to be re-drawn.
+
+ The required signature is::
+
+ def init_func() -> iterable_of_artists:
+
+ fargs : tuple or None, optional
+ Additional arguments to pass to each call to *func*.
+
+ save_count : int, optional
+ The number of values from *frames* to cache.
+
+ interval : number, optional
+ Delay between frames in milliseconds. Defaults to 200.
+
+ repeat_delay : number, optional
+        If the animation is repeated, adds a delay in milliseconds
+ before repeating the animation. Defaults to ``None``.
+
+ repeat : bool, optional
+ Controls whether the animation should repeat when the sequence
+ of frames is completed. Defaults to ``True``.
+
+ blit : bool, optional
+ Controls whether blitting is used to optimize drawing. Defaults
+ to ``False``.
+
+ '''
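+    # Illustrative sketch (assumes ``fig``, ``ax`` and data arrays ``x``,
+    # ``y`` already exist):
+    #
+    #     line, = ax.plot([], [])
+    #
+    #     def update(frame):
+    #         line.set_data(x[:frame], y[:frame])
+    #         return line,
+    #
+    #     anim = FuncAnimation(fig, update, frames=len(x),
+    #                          init_func=lambda: (line,), blit=True)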
+ def __init__(self, fig, func, frames=None, init_func=None, fargs=None,
+ save_count=None, **kwargs):
+ if fargs:
+ self._args = fargs
+ else:
+ self._args = ()
+ self._func = func
+
+ # Amount of framedata to keep around for saving movies. This is only
+ # used if we don't know how many frames there will be: in the case
+ # of no generator or in the case of a callable.
+ self.save_count = save_count
+ # Set up a function that creates a new iterable when needed. If nothing
+ # is passed in for frames, just use itertools.count, which will just
+ # keep counting from 0. A callable passed in for frames is assumed to
+ # be a generator. An iterable will be used as is, and anything else
+ # will be treated as a number of frames.
+ if frames is None:
+ self._iter_gen = itertools.count
+ elif callable(frames):
+ self._iter_gen = frames
+ elif cbook.iterable(frames):
+ self._iter_gen = lambda: iter(frames)
+ if hasattr(frames, '__len__'):
+ self.save_count = len(frames)
+ else:
+ self._iter_gen = lambda: iter(xrange(frames))
+ self.save_count = frames
+
+ if self.save_count is None:
+            # If save_count was neither passed in nor inferred from frames,
+            # default to caching 100 frames.
+ self.save_count = 100
+ else:
+ # itertools.islice returns an error when passed a numpy int instead
+ # of a native python int (http://bugs.python.org/issue30537).
+ # As a workaround, convert save_count to a native python int.
+ self.save_count = int(self.save_count)
+
+ self._init_func = init_func
+
+ # Needs to be initialized so the draw functions work without checking
+ self._save_seq = []
+
+ TimedAnimation.__init__(self, fig, **kwargs)
+
+ # Need to reset the saved seq, since right now it will contain data
+ # for a single frame from init, which is not what we want.
+ self._save_seq = []
+
+ def new_frame_seq(self):
+ # Use the generating function to generate a new frame sequence
+ return self._iter_gen()
+
+ def new_saved_frame_seq(self):
+ # Generate an iterator for the sequence of saved data. If there are
+ # no saved frames, generate a new frame sequence and take the first
+ # save_count entries in it.
+ if self._save_seq:
+ # While iterating we are going to update _save_seq
+ # so make a copy to safely iterate over
+ self._old_saved_seq = list(self._save_seq)
+ return iter(self._old_saved_seq)
+ else:
+ if self.save_count is not None:
+ return itertools.islice(self.new_frame_seq(), self.save_count)
+
+ else:
+ frame_seq = self.new_frame_seq()
+
+ def gen():
+ try:
+ for _ in range(100):
+ yield next(frame_seq)
+ except StopIteration:
+ pass
+ else:
+ cbook.warn_deprecated(
+ "2.2", "FuncAnimation.save has truncated your "
+ "animation to 100 frames. In the future, no such "
+ "truncation will occur; please pass 'save_count' "
+ "accordingly.")
+
+ return gen()
+
+ def _init_draw(self):
+ # Initialize the drawing either using the given init_func or by
+ # calling the draw function with the first item of the frame sequence.
+ # For blitting, the init_func should return a sequence of modified
+ # artists.
+ if self._init_func is None:
+ self._draw_frame(next(self.new_frame_seq()))
+
+ else:
+ self._drawn_artists = self._init_func()
+ if self._blit:
+ if self._drawn_artists is None:
+ raise RuntimeError('The init_func must return a '
+ 'sequence of Artist objects.')
+ for a in self._drawn_artists:
+ a.set_animated(self._blit)
+ self._save_seq = []
+
+ def _draw_frame(self, framedata):
+ # Save the data for potential saving of movies.
+ self._save_seq.append(framedata)
+
+ # Make sure to respect save_count (keep only the last save_count
+ # around)
+ self._save_seq = self._save_seq[-self.save_count:]
+
+ # Call the func with framedata and args. If blitting is desired,
+ # func needs to return a sequence of any artists that were modified.
+ self._drawn_artists = self._func(framedata, *self._args)
+ if self._blit:
+ if self._drawn_artists is None:
+ raise RuntimeError('The animation function must return a '
+ 'sequence of Artist objects.')
+ for a in self._drawn_artists:
+ a.set_animated(self._blit)
diff --git a/contrib/python/matplotlib/py2/matplotlib/artist.py b/contrib/python/matplotlib/py2/matplotlib/artist.py
new file mode 100644
index 00000000000..8dc1034bc70
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/artist.py
@@ -0,0 +1,1482 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from collections import OrderedDict, namedtuple
+from functools import wraps
+import inspect
+import re
+import warnings
+
+import numpy as np
+
+import matplotlib
+from . import cbook, docstring, rcParams
+from .path import Path
+from .transforms import (Bbox, IdentityTransform, Transform, TransformedBbox,
+ TransformedPatchPath, TransformedPath)
+# Note, matplotlib artists use the doc strings for set and get
+# methods to enable the introspection methods of setp and getp. Every
+# set_* method should have a docstring containing the line
+#
+# ACCEPTS: [ legal | values ]
+#
+# and aliases for setters and getters should have a docstring that
+# starts with 'alias for ', as in 'alias for set_somemethod'
+#
+# You may wonder why we use so much boiler-plate manually defining the
+# set_alias and get_alias functions, rather than using some clever
+# python trick. The answer is that I need to be able to manipulate
+# the docstring, and there is no clever way to do that in python 2.2,
+# as far as I can see - see
+#
+# https://mail.python.org/pipermail/python-list/2004-October/242925.html
+
+
+def allow_rasterization(draw):
+ """
+ Decorator for Artist.draw method. Provides routines
+ that run before and after the draw call. The before and after functions
+ are useful for changing artist-dependent renderer attributes or making
+ other setup function calls, such as starting and flushing a mixed-mode
+ renderer.
+ """
+
+ # the axes class has a second argument inframe for its draw method.
+ @wraps(draw)
+ def draw_wrapper(artist, renderer, *args, **kwargs):
+ try:
+ if artist.get_rasterized():
+ renderer.start_rasterizing()
+ if artist.get_agg_filter() is not None:
+ renderer.start_filter()
+
+ return draw(artist, renderer, *args, **kwargs)
+ finally:
+ if artist.get_agg_filter() is not None:
+ renderer.stop_filter(artist.get_agg_filter())
+ if artist.get_rasterized():
+ renderer.stop_rasterizing()
+
+ draw_wrapper._supports_rasterization = True
+ return draw_wrapper
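+# Illustrative sketch of how a concrete Artist subclass is expected to use
+# the decorator (not part of this module):
+#
+#     class MyArtist(Artist):
+#         @allow_rasterization
+#         def draw(self, renderer):
+#             ...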
+
+
+def _stale_axes_callback(self, val):
+ if self.axes:
+ self.axes.stale = val
+
+
+_XYPair = namedtuple("_XYPair", "x y")
+
+
+class Artist(object):
+ """
+ Abstract base class for someone who renders into a
+ :class:`FigureCanvas`.
+ """
+
+ aname = 'Artist'
+ zorder = 0
+ # order of precedence when bulk setting/updating properties
+ # via update. The keys should be property names and the values
+ # integers
+ _prop_order = dict(color=-1)
+
+ def __init__(self):
+ self._stale = True
+ self.stale_callback = None
+ self._axes = None
+ self.figure = None
+
+ self._transform = None
+ self._transformSet = False
+ self._visible = True
+ self._animated = False
+ self._alpha = None
+ self.clipbox = None
+ self._clippath = None
+ self._clipon = True
+ self._label = ''
+ self._picker = None
+ self._contains = None
+ self._rasterized = None
+ self._agg_filter = None
+ self._mouseover = False
+ self.eventson = False # fire events only if eventson
+ self._oid = 0 # an observer id
+ self._propobservers = {} # a dict from oids to funcs
+ try:
+ self.axes = None
+ except AttributeError:
+ # Handle self.axes as a read-only property, as in Figure.
+ pass
+ self._remove_method = None
+ self._url = None
+ self._gid = None
+ self._snap = None
+ self._sketch = rcParams['path.sketch']
+ self._path_effects = rcParams['path.effects']
+ self._sticky_edges = _XYPair([], [])
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ # remove the unpicklable remove method, this will get re-added on load
+ # (by the axes) if the artist lives on an axes.
+ d['_remove_method'] = None
+ d['stale_callback'] = None
+ return d
+
+ def remove(self):
+ """
+ Remove the artist from the figure if possible. The effect
+ will not be visible until the figure is redrawn, e.g., with
+ :meth:`matplotlib.axes.Axes.draw_idle`. Call
+ :meth:`matplotlib.axes.Axes.relim` to update the axes limits
+ if desired.
+
+ Note: :meth:`~matplotlib.axes.Axes.relim` will not see
+ collections even if the collection was added to axes with
+ *autolim* = True.
+
+ Note: there is no support for removing the artist's legend entry.
+ """
+
+ # There is no method to set the callback. Instead the parent should
+ # set the _remove_method attribute directly. This would be a
+ # protected attribute if Python supported that sort of thing. The
+ # callback has one parameter, which is the child to be removed.
+ if self._remove_method is not None:
+ self._remove_method(self)
+ # clear stale callback
+ self.stale_callback = None
+ _ax_flag = False
+ if hasattr(self, 'axes') and self.axes:
+ # remove from the mouse hit list
+ self.axes.mouseover_set.discard(self)
+ # mark the axes as stale
+ self.axes.stale = True
+ # decouple the artist from the axes
+ self.axes = None
+ _ax_flag = True
+
+ if self.figure:
+ self.figure = None
+ if not _ax_flag:
+ self.figure = True
+
+ else:
+ raise NotImplementedError('cannot remove artist')
+ # TODO: the fix for the collections relim problem is to move the
+ # limits calculation into the artist itself, including the property of
+ # whether or not the artist should affect the limits. Then there will
+ # be no distinction between axes.add_line, axes.add_patch, etc.
+ # TODO: add legend support
+
+ def have_units(self):
+ 'Return *True* if units are set on the *x* or *y* axes'
+ ax = self.axes
+ if ax is None or ax.xaxis is None:
+ return False
+ return ax.xaxis.have_units() or ax.yaxis.have_units()
+
+ def convert_xunits(self, x):
+ """For artists in an axes, if the xaxis has units support,
+ convert *x* using xaxis unit type
+ """
+ ax = getattr(self, 'axes', None)
+ if ax is None or ax.xaxis is None:
+ return x
+ return ax.xaxis.convert_units(x)
+
+ def convert_yunits(self, y):
+ """For artists in an axes, if the yaxis has units support,
+ convert *y* using yaxis unit type
+ """
+ ax = getattr(self, 'axes', None)
+ if ax is None or ax.yaxis is None:
+ return y
+ return ax.yaxis.convert_units(y)
+
+ @property
+ def axes(self):
+ """
+ The :class:`~matplotlib.axes.Axes` instance the artist
+ resides in, or *None*.
+ """
+ return self._axes
+
+ @axes.setter
+ def axes(self, new_axes):
+ if (new_axes is not None and self._axes is not None
+ and new_axes != self._axes):
+ raise ValueError("Can not reset the axes. You are probably "
+ "trying to re-use an artist in more than one "
+ "Axes which is not supported")
+ self._axes = new_axes
+ if new_axes is not None and new_axes is not self:
+ self.stale_callback = _stale_axes_callback
+ return new_axes
+
+ @property
+ def stale(self):
+ """
+ If the artist is 'stale' and needs to be re-drawn for the output to
+ match the internal state of the artist.
+ """
+ return self._stale
+
+ @stale.setter
+ def stale(self, val):
+ self._stale = val
+
+ # if the artist is animated it does not take normal part in the
+ # draw stack and is not expected to be drawn as part of the normal
+ # draw loop (when not saving) so do not propagate this change
+ if self.get_animated():
+ return
+
+ if val and self.stale_callback is not None:
+ self.stale_callback(self, val)
+
+ def get_window_extent(self, renderer):
+ """
+ Get the axes bounding box in display space.
+ Subclasses should override for inclusion in the bounding box
+ "tight" calculation. Default is to return an empty bounding
+ box at 0, 0.
+
+ Be careful when using this function, the results will not update
+        if the window extent of the artist changes. The extent
+ can change due to any changes in the transform stack, such as
+ changing the axes limits, the figure size, or the canvas used
+ (as is done when saving a figure). This can lead to unexpected
+ behavior where interactive figures will look fine on the screen,
+ but will save incorrectly.
+ """
+ return Bbox([[0, 0], [0, 0]])
+
+ def add_callback(self, func):
+ """
+ Adds a callback function that will be called whenever one of
+ the :class:`Artist`'s properties changes.
+
+ Returns an *id* that is useful for removing the callback with
+ :meth:`remove_callback` later.
+ """
+ oid = self._oid
+ self._propobservers[oid] = func
+ self._oid += 1
+ return oid
+
+ def remove_callback(self, oid):
+ """
+ Remove a callback based on its *id*.
+
+ .. seealso::
+
+ :meth:`add_callback`
+ For adding callbacks
+
+ """
+ try:
+ del self._propobservers[oid]
+ except KeyError:
+ pass
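+    # Illustrative sketch of the observer API (assumes ``artist`` is an
+    # existing Artist; ``on_change`` is a hypothetical callback):
+    #
+    #     def on_change(artist):
+    #         print('artist changed:', artist)
+    #
+    #     cid = artist.add_callback(on_change)
+    #     artist.pchanged()              # fires on_change
+    #     artist.remove_callback(cid)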
+
+ def pchanged(self):
+ """
+ Fire an event when property changed, calling all of the
+ registered callbacks.
+ """
+ for oid, func in six.iteritems(self._propobservers):
+ func(self)
+
+ def is_transform_set(self):
+ """
+ Returns *True* if :class:`Artist` has a transform explicitly
+ set.
+ """
+ return self._transformSet
+
+ def set_transform(self, t):
+ """
+ Set the artist transform.
+
+ Parameters
+ ----------
+ t : `.Transform`
+ .. ACCEPTS: `.Transform`
+ """
+ self._transform = t
+ self._transformSet = True
+ self.pchanged()
+ self.stale = True
+
+ def get_transform(self):
+ """
+ Return the :class:`~matplotlib.transforms.Transform`
+ instance used by this artist.
+ """
+ if self._transform is None:
+ self._transform = IdentityTransform()
+ elif (not isinstance(self._transform, Transform)
+ and hasattr(self._transform, '_as_mpl_transform')):
+ self._transform = self._transform._as_mpl_transform(self.axes)
+ return self._transform
+
+ @cbook.deprecated("2.2")
+ def hitlist(self, event):
+ """
+ List the children of the artist which contain the mouse event *event*.
+ """
+ L = []
+ try:
+ hascursor, info = self.contains(event)
+ if hascursor:
+ L.append(self)
+ except:
+ import traceback
+ traceback.print_exc()
+ print("while checking", self.__class__)
+
+ for a in self.get_children():
+ L.extend(a.hitlist(event))
+ return L
+
+ def get_children(self):
+ """
+ Return a list of the child :class:`Artist`s this
+ :class:`Artist` contains.
+ """
+ return []
+
+ def contains(self, mouseevent):
+ """Test whether the artist contains the mouse event.
+
+ Returns the truth value and a dictionary of artist specific details of
+ selection, such as which points are contained in the pick radius. See
+ individual artists for details.
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+ warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
+ return False, {}
+
+ def set_contains(self, picker):
+ """
+ Replace the contains test used by this artist. The new picker
+ should be a callable function which determines whether the
+ artist is hit by the mouse event::
+
+ hit, props = picker(artist, mouseevent)
+
+ If the mouse event is over the artist, return *hit* = *True*
+ and *props* is a dictionary of properties you want returned
+ with the contains test.
+
+ Parameters
+ ----------
+ picker : callable
+ .. ACCEPTS: a callable function
+ """
+ self._contains = picker
+
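+ # Illustrative sketch of a custom contains test (assumes ``artist`` is an
+ # Artist and a hit within 10 display pixels of (100, 200) should count):
+ #
+ #     def near_point(artist, mouseevent):
+ #         hit = (abs(mouseevent.x - 100) < 10 and
+ #                abs(mouseevent.y - 200) < 10)
+ #         return hit, {}
+ #
+ #     artist.set_contains(near_point)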
+ def get_contains(self):
+ """
+ Return the _contains test used by the artist, or *None* for default.
+ """
+ return self._contains
+
+ def pickable(self):
+ 'Return *True* if :class:`Artist` is pickable.'
+ return (self.figure is not None and
+ self.figure.canvas is not None and
+ self._picker is not None)
+
+ def pick(self, mouseevent):
+ """
+ Process a pick event.
+
+ Each child artist will fire a pick event if *mouseevent* is over
+ the artist and the artist has its picker set.
+ """
+ # Pick self
+ if self.pickable():
+ picker = self.get_picker()
+ if callable(picker):
+ inside, prop = picker(self, mouseevent)
+ else:
+ inside, prop = self.contains(mouseevent)
+ if inside:
+ self.figure.canvas.pick_event(mouseevent, self, **prop)
+
+ # Pick children
+ for a in self.get_children():
+ # make sure the event happened in the same axes
+ ax = getattr(a, 'axes', None)
+ if (mouseevent.inaxes is None or ax is None
+ or mouseevent.inaxes == ax):
+ # we need to check if mouseevent.inaxes is None
+ # because some objects associated with an axes (e.g., a
+ # tick label) can be outside the bounding box of the
+ # axes and inaxes will be None
+ # also check whether ax is None so that we still traverse objects
+ # which do not have an axes property but whose children might
+ a.pick(mouseevent)
+
+ def set_picker(self, picker):
+ """
+ Set the picking behavior used by this artist.
+
+ *picker* can be one of the following:
+
+ * *None*: picking is disabled for this artist (default)
+
+ * A boolean: if *True* then picking will be enabled and the
+ artist will fire a pick event if the mouse event is over
+ the artist
+
+ * A float: if picker is a number it is interpreted as an
+ epsilon tolerance in points and the artist will fire
+ off an event if its data is within epsilon of the mouse
+ event. For some artists like lines and patch collections,
+ the artist may provide additional data to the pick event
+ that is generated, e.g., the indices of the data within
+ epsilon of the pick event.
+
+ * A function: if picker is callable, it is a user supplied
+ function which determines whether the artist is hit by the
+ mouse event::
+
+ hit, props = picker(artist, mouseevent)
+
+ to determine the hit test. If the mouse event is over the
+ artist, return *hit=True* and props is a dictionary of
+ properties you want added to the PickEvent attributes.
+
+ Parameters
+ ----------
+ picker : None or bool or float or callable
+ .. ACCEPTS: [None | bool | float | callable]
+ """
+ self._picker = picker
+
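+ # Illustrative picking sketch (assumes ``line`` is a Line2D on a figure
+ # ``fig``; 'pick_event' is the standard canvas event name):
+ #
+ #     line.set_picker(5)   # fire pick events within 5 points of the data
+ #
+ #     def on_pick(event):
+ #         print('picked', event.artist, getattr(event, 'ind', None))
+ #
+ #     fig.canvas.mpl_connect('pick_event', on_pick)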
+ def get_picker(self):
+ """Return the picker object used by this artist."""
+ return self._picker
+
+ @cbook.deprecated("2.2", "artist.figure is not None")
+ def is_figure_set(self):
+ """Returns whether the artist is assigned to a `.Figure`."""
+ return self.figure is not None
+
+ def get_url(self):
+ """Returns the url."""
+ return self._url
+
+ def set_url(self, url):
+ """
+ Sets the url for the artist.
+
+ Parameters
+ ----------
+ url : str
+ .. ACCEPTS: a url string
+ """
+ self._url = url
+
+ def get_gid(self):
+ """Returns the group id."""
+ return self._gid
+
+ def set_gid(self, gid):
+ """
+ Sets the (group) id for the artist.
+
+ Parameters
+ ----------
+ gid : str
+ .. ACCEPTS: an id string
+ """
+ self._gid = gid
+
+ def get_snap(self):
+ """
+ Returns the snap setting which may be:
+
+ * True: snap vertices to the nearest pixel center
+
+ * False: leave vertices as-is
+
+ * None: (auto) If the path contains only rectilinear line
+ segments, round to the nearest pixel center
+
+ Only supported by the Agg and MacOSX backends.
+ """
+ if rcParams['path.snap']:
+ return self._snap
+ else:
+ return False
+
+ def set_snap(self, snap):
+ """
+ Sets the snap setting which may be:
+
+ * True: snap vertices to the nearest pixel center
+
+ * False: leave vertices as-is
+
+ * None: (auto) If the path contains only rectilinear line
+ segments, round to the nearest pixel center
+
+ Only supported by the Agg and MacOSX backends.
+
+ Parameters
+ ----------
+ snap : bool or None
+ .. ACCEPTS: bool or None
+ """
+ self._snap = snap
+ self.stale = True
+
+ def get_sketch_params(self):
+ """
+ Returns the sketch parameters for the artist.
+
+ Returns
+ -------
+ sketch_params : tuple or `None`
+
+ A 3-tuple with the following elements:
+
+ * `scale`: The amplitude of the wiggle perpendicular to the
+ source line.
+
+ * `length`: The length of the wiggle along the line.
+
+ * `randomness`: The scale factor by which the length is
+ shrunken or expanded.
+
+ May return `None` if no sketch parameters were set.
+ """
+ return self._sketch
+
+ def set_sketch_params(self, scale=None, length=None, randomness=None):
+ """
+ Sets the sketch parameters.
+
+ Parameters
+ ----------
+
+ scale : float, optional
+ The amplitude of the wiggle perpendicular to the source
+ line, in pixels. If scale is `None` or not provided, no
+ sketch filter will be applied.
+
+ length : float, optional
+ The length of the wiggle along the line, in pixels
+ (default 128.0)
+
+ randomness : float, optional
+ The scale factor by which the length is shrunken or
+ expanded (default 16.0)
+
+ .. ACCEPTS: (scale: float, length: float, randomness: float)
+ """
+ if scale is None:
+ self._sketch = None
+ else:
+ self._sketch = (scale, length or 128.0, randomness or 16.0)
+ self.stale = True
+
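+ # Illustrative sketch-filter example (assumes ``line`` is any Artist,
+ # e.g. a Line2D): 2 px wiggle amplitude, 100 px length along the line,
+ # randomness factor 20.
+ #
+ #     line.set_sketch_params(scale=2, length=100, randomness=20)
+ #     line.set_sketch_params(scale=None)   # remove the filter again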
+ def set_path_effects(self, path_effects):
+ """Set the path effects.
+
+ Parameters
+ ----------
+ path_effects : `.AbstractPathEffect`
+ .. ACCEPTS: `.AbstractPathEffect`
+ """
+ self._path_effects = path_effects
+ self.stale = True
+
+ def get_path_effects(self):
+ return self._path_effects
+
+ def get_figure(self):
+ """Return the `.Figure` instance the artist belongs to."""
+ return self.figure
+
+ def set_figure(self, fig):
+ """
+ Set the `.Figure` instance the artist belongs to.
+
+ Parameters
+ ----------
+ fig : `.Figure`
+ .. ACCEPTS: a `.Figure` instance
+ """
+ # if this is a no-op just return
+ if self.figure is fig:
+ return
+ # if we currently have a figure (the case of both `self.figure`
+ # and `fig` being none is taken care of above) we then user is
+ # trying to change the figure an artist is associated with which
+ # is not allowed for the same reason as adding the same instance
+ # to more than one Axes
+ if self.figure is not None:
+ raise RuntimeError("Can not put single artist in "
+ "more than one figure")
+ self.figure = fig
+ if self.figure and self.figure is not self:
+ self.pchanged()
+ self.stale = True
+
+ def set_clip_box(self, clipbox):
+ """
+ Set the artist's clip `.Bbox`.
+
+ Parameters
+ ----------
+ clipbox : `.Bbox`
+ .. ACCEPTS: a `.Bbox` instance
+ """
+ self.clipbox = clipbox
+ self.pchanged()
+ self.stale = True
+
+ def set_clip_path(self, path, transform=None):
+ """
+ Set the artist's clip path, which may be:
+
+ - a :class:`~matplotlib.patches.Patch` (or subclass) instance; or
+ - a :class:`~matplotlib.path.Path` instance, in which case a
+ :class:`~matplotlib.transforms.Transform` instance, which will be
+ applied to the path before using it for clipping, must be provided;
+ or
+ - ``None``, to remove a previously set clipping path.
+
+ For efficiency, if the path happens to be an axis-aligned rectangle,
+ this method will set the clipping box to the corresponding rectangle
+ and set the clipping path to ``None``.
+
+ ACCEPTS: [(`~matplotlib.path.Path`, `.Transform`) | `.Patch` | None]
+ """
+ from matplotlib.patches import Patch, Rectangle
+
+ success = False
+ if transform is None:
+ if isinstance(path, Rectangle):
+ self.clipbox = TransformedBbox(Bbox.unit(),
+ path.get_transform())
+ self._clippath = None
+ success = True
+ elif isinstance(path, Patch):
+ self._clippath = TransformedPatchPath(path)
+ success = True
+ elif isinstance(path, tuple):
+ path, transform = path
+
+ if path is None:
+ self._clippath = None
+ success = True
+ elif isinstance(path, Path):
+ self._clippath = TransformedPath(path, transform)
+ success = True
+ elif isinstance(path, TransformedPatchPath):
+ self._clippath = path
+ success = True
+ elif isinstance(path, TransformedPath):
+ self._clippath = path
+ success = True
+
+ if not success:
+ raise TypeError(
+ "Invalid arguments to set_clip_path, of type {} and {}"
+ .format(type(path).__name__, type(transform).__name__))
+ # This may result in the callbacks being hit twice, but guarantees they
+ # will be hit at least once.
+ self.pchanged()
+ self.stale = True
+
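+ # Illustrative clipping sketch (assumes ``im`` is an image artist on an
+ # Axes ``ax``; Circle comes from matplotlib.patches):
+ #
+ #     from matplotlib.patches import Circle
+ #     clip = Circle((0.5, 0.5), 0.4, transform=ax.transAxes)
+ #     im.set_clip_path(clip)    # clip the image to the circle
+ #     im.set_clip_path(None)    # remove the clip path again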
+ def get_alpha(self):
+ """
+ Return the alpha value used for blending - not supported on all
+ backends
+ """
+ return self._alpha
+
+ def get_visible(self):
+ "Return the artist's visiblity"
+ return self._visible
+
+ def get_animated(self):
+ "Return the artist's animated state"
+ return self._animated
+
+ def get_clip_on(self):
+ 'Return whether artist uses clipping'
+ return self._clipon
+
+ def get_clip_box(self):
+ 'Return artist clipbox'
+ return self.clipbox
+
+ def get_clip_path(self):
+ 'Return artist clip path'
+ return self._clippath
+
+ def get_transformed_clip_path_and_affine(self):
+ '''
+ Return the clip path with the non-affine part of its
+ transformation applied, and the remaining affine part of its
+ transformation.
+ '''
+ if self._clippath is not None:
+ return self._clippath.get_transformed_path_and_affine()
+ return None, None
+
+ def set_clip_on(self, b):
+ """
+ Set whether artist uses clipping.
+
+ When False, artists will be visible outside of the axes, which
+ can lead to unexpected results.
+
+ Parameters
+ ----------
+ b : bool
+ .. ACCEPTS: bool
+ """
+ self._clipon = b
+ # This may result in the callbacks being hit twice, but ensures they
+ # are hit at least once
+ self.pchanged()
+ self.stale = True
+
+ def _set_gc_clip(self, gc):
+ 'Set the clip properly for the gc'
+ if self._clipon:
+ if self.clipbox is not None:
+ gc.set_clip_rectangle(self.clipbox)
+ gc.set_clip_path(self._clippath)
+ else:
+ gc.set_clip_rectangle(None)
+ gc.set_clip_path(None)
+
+ def get_rasterized(self):
+ """Return whether the artist is to be rasterized."""
+ return self._rasterized
+
+ def set_rasterized(self, rasterized):
+ """
+ Force rasterized (bitmap) drawing in vector backend output.
+
+ Defaults to None, which implies the backend's default behavior.
+
+ Parameters
+ ----------
+ rasterized : bool or None
+ .. ACCEPTS: bool or None
+ """
+ if rasterized and not hasattr(self.draw, "_supports_rasterization"):
+ warnings.warn("Rasterization of '%s' will be ignored" % self)
+
+ self._rasterized = rasterized
+
+ def get_agg_filter(self):
+ """Return filter function to be used for agg filter."""
+ return self._agg_filter
+
+ def set_agg_filter(self, filter_func):
+ """Set the agg filter.
+
+ Parameters
+ ----------
+ filter_func : callable
+ A filter function, which takes a (m, n, 3) float array and a dpi
+ value, and returns a (m, n, 3) array.
+
+ .. ACCEPTS: a filter function, which takes a (m, n, 3) float array
+ and a dpi value, and returns a (m, n, 3) array
+ """
+ self._agg_filter = filter_func
+ self.stale = True
+
+ def draw(self, renderer, *args, **kwargs):
+ 'Derived classes drawing method'
+ if not self.get_visible():
+ return
+ self.stale = False
+
+ def set_alpha(self, alpha):
+ """
+ Set the alpha value used for blending - not supported on
+ all backends.
+
+ Parameters
+ ----------
+ alpha : float
+ .. ACCEPTS: float (0.0 transparent through 1.0 opaque)
+ """
+ self._alpha = alpha
+ self.pchanged()
+ self.stale = True
+
+ def set_visible(self, b):
+ """
+ Set the artist's visibility.
+
+ Parameters
+ ----------
+ b : bool
+ .. ACCEPTS: bool
+ """
+ self._visible = b
+ self.pchanged()
+ self.stale = True
+
+ def set_animated(self, b):
+ """
+ Set the artist's animation state.
+
+ Parameters
+ ----------
+ b : bool
+ .. ACCEPTS: bool
+ """
+ if self._animated != b:
+ self._animated = b
+ self.pchanged()
+
+ def update(self, props):
+ """
+ Update this artist's properties from the dictionary *props*.
+ """
+ def _update_property(self, k, v):
+ """Sorting out how to update property (setter or setattr).
+
+ Parameters
+ ----------
+ k : str
+ The name of the property to update
+ v : obj
+ The value to assign to the property
+
+ Returns
+ -------
+ ret : obj or None
+ If using a `set_*` method, return its return value, else None.
+ """
+ k = k.lower()
+ # white list attributes we want to be able to update through
+ # art.update, art.set, setp
+ if k in {'axes'}:
+ return setattr(self, k, v)
+ else:
+ func = getattr(self, 'set_' + k, None)
+ if not callable(func):
+ raise AttributeError('Unknown property %s' % k)
+ return func(v)
+
+ store = self.eventson
+ self.eventson = False
+ try:
+ ret = [_update_property(self, k, v)
+ for k, v in props.items()]
+ finally:
+ self.eventson = store
+
+ if len(ret):
+ self.pchanged()
+ self.stale = True
+ return ret
+
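+ # Illustrative property-batch sketch (assumes ``artist`` is any Artist):
+ # both calls route through _update_property and the set_* methods above.
+ #
+ #     artist.update({'alpha': 0.3, 'zorder': 5})
+ #     artist.set(alpha=0.3, zorder=5)   # equivalent keyword form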
+ def get_label(self):
+ """Get the label used for this artist in the legend."""
+ return self._label
+
+ def set_label(self, s):
+ """
+ Set the label to *s* for auto legend.
+
+ Parameters
+ ----------
+ s : object
+ *s* will be converted to a string by calling `str` (`unicode` on
+ Py2).
+
+ .. ACCEPTS: object
+ """
+ if s is not None:
+ self._label = six.text_type(s)
+ else:
+ self._label = None
+ self.pchanged()
+ self.stale = True
+
+ def get_zorder(self):
+ """Return the artist's zorder."""
+ return self.zorder
+
+ def set_zorder(self, level):
+ """
+ Set the zorder for the artist. Artists with lower zorder
+ values are drawn first.
+
+ Parameters
+ ----------
+ level : float
+ .. ACCEPTS: float
+ """
+ if level is None:
+ level = self.__class__.zorder
+ self.zorder = level
+ self.pchanged()
+ self.stale = True
+
+ @property
+ def sticky_edges(self):
+ """
+ `x` and `y` sticky edge lists.
+
+ When performing autoscaling, if a data limit coincides with a value in
+ the corresponding sticky_edges list, then no margin will be added--the
+ view limit "sticks" to the edge. A typical usecase is histograms,
+ where one usually expects no margin on the bottom edge (0) of the
+ histogram.
+
+ This attribute cannot be assigned to; however, the `x` and `y` lists
+ can be modified in place as needed.
+
+ Examples
+ --------
+
+ >>> artist.sticky_edges.x[:] = (xmin, xmax)
+ >>> artist.sticky_edges.y[:] = (ymin, ymax)
+
+ """
+ return self._sticky_edges
+
+ def update_from(self, other):
+ 'Copy properties from *other* to *self*.'
+ self._transform = other._transform
+ self._transformSet = other._transformSet
+ self._visible = other._visible
+ self._alpha = other._alpha
+ self.clipbox = other.clipbox
+ self._clipon = other._clipon
+ self._clippath = other._clippath
+ self._label = other._label
+ self._sketch = other._sketch
+ self._path_effects = other._path_effects
+ self.sticky_edges.x[:] = other.sticky_edges.x[:]
+ self.sticky_edges.y[:] = other.sticky_edges.y[:]
+ self.pchanged()
+ self.stale = True
+
+ def properties(self):
+ """
+ Return a dictionary mapping property name -> value for all Artist props.
+ """
+ return ArtistInspector(self).properties()
+
+ def set(self, **kwargs):
+ """A property batch setter. Pass *kwargs* to set properties.
+ """
+ props = OrderedDict(
+ sorted(kwargs.items(), reverse=True,
+ key=lambda x: (self._prop_order.get(x[0], 0), x[0])))
+
+ return self.update(props)
+
+ def findobj(self, match=None, include_self=True):
+ """
+ Find artist objects.
+
+ Recursively find all :class:`~matplotlib.artist.Artist` instances
+ contained in self.
+
+ *match* can be
+
+ - None: return all objects contained in artist.
+
+ - function with signature ``boolean = match(artist)``
+ used to filter matches
+
+ - class: e.g., ``Line2D``. Only return artists that are instances of this class.
+
+ If *include_self* is True (default), include self in the list to be
+ checked for a match.
+
+ """
+ if match is None: # always return True
+ def matchfunc(x):
+ return True
+ elif isinstance(match, type) and issubclass(match, Artist):
+ def matchfunc(x):
+ return isinstance(x, match)
+ elif callable(match):
+ matchfunc = match
+ else:
+ raise ValueError('match must be None, a matplotlib.artist.Artist '
+ 'subclass, or a callable')
+
+ artists = sum([c.findobj(matchfunc) for c in self.get_children()], [])
+ if include_self and matchfunc(self):
+ artists.append(self)
+ return artists
+
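+ # Illustrative findobj sketch (assumes ``fig`` is a Figure containing some
+ # Line2D artists; Line2D comes from matplotlib.lines):
+ #
+ #     from matplotlib.lines import Line2D
+ #     lines = fig.findobj(Line2D)                        # match by class
+ #     thick = fig.findobj(lambda a: isinstance(a, Line2D)
+ #                         and a.get_linewidth() > 2)     # match by predicate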
+ def get_cursor_data(self, event):
+ """
+ Get the cursor data for a given event.
+ """
+ return None
+
+ def format_cursor_data(self, data):
+ """
+ Return the cursor data formatted as a string.
+ """
+ try:
+ data[0]
+ except (TypeError, IndexError):
+ data = [data]
+ return ', '.join('{:0.3g}'.format(item) for item in data if
+ isinstance(item, (np.floating, np.integer, int, float)))
+
+ @property
+ def mouseover(self):
+ return self._mouseover
+
+ @mouseover.setter
+ def mouseover(self, val):
+ val = bool(val)
+ self._mouseover = val
+ ax = self.axes
+ if ax:
+ if val:
+ ax.mouseover_set.add(self)
+ else:
+ ax.mouseover_set.discard(self)
+
+
+class ArtistInspector(object):
+ """
+ A helper class to inspect an :class:`~matplotlib.artist.Artist`
+ and return information about its settable properties and their
+ current values.
+ """
+ def __init__(self, o):
+ r"""
+ Initialize the artist inspector with an `Artist` or an iterable of
+ `Artist`\s. If an iterable is used, we assume it is a homogeneous
+ sequence (all `Artists` are of the same type) and it is your
+ responsibility to make sure this is so.
+ """
+ if not isinstance(o, Artist):
+ if cbook.iterable(o):
+ o = list(o)
+ if len(o):
+ o = o[0]
+
+ self.oorig = o
+ if not inspect.isclass(o):
+ o = type(o)
+ self.o = o
+
+ self.aliasd = self.get_aliases()
+
+ def get_aliases(self):
+ """
+ Get a dict mapping *fullname* -> *alias* for each *alias* in
+ the :class:`~matplotlib.artist.ArtistInspector`.
+
+ e.g., for lines::
+
+ {'markerfacecolor': 'mfc',
+ 'linewidth' : 'lw',
+ }
+
+ """
+ names = [name for name in dir(self.o)
+ if name.startswith(('set_', 'get_'))
+ and callable(getattr(self.o, name))]
+ aliases = {}
+ for name in names:
+ func = getattr(self.o, name)
+ if not self.is_alias(func):
+ continue
+ docstring = func.__doc__
+ fullname = docstring[10:]
+ aliases.setdefault(fullname[4:], {})[name[4:]] = None
+ return aliases
+
+ _get_valid_values_regex = re.compile(
+ r"\n\s*(?:\.\.\s+)?ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))"
+ )
+
+ def get_valid_values(self, attr):
+ """
+ Get the legal arguments for the setter associated with *attr*.
+
+ This is done by querying the docstring of the function *set_attr*
+ for a line that begins with "ACCEPTS" or ".. ACCEPTS":
+
+ e.g., for a line linestyle, return
+ "[ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'steps'`` | ``'None'``
+ ]"
+ """
+
+ name = 'set_%s' % attr
+ if not hasattr(self.o, name):
+ raise AttributeError('%s has no function %s' % (self.o, name))
+ func = getattr(self.o, name)
+
+ docstring = func.__doc__
+ if docstring is None:
+ return 'unknown'
+
+ if docstring.startswith('alias for '):
+ return None
+
+ match = self._get_valid_values_regex.search(docstring)
+ if match is not None:
+ return re.sub("\n *", " ", match.group(1))
+ return 'unknown'
+
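+ # Illustrative ArtistInspector sketch (assumes ``line`` is a Line2D):
+ #
+ #     insp = ArtistInspector(line)
+ #     insp.get_valid_values('linestyle')   # parsed from the ACCEPTS line
+ #     insp.get_aliases()['linewidth']      # e.g. {'lw': None}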
+ def _get_setters_and_targets(self):
+ """
+ Get the attribute strings and a full path to where the setter
+ is defined for all setters in an object.
+ """
+
+ setters = []
+ for name in dir(self.o):
+ if not name.startswith('set_'):
+ continue
+ func = getattr(self.o, name)
+ if not callable(func):
+ continue
+ if six.PY2:
+ nargs = len(inspect.getargspec(func)[0])
+ else:
+ nargs = len(inspect.getfullargspec(func)[0])
+ if nargs < 2 or self.is_alias(func):
+ continue
+ source_class = self.o.__module__ + "." + self.o.__name__
+ for cls in self.o.mro():
+ if name in cls.__dict__:
+ source_class = cls.__module__ + "." + cls.__name__
+ break
+ setters.append((name[4:], source_class + "." + name))
+ return setters
+
+ def get_setters(self):
+ """
+ Get the attribute strings with setters for object. e.g., for a line,
+ return ``['markerfacecolor', 'linewidth', ....]``.
+ """
+
+ return [prop for prop, target in self._get_setters_and_targets()]
+
+ def is_alias(self, o):
+ """
+ Return *True* if method object *o* is an alias for another
+ function.
+ """
+ ds = o.__doc__
+ if ds is None:
+ return False
+ return ds.startswith('alias for ')
+
+ def aliased_name(self, s):
+ """
+ Return 'PROPNAME or alias' if *s* has an alias, else return
+ PROPNAME.
+
+ e.g., for the line markerfacecolor property, which has an
+ alias, return 'markerfacecolor or mfc' and for the transform
+ property, which does not, return 'transform'
+ """
+
+ if s in self.aliasd:
+ return s + ''.join([' or %s' % x
+ for x in sorted(self.aliasd[s])])
+ else:
+ return s
+
+ def aliased_name_rest(self, s, target):
+ """
+ Return 'PROPNAME or alias' if *s* has an alias, else return
+ PROPNAME formatted for ReST
+
+ e.g., for the line markerfacecolor property, which has an
+ alias, return 'markerfacecolor or mfc' and for the transform
+ property, which does not, return 'transform'
+ """
+
+ if s in self.aliasd:
+ aliases = ''.join([' or %s' % x
+ for x in sorted(self.aliasd[s])])
+ else:
+ aliases = ''
+ return ':meth:`%s <%s>`%s' % (s, target, aliases)
+
+ def pprint_setters(self, prop=None, leadingspace=2):
+ """
+ If *prop* is *None*, return a list of strings of all settable
+ properties and their valid values.
+
+ If *prop* is not *None*, it is a valid property name and that
+ property will be returned as a string of the form
+ "property: valid values".
+ """
+ if leadingspace:
+ pad = ' ' * leadingspace
+ else:
+ pad = ''
+ if prop is not None:
+ accepts = self.get_valid_values(prop)
+ return '%s%s: %s' % (pad, prop, accepts)
+
+ attrs = self._get_setters_and_targets()
+ attrs.sort()
+ lines = []
+
+ for prop, path in attrs:
+ accepts = self.get_valid_values(prop)
+ name = self.aliased_name(prop)
+
+ lines.append('%s%s: %s' % (pad, name, accepts))
+ return lines
+
+ def pprint_setters_rest(self, prop=None, leadingspace=4):
+ """
+ If *prop* is *None*, return a list of strings of all settable
+ properties and their valid values. Format the output for ReST
+
+ If *prop* is not *None*, it is a valid property name and that
+ property will be returned as a string of the form
+ "property: valid values".
+ """
+ if leadingspace:
+ pad = ' ' * leadingspace
+ else:
+ pad = ''
+ if prop is not None:
+ accepts = self.get_valid_values(prop)
+ return '%s%s: %s' % (pad, prop, accepts)
+
+ attrs = self._get_setters_and_targets()
+ attrs.sort()
+ lines = []
+
+ ########
+ names = [self.aliased_name_rest(prop, target)
+ for prop, target in attrs]
+ accepts = [self.get_valid_values(prop) for prop, target in attrs]
+
+ col0_len = max(len(n) for n in names)
+ col1_len = max(len(a) for a in accepts)
+
+ lines.append('')
+ lines.append(pad + '.. table::')
+ lines.append(pad + ' :class: property-table')
+ pad += ' '
+
+ table_formatstr = pad + '=' * col0_len + ' ' + '=' * col1_len
+
+ lines.append('')
+ lines.append(table_formatstr)
+ lines.append(pad + 'Property'.ljust(col0_len + 3) +
+ 'Description'.ljust(col1_len))
+ lines.append(table_formatstr)
+
+ lines.extend([pad + n.ljust(col0_len + 3) + a.ljust(col1_len)
+ for n, a in zip(names, accepts)])
+
+ lines.append(table_formatstr)
+ lines.append('')
+ return lines
+
+ def properties(self):
+ """
+ Return a dictionary mapping property name -> value.
+ """
+ o = self.oorig
+ getters = [name for name in dir(o)
+ if name.startswith('get_') and callable(getattr(o, name))]
+ getters.sort()
+ d = dict()
+ for name in getters:
+ func = getattr(o, name)
+ if self.is_alias(func):
+ continue
+
+ try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore')
+ val = func()
+ except:
+ continue
+ else:
+ d[name[4:]] = val
+
+ return d
+
+ def pprint_getters(self):
+ """
+ Return the getters and actual values as list of strings.
+ """
+
+ lines = []
+ for name, val in sorted(six.iteritems(self.properties())):
+ if getattr(val, 'shape', ()) != () and len(val) > 6:
+ s = str(val[:6]) + '...'
+ else:
+ s = str(val)
+ s = s.replace('\n', ' ')
+ if len(s) > 50:
+ s = s[:50] + '...'
+ name = self.aliased_name(name)
+ lines.append(' %s = %s' % (name, s))
+ return lines
+
+
+def getp(obj, property=None):
+ """
+ Return the value of an object's property. *property* is an optional
+ string naming the property you want to return.
+
+ Example usage::
+
+ getp(obj) # get all the object properties
+ getp(obj, 'linestyle') # get the linestyle property
+
+ *obj* is a :class:`Artist` instance, e.g.,
+ :class:`~matplotlib.lines.Line2D` or an instance of a
+ :class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
+ If the *property* is 'somename', this function returns
+
+ obj.get_somename()
+
+ :func:`getp` can be used to query all the gettable properties with
+ ``getp(obj)``. Many properties have aliases for shorter typing, e.g.
+ 'lw' is an alias for 'linewidth'. In the output, aliases and full
+ property names will be listed as:
+
+ property or alias = value
+
+ e.g.:
+
+ linewidth or lw = 2
+ """
+ if property is None:
+ insp = ArtistInspector(obj)
+ ret = insp.pprint_getters()
+ print('\n'.join(ret))
+ return
+
+ func = getattr(obj, 'get_' + property)
+ return func()
+
+# alias
+get = getp
+
+
+def setp(obj, *args, **kwargs):
+ """
+ Set a property on an artist object.
+
+ matplotlib supports the use of :func:`setp` ("set property") and
+ :func:`getp` to set and get object properties, as well as to do
+ introspection on the object. For example, to set the linestyle of a
+ line to be dashed, you can do::
+
+ >>> line, = plot([1,2,3])
+ >>> setp(line, linestyle='--')
+
+ If you want to know the valid types of arguments, you can provide
+ the name of the property you want to set without a value::
+
+ >>> setp(line, 'linestyle')
+ linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
+
+ If you want to see all the properties that can be set, and their
+ possible values, you can do::
+
+ >>> setp(line)
+ ... long output listing omitted
+
+ You may specify another output file to `setp` if `sys.stdout` is not
+ acceptable for some reason, using the `file` keyword-only argument::
+
+ >>> with open('output.log', 'w') as f:
+ ... setp(line, file=f)
+
+ :func:`setp` operates on a single instance or an iterable of
+ instances. If you are in query mode introspecting the possible
+ values, only the first instance in the sequence is used. When
+ actually setting values, all the instances will be set. e.g.,
+ suppose you have a list of two lines, the following will make both
+ lines thicker and red::
+
+ >>> x = arange(0,1.0,0.01)
+ >>> y1 = sin(2*pi*x)
+ >>> y2 = sin(4*pi*x)
+ >>> lines = plot(x, y1, x, y2)
+ >>> setp(lines, linewidth=2, color='r')
+
+ :func:`setp` works with the MATLAB style string/value pairs or
+ with python kwargs. For example, the following are equivalent::
+
+ >>> setp(lines, 'linewidth', 2, 'color', 'r') # MATLAB style
+ >>> setp(lines, linewidth=2, color='r') # python style
+ """
+
+ if isinstance(obj, Artist):
+ objs = [obj]
+ else:
+ objs = list(cbook.flatten(obj))
+
+ if not objs:
+ return
+
+ insp = ArtistInspector(objs[0])
+
+ # file has to be popped before checking if kwargs is empty
+ printArgs = {}
+ if 'file' in kwargs:
+ printArgs['file'] = kwargs.pop('file')
+
+ if not kwargs and len(args) < 2:
+ if args:
+ print(insp.pprint_setters(prop=args[0]), **printArgs)
+ else:
+ print('\n'.join(insp.pprint_setters()), **printArgs)
+ return
+
+ if len(args) % 2:
+ raise ValueError('The set args must be string, value pairs')
+
+ # put args into ordereddict to maintain order
+ funcvals = OrderedDict()
+ for i in range(0, len(args) - 1, 2):
+ funcvals[args[i]] = args[i + 1]
+
+ ret = [o.update(funcvals) for o in objs]
+ ret.extend([o.set(**kwargs) for o in objs])
+ return [x for x in cbook.flatten(ret)]
+
+
+def kwdoc(a):
+ hardcopy = matplotlib.rcParams['docstring.hardcopy']
+ if hardcopy:
+ return '\n'.join(ArtistInspector(a).pprint_setters_rest(
+ leadingspace=4))
+ else:
+ return '\n'.join(ArtistInspector(a).pprint_setters(leadingspace=2))
+
+docstring.interpd.update(Artist=kwdoc(Artist))
diff --git a/contrib/python/matplotlib/py2/matplotlib/axes/__init__.py b/contrib/python/matplotlib/py2/matplotlib/axes/__init__.py
new file mode 100644
index 00000000000..82c54389194
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/axes/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from ._subplots import *
+from ._axes import *
diff --git a/contrib/python/matplotlib/py2/matplotlib/axes/_axes.py b/contrib/python/matplotlib/py2/matplotlib/axes/_axes.py
new file mode 100644
index 00000000000..2027d250a8a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/axes/_axes.py
@@ -0,0 +1,8153 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange, zip, zip_longest
+
+import functools
+import itertools
+import logging
+import math
+import warnings
+
+import numpy as np
+from numpy import ma
+
+import matplotlib
+from matplotlib import _preprocess_data
+
+import matplotlib.cbook as cbook
+import matplotlib.collections as mcoll
+import matplotlib.colors as mcolors
+import matplotlib.contour as mcontour
+import matplotlib.category as _ # <-registers a category unit converter
+import matplotlib.dates as _ # <-registers a date unit converter
+import matplotlib.docstring as docstring
+import matplotlib.image as mimage
+import matplotlib.legend as mlegend
+import matplotlib.lines as mlines
+import matplotlib.markers as mmarkers
+import matplotlib.mlab as mlab
+import matplotlib.path as mpath
+import matplotlib.patches as mpatches
+import matplotlib.quiver as mquiver
+import matplotlib.stackplot as mstack
+import matplotlib.streamplot as mstream
+import matplotlib.table as mtable
+import matplotlib.text as mtext
+import matplotlib.ticker as mticker
+import matplotlib.transforms as mtransforms
+import matplotlib.tri as mtri
+from matplotlib.cbook import (
+ _backports, mplDeprecation, warn_deprecated,
+ STEP_LOOKUP_MAP, iterable, safe_first_element)
+from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
+from matplotlib.axes._base import _AxesBase, _process_plot_format
+
+_log = logging.getLogger(__name__)
+
+rcParams = matplotlib.rcParams
+
+_alias_map = {'color': ['c'],
+ 'linewidth': ['lw'],
+ 'linestyle': ['ls'],
+ 'facecolor': ['fc'],
+ 'edgecolor': ['ec'],
+ 'markerfacecolor': ['mfc'],
+ 'markeredgecolor': ['mec'],
+ 'markeredgewidth': ['mew'],
+ 'markersize': ['ms'],
+ }
+
+
+def _plot_args_replacer(args, data):
+ if len(args) == 1:
+ return ["y"]
+ elif len(args) == 2:
+ # this can be two cases: x,y or y,c
+ if not args[1] in data:
+ # this is not in data, so just assume that it is something which
+ # will not get replaced (color spec or array like).
+ return ["y", "c"]
+ # it's data, but could be a color code like 'ro' or 'b--'
+ # -> warn the user in that case...
+ try:
+ _process_plot_format(args[1])
+ except ValueError:
+ pass
+ else:
+ warnings.warn(
+ "Second argument {!r} is ambiguous: could be a color spec but "
+ "is in data; using as data. Either rename the entry in data "
+ "or use three arguments to plot.".format(args[1]),
+ RuntimeWarning, stacklevel=3)
+ return ["x", "y"]
+ elif len(args) == 3:
+ return ["x", "y", "c"]
+ else:
+ raise ValueError("Using arbitrary long args with data is not "
+ "supported due to ambiguity of arguments.\nUse "
+ "multiple plotting calls instead.")
+
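+ # Illustrative mapping produced by _plot_args_replacer above (assumes
+ # ``data`` is a dict-like with columns named 'xcol' and 'ycol'):
+ #
+ #     _plot_args_replacer(['ycol'], data)                 -> ['y']
+ #     _plot_args_replacer(['xcol', 'ycol'], data)         -> ['x', 'y']
+ #     _plot_args_replacer(['ycol', 'r--'], data)          -> ['y', 'c']
+ #     _plot_args_replacer(['xcol', 'ycol', 'r--'], data)  -> ['x', 'y', 'c']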
+
+# The axes module contains all the wrappers to plotting functions.
+# All the other methods should go in the _AxesBase class.
+
+class Axes(_AxesBase):
+ """
+ The :class:`Axes` contains most of the figure elements:
+ :class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
+ :class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
+ :class:`~matplotlib.patches.Polygon`, etc., and sets the
+ coordinate system.
+
+ The :class:`Axes` instance supports callbacks through a callbacks
+ attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
+ instance. The events you can connect to are 'xlim_changed' and
+ 'ylim_changed' and the callback will be called with func(*ax*)
+ where *ax* is the :class:`Axes` instance.
+ """
+ ### Labelling, legend and texts
+
+ aname = 'Axes'
+
+ def get_title(self, loc="center"):
+ """
+ Get an axes title.
+
+ Get one of the three available axes titles. The available titles
+ are positioned above the axes in the center, flush with the left
+ edge, and flush with the right edge.
+
+ Parameters
+ ----------
+ loc : {'center', 'left', 'right'}, str, optional
+ Which title to get, defaults to 'center'.
+
+ Returns
+ -------
+ title : str
+ The title text string.
+
+ """
+ try:
+ title = {'left': self._left_title,
+ 'center': self.title,
+ 'right': self._right_title}[loc.lower()]
+ except KeyError:
+ raise ValueError("'%s' is not a valid location" % loc)
+ return title.get_text()
+
+ def set_title(self, label, fontdict=None, loc="center", pad=None,
+ **kwargs):
+ """
+ Set a title for the axes.
+
+ Set one of the three available axes titles. The available titles
+ are positioned above the axes in the center, flush with the left
+ edge, and flush with the right edge.
+
+ Parameters
+ ----------
+ label : str
+ Text to use for the title
+
+ fontdict : dict
+ A dictionary controlling the appearance of the title text,
+ the default `fontdict` is::
+
+ {'fontsize': rcParams['axes.titlesize'],
+ 'fontweight' : rcParams['axes.titleweight'],
+ 'verticalalignment': 'baseline',
+ 'horizontalalignment': loc}
+
+ loc : {'center', 'left', 'right'}, str, optional
+ Which title to set, defaults to 'center'
+
+ pad : float
+ The offset of the title from the top of the axes, in points.
+ Default is ``None`` to use rcParams['axes.titlepad'].
+
+ Returns
+ -------
+ text : :class:`~matplotlib.text.Text`
+ The matplotlib text instance representing the title
+
+ Other Parameters
+ ----------------
+ **kwargs : `~matplotlib.text.Text` properties
+ Other keyword arguments are text properties, see
+ :class:`~matplotlib.text.Text` for a list of valid text
+ properties.
+ """
+ try:
+ title = {'left': self._left_title,
+ 'center': self.title,
+ 'right': self._right_title}[loc.lower()]
+ except KeyError:
+ raise ValueError("'%s' is not a valid location" % loc)
+ default = {
+ 'fontsize': rcParams['axes.titlesize'],
+ 'fontweight': rcParams['axes.titleweight'],
+ 'verticalalignment': 'baseline',
+ 'horizontalalignment': loc.lower()}
+ if pad is None:
+ pad = rcParams['axes.titlepad']
+ self._set_title_offset_trans(float(pad))
+ title.set_text(label)
+ title.update(default)
+ if fontdict is not None:
+ title.update(fontdict)
+ title.update(kwargs)
+ return title
+
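+ # Illustrative title sketch (assumes ``ax`` is an Axes instance): the three
+ # title slots can be set independently.
+ #
+ #     ax.set_title('centre title')
+ #     ax.set_title('left panel label', loc='left', pad=12,
+ #                  fontdict={'fontsize': 10, 'fontweight': 'bold'})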
+ def get_xlabel(self):
+ """
+ Get the xlabel text string.
+ """
+ label = self.xaxis.get_label()
+ return label.get_text()
+
+ def set_xlabel(self, xlabel, fontdict=None, labelpad=None, **kwargs):
+ """
+ Set the label for the x-axis.
+
+ Parameters
+ ----------
+ xlabel : str
+ The label text.
+
+ labelpad : scalar, optional, default: None
+ Spacing in points between the label and the x-axis.
+
+ Other Parameters
+ ----------------
+ **kwargs : `.Text` properties
+ `.Text` properties control the appearance of the label.
+
+ See also
+ --------
+ text : for information on how the overrides and the optional args work
+ """
+ if labelpad is not None:
+ self.xaxis.labelpad = labelpad
+ return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)
+
+ def get_ylabel(self):
+ """
+ Get the ylabel text string.
+ """
+ label = self.yaxis.get_label()
+ return label.get_text()
+
+ def set_ylabel(self, ylabel, fontdict=None, labelpad=None, **kwargs):
+ """
+ Set the label for the y-axis.
+
+ Parameters
+ ----------
+ ylabel : str
+ The label text.
+
+ labelpad : scalar, optional, default: None
+ Spacing in points between the label and the y-axis.
+
+ Other Parameters
+ ----------------
+ **kwargs : `.Text` properties
+ `.Text` properties control the appearance of the label.
+
+ See also
+ --------
+ text : for information on how the overrides and the optional args work
+
+ """
+ if labelpad is not None:
+ self.yaxis.labelpad = labelpad
+ return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)
+
+ def get_legend_handles_labels(self, legend_handler_map=None):
+ """
+ Return handles and labels for legend
+
+ ``ax.legend()`` is equivalent to ::
+
+ h, l = ax.get_legend_handles_labels()
+ ax.legend(h, l)
+
+ """
+
+ # pass through to legend.
+ handles, labels = mlegend._get_legend_handles_labels([self],
+ legend_handler_map)
+ return handles, labels
+
+ @docstring.dedent_interpd
+ def legend(self, *args, **kwargs):
+ """
+ Places a legend on the axes.
+
+ Call signatures::
+
+ legend()
+ legend(labels)
+ legend(handles, labels)
+
+ The call signatures correspond to three different ways to use
+ this method.
+
+ **1. Automatic detection of elements to be shown in the legend**
+
+ The elements to be added to the legend are automatically determined,
+ when you do not pass in any extra arguments.
+
+ In this case, the labels are taken from the artist. You can specify
+ them either at artist creation or by calling the
+ :meth:`~.Artist.set_label` method on the artist::
+
+ line, = ax.plot([1, 2, 3], label='Inline label')
+ ax.legend()
+
+ or::
+
+ line.set_label('Label via method')
+ line, = ax.plot([1, 2, 3])
+ ax.legend()
+
+ Specific lines can be excluded from the automatic legend element
+ selection by defining a label starting with an underscore.
+ This is default for all artists, so calling `Axes.legend` without
+ any arguments and without setting the labels manually will result in
+ no legend being drawn.
+
+
+ **2. Labeling existing plot elements**
+
+ To make a legend for lines which already exist on the axes
+ (via plot for instance), simply call this function with an iterable
+ of strings, one for each legend item. For example::
+
+ ax.plot([1, 2, 3])
+ ax.legend(['A simple line'])
+
+ Note: This usage is discouraged, because the relation between
+ plot elements and labels is only implicit by their order and can
+ easily be mixed up.
+
+
+ **3. Explicitly defining the elements in the legend**
+
+ For full control of which artists have a legend entry, it is possible
+ to pass an iterable of legend artists followed by an iterable of
+ legend labels respectively::
+
+ legend((line1, line2, line3), ('label1', 'label2', 'label3'))
+
+ Parameters
+ ----------
+
+ handles : sequence of `.Artist`, optional
+ A list of Artists (lines, patches) to be added to the legend.
+ Use this together with *labels*, if you need full control on what
+ is shown in the legend and the automatic mechanism described above
+ is not sufficient.
+
+ The length of handles and labels should be the same in this
+ case. If they are not, they are truncated to the smaller length.
+
+ labels : sequence of strings, optional
+ A list of labels to show next to the artists.
+ Use this together with *handles*, if you need full control on what
+ is shown in the legend and the automatic mechanism described above
+ is not sufficient.
+
+ Other Parameters
+ ----------------
+
+ loc : int or string or pair of floats, default: 'upper right'
+ The location of the legend. Possible codes are:
+
+ =============== =============
+ Location String Location Code
+ =============== =============
+ 'best' 0
+ 'upper right' 1
+ 'upper left' 2
+ 'lower left' 3
+ 'lower right' 4
+ 'right' 5
+ 'center left' 6
+ 'center right' 7
+ 'lower center' 8
+ 'upper center' 9
+ 'center' 10
+ =============== =============
+
+
+ Alternatively can be a 2-tuple giving ``x, y`` of the lower-left
+ corner of the legend in axes coordinates (in which case
+ ``bbox_to_anchor`` will be ignored).
+
+ bbox_to_anchor : `.BboxBase` or pair of floats
+ Specify any arbitrary location for the legend in `bbox_transform`
+ coordinates (default Axes coordinates).
+
+ For example, to put the legend's upper right hand corner in the
+ center of the axes the following keywords can be used::
+
+ loc='upper right', bbox_to_anchor=(0.5, 0.5)
+
+ ncol : integer
+ The number of columns that the legend has. Default is 1.
+
+ prop : None or :class:`matplotlib.font_manager.FontProperties` or dict
+ The font properties of the legend. If None (default), the current
+ :data:`matplotlib.rcParams` will be used.
+
+ fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium', \
+'large', 'x-large', 'xx-large'}
+ Controls the font size of the legend. If the value is numeric the
+ size will be the absolute font size in points. String values are
+ relative to the current default font size. This argument is only
+ used if `prop` is not specified.
+
+ numpoints : None or int
+ The number of marker points in the legend when creating a legend
+ entry for a `.Line2D` (line).
+ Default is ``None``, which will take the value from
+ :rc:`legend.numpoints`.
+
+ scatterpoints : None or int
+ The number of marker points in the legend when creating
+ a legend entry for a `.PathCollection` (scatter plot).
+ Default is ``None``, which will take the value from
+ :rc:`legend.scatterpoints`.
+
+ scatteryoffsets : iterable of floats
+ The vertical offset (relative to the font size) for the markers
+ created for a scatter plot legend entry. 0.0 is at the base of the
+ legend text, and 1.0 is at the top. To draw all markers at the
+ same height, set to ``[0.5]``. Default is ``[0.375, 0.5, 0.3125]``.
+
+ markerscale : None or int or float
+ The relative size of legend markers compared with the originally
+ drawn ones.
+ Default is ``None``, which will take the value from
+ :rc:`legend.markerscale`.
+
+ markerfirst : bool
+ If *True*, legend marker is placed to the left of the legend label.
+ If *False*, legend marker is placed to the right of the legend
+ label.
+ Default is *True*.
+
+ frameon : None or bool
+ Control whether the legend should be drawn on a patch
+ (frame).
+ Default is ``None``, which will take the value from
+ :rc:`legend.frameon`.
+
+ fancybox : None or bool
+ Control whether round edges should be enabled around the
+ :class:`~matplotlib.patches.FancyBboxPatch` which makes up the
+ legend's background.
+ Default is ``None``, which will take the value from
+ :rc:`legend.fancybox`.
+
+ shadow : None or bool
+ Control whether to draw a shadow behind the legend.
+ Default is ``None``, which will take the value from
+ :rc:`legend.shadow`.
+
+ framealpha : None or float
+ Control the alpha transparency of the legend's background.
+ Default is ``None``, which will take the value from
+ :rc:`legend.framealpha`. If shadow is activated and
+ *framealpha* is ``None``, the default value is ignored.
+
+ facecolor : None or "inherit" or a color spec
+ Control the legend's background color.
+ Default is ``None``, which will take the value from
+ :rc:`legend.facecolor`. If ``"inherit"``, it will take
+ :rc:`axes.facecolor`.
+
+ edgecolor : None or "inherit" or a color spec
+ Control the legend's background patch edge color.
+ Default is ``None``, which will take the value from
+ :rc:`legend.edgecolor` If ``"inherit"``, it will take
+ :rc:`axes.edgecolor`.
+
+ mode : {"expand", None}
+ If `mode` is set to ``"expand"`` the legend will be horizontally
+ expanded to fill the axes area (or `bbox_to_anchor` if it defines
+ the legend's size).
+
+ bbox_transform : None or :class:`matplotlib.transforms.Transform`
+ The transform for the bounding box (`bbox_to_anchor`). For a value
+ of ``None`` (default) the Axes'
+ :data:`~matplotlib.axes.Axes.transAxes` transform will be used.
+
+ title : str or None
+ The legend's title. Default is no title (``None``).
+
+ borderpad : float or None
+ The fractional whitespace inside the legend border.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.borderpad`.
+
+ labelspacing : float or None
+ The vertical space between the legend entries.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.labelspacing`.
+
+ handlelength : float or None
+ The length of the legend handles.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.handlelength`.
+
+ handletextpad : float or None
+ The pad between the legend handle and text.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.handletextpad`.
+
+ borderaxespad : float or None
+ The pad between the axes and legend border.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.borderaxespad`.
+
+ columnspacing : float or None
+ The spacing between columns.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.columnspacing`.
+
+ handler_map : dict or None
+ The custom dictionary mapping instances or types to a legend
+ handler. This `handler_map` updates the default handler map
+ found at :func:`matplotlib.legend.Legend.get_legend_handler_map`.
+
+ Returns
+ -------
+
+ :class:`matplotlib.legend.Legend` instance
+
+ Notes
+ -----
+
+ Not all kinds of artist are supported by the legend command. See
+ :doc:`/tutorials/intermediate/legend_guide` for details.
+
+ Examples
+ --------
+
+ .. plot:: gallery/api/legend.py
+
+ """
+ handles, labels, extra_args, kwargs = mlegend._parse_legend_args(
+ [self],
+ *args,
+ **kwargs)
+ if len(extra_args):
+ raise TypeError('legend only accepts two non-keyword arguments')
+ self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
+ self.legend_._remove_method = lambda h: setattr(self, 'legend_', None)
+ return self.legend_
+
+ def text(self, x, y, s, fontdict=None, withdash=False, **kwargs):
+ """
+ Add text to the axes.
+
+ Add the text *s* to the axes at location *x*, *y* in data coordinates.
+
+ Parameters
+ ----------
+ x, y : scalars
+ The position to place the text. By default, this is in data
+ coordinates. The coordinate system can be changed using the
+ *transform* parameter.
+
+ s : str
+ The text.
+
+ fontdict : dictionary, optional, default: None
+ A dictionary to override the default text properties. If fontdict
+ is None, the defaults are determined by your rc parameters.
+
+ withdash : boolean, optional, default: False
+ Creates a `~matplotlib.text.TextWithDash` instance instead of a
+ `~matplotlib.text.Text` instance.
+
+ Returns
+ -------
+ text : `.Text`
+ The created `.Text` instance.
+
+ Other Parameters
+ ----------------
+ **kwargs : `~matplotlib.text.Text` properties.
+ Other miscellaneous text parameters.
+
+ Examples
+ --------
+ Individual keyword arguments can be used to override any given
+ parameter::
+
+ >>> text(x, y, s, fontsize=12)
+
+ The default transform specifies that text is in data coords,
+ alternatively, you can specify text in axis coords (0,0 is
+ lower-left and 1,1 is upper-right). The example below places
+ text in the center of the axes::
+
+ >>> text(0.5, 0.5, 'matplotlib', horizontalalignment='center',
+ ... verticalalignment='center', transform=ax.transAxes)
+
+ You can put a rectangular box around the text instance (e.g., to
+ set a background color) by using the keyword `bbox`. `bbox` is
+ a dictionary of `~matplotlib.patches.Rectangle`
+ properties. For example::
+
+ >>> text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
+ """
+ default = {
+ 'verticalalignment': 'baseline',
+ 'horizontalalignment': 'left',
+ 'transform': self.transData,
+ 'clip_on': False}
+
+ # At some point if we feel confident that TextWithDash
+ # is robust as a drop-in replacement for Text and that
+ # the performance impact of the heavier-weight class
+ # isn't too significant, it may make sense to eliminate
+ # the withdash kwarg and simply delegate whether there's
+ # a dash to TextWithDash and dashlength.
+ if withdash:
+ t = mtext.TextWithDash(
+ x=x, y=y, text=s)
+ else:
+ t = mtext.Text(
+ x=x, y=y, text=s)
+
+ t.update(default)
+ if fontdict is not None:
+ t.update(fontdict)
+ t.update(kwargs)
+
+ t.set_clip_path(self.patch)
+ self._add_text(t)
+ return t
+
+ @docstring.dedent_interpd
+ def annotate(self, *args, **kwargs):
+ a = mtext.Annotation(*args, **kwargs)
+ a.set_transform(mtransforms.IdentityTransform())
+ if 'clip_on' in kwargs:
+ a.set_clip_path(self.patch)
+ self._add_text(a)
+ return a
+ annotate.__doc__ = mtext.Annotation.__init__.__doc__
+ #### Lines and spans
+
+ @docstring.dedent_interpd
+ def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
+ """
+ Add a horizontal line across the axis.
+
+ Parameters
+ ----------
+ y : scalar, optional, default: 0
+ y position in data coordinates of the horizontal line.
+
+ xmin : scalar, optional, default: 0
+ Should be between 0 and 1, 0 being the far left of the plot, 1 the
+ far right of the plot.
+
+ xmax : scalar, optional, default: 1
+ Should be between 0 and 1, 0 being the far left of the plot, 1 the
+ far right of the plot.
+
+ Returns
+ -------
+ line : :class:`~matplotlib.lines.Line2D`
+
+ Other Parameters
+ ----------------
+ **kwargs :
+ Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
+ with the exception of 'transform':
+
+ %(Line2D)s
+
+ See also
+ --------
+ hlines : Add horizontal lines in data coordinates.
+ axhspan : Add a horizontal span (rectangle) across the axis.
+
+ Examples
+ --------
+
+ * draw a thick red hline at 'y' = 0 that spans the xrange::
+
+ >>> axhline(linewidth=4, color='r')
+
+ * draw a default hline at 'y' = 1 that spans the xrange::
+
+ >>> axhline(y=1)
+
+ * draw a default hline at 'y' = .5 that spans the middle half of
+ the xrange::
+
+ >>> axhline(y=.5, xmin=0.25, xmax=0.75)
+
+ """
+ if "transform" in kwargs:
+ raise ValueError(
+ "'transform' is not allowed as a kwarg;"
+ + "axhline generates its own transform.")
+ ymin, ymax = self.get_ybound()
+
+ # We need to strip away the units for comparison with
+ # non-unitized bounds
+ self._process_unit_info(ydata=y, kwargs=kwargs)
+ yy = self.convert_yunits(y)
+ scaley = (yy < ymin) or (yy > ymax)
+
+ trans = self.get_yaxis_transform(which='grid')
+ l = mlines.Line2D([xmin, xmax], [y, y], transform=trans, **kwargs)
+ self.add_line(l)
+ self.autoscale_view(scalex=False, scaley=scaley)
+ return l
+
+ @docstring.dedent_interpd
+ def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
+ """
+ Add a vertical line across the axes.
+
+ Parameters
+ ----------
+ x : scalar, optional, default: 0
+ x position in data coordinates of the vertical line.
+
+ ymin : scalar, optional, default: 0
+ Should be between 0 and 1, 0 being the bottom of the plot, 1 the
+ top of the plot.
+
+ ymax : scalar, optional, default: 1
+ Should be between 0 and 1, 0 being the bottom of the plot, 1 the
+ top of the plot.
+
+ Returns
+ -------
+ line : :class:`~matplotlib.lines.Line2D`
+
+ Other Parameters
+ ----------------
+ **kwargs :
+ Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
+ with the exception of 'transform':
+
+ %(Line2D)s
+
+ Examples
+ --------
+ * draw a thick red vline at *x* = 0 that spans the yrange::
+
+ >>> axvline(linewidth=4, color='r')
+
+ * draw a default vline at *x* = 1 that spans the yrange::
+
+ >>> axvline(x=1)
+
+ * draw a default vline at *x* = .5 that spans the middle half of
+ the yrange::
+
+ >>> axvline(x=.5, ymin=0.25, ymax=0.75)
+
+ See also
+ --------
+ vlines : Add vertical lines in data coordinates.
+ axvspan : Add a vertical span (rectangle) across the axis.
+ """
+
+ if "transform" in kwargs:
+ raise ValueError(
+ "'transform' is not allowed as a kwarg;"
+ + "axvline generates its own transform.")
+ xmin, xmax = self.get_xbound()
+
+ # We need to strip away the units for comparison with
+ # non-unitized bounds
+ self._process_unit_info(xdata=x, kwargs=kwargs)
+ xx = self.convert_xunits(x)
+ scalex = (xx < xmin) or (xx > xmax)
+
+ trans = self.get_xaxis_transform(which='grid')
+ l = mlines.Line2D([x, x], [ymin, ymax], transform=trans, **kwargs)
+ self.add_line(l)
+ self.autoscale_view(scalex=scalex, scaley=False)
+ return l
+
+ @docstring.dedent_interpd
+ def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
+ """
+ Add a horizontal span (rectangle) across the axis.
+
+ Draw a horizontal span (rectangle) from *ymin* to *ymax*.
+ With the default values of *xmin* = 0 and *xmax* = 1, this
+ always spans the xrange, regardless of the xlim settings, even
+ if you change them, e.g., with the :meth:`set_xlim` command.
+ That is, the horizontal extent is in axes coords: 0=left,
+ 0.5=middle, 1.0=right but the *y* location is in data
+ coordinates.
+
+ Parameters
+ ----------
+ ymin : float
+ Lower limit of the horizontal span in data units.
+ ymax : float
+ Upper limit of the horizontal span in data units.
+ xmin : float, optional, default: 0
+ Lower limit of the vertical span in axes (relative
+ 0-1) units.
+ xmax : float, optional, default: 1
+ Upper limit of the vertical span in axes (relative
+ 0-1) units.
+
+ Returns
+ -------
+ Polygon : `~matplotlib.patches.Polygon`
+
+ Other Parameters
+ ----------------
+ **kwargs : `~matplotlib.patches.Polygon` properties.
+
+ %(Polygon)s
+
+ See Also
+ --------
+ axvspan : Add a vertical span across the axes.
+ """
+ trans = self.get_yaxis_transform(which='grid')
+
+ # process the unit information
+ self._process_unit_info([xmin, xmax], [ymin, ymax], kwargs=kwargs)
+
+ # first we need to strip away the units
+ xmin, xmax = self.convert_xunits([xmin, xmax])
+ ymin, ymax = self.convert_yunits([ymin, ymax])
+
+ verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
+ p = mpatches.Polygon(verts, **kwargs)
+ p.set_transform(trans)
+ self.add_patch(p)
+ self.autoscale_view(scalex=False)
+ return p
+
+ def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
+ """
+ Add a vertical span (rectangle) across the axes.
+
+ Draw a vertical span (rectangle) from `xmin` to `xmax`. With
+ the default values of `ymin` = 0 and `ymax` = 1, this always
+ spans the yrange, regardless of the ylim settings, even if you
+ change them, e.g., with the :meth:`set_ylim` command. That is,
+ the vertical extent is in axes coords: 0=bottom, 0.5=middle,
+ 1.0=top but the y location is in data coordinates.
+
+ Parameters
+ ----------
+ xmin : scalar
+ Number indicating the first X-axis coordinate of the vertical
+ span rectangle in data units.
+ xmax : scalar
+ Number indicating the second X-axis coordinate of the vertical
+ span rectangle in data units.
+ ymin : scalar, optional
+ Number indicating the first Y-axis coordinate of the vertical
+ span rectangle in relative Y-axis units (0-1). Default to 0.
+ ymax : scalar, optional
+ Number indicating the second Y-axis coordinate of the vertical
+ span rectangle in relative Y-axis units (0-1). Default to 1.
+
+ Returns
+ -------
+ rectangle : matplotlib.patches.Polygon
+ Vertical span (rectangle) from (xmin, ymin) to (xmax, ymax).
+
+ Other Parameters
+ ----------------
+ **kwargs
+ Optional parameters are properties of the class
+ matplotlib.patches.Polygon.
+
+ See Also
+ --------
+ axhspan : Add a horizontal span across the axes.
+
+ Examples
+ --------
+ Draw a vertical, green, translucent rectangle from x = 1.25 to
+ x = 1.55 that spans the yrange of the axes.
+
+ >>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
+
+ """
+ trans = self.get_xaxis_transform(which='grid')
+
+ # process the unit information
+ self._process_unit_info([xmin, xmax], [ymin, ymax], kwargs=kwargs)
+
+ # first we need to strip away the units
+ xmin, xmax = self.convert_xunits([xmin, xmax])
+ ymin, ymax = self.convert_yunits([ymin, ymax])
+
+ verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
+ p = mpatches.Polygon(verts, **kwargs)
+ p.set_transform(trans)
+ self.add_patch(p)
+ self.autoscale_view(scaley=False)
+ return p
+
+ @_preprocess_data(replace_names=["y", "xmin", "xmax", "colors"],
+ label_namer="y")
+ def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
+ label='', **kwargs):
+ """
+ Plot horizontal lines at each *y* from *xmin* to *xmax*.
+
+ Parameters
+ ----------
+ y : scalar or sequence of scalar
+ y-indexes at which to plot the lines.
+
+ xmin, xmax : scalar or 1D array_like
+ Respective beginning and end of each line. If scalars are
+ provided, all lines will have same length.
+
+ colors : array_like of colors, optional, default: 'k'
+
+ linestyles : ['solid' | 'dashed' | 'dashdot' | 'dotted'], optional
+
+ label : string, optional, default: ''
+
+ Returns
+ -------
+ lines : `~matplotlib.collections.LineCollection`
+
+ Other Parameters
+ ----------------
+ **kwargs : `~matplotlib.collections.LineCollection` properties.
+
+ See also
+ --------
+ vlines : vertical lines
+ axhline: horizontal line across the axes
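+
+ Examples
+ --------
+ A minimal, illustrative call (the values are arbitrary): three
+ horizontal lines of increasing length.
+
+ >>> hlines([1, 2, 3], 0, [1, 2, 3])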
+ """
+
+ # We do the conversion first since not all unitized data is uniform
+ # process the unit information
+ self._process_unit_info([xmin, xmax], y, kwargs=kwargs)
+ y = self.convert_yunits(y)
+ xmin = self.convert_xunits(xmin)
+ xmax = self.convert_xunits(xmax)
+
+ if not iterable(y):
+ y = [y]
+ if not iterable(xmin):
+ xmin = [xmin]
+ if not iterable(xmax):
+ xmax = [xmax]
+
+ y, xmin, xmax = cbook.delete_masked_points(y, xmin, xmax)
+
+ y = np.ravel(y)
+ xmin = np.resize(xmin, y.shape)
+ xmax = np.resize(xmax, y.shape)
+
+ verts = [((thisxmin, thisy), (thisxmax, thisy))
+ for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
+ lines = mcoll.LineCollection(verts, colors=colors,
+ linestyles=linestyles, label=label)
+ self.add_collection(lines, autolim=False)
+ lines.update(kwargs)
+
+ if len(y) > 0:
+ minx = min(xmin.min(), xmax.min())
+ maxx = max(xmin.max(), xmax.max())
+ miny = y.min()
+ maxy = y.max()
+
+ corners = (minx, miny), (maxx, maxy)
+
+ self.update_datalim(corners)
+ self.autoscale_view()
+
+ return lines
+
+ @_preprocess_data(replace_names=["x", "ymin", "ymax", "colors"],
+ label_namer="x")
+ def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
+ label='', **kwargs):
+ """
+ Plot vertical lines.
+
+ Plot vertical lines at each *x* from *ymin* to *ymax*.
+
+ Parameters
+ ----------
+ x : scalar or 1D array_like
+ x-indexes at which to plot the lines.
+
+ ymin, ymax : scalar or 1D array_like
+ Respective beginning and end of each line. If scalars are
+ provided, all lines will have same length.
+
+ colors : array_like of colors, optional, default: 'k'
+
+ linestyles : ['solid' | 'dashed' | 'dashdot' | 'dotted'], optional
+
+ label : string, optional, default: ''
+
+ Returns
+ -------
+ lines : `~matplotlib.collections.LineCollection`
+
+ Other Parameters
+ ----------------
+ **kwargs : `~matplotlib.collections.LineCollection` properties.
+
+ See also
+ --------
+ hlines : horizontal lines
+ axvline: vertical line across the axes
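+
+ Examples
+ --------
+ A minimal, illustrative call (the values are arbitrary): three
+ vertical lines of increasing length.
+
+ >>> vlines([1, 2, 3], 0, [1, 4, 9])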
+ """
+
+ self._process_unit_info(xdata=x, ydata=[ymin, ymax], kwargs=kwargs)
+
+ # We do the conversion first since not all unitized data is uniform
+ x = self.convert_xunits(x)
+ ymin = self.convert_yunits(ymin)
+ ymax = self.convert_yunits(ymax)
+
+ if not iterable(x):
+ x = [x]
+ if not iterable(ymin):
+ ymin = [ymin]
+ if not iterable(ymax):
+ ymax = [ymax]
+
+ x, ymin, ymax = cbook.delete_masked_points(x, ymin, ymax)
+
+ x = np.ravel(x)
+ ymin = np.resize(ymin, x.shape)
+ ymax = np.resize(ymax, x.shape)
+
+ verts = [((thisx, thisymin), (thisx, thisymax))
+ for thisx, thisymin, thisymax in zip(x, ymin, ymax)]
+ lines = mcoll.LineCollection(verts, colors=colors,
+ linestyles=linestyles, label=label)
+ self.add_collection(lines, autolim=False)
+ lines.update(kwargs)
+
+ if len(x) > 0:
+ minx = x.min()
+ maxx = x.max()
+ miny = min(ymin.min(), ymax.min())
+ maxy = max(ymin.max(), ymax.max())
+
+ corners = (minx, miny), (maxx, maxy)
+ self.update_datalim(corners)
+ self.autoscale_view()
+
+ return lines
+
+ @_preprocess_data(replace_names=["positions", "lineoffsets",
+ "linelengths", "linewidths",
+ "colors", "linestyles"],
+ label_namer=None)
+ @docstring.dedent_interpd
+ def eventplot(self, positions, orientation='horizontal', lineoffsets=1,
+ linelengths=1, linewidths=None, colors=None,
+ linestyles='solid', **kwargs):
+ """
+ Plot identical parallel lines at the given positions.
+
+ *positions* should be a 1D or 2D array-like object, with each row
+ corresponding to a row or column of lines.
+
+ This type of plot is commonly used in neuroscience for representing
+ neural events, where it is usually called a spike raster, dot raster,
+ or raster plot.
+
+ However, it is useful in any situation where you wish to show the
+ timing or position of multiple sets of discrete events, such as the
+ arrival times of people to a business on each day of the month or the
+ date of hurricanes each year of the last century.
+
+ Parameters
+ ----------
+ positions : 1D or 2D array-like object
+ Each value is an event. If *positions* is a 2D array-like, each
+ row corresponds to a row or a column of lines (depending on the
+ *orientation* parameter).
+
+ orientation : {'horizontal', 'vertical'}, optional
+ Controls the direction of the event collections:
+
+ - 'horizontal' : the lines are arranged horizontally in rows,
+ and are vertical.
+ - 'vertical' : the lines are arranged vertically in columns,
+ and are horizontal.
+
+ lineoffsets : scalar or sequence of scalars, optional, default: 1
+ The offset of the center of the lines from the origin, in the
+ direction orthogonal to *orientation*.
+
+ linelengths : scalar or sequence of scalars, optional, default: 1
+ The total height of the lines (i.e. each line stretches from
+ ``lineoffset - linelength/2`` to ``lineoffset + linelength/2``).
+
+ linewidths : scalar, scalar sequence or None, optional, default: None
+ The line width(s) of the event lines, in points. If it is None,
+ defaults to its rcParams setting.
+
+ colors : color, sequence of colors or None, optional, default: None
+ The color(s) of the event lines. If it is None, defaults to its
+ rcParams setting.
+
+ linestyles : str or tuple or a sequence of such values, optional
+ Default is 'solid'. Valid strings are ['solid', 'dashed',
+ 'dashdot', 'dotted', '-', '--', '-.', ':']. Dash tuples
+ should be of the form::
+
+ (offset, onoffseq),
+
+ where *onoffseq* is an even length tuple of on and off ink
+ in points.
+
+ **kwargs : optional
+ Other keyword arguments are line collection properties. See
+ :class:`~matplotlib.collections.LineCollection` for a list of
+ the valid properties.
+
+ Returns
+ -------
+
+ list : A list of :class:`~.collections.EventCollection` objects.
+ Contains the :class:`~.collections.EventCollection` objects
+ that were added.
+
+ Notes
+ -----
+
+ For *linelengths*, *linewidths*, *colors*, and *linestyles*, if only
+ a single value is given, that value is applied to all lines. If an
+ array-like is given, it must have the same length as *positions*, and
+ each value will be applied to the corresponding row of the array.
+
+ Examples
+ --------
+
+ .. plot:: gallery/lines_bars_and_markers/eventplot_demo.py
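+
+ A minimal, illustrative call (the event times are arbitrary):
+
+ >>> eventplot([[1.0, 2.5, 3.0], [0.5, 1.5], [2.0]],
+ ... colors=['C0', 'C1', 'C2'])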
+ """
+ self._process_unit_info(xdata=positions,
+ ydata=[lineoffsets, linelengths],
+ kwargs=kwargs)
+
+ # We do the conversion first since not all unitized data is uniform
+ positions = self.convert_xunits(positions)
+ lineoffsets = self.convert_yunits(lineoffsets)
+ linelengths = self.convert_yunits(linelengths)
+
+ if not iterable(positions):
+ positions = [positions]
+ elif any(iterable(position) for position in positions):
+ positions = [np.asanyarray(position) for position in positions]
+ else:
+ positions = [np.asanyarray(positions)]
+
+ if len(positions) == 0:
+ return []
+
+ # prevent 'singular' keys in the **kwargs dict from overriding the
+ # effect of 'plural' keyword arguments (e.g. 'color' overriding 'colors')
+ colors = cbook.local_over_kwdict(colors, kwargs, 'color')
+ linewidths = cbook.local_over_kwdict(linewidths, kwargs, 'linewidth')
+ linestyles = cbook.local_over_kwdict(linestyles, kwargs, 'linestyle')
+
+ if not iterable(lineoffsets):
+ lineoffsets = [lineoffsets]
+ if not iterable(linelengths):
+ linelengths = [linelengths]
+ if not iterable(linewidths):
+ linewidths = [linewidths]
+ if not iterable(colors):
+ colors = [colors]
+ if hasattr(linestyles, 'lower') or not iterable(linestyles):
+ linestyles = [linestyles]
+
+ lineoffsets = np.asarray(lineoffsets)
+ linelengths = np.asarray(linelengths)
+ linewidths = np.asarray(linewidths)
+
+ if len(lineoffsets) == 0:
+ lineoffsets = [None]
+ if len(linelengths) == 0:
+ linelengths = [None]
+ if len(linewidths) == 0:
+ linewidths = [None]
+ if len(colors) == 0:
+ colors = [None]
+ try:
+ # Early conversion of the colors into RGBA values to take care
+ # of cases like colors='0.5' or colors='C1'. (Issue #8193)
+ colors = mcolors.to_rgba_array(colors)
+ except ValueError:
+ # Will fail if any element of *colors* is None. But as long
+ # as len(colors) == 1 or len(positions), the rest of the
+ # code should process *colors* properly.
+ pass
+
+ if len(lineoffsets) == 1 and len(positions) != 1:
+ lineoffsets = np.tile(lineoffsets, len(positions))
+ lineoffsets[0] = 0
+ lineoffsets = np.cumsum(lineoffsets)
+ if len(linelengths) == 1:
+ linelengths = np.tile(linelengths, len(positions))
+ if len(linewidths) == 1:
+ linewidths = np.tile(linewidths, len(positions))
+ if len(colors) == 1:
+ colors = list(colors)
+ colors = colors * len(positions)
+ if len(linestyles) == 1:
+ linestyles = [linestyles] * len(positions)
+
+ if len(lineoffsets) != len(positions):
+ raise ValueError('lineoffsets and positions are unequal sized '
+ 'sequences')
+ if len(linelengths) != len(positions):
+ raise ValueError('linelengths and positions are unequal sized '
+ 'sequences')
+ if len(linewidths) != len(positions):
+ raise ValueError('linewidths and positions are unequal sized '
+ 'sequences')
+ if len(colors) != len(positions):
+ raise ValueError('colors and positions are unequal sized '
+ 'sequences')
+ if len(linestyles) != len(positions):
+ raise ValueError('linestyles and positions are unequal sized '
+ 'sequences')
+
+ colls = []
+ for position, lineoffset, linelength, linewidth, color, linestyle in \
+ zip(positions, lineoffsets, linelengths, linewidths,
+ colors, linestyles):
+ coll = mcoll.EventCollection(position,
+ orientation=orientation,
+ lineoffset=lineoffset,
+ linelength=linelength,
+ linewidth=linewidth,
+ color=color,
+ linestyle=linestyle)
+ self.add_collection(coll, autolim=False)
+ coll.update(kwargs)
+ colls.append(coll)
+
+ if len(positions) > 0:
+ # try to get min/max
+ min_max = [(np.min(_p), np.max(_p)) for _p in positions
+ if len(_p) > 0]
+ # if we have any non-empty positions, try to autoscale
+ if len(min_max) > 0:
+ mins, maxes = zip(*min_max)
+ minpos = np.min(mins)
+ maxpos = np.max(maxes)
+
+ minline = (lineoffsets - linelengths).min()
+ maxline = (lineoffsets + linelengths).max()
+
+ if (orientation is not None and
+ orientation.lower() == "vertical"):
+ corners = (minline, minpos), (maxline, maxpos)
+ else: # "horizontal", None or "none" (see EventCollection)
+ corners = (minpos, minline), (maxpos, maxline)
+ self.update_datalim(corners)
+ self.autoscale_view()
+
+ return colls
+
+ #### Basic plotting
+ # The label_naming happens in `matplotlib.axes._base._plot_args`
+ @_preprocess_data(replace_names=["x", "y"],
+ positional_parameter_names=_plot_args_replacer,
+ label_namer=None)
+ @docstring.dedent_interpd
+ def plot(self, *args, **kwargs):
+ """
+ Plot y versus x as lines and/or markers.
+
+ Call signatures::
+
+ plot([x], y, [fmt], data=None, **kwargs)
+ plot([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
+
+ The coordinates of the points or line nodes are given by *x*, *y*.
+
+ The optional parameter *fmt* is a convenient way for defining basic
+ formatting like color, marker and linestyle. It's a shortcut string
+ notation described in the *Notes* section below.
+
+ >>> plot(x, y) # plot x and y using default line style and color
+ >>> plot(x, y, 'bo') # plot x and y using blue circle markers
+ >>> plot(y) # plot y using x as index array 0..N-1
+ >>> plot(y, 'r+') # ditto, but with red plusses
+
+ You can use `.Line2D` properties as keyword arguments for more
+ control on the appearance. Line properties and *fmt* can be mixed.
+ The following two calls yield identical results:
+
+ >>> plot(x, y, 'go--', linewidth=2, markersize=12)
+ >>> plot(x, y, color='green', marker='o', linestyle='dashed',
+ linewidth=2, markersize=12)
+
+ When conflicting with *fmt*, keyword arguments take precedence.
+
+ **Plotting labelled data**
+
+ There's a convenient way for plotting objects with labelled data (i.e.
+ data that can be accessed by index ``obj['y']``). Instead of giving
+ the data in *x* and *y*, you can provide the object in the *data*
+ parameter and just give the labels for *x* and *y*::
+
+ >>> plot('xlabel', 'ylabel', data=obj)
+
+ All indexable objects are supported. This could e.g. be a `dict`, a
+ `pandas.DataFrame` or a structured numpy array.
+
+
+ **Plotting multiple sets of data**
+
+ There are various ways to plot multiple sets of data.
+
+ - The most straightforward way is just to call `plot` multiple times.
+ Example:
+
+ >>> plot(x1, y1, 'bo')
+ >>> plot(x2, y2, 'go')
+
+ - Alternatively, if your data is already a 2d array, you can pass it
+ directly to *x*, *y*. A separate data set will be drawn for every
+ column.
+
+ Example: an array ``a`` where the first column represents the *x*
+ values and the other columns are the *y* columns::
+
+ >>> plot(a[0], a[1:])
+
+ - The third way is to specify multiple sets of *[x]*, *y*, *[fmt]*
+ groups::
+
+ >>> plot(x1, y1, 'g^', x2, y2, 'g-')
+
+ In this case, any additional keyword argument applies to all
+ datasets. Also this syntax cannot be combined with the *data*
+ parameter.
+
+ By default, each line is assigned a different style specified by a
+ 'style cycle'. The *fmt* and line property parameters are only
+ necessary if you want explicit deviations from these defaults.
+ Alternatively, you can also change the style cycle using the
+ 'axes.prop_cycle' rcParam.
+
+ Parameters
+ ----------
+ x, y : array-like or scalar
+ The horizontal / vertical coordinates of the data points.
+ *x* values are optional. If not given, they default to
+ ``[0, ..., N-1]``.
+
+ Commonly, these parameters are arrays of length N. However,
+ scalars are supported as well (equivalent to an array with
+ constant value).
+
+ The parameters can also be 2-dimensional. Then, the columns
+ represent separate data sets.
+
+ fmt : str, optional
+ A format string, e.g. 'ro' for red circles. See the *Notes*
+ section for a full description of the format strings.
+
+ Format strings are just an abbreviation for quickly setting
+ basic line properties. All of these and more can also be
+ controlled by keyword arguments.
+
+ data : indexable object, optional
+ An object with labelled data. If given, provide the label names to
+ plot in *x* and *y*.
+
+ .. note::
+ Technically there's a slight ambiguity in calls where the
+ second label is a valid *fmt*. `plot('n', 'o', data=obj)`
+ could be `plot(x, y)` or `plot(y, fmt)`. In such cases,
+ the former interpretation is chosen, but a warning is issued.
+ You may suppress the warning by adding an empty format string
+ `plot('n', 'o', '', data=obj)`.
+
+
+ Other Parameters
+ ----------------
+ scalex, scaley : bool, optional, default: True
+ These parameters determine whether the view limits are adapted
+ to the data limits. The values are passed on to `autoscale_view`.
+
+ **kwargs : `.Line2D` properties, optional
+ *kwargs* are used to specify properties like a line label (for
+ auto legends), linewidth, antialiasing, marker face color.
+ Example::
+
+ >>> plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
+ >>> plot([1,2,3], [1,4,9], 'rs', label='line 2')
+
+ If you make multiple lines with one plot command, the kwargs
+ apply to all those lines.
+
+ Here is a list of available `.Line2D` properties:
+
+ %(Line2D)s
+
+ Returns
+ -------
+ lines
+ A list of `.Line2D` objects representing the plotted data.
+
+
+ See Also
+ --------
+ scatter : XY scatter plot with markers of varying size and/or color
+ (sometimes also called a bubble chart).
+
+
+ Notes
+ -----
+ **Format Strings**
+
+ A format string consists of a part for color, marker and line::
+
+ fmt = '[color][marker][line]'
+
+ Each of them is optional. If not provided, the value from the style
+ cycle is used. Exception: If ``line`` is given, but no ``marker``,
+ the data will be a line without markers.
+
+ **Colors**
+
+ The following color abbreviations are supported:
+
+ ============= ===============================
+ character color
+ ============= ===============================
+ ``'b'`` blue
+ ``'g'`` green
+ ``'r'`` red
+ ``'c'`` cyan
+ ``'m'`` magenta
+ ``'y'`` yellow
+ ``'k'`` black
+ ``'w'`` white
+ ============= ===============================
+
+ If the color is the only part of the format string, you can
+ additionally use any `matplotlib.colors` spec, e.g. full names
+ (``'green'``) or hex strings (``'#008000'``).
+
+ **Markers**
+
+ ============= ===============================
+ character description
+ ============= ===============================
+ ``'.'`` point marker
+ ``','`` pixel marker
+ ``'o'`` circle marker
+ ``'v'`` triangle_down marker
+ ``'^'`` triangle_up marker
+ ``'<'`` triangle_left marker
+ ``'>'`` triangle_right marker
+ ``'1'`` tri_down marker
+ ``'2'`` tri_up marker
+ ``'3'`` tri_left marker
+ ``'4'`` tri_right marker
+ ``'s'`` square marker
+ ``'p'`` pentagon marker
+ ``'*'`` star marker
+ ``'h'`` hexagon1 marker
+ ``'H'`` hexagon2 marker
+ ``'+'`` plus marker
+ ``'x'`` x marker
+ ``'D'`` diamond marker
+ ``'d'`` thin_diamond marker
+ ``'|'`` vline marker
+ ``'_'`` hline marker
+ ============= ===============================
+
+ **Line Styles**
+
+ ============= ===============================
+ character description
+ ============= ===============================
+ ``'-'`` solid line style
+ ``'--'`` dashed line style
+ ``'-.'`` dash-dot line style
+ ``':'`` dotted line style
+ ============= ===============================
+
+ Example format strings::
+
+ 'b' # blue markers with default shape
+ 'ro' # red circles
+ 'g-' # green solid line
+ '--' # dashed line with default color
+ 'k^:' # black triangle_up markers connected by a dotted line
+
+ """
+ scalex = kwargs.pop('scalex', True)
+ scaley = kwargs.pop('scaley', True)
+
+ if not self._hold:
+ self.cla()
+ lines = []
+
+ kwargs = cbook.normalize_kwargs(kwargs, _alias_map)
+
+ for line in self._get_lines(*args, **kwargs):
+ self.add_line(line)
+ lines.append(line)
+
+ self.autoscale_view(scalex=scalex, scaley=scaley)
+ return lines
+
+ @_preprocess_data(replace_names=["x", "y"], label_namer="y")
+ @docstring.dedent_interpd
+ def plot_date(self, x, y, fmt='o', tz=None, xdate=True, ydate=False,
+ **kwargs):
+ """
+ Plot data that contains dates.
+
+ Similar to `.plot`, this plots *y* vs. *x* as lines or markers.
+ However, the axis labels are formatted as dates depending on *xdate*
+ and *ydate*.
+
+ Parameters
+ ----------
+ x, y : array-like
+ The coordinates of the data points. If *xdate* or *ydate* is
+ *True*, the respective values *x* or *y* are interpreted as
+ :ref:`Matplotlib dates <date-format>`.
+
+ fmt : str, optional
+ The plot format string. For details, see the corresponding
+ parameter in `.plot`.
+
+ tz : [ *None* | timezone string | :class:`tzinfo` instance]
+ The time zone to use in labeling dates. If *None*, defaults to
+ rcParam ``timezone``.
+
+ xdate : bool, optional, default: True
+ If *True*, the *x*-axis will be interpreted as Matplotlib dates.
+
+ ydate : bool, optional, default: False
+ If *True*, the *y*-axis will be interpreted as Matplotlib dates.
+
+
+ Returns
+ -------
+ lines
+ A list of `~.Line2D` objects representing the plotted data.
+
+
+ Other Parameters
+ ----------------
+ **kwargs
+ Keyword arguments control the :class:`~matplotlib.lines.Line2D`
+ properties:
+
+ %(Line2D)s
+
+
+ See Also
+ --------
+ matplotlib.dates : Helper functions on dates.
+ matplotlib.dates.date2num : Convert dates to num.
+ matplotlib.dates.num2date : Convert num to dates.
+ matplotlib.dates.drange : Create an equally spaced sequence of dates.
+
+
+ Notes
+ -----
+ If you are using custom date tickers and formatters, it may be
+ necessary to set the formatters/locators after the call to
+ `.plot_date`. `.plot_date` will set the default tick locator to
+ `.AutoDateLocator` (if the tick locator is not already set to a
+ `.DateLocator` instance) and the default tick formatter to
+ `.AutoDateFormatter` (if the tick formatter is not already set to a
+ `.DateFormatter` instance).
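+
+ Examples
+ --------
+ A minimal sketch (the dates are arbitrary):
+
+ >>> import datetime
+ >>> import matplotlib.dates as mdates
+ >>> dates = mdates.drange(datetime.datetime(2018, 1, 1),
+ ... datetime.datetime(2018, 1, 8),
+ ... datetime.timedelta(days=1))
+ >>> plot_date(dates, range(len(dates)))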
+ """
+
+ if not self._hold:
+ self.cla()
+
+ if xdate:
+ self.xaxis_date(tz)
+ if ydate:
+ self.yaxis_date(tz)
+
+ ret = self.plot(x, y, fmt, **kwargs)
+
+ self.autoscale_view()
+
+ return ret
+
+ # @_preprocess_data() # let 'plot' do the unpacking..
+ @docstring.dedent_interpd
+ def loglog(self, *args, **kwargs):
+ """
+ Make a plot with log scaling on both the x and y axis.
+
+ Call signatures::
+
+ loglog([x], y, [fmt], data=None, **kwargs)
+ loglog([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
+
+ This is just a thin wrapper around `.plot` which additionally changes
+ both the x-axis and the y-axis to log scaling. All of the concepts and
+ parameters of plot can be used here as well.
+
+ The additional parameters *basex/y*, *subsx/y* and *nonposx/y* control
+ the x/y-axis properties. They are just forwarded to `.Axes.set_xscale`
+ and `.Axes.set_yscale`.
+
+ Parameters
+ ----------
+ basex, basey : scalar, optional, default 10
+ Base of the x/y logarithm.
+
+ subsx, subsy : sequence, optional
+ The location of the minor x/y ticks. If *None*, reasonable
+ locations are automatically chosen depending on the number of
+ decades in the plot.
+ See `.Axes.set_xscale` / `.Axes.set_yscale` for details.
+
+ nonposx, nonposy : {'mask', 'clip'}, optional, default 'mask'
+ Non-positive values in x or y can be masked as invalid, or clipped
+ to a very small positive number.
+
+ Returns
+ -------
+ lines
+ A list of `~.Line2D` objects representing the plotted data.
+
+ Other Parameters
+ ----------------
+ **kwargs
+ All parameters supported by `.plot`.
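+
+ Examples
+ --------
+ A minimal sketch of a power law on log-log axes, assuming
+ ``numpy`` is imported as ``np`` (the data are arbitrary).
+
+ >>> x = np.logspace(0, 3, 50)
+ >>> loglog(x, x**2)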
+ """
+ if not self._hold:
+ self.cla()
+
+ dx = {k: kwargs.pop(k) for k in ['basex', 'subsx', 'nonposx']
+ if k in kwargs}
+ dy = {k: kwargs.pop(k) for k in ['basey', 'subsy', 'nonposy']
+ if k in kwargs}
+
+ self.set_xscale('log', **dx)
+ self.set_yscale('log', **dy)
+
+ b = self._hold
+ self._hold = True # we've already processed the hold
+ l = self.plot(*args, **kwargs)
+ self._hold = b # restore the hold
+
+ return l
+
+ # @_preprocess_data() # let 'plot' do the unpacking..
+ @docstring.dedent_interpd
+ def semilogx(self, *args, **kwargs):
+ """
+ Make a plot with log scaling on the x axis.
+
+ Call signatures::
+
+ semilogx([x], y, [fmt], data=None, **kwargs)
+ semilogx([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
+
+ This is just a thin wrapper around `.plot` which additionally changes
+ the x-axis to log scaling. All of the concepts and parameters of plot
+ can be used here as well.
+
+ The additional parameters *basex*, *subsx* and *nonposx* control the
+ x-axis properties. They are just forwarded to `.Axes.set_xscale`.
+
+ Parameters
+ ----------
+ basex : scalar, optional, default 10
+ Base of the x logarithm.
+
+ subsx : array_like, optional
+ The location of the minor xticks. If *None*, reasonable locations
+ are automatically chosen depending on the number of decades in the
+ plot. See `.Axes.set_xscale` for details.
+
+ nonposx : {'mask', 'clip'}, optional, default 'mask'
+ Non-positive values in x can be masked as invalid, or clipped to a
+ very small positive number.
+
+ Returns
+ -------
+ lines
+ A list of `~.Line2D` objects representing the plotted data.
+
+ Other Parameters
+ ----------------
+ **kwargs
+ All parameters supported by `.plot`.
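+
+ Examples
+ --------
+ A minimal sketch, assuming ``numpy`` is imported as ``np``
+ (the data are arbitrary).
+
+ >>> x = np.logspace(0, 2, 50)
+ >>> semilogx(x, np.sqrt(x))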
+ """
+ if not self._hold:
+ self.cla()
+ d = {k: kwargs.pop(k) for k in ['basex', 'subsx', 'nonposx']
+ if k in kwargs}
+
+ self.set_xscale('log', **d)
+ b = self._hold
+ self._hold = True # we've already processed the hold
+ l = self.plot(*args, **kwargs)
+ self._hold = b # restore the hold
+ return l
+
+ # @_preprocess_data() # let 'plot' do the unpacking..
+ @docstring.dedent_interpd
+ def semilogy(self, *args, **kwargs):
+ """
+ Make a plot with log scaling on the y axis.
+
+ Call signatures::
+
+ semilogy([x], y, [fmt], data=None, **kwargs)
+ semilogy([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
+
+ This is just a thin wrapper around `.plot` which additionally changes
+ the y-axis to log scaling. All of the concepts and parameters of plot
+ can be used here as well.
+
+ The additional parameters *basey*, *subsy* and *nonposy* control the
+ y-axis properties. They are just forwarded to `.Axes.set_yscale`.
+
+ Parameters
+ ----------
+ basey : scalar, optional, default 10
+ Base of the y logarithm.
+
+ subsy : array_like, optional
+ The location of the minor yticks. If *None*, reasonable locations
+ are automatically chosen depending on the number of decades in the
+ plot. See `.Axes.set_yscale` for details.
+
+ nonposy : {'mask', 'clip'}, optional, default 'mask'
+ Non-positive values in y can be masked as invalid, or clipped to a
+ very small positive number.
+
+ Returns
+ -------
+ lines
+ A list of `~.Line2D` objects representing the plotted data.
+
+ Other Parameters
+ ----------------
+ **kwargs
+ All parameters supported by `.plot`.
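+
+ Examples
+ --------
+ A minimal sketch of an exponential decay, assuming ``numpy``
+ is imported as ``np`` (the data are arbitrary).
+
+ >>> t = np.linspace(0, 5, 100)
+ >>> semilogy(t, np.exp(-t))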
+ """
+ if not self._hold:
+ self.cla()
+ d = {k: kwargs.pop(k) for k in ['basey', 'subsy', 'nonposy']
+ if k in kwargs}
+ self.set_yscale('log', **d)
+ b = self._hold
+ self._hold = True # we've already processed the hold
+ l = self.plot(*args, **kwargs)
+ self._hold = b # restore the hold
+
+ return l
+
+ @_preprocess_data(replace_names=["x"], label_namer="x")
+ def acorr(self, x, **kwargs):
+ """
+ Plot the autocorrelation of *x*.
+
+ Parameters
+ ----------
+
+ x : sequence of scalar
+
+ hold : bool, optional, *deprecated*, default: True
+
+ detrend : callable, optional, default: `mlab.detrend_none`
+ *x* is detrended by the *detrend* callable. The default is no
+ detrending.
+
+ normed : bool, optional, default: True
+ If ``True``, input vectors are normalised to unit length.
+
+ usevlines : bool, optional, default: True
+ If ``True``, `Axes.vlines` is used to plot the vertical lines from
+ the origin to the acorr. Otherwise, `Axes.plot` is used.
+
+ maxlags : integer, optional, default: 10
+ Number of lags to show. If ``None``, will return all
+ ``2 * len(x) - 1`` lags.
+
+ Returns
+ -------
+ lags : array (length ``2*maxlags+1``)
+ lag vector.
+ c : array (length ``2*maxlags+1``)
+ auto correlation vector.
+ line : `.LineCollection` or `.Line2D`
+ `.Artist` added to the axes of the correlation.
+
+ `.LineCollection` if *usevlines* is True
+ `.Line2D` if *usevlines* is False
+ b : `.Line2D` or None
+ Horizontal line at 0 if *usevlines* is True
+ None if *usevlines* is False.
+
+ Other Parameters
+ ----------------
+ linestyle : `~matplotlib.lines.Line2D` prop, optional, default: None
+ Only used if usevlines is ``False``.
+
+ marker : string, optional, default: 'o'
+
+ Notes
+ -----
+ The cross correlation is performed with :func:`numpy.correlate` with
+ ``mode = 2``.
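+
+ Examples
+ --------
+ A minimal sketch, assuming ``numpy`` is imported as ``np``
+ (the noise sequence is arbitrary).
+
+ >>> x = np.random.randn(100)
+ >>> lags, c, line, b = acorr(x, maxlags=20)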
+ """
+ if "hold" in kwargs:
+ warnings.warn("the 'hold' kwarg is deprecated", mplDeprecation)
+ return self.xcorr(x, x, **kwargs)
+
+ @_preprocess_data(replace_names=["x", "y"], label_namer="y")
+ def xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
+ usevlines=True, maxlags=10, **kwargs):
+ r"""
+ Plot the cross correlation between *x* and *y*.
+
+ The correlation with lag *k* is defined as
+ :math:`\sum_n x[n+k] \cdot \overline{y[n]}`, where
+ :math:`\overline{y[n]}` denotes the complex conjugate of :math:`y[n]`.
+
+ Parameters
+ ----------
+ x : sequence of scalars of length n
+
+ y : sequence of scalars of length n
+
+ hold : bool, optional, *deprecated*, default: True
+
+ detrend : callable, optional, default: `mlab.detrend_none`
+ *x* and *y* are detrended by the *detrend* callable. The default
+ is no detrending.
+
+ normed : bool, optional, default: True
+ If ``True``, input vectors are normalised to unit length.
+
+ usevlines : bool, optional, default: True
+ If ``True``, `Axes.vlines` is used to plot the vertical lines from
+ the origin to the xcorr. Otherwise, `Axes.plot` is used.
+
+ maxlags : int, optional
+ Number of lags to show. If None, will return all ``2 * len(x) - 1``
+ lags. Default is 10.
+
+ Returns
+ -------
+ lags : array (length ``2*maxlags+1``)
+ lag vector.
+ c : array (length ``2*maxlags+1``)
+ cross correlation vector.
+ line : `.LineCollection` or `.Line2D`
+ `.Artist` added to the axes of the correlation
+
+ `.LineCollection` if *usevlines* is True
+ `.Line2D` if *usevlines* is False
+ b : `.Line2D` or None
+ Horizontal line at 0 if *usevlines* is True
+ None if *usevlines* is False.
+
+ Other Parameters
+ ----------------
+ linestyle : `~matplotlib.lines.Line2D` property, optional
+ Only used if usevlines is ``False``.
+
+ marker : string, optional
+ Default is 'o'.
+
+ Notes
+ -----
+ The cross correlation is performed with :func:`numpy.correlate` with
+ ``mode = 2``.
+ """
+ if "hold" in kwargs:
+ warnings.warn("the 'hold' kwarg is deprecated", mplDeprecation)
+
+ Nx = len(x)
+ if Nx != len(y):
+ raise ValueError('x and y must be equal length')
+
+ x = detrend(np.asarray(x))
+ y = detrend(np.asarray(y))
+
+ correls = np.correlate(x, y, mode=2)
+
+ if normed:
+ correls /= np.sqrt(np.dot(x, x) * np.dot(y, y))
+
+ if maxlags is None:
+ maxlags = Nx - 1
+
+ if maxlags >= Nx or maxlags < 1:
+ raise ValueError('maxlags must be None or strictly '
+ 'positive < %d' % Nx)
+
+ lags = np.arange(-maxlags, maxlags + 1)
+ correls = correls[Nx - 1 - maxlags:Nx + maxlags]
+
+ if usevlines:
+ a = self.vlines(lags, [0], correls, **kwargs)
+ # Make label empty so only vertical lines get a legend entry
+ kwargs.pop('label', '')
+ b = self.axhline(**kwargs)
+ else:
+ kwargs.setdefault('marker', 'o')
+ kwargs.setdefault('linestyle', 'None')
+ a, = self.plot(lags, correls, **kwargs)
+ b = None
+ return lags, correls, a, b
+
+ #### Specialized plotting
+
+ @_preprocess_data(replace_names=["x", "y"], label_namer="y")
+ def step(self, x, y, *args, **kwargs):
+ """
+ Make a step plot.
+
+ Call signatures::
+
+ step(x, y, [fmt], *, data=None, where='pre', **kwargs)
+ step(x, y, [fmt], x2, y2, [fmt2], ..., *, where='pre', **kwargs)
+
+ This is just a thin wrapper around `.plot` which changes some
+ formatting options. Most of the concepts and parameters of plot can be
+ used here as well.
+
+ Parameters
+ ----------
+ x : array_like
+ 1-D sequence of x positions. It is assumed, but not checked, that
+ it is uniformly increasing.
+
+ y : array_like
+ 1-D sequence of y levels.
+
+ fmt : str, optional
+ A format string, e.g. 'g' for a green line. See `.plot` for a more
+ detailed description.
+
+ Note: While full format strings are accepted, it is recommended to
+ only specify the color. Line styles are currently ignored (use
+ the keyword argument *linestyle* instead). Markers are accepted
+ and plotted on the given positions, however, this is a rarely
+ needed feature for step plots.
+
+ data : indexable object, optional
+ An object with labelled data. If given, provide the label names to
+ plot in *x* and *y*.
+
+ where : {'pre', 'post', 'mid'}, optional, default 'pre'
+ Define where the steps should be placed:
+
+ - 'pre': The y value is continued constantly to the left from
+ every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the
+ value ``y[i]``.
+ - 'post': The y value is continued constantly to the right from
+ every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the
+ value ``y[i]``.
+ - 'mid': Steps occur half-way between the *x* positions.
+
+ Returns
+ -------
+ lines
+ A list of `.Line2D` objects representing the plotted data.
+
+ Other Parameters
+ ----------------
+ **kwargs
+ Additional parameters are the same as those for `.plot`.
+
+ Notes
+ -----
+ .. [notes section required to get data note injection right]
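+
+ Examples
+ --------
+ A minimal, illustrative call (the values are arbitrary).
+
+ >>> step([1, 2, 3, 4], [1, 3, 2, 4], where='mid')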
+ """
+ where = kwargs.pop('where', 'pre')
+ if where not in ('pre', 'post', 'mid'):
+ raise ValueError("'where' argument to step must be "
+ "'pre', 'post' or 'mid'")
+ usr_linestyle = kwargs.pop('linestyle', '')
+ kwargs['linestyle'] = 'steps-' + where + usr_linestyle
+
+ return self.plot(x, y, *args, **kwargs)
+
+ @_preprocess_data(replace_names=["x", "left",
+ "height", "width",
+ "y", "bottom",
+ "color", "edgecolor", "linewidth",
+ "tick_label", "xerr", "yerr",
+ "ecolor"],
+ label_namer=None,
+ replace_all_args=True
+ )
+ @docstring.dedent_interpd
+ def bar(self, *args, **kwargs):
+ r"""
+ Make a bar plot.
+
+ Call signatures::
+
+ bar(x, height, *, align='center', **kwargs)
+ bar(x, height, width, *, align='center', **kwargs)
+ bar(x, height, width, bottom, *, align='center', **kwargs)
+
+ The bars are positioned at *x*, aligned according to *align*. Their
+ dimensions are given by *width* and *height*. The vertical baseline
+ is *bottom* (default 0).
+
+ Each of *x*, *height*, *width*, and *bottom* may either be a scalar
+ applying to all bars, or it may be a sequence of length N providing a
+ separate value for each bar.
+
+
+ Parameters
+ ----------
+ x : sequence of scalars
+ The x coordinates of the bars. See also *align* for the
+ alignment of the bars to the coordinates.
+
+ height : scalar or sequence of scalars
+ The height(s) of the bars.
+
+ width : scalar or array-like, optional
+ The width(s) of the bars (default: 0.8).
+
+ bottom : scalar or array-like, optional
+ The y coordinate(s) of the bars bases (default: 0).
+
+ align : {'center', 'edge'}, optional, default: 'center'
+ Alignment of the bars to the *x* coordinates:
+
+ - 'center': Center the base on the *x* positions.
+ - 'edge': Align the left edges of the bars with the *x* positions.
+
+ To align the bars on the right edge pass a negative *width* and
+ ``align='edge'``.
+
+ Returns
+ -------
+ container : `.BarContainer`
+ Container with all the bars and optionally errorbars.
+
+ Other Parameters
+ ----------------
+ color : scalar or array-like, optional
+ The colors of the bar faces.
+
+ edgecolor : scalar or array-like, optional
+ The colors of the bar edges.
+
+ linewidth : scalar or array-like, optional
+ Width of the bar edge(s). If 0, don't draw edges.
+
+ tick_label : string or array-like, optional
+ The tick labels of the bars.
+ Default: None (Use default numeric labels.)
+
+ xerr, yerr : scalar or array-like of shape(N,) or shape(2,N), optional
+ If not *None*, add horizontal / vertical errorbars to the bar tips.
+ The values are +/- sizes relative to the data:
+
+ - scalar: symmetric +/- values for all bars
+ - shape(N,): symmetric +/- values for each bar
+ - shape(2,N): Separate - and + values for each bar. First row
+ contains the lower errors, the second row contains the
+ upper errors.
+ - *None*: No errorbar. (Default)
+
+ See :doc:`/gallery/statistics/errorbar_features`
+ for an example on the usage of ``xerr`` and ``yerr``.
+
+ ecolor : scalar or array-like, optional, default: 'black'
+ The line color of the errorbars.
+
+ capsize : scalar, optional
+ The length of the error bar caps in points.
+ Default: None, which will take the value from
+ :rc:`errorbar.capsize`.
+
+ error_kw : dict, optional
+ Dictionary of kwargs to be passed to the `~.Axes.errorbar`
+ method. Values of *ecolor* or *capsize* defined here take
+ precedence over the independent kwargs.
+
+ log : bool, optional, default: False
+ If *True*, set the y-axis to be log scale.
+
+ orientation : {'vertical', 'horizontal'}, optional
+ *This is for internal use only.* Please use `barh` for
+ horizontal bar plots. Default: 'vertical'.
+
+ See also
+ --------
+ barh: Plot a horizontal bar plot.
+
+ Notes
+ -----
+ The optional arguments *color*, *edgecolor*, *linewidth*,
+ *xerr*, and *yerr* can be either scalars or sequences of
+ length equal to the number of bars. This enables you to use
+ bar as the basis for stacked bar charts, or candlestick plots.
+ Detail: *xerr* and *yerr* are passed directly to
+ :meth:`errorbar`, so they can also have shape 2xN for
+ independent specification of lower and upper errors.
+
+ Other optional kwargs:
+
+ %(Rectangle)s
+
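+ Examples
+ --------
+ A minimal, illustrative call (the values are arbitrary).
+
+ >>> bar([1, 2, 3], [3, 7, 5], width=0.6)
+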
+ """
+ kwargs = cbook.normalize_kwargs(kwargs, mpatches._patch_alias_map)
+ # this is using the lambdas to do the arg/kwarg unpacking rather
+ # than trying to re-implement all of that logic ourselves.
+ matchers = [
+ (lambda x, height, width=0.8, bottom=None, **kwargs:
+ (False, x, height, width, bottom, kwargs)),
+ (lambda left, height, width=0.8, bottom=None, **kwargs:
+ (True, left, height, width, bottom, kwargs)),
+ ]
+ exps = []
+ for matcher in matchers:
+ try:
+ dp, x, height, width, y, kwargs = matcher(*args, **kwargs)
+ except TypeError as e:
+ # This can only come from a no-match as there is
+ # no other logic in the matchers.
+ exps.append(e)
+ else:
+ break
+ else:
+ raise exps[0]
+ # if we matched the second-case, then the user passed in
+ # left=val as a kwarg which we want to deprecate
+ if dp:
+ warnings.warn(
+ "The *left* kwarg to `bar` is deprecated use *x* instead. "
+ "Support for *left* will be removed in Matplotlib 3.0",
+ mplDeprecation, stacklevel=2)
+ if not self._hold:
+ self.cla()
+ color = kwargs.pop('color', None)
+ if color is None:
+ color = self._get_patches_for_fill.get_next_color()
+ edgecolor = kwargs.pop('edgecolor', None)
+ linewidth = kwargs.pop('linewidth', None)
+
+ # Because xerr and yerr will be passed to errorbar,
+ # most dimension checking and processing will be left
+ # to the errorbar method.
+ xerr = kwargs.pop('xerr', None)
+ yerr = kwargs.pop('yerr', None)
+ error_kw = kwargs.pop('error_kw', dict())
+ ecolor = kwargs.pop('ecolor', 'k')
+ capsize = kwargs.pop('capsize', rcParams["errorbar.capsize"])
+ error_kw.setdefault('ecolor', ecolor)
+ error_kw.setdefault('capsize', capsize)
+
+ if rcParams['_internal.classic_mode']:
+ align = kwargs.pop('align', 'edge')
+ else:
+ align = kwargs.pop('align', 'center')
+
+ orientation = kwargs.pop('orientation', 'vertical')
+ log = kwargs.pop('log', False)
+ label = kwargs.pop('label', '')
+ tick_labels = kwargs.pop('tick_label', None)
+
+ adjust_ylim = False
+ adjust_xlim = False
+
+ if orientation == 'vertical':
+ if y is None:
+ if self.get_yscale() == 'log':
+ adjust_ylim = True
+ y = 0
+
+ elif orientation == 'horizontal':
+ if x is None:
+ if self.get_xscale() == 'log':
+ adjust_xlim = True
+ x = 0
+
+ if orientation == 'vertical':
+ self._process_unit_info(xdata=x, ydata=height, kwargs=kwargs)
+ if log:
+ self.set_yscale('log', nonposy='clip')
+ elif orientation == 'horizontal':
+ self._process_unit_info(xdata=width, ydata=y, kwargs=kwargs)
+ if log:
+ self.set_xscale('log', nonposx='clip')
+ else:
+ raise ValueError('invalid orientation: %s' % orientation)
+
+ # lets do some conversions now since some types cannot be
+ # subtracted uniformly
+ if self.xaxis is not None:
+ x = self.convert_xunits(x)
+ width = self.convert_xunits(width)
+ if xerr is not None:
+ xerr = self.convert_xunits(xerr)
+
+ if self.yaxis is not None:
+ y = self.convert_yunits(y)
+ height = self.convert_yunits(height)
+ if yerr is not None:
+ yerr = self.convert_yunits(yerr)
+
+ x, height, width, y, linewidth = np.broadcast_arrays(
+ # Make args iterable too.
+ np.atleast_1d(x), height, width, y, linewidth)
+
+ # Now that units have been converted, set the tick locations.
+ if orientation == 'vertical':
+ tick_label_axis = self.xaxis
+ tick_label_position = x
+ elif orientation == 'horizontal':
+ tick_label_axis = self.yaxis
+ tick_label_position = y
+
+ linewidth = itertools.cycle(np.atleast_1d(linewidth))
+ color = itertools.chain(itertools.cycle(mcolors.to_rgba_array(color)),
+ # Fallback if color == "none".
+ itertools.repeat([0, 0, 0, 0]))
+ if edgecolor is None:
+ edgecolor = itertools.repeat(None)
+ else:
+ edgecolor = itertools.chain(
+ itertools.cycle(mcolors.to_rgba_array(edgecolor)),
+ # Fallback if edgecolor == "none".
+ itertools.repeat([0, 0, 0, 0]))
+
+ # We will now resolve the alignment and really have
+ # left, bottom, width, height vectors
+ if align == 'center':
+ if orientation == 'vertical':
+ left = x - width / 2
+ bottom = y
+ elif orientation == 'horizontal':
+ bottom = y - height / 2
+ left = x
+ elif align == 'edge':
+ left = x
+ bottom = y
+ else:
+ raise ValueError('invalid alignment: %s' % align)
+
+ patches = []
+ args = zip(left, bottom, width, height, color, edgecolor, linewidth)
+ for l, b, w, h, c, e, lw in args:
+ r = mpatches.Rectangle(
+ xy=(l, b), width=w, height=h,
+ facecolor=c,
+ edgecolor=e,
+ linewidth=lw,
+ label='_nolegend_',
+ )
+ r.update(kwargs)
+ r.get_path()._interpolation_steps = 100
+ if orientation == 'vertical':
+ r.sticky_edges.y.append(b)
+ elif orientation == 'horizontal':
+ r.sticky_edges.x.append(l)
+ self.add_patch(r)
+ patches.append(r)
+
+ holdstate = self._hold
+ self._hold = True # ensure hold is on before plotting errorbars
+
+ if xerr is not None or yerr is not None:
+ if orientation == 'vertical':
+ # using list comps rather than arrays to preserve unit info
+ ex = [l + 0.5 * w for l, w in zip(left, width)]
+ ey = [b + h for b, h in zip(bottom, height)]
+
+ elif orientation == 'horizontal':
+ # using list comps rather than arrays to preserve unit info
+ ex = [l + w for l, w in zip(left, width)]
+ ey = [b + 0.5 * h for b, h in zip(bottom, height)]
+
+ error_kw.setdefault("label", '_nolegend_')
+
+ errorbar = self.errorbar(ex, ey,
+ yerr=yerr, xerr=xerr,
+ fmt='none', **error_kw)
+ else:
+ errorbar = None
+
+ self._hold = holdstate # restore previous hold state
+
+ if adjust_xlim:
+ xmin, xmax = self.dataLim.intervalx
+ xmin = min(w for w in width if w > 0)
+ if xerr is not None:
+ xmin = xmin - np.max(xerr)
+ xmin = max(xmin * 0.9, 1e-100)
+ self.dataLim.intervalx = (xmin, xmax)
+
+ if adjust_ylim:
+ ymin, ymax = self.dataLim.intervaly
+ ymin = min(h for h in height if h > 0)
+ if yerr is not None:
+ ymin = ymin - np.max(yerr)
+ ymin = max(ymin * 0.9, 1e-100)
+ self.dataLim.intervaly = (ymin, ymax)
+ self.autoscale_view()
+
+ bar_container = BarContainer(patches, errorbar, label=label)
+ self.add_container(bar_container)
+
+ if tick_labels is not None:
+ tick_labels = _backports.broadcast_to(tick_labels, len(patches))
+ tick_label_axis.set_ticks(tick_label_position)
+ tick_label_axis.set_ticklabels(tick_labels)
+
+ return bar_container
+
+ @docstring.dedent_interpd
+ def barh(self, *args, **kwargs):
+ r"""
+ Make a horizontal bar plot.
+
+ Call signatures::
+
+ barh(y, width, *, align='center', **kwargs)
+ barh(y, width, height, *, align='center', **kwargs)
+ barh(y, width, height, left, *, align='center', **kwargs)
+
+ The bars are positioned at *y*, aligned according to *align*. Their
+ dimensions are given by *width* and *height*. The horizontal baseline
+ is *left* (default 0).
+
+ Each of *y*, *width*, *height*, and *left* may either be a scalar
+ applying to all bars, or it may be a sequence of length N providing a
+ separate value for each bar.
+
+
+ Parameters
+ ----------
+ y : scalar or array-like
+ The y coordinates of the bars. See also *align* for the
+ alignment of the bars to the coordinates.
+
+ width : scalar or array-like
+ The width(s) of the bars.
+
+ height : sequence of scalars, optional, default: 0.8
+ The heights of the bars.
+
+ left : sequence of scalars
+ The x coordinates of the left sides of the bars (default: 0).
+
+ align : {'center', 'edge'}, optional, default: 'center'
+ Alignment of the bars to the *y* coordinates:
+
+ - 'center': Center the bars on the *y* positions.
+ - 'edge': Align the bottom edges of the bars with the *y*
+ positions.
+
+ To align the bars on the top edge pass a negative *height* and
+ ``align='edge'``.
+
+ Returns
+ -------
+ container : `.BarContainer`
+ Container with all the bars and optionally errorbars.
+
+ Other Parameters
+ ----------------
+ color : scalar or array-like, optional
+ The colors of the bar faces.
+
+ edgecolor : scalar or array-like, optional
+ The colors of the bar edges.
+
+ linewidth : scalar or array-like, optional
+ Width of the bar edge(s). If 0, don't draw edges.
+
+ tick_label : string or array-like, optional
+ The tick labels of the bars.
+ Default: None (Use default numeric labels.)
+
+ xerr, yerr : scalar or array-like of shape(N,) or shape(2,N), optional
+ If not ``None``, add horizontal / vertical errorbars to the
+ bar tips. The values are +/- sizes relative to the data:
+
+ - scalar: symmetric +/- values for all bars
+ - shape(N,): symmetric +/- values for each bar
+ - shape(2,N): Separate - and + values for each bar. First row
+ contains the lower errors, the second row contains the
+ upper errors.
+ - *None*: No errorbar. (default)
+
+ See :doc:`/gallery/statistics/errorbar_features`
+ for an example on the usage of ``xerr`` and ``yerr``.
+
+ ecolor : scalar or array-like, optional, default: 'black'
+ The line color of the errorbars.
+
+ capsize : scalar, optional
+ The length of the error bar caps in points.
+ Default: None, which will take the value from
+ :rc:`errorbar.capsize`.
+
+ error_kw : dict, optional
+ Dictionary of kwargs to be passed to the `~.Axes.errorbar`
+ method. Values of *ecolor* or *capsize* defined here take
+ precedence over the independent kwargs.
+
+ log : bool, optional, default: False
+ If ``True``, set the x-axis to be log scale.
+
+ See also
+ --------
+ bar: Plot a vertical bar plot.
+
+ Notes
+ -----
+ The optional arguments *color*, *edgecolor*, *linewidth*,
+ *xerr*, and *yerr* can be either scalars or sequences of
+ length equal to the number of bars. This enables you to use
+ bar as the basis for stacked bar charts, or candlestick plots.
+ Detail: *xerr* and *yerr* are passed directly to
+ :meth:`errorbar`, so they can also have shape 2xN for
+ independent specification of lower and upper errors.
+
+ Other optional kwargs:
+
+ %(Rectangle)s
+
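+ Examples
+ --------
+ A minimal, illustrative call (the values are arbitrary).
+
+ >>> barh([1, 2, 3], [3, 7, 5], height=0.6)
+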
+ """
+ # this is using the lambdas to do the arg/kwarg unpacking rather
+ # than trying to re-implement all of that logic ourselves.
+ matchers = [
+ (lambda y, width, height=0.8, left=None, **kwargs:
+ (False, y, width, height, left, kwargs)),
+ (lambda bottom, width, height=0.8, left=None, **kwargs:
+ (True, bottom, width, height, left, kwargs)),
+ ]
+ excs = []
+ for matcher in matchers:
+ try:
+ dp, y, width, height, left, kwargs = matcher(*args, **kwargs)
+ except TypeError as e:
+ # This can only come from a no-match as there is
+ # no other logic in the matchers.
+ excs.append(e)
+ else:
+ break
+ else:
+ raise excs[0]
+
+ if dp:
+ warnings.warn(
+ "The *bottom* kwarg to `barh` is deprecated use *y* instead. "
+ "Support for *bottom* will be removed in Matplotlib 3.0",
+ mplDeprecation, stacklevel=2)
+ kwargs.setdefault('orientation', 'horizontal')
+ patches = self.bar(x=left, height=height, width=width,
+ bottom=y, **kwargs)
+ return patches
+
+ @_preprocess_data(label_namer=None)
+ @docstring.dedent_interpd
+ def broken_barh(self, xranges, yrange, **kwargs):
+ """
+ Plot a horizontal sequence of rectangles.
+
+ A rectangle is drawn for each element of *xranges*. All rectangles
+ have the same vertical position and size defined by *yrange*.
+
+ This is a convenience function for instantiating a
+ `.BrokenBarHCollection`, adding it to the axes and autoscaling the
+ view.
+
+ Parameters
+ ----------
+ xranges : sequence of tuples (*xmin*, *xwidth*)
+ The x-positions and extents of the rectangles. For each tuple
+ (*xmin*, *xwidth*) a rectangle is drawn from *xmin* to *xmin* +
+ *xwidth*.
+ yrange : (*ymin*, *yheight*)
+ The y-position and extent for all the rectangles.
+
+ Other Parameters
+ ----------------
+ **kwargs : :class:`.BrokenBarHCollection` properties
+
+ Each *kwarg* can be either a single argument applying to all
+ rectangles, e.g.::
+
+ facecolors='black'
+
+ or a sequence of arguments over which is cycled, e.g.::
+
+ facecolors=('black', 'blue')
+
+ would create interleaving black and blue rectangles.
+
+ Supported keywords:
+
+ %(BrokenBarHCollection)s
+
+ Returns
+ -------
+ collection : A :class:`~.collections.BrokenBarHCollection`
+
+ Notes
+ -----
+ .. [Notes section required for data comment. See #10189.]
+
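+ Examples
+ --------
+ A minimal, illustrative call (the values are arbitrary): two
+ rectangles, one from x=110 to x=140 and one from x=150 to x=160,
+ both spanning y=10 to y=19.
+
+ >>> broken_barh([(110, 30), (150, 10)], (10, 9), facecolors='C0')
+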
+ """
+ # process the unit information
+ if len(xranges):
+ xdata = cbook.safe_first_element(xranges)
+ else:
+ xdata = None
+ if len(yrange):
+ ydata = cbook.safe_first_element(yrange)
+ else:
+ ydata = None
+ self._process_unit_info(xdata=xdata,
+ ydata=ydata,
+ kwargs=kwargs)
+ xranges = self.convert_xunits(xranges)
+ yrange = self.convert_yunits(yrange)
+
+ col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
+ self.add_collection(col, autolim=True)
+ self.autoscale_view()
+
+ return col
+
+ @_preprocess_data(replace_all_args=True, label_namer=None)
+ def stem(self, *args, **kwargs):
+ """
+ Create a stem plot.
+
+ A stem plot plots vertical lines at each *x* location from the baseline
+ to *y*, and places a marker there.
+
+ Call signature::
+
+ stem([x,] y, linefmt=None, markerfmt=None, basefmt=None)
+
+ The x-positions are optional. The formats may be provided either as
+ positional or as keyword-arguments.
+
+ Parameters
+ ----------
+ x : array-like, optional
+ The x-positions of the stems. Default: (0, 1, ..., len(y) - 1).
+
+ y : array-like
+ The y-values of the stem heads.
+
+ linefmt : str, optional
+ A string defining the properties of the vertical lines. Usually,
+ this will be a color or a color and a linestyle:
+
+ ========= =============
+ Character Line Style
+ ========= =============
+ ``'-'`` solid line
+ ``'--'`` dashed line
+ ``'-.'`` dash-dot line
+ ``':'`` dotted line
+ ========= =============
+
+ Default: 'C0-', i.e. solid line with the first color of the color
+ cycle.
+
+ Note: While it is technically possible to specify valid formats
+ other than color or color and linestyle (e.g. 'rx' or '-.'), this
+ is beyond the intention of the method and will most likely not
+ result in a reasonable plot.
+
+ markerfmt : str, optional
+ A string defining the properties of the markers at the stem heads.
+ Default: 'C0o', i.e. filled circles with the first color of the
+ color cycle.
+
+ basefmt : str, optional
+ A format string defining the properties of the baseline.
+
+ Default: 'C3-' ('C2-' in classic mode).
+
+ bottom : float, optional, default: 0
+ The y-position of the baseline.
+
+ label : str, optional, default: None
+ The label to use for the stems in legends.
+
+
+ Other Parameters
+ ----------------
+ **kwargs
+ No other parameters are supported. They are currently ignored
+ silently for backward compatibility. This behavior is deprecated.
+ Future versions will not accept any other parameters and will
+ raise a TypeError instead.
+
+
+ Returns
+ -------
+ container : :class:`~matplotlib.container.StemContainer`
+ The container may be treated like a tuple
+ (*markerline*, *stemlines*, *baseline*)
+
+
+ Notes
+ -----
+
+ .. seealso::
+ The MATLAB function
+ `stem <http://www.mathworks.com/help/techdoc/ref/stem.html>`_
+ which inspired this method.
+
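+ Examples
+ --------
+ A minimal, illustrative call (the values are arbitrary).
+
+ >>> stem([0.3, 0.05, 0.1, 0.4], linefmt='C0-', markerfmt='C0o',
+ ... basefmt='C3-')
+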
+ """
+
+ # kwargs handling
+ # We would like to have a signature with explicit keywords:
+ # stem(*args, linefmt=None, markerfmt=None, basefmt=None,
+ # bottom=0, label=None)
+ # Unfortunately, this is not supported in Python 2.x. There, *args
+ # can only exist after keyword arguments.
+ linefmt = kwargs.pop('linefmt', None)
+ markerfmt = kwargs.pop('markerfmt', None)
+ basefmt = kwargs.pop('basefmt', None)
+ bottom = kwargs.pop('bottom', None)
+ if bottom is None:
+ bottom = 0
+ label = kwargs.pop('label', None)
+ if kwargs:
+ warn_deprecated(since='2.2',
+ message="stem() got an unexpected keyword "
+ "argument '%s'. This will raise a "
+ "TypeError in future versions." % (
+ next(k for k in kwargs), )
+ )
+
+ remember_hold = self._hold
+ if not self._hold:
+ self.cla()
+ self._hold = True
+
+ # Assume there's at least one data array
+ y = np.asarray(args[0])
+ args = args[1:]
+
+ # Try a second one
+ try:
+ second = np.asarray(args[0], dtype=float)
+ x, y = y, second
+ args = args[1:]
+ except (IndexError, ValueError):
+ # The second array doesn't make sense, or it doesn't exist
+ second = np.arange(len(y))
+ x = second
+
+ # defaults for formats
+ if linefmt is None:
+ try:
+ # fallback to positional argument
+ linefmt = args[0]
+ except IndexError:
+ linecolor = 'C0'
+ linemarker = 'None'
+ linestyle = '-'
+ else:
+ linestyle, linemarker, linecolor = \
+ _process_plot_format(linefmt)
+ else:
+ linestyle, linemarker, linecolor = _process_plot_format(linefmt)
+
+ if markerfmt is None:
+ try:
+ # fallback to positional argument
+ markerfmt = args[1]
+ except IndexError:
+ markercolor = 'C0'
+ markermarker = 'o'
+ markerstyle = 'None'
+ else:
+ markerstyle, markermarker, markercolor = \
+ _process_plot_format(markerfmt)
+ else:
+ markerstyle, markermarker, markercolor = \
+ _process_plot_format(markerfmt)
+
+ if basefmt is None:
+ try:
+ # fallback to positional argument
+ basefmt = args[2]
+ except IndexError:
+ if rcParams['_internal.classic_mode']:
+ basecolor = 'C2'
+ else:
+ basecolor = 'C3'
+ basemarker = 'None'
+ basestyle = '-'
+ else:
+ basestyle, basemarker, basecolor = \
+ _process_plot_format(basefmt)
+ else:
+ basestyle, basemarker, basecolor = _process_plot_format(basefmt)
+
+ markerline, = self.plot(x, y, color=markercolor, linestyle=markerstyle,
+ marker=markermarker, label="_nolegend_")
+
+ stemlines = []
+ for thisx, thisy in zip(x, y):
+ l, = self.plot([thisx, thisx], [bottom, thisy],
+ color=linecolor, linestyle=linestyle,
+ marker=linemarker, label="_nolegend_")
+ stemlines.append(l)
+
+ baseline, = self.plot([np.min(x), np.max(x)], [bottom, bottom],
+ color=basecolor, linestyle=basestyle,
+ marker=basemarker, label="_nolegend_")
+
+ self._hold = remember_hold
+
+ stem_container = StemContainer((markerline, stemlines, baseline),
+ label=label)
+ self.add_container(stem_container)
+
+ return stem_container
+
+ @_preprocess_data(replace_names=["x", "explode", "labels", "colors"],
+ label_namer=None)
+ def pie(self, x, explode=None, labels=None, colors=None,
+ autopct=None, pctdistance=0.6, shadow=False, labeldistance=1.1,
+ startangle=None, radius=None, counterclock=True,
+ wedgeprops=None, textprops=None, center=(0, 0),
+ frame=False, rotatelabels=False):
+ """
+ Plot a pie chart.
+
+ Make a pie chart of array *x*. The fractional area of each wedge is
+ given by ``x/sum(x)``. If ``sum(x) < 1``, then the values of *x* give
+ the fractional area directly and the array will not be normalized. The
+ resulting pie will have an empty wedge of size ``1 - sum(x)``.
+
+ The wedges are plotted counterclockwise, by default starting from the
+ x-axis.
+
+ Parameters
+ ----------
+ x : array-like
+ The wedge sizes.
+
+ explode : array-like, optional, default: None
+ If not *None*, is a ``len(x)`` array which specifies the fraction
+ of the radius with which to offset each wedge.
+
+ labels : list, optional, default: None
+ A sequence of strings providing the labels for each wedge
+
+ colors : array-like, optional, default: None
+ A sequence of matplotlib color args through which the pie chart
+ will cycle. If *None*, will use the colors in the currently
+ active cycle.
+
+ autopct : None (default), string, or function, optional
+ If not *None*, is a string or function used to label the wedges
+ with their numeric value. The label will be placed inside the
+ wedge. If it is a format string, the label will be ``fmt%pct``.
+ If it is a function, it will be called.
+
+ pctdistance : float, optional, default: 0.6
+ The ratio between the center of each pie slice and the start of
+ the text generated by *autopct*. Ignored if *autopct* is *None*.
+
+ shadow : bool, optional, default: False
+ Draw a shadow beneath the pie.
+
+ labeldistance : float, optional, default: 1.1
+ The radial distance at which the pie labels are drawn
+
+ startangle : float, optional, default: None
+ If not *None*, rotates the start of the pie chart by *angle*
+ degrees counterclockwise from the x-axis.
+
+ radius : float, optional, default: None
+ The radius of the pie. If *radius* is *None*, it will be set to 1.
+
+ counterclock : bool, optional, default: True
+ Specify fractions direction, clockwise or counterclockwise.
+
+ wedgeprops : dict, optional, default: None
+ Dict of arguments passed to the wedge objects making the pie.
+ For example, you can pass in ``wedgeprops = {'linewidth': 3}``
+ to set the width of the wedge border lines equal to 3.
+ For more details, look at the doc/arguments of the wedge object.
+ By default ``clip_on=False``.
+
+ textprops : dict, optional, default: None
+ Dict of arguments to pass to the text objects.
+
+ center : list of float, optional, default: (0, 0)
+ Center position of the chart. Takes value (0, 0) or is a sequence
+ of 2 scalars.
+
+ frame : bool, optional, default: False
+ Plot axes frame with the chart if true.
+
+ rotatelabels : bool, optional, default: False
+ Rotate each label to the angle of the corresponding slice if true.
+
+ Returns
+ -------
+ patches : list
+ A sequence of :class:`matplotlib.patches.Wedge` instances
+
+ texts : list
+ A list of the label :class:`matplotlib.text.Text` instances.
+
+ autotexts : list
+ A list of :class:`~matplotlib.text.Text` instances for the numeric
+ labels. This will only be returned if the parameter *autopct* is
+ not *None*.
+
+ Notes
+ -----
+ The pie chart will probably look best if the figure and axes are
+ square, or the Axes aspect is equal.
+ """
+ x = np.array(x, np.float32)
+
+ sx = x.sum()
+ if sx > 1:
+ x /= sx
+
+ if labels is None:
+ labels = [''] * len(x)
+ if explode is None:
+ explode = [0] * len(x)
+ if len(x) != len(labels):
+ raise ValueError("'label' must be of length 'x'")
+ if len(x) != len(explode):
+ raise ValueError("'explode' must be of length 'x'")
+ if colors is None:
+ get_next_color = self._get_patches_for_fill.get_next_color
+ else:
+ color_cycle = itertools.cycle(colors)
+
+ def get_next_color():
+ return next(color_cycle)
+
+ if radius is None:
+ radius = 1
+
+ # Starting theta1 is the start fraction of the circle
+ if startangle is None:
+ theta1 = 0
+ else:
+ theta1 = startangle / 360.0
+
+ # set default values in wedge_prop
+ if wedgeprops is None:
+ wedgeprops = {}
+ wedgeprops.setdefault('clip_on', False)
+
+ if textprops is None:
+ textprops = {}
+ textprops.setdefault('clip_on', False)
+
+ texts = []
+ slices = []
+ autotexts = []
+
+ i = 0
+ for frac, label, expl in zip(x, labels, explode):
+ x, y = center
+ theta2 = (theta1 + frac) if counterclock else (theta1 - frac)
+ thetam = 2 * np.pi * 0.5 * (theta1 + theta2)
+ x += expl * math.cos(thetam)
+ y += expl * math.sin(thetam)
+
+ w = mpatches.Wedge((x, y), radius, 360. * min(theta1, theta2),
+ 360. * max(theta1, theta2),
+ facecolor=get_next_color(),
+ **wedgeprops)
+ slices.append(w)
+ self.add_patch(w)
+ w.set_label(label)
+
+ if shadow:
+ # make sure to add a shadow after the call to
+ # add_patch so the figure and transform props will be
+ # set
+ shad = mpatches.Shadow(w, -0.02, -0.02)
+ shad.set_zorder(0.9 * w.get_zorder())
+ shad.set_label('_nolegend_')
+ self.add_patch(shad)
+
+ xt = x + labeldistance * radius * math.cos(thetam)
+ yt = y + labeldistance * radius * math.sin(thetam)
+ label_alignment_h = xt > 0 and 'left' or 'right'
+ label_alignment_v = 'center'
+ label_rotation = 'horizontal'
+ if rotatelabels:
+ label_alignment_v = yt > 0 and 'bottom' or 'top'
+ label_rotation = np.rad2deg(thetam) + (0 if xt > 0 else 180)
+
+ t = self.text(xt, yt, label,
+ size=rcParams['xtick.labelsize'],
+ horizontalalignment=label_alignment_h,
+ verticalalignment=label_alignment_v,
+ rotation=label_rotation,
+ **textprops)
+
+ texts.append(t)
+
+ if autopct is not None:
+ xt = x + pctdistance * radius * math.cos(thetam)
+ yt = y + pctdistance * radius * math.sin(thetam)
+ if isinstance(autopct, six.string_types):
+ s = autopct % (100. * frac)
+ elif callable(autopct):
+ s = autopct(100. * frac)
+ else:
+ raise TypeError(
+ 'autopct must be callable or a format string')
+
+ t = self.text(xt, yt, s,
+ horizontalalignment='center',
+ verticalalignment='center',
+ **textprops)
+
+ autotexts.append(t)
+
+ theta1 = theta2
+ i += 1
+
+ if not frame:
+ self.set_frame_on(False)
+
+ self.set_xlim((-1.25 + center[0],
+ 1.25 + center[0]))
+ self.set_ylim((-1.25 + center[1],
+ 1.25 + center[1]))
+ self.set_xticks([])
+ self.set_yticks([])
+
+ if autopct is None:
+ return slices, texts
+ else:
+ return slices, texts, autotexts
+
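A minimal usage sketch for the pie() API documented above, with made-up wedge sizes; because autopct is given, the call returns the (patches, texts, autotexts) triple described in the Returns section:

    import matplotlib.pyplot as plt

    sizes = [15, 30, 45, 10]                    # made-up wedge sizes
    labels = ['frogs', 'hogs', 'dogs', 'logs']

    fig, ax = plt.subplots()
    patches, texts, autotexts = ax.pie(sizes, labels=labels,
                                       explode=(0, 0.1, 0, 0),
                                       autopct='%1.1f%%', shadow=True,
                                       startangle=90)
    ax.set_aspect('equal')                      # keep the pie circular, per the Notes
    plt.show()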
+ @_preprocess_data(replace_names=["x", "y", "xerr", "yerr"],
+ label_namer="y")
+ @docstring.dedent_interpd
+ def errorbar(self, x, y, yerr=None, xerr=None,
+ fmt='', ecolor=None, elinewidth=None, capsize=None,
+ barsabove=False, lolims=False, uplims=False,
+ xlolims=False, xuplims=False, errorevery=1, capthick=None,
+ **kwargs):
+ """
+ Plot y versus x as lines and/or markers with attached errorbars.
+
+ *x*, *y* define the data locations, *xerr*, *yerr* define the errorbar
+ sizes. By default, this draws the data markers/lines as well as the
+ errorbars. Use fmt='none' to draw errorbars without any data markers.
+
+ Parameters
+ ----------
+ x, y : scalar or array-like
+ The data positions.
+
+ xerr, yerr : scalar or array-like, shape(N,) or shape(2,N), optional
+ The errorbar sizes:
+
+ - scalar: Symmetric +/- values for all data points.
+ - shape(N,): Symmetric +/-values for each data point.
+ - shape(2,N): Separate - and + values for each bar. First row
+ contains the lower errors, the second row contains the
+ upper errors.
+ - *None*: No errorbar.
+
+ See :doc:`/gallery/statistics/errorbar_features`
+ for an example on the usage of ``xerr`` and ``yerr``.
+
+ fmt : plot format string, optional, default: ''
+ The format for the data points / data lines. See `.plot` for
+ details.
+
+ Use 'none' (case insensitive) to plot errorbars without any data
+ markers.
+
+ ecolor : mpl color, optional, default: None
+ A matplotlib color arg which gives the color of the errorbar lines.
+ If None, use the color of the line connecting the markers.
+
+ elinewidth : scalar, optional, default: None
+ The linewidth of the errorbar lines. If None, the linewidth of
+ the current style is used.
+
+ capsize : scalar, optional, default: None
+ The length of the error bar caps in points. If None, it will take
+ the value from :rc:`errorbar.capsize`.
+
+ capthick : scalar, optional, default: None
+ An alias to the keyword argument *markeredgewidth* (a.k.a. *mew*).
+ This setting is a more sensible name for the property that
+ controls the thickness of the error bar cap in points. For
+ backwards compatibility, if *mew* or *markeredgewidth* are given,
+ then they will override *capthick*. This may change in future
+ releases.
+
+ barsabove : bool, optional, default: False
+ If True, will plot the errorbars above the plot
+ symbols. Default is below.
+
+ lolims, uplims, xlolims, xuplims : bool, optional, default: False
+ These arguments can be used to indicate that a value gives only
+ upper/lower limits. In that case a caret symbol is used to
+ indicate this. *lims*-arguments may be of the same type as *xerr*
+ and *yerr*. To use limits with inverted axes, :meth:`set_xlim`
+ or :meth:`set_ylim` must be called before :meth:`errorbar`.
+
+ errorevery : positive integer, optional, default: 1
+ Subsamples the errorbars. For example, if errorevery=5, errorbars
+ for every 5th data point will be plotted. The data plot itself still
+ shows all data points.
+
+ Returns
+ -------
+ container : :class:`~.container.ErrorbarContainer`
+ The container contains:
+
+ - plotline: :class:`~matplotlib.lines.Line2D` instance of
+ x, y plot markers and/or line.
+ - caplines: A tuple of :class:`~matplotlib.lines.Line2D` instances
+ of the error bar caps.
+ - barlinecols: A tuple of
+ :class:`~matplotlib.collections.LineCollection` with the
+ horizontal and vertical error ranges.
+
+ Other Parameters
+ ----------------
+ **kwargs :
+ All other keyword arguments are passed on to the plot
+ command for the markers. For example, this code makes big red
+ squares with thick green edges::
+
+ x,y,yerr = rand(3,10)
+ errorbar(x, y, yerr, marker='s', mfc='red',
+ mec='green', ms=20, mew=4)
+
+ where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
+ property names, *markerfacecolor*, *markeredgecolor*, *markersize*
+ and *markeredgewidth*.
+
+ Valid kwargs for the marker properties are `.Line2D` properties:
+
+ %(Line2D)s
+
+ Notes
+ -----
+ .. [Notes section required for data comment. See #10189.]
+
+ """
+ kwargs = cbook.normalize_kwargs(kwargs, _alias_map)
+ # drop anything that comes in as None so the default behavior
+ # happens downstream
+ kwargs = {k: v for k, v in kwargs.items() if v is not None}
+ kwargs.setdefault('zorder', 2)
+
+ if errorevery < 1:
+ raise ValueError(
+ 'errorevery has to be a strictly positive integer')
+
+ self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
+ if not self._hold:
+ self.cla()
+ holdstate = self._hold
+ self._hold = True
+
+ plot_line = (fmt.lower() != 'none')
+ label = kwargs.pop("label", None)
+
+ if fmt == '':
+ fmt_style_kwargs = {}
+ else:
+ fmt_style_kwargs = {k: v for k, v in
+ zip(('linestyle', 'marker', 'color'),
+ _process_plot_format(fmt)) if v is not None}
+ if fmt == 'none':
+ # Remove alpha=0 color that _process_plot_format returns
+ fmt_style_kwargs.pop('color')
+
+ if ('color' in kwargs or 'color' in fmt_style_kwargs or
+ ecolor is not None):
+ base_style = {}
+ if 'color' in kwargs:
+ base_style['color'] = kwargs.pop('color')
+ else:
+ base_style = next(self._get_lines.prop_cycler)
+
+ base_style['label'] = '_nolegend_'
+ base_style.update(fmt_style_kwargs)
+ if 'color' not in base_style:
+ base_style['color'] = 'C0'
+ if ecolor is None:
+ ecolor = base_style['color']
+ # make sure all the args are iterable; use lists not arrays to
+ # preserve units
+ if not iterable(x):
+ x = [x]
+
+ if not iterable(y):
+ y = [y]
+
+ if xerr is not None:
+ if not iterable(xerr):
+ xerr = [xerr] * len(x)
+
+ if yerr is not None:
+ if not iterable(yerr):
+ yerr = [yerr] * len(y)
+
+ # make the style dict for the 'normal' plot line
+ plot_line_style = dict(base_style)
+ plot_line_style.update(**kwargs)
+ if barsabove:
+ plot_line_style['zorder'] = kwargs['zorder'] - .1
+ else:
+ plot_line_style['zorder'] = kwargs['zorder'] + .1
+
+ # make the style dict for the line collections (the bars)
+ eb_lines_style = dict(base_style)
+ eb_lines_style.pop('marker', None)
+ eb_lines_style.pop('linestyle', None)
+ eb_lines_style['color'] = ecolor
+
+ if elinewidth:
+ eb_lines_style['linewidth'] = elinewidth
+ elif 'linewidth' in kwargs:
+ eb_lines_style['linewidth'] = kwargs['linewidth']
+
+ for key in ('transform', 'alpha', 'zorder', 'rasterized'):
+ if key in kwargs:
+ eb_lines_style[key] = kwargs[key]
+
+ # set up cap style dictionary
+ eb_cap_style = dict(base_style)
+ # eject any marker information from format string
+ eb_cap_style.pop('marker', None)
+ eb_lines_style.pop('markerfacecolor', None)
+ eb_lines_style.pop('markeredgewidth', None)
+ eb_lines_style.pop('markeredgecolor', None)
+ eb_cap_style.pop('ls', None)
+ eb_cap_style['linestyle'] = 'none'
+ if capsize is None:
+ capsize = rcParams["errorbar.capsize"]
+ if capsize > 0:
+ eb_cap_style['markersize'] = 2. * capsize
+ if capthick is not None:
+ eb_cap_style['markeredgewidth'] = capthick
+
+ # For backwards-compat, allow explicit setting of
+ # 'markeredgewidth' to over-ride capthick.
+ for key in ('markeredgewidth', 'transform', 'alpha',
+ 'zorder', 'rasterized'):
+ if key in kwargs:
+ eb_cap_style[key] = kwargs[key]
+ eb_cap_style['color'] = ecolor
+
+ data_line = None
+ if plot_line:
+ data_line = mlines.Line2D(x, y, **plot_line_style)
+ self.add_line(data_line)
+
+ barcols = []
+ caplines = []
+
+ # arrays fine here, they are booleans and hence not units
+ def _bool_asarray_helper(d, expected):
+ if not iterable(d):
+ return np.asarray([d] * expected, bool)
+ else:
+ return np.asarray(d, bool)
+
+ lolims = _bool_asarray_helper(lolims, len(x))
+ uplims = _bool_asarray_helper(uplims, len(x))
+ xlolims = _bool_asarray_helper(xlolims, len(x))
+ xuplims = _bool_asarray_helper(xuplims, len(x))
+
+ everymask = np.arange(len(x)) % errorevery == 0
+
+ def xywhere(xs, ys, mask):
+ """
+ return xs[mask], ys[mask] where mask is True, even when xs and
+ ys are not arrays
+ """
+ assert len(xs) == len(ys)
+ assert len(xs) == len(mask)
+ xs = [thisx for thisx, b in zip(xs, mask) if b]
+ ys = [thisy for thisy, b in zip(ys, mask) if b]
+ return xs, ys
+
+ def extract_err(err, data):
+ '''private function to compute error bars
+
+ Parameters
+ ----------
+ err : iterable
+ xerr or yerr from errorbar
+ data : iterable
+ x or y from errorbar
+ '''
+ try:
+ a, b = err
+ except (TypeError, ValueError):
+ pass
+ else:
+ if iterable(a) and iterable(b):
+ # using list comps rather than arrays to preserve units
+ low = [thisx - thiserr for (thisx, thiserr)
+ in cbook.safezip(data, a)]
+ high = [thisx + thiserr for (thisx, thiserr)
+ in cbook.safezip(data, b)]
+ return low, high
+ # Check if xerr is scalar or symmetric. Asymmetric is handled
+ # above. This prevents Nx2 arrays from accidentally
+ # being accepted, when the user meant the 2xN transpose.
+ # special case for empty lists
+ if len(err) > 1:
+ fe = safe_first_element(err)
+ if (len(err) != len(data) or np.size(fe) > 1):
+ raise ValueError("err must be [ scalar | N, Nx1 "
+ "or 2xN array-like ]")
+ # using list comps rather than arrays to preserve units
+ low = [thisx - thiserr for (thisx, thiserr)
+ in cbook.safezip(data, err)]
+ high = [thisx + thiserr for (thisx, thiserr)
+ in cbook.safezip(data, err)]
+ return low, high
+
+ if xerr is not None:
+ left, right = extract_err(xerr, x)
+ # select points without upper/lower limits in x and
+ # draw normal errorbars for these points
+ noxlims = ~(xlolims | xuplims)
+ if noxlims.any() or len(noxlims) == 0:
+ yo, _ = xywhere(y, right, noxlims & everymask)
+ lo, ro = xywhere(left, right, noxlims & everymask)
+ barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
+ if capsize > 0:
+ caplines.append(mlines.Line2D(lo, yo, marker='|',
+ **eb_cap_style))
+ caplines.append(mlines.Line2D(ro, yo, marker='|',
+ **eb_cap_style))
+
+ if xlolims.any():
+ yo, _ = xywhere(y, right, xlolims & everymask)
+ lo, ro = xywhere(x, right, xlolims & everymask)
+ barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
+ rightup, yup = xywhere(right, y, xlolims & everymask)
+ if self.xaxis_inverted():
+ marker = mlines.CARETLEFTBASE
+ else:
+ marker = mlines.CARETRIGHTBASE
+ caplines.append(
+ mlines.Line2D(rightup, yup, ls='None', marker=marker,
+ **eb_cap_style))
+ if capsize > 0:
+ xlo, ylo = xywhere(x, y, xlolims & everymask)
+ caplines.append(mlines.Line2D(xlo, ylo, marker='|',
+ **eb_cap_style))
+
+ if xuplims.any():
+ yo, _ = xywhere(y, right, xuplims & everymask)
+ lo, ro = xywhere(left, x, xuplims & everymask)
+ barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
+ leftlo, ylo = xywhere(left, y, xuplims & everymask)
+ if self.xaxis_inverted():
+ marker = mlines.CARETRIGHTBASE
+ else:
+ marker = mlines.CARETLEFTBASE
+ caplines.append(
+ mlines.Line2D(leftlo, ylo, ls='None', marker=marker,
+ **eb_cap_style))
+ if capsize > 0:
+ xup, yup = xywhere(x, y, xuplims & everymask)
+ caplines.append(mlines.Line2D(xup, yup, marker='|',
+ **eb_cap_style))
+
+ if yerr is not None:
+ lower, upper = extract_err(yerr, y)
+ # select points without upper/lower limits in y and
+ # draw normal errorbars for these points
+ noylims = ~(lolims | uplims)
+ if noylims.any() or len(noylims) == 0:
+ xo, _ = xywhere(x, lower, noylims & everymask)
+ lo, uo = xywhere(lower, upper, noylims & everymask)
+ barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
+ if capsize > 0:
+ caplines.append(mlines.Line2D(xo, lo, marker='_',
+ **eb_cap_style))
+ caplines.append(mlines.Line2D(xo, uo, marker='_',
+ **eb_cap_style))
+
+ if lolims.any():
+ xo, _ = xywhere(x, lower, lolims & everymask)
+ lo, uo = xywhere(y, upper, lolims & everymask)
+ barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
+ xup, upperup = xywhere(x, upper, lolims & everymask)
+ if self.yaxis_inverted():
+ marker = mlines.CARETDOWNBASE
+ else:
+ marker = mlines.CARETUPBASE
+ caplines.append(
+ mlines.Line2D(xup, upperup, ls='None', marker=marker,
+ **eb_cap_style))
+ if capsize > 0:
+ xlo, ylo = xywhere(x, y, lolims & everymask)
+ caplines.append(mlines.Line2D(xlo, ylo, marker='_',
+ **eb_cap_style))
+
+ if uplims.any():
+ xo, _ = xywhere(x, lower, uplims & everymask)
+ lo, uo = xywhere(lower, y, uplims & everymask)
+ barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
+ xlo, lowerlo = xywhere(x, lower, uplims & everymask)
+ if self.yaxis_inverted():
+ marker = mlines.CARETUPBASE
+ else:
+ marker = mlines.CARETDOWNBASE
+ caplines.append(
+ mlines.Line2D(xlo, lowerlo, ls='None', marker=marker,
+ **eb_cap_style))
+ if capsize > 0:
+ xup, yup = xywhere(x, y, uplims & everymask)
+ caplines.append(mlines.Line2D(xup, yup, marker='_',
+ **eb_cap_style))
+ for l in caplines:
+ self.add_line(l)
+
+ self.autoscale_view()
+ self._hold = holdstate
+
+ errorbar_container = ErrorbarContainer((data_line, tuple(caplines),
+ tuple(barcols)),
+ has_xerr=(xerr is not None),
+ has_yerr=(yerr is not None),
+ label=label)
+ self.containers.append(errorbar_container)
+
+ return errorbar_container # (l0, caplines, barcols)
+
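A minimal usage sketch for errorbar() as documented above, with made-up data; yerr is given per point (shape (N,)) and xerr as a single scalar applied to every point:

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.arange(10)
    y = np.sin(x / 2.0)
    yerr = np.linspace(0.05, 0.2, 10)   # symmetric per-point errors (made-up)
    xerr = 0.1                          # one scalar error for all points

    fig, ax = plt.subplots()
    container = ax.errorbar(x, y, yerr=yerr, xerr=xerr, fmt='o', ecolor='gray',
                            capsize=3, errorevery=2, label='measurement')
    ax.legend()
    plt.show()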
+ @_preprocess_data(label_namer=None)
+ def boxplot(self, x, notch=None, sym=None, vert=None, whis=None,
+ positions=None, widths=None, patch_artist=None,
+ bootstrap=None, usermedians=None, conf_intervals=None,
+ meanline=None, showmeans=None, showcaps=None,
+ showbox=None, showfliers=None, boxprops=None,
+ labels=None, flierprops=None, medianprops=None,
+ meanprops=None, capprops=None, whiskerprops=None,
+ manage_xticks=True, autorange=False, zorder=None):
+ """
+ Make a box and whisker plot.
+
+ Make a box and whisker plot for each column of ``x`` or each
+ vector in sequence ``x``. The box extends from the lower to
+ upper quartile values of the data, with a line at the median.
+ The whiskers extend from the box to show the range of the
+ data. Flier points are those past the end of the whiskers.
+
+ Parameters
+ ----------
+ x : Array or a sequence of vectors.
+ The input data.
+
+ notch : bool, optional (False)
+ If `True`, will produce a notched box plot. Otherwise, a
+ rectangular boxplot is produced. The notches represent the
+ confidence interval (CI) around the median. See the entry
+ for the ``bootstrap`` parameter for information regarding
+ how the locations of the notches are computed.
+
+ .. note::
+
+ In cases where the values of the CI are less than the
+ lower quartile or greater than the upper quartile, the
+ notches will extend beyond the box, giving it a
+ distinctive "flipped" appearance. This is expected
+ behavior and consistent with other statistical
+ visualization packages.
+
+ sym : str, optional
+ The default symbol for flier points. Enter an empty string
+ ('') if you don't want to show fliers. If `None`, then the
+ fliers default to 'b+'. If you want more control use the
+ flierprops kwarg.
+
+ vert : bool, optional (True)
+ If `True` (default), makes the boxes vertical. If `False`,
+ everything is drawn horizontally.
+
+ whis : float, sequence, or string (default = 1.5)
+ As a float, determines the reach of the whiskers beyond the
+ first and third quartiles. In other words, where IQR is the
+ interquartile range (`Q3-Q1`), the upper whisker will extend to the
+ last datum less than `Q3 + whis*IQR`. Similarly, the lower whisker
+ will extend to the first datum greater than `Q1 - whis*IQR`.
+ Beyond the whiskers, data
+ are considered outliers and are plotted as individual
+ points. Set this to an unreasonably high value to force the
+ whiskers to show the min and max values. Alternatively, set
+ this to an ascending sequence of percentile (e.g., [5, 95])
+ to set the whiskers at specific percentiles of the data.
+ Finally, ``whis`` can be the string ``'range'`` to force the
+ whiskers to the min and max of the data.
+
+ bootstrap : int, optional
+ Specifies whether to bootstrap the confidence intervals
+ around the median for notched boxplots. If ``bootstrap`` is
+ None, no bootstrapping is performed, and notches are
+ calculated using a Gaussian-based asymptotic approximation
+ (see McGill, R., Tukey, J.W., and Larsen, W.A., 1978, and
+ Kendall and Stuart, 1967). Otherwise, bootstrap specifies
+ the number of times to bootstrap the median to determine its
+ 95% confidence intervals. Values between 1000 and 10000 are
+ recommended.
+
+ usermedians : array-like, optional
+ An array or sequence whose first dimension (or length) is
+ compatible with ``x``. This overrides the medians computed
+ by matplotlib for each element of ``usermedians`` that is not
+ `None`. When an element of ``usermedians`` is None, the median
+ will be computed by matplotlib as normal.
+
+ conf_intervals : array-like, optional
+ Array or sequence whose first dimension (or length) is
+ compatible with ``x`` and whose second dimension is 2. When
+ an element of ``conf_intervals`` is not None, the
+ notch locations computed by matplotlib are overridden
+ (provided ``notch`` is `True`). When an element of
+ ``conf_intervals`` is `None`, the notches are computed by the
+ method specified by the other kwargs (e.g., ``bootstrap``).
+
+ positions : array-like, optional
+ Sets the positions of the boxes. The ticks and limits are
+ automatically set to match the positions. Defaults to
+ `range(1, N+1)` where N is the number of boxes to be drawn.
+
+ widths : scalar or array-like
+ Sets the width of each box either with a scalar or a
+ sequence. The default is 0.5, or ``0.15*(distance between
+ extreme positions)``, if that is smaller.
+
+ patch_artist : bool, optional (False)
+ If `False` produces boxes with the Line2D artist. Otherwise,
+ boxes are drawn with Patch artists.
+
+ labels : sequence, optional
+ Labels for each dataset. Length must be compatible with
+ dimensions of ``x``.
+
+ manage_xticks : bool, optional (True)
+ If the function should adjust the xlim and xtick locations.
+
+ autorange : bool, optional (False)
+ When `True` and the data are distributed such that the 25th and
+ 75th percentiles are equal, ``whis`` is set to ``'range'`` such
+ that the whisker ends are at the minimum and maximum of the
+ data.
+
+ meanline : bool, optional (False)
+ If `True` (and ``showmeans`` is `True`), will try to render
+ the mean as a line spanning the full width of the box
+ according to ``meanprops`` (see below). Not recommended if
+ ``shownotches`` is also True. Otherwise, means will be shown
+ as points.
+
+ zorder : scalar, optional (None)
+ Sets the zorder of the boxplot.
+
+ Other Parameters
+ ----------------
+ showcaps : bool, optional (True)
+ Show the caps on the ends of whiskers.
+ showbox : bool, optional (True)
+ Show the central box.
+ showfliers : bool, optional (True)
+ Show the outliers beyond the caps.
+ showmeans : bool, optional (False)
+ Show the arithmetic means.
+ capprops : dict, optional (None)
+ Specifies the style of the caps.
+ boxprops : dict, optional (None)
+ Specifies the style of the box.
+ whiskerprops : dict, optional (None)
+ Specifies the style of the whiskers.
+ flierprops : dict, optional (None)
+ Specifies the style of the fliers.
+ medianprops : dict, optional (None)
+ Specifies the style of the median.
+ meanprops : dict, optional (None)
+ Specifies the style of the mean.
+
+ Returns
+ -------
+ result : dict
+ A dictionary mapping each component of the boxplot to a list
+ of the :class:`matplotlib.lines.Line2D` instances
+ created. That dictionary has the following keys (assuming
+ vertical boxplots):
+
+ - ``boxes``: the main body of the boxplot showing the
+ quartiles and the median's confidence intervals if
+ enabled.
+
+ - ``medians``: horizontal lines at the median of each box.
+
+ - ``whiskers``: the vertical lines extending to the most
+ extreme, non-outlier data points.
+
+ - ``caps``: the horizontal lines at the ends of the
+ whiskers.
+
+ - ``fliers``: points representing data that extend beyond
+ the whiskers (fliers).
+
+ - ``means``: points or lines representing the means.
+
+ Notes
+ -----
+ .. [Notes section required for data comment. See #10189.]
+
+ """
+
+ # If defined in matplotlibrc, apply the value from rc file
+ # Overridden if argument is passed
+ if whis is None:
+ whis = rcParams['boxplot.whiskers']
+ if bootstrap is None:
+ bootstrap = rcParams['boxplot.bootstrap']
+
+ bxpstats = cbook.boxplot_stats(x, whis=whis, bootstrap=bootstrap,
+ labels=labels, autorange=autorange)
+ if notch is None:
+ notch = rcParams['boxplot.notch']
+ if vert is None:
+ vert = rcParams['boxplot.vertical']
+ if patch_artist is None:
+ patch_artist = rcParams['boxplot.patchartist']
+ if meanline is None:
+ meanline = rcParams['boxplot.meanline']
+ if showmeans is None:
+ showmeans = rcParams['boxplot.showmeans']
+ if showcaps is None:
+ showcaps = rcParams['boxplot.showcaps']
+ if showbox is None:
+ showbox = rcParams['boxplot.showbox']
+ if showfliers is None:
+ showfliers = rcParams['boxplot.showfliers']
+
+ def _update_dict(dictionary, rc_name, properties):
+ """ Loads properties in the dictionary from rc file if not already
+ in the dictionary"""
+ rc_str = 'boxplot.{0}.{1}'
+ if dictionary is None:
+ dictionary = dict()
+ for prop_dict in properties:
+ dictionary.setdefault(prop_dict,
+ rcParams[rc_str.format(rc_name, prop_dict)])
+ return dictionary
+
+ # Common property dictionaries loaded from rc
+ flier_props = ['color', 'marker', 'markerfacecolor', 'markeredgecolor',
+ 'markersize', 'linestyle', 'linewidth']
+ default_props = ['color', 'linewidth', 'linestyle']
+
+ boxprops = _update_dict(boxprops, 'boxprops', default_props)
+ whiskerprops = _update_dict(whiskerprops, 'whiskerprops',
+ default_props)
+ capprops = _update_dict(capprops, 'capprops', default_props)
+ medianprops = _update_dict(medianprops, 'medianprops', default_props)
+ meanprops = _update_dict(meanprops, 'meanprops', default_props)
+ flierprops = _update_dict(flierprops, 'flierprops', flier_props)
+
+ if patch_artist:
+ boxprops['linestyle'] = 'solid'
+ boxprops['edgecolor'] = boxprops.pop('color')
+
+ # if non-default sym value, put it into the flier dictionary
+ # the logic for providing the default symbol ('b+') now lives
+ # in bxp in the initial value of final_flierprops
+ # handle all of the `sym` related logic here so we only have to pass
+ # on the flierprops dict.
+ if sym is not None:
+ # no-flier case, which should really be done with
+ # 'showfliers=False' but nonetheless deal with it to keep backwards
+ # compatibility
+ if sym == '':
+ # blow away existing dict and make one for invisible markers
+ flierprops = dict(linestyle='none', marker='', color='none')
+ # turn the fliers off just to be safe
+ showfliers = False
+ # now process the symbol string
+ else:
+ # process the symbol string
+ # discarded linestyle
+ _, marker, color = _process_plot_format(sym)
+ # if we have a marker, use it
+ if marker is not None:
+ flierprops['marker'] = marker
+ # if we have a color, use it
+ if color is not None:
+ # assume that if color is passed in the user wants a
+ # filled symbol; if the user wants more control, use
+ # flierprops
+ flierprops['color'] = color
+ flierprops['markerfacecolor'] = color
+ flierprops['markeredgecolor'] = color
+
+ # replace medians if necessary:
+ if usermedians is not None:
+ if (len(np.ravel(usermedians)) != len(bxpstats) or
+ np.shape(usermedians)[0] != len(bxpstats)):
+ raise ValueError('usermedians length not compatible with x')
+ else:
+ # reassign medians as necessary
+ for stats, med in zip(bxpstats, usermedians):
+ if med is not None:
+ stats['med'] = med
+
+ if conf_intervals is not None:
+ if np.shape(conf_intervals)[0] != len(bxpstats):
+ err_mess = 'conf_intervals length not compatible with x'
+ raise ValueError(err_mess)
+ else:
+ for stats, ci in zip(bxpstats, conf_intervals):
+ if ci is not None:
+ if len(ci) != 2:
+ raise ValueError('each confidence interval must '
+ 'have two values')
+ else:
+ if ci[0] is not None:
+ stats['cilo'] = ci[0]
+ if ci[1] is not None:
+ stats['cihi'] = ci[1]
+
+ artists = self.bxp(bxpstats, positions=positions, widths=widths,
+ vert=vert, patch_artist=patch_artist,
+ shownotches=notch, showmeans=showmeans,
+ showcaps=showcaps, showbox=showbox,
+ boxprops=boxprops, flierprops=flierprops,
+ medianprops=medianprops, meanprops=meanprops,
+ meanline=meanline, showfliers=showfliers,
+ capprops=capprops, whiskerprops=whiskerprops,
+ manage_xticks=manage_xticks, zorder=zorder)
+ return artists
+
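A minimal usage sketch for boxplot() as documented above, using made-up samples; the returned dict maps component names to the Line2D artists listed in the Returns section:

    import numpy as np
    import matplotlib.pyplot as plt

    np.random.seed(0)
    data = [np.random.normal(0, std, 100) for std in (1, 2, 3)]  # made-up samples

    fig, ax = plt.subplots()
    result = ax.boxplot(data, notch=True, sym='r+', labels=['a', 'b', 'c'],
                        showmeans=True)
    print(len(result['medians']))       # one median line per dataset -> 3
    plt.show()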
+ def bxp(self, bxpstats, positions=None, widths=None, vert=True,
+ patch_artist=False, shownotches=False, showmeans=False,
+ showcaps=True, showbox=True, showfliers=True,
+ boxprops=None, whiskerprops=None, flierprops=None,
+ medianprops=None, capprops=None, meanprops=None,
+ meanline=False, manage_xticks=True, zorder=None):
+ """
+ Drawing function for box and whisker plots.
+
+ Make a box and whisker plot for each column of *x* or each
+ vector in sequence *x*. The box extends from the lower to
+ upper quartile values of the data, with a line at the median.
+ The whiskers extend from the box to show the range of the
+ data. Flier points are those past the end of the whiskers.
+
+ Parameters
+ ----------
+
+ bxpstats : list of dicts
+ A list of dictionaries containing stats for each boxplot.
+ Required keys are:
+
+ - ``med``: The median (scalar float).
+
+ - ``q1``: The first quartile (25th percentile) (scalar
+ float).
+
+ - ``q3``: The third quartile (75th percentile) (scalar
+ float).
+
+ - ``whislo``: Lower bound of the lower whisker (scalar
+ float).
+
+ - ``whishi``: Upper bound of the upper whisker (scalar
+ float).
+
+ Optional keys are:
+
+ - ``mean``: The mean (scalar float). Needed if
+ ``showmeans=True``.
+
+ - ``fliers``: Data beyond the whiskers (sequence of floats).
+ Needed if ``showfliers=True``.
+
+ - ``cilo`` & ``cihi``: Lower and upper confidence intervals
+ about the median. Needed if ``shownotches=True``.
+
+ - ``label``: Name of the dataset (string). If available,
+ this will be used as the tick label for the boxplot
+
+ positions : array-like, default = [1, 2, ..., n]
+ Sets the positions of the boxes. The ticks and limits
+ are automatically set to match the positions.
+
+ widths : array-like, default = None
+ Either a scalar or a vector that sets the width of each
+ box. The default is ``0.15*(distance between extreme
+ positions)``, clipped to no less than 0.15 and no more than
+ 0.5.
+
+ vert : bool, default = True
+ If `True` (default), makes the boxes vertical. If `False`,
+ makes horizontal boxes.
+
+ patch_artist : bool, default = False
+ If `False` produces boxes with the
+ `~matplotlib.lines.Line2D` artist. If `True` produces boxes
+ with the `~matplotlib.patches.Patch` artist.
+
+ shownotches : bool, default = False
+ If `False` (default), produces a rectangular box plot.
+ If `True`, will produce a notched box plot
+
+ showmeans : bool, default = False
+ If `True`, will toggle on the rendering of the means
+
+ showcaps : bool, default = True
+ If `True`, will toggle on the rendering of the caps
+
+ showbox : bool, default = True
+ If `True`, will toggle on the rendering of the box
+
+ showfliers : bool, default = True
+ If `True`, will toggle on the rendering of the fliers
+
+ boxprops : dict or None (default)
+ If provided, will set the plotting style of the boxes
+
+ whiskerprops : dict or None (default)
+ If provided, will set the plotting style of the whiskers
+
+ capprops : dict or None (default)
+ If provided, will set the plotting style of the caps
+
+ flierprops : dict or None (default)
+ If provided will set the plotting style of the fliers
+
+ medianprops : dict or None (default)
+ If provided, will set the plotting style of the medians
+
+ meanprops : dict or None (default)
+ If provided, will set the plotting style of the means
+
+ meanline : bool, default = False
+ If `True` (and *showmeans* is `True`), will try to render the mean
+ as a line spanning the full width of the box according to
+ *meanprops*. Not recommended if *shownotches* is also True.
+ Otherwise, means will be shown as points.
+
+ manage_xticks : bool, default = True
+ If the function should adjust the xlim and xtick locations.
+
+ zorder : scalar, default = None
+ The zorder of the resulting boxplot
+
+ Returns
+ -------
+ result : dict
+ A dictionary mapping each component of the boxplot to a list
+ of the :class:`matplotlib.lines.Line2D` instances
+ created. That dictionary has the following keys (assuming
+ vertical boxplots):
+
+ - ``boxes``: the main body of the boxplot showing the
+ quartiles and the median's confidence intervals if
+ enabled.
+
+ - ``medians``: horizontal lines at the median of each box.
+
+ - ``whiskers``: the vertical lines extending to the most
+ extreme, non-outlier data points.
+
+ - ``caps``: the horizontal lines at the ends of the
+ whiskers.
+
+ - ``fliers``: points representing data that extend beyond
+ the whiskers (fliers).
+
+ - ``means``: points or lines representing the means.
+
+ Examples
+ --------
+
+ .. plot:: gallery/statistics/bxp.py
+
+ """
+ # lists of artists to be output
+ whiskers = []
+ caps = []
+ boxes = []
+ medians = []
+ means = []
+ fliers = []
+
+ # empty list of xticklabels
+ datalabels = []
+
+ # Use default zorder if none specified
+ if zorder is None:
+ zorder = mlines.Line2D.zorder
+
+ zdelta = 0.1
+ # box properties
+ if patch_artist:
+ final_boxprops = dict(
+ linestyle=rcParams['boxplot.boxprops.linestyle'],
+ edgecolor=rcParams['boxplot.boxprops.color'],
+ facecolor=rcParams['patch.facecolor'],
+ linewidth=rcParams['boxplot.boxprops.linewidth']
+ )
+ if rcParams['_internal.classic_mode']:
+ final_boxprops['facecolor'] = 'white'
+ else:
+ final_boxprops = dict(
+ linestyle=rcParams['boxplot.boxprops.linestyle'],
+ color=rcParams['boxplot.boxprops.color'],
+ )
+
+ final_boxprops['zorder'] = zorder
+ if boxprops is not None:
+ final_boxprops.update(boxprops)
+
+ # other (cap, whisker) properties
+ final_whiskerprops = dict(
+ linestyle=rcParams['boxplot.whiskerprops.linestyle'],
+ linewidth=rcParams['boxplot.whiskerprops.linewidth'],
+ color=rcParams['boxplot.whiskerprops.color'],
+ )
+
+ final_capprops = dict(
+ linestyle=rcParams['boxplot.capprops.linestyle'],
+ linewidth=rcParams['boxplot.capprops.linewidth'],
+ color=rcParams['boxplot.capprops.color'],
+ )
+
+ final_capprops['zorder'] = zorder
+ if capprops is not None:
+ final_capprops.update(capprops)
+
+ final_whiskerprops['zorder'] = zorder
+ if whiskerprops is not None:
+ final_whiskerprops.update(whiskerprops)
+
+ # set up the default flier properties
+ final_flierprops = dict(
+ linestyle=rcParams['boxplot.flierprops.linestyle'],
+ linewidth=rcParams['boxplot.flierprops.linewidth'],
+ color=rcParams['boxplot.flierprops.color'],
+ marker=rcParams['boxplot.flierprops.marker'],
+ markerfacecolor=rcParams['boxplot.flierprops.markerfacecolor'],
+ markeredgecolor=rcParams['boxplot.flierprops.markeredgecolor'],
+ markersize=rcParams['boxplot.flierprops.markersize'],
+ )
+
+ final_flierprops['zorder'] = zorder
+ # flier (outlier) properties
+ if flierprops is not None:
+ final_flierprops.update(flierprops)
+
+ # median line properties
+ final_medianprops = dict(
+ linestyle=rcParams['boxplot.medianprops.linestyle'],
+ linewidth=rcParams['boxplot.medianprops.linewidth'],
+ color=rcParams['boxplot.medianprops.color'],
+ )
+ final_medianprops['zorder'] = zorder + zdelta
+ if medianprops is not None:
+ final_medianprops.update(medianprops)
+
+ # mean (line or point) properties
+ if meanline:
+ final_meanprops = dict(
+ linestyle=rcParams['boxplot.meanprops.linestyle'],
+ linewidth=rcParams['boxplot.meanprops.linewidth'],
+ color=rcParams['boxplot.meanprops.color'],
+ )
+ else:
+ final_meanprops = dict(
+ linestyle='',
+ marker=rcParams['boxplot.meanprops.marker'],
+ markerfacecolor=rcParams['boxplot.meanprops.markerfacecolor'],
+ markeredgecolor=rcParams['boxplot.meanprops.markeredgecolor'],
+ markersize=rcParams['boxplot.meanprops.markersize'],
+ )
+ final_meanprops['zorder'] = zorder + zdelta
+ if meanprops is not None:
+ final_meanprops.update(meanprops)
+
+ def to_vc(xs, ys):
+ # convert arguments to verts and codes, append (0, 0) (ignored).
+ verts = np.append(np.column_stack([xs, ys]), [(0, 0)], 0)
+ codes = ([mpath.Path.MOVETO]
+ + [mpath.Path.LINETO] * (len(verts) - 2)
+ + [mpath.Path.CLOSEPOLY])
+ return verts, codes
+
+ def patch_list(xs, ys, **kwargs):
+ verts, codes = to_vc(xs, ys)
+ path = mpath.Path(verts, codes)
+ patch = mpatches.PathPatch(path, **kwargs)
+ self.add_artist(patch)
+ return [patch]
+
+ # vertical or horizontal plot?
+ if vert:
+ def doplot(*args, **kwargs):
+ return self.plot(*args, **kwargs)
+
+ def dopatch(xs, ys, **kwargs):
+ return patch_list(xs, ys, **kwargs)
+
+ else:
+ def doplot(*args, **kwargs):
+ shuffled = []
+ for i in xrange(0, len(args), 2):
+ shuffled.extend([args[i + 1], args[i]])
+ return self.plot(*shuffled, **kwargs)
+
+ def dopatch(xs, ys, **kwargs):
+ xs, ys = ys, xs # flip X, Y
+ return patch_list(xs, ys, **kwargs)
+
+ # input validation
+ N = len(bxpstats)
+ datashape_message = ("List of boxplot statistics and `{0}` "
+ "values must have same the length")
+ # check position
+ if positions is None:
+ positions = list(xrange(1, N + 1))
+ elif len(positions) != N:
+ raise ValueError(datashape_message.format("positions"))
+
+ # width
+ if widths is None:
+ widths = [np.clip(0.15 * np.ptp(positions), 0.15, 0.5)] * N
+ elif np.isscalar(widths):
+ widths = [widths] * N
+ elif len(widths) != N:
+ raise ValueError(datashape_message.format("widths"))
+
+ # check and save the `hold` state of the current axes
+ if not self._hold:
+ self.cla()
+ holdStatus = self._hold
+ for pos, width, stats in zip(positions, widths, bxpstats):
+ # try to find a new label
+ datalabels.append(stats.get('label', pos))
+
+ # whisker coords
+ whisker_x = np.ones(2) * pos
+ whiskerlo_y = np.array([stats['q1'], stats['whislo']])
+ whiskerhi_y = np.array([stats['q3'], stats['whishi']])
+
+ # cap coords
+ cap_left = pos - width * 0.25
+ cap_right = pos + width * 0.25
+ cap_x = np.array([cap_left, cap_right])
+ cap_lo = np.ones(2) * stats['whislo']
+ cap_hi = np.ones(2) * stats['whishi']
+
+ # box and median coords
+ box_left = pos - width * 0.5
+ box_right = pos + width * 0.5
+ med_y = [stats['med'], stats['med']]
+
+ # notched boxes
+ if shownotches:
+ box_x = [box_left, box_right, box_right, cap_right, box_right,
+ box_right, box_left, box_left, cap_left, box_left,
+ box_left]
+ box_y = [stats['q1'], stats['q1'], stats['cilo'],
+ stats['med'], stats['cihi'], stats['q3'],
+ stats['q3'], stats['cihi'], stats['med'],
+ stats['cilo'], stats['q1']]
+ med_x = cap_x
+
+ # plain boxes
+ else:
+ box_x = [box_left, box_right, box_right, box_left, box_left]
+ box_y = [stats['q1'], stats['q1'], stats['q3'], stats['q3'],
+ stats['q1']]
+ med_x = [box_left, box_right]
+
+ # maybe draw the box:
+ if showbox:
+ if patch_artist:
+ boxes.extend(dopatch(box_x, box_y, **final_boxprops))
+ else:
+ boxes.extend(doplot(box_x, box_y, **final_boxprops))
+
+ # draw the whiskers
+ whiskers.extend(doplot(
+ whisker_x, whiskerlo_y, **final_whiskerprops
+ ))
+ whiskers.extend(doplot(
+ whisker_x, whiskerhi_y, **final_whiskerprops
+ ))
+
+ # maybe draw the caps:
+ if showcaps:
+ caps.extend(doplot(cap_x, cap_lo, **final_capprops))
+ caps.extend(doplot(cap_x, cap_hi, **final_capprops))
+
+ # draw the medians
+ medians.extend(doplot(med_x, med_y, **final_medianprops))
+
+ # maybe draw the means
+ if showmeans:
+ if meanline:
+ means.extend(doplot(
+ [box_left, box_right], [stats['mean'], stats['mean']],
+ **final_meanprops
+ ))
+ else:
+ means.extend(doplot(
+ [pos], [stats['mean']], **final_meanprops
+ ))
+
+ # maybe draw the fliers
+ if showfliers:
+ # fliers coords
+ flier_x = np.ones(len(stats['fliers'])) * pos
+ flier_y = stats['fliers']
+
+ fliers.extend(doplot(
+ flier_x, flier_y, **final_flierprops
+ ))
+
+ # fix our axes/ticks up a little
+ if vert:
+ setticks = self.set_xticks
+ setlim = self.set_xlim
+ setlabels = self.set_xticklabels
+ else:
+ setticks = self.set_yticks
+ setlim = self.set_ylim
+ setlabels = self.set_yticklabels
+
+ if manage_xticks:
+ newlimits = min(positions) - 0.5, max(positions) + 0.5
+ setlim(newlimits)
+ setticks(positions)
+ setlabels(datalabels)
+
+ # reset hold status
+ self._hold = holdStatus
+
+ return dict(whiskers=whiskers, caps=caps, boxes=boxes,
+ medians=medians, fliers=fliers, means=means)
+
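A minimal sketch for bxp(), which draws boxes directly from precomputed statistics; here the stats dicts are produced with cbook.boxplot_stats (the same helper boxplot() uses above), on made-up data:

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import cbook

    np.random.seed(0)
    data = np.random.lognormal(size=(50, 3))                   # made-up data
    stats = cbook.boxplot_stats(data, labels=['a', 'b', 'c'])  # list of dicts

    fig, ax = plt.subplots()
    # each dict supplies the required med, q1, q3, whislo, whishi keys
    ax.bxp(stats, showmeans=True)
    plt.show()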
+ @_preprocess_data(replace_names=["x", "y", "s", "linewidths",
+ "edgecolors", "c", "facecolor",
+ "facecolors", "color"],
+ label_namer="y")
+ def scatter(self, x, y, s=None, c=None, marker=None, cmap=None, norm=None,
+ vmin=None, vmax=None, alpha=None, linewidths=None,
+ verts=None, edgecolors=None,
+ **kwargs):
+ """
+ A scatter plot of *y* vs *x* with varying marker size and/or color.
+
+ Parameters
+ ----------
+ x, y : array_like, shape (n, )
+ The data positions.
+
+ s : scalar or array_like, shape (n, ), optional
+ The marker size in points**2.
+ Default is ``rcParams['lines.markersize'] ** 2``.
+
+ c : color, sequence, or sequence of color, optional, default: 'b'
+ The marker color. Possible values:
+
+ - A single color format string.
+ - A sequence of color specifications of length n.
+ - A sequence of n numbers to be mapped to colors using *cmap* and
+ *norm*.
+ - A 2-D array in which the rows are RGB or RGBA.
+
+ Note that *c* should not be a single numeric RGB or RGBA sequence
+ because that is indistinguishable from an array of values to be
+ colormapped. If you want to specify the same RGB or RGBA value for
+ all points, use a 2-D array with a single row.
+
+ marker : `~matplotlib.markers.MarkerStyle`, optional, default: 'o'
+ The marker style. *marker* can be either an instance of the class
+ or the text shorthand for a particular marker.
+ See `~matplotlib.markers` for more information about marker styles.
+
+ cmap : `~matplotlib.colors.Colormap`, optional, default: None
+ A `.Colormap` instance or registered colormap name. *cmap* is only
+ used if *c* is an array of floats. If ``None``, defaults to rc
+ ``image.cmap``.
+
+ norm : `~matplotlib.colors.Normalize`, optional, default: None
+ A `.Normalize` instance is used to scale luminance data to 0, 1.
+ *norm* is only used if *c* is an array of floats. If *None*, use
+ the default `.colors.Normalize`.
+
+ vmin, vmax : scalar, optional, default: None
+ *vmin* and *vmax* are used in conjunction with *norm* to normalize
+ luminance data. If None, the respective min and max of the color
+ array is used. *vmin* and *vmax* are ignored if you pass a *norm*
+ instance.
+
+ alpha : scalar, optional, default: None
+ The alpha blending value, between 0 (transparent) and 1 (opaque).
+
+ linewidths : scalar or array_like, optional, default: None
+ The linewidth of the marker edges. Note: The default *edgecolors*
+ is 'face'. You may want to change this as well.
+ If *None*, defaults to rcParams ``lines.linewidth``.
+
+ verts : sequence of (x, y), optional
+ If *marker* is *None*, these vertices will be used to construct
+ the marker. The center of the marker is located at (0, 0) in
+ normalized units. The overall marker is rescaled by *s*.
+
+ edgecolors : color or sequence of color, optional, default: 'face'
+ The edge color of the marker. Possible values:
+
+ - 'face': The edge color will always be the same as the face color.
+ - 'none': No patch boundary will be drawn.
+ - A matplotlib color.
+
+ For non-filled markers, the *edgecolors* kwarg is ignored and
+ forced to 'face' internally.
+
+ Returns
+ -------
+ paths : `~matplotlib.collections.PathCollection`
+
+ Other Parameters
+ ----------------
+ **kwargs : `~matplotlib.collections.Collection` properties
+
+ See Also
+ --------
+ plot : To plot scatter plots when markers are identical in size and
+ color.
+
+ Notes
+ -----
+
+ * The `.plot` function will be faster for scatterplots where markers
+ don't vary in size or color.
+
+ * Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in which
+ case all masks will be combined and only unmasked points will be
+ plotted.
+
+ * Fundamentally, scatter works with 1-D arrays; *x*, *y*, *s*, and *c*
+ may be input as 2-D arrays, but within scatter they will be
+ flattened. The exception is *c*, which will be flattened only if its
+ size matches the size of *x* and *y*.
+
+ """
+
+ if not self._hold:
+ self.cla()
+
+ # Process **kwargs to handle aliases, conflicts with explicit kwargs:
+
+ facecolors = None
+ edgecolors = kwargs.pop('edgecolor', edgecolors)
+ fc = kwargs.pop('facecolors', None)
+ fc = kwargs.pop('facecolor', fc)
+ if fc is not None:
+ facecolors = fc
+ co = kwargs.pop('color', None)
+ if co is not None:
+ try:
+ mcolors.to_rgba_array(co)
+ except ValueError:
+ raise ValueError("'color' kwarg must be an mpl color"
+ " spec or sequence of color specs.\n"
+ "For a sequence of values to be"
+ " color-mapped, use the 'c' kwarg instead.")
+ if edgecolors is None:
+ edgecolors = co
+ if facecolors is None:
+ facecolors = co
+ if c is not None:
+ raise ValueError("Supply a 'c' kwarg or a 'color' kwarg"
+ " but not both; they differ but"
+ " their functionalities overlap.")
+ if c is None:
+ if facecolors is not None:
+ c = facecolors
+ else:
+ if rcParams['_internal.classic_mode']:
+ c = 'b' # The original default
+ else:
+ c = self._get_patches_for_fill.get_next_color()
+ c_none = True
+ else:
+ c_none = False
+
+ if edgecolors is None and not rcParams['_internal.classic_mode']:
+ edgecolors = 'face'
+
+ self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
+ x = self.convert_xunits(x)
+ y = self.convert_yunits(y)
+
+ # np.ma.ravel yields an ndarray, not a masked array,
+ # unless its argument is a masked array.
+ xy_shape = (np.shape(x), np.shape(y))
+ x = np.ma.ravel(x)
+ y = np.ma.ravel(y)
+ if x.size != y.size:
+ raise ValueError("x and y must be the same size")
+
+ if s is None:
+ if rcParams['_internal.classic_mode']:
+ s = 20
+ else:
+ s = rcParams['lines.markersize'] ** 2.0
+
+ s = np.ma.ravel(s) # This doesn't have to match x, y in size.
+
+ # After this block, c_array will be None unless
+ # c is an array for mapping. The potential ambiguity
+ # with a sequence of 3 or 4 numbers is resolved in
+ # favor of mapping, not rgb or rgba.
+ if c_none or co is not None:
+ c_array = None
+ else:
+ try:
+ c_array = np.asanyarray(c, dtype=float)
+ if c_array.shape in xy_shape:
+ c = np.ma.ravel(c_array)
+ else:
+ # Wrong size; it must not be intended for mapping.
+ c_array = None
+ except ValueError:
+ # Failed to make a floating-point array; c must be color specs.
+ c_array = None
+
+ if c_array is None:
+ try:
+ # must be acceptable as PathCollection facecolors
+ colors = mcolors.to_rgba_array(c)
+ except ValueError:
+ # c not acceptable as PathCollection facecolor
+ raise ValueError("c of shape {} not acceptable as a color "
+ "sequence for x with size {}, y with size {}"
+ .format(c.shape, x.size, y.size))
+ else:
+ colors = None # use cmap, norm after collection is created
+
+ # `delete_masked_points` only modifies arguments of the same length as
+ # `x`.
+ x, y, s, c, colors, edgecolors, linewidths =\
+ cbook.delete_masked_points(
+ x, y, s, c, colors, edgecolors, linewidths)
+
+ scales = s # Renamed for readability below.
+
+ # to be API compatible
+ if marker is None and verts is not None:
+ marker = (verts, 0)
+ verts = None
+
+ # load default marker from rcParams
+ if marker is None:
+ marker = rcParams['scatter.marker']
+
+ if isinstance(marker, mmarkers.MarkerStyle):
+ marker_obj = marker
+ else:
+ marker_obj = mmarkers.MarkerStyle(marker)
+
+ path = marker_obj.get_path().transformed(
+ marker_obj.get_transform())
+ if not marker_obj.is_filled():
+ edgecolors = 'face'
+ linewidths = rcParams['lines.linewidth']
+
+ offsets = np.column_stack([x, y])
+
+ collection = mcoll.PathCollection(
+ (path,), scales,
+ facecolors=colors,
+ edgecolors=edgecolors,
+ linewidths=linewidths,
+ offsets=offsets,
+ transOffset=kwargs.pop('transform', self.transData),
+ alpha=alpha
+ )
+ collection.set_transform(mtransforms.IdentityTransform())
+ collection.update(kwargs)
+
+ if colors is None:
+ if norm is not None and not isinstance(norm, mcolors.Normalize):
+ raise ValueError(
+ "'norm' must be an instance of 'mcolors.Normalize'")
+ collection.set_array(np.asarray(c))
+ collection.set_cmap(cmap)
+ collection.set_norm(norm)
+
+ if vmin is not None or vmax is not None:
+ collection.set_clim(vmin, vmax)
+ else:
+ collection.autoscale_None()
+
+ # Classic mode only:
+ # ensure there are margins to allow for the
+ # finite size of the symbols. In v2.x, margins
+ # are present by default, so we disable this
+ # scatter-specific override.
+ if rcParams['_internal.classic_mode']:
+ if self._xmargin < 0.05 and x.size > 0:
+ self.set_xmargin(0.05)
+ if self._ymargin < 0.05 and x.size > 0:
+ self.set_ymargin(0.05)
+
+ self.add_collection(collection)
+ self.autoscale_view()
+
+ return collection
+
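A minimal usage sketch for scatter() as documented above, with made-up positions; c is a sequence of numbers mapped through cmap/norm, and s gives per-point areas in points**2:

    import numpy as np
    import matplotlib.pyplot as plt

    np.random.seed(0)
    x, y = np.random.rand(2, 50)        # made-up positions
    c = np.hypot(x, y)                  # values to be colormapped
    s = 100 * np.random.rand(50)        # marker areas in points**2

    fig, ax = plt.subplots()
    pc = ax.scatter(x, y, c=c, s=s, cmap='viridis', alpha=0.8, edgecolors='k')
    fig.colorbar(pc, ax=ax)             # pc is the returned PathCollection
    plt.show()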
+ @_preprocess_data(replace_names=["x", "y"], label_namer="y")
+ @docstring.dedent_interpd
+ def hexbin(self, x, y, C=None, gridsize=100, bins=None,
+ xscale='linear', yscale='linear', extent=None,
+ cmap=None, norm=None, vmin=None, vmax=None,
+ alpha=None, linewidths=None, edgecolors='face',
+ reduce_C_function=np.mean, mincnt=None, marginals=False,
+ **kwargs):
+ """
+ Make a hexagonal binning plot.
+
+ Make a hexagonal binning plot of *x* versus *y*, where *x*,
+ *y* are 1-D sequences of the same length, *N*. If *C* is *None*
+ (the default), this is a histogram of the number of occurrences
+ of the observations at (x[i],y[i]).
+
+ If *C* is specified, it specifies values at the coordinate
+ (x[i],y[i]). These values are accumulated for each hexagonal
+ bin and then reduced according to *reduce_C_function*, which
+ defaults to numpy's mean function (np.mean). (If *C* is
+ specified, it must also be a 1-D sequence of the same length
+ as *x* and *y*.)
+
+ Parameters
+ ----------
+ x, y : array or masked array
+
+ C : array or masked array, optional, default is *None*
+
+ gridsize : int or (int, int), optional, default is 100
+ The number of hexagons in the *x*-direction, default is
+ 100. The corresponding number of hexagons in the
+ *y*-direction is chosen such that the hexagons are
+ approximately regular. Alternatively, gridsize can be a
+ tuple with two elements specifying the number of hexagons
+ in the *x*-direction and the *y*-direction.
+
+ bins : {'log'} or int or sequence, optional, default is *None*
+ If *None*, no binning is applied; the color of each hexagon
+ directly corresponds to its count value.
+
+ If 'log', use a logarithmic scale for the color
+ map. Internally, :math:`log_{10}(i+1)` is used to
+ determine the hexagon color.
+
+ If an integer, divide the counts in the specified number
+ of bins, and color the hexagons accordingly.
+
+ If a sequence of values, the values of the lower bound of
+ the bins to be used.
+
+ xscale : {'linear', 'log'}, optional, default is 'linear'
+ Use a linear or log10 scale on the horizontal axis.
+
+ yscale : {'linear', 'log'}, optional, default is 'linear'
+ Use a linear or log10 scale on the vertical axis.
+
+ mincnt : int > 0, optional, default is *None*
+ If not *None*, only display cells with more than *mincnt*
+ number of points in the cell
+
+ marginals : bool, optional, default is *False*
+ If *marginals* is *True*, plot the marginal density as
+ colormapped rectangles along the bottom of the x-axis and
+ left of the y-axis
+
+ extent : scalar, optional, default is *None*
+ The limits of the bins. The default assigns the limits
+ based on *gridsize*, *x*, *y*, *xscale* and *yscale*.
+
+ If *xscale* or *yscale* is set to 'log', the limits are
+ expected to be the exponent for a power of 10. E.g. for
+ x-limits of 1 and 50 in 'linear' scale and y-limits
+ of 10 and 1000 in 'log' scale, enter (1, 50, 1, 3).
+
+ Order of scalars is (left, right, bottom, top).
+
+ Other Parameters
+ ----------------
+ cmap : object, optional, default is *None*
+ a :class:`matplotlib.colors.Colormap` instance. If *None*,
+ defaults to rc ``image.cmap``.
+
+ norm : object, optional, default is *None*
+ :class:`matplotlib.colors.Normalize` instance is used to
+ scale luminance data to 0,1.
+
+ vmin, vmax : scalar, optional, default is *None*
+ *vmin* and *vmax* are used in conjunction with *norm* to
+ normalize luminance data. If *None*, the min and max of the
+ color array *C* are used. Note if you pass a norm instance
+ your settings for *vmin* and *vmax* will be ignored.
+
+ alpha : scalar between 0 and 1, optional, default is *None*
+ the alpha value for the patches
+
+ linewidths : scalar, optional, default is *None*
+ If *None*, defaults to 1.0.
+
+ edgecolors : {'face', 'none', *None*} or color, optional
+
+ If 'face' (the default), draws the edges in the same color as the
+ fill color.
+
+ If 'none', no edge is drawn; this can sometimes lead to unsightly
+ unpainted pixels between the hexagons.
+
+ If *None*, draws outlines in the default color.
+
+ If a matplotlib color arg, draws outlines in the specified color.
+
+ Returns
+ -------
+ object
+ a :class:`~matplotlib.collections.PolyCollection` instance; use
+ :meth:`~matplotlib.collections.PolyCollection.get_array` on
+ this :class:`~matplotlib.collections.PolyCollection` to get
+ the counts in each hexagon.
+
+ If *marginals* is *True*, horizontal
+ bar and vertical bar (both PolyCollections) will be attached
+ to the return collection as attributes *hbar* and *vbar*.
+
+ Notes
+ -----
+ The standard descriptions of all the
+ :class:`~matplotlib.collections.Collection` parameters:
+
+ %(Collection)s
+
+ """
+
+ if not self._hold:
+ self.cla()
+
+ self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
+
+ x, y, C = cbook.delete_masked_points(x, y, C)
+
+ # Set the size of the hexagon grid
+ if iterable(gridsize):
+ nx, ny = gridsize
+ else:
+ nx = gridsize
+ ny = int(nx / math.sqrt(3))
+ # Count the number of data in each hexagon
+ x = np.array(x, float)
+ y = np.array(y, float)
+ if xscale == 'log':
+ if np.any(x <= 0.0):
+ raise ValueError("x contains non-positive values, so can not"
+ " be log-scaled")
+ x = np.log10(x)
+ if yscale == 'log':
+ if np.any(y <= 0.0):
+ raise ValueError("y contains non-positive values, so can not"
+ " be log-scaled")
+ y = np.log10(y)
+ if extent is not None:
+ xmin, xmax, ymin, ymax = extent
+ else:
+ xmin, xmax = (np.min(x), np.max(x)) if len(x) else (0, 1)
+ ymin, ymax = (np.min(y), np.max(y)) if len(y) else (0, 1)
+
+ # to avoid issues with singular data, expand the min/max pairs
+ xmin, xmax = mtransforms.nonsingular(xmin, xmax, expander=0.1)
+ ymin, ymax = mtransforms.nonsingular(ymin, ymax, expander=0.1)
+
+ # In the x-direction, the hexagons exactly cover the region from
+ # xmin to xmax. Need some padding to avoid roundoff errors.
+ padding = 1.e-9 * (xmax - xmin)
+ xmin -= padding
+ xmax += padding
+ sx = (xmax - xmin) / nx
+ sy = (ymax - ymin) / ny
+
+ if marginals:
+ xorig = x.copy()
+ yorig = y.copy()
+
+ x = (x - xmin) / sx
+ y = (y - ymin) / sy
+ ix1 = np.round(x).astype(int)
+ iy1 = np.round(y).astype(int)
+ ix2 = np.floor(x).astype(int)
+ iy2 = np.floor(y).astype(int)
+
+ nx1 = nx + 1
+ ny1 = ny + 1
+ nx2 = nx
+ ny2 = ny
+ n = nx1 * ny1 + nx2 * ny2
+
+ d1 = (x - ix1) ** 2 + 3.0 * (y - iy1) ** 2
+ d2 = (x - ix2 - 0.5) ** 2 + 3.0 * (y - iy2 - 0.5) ** 2
+ bdist = (d1 < d2)
+ if C is None:
+ lattice1 = np.zeros((nx1, ny1))
+ lattice2 = np.zeros((nx2, ny2))
+
+ cond1 = (0 <= ix1) * (ix1 < nx1) * (0 <= iy1) * (iy1 < ny1)
+ cond2 = (0 <= ix2) * (ix2 < nx2) * (0 <= iy2) * (iy2 < ny2)
+
+ cond1 *= bdist
+ cond2 *= np.logical_not(bdist)
+ ix1, iy1 = ix1[cond1], iy1[cond1]
+ ix2, iy2 = ix2[cond2], iy2[cond2]
+
+ for ix, iy in zip(ix1, iy1):
+ lattice1[ix, iy] += 1
+ for ix, iy in zip(ix2, iy2):
+ lattice2[ix, iy] += 1
+
+ # threshold
+ if mincnt is not None:
+ lattice1[lattice1 < mincnt] = np.nan
+ lattice2[lattice2 < mincnt] = np.nan
+ accum = np.hstack((lattice1.ravel(),
+ lattice2.ravel()))
+ good_idxs = ~np.isnan(accum)
+
+ else:
+ if mincnt is None:
+ mincnt = 0
+
+ # create accumulation arrays
+ lattice1 = np.empty((nx1, ny1), dtype=object)
+ for i in xrange(nx1):
+ for j in xrange(ny1):
+ lattice1[i, j] = []
+ lattice2 = np.empty((nx2, ny2), dtype=object)
+ for i in xrange(nx2):
+ for j in xrange(ny2):
+ lattice2[i, j] = []
+
+ for i in xrange(len(x)):
+ if bdist[i]:
+ if 0 <= ix1[i] < nx1 and 0 <= iy1[i] < ny1:
+ lattice1[ix1[i], iy1[i]].append(C[i])
+ else:
+ if 0 <= ix2[i] < nx2 and 0 <= iy2[i] < ny2:
+ lattice2[ix2[i], iy2[i]].append(C[i])
+
+ for i in xrange(nx1):
+ for j in xrange(ny1):
+ vals = lattice1[i, j]
+ if len(vals) > mincnt:
+ lattice1[i, j] = reduce_C_function(vals)
+ else:
+ lattice1[i, j] = np.nan
+ for i in xrange(nx2):
+ for j in xrange(ny2):
+ vals = lattice2[i, j]
+ if len(vals) > mincnt:
+ lattice2[i, j] = reduce_C_function(vals)
+ else:
+ lattice2[i, j] = np.nan
+
+ accum = np.hstack((lattice1.astype(float).ravel(),
+ lattice2.astype(float).ravel()))
+ good_idxs = ~np.isnan(accum)
+
+ offsets = np.zeros((n, 2), float)
+ offsets[:nx1 * ny1, 0] = np.repeat(np.arange(nx1), ny1)
+ offsets[:nx1 * ny1, 1] = np.tile(np.arange(ny1), nx1)
+ offsets[nx1 * ny1:, 0] = np.repeat(np.arange(nx2) + 0.5, ny2)
+ offsets[nx1 * ny1:, 1] = np.tile(np.arange(ny2), nx2) + 0.5
+ offsets[:, 0] *= sx
+ offsets[:, 1] *= sy
+ offsets[:, 0] += xmin
+ offsets[:, 1] += ymin
+ # remove accumulation bins with no data
+ offsets = offsets[good_idxs, :]
+ accum = accum[good_idxs]
+
+ polygon = np.zeros((6, 2), float)
+ polygon[:, 0] = sx * np.array([0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
+ polygon[:, 1] = sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
+
+ if linewidths is None:
+ linewidths = [1.0]
+
+ if xscale == 'log' or yscale == 'log':
+ polygons = np.expand_dims(polygon, 0) + np.expand_dims(offsets, 1)
+ if xscale == 'log':
+ polygons[:, :, 0] = 10.0 ** polygons[:, :, 0]
+ xmin = 10.0 ** xmin
+ xmax = 10.0 ** xmax
+ self.set_xscale(xscale)
+ if yscale == 'log':
+ polygons[:, :, 1] = 10.0 ** polygons[:, :, 1]
+ ymin = 10.0 ** ymin
+ ymax = 10.0 ** ymax
+ self.set_yscale(yscale)
+ collection = mcoll.PolyCollection(
+ polygons,
+ edgecolors=edgecolors,
+ linewidths=linewidths,
+ )
+ else:
+ collection = mcoll.PolyCollection(
+ [polygon],
+ edgecolors=edgecolors,
+ linewidths=linewidths,
+ offsets=offsets,
+ transOffset=mtransforms.IdentityTransform(),
+ offset_position="data"
+ )
+
+ if isinstance(norm, mcolors.LogNorm):
+ if (accum == 0).any():
+ # make sure we have no zeros
+ accum += 1
+
+ # autoscale the norm with current accum values if it hasn't
+ # been set
+ if norm is not None:
+ if norm.vmin is None and norm.vmax is None:
+ norm.autoscale(accum)
+
+ # Transform accum if needed
+ if bins == 'log':
+ accum = np.log10(accum + 1)
+ elif bins is not None:
+ if not iterable(bins):
+ minimum, maximum = min(accum), max(accum)
+ bins -= 1 # one less edge than bins
+ bins = minimum + (maximum - minimum) * np.arange(bins) / bins
+ bins = np.sort(bins)
+ accum = bins.searchsorted(accum)
+
+ if norm is not None and not isinstance(norm, mcolors.Normalize):
+ raise ValueError(
+ "'norm' must be an instance of 'mcolors.Normalize'")
+ collection.set_array(accum)
+ collection.set_cmap(cmap)
+ collection.set_norm(norm)
+ collection.set_alpha(alpha)
+ collection.update(kwargs)
+
+ if vmin is not None or vmax is not None:
+ collection.set_clim(vmin, vmax)
+ else:
+ collection.autoscale_None()
+
+ corners = ((xmin, ymin), (xmax, ymax))
+ self.update_datalim(corners)
+ collection.sticky_edges.x[:] = [xmin, xmax]
+ collection.sticky_edges.y[:] = [ymin, ymax]
+ self.autoscale_view(tight=True)
+
+ # add the collection last
+ self.add_collection(collection, autolim=False)
+ if not marginals:
+ return collection
+
+ if C is None:
+ C = np.ones(len(x))
+
+ def coarse_bin(x, y, coarse):
+ ind = coarse.searchsorted(x).clip(0, len(coarse) - 1)
+ mus = np.zeros(len(coarse))
+ for i in range(len(coarse)):
+ yi = y[ind == i]
+ if len(yi) > 0:
+ mu = reduce_C_function(yi)
+ else:
+ mu = np.nan
+ mus[i] = mu
+ return mus
+
+ coarse = np.linspace(xmin, xmax, gridsize)
+
+ xcoarse = coarse_bin(xorig, C, coarse)
+ valid = ~np.isnan(xcoarse)
+ verts, values = [], []
+ for i, val in enumerate(xcoarse):
+ thismin = coarse[i]
+ if i < len(coarse) - 1:
+ thismax = coarse[i + 1]
+ else:
+ thismax = thismin + np.diff(coarse)[-1]
+
+ if not valid[i]:
+ continue
+
+ verts.append([(thismin, 0),
+ (thismin, 0.05),
+ (thismax, 0.05),
+ (thismax, 0)])
+ values.append(val)
+
+ values = np.array(values)
+ trans = self.get_xaxis_transform(which='grid')
+
+ hbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
+
+ hbar.set_array(values)
+ hbar.set_cmap(cmap)
+ hbar.set_norm(norm)
+ hbar.set_alpha(alpha)
+ hbar.update(kwargs)
+ self.add_collection(hbar, autolim=False)
+
+ coarse = np.linspace(ymin, ymax, gridsize)
+ ycoarse = coarse_bin(yorig, C, coarse)
+ valid = ~np.isnan(ycoarse)
+ verts, values = [], []
+ for i, val in enumerate(ycoarse):
+ thismin = coarse[i]
+ if i < len(coarse) - 1:
+ thismax = coarse[i + 1]
+ else:
+ thismax = thismin + np.diff(coarse)[-1]
+ if not valid[i]:
+ continue
+ verts.append([(0, thismin), (0.0, thismax),
+ (0.05, thismax), (0.05, thismin)])
+ values.append(val)
+
+ values = np.array(values)
+
+ trans = self.get_yaxis_transform(which='grid')
+
+ vbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
+ vbar.set_array(values)
+ vbar.set_cmap(cmap)
+ vbar.set_norm(norm)
+ vbar.set_alpha(alpha)
+ vbar.update(kwargs)
+ self.add_collection(vbar, autolim=False)
+
+ collection.hbar = hbar
+ collection.vbar = vbar
+
+ def on_changed(collection):
+ hbar.set_cmap(collection.get_cmap())
+ hbar.set_clim(collection.get_clim())
+ vbar.set_cmap(collection.get_cmap())
+ vbar.set_clim(collection.get_clim())
+
+ collection.callbacksSM.connect('changed', on_changed)
+
+ return collection
+
+ @docstring.dedent_interpd
+ def arrow(self, x, y, dx, dy, **kwargs):
+ """
+ Add an arrow to the axes.
+
+ This draws an arrow from ``(x, y)`` to ``(x+dx, y+dy)``.
+
+ Parameters
+ ----------
+ x, y : float
+ The x/y-coordinate of the arrow base.
+ dx, dy : float
+ The length of the arrow along x/y-direction.
+
+ Returns
+ -------
+ arrow : `.FancyArrow`
+ The created `.FancyArrow` object.
+
+ Other Parameters
+ ----------------
+ **kwargs
+ Optional kwargs (inherited from `.FancyArrow` patch) control the
+ arrow construction and properties:
+
+ %(FancyArrow)s
+
+ Notes
+ -----
+ The resulting arrow is affected by the axes aspect ratio and limits.
+ This may produce an arrow whose head is not square with its stem. To
+ create an arrow whose head is square with its stem,
+ use :meth:`annotate` for example:
+
+ >>> ax.annotate("", xy=(0.5, 0.5), xytext=(0, 0),
+ ... arrowprops=dict(arrowstyle="->"))
+
+ """
+ # Strip away units for the underlying patch since units
+ # do not make sense to most patch-like code
+ x = self.convert_xunits(x)
+ y = self.convert_yunits(y)
+ dx = self.convert_xunits(dx)
+ dy = self.convert_yunits(dy)
+
+ a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
+ self.add_artist(a)
+ return a
+
+ def quiverkey(self, *args, **kw):
+ qk = mquiver.QuiverKey(*args, **kw)
+ self.add_artist(qk)
+ return qk
+ quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
+
+ # Handle units for x and y, if they've been passed
+ def _quiver_units(self, args, kw):
+ if len(args) > 3:
+ x, y = args[0:2]
+ self._process_unit_info(xdata=x, ydata=y, kwargs=kw)
+ x = self.convert_xunits(x)
+ y = self.convert_yunits(y)
+ return (x, y) + args[2:]
+ return args
+
+ # args can be a combination of X, Y, U, V, C and all should be replaced
+ @_preprocess_data(replace_all_args=True, label_namer=None)
+ def quiver(self, *args, **kw):
+ if not self._hold:
+ self.cla()
+
+ # Make sure units are handled for x and y values
+ args = self._quiver_units(args, kw)
+
+ q = mquiver.Quiver(self, *args, **kw)
+
+ self.add_collection(q, autolim=True)
+ self.autoscale_view()
+ return q
+ quiver.__doc__ = mquiver.Quiver.quiver_doc
+
+ # args can be either Y or y1, y2, ... and all should be replaced
+ @_preprocess_data(replace_all_args=True, label_namer=None)
+ def stackplot(self, x, *args, **kwargs):
+ return mstack.stackplot(self, x, *args, **kwargs)
+ stackplot.__doc__ = mstack.stackplot.__doc__
+
+ @_preprocess_data(replace_names=["x", "y", "u", "v", "start_points"],
+ label_namer=None)
+ def streamplot(self, x, y, u, v, density=1, linewidth=None, color=None,
+ cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
+ minlength=0.1, transform=None, zorder=None,
+ start_points=None, maxlength=4.0,
+ integration_direction='both'):
+ if not self._hold:
+ self.cla()
+ stream_container = mstream.streamplot(
+ self, x, y, u, v,
+ density=density,
+ linewidth=linewidth,
+ color=color,
+ cmap=cmap,
+ norm=norm,
+ arrowsize=arrowsize,
+ arrowstyle=arrowstyle,
+ minlength=minlength,
+ start_points=start_points,
+ transform=transform,
+ zorder=zorder,
+ maxlength=maxlength,
+ integration_direction=integration_direction)
+ return stream_container
+ streamplot.__doc__ = mstream.streamplot.__doc__
+
+ # args can be some combination of X, Y, U, V, C and all should be replaced
+ @_preprocess_data(replace_all_args=True, label_namer=None)
+ @docstring.dedent_interpd
+ def barbs(self, *args, **kw):
+ """
+ %(barbs_doc)s
+ """
+ if not self._hold:
+ self.cla()
+
+ # Make sure units are handled for x and y values
+ args = self._quiver_units(args, kw)
+
+ b = mquiver.Barbs(self, *args, **kw)
+ self.add_collection(b, autolim=True)
+ self.autoscale_view()
+ return b
+
+ @_preprocess_data(replace_names=["x", "y"], label_namer=None,
+ positional_parameter_names=["x", "y", "c"])
+ def fill(self, *args, **kwargs):
+ """
+ Plot filled polygons.
+
+ Parameters
+ ----------
+ args : sequence of x, y, [color]
+ Each polygon is defined by the lists of *x* and *y* positions of
+ its nodes, optionally followed by a *color* specifier. See
+ :mod:`matplotlib.colors` for supported color specifiers. The
+ standard color cycle is used for polygons without a color
+ specifier.
+
+ You can plot multiple polygons by providing multiple *x*, *y*,
+ *[color]* groups.
+
+ For example, each of the following is legal::
+
+ ax.fill(x, y) # a polygon with default color
+ ax.fill(x, y, "b") # a blue polygon
+ ax.fill(x, y, x2, y2) # two polygons
+ ax.fill(x, y, "b", x2, y2, "r") # a blue and a red polygon
+
+ Returns
+ -------
+ a list of :class:`~matplotlib.patches.Polygon`
+
+ Other Parameters
+ ----------------
+ **kwargs : :class:`~matplotlib.patches.Polygon` properties
+
+ Notes
+ -----
+ Use :meth:`fill_between` if you would like to fill the region between
+ two curves.
+ """
+ if not self._hold:
+ self.cla()
+
+ kwargs = cbook.normalize_kwargs(kwargs, _alias_map)
+
+ patches = []
+ for poly in self._get_patches_for_fill(*args, **kwargs):
+ self.add_patch(poly)
+ patches.append(poly)
+ self.autoscale_view()
+ return patches
+
+ @_preprocess_data(replace_names=["x", "y1", "y2", "where"],
+ label_namer=None)
+ @docstring.dedent_interpd
+ def fill_between(self, x, y1, y2=0, where=None, interpolate=False,
+ step=None, **kwargs):
+ """
+ Fill the area between two horizontal curves.
+
+ The curves are defined by the points (*x*, *y1*) and (*x*, *y2*). This
+ creates one or multiple polygons describing the filled area.
+
+ You may exclude some horizontal sections from filling using *where*.
+
+ By default, the edges connect the given points directly. Use *step* if
+ the filling should be a step function, i.e. constant in between *x*.
+
+
+ Parameters
+ ----------
+ x : array (length N)
+ The x coordinates of the nodes defining the curves.
+
+ y1 : array (length N) or scalar
+ The y coordinates of the nodes defining the first curve.
+
+ y2 : array (length N) or scalar, optional, default: 0
+ The y coordinates of the nodes defining the second curve.
+
+ where : array of bool (length N), optional, default: None
+ Define *where* to exclude some horizontal regions from being
+ filled. The filled regions are defined by the coordinates
+ ``x[where]``. More precisely, fill between ``x[i]`` and ``x[i+1]``
+ if ``where[i] and where[i+1]``. Note that this definition implies
+ that an isolated *True* value between two *False* values in
+ *where* will not result in filling. Both sides of the *True*
+ position remain unfilled due to the adjacent *False* values.
+
+ interpolate : bool, optional
+ This option is only relevant if *where* is used and the two curves
+ are crossing each other.
+
+ Semantically, *where* is often used for *y1* > *y2* or similar.
+ By default, the nodes of the polygon defining the filled region
+ will only be placed at the positions in the *x* array. Such a
+ polygon cannot describe the above semantics close to the
+ intersection. The x-sections containing the intersection are
+ simply clipped.
+
+ Setting *interpolate* to *True* will calculate the actual
+ intersection point and extend the filled region up to this point.
+
+ step : {'pre', 'post', 'mid'}, optional
+ Define *step* if the filling should be a step function,
+ i.e. constant in between *x*. The value determines where the
+ step will occur:
+
+ - 'pre': The y value is continued constantly to the left from
+ every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the
+ value ``y[i]``.
+ - 'post': The y value is continued constantly to the right from
+ every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the
+ value ``y[i]``.
+ - 'mid': Steps occur half-way between the *x* positions.
+
+ Other Parameters
+ ----------------
+ **kwargs
+ All other keyword arguments are passed on to `.PolyCollection`.
+ They control the `.Polygon` properties:
+
+ %(PolyCollection)s
+
+ Returns
+ -------
+ `.PolyCollection`
+ A `.PolyCollection` containing the plotted polygons.
+
+ See Also
+ --------
+ fill_betweenx : Fill between two sets of x-values.
+
+ Notes
+ -----
+ .. [notes section required to get data note injection right]
+
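+ For example, a minimal sketch excluding regions via *where* and closing
+ the crossing via *interpolate* (illustrative only; assumes an existing
+ Axes ``ax`` and ``import numpy as np``):
+
+ >>> x = np.linspace(0, 2 * np.pi, 200)   # hypothetical sample data
+ >>> y1, y2 = np.sin(x), np.cos(x)
+ >>> ax.fill_between(x, y1, y2, where=y1 > y2,
+ ...                 interpolate=True, alpha=0.3)
+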
+ """
+ if not rcParams['_internal.classic_mode']:
+ color_aliases = mcoll._color_aliases
+ kwargs = cbook.normalize_kwargs(kwargs, color_aliases)
+
+ if not any(c in kwargs for c in ('color', 'facecolors')):
+ fc = self._get_patches_for_fill.get_next_color()
+ kwargs['facecolors'] = fc
+
+ # Handle united data, such as dates
+ self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
+ self._process_unit_info(ydata=y2)
+
+ # Convert the arrays so we can work with them
+ x = ma.masked_invalid(self.convert_xunits(x))
+ y1 = ma.masked_invalid(self.convert_yunits(y1))
+ y2 = ma.masked_invalid(self.convert_yunits(y2))
+
+ for name, array in [('x', x), ('y1', y1), ('y2', y2)]:
+ if array.ndim > 1:
+ raise ValueError('Input passed into argument "%r" ' % name +
+ 'is not 1-dimensional.')
+
+ if where is None:
+ where = True
+ where = where & ~functools.reduce(np.logical_or,
+ map(np.ma.getmask, [x, y1, y2]))
+
+ x, y1, y2 = np.broadcast_arrays(np.atleast_1d(x), y1, y2)
+
+ polys = []
+ for ind0, ind1 in cbook.contiguous_regions(where):
+ xslice = x[ind0:ind1]
+ y1slice = y1[ind0:ind1]
+ y2slice = y2[ind0:ind1]
+ if step is not None:
+ step_func = STEP_LOOKUP_MAP["steps-" + step]
+ xslice, y1slice, y2slice = step_func(xslice, y1slice, y2slice)
+
+ if not len(xslice):
+ continue
+
+ N = len(xslice)
+ X = np.zeros((2 * N + 2, 2), float)
+
+ if interpolate:
+ def get_interp_point(ind):
+ im1 = max(ind - 1, 0)
+ x_values = x[im1:ind + 1]
+ diff_values = y1[im1:ind + 1] - y2[im1:ind + 1]
+ y1_values = y1[im1:ind + 1]
+
+ if len(diff_values) == 2:
+ if np.ma.is_masked(diff_values[1]):
+ return x[im1], y1[im1]
+ elif np.ma.is_masked(diff_values[0]):
+ return x[ind], y1[ind]
+
+ diff_order = diff_values.argsort()
+ diff_root_x = np.interp(
+ 0, diff_values[diff_order], x_values[diff_order])
+ x_order = x_values.argsort()
+ diff_root_y = np.interp(diff_root_x, x_values[x_order],
+ y1_values[x_order])
+ return diff_root_x, diff_root_y
+
+ start = get_interp_point(ind0)
+ end = get_interp_point(ind1)
+ else:
+ # the purpose of the next two lines is for when y2 is a
+ # scalar like 0 and we want the fill to go all the way
+ # down to 0 even if none of the y1 sample points do
+ start = xslice[0], y2slice[0]
+ end = xslice[-1], y2slice[-1]
+
+ X[0] = start
+ X[N + 1] = end
+
+ X[1:N + 1, 0] = xslice
+ X[1:N + 1, 1] = y1slice
+ X[N + 2:, 0] = xslice[::-1]
+ X[N + 2:, 1] = y2slice[::-1]
+
+ polys.append(X)
+
+ collection = mcoll.PolyCollection(polys, **kwargs)
+
+ # now update the datalim and autoscale
+ XY1 = np.array([x[where], y1[where]]).T
+ XY2 = np.array([x[where], y2[where]]).T
+ self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
+ updatex=True, updatey=True)
+ self.ignore_existing_data_limits = False
+ self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
+ updatex=False, updatey=True)
+ self.add_collection(collection, autolim=False)
+ self.autoscale_view()
+ return collection
+
+ @_preprocess_data(replace_names=["y", "x1", "x2", "where"],
+ label_namer=None)
+ @docstring.dedent_interpd
+ def fill_betweenx(self, y, x1, x2=0, where=None,
+ step=None, interpolate=False, **kwargs):
+ """
+ Fill the area between two vertical curves.
+
+ The curves are defined by the points (*x1*, *y*) and (*x2*, *y*). This
+ creates one or multiple polygons describing the filled area.
+
+ You may exclude some vertical sections from filling using *where*.
+
+ By default, the edges connect the given points directly. Use *step* if
+ the filling should be a step function, i.e. constant in between *y*.
+
+
+ Parameters
+ ----------
+ y : array (length N)
+ The y coordinates of the nodes defining the curves.
+
+ x1 : array (length N) or scalar
+ The x coordinates of the nodes defining the first curve.
+
+ x2 : array (length N) or scalar, optional, default: 0
+ The x coordinates of the nodes defining the second curve.
+
+ where : array of bool (length N), optional, default: None
+ Define *where* to exclude some vertical regions from being
+ filled. The filled regions are defined by the coordinates
+ ``y[where]``. More precisely, fill between ``y[i]`` and ``y[i+1]``
+ if ``where[i] and where[i+1]``. Note that this definition implies
+ that an isolated *True* value between two *False* values in
+ *where* will not result in filling. Both sides of the *True*
+ position remain unfilled due to the adjacent *False* values.
+
+ interpolate : bool, optional
+ This option is only relevant if *where* is used and the two curves
+ are crossing each other.
+
+ Semantically, *where* is often used for *x1* > *x2* or similar.
+ By default, the nodes of the polygon defining the filled region
+ will only be placed at the positions in the *y* array. Such a
+ polygon cannot describe the above semantics close to the
+ intersection. The y-sections containing the intersection are
+ simply clipped.
+
+ Setting *interpolate* to *True* will calculate the actual
+ intersection point and extend the filled region up to this point.
+
+ step : {'pre', 'post', 'mid'}, optional
+ Define *step* if the filling should be a step function,
+ i.e. constant in between *y*. The value determines where the
+ step will occur:
+
+ - 'pre': The x value is continued constantly toward smaller *y*,
+ i.e. the interval ``(y[i-1], y[i]]`` has the value ``x[i]``.
+ - 'post': The x value is continued constantly toward larger *y*,
+ i.e. the interval ``[y[i], y[i+1])`` has the value ``x[i]``.
+ - 'mid': Steps occur half-way between the *y* positions.
+
+ Other Parameters
+ ----------------
+ **kwargs
+ All other keyword arguments are passed on to `.PolyCollection`.
+ They control the `.Polygon` properties:
+
+ %(PolyCollection)s
+
+ Returns
+ -------
+ `.PolyCollection`
+ A `.PolyCollection` containing the plotted polygons.
+
+ See Also
+ --------
+ fill_between : Fill between two sets of y-values.
+
+ Notes
+ -----
+ .. [notes section required to get data note injection right]
+
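+ For example, a minimal sketch of the vertical variant (illustrative
+ only; assumes an existing Axes ``ax`` and ``import numpy as np``):
+
+ >>> y = np.linspace(0, 2 * np.pi, 200)   # hypothetical sample data
+ >>> x1, x2 = np.sin(y), np.cos(y)
+ >>> ax.fill_betweenx(y, x1, x2, where=x1 > x2)
+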
+ """
+ if not rcParams['_internal.classic_mode']:
+ color_aliases = mcoll._color_aliases
+ kwargs = cbook.normalize_kwargs(kwargs, color_aliases)
+
+ if not any(c in kwargs for c in ('color', 'facecolors')):
+ fc = self._get_patches_for_fill.get_next_color()
+ kwargs['facecolors'] = fc
+ # Handle united data, such as dates
+ self._process_unit_info(ydata=y, xdata=x1, kwargs=kwargs)
+ self._process_unit_info(xdata=x2)
+
+ # Convert the arrays so we can work with them
+ y = ma.masked_invalid(self.convert_yunits(y))
+ x1 = ma.masked_invalid(self.convert_xunits(x1))
+ x2 = ma.masked_invalid(self.convert_xunits(x2))
+
+ for name, array in [('y', y), ('x1', x1), ('x2', x2)]:
+ if array.ndim > 1:
+ raise ValueError('Input passed into argument "%r" ' % name +
+ 'is not 1-dimensional.')
+
+ if where is None:
+ where = True
+ where = where & ~functools.reduce(np.logical_or,
+ map(np.ma.getmask, [y, x1, x2]))
+
+ y, x1, x2 = np.broadcast_arrays(np.atleast_1d(y), x1, x2)
+
+ polys = []
+ for ind0, ind1 in cbook.contiguous_regions(where):
+ yslice = y[ind0:ind1]
+ x1slice = x1[ind0:ind1]
+ x2slice = x2[ind0:ind1]
+ if step is not None:
+ step_func = STEP_LOOKUP_MAP["steps-" + step]
+ yslice, x1slice, x2slice = step_func(yslice, x1slice, x2slice)
+
+ if not len(yslice):
+ continue
+
+ N = len(yslice)
+ Y = np.zeros((2 * N + 2, 2), float)
+ if interpolate:
+ def get_interp_point(ind):
+ im1 = max(ind - 1, 0)
+ y_values = y[im1:ind + 1]
+ diff_values = x1[im1:ind + 1] - x2[im1:ind + 1]
+ x1_values = x1[im1:ind + 1]
+
+ if len(diff_values) == 2:
+ if np.ma.is_masked(diff_values[1]):
+ return x1[im1], y[im1]
+ elif np.ma.is_masked(diff_values[0]):
+ return x1[ind], y[ind]
+
+ diff_order = diff_values.argsort()
+ diff_root_y = np.interp(
+ 0, diff_values[diff_order], y_values[diff_order])
+ y_order = y_values.argsort()
+ diff_root_x = np.interp(diff_root_y, y_values[y_order],
+ x1_values[y_order])
+ return diff_root_x, diff_root_y
+
+ start = get_interp_point(ind0)
+ end = get_interp_point(ind1)
+ else:
+ # the purpose of the next two lines is for when x2 is a
+ # scalar like 0 and we want the fill to go all the way
+ # down to 0 even if none of the x1 sample points do
+ start = x2slice[0], yslice[0]
+ end = x2slice[-1], yslice[-1]
+
+ Y[0] = start
+ Y[N + 1] = end
+
+ Y[1:N + 1, 0] = x1slice
+ Y[1:N + 1, 1] = yslice
+ Y[N + 2:, 0] = x2slice[::-1]
+ Y[N + 2:, 1] = yslice[::-1]
+
+ polys.append(Y)
+
+ collection = mcoll.PolyCollection(polys, **kwargs)
+
+ # now update the datalim and autoscale
+ X1Y = np.array([x1[where], y[where]]).T
+ X2Y = np.array([x2[where], y[where]]).T
+ self.dataLim.update_from_data_xy(X1Y, self.ignore_existing_data_limits,
+ updatex=True, updatey=True)
+ self.ignore_existing_data_limits = False
+ self.dataLim.update_from_data_xy(X2Y, self.ignore_existing_data_limits,
+ updatex=True, updatey=False)
+ self.add_collection(collection, autolim=False)
+ self.autoscale_view()
+ return collection
+
+ #### plotting z(x,y): imshow, pcolor and relatives, contour
+ @_preprocess_data(label_namer=None)
+ def imshow(self, X, cmap=None, norm=None, aspect=None,
+ interpolation=None, alpha=None, vmin=None, vmax=None,
+ origin=None, extent=None, shape=None, filternorm=1,
+ filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
+ """
+ Display an image on the axes.
+
+ Parameters
+ ----------
+ X : array_like, shape (n, m) or (n, m, 3) or (n, m, 4)
+ Display the image in `X` to current axes. `X` may be an
+ array or a PIL image. If `X` is an array, it
+ can have the following shapes and types:
+
+ - MxN -- values to be mapped (float or int)
+ - MxNx3 -- RGB (float or uint8)
+ - MxNx4 -- RGBA (float or uint8)
+
+ MxN arrays are mapped to colors based on the `norm` (mapping
+ scalar to scalar) and the `cmap` (mapping the normed scalar to
+ a color).
+
+ Elements of RGB and RGBA arrays represent pixels of an MxN image.
+ All values should be in the range [0 .. 1] for floats or
+ [0 .. 255] for integers. Out-of-range values will be clipped to
+ these bounds.
+
+ cmap : `~matplotlib.colors.Colormap`, optional, default: None
+ If None, default to rc `image.cmap` value. `cmap` is ignored
+ if `X` is 3-D, directly specifying RGB(A) values.
+
+ aspect : ['auto' | 'equal' | scalar], optional, default: None
+ If 'auto', changes the image aspect ratio to match that of the
+ axes.
+
+ If 'equal', and `extent` is None, changes the axes aspect ratio to
+ match that of the image. If `extent` is not `None`, the axes
+ aspect ratio is changed to match that of the extent.
+
+ If None, default to rc ``image.aspect`` value.
+
+ interpolation : string, optional, default: None
+ Acceptable values are 'none', 'nearest', 'bilinear', 'bicubic',
+ 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser',
+ 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc',
+ 'lanczos'
+
+ If `interpolation` is None, default to rc `image.interpolation`.
+ See also the `filternorm` and `filterrad` parameters.
+ If `interpolation` is 'none', then no interpolation is performed
+ on the Agg, ps and pdf backends. Other backends will fall back to
+ 'nearest'.
+
+ norm : `~matplotlib.colors.Normalize`, optional, default: None
+ A `~matplotlib.colors.Normalize` instance is used to scale
+ a 2-D float `X` input to the (0, 1) range for input to the
+ `cmap`. If `norm` is None, use the default :func:`normalize`.
+ If `norm` is an instance of `~matplotlib.colors.NoNorm`,
+ `X` must be an array of integers that index directly into
+ the lookup table of the `cmap`.
+
+ vmin, vmax : scalar, optional, default: None
+ `vmin` and `vmax` are used in conjunction with norm to normalize
+ luminance data. Note if you pass a `norm` instance, your
+ settings for `vmin` and `vmax` will be ignored.
+
+ alpha : scalar, optional, default: None
+ The alpha blending value, between 0 (transparent) and 1 (opaque).
+ The ``alpha`` argument is ignored for RGBA input data.
+
+ origin : ['upper' | 'lower'], optional, default: None
+ Place the [0,0] index of the array in the upper left or lower left
+ corner of the axes. If None, default to rc `image.origin`.
+
+ extent : scalars (left, right, bottom, top), optional, default: None
+ The location, in data-coordinates, of the lower-left and
+ upper-right corners. If `None`, the image is positioned such that
+ the pixel centers fall on zero-based (row, column) indices.
+
+ shape : scalars (columns, rows), optional, default: None
+ For raw buffer images
+
+ filternorm : scalar, optional, default: 1
+ A parameter for the antigrain image resize filter. From the
+ antigrain documentation, if `filternorm` = 1, the filter
+ normalizes integer values and corrects the rounding errors. It
+ doesn't do anything with the source floating point values, it
+ corrects only integers according to the rule of 1.0 which means
+ that any sum of pixel weights must be equal to 1.0. So, the
+ filter function must produce a graph of the proper shape.
+
+ filterrad : scalar, optional, default: 4.0
+ The filter radius for filters that have a radius parameter, i.e.
+ when interpolation is one of: 'sinc', 'lanczos' or 'blackman'
+
+ Returns
+ -------
+ image : `~matplotlib.image.AxesImage`
+
+ Other Parameters
+ ----------------
+ **kwargs : `~matplotlib.artist.Artist` properties.
+
+ See also
+ --------
+ matshow : Plot a matrix or an array as an image.
+
+ Notes
+ -----
+ Unless *extent* is used, pixel centers will be located at integer
+ coordinates. In other words: the origin will coincide with the center
+ of pixel (0, 0).
+
+ Two typical representations are used for RGB images with an alpha
+ channel:
+
+ - Straight (unassociated) alpha: R, G, and B channels represent the
+ color of the pixel, disregarding its opacity.
+ - Premultiplied (associated) alpha: R, G, and B channels represent
+ the color of the pixel, adjusted for its opacity by multiplication.
+
+ `~matplotlib.pyplot.imshow` expects RGB images adopting the straight
+ (unassociated) alpha representation.
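+
+ A minimal usage sketch (illustrative only; assumes an existing Axes
+ ``ax`` and ``import numpy as np``):
+
+ >>> data = np.random.rand(16, 16)        # hypothetical 2-D scalar data
+ >>> im = ax.imshow(data, cmap='viridis', origin='lower',
+ ...                extent=(0, 1, 0, 1), interpolation='nearest')
+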
+ """
+
+ if not self._hold:
+ self.cla()
+
+ if norm is not None and not isinstance(norm, mcolors.Normalize):
+ raise ValueError(
+ "'norm' must be an instance of 'mcolors.Normalize'")
+ if aspect is None:
+ aspect = rcParams['image.aspect']
+ self.set_aspect(aspect)
+ im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
+ filternorm=filternorm, filterrad=filterrad,
+ resample=resample, **kwargs)
+
+ im.set_data(X)
+ im.set_alpha(alpha)
+ if im.get_clip_path() is None:
+ # image does not already have clipping set, clip to axes patch
+ im.set_clip_path(self.patch)
+ #if norm is None and shape is None:
+ # im.set_clim(vmin, vmax)
+ if vmin is not None or vmax is not None:
+ im.set_clim(vmin, vmax)
+ else:
+ im.autoscale_None()
+ im.set_url(url)
+
+ # update ax.dataLim, and, if autoscaling, set viewLim
+ # to tightly fit the image, regardless of dataLim.
+ im.set_extent(im.get_extent())
+
+ self.add_image(im)
+ return im
+
+ @staticmethod
+ def _pcolorargs(funcname, *args, **kw):
+ # This takes one kwarg, allmatch.
+ # If allmatch is True, then the incoming X, Y, C must
+ # have matching dimensions, taking into account that
+ # X and Y can be 1-D rather than 2-D. This perfect
+ match is required for Gouraud shading. For flat
+ # shading, X and Y specify boundaries, so we need
+ # one more boundary than color in each direction.
+ # For convenience, and consistent with Matlab, we
+ # discard the last row and/or column of C if necessary
+ # to meet this condition. This is done if allmatch
+ # is False.
+
+ allmatch = kw.pop("allmatch", False)
+
+ if len(args) == 1:
+ C = np.asanyarray(args[0])
+ numRows, numCols = C.shape
+ if allmatch:
+ X, Y = np.meshgrid(np.arange(numCols), np.arange(numRows))
+ else:
+ X, Y = np.meshgrid(np.arange(numCols + 1),
+ np.arange(numRows + 1))
+ C = cbook.safe_masked_invalid(C)
+ return X, Y, C
+
+ if len(args) == 3:
+ # Check x and y for bad data...
+ C = np.asanyarray(args[2])
+ X, Y = [cbook.safe_masked_invalid(a) for a in args[:2]]
+ if funcname == 'pcolormesh':
+ if np.ma.is_masked(X) or np.ma.is_masked(Y):
+ raise ValueError(
+ 'x and y arguments to pcolormesh cannot have '
+ 'non-finite values or be of type '
+ 'numpy.ma.core.MaskedArray with masked values')
+ # safe_masked_invalid() returns an ndarray for dtypes other
+ # than floating point.
+ if isinstance(X, np.ma.core.MaskedArray):
+ X = X.data # strip mask as downstream doesn't like it...
+ if isinstance(Y, np.ma.core.MaskedArray):
+ Y = Y.data
+ numRows, numCols = C.shape
+ else:
+ raise TypeError(
+ 'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
+
+ Nx = X.shape[-1]
+ Ny = Y.shape[0]
+ if X.ndim != 2 or X.shape[0] == 1:
+ x = X.reshape(1, Nx)
+ X = x.repeat(Ny, axis=0)
+ if Y.ndim != 2 or Y.shape[1] == 1:
+ y = Y.reshape(Ny, 1)
+ Y = y.repeat(Nx, axis=1)
+ if X.shape != Y.shape:
+ raise TypeError(
+ 'Incompatible X, Y inputs to %s; see help(%s)' % (
+ funcname, funcname))
+ if allmatch:
+ if not (Nx == numCols and Ny == numRows):
+ raise TypeError('Dimensions of C %s are incompatible with'
+ ' X (%d) and/or Y (%d); see help(%s)' % (
+ C.shape, Nx, Ny, funcname))
+ else:
+ if not (numCols in (Nx, Nx - 1) and numRows in (Ny, Ny - 1)):
+ raise TypeError('Dimensions of C %s are incompatible with'
+ ' X (%d) and/or Y (%d); see help(%s)' % (
+ C.shape, Nx, Ny, funcname))
+ C = C[:Ny - 1, :Nx - 1]
+ C = cbook.safe_masked_invalid(C)
+ return X, Y, C
+
+ @_preprocess_data(label_namer=None)
+ @docstring.dedent_interpd
+ def pcolor(self, *args, **kwargs):
+ r"""
+ Create a pseudocolor plot with a non-regular rectangular grid.
+
+ Call signature::
+
+ pcolor([X, Y,] C, **kwargs)
+
+ *X* and *Y* can be used to specify the corners of the quadrilaterals.
+
+ .. hint::
+
+ ``pcolor()`` can be very slow for large arrays. In most
+ cases you should use the similar but much faster
+ `~.Axes.pcolormesh` instead. See there for a discussion of the
+ differences.
+
+ Parameters
+ ----------
+ C : array_like
+ A scalar 2-D array. The values will be color-mapped.
+
+ X, Y : array_like, optional
+ The coordinates of the quadrilateral corners. The quadrilateral
+ for ``C[i,j]`` has corners at::
+
+ (X[i+1, j], Y[i+1, j]) (X[i+1, j+1], Y[i+1, j+1])
+ +--------+
+ | C[i,j] |
+ +--------+
+ (X[i, j], Y[i, j]) (X[i, j+1], Y[i, j+1]),
+
+ Note that the column index corresponds to the
+ x-coordinate, and the row index corresponds to y. For
+ details, see the :ref:`Notes <axes-pcolor-grid-orientation>`
+ section below.
+
+ The dimensions of *X* and *Y* should be one greater than those of
+ *C*. Alternatively, *X*, *Y* and *C* may have equal dimensions, in
+ which case the last row and column of *C* will be ignored.
+
+ If *X* and/or *Y* are 1-D arrays or column vectors they will be
+ expanded as needed into the appropriate 2-D arrays, making a
+ rectangular grid.
+
+ cmap : str or `~matplotlib.colors.Colormap`, optional
+ A Colormap instance or registered colormap name. The colormap
+ maps the *C* values to colors. Defaults to :rc:`image.cmap`.
+
+ norm : `~matplotlib.colors.Normalize`, optional
+ The Normalize instance scales the data values to the canonical
+ colormap range [0, 1] for mapping to colors. By default, the data
+ range is mapped to the colorbar range using linear scaling.
+
+ vmin, vmax : scalar, optional, default: None
+ The colorbar range. If *None*, suitable min/max values are
+ automatically chosen by the `~.Normalize` instance (defaults to
+ the respective min/max values of *C* in case of the default linear
+ scaling).
+
+ edgecolors : {'none', None, 'face', color, color sequence}, optional
+ The color of the edges. Defaults to 'none'. Possible values:
+
+ - 'none' or '': No edge.
+ - *None*: :rc:`patch.edgecolor` will be used. Note that currently
+ :rc:`patch.force_edgecolor` has to be True for this to work.
+ - 'face': Use the adjacent face color.
+ - An mpl color or sequence of colors will set the edge color.
+
+ The singular form *edgecolor* works as an alias.
+
+ alpha : scalar, optional, default: None
+ The alpha blending value of the face color, between 0 (transparent)
+ and 1 (opaque). Note: The edgecolor is currently not affected by
+ this.
+
+ snap : bool, optional, default: False
+ Whether to snap the mesh to pixel boundaries.
+
+ Returns
+ -------
+ collection : `matplotlib.collections.Collection`
+
+ Other Parameters
+ ----------------
+ antialiaseds : bool, optional, default: False
+ The default *antialiaseds* is False if the default
+ *edgecolors*\ ="none" is used. This eliminates artificial lines
+ at patch boundaries, and works regardless of the value of alpha.
+ If *edgecolors* is not "none", then the default *antialiaseds*
+ is taken from :rc:`patch.antialiased`, which defaults to True.
+ Stroking the edges may be preferred if *alpha* is 1, but will
+ cause artifacts otherwise.
+
+ **kwargs :
+ Additionally, the following arguments are allowed. They are passed
+ along to the `~matplotlib.collections.PolyCollection` constructor:
+
+ %(PolyCollection)s
+
+ See Also
+ --------
+ pcolormesh : for an explanation of the differences between
+ pcolor and pcolormesh.
+ imshow : If *X* and *Y* are each equidistant, `~.Axes.imshow` can be a
+ faster alternative.
+
+ Notes
+ -----
+
+ **Masked arrays**
+
+ *X*, *Y* and *C* may be masked arrays. If either ``C[i, j]``, or one
+ of the vertices surrounding ``C[i,j]`` (*X* or *Y* at
+ ``[i, j], [i+1, j], [i, j+1], [i+1, j+1]``) is masked, nothing is
+ plotted.
+
+ .. _axes-pcolor-grid-orientation:
+
+ **Grid orientation**
+
+ The grid orientation follows the standard matrix convention: An array
+ *C* with shape (nrows, ncolumns) is plotted with the column number as
+ *X* and the row number as *Y*.
+
+ **Handling of pcolor() end-cases**
+
+ ``pcolor()`` displays all columns of *C* if *X* and *Y* are not
+ specified, or if *X* and *Y* have one more column than *C*.
+ If *X* and *Y* have the same number of columns as *C* then the last
+ column of *C* is dropped. Similarly for the rows.
+
+ Note: This behavior is different from MATLAB's ``pcolor()``, which
+ always discards the last row and column of *C*.
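+
+ A minimal usage sketch (illustrative only; assumes an existing Axes
+ ``ax`` and ``import numpy as np``; note the boundary arrays are one
+ element longer than the corresponding dimension of *C*):
+
+ >>> x = np.arange(5)                     # 4 cells along x
+ >>> y = np.arange(4)                     # 3 cells along y
+ >>> C = np.random.rand(3, 4)             # hypothetical cell values
+ >>> ax.pcolor(x, y, C, edgecolors='k', linewidths=0.5)
+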
+ """
+
+ if not self._hold:
+ self.cla()
+
+ alpha = kwargs.pop('alpha', None)
+ norm = kwargs.pop('norm', None)
+ cmap = kwargs.pop('cmap', None)
+ vmin = kwargs.pop('vmin', None)
+ vmax = kwargs.pop('vmax', None)
+
+ X, Y, C = self._pcolorargs('pcolor', *args, allmatch=False)
+ Ny, Nx = X.shape
+
+ # unit conversion allows e.g. datetime objects as axis values
+ self._process_unit_info(xdata=X, ydata=Y, kwargs=kwargs)
+ X = self.convert_xunits(X)
+ Y = self.convert_yunits(Y)
+
+ # convert to MA, if necessary.
+ C = ma.asarray(C)
+ X = ma.asarray(X)
+ Y = ma.asarray(Y)
+
+ mask = ma.getmaskarray(X) + ma.getmaskarray(Y)
+ xymask = (mask[0:-1, 0:-1] + mask[1:, 1:] +
+ mask[0:-1, 1:] + mask[1:, 0:-1])
+ # don't plot if C or any of the surrounding vertices are masked.
+ mask = ma.getmaskarray(C) + xymask
+
+ newaxis = np.newaxis
+ compress = np.compress
+
+ ravelmask = (mask == 0).ravel()
+ X1 = compress(ravelmask, ma.filled(X[0:-1, 0:-1]).ravel())
+ Y1 = compress(ravelmask, ma.filled(Y[0:-1, 0:-1]).ravel())
+ X2 = compress(ravelmask, ma.filled(X[1:, 0:-1]).ravel())
+ Y2 = compress(ravelmask, ma.filled(Y[1:, 0:-1]).ravel())
+ X3 = compress(ravelmask, ma.filled(X[1:, 1:]).ravel())
+ Y3 = compress(ravelmask, ma.filled(Y[1:, 1:]).ravel())
+ X4 = compress(ravelmask, ma.filled(X[0:-1, 1:]).ravel())
+ Y4 = compress(ravelmask, ma.filled(Y[0:-1, 1:]).ravel())
+ npoly = len(X1)
+
+ xy = np.concatenate((X1[:, newaxis], Y1[:, newaxis],
+ X2[:, newaxis], Y2[:, newaxis],
+ X3[:, newaxis], Y3[:, newaxis],
+ X4[:, newaxis], Y4[:, newaxis],
+ X1[:, newaxis], Y1[:, newaxis]),
+ axis=1)
+ verts = xy.reshape((npoly, 5, 2))
+
+ C = compress(ravelmask, ma.filled(C[0:Ny - 1, 0:Nx - 1]).ravel())
+
+ linewidths = (0.25,)
+ if 'linewidth' in kwargs:
+ kwargs['linewidths'] = kwargs.pop('linewidth')
+ kwargs.setdefault('linewidths', linewidths)
+
+ if 'edgecolor' in kwargs:
+ kwargs['edgecolors'] = kwargs.pop('edgecolor')
+ ec = kwargs.setdefault('edgecolors', 'none')
+
+ # aa setting will default via collections to patch.antialiased
+ # unless the boundary is not stroked, in which case the
+ # default will be False; with unstroked boundaries, aa
+ # makes artifacts that are often disturbing.
+ if 'antialiased' in kwargs:
+ kwargs['antialiaseds'] = kwargs.pop('antialiased')
+ if 'antialiaseds' not in kwargs and (
+ isinstance(ec, six.string_types) and ec.lower() == "none"):
+ kwargs['antialiaseds'] = False
+
+ kwargs.setdefault('snap', False)
+
+ collection = mcoll.PolyCollection(verts, **kwargs)
+
+ collection.set_alpha(alpha)
+ collection.set_array(C)
+ if norm is not None and not isinstance(norm, mcolors.Normalize):
+ raise ValueError(
+ "'norm' must be an instance of 'mcolors.Normalize'")
+ collection.set_cmap(cmap)
+ collection.set_norm(norm)
+ collection.set_clim(vmin, vmax)
+ collection.autoscale_None()
+ self.grid(False)
+
+ x = X.compressed()
+ y = Y.compressed()
+
+ # Transform from native to data coordinates?
+ t = collection._transform
+ if (not isinstance(t, mtransforms.Transform) and
+ hasattr(t, '_as_mpl_transform')):
+ t = t._as_mpl_transform(self.axes)
+
+ if t and any(t.contains_branch_seperately(self.transData)):
+ trans_to_data = t - self.transData
+ pts = np.vstack([x, y]).T.astype(float)
+ transformed_pts = trans_to_data.transform(pts)
+ x = transformed_pts[..., 0]
+ y = transformed_pts[..., 1]
+
+ self.add_collection(collection, autolim=False)
+
+ minx = np.min(x)
+ maxx = np.max(x)
+ miny = np.min(y)
+ maxy = np.max(y)
+ collection.sticky_edges.x[:] = [minx, maxx]
+ collection.sticky_edges.y[:] = [miny, maxy]
+ corners = (minx, miny), (maxx, maxy)
+ self.update_datalim(corners)
+ self.autoscale_view()
+ return collection
+
+ @_preprocess_data(label_namer=None)
+ @docstring.dedent_interpd
+ def pcolormesh(self, *args, **kwargs):
+ """
+ Create a pseudocolor plot with a non-regular rectangular grid.
+
+ Call signature::
+
+ pcolormesh([X, Y,] C, **kwargs)
+
+ *X* and *Y* can be used to specify the corners of the quadrilaterals.
+
+ .. note::
+
+ ``pcolormesh()`` is similar to :func:`~Axes.pcolor`. It's much
+ faster and preferred in most cases. For a detailed discussion on
+ the differences see
+ :ref:`Differences between pcolor() and pcolormesh()
+ <differences-pcolor-pcolormesh>`.
+
+ Parameters
+ ----------
+ C : array_like
+ A scalar 2-D array. The values will be color-mapped.
+
+ X, Y : array_like, optional
+ The coordinates of the quadrilateral corners. The quadrilateral
+ for ``C[i,j]`` has corners at::
+
+ (X[i+1, j], Y[i+1, j]) (X[i+1, j+1], Y[i+1, j+1])
+ +--------+
+ | C[i,j] |
+ +--------+
+ (X[i, j], Y[i, j]) (X[i, j+1], Y[i, j+1]),
+
+ Note that the column index corresponds to the
+ x-coordinate, and the row index corresponds to y. For
+ details, see the :ref:`Notes <axes-pcolormesh-grid-orientation>`
+ section below.
+
+ The dimensions of *X* and *Y* should be one greater than those of
+ *C*. Alternatively, *X*, *Y* and *C* may have equal dimensions, in
+ which case the last row and column of *C* will be ignored.
+
+ If *X* and/or *Y* are 1-D arrays or column vectors they will be
+ expanded as needed into the appropriate 2-D arrays, making a
+ rectangular grid.
+
+ cmap : str or `~matplotlib.colors.Colormap`, optional
+ A Colormap instance or registered colormap name. The colormap
+ maps the *C* values to colors. Defaults to :rc:`image.cmap`.
+
+ norm : `~matplotlib.colors.Normalize`, optional
+ The Normalize instance scales the data values to the canonical
+ colormap range [0, 1] for mapping to colors. By default, the data
+ range is mapped to the colorbar range using linear scaling.
+
+ vmin, vmax : scalar, optional, default: None
+ The colorbar range. If *None*, suitable min/max values are
+ automatically chosen by the `~.Normalize` instance (defaults to
+ the respective min/max values of *C* in case of the default linear
+ scaling).
+
+ edgecolors : {'none', None, 'face', color, color sequence}, optional
+ The color of the edges. Defaults to 'none'. Possible values:
+
+ - 'none' or '': No edge.
+ - *None*: :rc:`patch.edgecolor` will be used. Note that currently
+ :rc:`patch.force_edgecolor` has to be True for this to work.
+ - 'face': Use the adjacent face color.
+ - An mpl color or sequence of colors will set the edge color.
+
+ The singular form *edgecolor* works as an alias.
+
+ alpha : scalar, optional, default: None
+ The alpha blending value, between 0 (transparent) and 1 (opaque).
+
+ shading : {'flat', 'gouraud'}, optional
+ The fill style. Possible values:
+
+ - 'flat': A solid color is used for each quad. The color of the
+ quad (i, j), (i+1, j), (i, j+1), (i+1, j+1) is given by
+ ``C[i,j]``.
+ - 'gouraud': Each quad will be Gouraud shaded: The color of the
+ corners (i', j') are given by ``C[i',j']``. The color values of
+ the area in between is interpolated from the corner values.
+ When Gouraud shading is used, *edgecolors* is ignored.
+
+ snap : bool, optional, default: False
+ Whether to snap the mesh to pixel boundaries.
+
+ Returns
+ -------
+ mesh : `matplotlib.collections.QuadMesh`
+
+ Other Parameters
+ ----------------
+ **kwargs
+ Additionally, the following arguments are allowed. They are passed
+ along to the `~matplotlib.collections.QuadMesh` constructor:
+
+ %(QuadMesh)s
+
+
+ See Also
+ --------
+ pcolor : An alternative implementation with slightly different
+ features. For a detailed discussion on the differences see
+ :ref:`Differences between pcolor() and pcolormesh()
+ <differences-pcolor-pcolormesh>`.
+ imshow : If *X* and *Y* are each equidistant, `~.Axes.imshow` can be a
+ faster alternative.
+
+ Notes
+ -----
+
+ **Masked arrays**
+
+ *C* may be a masked array. If ``C[i, j]`` is masked, the corresponding
+ quadrilateral will be transparent. Masking of *X* and *Y* is not
+ supported. Use `~.Axes.pcolor` if you need this functionality.
+
+ .. _axes-pcolormesh-grid-orientation:
+
+ **Grid orientation**
+
+ The grid orientation follows the standard matrix convention: An array
+ *C* with shape (nrows, ncolumns) is plotted with the column number as
+ *X* and the row number as *Y*.
+
+ .. _differences-pcolor-pcolormesh:
+
+ **Differences between pcolor() and pcolormesh()**
+
+ Both methods are used to create a pseudocolor plot of a 2-D array
+ using quadrilaterals.
+
+ The main difference lies in the created object and internal data
+ handling:
+ While `~.Axes.pcolor` returns a `.PolyCollection`, `~.Axes.pcolormesh`
+ returns a `.QuadMesh`. The latter is more specialized for the given
+ purpose and thus is faster. It should almost always be preferred.
+
+ There is also a slight difference in the handling of masked arrays.
+ Both `~.Axes.pcolor` and `~.Axes.pcolormesh` support masked arrays
+ for *C*. However, only `~.Axes.pcolor` supports masked arrays for *X*
+ and *Y*. The reason lies in the internal handling of the masked values.
+ `~.Axes.pcolor` leaves out the respective polygons from the
+ PolyCollection. `~.Axes.pcolormesh` sets the facecolor of the masked
+ elements to transparent. You can see the difference when using
+ edgecolors. While all edges are drawn irrespective of masking in a
+ QuadMesh, the edge between two adjacent masked quadrilaterals in
+ `~.Axes.pcolor` is not drawn as the corresponding polygons do not
+ exist in the PolyCollection.
+
+ Another difference is the support of Gouraud shading in
+ `~.Axes.pcolormesh`, which is not available with `~.Axes.pcolor`.
+
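+ A minimal usage sketch with Gouraud shading (illustrative only; assumes
+ an existing Axes ``ax`` and ``import numpy as np``; for
+ ``shading='gouraud'`` the dimensions of *C* must match *X* and *Y*):
+
+ >>> X, Y = np.meshgrid(np.linspace(0, 1, 8), np.linspace(0, 1, 8))
+ >>> C = np.random.rand(8, 8)             # hypothetical corner values
+ >>> ax.pcolormesh(X, Y, C, shading='gouraud')
+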
+ """
+ if not self._hold:
+ self.cla()
+
+ alpha = kwargs.pop('alpha', None)
+ norm = kwargs.pop('norm', None)
+ cmap = kwargs.pop('cmap', None)
+ vmin = kwargs.pop('vmin', None)
+ vmax = kwargs.pop('vmax', None)
+ shading = kwargs.pop('shading', 'flat').lower()
+ antialiased = kwargs.pop('antialiased', False)
+ kwargs.setdefault('edgecolors', 'None')
+
+ allmatch = (shading == 'gouraud')
+
+ X, Y, C = self._pcolorargs('pcolormesh', *args, allmatch=allmatch)
+ Ny, Nx = X.shape
+ X = X.ravel()
+ Y = Y.ravel()
+ # unit conversion allows e.g. datetime objects as axis values
+ self._process_unit_info(xdata=X, ydata=Y, kwargs=kwargs)
+ X = self.convert_xunits(X)
+ Y = self.convert_yunits(Y)
+
+ # convert to one dimensional arrays
+ C = C.ravel()
+ coords = np.column_stack((X, Y)).astype(float, copy=False)
+ collection = mcoll.QuadMesh(Nx - 1, Ny - 1, coords,
+ antialiased=antialiased, shading=shading,
+ **kwargs)
+ collection.set_alpha(alpha)
+ collection.set_array(C)
+ if norm is not None and not isinstance(norm, mcolors.Normalize):
+ raise ValueError(
+ "'norm' must be an instance of 'mcolors.Normalize'")
+ collection.set_cmap(cmap)
+ collection.set_norm(norm)
+ collection.set_clim(vmin, vmax)
+ collection.autoscale_None()
+
+ self.grid(False)
+
+ # Transform from native to data coordinates?
+ t = collection._transform
+ if (not isinstance(t, mtransforms.Transform) and
+ hasattr(t, '_as_mpl_transform')):
+ t = t._as_mpl_transform(self.axes)
+
+ if t and any(t.contains_branch_seperately(self.transData)):
+ trans_to_data = t - self.transData
+ coords = trans_to_data.transform(coords)
+
+ self.add_collection(collection, autolim=False)
+
+ minx, miny = np.min(coords, axis=0)
+ maxx, maxy = np.max(coords, axis=0)
+ collection.sticky_edges.x[:] = [minx, maxx]
+ collection.sticky_edges.y[:] = [miny, maxy]
+ corners = (minx, miny), (maxx, maxy)
+ self.update_datalim(corners)
+ self.autoscale_view()
+ return collection
+
+ @_preprocess_data(label_namer=None)
+ @docstring.dedent_interpd
+ def pcolorfast(self, *args, **kwargs):
+ """
+ Create a pseudocolor plot with a non-regular rectangular grid.
+
+ Call signatures::
+
+ ax.pcolorfast(C, **kwargs)
+ ax.pcolorfast(xr, yr, C, **kwargs)
+ ax.pcolorfast(x, y, C, **kwargs)
+ ax.pcolorfast(X, Y, C, **kwargs)
+
+ This method is similar to `~.Axes.pcolor` and `~.Axes.pcolormesh`.
+ It's designed to provide the fastest pcolor-type plotting with the
+ Agg backend. To achieve this, it uses different algorithms internally
+ depending on the complexity of the input grid (regular rectangular,
+ non-regular rectangular or arbitrary quadrilateral).
+
+ .. warning::
+
+ This method is experimental. Compared to `~.Axes.pcolor` or
+ `~.Axes.pcolormesh` it has some limitations:
+
+ - It supports only flat shading (no outlines).
+ - It lacks support for log scaling of the axes.
+ - It does not have a pyplot wrapper.
+
+ Parameters
+ ----------
+ C : array-like(M, N)
+ A scalar 2D array. The values will be color-mapped.
+ *C* may be a masked array.
+
+ x, y : tuple or array-like
+ *X* and *Y* are used to specify the coordinates of the
+ quadrilaterals. There are different ways to do this:
+
+ - Use tuples ``xr=(xmin, xmax)`` and ``yr=(ymin, ymax)`` to define
+ a *uniform rectangular grid*.
+
+ The tuples define the outer edges of the grid. All individual
+ quadrilaterals will be of the same size. This is the fastest
+ version.
+
+ - Use 1D arrays *x*, *y* to specify a *non-uniform rectangular
+ grid*.
+
+ In this case *x* and *y* have to be monotonic 1D arrays of length
+ *N+1* and *M+1*, specifying the x and y boundaries of the cells.
+
+ The speed is intermediate. Note: The grid is checked, and if
+ found to be uniform the fast version is used.
+
+ - Use 2D arrays *X*, *Y* if you need an *arbitrary quadrilateral
+ grid* (i.e. if the quadrilaterals are not rectangular).
+
+ In this case *X* and *Y* are 2D arrays with shape (M, N),
+ specifying the x and y coordinates of the corners of the colored
+ quadrilaterals. See `~.Axes.pcolormesh` for details.
+
+ This is the most general, but the slowest to render. It may
+ produce faster and more compact output using ps, pdf, and
+ svg backends, however.
+
+ Leaving out *x* and *y* defaults to ``xr=(0, N)``, ``yr=(0, M)``.
+
+ cmap : str or `~matplotlib.colors.Colormap`, optional
+ A Colormap instance or registered colormap name. The colormap
+ maps the *C* values to colors. Defaults to :rc:`image.cmap`.
+
+ norm : `~matplotlib.colors.Normalize`, optional
+ The Normalize instance scales the data values to the canonical
+ colormap range [0, 1] for mapping to colors. By default, the data
+ range is mapped to the colorbar range using linear scaling.
+
+ vmin, vmax : scalar, optional, default: None
+ The colorbar range. If *None*, suitable min/max values are
+ automatically chosen by the `~.Normalize` instance (defaults to
+ the respective min/max values of *C* in case of the default linear
+ scaling).
+
+ alpha : scalar, optional, default: None
+ The alpha blending value, between 0 (transparent) and 1 (opaque).
+
+ snap : bool, optional, default: False
+ Whether to snap the mesh to pixel boundaries.
+
+ Returns
+ -------
+ image : `.AxesImage` or `.PcolorImage` or `.QuadMesh`
+ The return type depends on the type of grid:
+
+ - `.AxesImage` for a regular rectangular grid.
+ - `.PcolorImage` for a non-regular rectangular grid.
+ - `.QuadMesh` for a non-rectangular grid.
+
+ Notes
+ -----
+ .. [notes section required to get data note injection right]
+
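+ A minimal usage sketch of the fastest (uniform grid) form (illustrative
+ only; assumes an existing Axes ``ax`` and ``import numpy as np``):
+
+ >>> C = np.random.rand(100, 200)         # hypothetical scalar field
+ >>> im = ax.pcolorfast((0.0, 2.0), (0.0, 1.0), C, cmap='gray')
+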
+ """
+
+ if not self._hold:
+ self.cla()
+
+ alpha = kwargs.pop('alpha', None)
+ norm = kwargs.pop('norm', None)
+ cmap = kwargs.pop('cmap', None)
+ vmin = kwargs.pop('vmin', None)
+ vmax = kwargs.pop('vmax', None)
+ if norm is not None and not isinstance(norm, mcolors.Normalize):
+ raise ValueError(
+ "'norm' must be an instance of 'mcolors.Normalize'")
+
+ C = args[-1]
+ nr, nc = C.shape
+ if len(args) == 1:
+ style = "image"
+ x = [0, nc]
+ y = [0, nr]
+ elif len(args) == 3:
+ x, y = args[:2]
+ x = np.asarray(x)
+ y = np.asarray(y)
+ if x.ndim == 1 and y.ndim == 1:
+ if x.size == 2 and y.size == 2:
+ style = "image"
+ else:
+ dx = np.diff(x)
+ dy = np.diff(y)
+ if (np.ptp(dx) < 0.01 * np.abs(dx.mean()) and
+ np.ptp(dy) < 0.01 * np.abs(dy.mean())):
+ style = "image"
+ else:
+ style = "pcolorimage"
+ elif x.ndim == 2 and y.ndim == 2:
+ style = "quadmesh"
+ else:
+ raise TypeError("arguments do not match valid signatures")
+ else:
+ raise TypeError("need 1 argument or 3 arguments")
+
+ if style == "quadmesh":
+
+ # convert to one dimensional arrays
+ # This should also be moved to the QuadMesh class
+
+ # data point in each cell is value at lower left corner
+ C = ma.ravel(C)
+ X = x.ravel()
+ Y = y.ravel()
+ Nx = nc + 1
+ Ny = nr + 1
+
+ # The following needs to be cleaned up; the renderer
+ # requires separate contiguous arrays for X and Y,
+ # but the QuadMesh class requires the 2D array.
+ coords = np.empty(((Nx * Ny), 2), np.float64)
+ coords[:, 0] = X
+ coords[:, 1] = Y
+
+ # The QuadMesh class can also be changed to
+ # handle relevant superclass kwargs; the initializer
+ # should do much more than it does now.
+ collection = mcoll.QuadMesh(nc, nr, coords, 0, edgecolors="None")
+ collection.set_alpha(alpha)
+ collection.set_array(C)
+ collection.set_cmap(cmap)
+ collection.set_norm(norm)
+ self.add_collection(collection, autolim=False)
+ xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
+ ret = collection
+
+ else: # It's one of the two image styles.
+ xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
+
+ if style == "image":
+ im = mimage.AxesImage(self, cmap, norm,
+ interpolation='nearest',
+ origin='lower',
+ extent=(xl, xr, yb, yt),
+ **kwargs)
+ im.set_data(C)
+ im.set_alpha(alpha)
+ elif style == "pcolorimage":
+ im = mimage.PcolorImage(self, x, y, C,
+ cmap=cmap,
+ norm=norm,
+ alpha=alpha,
+ **kwargs)
+ im.set_extent((xl, xr, yb, yt))
+ self.add_image(im)
+ ret = im
+
+ if vmin is not None or vmax is not None:
+ ret.set_clim(vmin, vmax)
+ else:
+ ret.autoscale_None()
+
+ ret.sticky_edges.x[:] = [xl, xr]
+ ret.sticky_edges.y[:] = [yb, yt]
+ self.update_datalim(np.array([[xl, yb], [xr, yt]]))
+ self.autoscale_view(tight=True)
+ return ret
+
+ @_preprocess_data()
+ def contour(self, *args, **kwargs):
+ if not self._hold:
+ self.cla()
+ kwargs['filled'] = False
+ contours = mcontour.QuadContourSet(self, *args, **kwargs)
+ self.autoscale_view()
+ return contours
+ contour.__doc__ = mcontour.QuadContourSet._contour_doc
+
+ @_preprocess_data()
+ def contourf(self, *args, **kwargs):
+ if not self._hold:
+ self.cla()
+ kwargs['filled'] = True
+ contours = mcontour.QuadContourSet(self, *args, **kwargs)
+ self.autoscale_view()
+ return contours
+ contourf.__doc__ = mcontour.QuadContourSet._contour_doc
+
+ def clabel(self, CS, *args, **kwargs):
+ return CS.clabel(*args, **kwargs)
+ clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
+
+ @docstring.dedent_interpd
+ def table(self, **kwargs):
+ """
+ Add a table to the current axes.
+
+ Call signature::
+
+ table(cellText=None, cellColours=None,
+ cellLoc='right', colWidths=None,
+ rowLabels=None, rowColours=None, rowLoc='left',
+ colLabels=None, colColours=None, colLoc='center',
+ loc='bottom', bbox=None)
+
+ Returns a :class:`matplotlib.table.Table` instance. Either `cellText`
+ or `cellColours` must be provided. For finer grained control over
+ tables, use the :class:`~matplotlib.table.Table` class and add it to
+ the axes with :meth:`~matplotlib.axes.Axes.add_table`.
+
+ Thanks to John Gill for providing the class and table.
+
+ kwargs control the :class:`~matplotlib.table.Table`
+ properties:
+
+ %(Table)s
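+
+ For example (illustrative only; assumes an existing Axes ``ax``):
+
+ >>> ax.table(cellText=[['1', '2'], ['3', '4']],
+ ...          colLabels=['a', 'b'], loc='bottom')
+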
+ """
+ return mtable.table(self, **kwargs)
+
+ #### Data analysis
+
+ @_preprocess_data(replace_names=["x", 'weights'], label_namer="x")
+ def hist(self, x, bins=None, range=None, density=None, weights=None,
+ cumulative=False, bottom=None, histtype='bar', align='mid',
+ orientation='vertical', rwidth=None, log=False,
+ color=None, label=None, stacked=False, normed=None,
+ **kwargs):
+ """
+ Plot a histogram.
+
+ Compute and draw the histogram of *x*. The return value is a
+ tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
+ [*patches0*, *patches1*,...]) if the input contains multiple
+ data.
+
+ Multiple data can be provided via *x* as a list of datasets
+ of potentially different length ([*x0*, *x1*, ...]), or as
+ a 2-D ndarray in which each column is a dataset. Note that
+ the ndarray form is transposed relative to the list form.
+
+ Masked arrays are not supported at present.
+
+ Parameters
+ ----------
+ x : (n,) array or sequence of (n,) arrays
+ Input values, this takes either a single array or a sequence of
+ arrays which are not required to be of the same length
+
+ bins : integer or sequence or 'auto', optional
+ If an integer is given, ``bins + 1`` bin edges are calculated and
+ returned, consistent with :func:`numpy.histogram`.
+
+ If `bins` is a sequence, gives bin edges, including left edge of
+ first bin and right edge of last bin. In this case, `bins` is
+ returned unmodified.
+
+ All but the last (righthand-most) bin is half-open. In other
+ words, if `bins` is::
+
+ [1, 2, 3, 4]
+
+ then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
+ the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
+ *includes* 4.
+
+ Unequally spaced bins are supported if *bins* is a sequence.
+
+ If Numpy 1.11 is installed, may also be ``'auto'``.
+
+ Default is taken from the rcParam ``hist.bins``.
+
+ range : tuple or None, optional
+ The lower and upper range of the bins. Lower and upper outliers
+ are ignored. If not provided, *range* is ``(x.min(), x.max())``.
+ Range has no effect if *bins* is a sequence.
+
+ If *bins* is a sequence or *range* is specified, autoscaling
+ is based on the specified bin range instead of the
+ range of x.
+
+ Default is ``None``
+
+ density : boolean, optional
+ If ``True``, the first element of the return tuple will
+ be the counts normalized to form a probability density, i.e.,
+ the area (or integral) under the histogram will sum to 1.
+ This is achieved by dividing the count by the number of
+ observations times the bin width and not dividing by the total
+ number of observations. If *stacked* is also ``True``, the sum of
+ the histograms is normalized to 1.
+
+ Default is ``None`` for both *normed* and *density*. If either is
+ set, then that value will be used. If neither are set, then the
+ args will be treated as ``False``.
+
+ If both *density* and *normed* are set an error is raised.
+
+ weights : (n, ) array_like or None, optional
+ An array of weights, of the same shape as *x*. Each value in *x*
+ only contributes its associated weight towards the bin count
+ (instead of 1). If *normed* or *density* is ``True``,
+ the weights are normalized, so that the integral of the density
+ over the range remains 1.
+
+ Default is ``None``
+
+ cumulative : boolean, optional
+ If ``True``, then a histogram is computed where each bin gives the
+ counts in that bin plus all bins for smaller values. The last bin
+ gives the total number of datapoints. If *normed* or *density*
+ is also ``True`` then the histogram is normalized such that the
+ last bin equals 1. If *cumulative* evaluates to less than 0
+ (e.g., -1), the direction of accumulation is reversed.
+ In this case, if *normed* and/or *density* is also ``True``, then
+ the histogram is normalized such that the first bin equals 1.
+
+ Default is ``False``
+
+ bottom : array_like, scalar, or None
+ Location of the bottom baseline of each bin. If a scalar,
+ the base line for each bin is shifted by the same amount.
+ If an array, each bin is shifted independently and the length
+ of bottom must match the number of bins. If None, defaults to 0.
+
+ Default is ``None``
+
+ histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, optional
+ The type of histogram to draw.
+
+ - 'bar' is a traditional bar-type histogram. If multiple data
+ are given the bars are arranged side by side.
+
+ - 'barstacked' is a bar-type histogram where multiple
+ data are stacked on top of each other.
+
+ - 'step' generates a lineplot that is by default
+ unfilled.
+
+ - 'stepfilled' generates a lineplot that is by default
+ filled.
+
+ Default is 'bar'
+
+ align : {'left', 'mid', 'right'}, optional
+ Controls how the histogram is plotted.
+
+ - 'left': bars are centered on the left bin edges.
+
+ - 'mid': bars are centered between the bin edges.
+
+ - 'right': bars are centered on the right bin edges.
+
+ Default is 'mid'
+
+ orientation : {'horizontal', 'vertical'}, optional
+ If 'horizontal', `~matplotlib.pyplot.barh` will be used for
+ bar-type histograms and the *bottom* kwarg will be the left edges.
+
+ rwidth : scalar or None, optional
+ The relative width of the bars as a fraction of the bin width. If
+ ``None``, automatically compute the width.
+
+ Ignored if *histtype* is 'step' or 'stepfilled'.
+
+ Default is ``None``
+
+ log : boolean, optional
+ If ``True``, the histogram axis will be set to a log scale. If
+ *log* is ``True`` and *x* is a 1D array, empty bins will be
+ filtered out and only the non-empty ``(n, bins, patches)``
+ will be returned.
+
+ Default is ``False``
+
+ color : color or array_like of colors or None, optional
+ Color spec or sequence of color specs, one per dataset. Default
+ (``None``) uses the standard line color sequence.
+
+ Default is ``None``
+
+ label : string or None, optional
+ String, or sequence of strings to match multiple datasets. Bar
+ charts yield multiple patches per dataset, but only the first gets
+ the label, so that the legend command will work as expected.
+
+ Default is ``None``
+
+ stacked : boolean, optional
+ If ``True``, multiple data are stacked on top of each other. If
+ ``False``, multiple data are arranged side by side if histtype is
+ 'bar', or on top of each other if histtype is 'step'.
+
+ Default is ``False``
+
+ normed : bool, optional
+ Deprecated; use the density keyword argument instead.
+
+ Returns
+ -------
+ n : array or list of arrays
+ The values of the histogram bins. See *normed* or *density*
+ and *weights* for a description of the possible semantics.
+ If input *x* is an array, then this is an array of length
+ *nbins*. If input is a sequence of arrays
+ ``[data1, data2,..]``, then this is a list of arrays with
+ the values of the histograms for each of the arrays in the
+ same order.
+
+ bins : array
+ The edges of the bins. Length nbins + 1 (nbins left edges and right
+ edge of last bin). Always a single array even when multiple data
+ sets are passed in.
+
+ patches : list or list of lists
+ Silent list of individual patches used to create the histogram
+ or list of such lists if multiple input datasets.
+
+ Other Parameters
+ ----------------
+ **kwargs : `~matplotlib.patches.Patch` properties
+
+ See also
+ --------
+ hist2d : 2D histograms
+
+ Notes
+ -----
+ .. [Notes section required for data comment. See #10189.]
+
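+ Examples
+ --------
+ A minimal usage sketch (assuming ``numpy`` is imported as ``np`` and
+ ``ax`` is an existing :class:`~matplotlib.axes.Axes`)::
+
+     data = np.random.randn(1000)
+     n, bin_edges, bars = ax.hist(data, bins=30, density=True,
+                                  histtype='stepfilled', alpha=0.5)
+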
+ """
+ # Avoid shadowing the builtin.
+ bin_range = range
+ del range
+
+ if not self._hold:
+ self.cla()
+
+ if np.isscalar(x):
+ x = [x]
+
+ if bins is None:
+ bins = rcParams['hist.bins']
+
+ # Validate string inputs here so we don't have to clutter
+ # subsequent code.
+ if histtype not in ['bar', 'barstacked', 'step', 'stepfilled']:
+ raise ValueError("histtype %s is not recognized" % histtype)
+
+ if align not in ['left', 'mid', 'right']:
+ raise ValueError("align kwarg %s is not recognized" % align)
+
+ if orientation not in ['horizontal', 'vertical']:
+ raise ValueError(
+ "orientation kwarg %s is not recognized" % orientation)
+
+ if histtype == 'barstacked' and not stacked:
+ stacked = True
+
+ if density is not None and normed is not None:
+ raise ValueError("kwargs 'density' and 'normed' cannot be used "
+ "simultaneously. "
+ "Please only use 'density', since 'normed' "
+ "is deprecated.")
+ if normed is not None:
+ warnings.warn("The 'normed' kwarg is deprecated, and has been "
+ "replaced by the 'density' kwarg.")
+
+ # basic input validation
+ input_empty = np.size(x) == 0
+ # Massage 'x' for processing.
+ if input_empty:
+ x = [np.array([])]
+ else:
+ x = cbook._reshape_2D(x, 'x')
+ nx = len(x) # number of datasets
+
+ # Process unit information
+ # Unit conversion is done individually on each dataset
+ self._process_unit_info(xdata=x[0], kwargs=kwargs)
+ x = [self.convert_xunits(xi) for xi in x]
+
+ if bin_range is not None:
+ bin_range = self.convert_xunits(bin_range)
+
+ # Check whether bins or range are given explicitly.
+ binsgiven = (cbook.iterable(bins) or bin_range is not None)
+
+ # We need to do to 'weights' what was done to 'x'
+ if weights is not None:
+ w = cbook._reshape_2D(weights, 'weights')
+ else:
+ w = [None] * nx
+
+ if len(w) != nx:
+ raise ValueError('weights should have the same shape as x')
+
+ for xi, wi in zip(x, w):
+ if wi is not None and len(wi) != len(xi):
+ raise ValueError(
+ 'weights should have the same shape as x')
+
+ if color is None:
+ color = [self._get_lines.get_next_color() for i in xrange(nx)]
+ else:
+ color = mcolors.to_rgba_array(color)
+ if len(color) != nx:
+ raise ValueError("color kwarg must have one color per dataset")
+
+ # If bins are not specified either explicitly or via range,
+ # we need to figure out the range required for all datasets,
+ # and supply that to np.histogram.
+ if not binsgiven and not input_empty:
+ xmin = np.inf
+ xmax = -np.inf
+ for xi in x:
+ if len(xi) > 0:
+ xmin = min(xmin, xi.min())
+ xmax = max(xmax, xi.max())
+ bin_range = (xmin, xmax)
+ density = bool(density) or bool(normed)
+ if density and not stacked:
+ hist_kwargs = dict(range=bin_range, density=density)
+ else:
+ hist_kwargs = dict(range=bin_range)
+
+ # List to store all the top coordinates of the histograms
+ tops = []
+ mlast = None
+ # Loop through datasets
+ for i in xrange(nx):
+ # this will automatically overwrite bins,
+ # so that each histogram uses the same bins
+ m, bins = np.histogram(x[i], bins, weights=w[i], **hist_kwargs)
+ m = m.astype(float) # causes problems later if it's an int
+ if mlast is None:
+ mlast = np.zeros(len(bins)-1, m.dtype)
+ if stacked:
+ m += mlast
+ mlast[:] = m
+ tops.append(m)
+
+ # If a stacked density plot, normalize so the area of all the stacked
+ # histograms together is 1
+ if stacked and density:
+ db = np.diff(bins)
+ for m in tops:
+ m[:] = (m / db) / tops[-1].sum()
+ if cumulative:
+ slc = slice(None)
+ if cbook.is_numlike(cumulative) and cumulative < 0:
+ slc = slice(None, None, -1)
+
+ if density:
+ tops = [(m * np.diff(bins))[slc].cumsum()[slc] for m in tops]
+ else:
+ tops = [m[slc].cumsum()[slc] for m in tops]
+
+ patches = []
+
+ # Save autoscale state for later restoration; turn autoscaling
+ # off so we can do it all a single time at the end, instead
+ # of having it done by bar or fill and then having to be redone.
+ _saved_autoscalex = self.get_autoscalex_on()
+ _saved_autoscaley = self.get_autoscaley_on()
+ self.set_autoscalex_on(False)
+ self.set_autoscaley_on(False)
+
+ if histtype.startswith('bar'):
+
+ totwidth = np.diff(bins)
+
+ if rwidth is not None:
+ dr = np.clip(rwidth, 0, 1)
+ elif (len(tops) > 1 and
+ ((not stacked) or rcParams['_internal.classic_mode'])):
+ dr = 0.8
+ else:
+ dr = 1.0
+
+ if histtype == 'bar' and not stacked:
+ width = dr * totwidth / nx
+ dw = width
+ boffset = -0.5 * dr * totwidth * (1 - 1 / nx)
+ elif histtype == 'barstacked' or stacked:
+ width = dr * totwidth
+ boffset, dw = 0.0, 0.0
+
+ if align == 'mid' or align == 'edge':
+ boffset += 0.5 * totwidth
+ elif align == 'right':
+ boffset += totwidth
+
+ if orientation == 'horizontal':
+ _barfunc = self.barh
+ bottom_kwarg = 'left'
+ else: # orientation == 'vertical'
+ _barfunc = self.bar
+ bottom_kwarg = 'bottom'
+
+ for m, c in zip(tops, color):
+ if bottom is None:
+ bottom = np.zeros(len(m))
+ if stacked:
+ height = m - bottom
+ else:
+ height = m
+ patch = _barfunc(bins[:-1]+boffset, height, width,
+ align='center', log=log,
+ color=c, **{bottom_kwarg: bottom})
+ patches.append(patch)
+ if stacked:
+ bottom[:] = m
+ boffset += dw
+
+ elif histtype.startswith('step'):
+ # these define the perimeter of the polygon
+ x = np.zeros(4 * len(bins) - 3)
+ y = np.zeros(4 * len(bins) - 3)
+
+ x[0:2*len(bins)-1:2], x[1:2*len(bins)-1:2] = bins, bins[:-1]
+ x[2*len(bins)-1:] = x[1:2*len(bins)-1][::-1]
+
+ if bottom is None:
+ bottom = np.zeros(len(bins) - 1)
+
+ y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = bottom, bottom
+ y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
+
+ if log:
+ if orientation == 'horizontal':
+ self.set_xscale('log', nonposx='clip')
+ logbase = self.xaxis._scale.base
+ else: # orientation == 'vertical'
+ self.set_yscale('log', nonposy='clip')
+ logbase = self.yaxis._scale.base
+
+ # Setting a minimum of 0 results in problems for log plots
+ if np.min(bottom) > 0:
+ minimum = np.min(bottom)
+ elif density or weights is not None:
+ # For data that is normed to form a probability density,
+ # set to minimum data value / logbase
+ # (gives 1 full tick-label unit for the lowest filled bin)
+ ndata = np.array(tops)
+ minimum = (np.min(ndata[ndata > 0])) / logbase
+ else:
+ # For non-normed (density = False) data,
+ # set the min to 1 / log base,
+ # again so that there is 1 full tick-label unit
+ # for the lowest bin
+ minimum = 1.0 / logbase
+
+ y[0], y[-1] = minimum, minimum
+ else:
+ minimum = 0
+
+ if align == 'left' or align == 'center':
+ x -= 0.5*(bins[1]-bins[0])
+ elif align == 'right':
+ x += 0.5*(bins[1]-bins[0])
+
+ # If fill kwarg is set, it will be passed to the patch collection,
+ # overriding this
+ fill = (histtype == 'stepfilled')
+
+ xvals, yvals = [], []
+ for m in tops:
+ if stacked:
+ # starting point for drawing polygon
+ y[0] = y[1]
+ # top of the previous polygon becomes the bottom
+ y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
+ # set the top of this polygon
+ y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = (m + bottom,
+ m + bottom)
+ if log:
+ y[y < minimum] = minimum
+ if orientation == 'horizontal':
+ xvals.append(y.copy())
+ yvals.append(x.copy())
+ else:
+ xvals.append(x.copy())
+ yvals.append(y.copy())
+
+ # stepfill is closed, step is not
+ split = -1 if fill else 2 * len(bins)
+ # add patches in reverse order so that when stacking,
+ # items lower in the stack are plotted on top of
+ # items higher in the stack
+ for x, y, c in reversed(list(zip(xvals, yvals, color))):
+ patches.append(self.fill(
+ x[:split], y[:split],
+ closed=True if fill else None,
+ facecolor=c,
+ edgecolor=None if fill else c,
+ fill=fill if fill else None))
+ for patch_list in patches:
+ for patch in patch_list:
+ if orientation == 'vertical':
+ patch.sticky_edges.y.append(minimum)
+ elif orientation == 'horizontal':
+ patch.sticky_edges.x.append(minimum)
+
+ # we return patches, so put it back in the expected order
+ patches.reverse()
+
+ self.set_autoscalex_on(_saved_autoscalex)
+ self.set_autoscaley_on(_saved_autoscaley)
+ self.autoscale_view()
+
+ if label is None:
+ labels = [None]
+ elif isinstance(label, six.string_types):
+ labels = [label]
+ else:
+ labels = [six.text_type(lab) for lab in label]
+
+ for patch, lbl in zip_longest(patches, labels, fillvalue=None):
+ if patch:
+ p = patch[0]
+ p.update(kwargs)
+ if lbl is not None:
+ p.set_label(lbl)
+
+ for p in patch[1:]:
+ p.update(kwargs)
+ p.set_label('_nolegend_')
+
+ if nx == 1:
+ return tops[0], bins, cbook.silent_list('Patch', patches[0])
+ else:
+ return tops, bins, cbook.silent_list('Lists of Patches', patches)
+
+ @_preprocess_data(replace_names=["x", "y", "weights"], label_namer=None)
+ def hist2d(self, x, y, bins=10, range=None, normed=False, weights=None,
+ cmin=None, cmax=None, **kwargs):
+ """
+ Make a 2D histogram plot.
+
+ Parameters
+ ----------
+ x, y : array_like, shape (n, )
+ Input values.
+
+ bins : [None | int | [int, int] | array_like | [array, array]]
+
+ The bin specification:
+
+ - If int, the number of bins for the two dimensions
+ (nx=ny=bins).
+
+ - If ``[int, int]``, the number of bins in each dimension
+ (nx, ny = bins).
+
+ - If array_like, the bin edges for the two dimensions
+ (x_edges=y_edges=bins).
+
+ - If ``[array, array]``, the bin edges in each dimension
+ (x_edges, y_edges = bins).
+
+ The default value is 10.
+
+ range : array_like shape(2, 2), optional, default: None
+ The leftmost and rightmost edges of the bins along each dimension
+ (if not specified explicitly in the bins parameters): ``[[xmin,
+ xmax], [ymin, ymax]]``. All values outside of this range will be
+ considered outliers and not tallied in the histogram.
+
+ normed : boolean, optional, default: False
+ Normalize histogram.
+
+ weights : array_like, shape (n, ), optional, default: None
+ An array of values w_i weighting each sample (x_i, y_i).
+
+ cmin : scalar, optional, default: None
+ All bins that have a count less than *cmin* will not be displayed,
+ and these count values in the returned count histogram will also
+ be set to NaN.
+
+ cmax : scalar, optional, default: None
+ All bins that have a count more than *cmax* will not be displayed
+ (set to NaN before passing to imshow), and these count values in
+ the returned count histogram will also be set to NaN.
+
+ Returns
+ -------
+ h : 2D array
+ The bi-dimensional histogram of samples x and y. Values in x are
+ histogrammed along the first dimension and values in y are
+ histogrammed along the second dimension.
+ xedges : 1D array
+ The bin edges along the x axis.
+ yedges : 1D array
+ The bin edges along the y axis.
+ image : AxesImage
+
+ Other Parameters
+ ----------------
+ cmap : Colormap or str, optional
+ A `.colors.Colormap` instance. If not set, use rc settings.
+
+ norm : Normalize, optional
+ A `.colors.Normalize` instance is used to
+ scale luminance data to ``[0, 1]``. If not set, defaults to
+ `.colors.Normalize()`.
+
+ vmin/vmax : None or scalar, optional
+ Arguments passed to the `~.colors.Normalize` instance.
+
+ alpha : ``0 <= scalar <= 1`` or ``None``, optional
+ The alpha blending value.
+
+ See also
+ --------
+ hist : 1D histogram plotting
+
+ Notes
+ -----
+ - Currently ``hist2d`` calculates its own axis limits, and any limits
+ previously set are ignored.
+ - Rendering the histogram with a logarithmic color scale is
+ accomplished by passing a `.colors.LogNorm` instance to the *norm*
+ keyword argument. Likewise, power-law normalization (similar
+ in effect to gamma correction) can be accomplished with
+ `.colors.PowerNorm`.
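+
+ Examples
+ --------
+ A minimal sketch (assuming ``numpy`` as ``np`` and an existing Axes
+ ``ax``)::
+
+     x = np.random.randn(10000)
+     y = x + 0.5 * np.random.randn(10000)
+     counts, xedges, yedges, image = ax.hist2d(x, y, bins=50)
+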
+ """
+
+ h, xedges, yedges = np.histogram2d(x, y, bins=bins, range=range,
+ normed=normed, weights=weights)
+
+ if cmin is not None:
+ h[h < cmin] = None
+ if cmax is not None:
+ h[h > cmax] = None
+
+ pc = self.pcolorfast(xedges, yedges, h.T, **kwargs)
+ self.set_xlim(xedges[0], xedges[-1])
+ self.set_ylim(yedges[0], yedges[-1])
+
+ return h, xedges, yedges, pc
+
+ @_preprocess_data(replace_names=["x"], label_namer=None)
+ @docstring.dedent_interpd
+ def psd(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
+ window=None, noverlap=None, pad_to=None,
+ sides=None, scale_by_freq=None, return_line=None, **kwargs):
+ r"""
+ Plot the power spectral density.
+
+ Call signature::
+
+ psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
+ window=mlab.window_hanning, noverlap=0, pad_to=None,
+ sides='default', scale_by_freq=None, return_line=None, **kwargs)
+
+ The power spectral density :math:`P_{xx}` by Welch's average
+ periodogram method. The vector *x* is divided into *NFFT* length
+ segments. Each segment is detrended by function *detrend* and
+ windowed by function *window*. *noverlap* gives the length of
+ the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
+ of each segment :math:`i` are averaged to compute :math:`P_{xx}`,
+ with a scaling to correct for power loss due to windowing.
+
+ If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
+
+ Parameters
+ ----------
+ x : 1-D array or sequence
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(PSD)s
+
+ noverlap : integer
+ The number of points of overlap between segments.
+ The default value is 0 (no overlap).
+
+ Fc : integer
+ The center frequency of *x* (defaults to 0), which offsets
+ the x extents of the plot to reflect the frequency range used
+ when a signal is acquired and then filtered and downsampled to
+ baseband.
+
+ return_line : bool
+ Whether to include the line object plotted in the returned values.
+ Default is False.
+
+ Returns
+ -------
+ Pxx : 1-D array
+ The values for the power spectrum `P_{xx}` before scaling
+ (real valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *Pxx*
+
+ line : a :class:`~matplotlib.lines.Line2D` instance
+ The line created by this function.
+ Only returned if *return_line* is True.
+
+ Other Parameters
+ ----------------
+ **kwargs :
+ Keyword arguments control the :class:`~matplotlib.lines.Line2D`
+ properties:
+
+ %(Line2D)s
+
+ See Also
+ --------
+ :func:`specgram`
+ :func:`specgram` differs in the default overlap; in not returning
+ the mean of the segment periodograms; in returning the times of the
+ segments; and in plotting a colormap instead of a line.
+
+ :func:`magnitude_spectrum`
+ :func:`magnitude_spectrum` plots the magnitude spectrum.
+
+ :func:`csd`
+ :func:`csd` plots the spectral density between two signals.
+
+ Notes
+ -----
+ For plotting, the power is plotted as
+ :math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
+ is returned.
+
+ References
+ ----------
+ Bendat & Piersol -- Random Data: Analysis and Measurement Procedures,
+ John Wiley & Sons (1986)
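+
+ Examples
+ --------
+ A minimal sketch (assuming ``numpy`` as ``np`` and an existing Axes
+ ``ax``)::
+
+     fs = 1000.0
+     t = np.arange(0, 1, 1 / fs)
+     sig = np.sin(2 * np.pi * 100 * t) + 0.1 * np.random.randn(t.size)
+     Pxx, freqs = ax.psd(sig, NFFT=256, Fs=fs)
+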
+ """
+ if not self._hold:
+ self.cla()
+
+ if Fc is None:
+ Fc = 0
+
+ pxx, freqs = mlab.psd(x=x, NFFT=NFFT, Fs=Fs, detrend=detrend,
+ window=window, noverlap=noverlap, pad_to=pad_to,
+ sides=sides, scale_by_freq=scale_by_freq)
+ freqs += Fc
+
+ if scale_by_freq in (None, True):
+ psd_units = 'dB/Hz'
+ else:
+ psd_units = 'dB'
+
+ line = self.plot(freqs, 10 * np.log10(pxx), **kwargs)
+ self.set_xlabel('Frequency')
+ self.set_ylabel('Power Spectral Density (%s)' % psd_units)
+ self.grid(True)
+ vmin, vmax = self.viewLim.intervaly
+ intv = vmax - vmin
+ logi = int(np.log10(intv))
+ if logi == 0:
+ logi = .1
+ step = 10 * logi
+ ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
+ self.set_yticks(ticks)
+
+ if return_line is None or not return_line:
+ return pxx, freqs
+ else:
+ return pxx, freqs, line
+
+ @_preprocess_data(replace_names=["x", "y"], label_namer="y")
+ @docstring.dedent_interpd
+ def csd(self, x, y, NFFT=None, Fs=None, Fc=None, detrend=None,
+ window=None, noverlap=None, pad_to=None,
+ sides=None, scale_by_freq=None, return_line=None, **kwargs):
+ """
+ Plot the cross-spectral density.
+
+ Call signature::
+
+ csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
+ window=mlab.window_hanning, noverlap=0, pad_to=None,
+ sides='default', scale_by_freq=None, return_line=None, **kwargs)
+
+ The cross spectral density :math:`P_{xy}` by Welch's average
+ periodogram method. The vectors *x* and *y* are divided into
+ *NFFT* length segments. Each segment is detrended by function
+ *detrend* and windowed by function *window*. *noverlap* gives
+ the length of the overlap between segments. The product of
+ the direct FFTs of *x* and *y* are averaged over each segment
+ to compute :math:`P_{xy}`, with a scaling to correct for power
+ loss due to windowing.
+
+ If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
+ padded to *NFFT*.
+
+ Parameters
+ ----------
+ x, y : 1-D arrays or sequences
+ Arrays or sequences containing the data
+
+ %(Spectral)s
+
+ %(PSD)s
+
+ noverlap : integer
+ The number of points of overlap between segments.
+ The default value is 0 (no overlap).
+
+ Fc : integer
+ The center frequency of *x* (defaults to 0), which offsets
+ the x extents of the plot to reflect the frequency range used
+ when a signal is acquired and then filtered and downsampled to
+ baseband.
+
+ return_line : bool
+ Whether to include the line object plotted in the returned values.
+ Default is False.
+
+ Returns
+ -------
+ Pxy : 1-D array
+ The values for the cross spectrum `P_{xy}` before scaling
+ (complex valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *Pxy*
+
+ line : a :class:`~matplotlib.lines.Line2D` instance
+ The line created by this function.
+ Only returned if *return_line* is True.
+
+ Other Parameters
+ ----------------
+ **kwargs :
+ Keyword arguments control the :class:`~matplotlib.lines.Line2D`
+ properties:
+
+ %(Line2D)s
+
+ See Also
+ --------
+ :func:`psd`
+ :func:`psd` is the equivalent to setting y=x.
+
+ Notes
+ -----
+ For plotting, the power is plotted as
+ :math:`10\\log_{10}(P_{xy})` for decibels, though `P_{xy}` itself
+ is returned.
+
+ References
+ ----------
+ Bendat & Piersol -- Random Data: Analysis and Measurement Procedures,
+ John Wiley & Sons (1986)
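+
+ Examples
+ --------
+ A minimal sketch (assuming ``numpy`` as ``np`` and an existing Axes
+ ``ax``)::
+
+     t = np.arange(0, 1, 0.001)
+     s1 = np.sin(2 * np.pi * 10 * t) + 0.1 * np.random.randn(t.size)
+     s2 = np.sin(2 * np.pi * 10 * t) + 0.1 * np.random.randn(t.size)
+     Pxy, freqs = ax.csd(s1, s2, NFFT=256, Fs=1000)
+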
+ """
+ if not self._hold:
+ self.cla()
+
+ if Fc is None:
+ Fc = 0
+
+ pxy, freqs = mlab.csd(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
+ window=window, noverlap=noverlap, pad_to=pad_to,
+ sides=sides, scale_by_freq=scale_by_freq)
+ # pxy is complex
+ freqs += Fc
+
+ line = self.plot(freqs, 10 * np.log10(np.abs(pxy)), **kwargs)
+ self.set_xlabel('Frequency')
+ self.set_ylabel('Cross Spectrum Magnitude (dB)')
+ self.grid(True)
+ vmin, vmax = self.viewLim.intervaly
+
+ intv = vmax - vmin
+ step = 10 * int(np.log10(intv))
+
+ ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
+ self.set_yticks(ticks)
+
+ if return_line is None or not return_line:
+ return pxy, freqs
+ else:
+ return pxy, freqs, line
+
+ @_preprocess_data(replace_names=["x"], label_namer=None)
+ @docstring.dedent_interpd
+ def magnitude_spectrum(self, x, Fs=None, Fc=None, window=None,
+ pad_to=None, sides=None, scale=None,
+ **kwargs):
+ """
+ Plot the magnitude spectrum.
+
+ Call signature::
+
+ magnitude_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
+ pad_to=None, sides='default', **kwargs)
+
+ Compute the magnitude spectrum of *x*. Data is padded to a
+ length of *pad_to* and the windowing function *window* is applied to
+ the signal.
+
+ Parameters
+ ----------
+ x : 1-D array or sequence
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(Single_Spectrum)s
+
+ scale : [ 'default' | 'linear' | 'dB' ]
+ The scaling of the values in the *spec*. 'linear' is no scaling.
+ 'dB' returns the values in dB scale, i.e., the dB amplitude
+ (20 * log10). 'default' is 'linear'.
+
+ Fc : integer
+ The center frequency of *x* (defaults to 0), which offsets
+ the x extents of the plot to reflect the frequency range used
+ when a signal is acquired and then filtered and downsampled to
+ baseband.
+
+ Returns
+ -------
+ spectrum : 1-D array
+ The values for the magnitude spectrum before scaling (real valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *spectrum*
+
+ line : a :class:`~matplotlib.lines.Line2D` instance
+ The line created by this function
+
+ Other Parameters
+ ----------------
+ **kwargs :
+ Keyword arguments control the :class:`~matplotlib.lines.Line2D`
+ properties:
+
+ %(Line2D)s
+
+ See Also
+ --------
+ :func:`psd`
+ :func:`psd` plots the power spectral density.
+
+ :func:`angle_spectrum`
+ :func:`angle_spectrum` plots the angles of the corresponding
+ frequencies.
+
+ :func:`phase_spectrum`
+ :func:`phase_spectrum` plots the phase (unwrapped angle) of the
+ corresponding frequencies.
+
+ :func:`specgram`
+ :func:`specgram` can plot the magnitude spectrum of segments within
+ the signal in a colormap.
+
+ Notes
+ -----
+ .. [Notes section required for data comment. See #10189.]
+
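+ Examples
+ --------
+ A minimal sketch (assuming ``numpy`` as ``np`` and an existing Axes
+ ``ax``)::
+
+     t = np.arange(0, 1, 0.001)
+     sig = np.sin(2 * np.pi * 50 * t)
+     spectrum, freqs, line = ax.magnitude_spectrum(sig, Fs=1000,
+                                                   scale='dB')
+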
+ """
+ if not self._hold:
+ self.cla()
+
+ if Fc is None:
+ Fc = 0
+
+ if scale is None or scale == 'default':
+ scale = 'linear'
+
+ spec, freqs = mlab.magnitude_spectrum(x=x, Fs=Fs, window=window,
+ pad_to=pad_to, sides=sides)
+ freqs += Fc
+
+ if scale == 'linear':
+ Z = spec
+ yunits = 'energy'
+ elif scale == 'dB':
+ Z = 20. * np.log10(spec)
+ yunits = 'dB'
+ else:
+ raise ValueError('Unknown scale %s' % scale)
+
+ lines = self.plot(freqs, Z, **kwargs)
+ self.set_xlabel('Frequency')
+ self.set_ylabel('Magnitude (%s)' % yunits)
+
+ return spec, freqs, lines[0]
+
+ @_preprocess_data(replace_names=["x"], label_namer=None)
+ @docstring.dedent_interpd
+ def angle_spectrum(self, x, Fs=None, Fc=None, window=None,
+ pad_to=None, sides=None, **kwargs):
+ """
+ Plot the angle spectrum.
+
+ Call signature::
+
+ angle_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
+ pad_to=None, sides='default', **kwargs)
+
+ Compute the angle spectrum (wrapped phase spectrum) of *x*.
+ Data is padded to a length of *pad_to* and the windowing function
+ *window* is applied to the signal.
+
+ Parameters
+ ----------
+ x : 1-D array or sequence
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(Single_Spectrum)s
+
+ Fc : integer
+ The center frequency of *x* (defaults to 0), which offsets
+ the x extents of the plot to reflect the frequency range used
+ when a signal is acquired and then filtered and downsampled to
+ baseband.
+
+ Returns
+ -------
+ spectrum : 1-D array
+ The values for the angle spectrum in radians (real valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *spectrum*
+
+ line : a :class:`~matplotlib.lines.Line2D` instance
+ The line created by this function
+
+ Other Parameters
+ ----------------
+ **kwargs :
+ Keyword arguments control the :class:`~matplotlib.lines.Line2D`
+ properties:
+
+ %(Line2D)s
+
+ See Also
+ --------
+ :func:`magnitude_spectrum`
+ :func:`magnitude_spectrum` plots the magnitudes of the corresponding
+ frequencies.
+
+ :func:`phase_spectrum`
+ :func:`phase_spectrum` plots the unwrapped version of this
+ function.
+
+ :func:`specgram`
+ :func:`specgram` can plot the angle spectrum of segments within the
+ signal in a colormap.
+
+ Notes
+ -----
+ .. [Notes section required for data comment. See #10189.]
+
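+ Examples
+ --------
+ A minimal sketch of the wrapped-phase plot (assuming ``numpy`` as
+ ``np`` and an existing Axes ``ax``)::
+
+     t = np.arange(0, 1, 0.001)
+     sig = np.sin(2 * np.pi * 50 * t)
+     spectrum, freqs, line = ax.angle_spectrum(sig, Fs=1000)
+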
+ """
+ if not self._hold:
+ self.cla()
+
+ if Fc is None:
+ Fc = 0
+
+ spec, freqs = mlab.angle_spectrum(x=x, Fs=Fs, window=window,
+ pad_to=pad_to, sides=sides)
+ freqs += Fc
+
+ lines = self.plot(freqs, spec, **kwargs)
+ self.set_xlabel('Frequency')
+ self.set_ylabel('Angle (radians)')
+
+ return spec, freqs, lines[0]
+
+ @_preprocess_data(replace_names=["x"], label_namer=None)
+ @docstring.dedent_interpd
+ def phase_spectrum(self, x, Fs=None, Fc=None, window=None,
+ pad_to=None, sides=None, **kwargs):
+ """
+ Plot the phase spectrum.
+
+ Call signature::
+
+ phase_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
+ pad_to=None, sides='default', **kwargs)
+
+ Compute the phase spectrum (unwrapped angle spectrum) of *x*.
+ Data is padded to a length of *pad_to* and the windowing function
+ *window* is applied to the signal.
+
+ Parameters
+ ----------
+ x : 1-D array or sequence
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(Single_Spectrum)s
+
+ Fc : integer
+ The center frequency of *x* (defaults to 0), which offsets
+ the x extents of the plot to reflect the frequency range used
+ when a signal is acquired and then filtered and downsampled to
+ baseband.
+
+ Returns
+ -------
+ spectrum : 1-D array
+ The values for the phase spectrum in radians (real valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *spectrum*
+
+ line : a :class:`~matplotlib.lines.Line2D` instance
+ The line created by this function
+
+ Other Parameters
+ ----------------
+ **kwargs :
+ Keyword arguments control the :class:`~matplotlib.lines.Line2D`
+ properties:
+
+ %(Line2D)s
+
+ See Also
+ --------
+ :func:`magnitude_spectrum`
+ :func:`magnitude_spectrum` plots the magnitudes of the
+ corresponding frequencies.
+
+ :func:`angle_spectrum`
+ :func:`angle_spectrum` plots the wrapped version of this function.
+
+ :func:`specgram`
+ :func:`specgram` can plot the phase spectrum of segments within the
+ signal in a colormap.
+
+ Notes
+ -----
+ .. [Notes section required for data comment. See #10189.]
+
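+ Examples
+ --------
+ A minimal sketch of the unwrapped-phase plot (assuming ``numpy`` as
+ ``np`` and an existing Axes ``ax``)::
+
+     t = np.arange(0, 1, 0.001)
+     sig = np.sin(2 * np.pi * 50 * t)
+     spectrum, freqs, line = ax.phase_spectrum(sig, Fs=1000)
+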
+ """
+ if not self._hold:
+ self.cla()
+
+ if Fc is None:
+ Fc = 0
+
+ spec, freqs = mlab.phase_spectrum(x=x, Fs=Fs, window=window,
+ pad_to=pad_to, sides=sides)
+ freqs += Fc
+
+ lines = self.plot(freqs, spec, **kwargs)
+ self.set_xlabel('Frequency')
+ self.set_ylabel('Phase (radians)')
+
+ return spec, freqs, lines[0]
+
+ @_preprocess_data(replace_names=["x", "y"], label_namer=None)
+ @docstring.dedent_interpd
+ def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
+ window=mlab.window_hanning, noverlap=0, pad_to=None,
+ sides='default', scale_by_freq=None, **kwargs):
+ """
+ Plot the coherence between *x* and *y*.
+
+ Plot the coherence between *x* and *y*. Coherence is the
+ normalized cross spectral density:
+
+ .. math::
+
+ C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
+
+ Parameters
+ ----------
+ %(Spectral)s
+
+ %(PSD)s
+
+ noverlap : integer
+ The number of points of overlap between blocks. The
+ default value is 0 (no overlap).
+
+ Fc : integer
+ The center frequency of *x* (defaults to 0), which offsets
+ the x extents of the plot to reflect the frequency range used
+ when a signal is acquired and then filtered and downsampled to
+ baseband.
+
+ Returns
+ -------
+ Cxy : 1-D array
+ The coherence vector.
+
+ freqs : 1-D array
+ The frequencies for the elements in *Cxy*.
+
+ Other Parameters
+ ----------------
+ **kwargs :
+ Keyword arguments control the :class:`~matplotlib.lines.Line2D`
+ properties:
+
+ %(Line2D)s
+
+ References
+ ----------
+ Bendat & Piersol -- Random Data: Analysis and Measurement Procedures,
+ John Wiley & Sons (1986)
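+
+ Examples
+ --------
+ A minimal sketch (assuming ``numpy`` as ``np`` and an existing Axes
+ ``ax``)::
+
+     t = np.arange(0, 2, 0.001)
+     s1 = np.sin(2 * np.pi * 10 * t) + np.random.randn(t.size)
+     s2 = np.sin(2 * np.pi * 10 * t) + np.random.randn(t.size)
+     Cxy, freqs = ax.cohere(s1, s2, NFFT=256, Fs=1000)
+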
+ """
+ if not self._hold:
+ self.cla()
+ cxy, freqs = mlab.cohere(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
+ window=window, noverlap=noverlap,
+ scale_by_freq=scale_by_freq)
+ freqs += Fc
+
+ self.plot(freqs, cxy, **kwargs)
+ self.set_xlabel('Frequency')
+ self.set_ylabel('Coherence')
+ self.grid(True)
+
+ return cxy, freqs
+
+ @_preprocess_data(replace_names=["x"], label_namer=None)
+ @docstring.dedent_interpd
+ def specgram(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
+ window=None, noverlap=None,
+ cmap=None, xextent=None, pad_to=None, sides=None,
+ scale_by_freq=None, mode=None, scale=None,
+ vmin=None, vmax=None, **kwargs):
+ """
+ Plot a spectrogram.
+
+ Call signature::
+
+ specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
+ window=mlab.window_hanning, noverlap=128,
+ cmap=None, xextent=None, pad_to=None, sides='default',
+ scale_by_freq=None, mode='default', scale='default',
+ **kwargs)
+
+ Compute and plot a spectrogram of data in *x*. Data are split into
+ *NFFT* length segments and the spectrum of each section is
+ computed. The windowing function *window* is applied to each
+ segment, and the amount of overlap of each segment is
+ specified with *noverlap*. The spectrogram is plotted as a colormap
+ (using imshow).
+
+ Parameters
+ ----------
+ x : 1-D array or sequence
+ Array or sequence containing the data.
+
+ %(Spectral)s
+
+ %(PSD)s
+
+ mode : [ 'default' | 'psd' | 'magnitude' | 'angle' | 'phase' ]
+ What sort of spectrum to use. Default is 'psd', which takes
+ the power spectral density. 'magnitude' returns the magnitude
+ spectrum. 'angle' returns the phase spectrum without unwrapping.
+ 'phase' returns the phase spectrum with unwrapping. 'complex' is
+ not supported here and raises a ValueError.
+
+ noverlap : integer
+ The number of points of overlap between blocks. The
+ default value is 128.
+
+ scale : [ 'default' | 'linear' | 'dB' ]
+ The scaling of the values in the *spec*. 'linear' is no scaling.
+ 'dB' returns the values in dB scale. When *mode* is 'psd',
+ this is dB power (10 * log10). Otherwise this is dB amplitude
+ (20 * log10). 'default' is 'dB' if *mode* is 'psd' or
+ 'magnitude' and 'linear' otherwise. This must be 'linear'
+ if *mode* is 'angle' or 'phase'.
+
+ Fc : integer
+ The center frequency of *x* (defaults to 0), which offsets
+ the x extents of the plot to reflect the frequency range used
+ when a signal is acquired and then filtered and downsampled to
+ baseband.
+
+ cmap :
+ A :class:`matplotlib.colors.Colormap` instance; if *None*, use
+ default determined by rc
+
+ xextent : [None | (xmin, xmax)]
+ The image extent along the x-axis. The default sets *xmin* to the
+ left border of the first bin (*spectrum* column) and *xmax* to the
+ right border of the last bin. Note that for *noverlap>0* the width
+ of the bins is smaller than those of the segments.
+
+ **kwargs :
+ Additional kwargs are passed on to imshow which makes the
+ specgram image
+
+ Returns
+ -------
+ spectrum : 2-D array
+ Columns are the periodograms of successive segments.
+
+ freqs : 1-D array
+ The frequencies corresponding to the rows in *spectrum*.
+
+ t : 1-D array
+ The times corresponding to midpoints of segments (i.e., the columns
+ in *spectrum*).
+
+ im : instance of class :class:`~matplotlib.image.AxesImage`
+ The image created by imshow containing the spectrogram
+
+ See Also
+ --------
+ :func:`psd`
+ :func:`psd` differs in the default overlap; in returning the mean
+ of the segment periodograms; in not returning times; and in
+ generating a line plot instead of colormap.
+
+ :func:`magnitude_spectrum`
+ A single spectrum, similar to having a single segment when *mode*
+ is 'magnitude'. Plots a line instead of a colormap.
+
+ :func:`angle_spectrum`
+ A single spectrum, similar to having a single segment when *mode*
+ is 'angle'. Plots a line instead of a colormap.
+
+ :func:`phase_spectrum`
+ A single spectrum, similar to having a single segment when *mode*
+ is 'phase'. Plots a line instead of a colormap.
+
+ Notes
+ -----
+ The parameters *detrend* and *scale_by_freq* only apply when *mode*
+ is set to 'psd'.
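+
+ Examples
+ --------
+ A minimal sketch (assuming ``numpy`` as ``np`` and an existing Axes
+ ``ax``)::
+
+     fs = 1000.0
+     t = np.arange(0, 5, 1 / fs)
+     sig = np.sin(2 * np.pi * (100 + 20 * t) * t)   # simple chirp
+     spectrum, freqs, times, im = ax.specgram(sig, NFFT=256, Fs=fs,
+                                              noverlap=128)
+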
+ """
+ if not self._hold:
+ self.cla()
+
+ if NFFT is None:
+ NFFT = 256 # same default as in mlab.specgram()
+ if Fc is None:
+ Fc = 0 # same default as in mlab._spectral_helper()
+ if noverlap is None:
+ noverlap = 128 # same default as in mlab.specgram()
+
+ if mode == 'complex':
+ raise ValueError('Cannot plot a complex specgram')
+
+ if scale is None or scale == 'default':
+ if mode in ['angle', 'phase']:
+ scale = 'linear'
+ else:
+ scale = 'dB'
+ elif mode in ['angle', 'phase'] and scale == 'dB':
+ raise ValueError('Cannot use dB scale with angle or phase mode')
+
+ spec, freqs, t = mlab.specgram(x=x, NFFT=NFFT, Fs=Fs,
+ detrend=detrend, window=window,
+ noverlap=noverlap, pad_to=pad_to,
+ sides=sides,
+ scale_by_freq=scale_by_freq,
+ mode=mode)
+
+ if scale == 'linear':
+ Z = spec
+ elif scale == 'dB':
+ if mode is None or mode == 'default' or mode == 'psd':
+ Z = 10. * np.log10(spec)
+ else:
+ Z = 20. * np.log10(spec)
+ else:
+ raise ValueError('Unknown scale %s' % scale)
+
+ Z = np.flipud(Z)
+
+ if xextent is None:
+ # padding is needed for first and last segment:
+ pad_xextent = (NFFT-noverlap) / Fs / 2
+ xextent = np.min(t) - pad_xextent, np.max(t) + pad_xextent
+ xmin, xmax = xextent
+ freqs += Fc
+ extent = xmin, xmax, freqs[0], freqs[-1]
+ im = self.imshow(Z, cmap, extent=extent, vmin=vmin, vmax=vmax,
+ **kwargs)
+ self.axis('auto')
+
+ return spec, freqs, t, im
+
+ def spy(self, Z, precision=0, marker=None, markersize=None,
+ aspect='equal', origin="upper", **kwargs):
+ """
+ Plot the sparsity pattern of a 2-D array.
+
+ ``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
+
+ Parameters
+ ----------
+
+ Z : array_like or sparse matrix, shape (n, m)
+ The array to be plotted.
+
+ precision : float, optional, default: 0
+ If *precision* is 0, any non-zero value will be plotted; else,
+ values of :math:`|Z| > precision` will be plotted.
+
+ For :class:`scipy.sparse.spmatrix` instances, there is a special
+ case: if *precision* is 'present', any value present in the array
+ will be plotted, even if it is identically zero.
+
+ origin : ["upper", "lower"], optional, default: "upper"
+ Place the [0,0] index of the array in the upper left or lower left
+ corner of the axes.
+
+ aspect : ['auto' | 'equal' | scalar], optional, default: "equal"
+
+ If 'equal', and `extent` is None, changes the axes aspect ratio to
+ match that of the image. If `extent` is not `None`, the axes
+ aspect ratio is changed to match that of the extent.
+
+
+ If 'auto', changes the image aspect ratio to match that of the
+ axes.
+
+ If None, default to rc ``image.aspect`` value.
+
+ Two plotting styles are available: image or marker. Both
+ are available for full arrays, but only the marker style
+ works for :class:`scipy.sparse.spmatrix` instances.
+
+ If *marker* and *markersize* are *None*, an image will be
+ returned and any remaining kwargs are passed to
+ :func:`~matplotlib.pyplot.imshow`; else, a
+ :class:`~matplotlib.lines.Line2D` object will be returned with
+ the value of marker determining the marker type, and any
+ remaining kwargs passed to the
+ :meth:`~matplotlib.axes.Axes.plot` method.
+
+ If *marker* and *markersize* are *None*, useful kwargs include:
+
+ * *cmap*
+ * *alpha*
+
+ See also
+ --------
+ imshow : for image options.
+ plot : for plotting options
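+
+ Examples
+ --------
+ A minimal sketch of the default image style (assuming ``numpy`` as
+ ``np`` and an existing Axes ``ax``)::
+
+     Z = np.eye(10)
+     Z[2, 7] = 1.0
+     ax.spy(Z, precision=0)
+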
+ """
+ if marker is None and markersize is None and hasattr(Z, 'tocoo'):
+ marker = 's'
+ if marker is None and markersize is None:
+ Z = np.asarray(Z)
+ mask = np.abs(Z) > precision
+
+ if 'cmap' not in kwargs:
+ kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
+ name='binary')
+ nr, nc = Z.shape
+ extent = [-0.5, nc - 0.5, nr - 0.5, -0.5]
+ ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
+ extent=extent, origin=origin, **kwargs)
+ else:
+ if hasattr(Z, 'tocoo'):
+ c = Z.tocoo()
+ if precision == 'present':
+ y = c.row
+ x = c.col
+ else:
+ nonzero = np.abs(c.data) > precision
+ y = c.row[nonzero]
+ x = c.col[nonzero]
+ else:
+ Z = np.asarray(Z)
+ nonzero = np.abs(Z) > precision
+ y, x = np.nonzero(nonzero)
+ if marker is None:
+ marker = 's'
+ if markersize is None:
+ markersize = 10
+ marks = mlines.Line2D(x, y, linestyle='None',
+ marker=marker, markersize=markersize, **kwargs)
+ self.add_line(marks)
+ nr, nc = Z.shape
+ self.set_xlim(xmin=-0.5, xmax=nc - 0.5)
+ self.set_ylim(ymin=nr - 0.5, ymax=-0.5)
+ self.set_aspect(aspect)
+ ret = marks
+ self.title.set_y(1.05)
+ self.xaxis.tick_top()
+ self.xaxis.set_ticks_position('both')
+ self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
+ steps=[1, 2, 5, 10],
+ integer=True))
+ self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
+ steps=[1, 2, 5, 10],
+ integer=True))
+ return ret
+
+ def matshow(self, Z, **kwargs):
+ """
+ Plot the values of a 2D matrix or array as color-coded image.
+
+ The matrix will be shown the way it would be printed, with the first
+ row at the top. Row and column numbering is zero-based.
+
+ Parameters
+ ----------
+ Z : array-like(N, M)
+ The matrix to be displayed.
+
+ Returns
+ -------
+ image : `~matplotlib.image.AxesImage`
+
+ Other Parameters
+ ----------------
+ **kwargs : `~matplotlib.axes.Axes.imshow` arguments
+
+ See Also
+ --------
+ imshow : More general function to plot data on a 2D regular raster.
+
+ Notes
+ -----
+ This is just a convenience function wrapping `.imshow` to set useful
+ defaults for displaying a matrix. In particular:
+
+ - Set ``origin='upper'``.
+ - Set ``interpolation='nearest'``.
+ - Set ``aspect='equal'``.
+ - Ticks are placed to the left and above.
+ - Ticks are formatted to show integer indices.
+
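+ Examples
+ --------
+ A minimal sketch (assuming ``numpy`` as ``np`` and an existing Axes
+ ``ax``)::
+
+     ax.matshow(np.arange(12).reshape(3, 4))
+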
+ """
+ Z = np.asanyarray(Z)
+ nr, nc = Z.shape
+ kw = {'origin': 'upper',
+ 'interpolation': 'nearest',
+ 'aspect': 'equal'} # (already the imshow default)
+ kw.update(kwargs)
+ im = self.imshow(Z, **kw)
+ self.title.set_y(1.05)
+ self.xaxis.tick_top()
+ self.xaxis.set_ticks_position('both')
+ self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
+ steps=[1, 2, 5, 10],
+ integer=True))
+ self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
+ steps=[1, 2, 5, 10],
+ integer=True))
+ return im
+
+ @_preprocess_data(replace_names=["dataset"], label_namer=None)
+ def violinplot(self, dataset, positions=None, vert=True, widths=0.5,
+ showmeans=False, showextrema=True, showmedians=False,
+ points=100, bw_method=None):
+ """
+ Make a violin plot.
+
+ Make a violin plot for each column of *dataset* or each vector in
+ sequence *dataset*. Each filled area extends to represent the
+ entire data range, with optional lines at the mean, the median,
+ the minimum, and the maximum.
+
+ Parameters
+ ----------
+ dataset : Array or a sequence of vectors.
+ The input data.
+
+ positions : array-like, default = [1, 2, ..., n]
+ Sets the positions of the violins. The ticks and limits are
+ automatically set to match the positions.
+
+ vert : bool, default = True.
+ If true, creates a vertical violin plot.
+ Otherwise, creates a horizontal violin plot.
+
+ widths : array-like, default = 0.5
+ Either a scalar or a vector that sets the maximal width of
+ each violin. The default is 0.5, which uses about half of the
+ available horizontal space.
+
+ showmeans : bool, default = False
+ If `True`, will toggle rendering of the means.
+
+ showextrema : bool, default = True
+ If `True`, will toggle rendering of the extrema.
+
+ showmedians : bool, default = False
+ If `True`, will toggle rendering of the medians.
+
+ points : scalar, default = 100
+ Defines the number of points to evaluate each of the
+ gaussian kernel density estimations at.
+
+ bw_method : str, scalar or callable, optional
+ The method used to calculate the estimator bandwidth. This can be
+ 'scott', 'silverman', a scalar constant or a callable. If a
+ scalar, this will be used directly as `kde.factor`. If a
+ callable, it should take a `GaussianKDE` instance as its only
+ parameter and return a scalar. If None (default), 'scott' is used.
+
+ Returns
+ -------
+
+ result : dict
+ A dictionary mapping each component of the violinplot to a
+ list of the corresponding collection instances created. The
+ dictionary has the following keys:
+
+ - ``bodies``: A list of the
+ :class:`matplotlib.collections.PolyCollection` instances
+ containing the filled area of each violin.
+
+ - ``cmeans``: A
+ :class:`matplotlib.collections.LineCollection` instance
+ created to identify the mean values of each of the
+ violin's distribution.
+
+ - ``cmins``: A
+ :class:`matplotlib.collections.LineCollection` instance
+ created to identify the bottom of each violin's
+ distribution.
+
+ - ``cmaxes``: A
+ :class:`matplotlib.collections.LineCollection` instance
+ created to identify the top of each violin's
+ distribution.
+
+ - ``cbars``: A
+ :class:`matplotlib.collections.LineCollection` instance
+ created to identify the centers of each violin's
+ distribution.
+
+ - ``cmedians``: A
+ :class:`matplotlib.collections.LineCollection` instance
+ created to identify the median values of each of the
+ violin's distribution.
+
+ Notes
+ -----
+ .. [Notes section required for data comment. See #10189.]
+
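+ Examples
+ --------
+ A minimal sketch (assuming ``numpy`` as ``np`` and an existing Axes
+ ``ax``)::
+
+     data = [np.random.normal(0, std, 100) for std in range(1, 4)]
+     parts = ax.violinplot(data, positions=[1, 2, 3], showmedians=True)
+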
+ """
+
+ def _kde_method(X, coords):
+ # fallback gracefully if the vector contains only one value
+ if np.all(X[0] == X):
+ return (X[0] == coords).astype(float)
+ kde = mlab.GaussianKDE(X, bw_method)
+ return kde.evaluate(coords)
+
+ vpstats = cbook.violin_stats(dataset, _kde_method, points=points)
+ return self.violin(vpstats, positions=positions, vert=vert,
+ widths=widths, showmeans=showmeans,
+ showextrema=showextrema, showmedians=showmedians)
+
+ def violin(self, vpstats, positions=None, vert=True, widths=0.5,
+ showmeans=False, showextrema=True, showmedians=False):
+ """Drawing function for violin plots.
+
+ Draw a violin plot for each column of `vpstats`. Each filled area
+ extends to represent the entire data range, with optional lines at the
+ mean, the median, the minimum, and the maximum.
+
+ Parameters
+ ----------
+
+ vpstats : list of dicts
+ A list of dictionaries containing stats for each violin plot.
+ Required keys are:
+
+ - ``coords``: A list of scalars containing the coordinates at
+ which the violin's kernel density estimate was evaluated.
+
+ - ``vals``: A list of scalars containing the values of the
+ kernel density estimate at each of the coordinates given
+ in *coords*.
+
+ - ``mean``: The mean value for this violin's dataset.
+
+ - ``median``: The median value for this violin's dataset.
+
+ - ``min``: The minimum value for this violin's dataset.
+
+ - ``max``: The maximum value for this violin's dataset.
+
+ positions : array-like, default = [1, 2, ..., n]
+ Sets the positions of the violins. The ticks and limits are
+ automatically set to match the positions.
+
+ vert : bool, default = True.
+ If true, plots the violins vertically.
+ Otherwise, plots the violins horizontally.
+
+ widths : array-like, default = 0.5
+ Either a scalar or a vector that sets the maximal width of
+ each violin. The default is 0.5, which uses about half of the
+ available horizontal space.
+
+ showmeans : bool, default = False
+ If true, will toggle rendering of the means.
+
+ showextrema : bool, default = True
+ If true, will toggle rendering of the extrema.
+
+ showmedians : bool, default = False
+ If true, will toggle rendering of the medians.
+
+ Returns
+ -------
+ result : dict
+ A dictionary mapping each component of the violinplot to a
+ list of the corresponding collection instances created. The
+ dictionary has the following keys:
+
+ - ``bodies``: A list of the
+ :class:`matplotlib.collections.PolyCollection` instances
+ containing the filled area of each violin.
+
+ - ``cmeans``: A
+ :class:`matplotlib.collections.LineCollection` instance
+ created to identify the mean values of each of the
+ violin's distribution.
+
+ - ``cmins``: A
+ :class:`matplotlib.collections.LineCollection` instance
+ created to identify the bottom of each violin's
+ distribution.
+
+ - ``cmaxes``: A
+ :class:`matplotlib.collections.LineCollection` instance
+ created to identify the top of each violin's
+ distribution.
+
+ - ``cbars``: A
+ :class:`matplotlib.collections.LineCollection` instance
+ created to identify the centers of each violin's
+ distribution.
+
+ - ``cmedians``: A
+ :class:`matplotlib.collections.LineCollection` instance
+ created to identify the median values of each of the
+ violin's distribution.
+
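+ Examples
+ --------
+ A minimal sketch; *vpstats* is typically produced by
+ :func:`matplotlib.cbook.violin_stats` (assuming ``numpy`` as ``np``
+ and an existing Axes ``ax``)::
+
+     from matplotlib import cbook, mlab
+     data = [np.random.normal(0, 1, 100)]
+     kde = lambda X, coords: mlab.GaussianKDE(X).evaluate(coords)
+     vpstats = cbook.violin_stats(data, kde)
+     artists = ax.violin(vpstats, positions=[1])
+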
+ """
+
+ # Statistical quantities to be plotted on the violins
+ means = []
+ mins = []
+ maxes = []
+ medians = []
+
+ # Collections to be returned
+ artists = {}
+
+ N = len(vpstats)
+ datashape_message = ("List of violinplot statistics and `{0}` "
+ "values must have the same length")
+
+ # Validate positions
+ if positions is None:
+ positions = range(1, N + 1)
+ elif len(positions) != N:
+ raise ValueError(datashape_message.format("positions"))
+
+ # Validate widths
+ if np.isscalar(widths):
+ widths = [widths] * N
+ elif len(widths) != N:
+ raise ValueError(datashape_message.format("widths"))
+
+ # Calculate ranges for statistics lines
+ pmins = -0.25 * np.array(widths) + positions
+ pmaxes = 0.25 * np.array(widths) + positions
+
+ # Check whether we are rendering vertically or horizontally
+ if vert:
+ fill = self.fill_betweenx
+ perp_lines = self.hlines
+ par_lines = self.vlines
+ else:
+ fill = self.fill_between
+ perp_lines = self.vlines
+ par_lines = self.hlines
+
+ if rcParams['_internal.classic_mode']:
+ fillcolor = 'y'
+ edgecolor = 'r'
+ else:
+ fillcolor = edgecolor = self._get_lines.get_next_color()
+
+ # Render violins
+ bodies = []
+ for stats, pos, width in zip(vpstats, positions, widths):
+ # The 0.5 factor reflects the fact that we plot from v-p to
+ # v+p
+ vals = np.array(stats['vals'])
+ vals = 0.5 * width * vals / vals.max()
+ bodies += [fill(stats['coords'],
+ -vals + pos,
+ vals + pos,
+ facecolor=fillcolor,
+ alpha=0.3)]
+ means.append(stats['mean'])
+ mins.append(stats['min'])
+ maxes.append(stats['max'])
+ medians.append(stats['median'])
+ artists['bodies'] = bodies
+
+ # Render means
+ if showmeans:
+ artists['cmeans'] = perp_lines(means, pmins, pmaxes,
+ colors=edgecolor)
+
+ # Render extrema
+ if showextrema:
+ artists['cmaxes'] = perp_lines(maxes, pmins, pmaxes,
+ colors=edgecolor)
+ artists['cmins'] = perp_lines(mins, pmins, pmaxes,
+ colors=edgecolor)
+ artists['cbars'] = par_lines(positions, mins, maxes,
+ colors=edgecolor)
+
+ # Render medians
+ if showmedians:
+ artists['cmedians'] = perp_lines(medians,
+ pmins,
+ pmaxes,
+ colors=edgecolor)
+
+ return artists
+
+ def tricontour(self, *args, **kwargs):
+ return mtri.tricontour(self, *args, **kwargs)
+ tricontour.__doc__ = mtri.tricontour.__doc__
+
+ def tricontourf(self, *args, **kwargs):
+ return mtri.tricontourf(self, *args, **kwargs)
+ tricontourf.__doc__ = mtri.tricontour.__doc__
+
+ def tripcolor(self, *args, **kwargs):
+ return mtri.tripcolor(self, *args, **kwargs)
+ tripcolor.__doc__ = mtri.tripcolor.__doc__
+
+ def triplot(self, *args, **kwargs):
+ return mtri.triplot(self, *args, **kwargs)
+ triplot.__doc__ = mtri.triplot.__doc__
diff --git a/contrib/python/matplotlib/py2/matplotlib/axes/_base.py b/contrib/python/matplotlib/py2/matplotlib/axes/_base.py
new file mode 100644
index 00000000000..5265f112701
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/axes/_base.py
@@ -0,0 +1,4297 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from collections import OrderedDict
+
+import six
+from six.moves import xrange
+
+import itertools
+import warnings
+import math
+from operator import attrgetter
+
+import numpy as np
+
+import matplotlib
+
+from matplotlib import cbook
+from matplotlib.cbook import (_check_1d, _string_to_bool, iterable,
+ index_of, get_label)
+from matplotlib import docstring
+import matplotlib.colors as mcolors
+import matplotlib.lines as mlines
+import matplotlib.patches as mpatches
+import matplotlib.artist as martist
+import matplotlib.transforms as mtransforms
+import matplotlib.ticker as mticker
+import matplotlib.axis as maxis
+import matplotlib.scale as mscale
+import matplotlib.spines as mspines
+import matplotlib.font_manager as font_manager
+import matplotlib.text as mtext
+import matplotlib.image as mimage
+from matplotlib.offsetbox import OffsetBox
+from matplotlib.artist import allow_rasterization
+from matplotlib.legend import Legend
+
+from matplotlib.rcsetup import cycler
+from matplotlib.rcsetup import validate_axisbelow
+
+rcParams = matplotlib.rcParams
+
+is_string_like = cbook.is_string_like
+is_sequence_of_strings = cbook.is_sequence_of_strings
+
+_hold_msg = """axes.hold is deprecated.
+ See the API Changes document (http://matplotlib.org/api/api_changes.html)
+ for more details."""
+
+
+def _process_plot_format(fmt):
+ """
+ Process a MATLAB style color/line style format string. Return a
+ (*linestyle*, *marker*, *color*) tuple as a result of the processing.
+ Example format strings include:
+
+ * 'ko': black circles
+ * '.b': blue dots
+ * 'r--': red dashed lines
+ * 'C2--': the third color in the color cycle, dashed lines
+
+ .. seealso::
+
+ :func:`~matplotlib.Line2D.lineStyles` and
+ :func:`~matplotlib.pyplot.colors`
+ for all possible styles and color format string.
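+
+ For example, a hypothetical illustration of the returned
+ (*linestyle*, *marker*, *color*) triple::
+
+     _process_plot_format('r--')   # ('--', 'None', 'r')
+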
+ """
+
+ linestyle = None
+ marker = None
+ color = None
+
+ # Is fmt just a colorspec?
+ try:
+ color = mcolors.to_rgba(fmt)
+
+ # We need to differentiate grayscale '1.0' from tri_down marker '1'
+ try:
+ fmtint = str(int(fmt))
+ except ValueError:
+ return linestyle, marker, color # Yes
+ else:
+ if fmt != fmtint:
+ # user definitely doesn't want tri_down marker
+ return linestyle, marker, color # Yes
+ else:
+ # ignore converted color
+ color = None
+ except ValueError:
+ pass # No, not just a color.
+
+ # handle the multi char special cases and strip them from the
+ # string
+ if fmt.find('--') >= 0:
+ linestyle = '--'
+ fmt = fmt.replace('--', '')
+ if fmt.find('-.') >= 0:
+ linestyle = '-.'
+ fmt = fmt.replace('-.', '')
+ if fmt.find(' ') >= 0:
+ linestyle = 'None'
+ fmt = fmt.replace(' ', '')
+
+ chars = [c for c in fmt]
+
+ i = 0
+ while i < len(chars):
+ c = chars[i]
+ if c in mlines.lineStyles:
+ if linestyle is not None:
+ raise ValueError(
+ 'Illegal format string "%s"; two linestyle symbols' % fmt)
+ linestyle = c
+ elif c in mlines.lineMarkers:
+ if marker is not None:
+ raise ValueError(
+ 'Illegal format string "%s"; two marker symbols' % fmt)
+ marker = c
+ elif c in mcolors.get_named_colors_mapping():
+ if color is not None:
+ raise ValueError(
+ 'Illegal format string "%s"; two color symbols' % fmt)
+ color = c
+ elif c == 'C' and i < len(chars) - 1:
+ color_cycle_number = int(chars[i + 1])
+ color = mcolors.to_rgba("C{}".format(color_cycle_number))
+ i += 1
+ else:
+ raise ValueError(
+ 'Unrecognized character %c in format string' % c)
+ i += 1
+
+ if linestyle is None and marker is None:
+ linestyle = rcParams['lines.linestyle']
+ if linestyle is None:
+ linestyle = 'None'
+ if marker is None:
+ marker = 'None'
+
+ return linestyle, marker, color
+
+
+class _process_plot_var_args(object):
+ """
+ Process variable length arguments to the plot command, so that
+ plot commands like the following are supported::
+
+ plot(t, s)
+ plot(t1, s1, t2, s2)
+ plot(t1, s1, 'ko', t2, s2)
+ plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
+
+ an arbitrary number of *x*, *y*, *fmt* are allowed
+ """
+ def __init__(self, axes, command='plot'):
+ self.axes = axes
+ self.command = command
+ self.set_prop_cycle()
+
+ def __getstate__(self):
+ # note: it is not possible to pickle a itertools.cycle instance
+ return {'axes': self.axes, 'command': self.command}
+
+ def __setstate__(self, state):
+ self.__dict__ = state.copy()
+ self.set_prop_cycle()
+
+ def set_prop_cycle(self, *args, **kwargs):
+ if not (args or kwargs) or (len(args) == 1 and args[0] is None):
+ prop_cycler = rcParams['axes.prop_cycle']
+ else:
+ prop_cycler = cycler(*args, **kwargs)
+
+ self.prop_cycler = itertools.cycle(prop_cycler)
+ # This should make a copy
+ self._prop_keys = prop_cycler.keys
+
+ def __call__(self, *args, **kwargs):
+ if self.axes.xaxis is not None and self.axes.yaxis is not None:
+ xunits = kwargs.pop('xunits', self.axes.xaxis.units)
+
+ if self.axes.name == 'polar':
+ xunits = kwargs.pop('thetaunits', xunits)
+
+ yunits = kwargs.pop('yunits', self.axes.yaxis.units)
+
+ if self.axes.name == 'polar':
+ yunits = kwargs.pop('runits', yunits)
+
+ if xunits != self.axes.xaxis.units:
+ self.axes.xaxis.set_units(xunits)
+
+ if yunits != self.axes.yaxis.units:
+ self.axes.yaxis.set_units(yunits)
+
+ ret = self._grab_next_args(*args, **kwargs)
+ return ret
+
+ def get_next_color(self):
+ """Return the next color in the cycle."""
+ if 'color' not in self._prop_keys:
+ return 'k'
+ return next(self.prop_cycler)['color']
+
+ def set_lineprops(self, line, **kwargs):
+ assert self.command == 'plot', 'set_lineprops only works with "plot"'
+ line.set(**kwargs)
+
+ def set_patchprops(self, fill_poly, **kwargs):
+ assert self.command == 'fill', 'set_patchprops only works with "fill"'
+ fill_poly.set(**kwargs)
+
+ def _xy_from_xy(self, x, y):
+ if self.axes.xaxis is not None and self.axes.yaxis is not None:
+ bx = self.axes.xaxis.update_units(x)
+ by = self.axes.yaxis.update_units(y)
+
+ if self.command != 'plot':
+ # the Line2D class can handle unitized data, with
+ # support for post hoc unit changes etc. Other mpl
+ # artists, e.g., Polygon which _process_plot_var_args
+ # also serves on calls to fill, cannot. So this is a
+ # hack to say: if you are not "plot", which is
+ # creating Line2D, then convert the data now to
+ # floats. If you are plot, pass the raw data through
+ # to Line2D which will handle the conversion. So
+ # polygons will not support post hoc conversions of
+ # the unit type since they are not storing the orig
+ # data. Hopefully we can rationalize this at a later
+ # date - JDH
+ if bx:
+ x = self.axes.convert_xunits(x)
+ if by:
+ y = self.axes.convert_yunits(y)
+
+ # like asanyarray, but converts scalar to array, and doesn't change
+ # existing compatible sequences
+ x = _check_1d(x)
+ y = _check_1d(y)
+ if x.shape[0] != y.shape[0]:
+ raise ValueError("x and y must have same first dimension, but "
+ "have shapes {} and {}".format(x.shape, y.shape))
+ if x.ndim > 2 or y.ndim > 2:
+ raise ValueError("x and y can be no greater than 2-D, but have "
+ "shapes {} and {}".format(x.shape, y.shape))
+
+ if x.ndim == 1:
+ x = x[:, np.newaxis]
+ if y.ndim == 1:
+ y = y[:, np.newaxis]
+ return x, y
+
+ def _getdefaults(self, ignore, *kwargs):
+ """
+ Only advance the cycler if the cycler has information that
+ is not specified in any of the supplied dicts.
+ Ignore any keys specified in the `ignore` set.
+
+ Returns a copy of the defaults dictionary if there are any
+ keys that are not found in any of the supplied dictionaries.
+ If the supplied dictionaries have non-None values for
+ everything the property cycler has, then just return
+ an empty dictionary. Ignored keys are excluded from the
+ returned dictionary.
+
+ """
+ prop_keys = self._prop_keys
+ if ignore is None:
+ ignore = set()
+ prop_keys = prop_keys - ignore
+
+ if any(all(kw.get(k, None) is None for kw in kwargs)
+ for k in prop_keys):
+ # Need to copy this dictionary or else the next time around
+ # in the cycle, the dictionary could be missing entries.
+ default_dict = next(self.prop_cycler).copy()
+ for p in ignore:
+ default_dict.pop(p, None)
+ else:
+ default_dict = {}
+ return default_dict
+
+ def _setdefaults(self, defaults, *kwargs):
+ """
+ Given a defaults dictionary, and any other dictionaries,
+ update those other dictionaries with information in defaults if
+ none of the other dictionaries contains that information.
+
+ """
+ for k in defaults:
+ if all(kw.get(k, None) is None for kw in kwargs):
+ for kw in kwargs:
+ kw[k] = defaults[k]
+
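+ # A small sketch of how the two helpers above interact (assuming the active
+ # cycler only yields {'color': 'C0'}): for kw = {'linewidth': 2},
+ # _getdefaults(None, kw) advances the cycler and returns {'color': 'C0'},
+ # and _setdefaults({'color': 'C0'}, kw) then fills kw['color'] because no
+ # supplied dictionary provides a non-None 'color'.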
+ def _makeline(self, x, y, kw, kwargs):
+ kw = kw.copy() # Don't modify the original kw.
+ kw.update(kwargs)
+ default_dict = self._getdefaults(None, kw)
+ self._setdefaults(default_dict, kw)
+ seg = mlines.Line2D(x, y, **kw)
+ return seg
+
+ def _makefill(self, x, y, kw, kwargs):
+ kw = kw.copy() # Don't modify the original kw.
+ kwargs = kwargs.copy()
+
+ # Ignore 'marker'-related properties as they aren't Polygon
+ # properties, but they are Line2D properties, and so they are
+ # likely to appear in the default cycler construction.
+ # This is done here to the defaults dictionary as opposed to the
+ # other two dictionaries because we do want to capture when a
+ # *user* explicitly specifies a marker which should be an error.
+ # We also want to prevent advancing the cycler if there are no
+ # defaults needed after ignoring the given properties.
+ ignores = {'marker', 'markersize', 'markeredgecolor',
+ 'markerfacecolor', 'markeredgewidth'}
+ # Also ignore anything provided by *kwargs*.
+ for k, v in six.iteritems(kwargs):
+ if v is not None:
+ ignores.add(k)
+
+ # Only use the first dictionary as the basis for getting
+ # defaults, for back-compat reasons.
+ # Doing it with both seems to mess things up in
+ # various places (probably due to logic bugs elsewhere).
+ default_dict = self._getdefaults(ignores, kw)
+ self._setdefaults(default_dict, kw)
+
+ # Looks like we don't want "color" to be interpreted to
+ # mean both facecolor and edgecolor for some reason.
+ # So the "kw" dictionary is thrown out, and only its
+ # 'color' value is kept and translated as a 'facecolor'.
+ # This design should probably be revisited as it increases
+ # complexity.
+ facecolor = kw.get('color', None)
+
+ # Throw out 'color' as it is now handled as a facecolor
+ default_dict.pop('color', None)
+
+ # To get other properties set from the cycler
+ # modify the kwargs dictionary.
+ self._setdefaults(default_dict, kwargs)
+
+ seg = mpatches.Polygon(np.hstack((x[:, np.newaxis],
+ y[:, np.newaxis])),
+ facecolor=facecolor,
+ fill=kwargs.get('fill', True),
+ closed=kw['closed'])
+ self.set_patchprops(seg, **kwargs)
+ return seg
+
+ def _plot_args(self, tup, kwargs):
+ ret = []
+ if len(tup) > 1 and isinstance(tup[-1], six.string_types):
+ linestyle, marker, color = _process_plot_format(tup[-1])
+ tup = tup[:-1]
+ elif len(tup) == 3:
+ raise ValueError('third arg must be a format string')
+ else:
+ linestyle, marker, color = None, None, None
+
+ # Don't allow any None values; these would be up-converted
+ # to a one-element array of None, which causes problems
+ # downstream.
+ if any(v is None for v in tup):
+ raise ValueError("x and y must not be None")
+
+ kw = {}
+ for k, v in zip(('linestyle', 'marker', 'color'),
+ (linestyle, marker, color)):
+ if v is not None:
+ kw[k] = v
+
+ if 'label' not in kwargs or kwargs['label'] is None:
+ kwargs['label'] = get_label(tup[-1], None)
+
+ if len(tup) == 2:
+ x = _check_1d(tup[0])
+ y = _check_1d(tup[-1])
+ else:
+ x, y = index_of(tup[-1])
+
+ x, y = self._xy_from_xy(x, y)
+
+ if self.command == 'plot':
+ func = self._makeline
+ else:
+ kw['closed'] = kwargs.get('closed', True)
+ func = self._makefill
+
+ ncx, ncy = x.shape[1], y.shape[1]
+ if ncx > 1 and ncy > 1 and ncx != ncy:
+ cbook.warn_deprecated("2.2", "cycling among columns of inputs "
+ "with non-matching shapes is deprecated.")
+ for j in xrange(max(ncx, ncy)):
+ seg = func(x[:, j % ncx], y[:, j % ncy], kw, kwargs)
+ ret.append(seg)
+ return ret
+
+ def _grab_next_args(self, *args, **kwargs):
+ while args:
+ this, args = args[:2], args[2:]
+ if args and isinstance(args[0], six.string_types):
+ this += args[0],
+ args = args[1:]
+ for seg in self._plot_args(this, kwargs):
+ yield seg
+
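+ # For example, _grab_next_args(t1, s1, 'ko', t2, s2) is expected to consume
+ # the arguments as the groups (t1, s1, 'ko') and (t2, s2), passing each
+ # group to _plot_args and yielding the resulting artists.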
+
+class _AxesBase(martist.Artist):
+ """
+ """
+ name = "rectilinear"
+
+ _shared_x_axes = cbook.Grouper()
+ _shared_y_axes = cbook.Grouper()
+ _twinned_axes = cbook.Grouper()
+
+ def __str__(self):
+ return "{0}({1[0]:g},{1[1]:g};{1[2]:g}x{1[3]:g})".format(
+ type(self).__name__, self._position.bounds)
+
+ @docstring.Substitution(scale=' | '.join(
+ [repr(x) for x in mscale.get_scale_names()]))
+ def __init__(self, fig, rect,
+ facecolor=None, # defaults to rc axes.facecolor
+ frameon=True,
+ sharex=None, # use Axes instance's xaxis info
+ sharey=None, # use Axes instance's yaxis info
+ label='',
+ xscale=None,
+ yscale=None,
+ **kwargs
+ ):
+ """
+ Build an `~axes.Axes` instance in
+ `~matplotlib.figure.Figure` *fig* with
+ *rect=[left, bottom, width, height]* in
+ `~matplotlib.figure.Figure` coordinates
+
+ Optional keyword arguments:
+
+ ================ =========================================
+ Keyword Description
+ ================ =========================================
+ *adjustable* [ 'box' | 'datalim' ]
+ *alpha* float: the alpha transparency (can be None)
+ *anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
+ 'NW', 'W' ]
+ *aspect* [ 'auto' | 'equal' | aspect_ratio ]
+ *autoscale_on* bool; whether to autoscale the *viewlim*
+ *axisbelow* [ bool | 'line' ] draw the grids
+ and ticks below or above most other artists,
+ or below lines but above patches
+ *cursor_props* a (*float*, *color*) tuple
+ *figure* a :class:`~matplotlib.figure.Figure`
+ instance
+ *frame_on* bool; whether to draw the axes frame
+ *label* the axes label
+ *navigate* bool
+ *navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
+ toolbar button status
+ *position* [left, bottom, width, height] in
+ :class:`~matplotlib.figure.Figure` coords
+ *sharex* a :class:`~matplotlib.axes.Axes` instance
+ to share the x-axis with
+ *sharey* a :class:`~matplotlib.axes.Axes` instance
+ to share the y-axis with
+ *title* the title string
+ *visible* bool, whether the axes is visible
+ *xlabel* the xlabel
+ *xlim* (*xmin*, *xmax*) view limits
+ *xscale* [%(scale)s]
+ *xticklabels* sequence of strings
+ *xticks* sequence of floats
+ *ylabel* the ylabel string
+ *ylim* (*ymin*, *ymax*) view limits
+ *yscale* [%(scale)s]
+ *yticklabels* sequence of strings
+ *yticks* sequence of floats
+ ================ =========================================
+ """
+
+ martist.Artist.__init__(self)
+ if isinstance(rect, mtransforms.Bbox):
+ self._position = rect
+ else:
+ self._position = mtransforms.Bbox.from_bounds(*rect)
+ if self._position.width < 0 or self._position.height < 0:
+ raise ValueError('Width and height specified must be non-negative')
+ self._originalPosition = self._position.frozen()
+ # self.set_axes(self)
+ self.axes = self
+ self._aspect = 'auto'
+ self._adjustable = 'box'
+ self._anchor = 'C'
+ self._sharex = sharex
+ self._sharey = sharey
+ if sharex is not None:
+ self._shared_x_axes.join(self, sharex)
+ if sharey is not None:
+ self._shared_y_axes.join(self, sharey)
+ self.set_label(label)
+ self.set_figure(fig)
+
+ self.set_axes_locator(kwargs.get("axes_locator", None))
+
+ self.spines = self._gen_axes_spines()
+
+ # this call may differ for non-separable axes, e.g., polar
+ self._init_axis()
+ if facecolor is None:
+ facecolor = rcParams['axes.facecolor']
+ self._facecolor = facecolor
+ self._frameon = frameon
+ self._axisbelow = rcParams['axes.axisbelow']
+
+ self._rasterization_zorder = None
+
+ self._hold = rcParams['axes.hold']
+ if self._hold is None:
+ self._hold = True
+
+ self._connected = {} # a dict from events to (id, func)
+ self.cla()
+
+ # funcs used to format x and y - fall back on major formatters
+ self.fmt_xdata = None
+ self.fmt_ydata = None
+
+ self._cachedRenderer = None
+ self.set_navigate(True)
+ self.set_navigate_mode(None)
+
+ if xscale:
+ self.set_xscale(xscale)
+ if yscale:
+ self.set_yscale(yscale)
+
+ if len(kwargs):
+ self.update(kwargs)
+
+ if self.xaxis is not None:
+ self._xcid = self.xaxis.callbacks.connect(
+ 'units finalize', lambda: self._on_units_changed(scalex=True))
+
+ if self.yaxis is not None:
+ self._ycid = self.yaxis.callbacks.connect(
+ 'units finalize', lambda: self._on_units_changed(scaley=True))
+
+ self.tick_params(
+ top=rcParams['xtick.top'] and rcParams['xtick.minor.top'],
+ bottom=rcParams['xtick.bottom'] and rcParams['xtick.minor.bottom'],
+ labeltop=(rcParams['xtick.labeltop'] and
+ rcParams['xtick.minor.top']),
+ labelbottom=(rcParams['xtick.labelbottom'] and
+ rcParams['xtick.minor.bottom']),
+ left=rcParams['ytick.left'] and rcParams['ytick.minor.left'],
+ right=rcParams['ytick.right'] and rcParams['ytick.minor.right'],
+ labelleft=(rcParams['ytick.labelleft'] and
+ rcParams['ytick.minor.left']),
+ labelright=(rcParams['ytick.labelright'] and
+ rcParams['ytick.minor.right']),
+ which='minor')
+
+ self.tick_params(
+ top=rcParams['xtick.top'] and rcParams['xtick.major.top'],
+ bottom=rcParams['xtick.bottom'] and rcParams['xtick.major.bottom'],
+ labeltop=(rcParams['xtick.labeltop'] and
+ rcParams['xtick.major.top']),
+ labelbottom=(rcParams['xtick.labelbottom'] and
+ rcParams['xtick.major.bottom']),
+ left=rcParams['ytick.left'] and rcParams['ytick.major.left'],
+ right=rcParams['ytick.right'] and rcParams['ytick.major.right'],
+ labelleft=(rcParams['ytick.labelleft'] and
+ rcParams['ytick.major.left']),
+ labelright=(rcParams['ytick.labelright'] and
+ rcParams['ytick.major.right']),
+ which='major')
+
+ self._layoutbox = None
+ self._poslayoutbox = None
+
+ def __getstate__(self):
+ # The renderer should be re-created by the figure, and then cached at
+ # that point.
+ state = super(_AxesBase, self).__getstate__()
+ state['_cachedRenderer'] = None
+ state.pop('_layoutbox')
+ state.pop('_poslayoutbox')
+
+ return state
+
+ def __setstate__(self, state):
+ self.__dict__ = state
+ # put the _remove_method back on all artists contained within the axes
+ for container_name in ['lines', 'collections', 'tables', 'patches',
+ 'texts', 'images']:
+ container = getattr(self, container_name)
+ for artist in container:
+ artist._remove_method = container.remove
+ self._stale = True
+ self._layoutbox = None
+ self._poslayoutbox = None
+
+ def get_window_extent(self, *args, **kwargs):
+ """
+ Get the axes bounding box in display space; *args* and
+ *kwargs* are ignored.
+ """
+ bbox = self.bbox
+ x_pad = self.xaxis.get_tick_padding()
+ y_pad = self.yaxis.get_tick_padding()
+ return mtransforms.Bbox([[bbox.x0 - x_pad, bbox.y0 - y_pad],
+ [bbox.x1 + x_pad, bbox.y1 + y_pad]])
+
+ def _init_axis(self):
+ "move this out of __init__ because non-separable axes don't use it"
+ self.xaxis = maxis.XAxis(self)
+ self.spines['bottom'].register_axis(self.xaxis)
+ self.spines['top'].register_axis(self.xaxis)
+ self.yaxis = maxis.YAxis(self)
+ self.spines['left'].register_axis(self.yaxis)
+ self.spines['right'].register_axis(self.yaxis)
+ self._update_transScale()
+
+ def set_figure(self, fig):
+ """
+ Set the `.Figure` for this `.Axes`.
+
+ .. ACCEPTS: `.Figure`
+
+ Parameters
+ ----------
+ fig : `.Figure`
+ """
+ martist.Artist.set_figure(self, fig)
+
+ self.bbox = mtransforms.TransformedBbox(self._position,
+ fig.transFigure)
+ # these will be updated later as data is added
+ self.dataLim = mtransforms.Bbox.null()
+ self.viewLim = mtransforms.Bbox.unit()
+ self.transScale = mtransforms.TransformWrapper(
+ mtransforms.IdentityTransform())
+
+ self._set_lim_and_transforms()
+
+ def _set_lim_and_transforms(self):
+ """
+ set the *_xaxis_transform*, *_yaxis_transform*,
+ *transScale*, *transData*, *transLimits* and *transAxes*
+ transformations.
+
+ .. note::
+
+ This method is primarily used by rectilinear projections
+ of the :class:`~matplotlib.axes.Axes` class, and is meant
+ to be overridden by new kinds of projection axes that need
+ different transformations and limits. (See
+ :class:`~matplotlib.projections.polar.PolarAxes` for an
+ example.)
+
+ """
+ self.transAxes = mtransforms.BboxTransformTo(self.bbox)
+
+ # Transforms the x and y axis separately by a scale factor.
+ # It is assumed that this part will have non-linear components
+ # (e.g., for a log scale).
+ self.transScale = mtransforms.TransformWrapper(
+ mtransforms.IdentityTransform())
+
+ # An affine transformation on the data, generally to limit the
+ # range of the axes
+ self.transLimits = mtransforms.BboxTransformFrom(
+ mtransforms.TransformedBbox(self.viewLim, self.transScale))
+
+ # The parentheses are important for efficiency here -- they
+ # group the last two (which are usually affines) separately
+ # from the first (which, with log-scaling can be non-affine).
+ self.transData = self.transScale + (self.transLimits + self.transAxes)
+
+ self._xaxis_transform = mtransforms.blended_transform_factory(
+ self.transData, self.transAxes)
+ self._yaxis_transform = mtransforms.blended_transform_factory(
+ self.transAxes, self.transData)
+
+ def get_xaxis_transform(self, which='grid'):
+ """
+ Get the transformation used for drawing x-axis labels, ticks
+ and gridlines. The x-direction is in data coordinates and the
+ y-direction is in axis coordinates.
+
+ .. note::
+
+ This transformation is primarily used by the
+ :class:`~matplotlib.axis.Axis` class, and is meant to be
+ overridden by new kinds of projections that may need to
+ place axis elements in different locations.
+
+ """
+ if which == 'grid':
+ return self._xaxis_transform
+ elif which == 'tick1':
+ # for cartesian projection, this is bottom spine
+ return self.spines['bottom'].get_spine_transform()
+ elif which == 'tick2':
+ # for cartesian projection, this is top spine
+ return self.spines['top'].get_spine_transform()
+ else:
+ raise ValueError('unknown value for which')
+
+ def get_xaxis_text1_transform(self, pad_points):
+ """
+ Get the transformation used for drawing x-axis labels, which
+ will add the given amount of padding (in points) between the
+ axes and the label. The x-direction is in data coordinates
+ and the y-direction is in axis coordinates. Returns a
+ 3-tuple of the form::
+
+ (transform, valign, halign)
+
+ where *valign* and *halign* are requested alignments for the
+ text.
+
+ .. note::
+
+ This transformation is primarily used by the
+ :class:`~matplotlib.axis.Axis` class, and is meant to be
+ overridden by new kinds of projections that may need to
+ place axis elements in different locations.
+
+ """
+ labels_align = matplotlib.rcParams["xtick.alignment"]
+
+ return (self.get_xaxis_transform(which='tick1') +
+ mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
+ self.figure.dpi_scale_trans),
+ "top", labels_align)
+
+ def get_xaxis_text2_transform(self, pad_points):
+ """
+ Get the transformation used for drawing the secondary x-axis
+ labels, which will add the given amount of padding (in points)
+ between the axes and the label. The x-direction is in data
+ coordinates and the y-direction is in axis coordinates.
+ Returns a 3-tuple of the form::
+
+ (transform, valign, halign)
+
+ where *valign* and *halign* are requested alignments for the
+ text.
+
+ .. note::
+
+ This transformation is primarily used by the
+ :class:`~matplotlib.axis.Axis` class, and is meant to be
+ overridden by new kinds of projections that may need to
+ place axis elements in different locations.
+
+ """
+ labels_align = matplotlib.rcParams["xtick.alignment"]
+ return (self.get_xaxis_transform(which='tick2') +
+ mtransforms.ScaledTranslation(0, pad_points / 72.0,
+ self.figure.dpi_scale_trans),
+ "bottom", labels_align)
+
+ def get_yaxis_transform(self, which='grid'):
+ """
+ Get the transformation used for drawing y-axis labels, ticks
+ and gridlines. The x-direction is in axis coordinates and the
+ y-direction is in data coordinates.
+
+ .. note::
+
+ This transformation is primarily used by the
+ :class:`~matplotlib.axis.Axis` class, and is meant to be
+ overridden by new kinds of projections that may need to
+ place axis elements in different locations.
+
+ """
+ if which == 'grid':
+ return self._yaxis_transform
+ elif which == 'tick1':
+ # for cartesian projection, this is left spine
+ return self.spines['left'].get_spine_transform()
+ elif which == 'tick2':
+ # for cartesian projection, this is right spine
+ return self.spines['right'].get_spine_transform()
+ else:
+ raise ValueError('unknown value for which')
+
+ def get_yaxis_text1_transform(self, pad_points):
+ """
+ Get the transformation used for drawing y-axis labels, which
+ will add the given amount of padding (in points) between the
+ axes and the label. The x-direction is in axis coordinates
+ and the y-direction is in data coordinates. Returns a 3-tuple
+ of the form::
+
+ (transform, valign, halign)
+
+ where *valign* and *halign* are requested alignments for the
+ text.
+
+ .. note::
+
+ This transformation is primarily used by the
+ :class:`~matplotlib.axis.Axis` class, and is meant to be
+ overridden by new kinds of projections that may need to
+ place axis elements in different locations.
+
+ """
+ labels_align = matplotlib.rcParams["ytick.alignment"]
+ return (self.get_yaxis_transform(which='tick1') +
+ mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
+ self.figure.dpi_scale_trans),
+ labels_align, "right")
+
+ def get_yaxis_text2_transform(self, pad_points):
+ """
+ Get the transformation used for drawing the secondary y-axis
+ labels, which will add the given amount of padding (in points)
+ between the axes and the label. The x-direction is in axis
+ coordinates and the y-direction is in data coordinates.
+ Returns a 3-tuple of the form::
+
+ (transform, valign, halign)
+
+ where *valign* and *halign* are requested alignments for the
+ text.
+
+ .. note::
+
+ This transformation is primarily used by the
+ :class:`~matplotlib.axis.Axis` class, and is meant to be
+ overridden by new kinds of projections that may need to
+ place axis elements in different locations.
+
+ """
+ labels_align = matplotlib.rcParams["ytick.alignment"]
+
+ return (self.get_yaxis_transform(which='tick2') +
+ mtransforms.ScaledTranslation(pad_points / 72.0, 0,
+ self.figure.dpi_scale_trans),
+ labels_align, "left")
+
+ def _update_transScale(self):
+ self.transScale.set(
+ mtransforms.blended_transform_factory(
+ self.xaxis.get_transform(), self.yaxis.get_transform()))
+ if hasattr(self, "lines"):
+ for line in self.lines:
+ try:
+ line._transformed_path.invalidate()
+ except AttributeError:
+ pass
+
+ def get_position(self, original=False):
+ """
+ Get a copy of the axes rectangle as a `.Bbox`.
+
+ Parameters
+ ----------
+ original : bool
+ If ``True``, return the original position. Otherwise return the
+ active position. For an explanation of the positions see
+ `.set_position`.
+
+ Returns
+ -------
+ pos : `.Bbox`
+
+ """
+ if original:
+ return self._originalPosition.frozen()
+ else:
+ return self._position.frozen()
+
+ def set_position(self, pos, which='both'):
+ """
+ Set the axes position.
+
+ Axes have two position attributes. The 'original' position is the
+ position allocated for the Axes. The 'active' position is the
+ position the Axes is actually drawn at. These positions are usually
+ the same unless a fixed aspect is set to the Axes. See `.set_aspect`
+ for details.
+
+ Parameters
+ ----------
+ pos : [left, bottom, width, height] or `~matplotlib.transforms.Bbox`
+ The new position of the axes in `.Figure` coordinates.
+
+ which : ['both' | 'active' | 'original'], optional
+ Determines which position variables to change.
+
+ """
+ self._set_position(pos, which='both')
+ # because this is being called externally to the library we
+ # zero the constrained layout parts.
+ self._layoutbox = None
+ self._poslayoutbox = None
+
+ def _set_position(self, pos, which='both'):
+ """
+ Private version of set_position. Call this internally
+ to get the same functionality as `set_position`, but without
+ taking the axes out of the constrained_layout
+ hierarchy.
+ """
+ if not isinstance(pos, mtransforms.BboxBase):
+ pos = mtransforms.Bbox.from_bounds(*pos)
+ for ax in self._twinned_axes.get_siblings(self):
+ if which in ('both', 'active'):
+ ax._position.set(pos)
+ if which in ('both', 'original'):
+ ax._originalPosition.set(pos)
+ self.stale = True
+
+ def reset_position(self):
+ """
+ Reset the active position to the original position.
+
+ This resets a possible position change due to aspect constraints.
+ For an explanation of the positions see `.set_position`.
+ """
+ for ax in self._twinned_axes.get_siblings(self):
+ pos = ax.get_position(original=True)
+ ax.set_position(pos, which='active')
+
+ def set_axes_locator(self, locator):
+ """
+ Set the axes locator.
+
+ .. ACCEPTS: a callable object which takes an axes instance and
+ renderer and returns a bbox.
+
+ Parameters
+ ----------
+ locator : callable
+ A locator function, which takes an axes and a renderer and returns
+ a bbox.
+ """
+ self._axes_locator = locator
+ self.stale = True
+
+ def get_axes_locator(self):
+ """
+ Return the axes_locator.
+ """
+ return self._axes_locator
+
+ def _set_artist_props(self, a):
+ """set the boilerplate props for artists added to axes"""
+ a.set_figure(self.figure)
+ if not a.is_transform_set():
+ a.set_transform(self.transData)
+
+ a.axes = self
+ if a.mouseover:
+ self.mouseover_set.add(a)
+
+ def _gen_axes_patch(self):
+ """
+ Returns the patch used to draw the background of the axes. It
+ is also used as the clipping path for any data elements on the
+ axes.
+
+ In the standard axes, this is a rectangle, but in other
+ projections it may not be.
+
+ .. note::
+
+ Intended to be overridden by new projection types.
+
+ """
+ return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
+
+ def _gen_axes_spines(self, locations=None, offset=0.0, units='inches'):
+ """
+ Returns a dict whose keys are spine names and values are
+ Line2D or Patch instances. Each element is used to draw a
+ spine of the axes.
+
+ In the standard axes, this is a single line segment, but in
+ other projections it may not be.
+
+ .. note::
+
+ Intended to be overridden by new projection types.
+
+ """
+ return OrderedDict([
+ ('left', mspines.Spine.linear_spine(self, 'left')),
+ ('right', mspines.Spine.linear_spine(self, 'right')),
+ ('bottom', mspines.Spine.linear_spine(self, 'bottom')),
+ ('top', mspines.Spine.linear_spine(self, 'top'))])
+
+ def cla(self):
+ """Clear the current axes."""
+ # Note: this is called by Axes.__init__()
+
+ # stash the current visibility state
+ if hasattr(self, 'patch'):
+ patch_visible = self.patch.get_visible()
+ else:
+ patch_visible = True
+
+ xaxis_visible = self.xaxis.get_visible()
+ yaxis_visible = self.yaxis.get_visible()
+
+ self.xaxis.cla()
+ self.yaxis.cla()
+
+ for name, spine in six.iteritems(self.spines):
+ spine.cla()
+
+ self.ignore_existing_data_limits = True
+ self.callbacks = cbook.CallbackRegistry()
+
+ if self._sharex is not None:
+ # major and minor are axis.Ticker class instances with
+ # locator and formatter attributes
+ self.xaxis.major = self._sharex.xaxis.major
+ self.xaxis.minor = self._sharex.xaxis.minor
+ x0, x1 = self._sharex.get_xlim()
+ self.set_xlim(x0, x1, emit=False, auto=None)
+ self.xaxis._scale = mscale.scale_factory(
+ self._sharex.xaxis.get_scale(), self.xaxis)
+ else:
+ self.xaxis._set_scale('linear')
+ try:
+ self.set_xlim(0, 1)
+ except TypeError:
+ pass
+
+ if self._sharey is not None:
+ self.yaxis.major = self._sharey.yaxis.major
+ self.yaxis.minor = self._sharey.yaxis.minor
+ y0, y1 = self._sharey.get_ylim()
+ self.set_ylim(y0, y1, emit=False, auto=None)
+ self.yaxis._scale = mscale.scale_factory(
+ self._sharey.yaxis.get_scale(), self.yaxis)
+ else:
+ self.yaxis._set_scale('linear')
+ try:
+ self.set_ylim(0, 1)
+ except TypeError:
+ pass
+ # update the minor locator for x and y axis based on rcParams
+ if rcParams['xtick.minor.visible']:
+ self.xaxis.set_minor_locator(mticker.AutoMinorLocator())
+
+ if rcParams['ytick.minor.visible']:
+ self.yaxis.set_minor_locator(mticker.AutoMinorLocator())
+
+ self._autoscaleXon = True
+ self._autoscaleYon = True
+ self._xmargin = rcParams['axes.xmargin']
+ self._ymargin = rcParams['axes.ymargin']
+ self._tight = None
+ self._use_sticky_edges = True
+ self._update_transScale() # needed?
+
+ self._get_lines = _process_plot_var_args(self)
+ self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
+
+ self._gridOn = rcParams['axes.grid']
+ self.lines = []
+ self.patches = []
+ self.texts = []
+ self.tables = []
+ self.artists = []
+ self.images = []
+ self.mouseover_set = set()
+ self._current_image = None # strictly for pyplot via _sci, _gci
+ self.legend_ = None
+ self.collections = [] # collection.Collection instances
+ self.containers = []
+
+ self.grid(False) # Disable grid on init; the rcParams grid settings are applied below
+ self.grid(self._gridOn, which=rcParams['axes.grid.which'],
+ axis=rcParams['axes.grid.axis'])
+ props = font_manager.FontProperties(
+ size=rcParams['axes.titlesize'],
+ weight=rcParams['axes.titleweight'])
+
+ self.title = mtext.Text(
+ x=0.5, y=1.0, text='',
+ fontproperties=props,
+ verticalalignment='baseline',
+ horizontalalignment='center',
+ )
+ self._left_title = mtext.Text(
+ x=0.0, y=1.0, text='',
+ fontproperties=props.copy(),
+ verticalalignment='baseline',
+ horizontalalignment='left', )
+ self._right_title = mtext.Text(
+ x=1.0, y=1.0, text='',
+ fontproperties=props.copy(),
+ verticalalignment='baseline',
+ horizontalalignment='right',
+ )
+ title_offset_points = rcParams['axes.titlepad']
+ # refactor this out so it can be called in ax.set_title if
+ # pad argument used...
+ self._set_title_offset_trans(title_offset_points)
+
+ for _title in (self.title, self._left_title, self._right_title):
+ self._set_artist_props(_title)
+
+ # The patch draws the background of the axes. We want this to be below
+ # the other artists. We use the frame to draw the edges so we are
+ # setting the edgecolor to None.
+ self.patch = self._gen_axes_patch()
+ self.patch.set_figure(self.figure)
+ self.patch.set_facecolor(self._facecolor)
+ self.patch.set_edgecolor('None')
+ self.patch.set_linewidth(0)
+ self.patch.set_transform(self.transAxes)
+
+ self.set_axis_on()
+
+ self.xaxis.set_clip_path(self.patch)
+ self.yaxis.set_clip_path(self.patch)
+
+ self._shared_x_axes.clean()
+ self._shared_y_axes.clean()
+ if self._sharex:
+ self.xaxis.set_visible(xaxis_visible)
+ self.patch.set_visible(patch_visible)
+
+ if self._sharey:
+ self.yaxis.set_visible(yaxis_visible)
+ self.patch.set_visible(patch_visible)
+
+ self.stale = True
+
+ @property
+ @cbook.deprecated("2.1", alternative="Axes.patch")
+ def axesPatch(self):
+ return self.patch
+
+ def clear(self):
+ """Clear the axes."""
+ self.cla()
+
+ def get_facecolor(self):
+ """Get the Axes facecolor."""
+ return self.patch.get_facecolor()
+ get_fc = get_facecolor
+
+ def set_facecolor(self, color):
+ """Set the Axes facecolor.
+
+ .. ACCEPTS: color
+
+ Parameters
+ ----------
+ color : color
+ """
+ self._facecolor = color
+ self.stale = True
+ return self.patch.set_facecolor(color)
+ set_fc = set_facecolor
+
+ def _set_title_offset_trans(self, title_offset_points):
+ """
+ Set the offset for the title either from rcParams['axes.titlepad']
+ or from set_title kwarg ``pad``.
+ """
+ self.titleOffsetTrans = mtransforms.ScaledTranslation(
+ 0.0, title_offset_points / 72.0,
+ self.figure.dpi_scale_trans)
+ for _title in (self.title, self._left_title, self._right_title):
+ _title.set_transform(self.transAxes + self.titleOffsetTrans)
+ _title.set_clip_box(None)
+
+ def set_prop_cycle(self, *args, **kwargs):
+ """
+ Set the property cycle of the Axes.
+
+ The property cycle controls the style properties such as color,
+ marker and linestyle of future plot commands. The style properties
+ of data already added to the Axes are not modified.
+
+ Call signatures::
+
+ set_prop_cycle(cycler)
+ set_prop_cycle(label=values[, label2=values2[, ...]])
+ set_prop_cycle(label, values)
+
+ Form 1 sets the given `~cycler.Cycler` object.
+
+ Form 2 creates a `~cycler.Cycler` which cycles over one or more
+ properties simultaneously and sets it as the property cycle of the
+ axes. If multiple properties are given, their value lists must have
+ the same length. This is just a shortcut for explicitly creating a
+ cycler and passing it to the function, i.e. it's short for
+ ``set_prop_cycle(cycler(label=values, label2=values2, ...))``.
+
+ Form 3 creates a `~cycler.Cycler` for a single property and sets it
+ as the property cycle of the axes. This form exists for compatibility
+ with the original `cycler.cycler` interface. Its use is discouraged
+ in favor of the kwarg form, i.e. ``set_prop_cycle(label=values)``.
+
+ Parameters
+ ----------
+ cycler : Cycler
+ Set the given Cycler. *None* resets to the cycle defined by the
+ current style.
+
+ label : str
+ The property key. Must be a valid `.Artist` property.
+ For example, 'color' or 'linestyle'. Aliases are allowed,
+ such as 'c' for 'color' and 'lw' for 'linewidth'.
+
+ values : iterable
+ Finite-length iterable of the property values. These values
+ are validated and will raise a ValueError if invalid.
+
+ Examples
+ --------
+ Setting the property cycle for a single property:
+
+ >>> ax.set_prop_cycle(color=['red', 'green', 'blue'])
+
+ Setting the property cycle for simultaneously cycling over multiple
+ properties (e.g. red circle, green plus, blue cross):
+
+ >>> ax.set_prop_cycle(color=['red', 'green', 'blue'],
+ ... marker=['o', '+', 'x'])
+
+ See Also
+ --------
+ matplotlib.rcsetup.cycler
+ Convenience function for creating validated cyclers for properties.
+ cycler.cycler
+ The original function for creating unvalidated cyclers.
+
+ """
+ if args and kwargs:
+ raise TypeError("Cannot supply both positional and keyword "
+ "arguments to this method.")
+ if len(args) == 1 and args[0] is None:
+ prop_cycle = None
+ else:
+ prop_cycle = cycler(*args, **kwargs)
+ self._get_lines.set_prop_cycle(prop_cycle)
+ self._get_patches_for_fill.set_prop_cycle(prop_cycle)
+
+ @cbook.deprecated('1.5', alternative='`.set_prop_cycle`')
+ def set_color_cycle(self, clist):
+ """
+ Set the color cycle for any future plot commands on this Axes.
+
+ Parameters
+ ----------
+ clist
+ A list of mpl color specifiers.
+ """
+ if clist is None:
+ # Calling set_color_cycle() or set_prop_cycle() with None
+ # effectively resets the cycle, but you can't do
+ # set_prop_cycle('color', None). So we are special-casing this.
+ self.set_prop_cycle(None)
+ else:
+ self.set_prop_cycle('color', clist)
+
+ @cbook.deprecated("2.0")
+ def ishold(self):
+ """return the HOLD status of the axes
+
+ The `hold` mechanism is deprecated and will be removed in
+ v3.0.
+ """
+
+ return self._hold
+
+ @cbook.deprecated("2.0", message=_hold_msg)
+ def hold(self, b=None):
+ """
+ Set the hold state.
+
+ The ``hold`` mechanism is deprecated and will be removed in
+ v3.0. The behavior will remain consistent with the
+ long-time default value of True.
+
+ If *hold* is *None* (default), toggle the *hold* state. Else
+ set the *hold* state to boolean value *b*.
+
+ Examples::
+
+ # toggle hold
+ hold()
+
+ # turn hold on
+ hold(True)
+
+ # turn hold off
+ hold(False)
+
+ When hold is *True*, subsequent plot commands will be added to
+ the current axes. When hold is *False*, the current axes and
+ figure will be cleared on the next plot command
+
+ """
+ if b is None:
+ self._hold = not self._hold
+ else:
+ self._hold = b
+
+ def get_aspect(self):
+ return self._aspect
+
+ def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):
+ """
+ Set the aspect of the axis scaling, i.e. the ratio of y-unit to x-unit.
+
+ Parameters
+ ----------
+ aspect : ['auto' | 'equal'] or num
+ Possible values:
+
+ ======== ================================================
+ value description
+ ======== ================================================
+ 'auto' automatic; fill the position rectangle with data
+ 'equal' same scaling from data to plot units for x and y
+ num a circle will be stretched such that the height
+ is num times the width. aspect=1 is the same as
+ aspect='equal'.
+ ======== ================================================
+
+ adjustable : None or ['box' | 'datalim'], optional
+ If not ``None``, this defines which parameter will be adjusted to
+ meet the required aspect. See `.set_adjustable` for further
+ details.
+
+ anchor : None or str or 2-tuple of float, optional
+ If not ``None``, this defines where the Axes will be drawn if there
+ is extra space due to aspect constraints. The most common way
+ to specify the anchor is an abbreviation of a cardinal direction:
+
+ ===== =====================
+ value description
+ ===== =====================
+ 'C' centered
+ 'SW' lower left corner
+ 'S' middle of bottom edge
+ 'SE' lower right corner
+ etc.
+ ===== =====================
+
+ See `.set_anchor` for further details.
+
+ share : bool, optional
+ If ``True``, apply the settings to all shared Axes.
+ Default is ``False``.
+
+ See Also
+ --------
+ matplotlib.axes.Axes.set_adjustable
+ defining the parameter to adjust in order to meet the required
+ aspect.
+ matplotlib.axes.Axes.set_anchor
+ defining the position in case of extra space.
+ """
+ if not (isinstance(aspect, six.string_types)
+ and aspect in ('equal', 'auto')):
+ aspect = float(aspect) # raise ValueError if necessary
+ if share:
+ axes = set(self._shared_x_axes.get_siblings(self)
+ + self._shared_y_axes.get_siblings(self))
+ else:
+ axes = [self]
+ for ax in axes:
+ ax._aspect = aspect
+
+ if adjustable is None:
+ adjustable = self._adjustable
+ self.set_adjustable(adjustable, share=share) # Handle sharing.
+
+ if anchor is not None:
+ self.set_anchor(anchor, share=share)
+ self.stale = True
+
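+ # Usage sketch for set_aspect above: ax.set_aspect('equal',
+ # adjustable='datalim') keeps the Axes box fixed and adjusts the view
+ # limits instead, while ax.set_aspect(2, adjustable='box', anchor='C')
+ # resizes the box so that one y data unit is drawn twice as long as one
+ # x data unit, centering the drawing area in the leftover space.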
+ def get_adjustable(self):
+ return self._adjustable
+
+ def set_adjustable(self, adjustable, share=False):
+ """
+ Define which parameter the Axes will change to achieve a given aspect.
+
+ Parameters
+ ----------
+ adjustable : ['box' | 'datalim']
+ If 'box', change the physical dimensions of the Axes.
+ If 'datalim', change the ``x`` or ``y`` data limits.
+
+ share : bool, optional
+ If ``True``, apply the settings to all shared Axes.
+ Default is ``False``.
+
+ .. ACCEPTS: [ 'box' | 'datalim']
+
+ See Also
+ --------
+ matplotlib.axes.Axes.set_aspect
+ for a description of aspect handling.
+
+ Notes
+ -----
+ Shared Axes (of which twinned Axes are a special case)
+ impose restrictions on how aspect ratios can be imposed.
+ For twinned Axes, use 'datalim'. For Axes that share both
+ x and y, use 'box'. Otherwise, either 'datalim' or 'box'
+ may be used. These limitations are partly a requirement
+ to avoid over-specification, and partly a result of the
+ particular implementation we are currently using, in
+ which the adjustments for aspect ratios are done sequentially
+ and independently on each Axes as it is drawn.
+ """
+ if adjustable == 'box-forced':
+ warnings.warn("The 'box-forced' keyword argument is deprecated"
+ " since 2.2.", cbook.mplDeprecation)
+ if adjustable not in ('box', 'datalim', 'box-forced'):
+ raise ValueError("argument must be 'box', or 'datalim'")
+ if share:
+ axes = set(self._shared_x_axes.get_siblings(self)
+ + self._shared_y_axes.get_siblings(self))
+ else:
+ axes = [self]
+ for ax in axes:
+ ax._adjustable = adjustable
+ self.stale = True
+
+ def get_anchor(self):
+ """
+ Get the anchor location.
+
+ See Also
+ --------
+ matplotlib.axes.Axes.set_anchor
+ for a description of the anchor.
+ matplotlib.axes.Axes.set_aspect
+ for a description of aspect handling.
+ """
+ return self._anchor
+
+ def set_anchor(self, anchor, share=False):
+ """
+ Define the anchor location.
+
+ The actual drawing area (active position) of the Axes may be smaller
+ than the Bbox (original position) when a fixed aspect is required. The
+ anchor defines where the drawing area will be located within the
+ available space.
+
+ .. ACCEPTS: [ 'C' | 'SW' | 'S' | 'SE' | 'E' | 'NE' | 'N' | 'NW' | 'W' ]
+
+ Parameters
+ ----------
+ anchor : str or 2-tuple of floats
+ The anchor position may be either:
+
+ - a sequence (*cx*, *cy*). *cx* and *cy* may range from 0
+ to 1, where 0 is left or bottom and 1 is right or top.
+
+ - a string using cardinal directions as abbreviation:
+
+ - 'C' for centered
+ - 'S' (south) for bottom-center
+ - 'SW' (south west) for bottom-left
+ - etc.
+
+ Here is an overview of the possible positions:
+
+ +------+------+------+
+ | 'NW' | 'N' | 'NE' |
+ +------+------+------+
+ | 'W' | 'C' | 'E' |
+ +------+------+------+
+ | 'SW' | 'S' | 'SE' |
+ +------+------+------+
+
+ share : bool, optional
+ If ``True``, apply the settings to all shared Axes.
+ Default is ``False``.
+
+ See Also
+ --------
+ matplotlib.axes.Axes.set_aspect
+ for a description of aspect handling.
+ """
+ if not (anchor in mtransforms.Bbox.coefs or len(anchor) == 2):
+ raise ValueError('argument must be among %s' %
+ ', '.join(mtransforms.Bbox.coefs))
+ if share:
+ axes = set(self._shared_x_axes.get_siblings(self)
+ + self._shared_y_axes.get_siblings(self))
+ else:
+ axes = [self]
+ for ax in axes:
+ ax._anchor = anchor
+
+ self.stale = True
+
+ def get_data_ratio(self):
+ """
+ Returns the aspect ratio of the raw data.
+
+ This method is intended to be overridden by new projection
+ types.
+ """
+ xmin, xmax = self.get_xbound()
+ ymin, ymax = self.get_ybound()
+
+ xsize = max(abs(xmax - xmin), 1e-30)
+ ysize = max(abs(ymax - ymin), 1e-30)
+
+ return ysize / xsize
+
+ def get_data_ratio_log(self):
+ """
+ Returns the aspect ratio of the raw data in log scale.
+ Used when both axis scales are logarithmic.
+ """
+ xmin, xmax = self.get_xbound()
+ ymin, ymax = self.get_ybound()
+
+ xsize = max(abs(math.log10(xmax) - math.log10(xmin)), 1e-30)
+ ysize = max(abs(math.log10(ymax) - math.log10(ymin)), 1e-30)
+
+ return ysize / xsize
+
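+ # Worked example for get_data_ratio_log above: with x bounds (1, 100) and
+ # y bounds (1, 10), xsize = log10(100) - log10(1) = 2 and
+ # ysize = log10(10) - log10(1) = 1, so the returned ratio is 0.5.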
+ def apply_aspect(self, position=None):
+ """
+ Adjust the Axes for a specified data aspect ratio.
+
+ Depending on `.get_adjustable` this will modify either the Axes box
+ (position) or the view limits. In the former case, `.get_anchor`
+ will affect the position.
+
+ Notes
+ -----
+ This is called automatically when each Axes is drawn. You may need
+ to call it yourself if you need to update the Axes position and/or
+ view limits before the Figure is drawn.
+
+ See Also
+ --------
+ matplotlib.axes.Axes.set_aspect
+ for a description of aspect ratio handling.
+ matplotlib.axes.Axes.set_adjustable
+ defining the parameter to adjust in order to meet the required
+ aspect.
+ matplotlib.axes.Axes.set_anchor
+ defining the position in case of extra space.
+ """
+ if position is None:
+ position = self.get_position(original=True)
+
+ aspect = self.get_aspect()
+
+ if self.name != 'polar':
+ xscale, yscale = self.get_xscale(), self.get_yscale()
+ if xscale == "linear" and yscale == "linear":
+ aspect_scale_mode = "linear"
+ elif xscale == "log" and yscale == "log":
+ aspect_scale_mode = "log"
+ elif ((xscale == "linear" and yscale == "log") or
+ (xscale == "log" and yscale == "linear")):
+ if aspect != "auto":
+ warnings.warn(
+ 'aspect is not supported for Axes with xscale=%s, '
+ 'yscale=%s' % (xscale, yscale))
+ aspect = "auto"
+ else: # some custom projections have their own scales.
+ pass
+ else:
+ aspect_scale_mode = "linear"
+
+ if aspect == 'auto':
+ self._set_position(position, which='active')
+ return
+
+ if aspect == 'equal':
+ A = 1
+ else:
+ A = aspect
+
+ figW, figH = self.get_figure().get_size_inches()
+ fig_aspect = figH / figW
+ if self._adjustable in ['box', 'box-forced']:
+ if self in self._twinned_axes:
+ raise RuntimeError("Adjustable 'box' is not allowed in a"
+ " twinned Axes. Use 'datalim' instead.")
+ if aspect_scale_mode == "log":
+ box_aspect = A * self.get_data_ratio_log()
+ else:
+ box_aspect = A * self.get_data_ratio()
+ pb = position.frozen()
+ pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
+ self._set_position(pb1.anchored(self.get_anchor(), pb), 'active')
+ return
+
+ # reset active to original in case it had been changed
+ # by prior use of 'box'
+ self._set_position(position, which='active')
+
+ xmin, xmax = self.get_xbound()
+ ymin, ymax = self.get_ybound()
+
+ if aspect_scale_mode == "log":
+ xmin, xmax = math.log10(xmin), math.log10(xmax)
+ ymin, ymax = math.log10(ymin), math.log10(ymax)
+
+ xsize = max(abs(xmax - xmin), 1e-30)
+ ysize = max(abs(ymax - ymin), 1e-30)
+
+ l, b, w, h = position.bounds
+ box_aspect = fig_aspect * (h / w)
+ data_ratio = box_aspect / A
+
+ y_expander = (data_ratio * xsize / ysize - 1.0)
+ # If y_expander > 0, the dy/dx viewLim ratio needs to increase
+ if abs(y_expander) < 0.005:
+ return
+
+ if aspect_scale_mode == "log":
+ dL = self.dataLim
+ dL_width = math.log10(dL.x1) - math.log10(dL.x0)
+ dL_height = math.log10(dL.y1) - math.log10(dL.y0)
+ xr = 1.05 * dL_width
+ yr = 1.05 * dL_height
+ else:
+ dL = self.dataLim
+ xr = 1.05 * dL.width
+ yr = 1.05 * dL.height
+
+ xmarg = xsize - xr
+ ymarg = ysize - yr
+ Ysize = data_ratio * xsize
+ Xsize = ysize / data_ratio
+ Xmarg = Xsize - xr
+ Ymarg = Ysize - yr
+ # Setting these targets to, e.g., 0.05*xr does not seem to
+ # help.
+ xm = 0
+ ym = 0
+
+ shared_x = self in self._shared_x_axes
+ shared_y = self in self._shared_y_axes
+ # Not sure whether we need this check:
+ if shared_x and shared_y:
+ raise RuntimeError("adjustable='datalim' is not allowed when both"
+ " axes are shared.")
+
+ # If y is shared, then we are only allowed to change x, etc.
+ if shared_y:
+ adjust_y = False
+ else:
+ if xmarg > xm and ymarg > ym:
+ adjy = ((Ymarg > 0 and y_expander < 0) or
+ (Xmarg < 0 and y_expander > 0))
+ else:
+ adjy = y_expander > 0
+ adjust_y = shared_x or adjy # (Ymarg > xmarg)
+
+ if adjust_y:
+ yc = 0.5 * (ymin + ymax)
+ y0 = yc - Ysize / 2.0
+ y1 = yc + Ysize / 2.0
+ if aspect_scale_mode == "log":
+ self.set_ybound((10. ** y0, 10. ** y1))
+ else:
+ self.set_ybound((y0, y1))
+ else:
+ xc = 0.5 * (xmin + xmax)
+ x0 = xc - Xsize / 2.0
+ x1 = xc + Xsize / 2.0
+ if aspect_scale_mode == "log":
+ self.set_xbound((10. ** x0, 10. ** x1))
+ else:
+ self.set_xbound((x0, x1))
+
+ def axis(self, *v, **kwargs):
+ """Set axis properties.
+
+ Valid signatures::
+
+ xmin, xmax, ymin, ymax = axis()
+ xmin, xmax, ymin, ymax = axis(list_arg)
+ xmin, xmax, ymin, ymax = axis(string_arg)
+ xmin, xmax, ymin, ymax = axis(**kwargs)
+
+ Parameters
+ ----------
+ v : list of float or {'on', 'off', 'equal', 'tight', 'scaled',\
+ 'normal', 'auto', 'image', 'square'}
+ Optional positional argument
+
+ Axis data limits set from a list; or a command relating to axes:
+
+ ========== ================================================
+ Value Description
+ ========== ================================================
+ 'on' Turn axis lines and labels on
+ 'off' Turn axis lines and labels off
+ 'equal' Equal scaling by changing limits
+ 'scaled' Equal scaling by changing box dimensions
+ 'tight' Limits set such that all data is shown
+ 'auto' Automatic scaling, fill rectangle with data
+ 'normal' Same as 'auto'; deprecated
+ 'image' 'scaled' with axis limits equal to data limits
+ 'square' Square plot; similar to 'scaled', but initially\
+ forcing xmax-xmin = ymax-ymin
+ ========== ================================================
+
+ emit : bool, optional
+ Passed to set_{x,y}lim functions, if observers
+ are notified of axis limit change
+
+ xmin, ymin, xmax, ymax : float, optional
+ The axis limits to be set
+
+ Returns
+ -------
+ xmin, xmax, ymin, ymax : float
+ The axis limits
+
+ """
+
+ if len(v) == 0 and len(kwargs) == 0:
+ xmin, xmax = self.get_xlim()
+ ymin, ymax = self.get_ylim()
+ return xmin, xmax, ymin, ymax
+
+ emit = kwargs.get('emit', True)
+
+ if len(v) == 1 and isinstance(v[0], six.string_types):
+ s = v[0].lower()
+ if s == 'on':
+ self.set_axis_on()
+ elif s == 'off':
+ self.set_axis_off()
+ elif s in ('equal', 'tight', 'scaled', 'normal',
+ 'auto', 'image', 'square'):
+ self.set_autoscale_on(True)
+ self.set_aspect('auto')
+ self.autoscale_view(tight=False)
+ # self.apply_aspect()
+ if s == 'equal':
+ self.set_aspect('equal', adjustable='datalim')
+ elif s == 'scaled':
+ self.set_aspect('equal', adjustable='box', anchor='C')
+ self.set_autoscale_on(False) # Req. by Mark Bakker
+ elif s == 'tight':
+ self.autoscale_view(tight=True)
+ self.set_autoscale_on(False)
+ elif s == 'image':
+ self.autoscale_view(tight=True)
+ self.set_autoscale_on(False)
+ self.set_aspect('equal', adjustable='box', anchor='C')
+ elif s == 'square':
+ self.set_aspect('equal', adjustable='box', anchor='C')
+ self.set_autoscale_on(False)
+ xlim = self.get_xlim()
+ ylim = self.get_ylim()
+ edge_size = max(np.diff(xlim), np.diff(ylim))
+ self.set_xlim([xlim[0], xlim[0] + edge_size],
+ emit=emit, auto=False)
+ self.set_ylim([ylim[0], ylim[0] + edge_size],
+ emit=emit, auto=False)
+ else:
+ raise ValueError('Unrecognized string %s to axis; '
+ 'try on or off' % s)
+ xmin, xmax = self.get_xlim()
+ ymin, ymax = self.get_ylim()
+ return xmin, xmax, ymin, ymax
+
+ try:
+ v[0]
+ except IndexError:
+ xmin = kwargs.get('xmin', None)
+ xmax = kwargs.get('xmax', None)
+ auto = False # turn off autoscaling, unless...
+ if xmin is None and xmax is None:
+ auto = None # leave autoscaling state alone
+ xmin, xmax = self.set_xlim(xmin, xmax, emit=emit, auto=auto)
+
+ ymin = kwargs.get('ymin', None)
+ ymax = kwargs.get('ymax', None)
+ auto = False # turn off autoscaling, unless...
+ if ymin is None and ymax is None:
+ auto = None # leave autoscaling state alone
+ ymin, ymax = self.set_ylim(ymin, ymax, emit=emit, auto=auto)
+ return xmin, xmax, ymin, ymax
+
+ v = v[0]
+ if len(v) != 4:
+ raise ValueError('v must contain [xmin xmax ymin ymax]')
+
+ self.set_xlim([v[0], v[1]], emit=emit, auto=False)
+ self.set_ylim([v[2], v[3]], emit=emit, auto=False)
+
+ return v
+
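+ # Usage sketch for axis() above: ax.axis([0, 10, -1, 1]) sets the view
+ # limits and turns autoscaling off, ax.axis('equal') requests equal data
+ # scaling via set_aspect, and ax.axis('off') hides the axis lines and
+ # labels.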
+ def get_legend(self):
+ """Return the `Legend` instance, or None if no legend is defined."""
+ return self.legend_
+
+ def get_images(self):
+ """return a list of Axes images contained by the Axes"""
+ return cbook.silent_list('AxesImage', self.images)
+
+ def get_lines(self):
+ """Return a list of lines contained by the Axes"""
+ return cbook.silent_list('Line2D', self.lines)
+
+ def get_xaxis(self):
+ """Return the XAxis instance."""
+ return self.xaxis
+
+ def get_xgridlines(self):
+ """Get the x grid lines as a list of `Line2D` instances."""
+ return cbook.silent_list('Line2D xgridline',
+ self.xaxis.get_gridlines())
+
+ def get_xticklines(self):
+ """Get the x tick lines as a list of `Line2D` instances."""
+ return cbook.silent_list('Line2D xtickline',
+ self.xaxis.get_ticklines())
+
+ def get_yaxis(self):
+ """Return the YAxis instance."""
+ return self.yaxis
+
+ def get_ygridlines(self):
+ """Get the y grid lines as a list of `Line2D` instances."""
+ return cbook.silent_list('Line2D ygridline',
+ self.yaxis.get_gridlines())
+
+ def get_yticklines(self):
+ """Get the y tick lines as a list of `Line2D` instances."""
+ return cbook.silent_list('Line2D ytickline',
+ self.yaxis.get_ticklines())
+
+ # Adding and tracking artists
+
+ def _sci(self, im):
+ """
+ helper for :func:`~matplotlib.pyplot.sci`;
+ do not use elsewhere.
+ """
+ if isinstance(im, matplotlib.contour.ContourSet):
+ if im.collections[0] not in self.collections:
+ raise ValueError(
+ "ContourSet must be in current Axes")
+ elif im not in self.images and im not in self.collections:
+ raise ValueError(
+ "Argument must be an image, collection, or ContourSet in "
+ "this Axes")
+ self._current_image = im
+
+ def _gci(self):
+ """
+ Helper for :func:`~matplotlib.pyplot.gci`;
+ do not use elsewhere.
+ """
+ return self._current_image
+
+ def has_data(self):
+ """
+ Return *True* if any artists have been added to axes.
+
+ This should not be used to determine whether the *dataLim*
+ need to be updated, and may not actually be useful for
+ anything.
+ """
+ return (
+ len(self.collections) +
+ len(self.images) +
+ len(self.lines) +
+ len(self.patches)) > 0
+
+ def add_artist(self, a):
+ """Add any :class:`~matplotlib.artist.Artist` to the axes.
+
+ Use `add_artist` only for artists for which there is no dedicated
+ "add" method; and if necessary, use a method such as `update_datalim`
+ to manually update the dataLim if the artist is to be included in
+ autoscaling.
+
+ Returns the artist.
+ """
+ a.axes = self
+ self.artists.append(a)
+ self._set_artist_props(a)
+ a.set_clip_path(self.patch)
+ a._remove_method = lambda h: self.artists.remove(h)
+ self.stale = True
+ return a
+
+ def add_collection(self, collection, autolim=True):
+ """
+ Add a :class:`~matplotlib.collections.Collection` instance
+ to the axes.
+
+ Returns the collection.
+ """
+ label = collection.get_label()
+ if not label:
+ collection.set_label('_collection%d' % len(self.collections))
+ self.collections.append(collection)
+ self._set_artist_props(collection)
+
+ if collection.get_clip_path() is None:
+ collection.set_clip_path(self.patch)
+
+ if autolim:
+ self.update_datalim(collection.get_datalim(self.transData))
+
+ collection._remove_method = lambda h: self.collections.remove(h)
+ self.stale = True
+ return collection
+
+ def add_image(self, image):
+ """
+ Add a :class:`~matplotlib.image.AxesImage` to the axes.
+
+ Returns the image.
+ """
+ self._set_artist_props(image)
+ if not image.get_label():
+ image.set_label('_image%d' % len(self.images))
+ self.images.append(image)
+ image._remove_method = lambda h: self.images.remove(h)
+ self.stale = True
+ return image
+
+ def _update_image_limits(self, image):
+ xmin, xmax, ymin, ymax = image.get_extent()
+ self.axes.update_datalim(((xmin, ymin), (xmax, ymax)))
+
+ def add_line(self, line):
+ """
+ Add a :class:`~matplotlib.lines.Line2D` to the list of plot
+ lines
+
+ Returns the line.
+ """
+ self._set_artist_props(line)
+ if line.get_clip_path() is None:
+ line.set_clip_path(self.patch)
+
+ self._update_line_limits(line)
+ if not line.get_label():
+ line.set_label('_line%d' % len(self.lines))
+ self.lines.append(line)
+ line._remove_method = lambda h: self.lines.remove(h)
+ self.stale = True
+ return line
+
+ def _add_text(self, txt):
+ """
+
+ """
+ self._set_artist_props(txt)
+ self.texts.append(txt)
+ txt._remove_method = lambda h: self.texts.remove(h)
+ self.stale = True
+ return txt
+
+ def _update_line_limits(self, line):
+ """
+ Figures out the data limit of the given line, updating self.dataLim.
+ """
+ path = line.get_path()
+ if path.vertices.size == 0:
+ return
+
+ line_trans = line.get_transform()
+
+ if line_trans == self.transData:
+ data_path = path
+
+ elif any(line_trans.contains_branch_seperately(self.transData)):
+ # identify the transform to go from line's coordinates
+ # to data coordinates
+ trans_to_data = line_trans - self.transData
+
+ # if transData is affine we can use the cached non-affine component
+ # of line's path. (since the non-affine part of line_trans is
+ # entirely encapsulated in trans_to_data).
+ if self.transData.is_affine:
+ line_trans_path = line._get_transformed_path()
+ na_path, _ = line_trans_path.get_transformed_path_and_affine()
+ data_path = trans_to_data.transform_path_affine(na_path)
+ else:
+ data_path = trans_to_data.transform_path(path)
+ else:
+ # for backwards compatibility we update the dataLim with the
+ # coordinate range of the given path, even though the coordinate
+ # systems are completely different. This may occur in situations
+ # such as when ax.transAxes is passed through for absolute
+ # positioning.
+ data_path = path
+
+ if data_path.vertices.size > 0:
+ updatex, updatey = line_trans.contains_branch_seperately(
+ self.transData)
+ self.dataLim.update_from_path(data_path,
+ self.ignore_existing_data_limits,
+ updatex=updatex,
+ updatey=updatey)
+ self.ignore_existing_data_limits = False
+
+ def add_patch(self, p):
+ """
+ Add a :class:`~matplotlib.patches.Patch` *p* to the list of
+ axes patches; the clipbox will be set to the Axes clipping
+ box. If the transform is not set, it will be set to
+ :attr:`transData`.
+
+ Returns the patch.
+ """
+
+ self._set_artist_props(p)
+ if p.get_clip_path() is None:
+ p.set_clip_path(self.patch)
+ self._update_patch_limits(p)
+ self.patches.append(p)
+ p._remove_method = lambda h: self.patches.remove(h)
+ return p
+
+ def _update_patch_limits(self, patch):
+ """update the data limits for patch *p*"""
+ # hist can add zero height Rectangles, which is useful to keep
+ # the bins, counts and patches lined up, but it throws off log
+ # scaling. We'll ignore rects with zero height or width in
+ # the auto-scaling
+
+ # cannot check for '==0' since unitized data may not compare to zero
+ # issue #2150 - we update the limits if patch has non zero width
+ # or height.
+ if (isinstance(patch, mpatches.Rectangle) and
+ ((not patch.get_width()) and (not patch.get_height()))):
+ return
+ vertices = patch.get_path().vertices
+ if vertices.size > 0:
+ xys = patch.get_patch_transform().transform(vertices)
+ if patch.get_data_transform() != self.transData:
+ patch_to_data = (patch.get_data_transform() -
+ self.transData)
+ xys = patch_to_data.transform(xys)
+
+ updatex, updatey = patch.get_transform().\
+ contains_branch_seperately(self.transData)
+ self.update_datalim(xys, updatex=updatex,
+ updatey=updatey)
+
+ def add_table(self, tab):
+ """
+ Add a :class:`~matplotlib.table.Table` instance to the
+ list of axes tables
+
+ Parameters
+ ----------
+ tab: `matplotlib.table.Table`
+ Table instance
+
+ Returns
+ -------
+ `matplotlib.table.Table`: the table.
+ """
+ self._set_artist_props(tab)
+ self.tables.append(tab)
+ tab.set_clip_path(self.patch)
+ tab._remove_method = lambda h: self.tables.remove(h)
+ return tab
+
+ def add_container(self, container):
+ """
+ Add a :class:`~matplotlib.container.Container` instance
+ to the axes.
+
+ Returns the container.
+ """
+ label = container.get_label()
+ if not label:
+ container.set_label('_container%d' % len(self.containers))
+ self.containers.append(container)
+ container.set_remove_method(lambda h: self.containers.remove(h))
+ return container
+
+ def _on_units_changed(self, scalex=False, scaley=False):
+ """
+ Callback for processing changes to axis units.
+
+ Currently forces updates of data limits and view limits.
+ """
+ self.relim()
+ self.autoscale_view(scalex=scalex, scaley=scaley)
+
+ def relim(self, visible_only=False):
+ """
+ Recompute the data limits based on current artists. If you want to
+ exclude invisible artists from the calculation, set
+ ``visible_only=True``
+
+ At present, :class:`~matplotlib.collections.Collection`
+ instances are not supported.
+ """
+ # Collections are deliberately not supported (yet); see
+ # the TODO note in artists.py.
+ self.dataLim.ignore(True)
+ self.dataLim.set_points(mtransforms.Bbox.null().get_points())
+ self.ignore_existing_data_limits = True
+
+ for line in self.lines:
+ if not visible_only or line.get_visible():
+ self._update_line_limits(line)
+
+ for p in self.patches:
+ if not visible_only or p.get_visible():
+ self._update_patch_limits(p)
+
+ for image in self.images:
+ if not visible_only or image.get_visible():
+ self._update_image_limits(image)
+
+ def update_datalim(self, xys, updatex=True, updatey=True):
+ """
+ Update the data limits bbox with a sequence of xy tuples or an equivalent 2-D array.
+ """
+ # if no data is set currently, the bbox will ignore its
+ # limits and set the bound to be the bounds of the xydata.
+ # Otherwise, it will compute the bounds of its current data
+ # and the data in xydata
+ xys = np.asarray(xys)
+ if not len(xys):
+ return
+ self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
+ updatex=updatex, updatey=updatey)
+ self.ignore_existing_data_limits = False
+
+ def update_datalim_bounds(self, bounds):
+ """
+ Update the datalim to include the given
+ :class:`~matplotlib.transforms.Bbox` *bounds*
+ """
+ self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
+
+ def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
+ """Look for unit *kwargs* and update the axis instances as necessary"""
+
+ if self.xaxis is None or self.yaxis is None:
+ return
+
+ if xdata is not None:
+ # we only need to update if there is nothing set yet.
+ if not self.xaxis.have_units():
+ self.xaxis.update_units(xdata)
+
+ if ydata is not None:
+ # we only need to update if there is nothing set yet.
+ if not self.yaxis.have_units():
+ self.yaxis.update_units(ydata)
+
+ # process kwargs 2nd since these will override default units
+ if kwargs is not None:
+ xunits = kwargs.pop('xunits', self.xaxis.units)
+ if self.name == 'polar':
+ xunits = kwargs.pop('thetaunits', xunits)
+ if xunits != self.xaxis.units:
+ self.xaxis.set_units(xunits)
+ # If the units being set imply a different converter,
+ # we need to update.
+ if xdata is not None:
+ self.xaxis.update_units(xdata)
+
+ yunits = kwargs.pop('yunits', self.yaxis.units)
+ if self.name == 'polar':
+ yunits = kwargs.pop('runits', yunits)
+ if yunits != self.yaxis.units:
+ self.yaxis.set_units(yunits)
+ # If the units being set imply a different converter,
+ # we need to update.
+ if ydata is not None:
+ self.yaxis.update_units(ydata)
+ return kwargs
+
+ def in_axes(self, mouseevent):
+ """
+ Return *True* if the given *mouseevent* (in display coords)
+ is in the Axes
+ """
+ return self.patch.contains(mouseevent)[0]
+
+ def get_autoscale_on(self):
+ """
+ Get whether autoscaling is applied for both axes on plot commands
+ """
+ return self._autoscaleXon and self._autoscaleYon
+
+ def get_autoscalex_on(self):
+ """
+ Get whether autoscaling for the x-axis is applied on plot commands
+ """
+ return self._autoscaleXon
+
+ def get_autoscaley_on(self):
+ """
+ Get whether autoscaling for the y-axis is applied on plot commands
+ """
+ return self._autoscaleYon
+
+ def set_autoscale_on(self, b):
+ """
+ Set whether autoscaling is applied on plot commands
+
+ .. ACCEPTS: bool
+
+ Parameters
+ ----------
+ b : bool
+ """
+ self._autoscaleXon = b
+ self._autoscaleYon = b
+
+ def set_autoscalex_on(self, b):
+ """
+ Set whether autoscaling for the x-axis is applied on plot commands
+
+ .. ACCEPTS: bool
+
+ Parameters
+ ----------
+ b : bool
+ """
+ self._autoscaleXon = b
+
+ def set_autoscaley_on(self, b):
+ """
+ Set whether autoscaling for the y-axis is applied on plot commands
+
+ .. ACCEPTS: bool
+
+ Parameters
+ ----------
+ b : bool
+ """
+ self._autoscaleYon = b
+
+ @property
+ def use_sticky_edges(self):
+ """
+ When autoscaling, whether to obey all `Artist.sticky_edges`.
+
+ Default is ``True``.
+
+ Setting this to ``False`` ensures that the specified margins
+ will be applied, even if the plot includes an image, for
+ example, which would otherwise force a view limit to coincide
+ with its data limit.
+
+ Changing this property does not change the plot until
+ `autoscale` or `autoscale_view` is called.
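+
+ A minimal sketch of the common use case (illustrative; assumes an image
+ was added with ``ax.imshow``)::
+
+ ax.use_sticky_edges = False
+ ax.margins(0.05) # margins are now honored around the image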
+ """
+ return self._use_sticky_edges
+
+ @use_sticky_edges.setter
+ def use_sticky_edges(self, b):
+ self._use_sticky_edges = bool(b)
+ # No effect until next autoscaling, which will mark the axes as stale.
+
+ def set_xmargin(self, m):
+ """
+ Set padding of X data limits prior to autoscaling.
+
+ *m* times the data interval will be added to each
+ end of that interval before it is used in autoscaling.
+ For example, if your data is in the range [0, 2], a factor of
+ ``m = 0.1`` will result in a range [-0.2, 2.2].
+
+ Negative values -0.5 < m < 0 will result in clipping of the data range.
+ I.e. for a data range [0, 2], a factor of ``m = -0.1`` will result in
+ a range [0.2, 1.8].
+
+ .. ACCEPTS: float greater than -0.5
+
+ Parameters
+ ----------
+ m : float greater than -0.5
+ """
+ if m <= -0.5:
+ raise ValueError("margin must be greater than -0.5")
+ self._xmargin = m
+ self.stale = True
+
+ def set_ymargin(self, m):
+ """
+ Set padding of Y data limits prior to autoscaling.
+
+ *m* times the data interval will be added to each
+ end of that interval before it is used in autoscaling.
+ For example, if your data is in the range [0, 2], a factor of
+ ``m = 0.1`` will result in a range [-0.2, 2.2].
+
+ Negative values -0.5 < m < 0 will result in clipping of the data range.
+ I.e. for a data range [0, 2], a factor of ``m = -0.1`` will result in
+ a range [0.2, 1.8].
+
+ .. ACCEPTS: float greater than -0.5
+
+ Parameters
+ ----------
+ m : float greater than -0.5
+ """
+ if m <= -0.5:
+ raise ValueError("margin must be greater than -0.5")
+ self._ymargin = m
+ self.stale = True
+
+ def margins(self, *args, **kw):
+ """
+ Set or retrieve autoscaling margins.
+
+ signatures::
+
+ margins()
+
+ returns xmargin, ymargin
+
+ ::
+
+ margins(margin)
+
+ margins(xmargin, ymargin)
+
+ margins(x=xmargin, y=ymargin)
+
+ margins(..., tight=False)
+
+ All three forms above set the xmargin and ymargin parameters.
+ All keyword parameters are optional. A single argument
+ specifies both xmargin and ymargin. The padding added to each end of
+ an interval is *margin* times the data interval. The *margin* must
+ be a float greater than -0.5 (as for `set_xmargin` and `set_ymargin`).
+
+ The *tight* parameter is passed to :meth:`autoscale_view`, which is
+ executed after a margin is changed; the default here is
+ *True*, on the assumption that when margins are specified, no
+ additional padding to match tick marks is usually desired. Setting
+ *tight* to *None* will preserve the previous setting.
+
+ Specifying any margin changes only the autoscaling; for example,
+ if *xmargin* is not None, then *xmargin* times the X data
+ interval will be added to each end of that interval before
+ it is used in autoscaling.
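+
+ For example (illustrative)::
+
+ ax.margins(0.05) # 5% padding on both axes
+ ax.margins(x=0, y=0.1) # no x padding, 10% y padding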
+
+ """
+ if not args and not kw:
+ return self._xmargin, self._ymargin
+
+ tight = kw.pop('tight', True)
+ mx = kw.pop('x', None)
+ my = kw.pop('y', None)
+ if len(args) == 1:
+ mx = my = args[0]
+ elif len(args) == 2:
+ mx, my = args
+ elif len(args) > 2:
+ raise ValueError("more than two arguments were supplied")
+ if mx is not None:
+ self.set_xmargin(mx)
+ if my is not None:
+ self.set_ymargin(my)
+
+ scalex = (mx is not None)
+ scaley = (my is not None)
+
+ self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley)
+
+ def set_rasterization_zorder(self, z):
+ """
+ Parameters
+ ----------
+ z : float or None
+ zorder below which artists are rasterized. ``None`` means that
+ artists do not get rasterized based on zorder.
+
+ .. ACCEPTS: float or None
+ """
+ self._rasterization_zorder = z
+ self.stale = True
+
+ def get_rasterization_zorder(self):
+ """Return the zorder value below which artists will be rasterized."""
+ return self._rasterization_zorder
+
+ def autoscale(self, enable=True, axis='both', tight=None):
+ """
+ Autoscale the axis view to the data (toggle).
+
+ Convenience method for simple axis view autoscaling.
+ It turns autoscaling on or off, and then,
+ if autoscaling for either axis is on, it performs
+ the autoscaling on the specified axis or axes.
+
+ Parameters
+ ----------
+ enable : bool or None, optional
+ True (default) turns autoscaling on, False turns it off.
+ None leaves the autoscaling state unchanged.
+
+ axis : ['both' | 'x' | 'y'], optional
+ which axis to operate on; default is 'both'
+
+ tight : bool or None, optional
+ If True, set view limits to data limits;
+ if False, let the locator and margins expand the view limits;
+ if None, use tight scaling if the only artist is an image,
+ otherwise treat *tight* as False.
+ The *tight* setting is retained for future autoscaling
+ until it is explicitly changed.
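+
+ For example (illustrative), to autoscale only the x-axis tightly::
+
+ ax.autoscale(enable=True, axis='x', tight=True)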
+
+ """
+ if enable is None:
+ scalex = True
+ scaley = True
+ else:
+ scalex = False
+ scaley = False
+ if axis in ['x', 'both']:
+ self._autoscaleXon = bool(enable)
+ scalex = self._autoscaleXon
+ if axis in ['y', 'both']:
+ self._autoscaleYon = bool(enable)
+ scaley = self._autoscaleYon
+ if tight and scalex:
+ self._xmargin = 0
+ if tight and scaley:
+ self._ymargin = 0
+ self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley)
+
+ def autoscale_view(self, tight=None, scalex=True, scaley=True):
+ """
+ Autoscale the view limits using the data limits.
+
+ You can selectively autoscale only a single axis, e.g., the xaxis by
+ setting *scaley* to *False*. The autoscaling preserves any
+ axis direction reversal that has already been done.
+
+ If *tight* is *False*, the axis major locator will be used
+ to expand the view limits if rcParams['axes.autolimit_mode']
+ is 'round_numbers'. Note that any margins that are in effect
+ will be applied first, regardless of whether *tight* is
+ *True* or *False*. Specifying *tight* as *True* or *False*
+ saves the setting as a private attribute of the Axes; specifying
+ it as *None* (the default) applies the previously saved value.
+
+ The data limits are not updated automatically when artist data are
+ changed after the artist has been added to an Axes instance. In that
+ case, use :meth:`matplotlib.axes.Axes.relim` prior to calling
+ autoscale_view.
+ """
+ if tight is not None:
+ self._tight = bool(tight)
+
+ if self.use_sticky_edges and (self._xmargin or self._ymargin):
+ stickies = [artist.sticky_edges for artist in self.get_children()]
+ x_stickies = sum([sticky.x for sticky in stickies], [])
+ y_stickies = sum([sticky.y for sticky in stickies], [])
+ if self.get_xscale().lower() == 'log':
+ x_stickies = [xs for xs in x_stickies if xs > 0]
+ if self.get_yscale().lower() == 'log':
+ y_stickies = [ys for ys in y_stickies if ys > 0]
+ else: # Small optimization.
+ x_stickies, y_stickies = [], []
+
+ def handle_single_axis(scale, autoscaleon, shared_axes, interval,
+ minpos, axis, margin, stickies, set_bound):
+
+ if not (scale and autoscaleon):
+ return # nothing to do...
+
+ shared = shared_axes.get_siblings(self)
+ dl = [ax.dataLim for ax in shared]
+ # ignore non-finite data limits if good limits exist
+ finite_dl = [d for d in dl if np.isfinite(d).all()]
+ if len(finite_dl):
+ # if finite limits exist for at least one axis (and the
+ # other is infinite), restore the finite limits
+ x_finite = [d for d in dl
+ if (np.isfinite(d.intervalx).all() and
+ (d not in finite_dl))]
+ y_finite = [d for d in dl
+ if (np.isfinite(d.intervaly).all() and
+ (d not in finite_dl))]
+
+ dl = finite_dl
+ dl.extend(x_finite)
+ dl.extend(y_finite)
+
+ bb = mtransforms.BboxBase.union(dl)
+ x0, x1 = getattr(bb, interval)
+ locator = axis.get_major_locator()
+ try:
+ # e.g., DateLocator has its own nonsingular()
+ x0, x1 = locator.nonsingular(x0, x1)
+ except AttributeError:
+ # Default nonsingular for, e.g., MaxNLocator
+ x0, x1 = mtransforms.nonsingular(
+ x0, x1, increasing=False, expander=0.05)
+
+ # Add the margin in figure space and then transform back, to handle
+ # non-linear scales.
+ minpos = getattr(bb, minpos)
+ transform = axis.get_transform()
+ inverse_trans = transform.inverted()
+ # We cannot use exact equality due to floating point issues e.g.
+ # with streamplot.
+ do_lower_margin = not np.any(np.isclose(x0, stickies))
+ do_upper_margin = not np.any(np.isclose(x1, stickies))
+ x0, x1 = axis._scale.limit_range_for_scale(x0, x1, minpos)
+ x0t, x1t = transform.transform([x0, x1])
+ delta = (x1t - x0t) * margin
+ if do_lower_margin:
+ x0t -= delta
+ if do_upper_margin:
+ x1t += delta
+ x0, x1 = inverse_trans.transform([x0t, x1t])
+
+ if not self._tight:
+ x0, x1 = locator.view_limits(x0, x1)
+ set_bound(x0, x1)
+ # End of definition of internal function 'handle_single_axis'.
+
+ handle_single_axis(
+ scalex, self._autoscaleXon, self._shared_x_axes, 'intervalx',
+ 'minposx', self.xaxis, self._xmargin, x_stickies, self.set_xbound)
+ handle_single_axis(
+ scaley, self._autoscaleYon, self._shared_y_axes, 'intervaly',
+ 'minposy', self.yaxis, self._ymargin, y_stickies, self.set_ybound)
+
+ def _get_axis_list(self):
+ return (self.xaxis, self.yaxis)
+
+ # Drawing
+
+ @allow_rasterization
+ def draw(self, renderer=None, inframe=False):
+ """Draw everything (plot lines, axes, labels)"""
+ if renderer is None:
+ renderer = self._cachedRenderer
+
+ if renderer is None:
+ raise RuntimeError('No renderer defined')
+ if not self.get_visible():
+ return
+ renderer.open_group('axes')
+ # prevent triggering call backs during the draw process
+ self._stale = True
+ locator = self.get_axes_locator()
+ if locator:
+ pos = locator(self, renderer)
+ self.apply_aspect(pos)
+ else:
+ self.apply_aspect()
+
+ artists = self.get_children()
+ artists.remove(self.patch)
+
+ # the frame draws the edges around the axes patch -- we
+ # decouple these so the patch can be in the background and the
+ # frame in the foreground. Do this before drawing the axis
+ # objects so that the spine has the opportunity to update them.
+ if not (self.axison and self._frameon):
+ for spine in six.itervalues(self.spines):
+ artists.remove(spine)
+
+ if self.axison and not inframe:
+ if self._axisbelow is True:
+ self.xaxis.set_zorder(0.5)
+ self.yaxis.set_zorder(0.5)
+ elif self._axisbelow is False:
+ self.xaxis.set_zorder(2.5)
+ self.yaxis.set_zorder(2.5)
+ else:
+ # 'line': above patches, below lines
+ self.xaxis.set_zorder(1.5)
+ self.yaxis.set_zorder(1.5)
+ else:
+ for _axis in self._get_axis_list():
+ artists.remove(_axis)
+
+ if inframe:
+ artists.remove(self.title)
+ artists.remove(self._left_title)
+ artists.remove(self._right_title)
+
+ if not self.figure.canvas.is_saving():
+ artists = [a for a in artists
+ if not a.get_animated() or a in self.images]
+ artists = sorted(artists, key=attrgetter('zorder'))
+
+ # rasterize artists with negative zorder
+ # if the minimum zorder is negative, start rasterization
+ rasterization_zorder = self._rasterization_zorder
+ if (rasterization_zorder is not None and
+ artists and artists[0].zorder < rasterization_zorder):
+ renderer.start_rasterizing()
+ artists_rasterized = [a for a in artists
+ if a.zorder < rasterization_zorder]
+ artists = [a for a in artists
+ if a.zorder >= rasterization_zorder]
+ else:
+ artists_rasterized = []
+
+ # the patch draws the background rectangle -- the frame below
+ # will draw the edges
+ if self.axison and self._frameon:
+ self.patch.draw(renderer)
+
+ if artists_rasterized:
+ for a in artists_rasterized:
+ a.draw(renderer)
+ renderer.stop_rasterizing()
+
+ mimage._draw_list_compositing_images(renderer, self, artists)
+
+ renderer.close_group('axes')
+ self._cachedRenderer = renderer
+ self.stale = False
+
+ def draw_artist(self, a):
+ """
+ This method can only be used after an initial draw which
+ caches the renderer. It is used to efficiently update Axes
+ data (axis ticks, labels, etc. are not updated).
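+
+ A rough blitting sketch (illustrative; assumes ``fig``, ``ax``, a
+ ``line`` artist and an iterable ``frames`` of y-data already exist; a
+ full loop would also save and restore the background with
+ ``canvas.copy_from_bbox``/``canvas.restore_region``)::
+
+ fig.canvas.draw() # initial draw caches the renderer
+ for ydata in frames:
+ line.set_ydata(ydata)
+ ax.draw_artist(line) # redraw only this artist
+ fig.canvas.blit(ax.bbox) # push the updated region to the screen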
+ """
+ if self._cachedRenderer is None:
+ raise AttributeError("draw_artist can only be used after an "
+ "initial draw which caches the renderer")
+ a.draw(self._cachedRenderer)
+
+ def redraw_in_frame(self):
+ """
+ This method can only be used after an initial draw which
+ caches the renderer. It is used to efficiently update Axes
+ data (axis ticks, labels, etc. are not updated).
+ """
+ if self._cachedRenderer is None:
+ raise AttributeError("redraw_in_frame can only be used after an "
+ "initial draw which caches the renderer")
+ self.draw(self._cachedRenderer, inframe=True)
+
+ def get_renderer_cache(self):
+ return self._cachedRenderer
+
+ # Axes rectangle characteristics
+
+ def get_frame_on(self):
+ """
+ Get whether the axes rectangle patch is drawn.
+ """
+ return self._frameon
+
+ def set_frame_on(self, b):
+ """
+ Set whether the axes rectangle patch is drawn.
+
+ .. ACCEPTS: bool
+
+ Parameters
+ ----------
+ b : bool
+ """
+ self._frameon = b
+ self.stale = True
+
+ def get_axisbelow(self):
+ """
+ Get whether axis ticks and gridlines are above or below most artists.
+ """
+ return self._axisbelow
+
+ def set_axisbelow(self, b):
+ """
+ Set whether axis ticks and gridlines are above or below most artists.
+
+ .. ACCEPTS: [ bool | 'line' ]
+
+ Parameters
+ ----------
+ b : bool or 'line'
+ """
+ self._axisbelow = validate_axisbelow(b)
+ self.stale = True
+
+ @docstring.dedent_interpd
+ def grid(self, b=None, which='major', axis='both', **kwargs):
+ """
+ Turn the axes grids on or off.
+
+ Set the axes grids on or off; *b* is a boolean.
+
+ If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
+ *kwargs* are supplied, it is assumed that you want a grid and *b*
+ is thus set to *True*.
+
+ *which* can be 'major' (default), 'minor', or 'both' to control
+ whether major tick grids, minor tick grids, or both are affected.
+
+ *axis* can be 'both' (default), 'x', or 'y' to control which
+ set of gridlines are drawn.
+
+ *kwargs* are used to set the grid line properties, e.g.,::
+
+ ax.grid(color='r', linestyle='-', linewidth=2)
+
+ Valid :class:`~matplotlib.lines.Line2D` kwargs are
+
+ %(Line2D)s
+
+ """
+ if len(kwargs):
+ b = True
+ elif b is not None:
+ b = _string_to_bool(b)
+
+ if axis == 'x' or axis == 'both':
+ self.xaxis.grid(b, which=which, **kwargs)
+ if axis == 'y' or axis == 'both':
+ self.yaxis.grid(b, which=which, **kwargs)
+
+ def ticklabel_format(self, **kwargs):
+ """
+ Change the `~matplotlib.ticker.ScalarFormatter` used by
+ default for linear axes.
+
+ Optional keyword arguments:
+
+ ============== =========================================
+ Keyword Description
+ ============== =========================================
+ *style* [ 'sci' (or 'scientific') | 'plain' ]
+ plain turns off scientific notation
+ *scilimits* (m, n), pair of integers; if *style*
+ is 'sci', scientific notation will
+ be used for numbers outside the range
+ 10`m`:sup: to 10`n`:sup:.
+ Use (0,0) to include all numbers.
+ *useOffset* [ bool | offset ]; if True,
+ the offset will be calculated as needed;
+ if False, no offset will be used; if a
+ numeric offset is specified, it will be
+ used.
+ *axis* [ 'x' | 'y' | 'both' ]
+ *useLocale* If True, format the number according to
+ the current locale. This affects things
+ such as the character used for the
+ decimal separator. If False, use
+ C-style (English) formatting. The
+ default setting is controlled by the
+ axes.formatter.use_locale rcparam.
+ *useMathText* If True, render the offset and scientific
+ notation in mathtext
+ ============== =========================================
+
+ Only the major ticks are affected.
+ If the method is called when the
+ :class:`~matplotlib.ticker.ScalarFormatter` is not the
+ :class:`~matplotlib.ticker.Formatter` being used, an
+ :exc:`AttributeError` will be raised.
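+
+ For example (illustrative), to force scientific notation on the y-axis
+ for values outside the range 10**-2 to 10**2::
+
+ ax.ticklabel_format(style='sci', scilimits=(-2, 2), axis='y')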
+
+ """
+ style = kwargs.pop('style', '').lower()
+ scilimits = kwargs.pop('scilimits', None)
+ useOffset = kwargs.pop('useOffset', None)
+ useLocale = kwargs.pop('useLocale', None)
+ useMathText = kwargs.pop('useMathText', None)
+ axis = kwargs.pop('axis', 'both').lower()
+ if scilimits is not None:
+ try:
+ m, n = scilimits
+ m + n + 1 # check that both are numbers
+ except (ValueError, TypeError):
+ raise ValueError("scilimits must be a sequence of 2 integers")
+ if style[:3] == 'sci':
+ sb = True
+ elif style == 'plain':
+ sb = False
+ elif style == 'comma':
+ raise NotImplementedError("comma style remains to be added")
+ elif style == '':
+ sb = None
+ else:
+ raise ValueError("%s is not a valid style value")
+ try:
+ if sb is not None:
+ if axis == 'both' or axis == 'x':
+ self.xaxis.major.formatter.set_scientific(sb)
+ if axis == 'both' or axis == 'y':
+ self.yaxis.major.formatter.set_scientific(sb)
+ if scilimits is not None:
+ if axis == 'both' or axis == 'x':
+ self.xaxis.major.formatter.set_powerlimits(scilimits)
+ if axis == 'both' or axis == 'y':
+ self.yaxis.major.formatter.set_powerlimits(scilimits)
+ if useOffset is not None:
+ if axis == 'both' or axis == 'x':
+ self.xaxis.major.formatter.set_useOffset(useOffset)
+ if axis == 'both' or axis == 'y':
+ self.yaxis.major.formatter.set_useOffset(useOffset)
+ if useLocale is not None:
+ if axis == 'both' or axis == 'x':
+ self.xaxis.major.formatter.set_useLocale(useLocale)
+ if axis == 'both' or axis == 'y':
+ self.yaxis.major.formatter.set_useLocale(useLocale)
+ if useMathText is not None:
+ if axis == 'both' or axis == 'x':
+ self.xaxis.major.formatter.set_useMathText(useMathText)
+ if axis == 'both' or axis == 'y':
+ self.yaxis.major.formatter.set_useMathText(useMathText)
+ except AttributeError:
+ raise AttributeError(
+ "This method only works with the ScalarFormatter.")
+
+ def locator_params(self, axis='both', tight=None, **kwargs):
+ """
+ Control behavior of tick locators.
+
+ Parameters
+ ----------
+ axis : ['both' | 'x' | 'y'], optional
+ The axis on which to operate.
+
+ tight : bool or None, optional
+ Parameter passed to :meth:`autoscale_view`.
+ Default is None, for no change.
+
+ Other Parameters
+ ----------------
+ **kwargs :
+ Remaining keyword arguments are passed directly to the
+ :meth:`~matplotlib.ticker.MaxNLocator.set_params` method.
+
+ Typically one might want to reduce the maximum number
+ of ticks and use tight bounds when plotting small
+ subplots, for example::
+
+ ax.locator_params(tight=True, nbins=4)
+
+ Because the locator is involved in autoscaling,
+ :meth:`autoscale_view` is called automatically after
+ the parameters are changed.
+
+ This presently works only for the
+ :class:`~matplotlib.ticker.MaxNLocator` used
+ by default on linear axes, but it may be generalized.
+ """
+ _x = axis in ['x', 'both']
+ _y = axis in ['y', 'both']
+ if _x:
+ self.xaxis.get_major_locator().set_params(**kwargs)
+ if _y:
+ self.yaxis.get_major_locator().set_params(**kwargs)
+ self.autoscale_view(tight=tight, scalex=_x, scaley=_y)
+
+ def tick_params(self, axis='both', **kwargs):
+ """Change the appearance of ticks, tick labels, and gridlines.
+
+ Parameters
+ ----------
+ axis : {'x', 'y', 'both'}, optional
+ Which axis to apply the parameters to; default is 'both'.
+
+ Other Parameters
+ ----------------
+
+ reset : bool
+ If *True*, set all parameters to defaults
+ before processing other keyword arguments. Default is
+ *False*.
+
+ which : {'major', 'minor', 'both'}
+ Default is 'major'; apply arguments to *which* ticks.
+
+ direction : {'in', 'out', 'inout'}
+ Puts ticks inside the axes, outside the axes, or both.
+
+ length : float
+ Tick length in points.
+
+ width : float
+ Tick width in points.
+
+ color : color
+ Tick color; accepts any mpl color spec.
+
+ pad : float
+ Distance in points between tick and label.
+
+ labelsize : float or str
+ Tick label font size in points or as a string (e.g., 'large').
+
+ labelcolor : color
+ Tick label color; mpl color spec.
+
+ colors : color
+ Changes the tick color and the label color to the same value:
+ mpl color spec.
+
+ zorder : float
+ Tick and label zorder.
+
+ bottom, top, left, right : bool
+ Whether to draw the respective ticks.
+
+ labelbottom, labeltop, labelleft, labelright : bool
+ Whether to draw the respective tick labels.
+
+ labelrotation : float
+ Tick label rotation
+
+ grid_color : color
+ Changes the gridline color to the given mpl color spec.
+
+ grid_alpha : float
+ Transparency of gridlines: 0 (transparent) to 1 (opaque).
+
+ grid_linewidth : float
+ Width of gridlines in points.
+
+ grid_linestyle : string
+ Any valid :class:`~matplotlib.lines.Line2D` line style spec.
+
+ Examples
+ --------
+
+ Usage ::
+
+ ax.tick_params(direction='out', length=6, width=2, colors='r',
+ grid_color='r', grid_alpha=0.5)
+
+ This will make all major ticks be red, pointing out of the box,
+ and with dimensions 6 points by 2 points. Tick labels will
+ also be red. Gridlines will be red and translucent.
+
+ """
+ if axis in ['x', 'both']:
+ xkw = dict(kwargs)
+ xkw.pop('left', None)
+ xkw.pop('right', None)
+ xkw.pop('labelleft', None)
+ xkw.pop('labelright', None)
+ self.xaxis.set_tick_params(**xkw)
+ if axis in ['y', 'both']:
+ ykw = dict(kwargs)
+ ykw.pop('top', None)
+ ykw.pop('bottom', None)
+ ykw.pop('labeltop', None)
+ ykw.pop('labelbottom', None)
+ self.yaxis.set_tick_params(**ykw)
+
+ def set_axis_off(self):
+ """Turn off the axis."""
+ self.axison = False
+ self.stale = True
+
+ def set_axis_on(self):
+ """Turn on the axis."""
+ self.axison = True
+ self.stale = True
+
+ # data limits, ticks, tick labels, and formatting
+
+ def invert_xaxis(self):
+ """Invert the x-axis."""
+ self.set_xlim(self.get_xlim()[::-1], auto=None)
+
+ def xaxis_inverted(self):
+ """Return whether the x-axis is inverted."""
+ left, right = self.get_xlim()
+ return right < left
+
+ def get_xbound(self):
+ """Return the lower and upper x-axis bounds, in increasing order."""
+ left, right = self.get_xlim()
+ if left < right:
+ return left, right
+ else:
+ return right, left
+
+ def set_xbound(self, lower=None, upper=None):
+ """
+ Set the lower and upper numerical bounds of the x-axis.
+
+ This method will honor axes inversion regardless of parameter order.
+ It will not change the _autoscaleXon attribute.
+
+ .. ACCEPTS: (lower: float, upper: float)
+ """
+ if upper is None and iterable(lower):
+ lower, upper = lower
+
+ old_lower, old_upper = self.get_xbound()
+
+ if lower is None:
+ lower = old_lower
+ if upper is None:
+ upper = old_upper
+
+ if self.xaxis_inverted():
+ if lower < upper:
+ self.set_xlim(upper, lower, auto=None)
+ else:
+ self.set_xlim(lower, upper, auto=None)
+ else:
+ if lower < upper:
+ self.set_xlim(lower, upper, auto=None)
+ else:
+ self.set_xlim(upper, lower, auto=None)
+
+ def get_xlim(self):
+ """
+ Get the x-axis range
+
+ Returns
+ -------
+ xlimits : tuple
+ Returns the current x-axis limits as the tuple
+ (`left`, `right`).
+
+ Notes
+ -----
+ The x-axis may be inverted, in which case the `left` value will
+ be greater than the `right` value.
+
+ """
+ return tuple(self.viewLim.intervalx)
+
+ def _validate_converted_limits(self, limit, convert):
+ """
+ Raise ValueError if converted limits are non-finite.
+
+ Note that this function also accepts None as a limit argument.
+
+ Returns
+ -------
+ The limit value after call to convert(), or None if limit is None.
+
+ """
+ if limit is not None:
+ converted_limit = convert(limit)
+ if (isinstance(converted_limit, float) and
+ (not np.isreal(converted_limit) or
+ not np.isfinite(converted_limit))):
+ raise ValueError("Axis limits cannot be NaN or Inf")
+ return converted_limit
+
+ def set_xlim(self, left=None, right=None, emit=True, auto=False, **kw):
+ """
+ Set the data limits for the x-axis
+
+ .. ACCEPTS: (left: float, right: float)
+
+ Parameters
+ ----------
+ left : scalar, optional
+ The left xlim (default: None, which leaves the left limit
+ unchanged).
+
+ right : scalar, optional
+ The right xlim (default: None, which leaves the right limit
+ unchanged).
+
+ emit : bool, optional
+ Whether to notify observers of limit change (default: True).
+
+ auto : bool or None, optional
+ Whether to turn on autoscaling of the x-axis. True turns on,
+ False turns off (default action), None leaves unchanged.
+
+ xlimits : tuple, optional
+ The left and right xlims may be passed as the tuple
+ (`left`, `right`) as the first positional argument (or as
+ the `left` keyword argument).
+
+ Returns
+ -------
+ xlimits : tuple
+ Returns the new x-axis limits as (`left`, `right`).
+
+ Notes
+ -----
+ The `left` value may be greater than the `right` value, in which
+ case the x-axis values will decrease from left to right.
+
+ Examples
+ --------
+ >>> set_xlim(left, right)
+ >>> set_xlim((left, right))
+ >>> left, right = set_xlim(left, right)
+
+ One limit may be left unchanged.
+
+ >>> set_xlim(right=right_lim)
+
+ Limits may be passed in reverse order to flip the direction of
+ the x-axis. For example, suppose `x` represents the number of
+ years before present. The x-axis limits might be set like the
+ following so 5000 years ago is on the left of the plot and the
+ present is on the right.
+
+ >>> set_xlim(5000, 0)
+
+ """
+ if 'xmin' in kw:
+ left = kw.pop('xmin')
+ if 'xmax' in kw:
+ right = kw.pop('xmax')
+ if kw:
+ raise ValueError("unrecognized kwargs: %s" % list(kw))
+
+ if right is None and iterable(left):
+ left, right = left
+
+ self._process_unit_info(xdata=(left, right))
+ left = self._validate_converted_limits(left, self.convert_xunits)
+ right = self._validate_converted_limits(right, self.convert_xunits)
+
+ old_left, old_right = self.get_xlim()
+ if left is None:
+ left = old_left
+ if right is None:
+ right = old_right
+
+ if left == right:
+ warnings.warn(
+ ('Attempting to set identical left==right results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'left=%s, right=%s') % (left, right))
+ left, right = mtransforms.nonsingular(left, right, increasing=False)
+
+ if self.get_xscale() == 'log' and (left <= 0.0 or right <= 0.0):
+ warnings.warn(
+ 'Attempted to set non-positive xlimits for log-scale axis; '
+ 'invalid limits will be ignored.')
+ left, right = self.xaxis.limit_range_for_scale(left, right)
+
+ self.viewLim.intervalx = (left, right)
+ if auto is not None:
+ self._autoscaleXon = bool(auto)
+
+ if emit:
+ self.callbacks.process('xlim_changed', self)
+ # Call all of the other x-axes that are shared with this one
+ for other in self._shared_x_axes.get_siblings(self):
+ if other is not self:
+ other.set_xlim(self.viewLim.intervalx,
+ emit=False, auto=auto)
+ if (other.figure != self.figure and
+ other.figure.canvas is not None):
+ other.figure.canvas.draw_idle()
+ self.stale = True
+ return left, right
+
+ def get_xscale(self):
+ return self.xaxis.get_scale()
+ get_xscale.__doc__ = "Return the xaxis scale string: %s" % (
+ ", ".join(mscale.get_scale_names()))
+
+ def set_xscale(self, value, **kwargs):
+ """
+ Set the x-axis scale.
+
+ .. ACCEPTS: [ 'linear' | 'log' | 'symlog' | 'logit' | ... ]
+
+ Parameters
+ ----------
+ value : {"linear", "log", "symlog", "logit"}
+ scaling strategy to apply
+
+ Notes
+ -----
+ Different kwargs are accepted, depending on the scale. See
+ the `~matplotlib.scale` module for more information.
+
+ See also
+ --------
+ matplotlib.scale.LinearScale : linear transform
+
+ matplotlib.scale.LogTransform : log transform
+
+ matplotlib.scale.SymmetricalLogTransform : symlog transform
+
+ matplotlib.scale.LogisticTransform : logit transform
+ """
+ g = self.get_shared_x_axes()
+ for ax in g.get_siblings(self):
+ ax.xaxis._set_scale(value, **kwargs)
+ ax._update_transScale()
+ ax.stale = True
+
+ self.autoscale_view(scaley=False)
+
+ def get_xticks(self, minor=False):
+ """Return the x ticks as a list of locations"""
+ return self.xaxis.get_ticklocs(minor=minor)
+
+ def set_xticks(self, ticks, minor=False):
+ """
+ Set the x ticks with list of *ticks*
+
+ .. ACCEPTS: list of tick locations.
+
+ Parameters
+ ----------
+ ticks : list
+ List of x-axis tick locations.
+
+ minor : bool, optional
+ If ``False`` sets major ticks, if ``True`` sets minor ticks.
+ Default is ``False``.
+ """
+ ret = self.xaxis.set_ticks(ticks, minor=minor)
+ self.stale = True
+ return ret
+
+ def get_xmajorticklabels(self):
+ """
+ Get the major x tick labels.
+
+ Returns
+ -------
+ labels : list
+ List of :class:`~matplotlib.text.Text` instances
+ """
+ return cbook.silent_list('Text xticklabel',
+ self.xaxis.get_majorticklabels())
+
+ def get_xminorticklabels(self):
+ """
+ Get the minor x tick labels.
+
+ Returns
+ -------
+ labels : list
+ List of :class:`~matplotlib.text.Text` instances
+ """
+ return cbook.silent_list('Text xticklabel',
+ self.xaxis.get_minorticklabels())
+
+ def get_xticklabels(self, minor=False, which=None):
+ """
+ Get the x tick labels as a list of :class:`~matplotlib.text.Text`
+ instances.
+
+ Parameters
+ ----------
+ minor : bool, optional
+ If True return the minor ticklabels,
+ else return the major ticklabels.
+
+ which : None, ('minor', 'major', 'both')
+ Overrides `minor`.
+
+ Selects which ticklabels to return
+
+ Returns
+ -------
+ ret : list
+ List of :class:`~matplotlib.text.Text` instances.
+ """
+ return cbook.silent_list('Text xticklabel',
+ self.xaxis.get_ticklabels(minor=minor,
+ which=which))
+
+ def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
+ """
+ Set the x-tick labels with a list of string labels.
+
+ .. ACCEPTS: list of string labels
+
+ Parameters
+ ----------
+ labels : list of str
+ List of string labels.
+
+ fontdict : dict, optional
+ A dictionary controlling the appearance of the ticklabels.
+ The default `fontdict` is::
+
+ {'fontsize': rcParams['axes.titlesize'],
+ 'fontweight': rcParams['axes.titleweight'],
+ 'verticalalignment': 'baseline',
+ 'horizontalalignment': loc}
+
+ minor : bool, optional
+ Whether to set the minor ticklabels rather than the major ones.
+
+ Returns
+ -------
+ A list of `~.text.Text` instances.
+
+ Other Parameters
+ -----------------
+ **kwargs : `~.text.Text` properties.
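+
+ A common pattern (illustrative) is to set locations and labels
+ together::
+
+ ax.set_xticks([0, 1, 2])
+ ax.set_xticklabels(['low', 'mid', 'high'], rotation=45)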
+ """
+ if fontdict is not None:
+ kwargs.update(fontdict)
+ ret = self.xaxis.set_ticklabels(labels,
+ minor=minor, **kwargs)
+ self.stale = True
+ return ret
+
+ def invert_yaxis(self):
+ """Invert the y-axis."""
+ self.set_ylim(self.get_ylim()[::-1], auto=None)
+
+ def yaxis_inverted(self):
+ """Return whether the y-axis is inverted."""
+ bottom, top = self.get_ylim()
+ return top < bottom
+
+ def get_ybound(self):
+ """Return the lower and upper y-axis bounds, in increasing order."""
+ bottom, top = self.get_ylim()
+ if bottom < top:
+ return bottom, top
+ else:
+ return top, bottom
+
+ def set_ybound(self, lower=None, upper=None):
+ """
+ Set the lower and upper numerical bounds of the y-axis.
+ This method will honor axes inversion regardless of parameter order.
+ It will not change the _autoscaleYon attribute.
+
+ .. ACCEPTS: (lower: float, upper: float)
+ """
+ if upper is None and iterable(lower):
+ lower, upper = lower
+
+ old_lower, old_upper = self.get_ybound()
+
+ if lower is None:
+ lower = old_lower
+ if upper is None:
+ upper = old_upper
+
+ if self.yaxis_inverted():
+ if lower < upper:
+ self.set_ylim(upper, lower, auto=None)
+ else:
+ self.set_ylim(lower, upper, auto=None)
+ else:
+ if lower < upper:
+ self.set_ylim(lower, upper, auto=None)
+ else:
+ self.set_ylim(upper, lower, auto=None)
+
+ def get_ylim(self):
+ """
+ Get the y-axis range
+
+ Returns
+ -------
+ ylimits : tuple
+ Returns the current y-axis limits as the tuple
+ (`bottom`, `top`).
+
+ Notes
+ -----
+ The y-axis may be inverted, in which case the `bottom` value
+ will be greater than the `top` value.
+
+ """
+ return tuple(self.viewLim.intervaly)
+
+ def set_ylim(self, bottom=None, top=None, emit=True, auto=False, **kw):
+ """
+ Set the data limits for the y-axis
+
+ .. ACCEPTS: (bottom: float, top: float)
+
+ Parameters
+ ----------
+ bottom : scalar, optional
+ The bottom ylim (default: None, which leaves the bottom
+ limit unchanged).
+
+ top : scalar, optional
+ The top ylim (default: None, which leaves the top limit
+ unchanged).
+
+ emit : bool, optional
+ Whether to notify observers of limit change (default: True).
+
+ auto : bool or None, optional
+ Whether to turn on autoscaling of the y-axis. True turns on,
+ False turns off (default action), None leaves unchanged.
+
+ ylimits : tuple, optional
+ The bottom and top ylims may be passed as the tuple
+ (`bottom`, `top`) as the first positional argument (or as
+ the `bottom` keyword argument).
+
+ Returns
+ -------
+ ylimits : tuple
+ Returns the new y-axis limits as (`bottom`, `top`).
+
+ Notes
+ -----
+ The `bottom` value may be greater than the `top` value, in which
+ case the y-axis values will decrease from bottom to top.
+
+ Examples
+ --------
+ >>> set_ylim(bottom, top)
+ >>> set_ylim((bottom, top))
+ >>> bottom, top = set_ylim(bottom, top)
+
+ One limit may be left unchanged.
+
+ >>> set_ylim(top=top_lim)
+
+ Limits may be passed in reverse order to flip the direction of
+ the y-axis. For example, suppose `y` represents depth of the
+ ocean in m. The y-axis limits might be set like the following
+ so 5000 m depth is at the bottom of the plot and the surface,
+ 0 m, is at the top.
+
+ >>> set_ylim(5000, 0)
+ """
+ if 'ymin' in kw:
+ bottom = kw.pop('ymin')
+ if 'ymax' in kw:
+ top = kw.pop('ymax')
+ if kw:
+ raise ValueError("unrecognized kwargs: %s" % list(kw))
+
+ if top is None and iterable(bottom):
+ bottom, top = bottom
+
+ bottom = self._validate_converted_limits(bottom, self.convert_yunits)
+ top = self._validate_converted_limits(top, self.convert_yunits)
+
+ old_bottom, old_top = self.get_ylim()
+
+ if bottom is None:
+ bottom = old_bottom
+ if top is None:
+ top = old_top
+
+ if bottom == top:
+ warnings.warn(
+ ('Attempting to set identical bottom==top results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'bottom=%s, top=%s') % (bottom, top))
+
+ bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
+
+ if self.get_yscale() == 'log' and (bottom <= 0.0 or top <= 0.0):
+ warnings.warn(
+ 'Attempted to set non-positive ylimits for log-scale axis; '
+ 'invalid limits will be ignored.')
+ bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
+
+ self.viewLim.intervaly = (bottom, top)
+ if auto is not None:
+ self._autoscaleYon = bool(auto)
+
+ if emit:
+ self.callbacks.process('ylim_changed', self)
+ # Call all of the other y-axes that are shared with this one
+ for other in self._shared_y_axes.get_siblings(self):
+ if other is not self:
+ other.set_ylim(self.viewLim.intervaly,
+ emit=False, auto=auto)
+ if (other.figure != self.figure and
+ other.figure.canvas is not None):
+ other.figure.canvas.draw_idle()
+ self.stale = True
+ return bottom, top
+
+ def get_yscale(self):
+ return self.yaxis.get_scale()
+ get_yscale.__doc__ = "Return the yaxis scale string: %s" % (
+ ", ".join(mscale.get_scale_names()))
+
+ def set_yscale(self, value, **kwargs):
+ """
+ Set the y-axis scale.
+
+ .. ACCEPTS: [ 'linear' | 'log' | 'symlog' | 'logit' | ... ]
+
+ Parameters
+ ----------
+ value : {"linear", "log", "symlog", "logit"}
+ scaling strategy to apply
+
+ Notes
+ -----
+ Different kwargs are accepted, depending on the scale. See
+ the `~matplotlib.scale` module for more information.
+
+ See also
+ --------
+ matplotlib.scale.LinearScale : linear transform
+
+ matplotlib.scale.LogTransform : log transform
+
+ matplotlib.scale.SymmetricalLogTransform : symlog transform
+
+ matplotlib.scale.LogisticTransform : logit transform
+ """
+ g = self.get_shared_y_axes()
+ for ax in g.get_siblings(self):
+ ax.yaxis._set_scale(value, **kwargs)
+ ax._update_transScale()
+ ax.stale = True
+ self.autoscale_view(scalex=False)
+
+ def get_yticks(self, minor=False):
+ """Return the y ticks as a list of locations"""
+ return self.yaxis.get_ticklocs(minor=minor)
+
+ def set_yticks(self, ticks, minor=False):
+ """
+ Set the y ticks with list of *ticks*
+
+ .. ACCEPTS: list of tick locations.
+
+ Parameters
+ ----------
+ ticks : sequence
+ List of y-axis tick locations
+
+ minor : bool, optional
+ If ``False`` sets major ticks, if ``True`` sets minor ticks.
+ Default is ``False``.
+ """
+ ret = self.yaxis.set_ticks(ticks, minor=minor)
+ return ret
+
+ def get_ymajorticklabels(self):
+ """
+ Get the major y tick labels.
+
+ Returns
+ -------
+ labels : list
+ List of :class:`~matplotlib.text.Text` instances
+ """
+ return cbook.silent_list('Text yticklabel',
+ self.yaxis.get_majorticklabels())
+
+ def get_yminorticklabels(self):
+ """
+ Get the minor y tick labels.
+
+ Returns
+ -------
+ labels : list
+ List of :class:`~matplotlib.text.Text` instances
+ """
+ return cbook.silent_list('Text yticklabel',
+ self.yaxis.get_minorticklabels())
+
+ def get_yticklabels(self, minor=False, which=None):
+ """
+ Get the y tick labels as a list of :class:`~matplotlib.text.Text`
+ instances.
+
+ Parameters
+ ----------
+ minor : bool
+ If True return the minor ticklabels,
+ else return the major ticklabels
+
+ which : None, ('minor', 'major', 'both')
+ Overrides `minor`.
+
+ Selects which ticklabels to return
+
+ Returns
+ -------
+ ret : list
+ List of :class:`~matplotlib.text.Text` instances.
+ """
+ return cbook.silent_list('Text yticklabel',
+ self.yaxis.get_ticklabels(minor=minor,
+ which=which))
+
+ def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
+ """
+ Set the y-tick labels with a list of string labels.
+
+ .. ACCEPTS: list of string labels
+
+ Parameters
+ ----------
+ labels : list of str
+ list of string labels
+
+ fontdict : dict, optional
+ A dictionary controlling the appearance of the ticklabels.
+ The default `fontdict` is::
+
+ {'fontsize': rcParams['axes.titlesize'],
+ 'fontweight': rcParams['axes.titleweight'],
+ 'verticalalignment': 'baseline',
+ 'horizontalalignment': loc}
+
+ minor : bool, optional
+ Whether to set the minor ticklabels rather than the major ones.
+
+ Returns
+ -------
+ A list of `~.text.Text` instances.
+
+ Other Parameters
+ ----------------
+ **kwargs : `~.text.Text` properties.
+ """
+ if fontdict is not None:
+ kwargs.update(fontdict)
+ return self.yaxis.set_ticklabels(labels,
+ minor=minor, **kwargs)
+
+ def xaxis_date(self, tz=None):
+ """
+ Sets up x-axis ticks and labels that treat the x data as dates.
+
+ Parameters
+ ----------
+ tz : string or :class:`tzinfo` instance, optional
+ Timezone string or timezone. Defaults to rc value.
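+
+ For example (illustrative; assumes ``dates`` is a sequence of
+ ``datetime`` objects and ``values`` a matching sequence)::
+
+ ax.plot(dates, values)
+ ax.xaxis_date() # use date locators and formatters on the x-axis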
+ """
+ # should be enough to inform the unit conversion interface
+ # dates are coming in
+ self.xaxis.axis_date(tz)
+
+ def yaxis_date(self, tz=None):
+ """
+ Sets up y-axis ticks and labels that treat the y data as dates.
+
+ Parameters
+ ----------
+ tz : string or :class:`tzinfo` instance, optional
+ Timezone string or timezone. Defaults to rc value.
+ """
+ self.yaxis.axis_date(tz)
+
+ def format_xdata(self, x):
+ """
+ Return *x* formatted as a string. This function will use the
+ :attr:`fmt_xdata` attribute if it is callable, else will fall back on
+ the x-axis major formatter.
+ """
+ try:
+ return self.fmt_xdata(x)
+ except TypeError:
+ func = self.xaxis.get_major_formatter().format_data_short
+ val = func(x)
+ return val
+
+ def format_ydata(self, y):
+ """
+ Return *y* formatted as a string. This function will use the
+ :attr:`fmt_ydata` attribute if it is callable, else will fall
+ back on the y-axis major formatter.
+ """
+ try:
+ return self.fmt_ydata(y)
+ except TypeError:
+ func = self.yaxis.get_major_formatter().format_data_short
+ val = func(y)
+ return val
+
+ def format_coord(self, x, y):
+ """Return a format string formatting the *x*, *y* coord"""
+ if x is None:
+ xs = '???'
+ else:
+ xs = self.format_xdata(x)
+ if y is None:
+ ys = '???'
+ else:
+ ys = self.format_ydata(y)
+ return 'x=%s y=%s' % (xs, ys)
+
+ def minorticks_on(self):
+ """Add autoscaling minor ticks to the axes."""
+ for ax in (self.xaxis, self.yaxis):
+ scale = ax.get_scale()
+ if scale == 'log':
+ s = ax._scale
+ ax.set_minor_locator(mticker.LogLocator(s.base, s.subs))
+ elif scale == 'symlog':
+ s = ax._scale
+ ax.set_minor_locator(
+ mticker.SymmetricalLogLocator(s._transform, s.subs))
+ else:
+ ax.set_minor_locator(mticker.AutoMinorLocator())
+
+ def minorticks_off(self):
+ """Remove minor ticks from the axes."""
+ self.xaxis.set_minor_locator(mticker.NullLocator())
+ self.yaxis.set_minor_locator(mticker.NullLocator())
+
+ # Interactive manipulation
+
+ def can_zoom(self):
+ """
+ Return *True* if this axes supports the zoom box button functionality.
+ """
+ return True
+
+ def can_pan(self):
+ """
+ Return *True* if this axes supports any pan/zoom button functionality.
+ """
+ return True
+
+ def get_navigate(self):
+ """
+ Get whether the axes responds to navigation commands
+ """
+ return self._navigate
+
+ def set_navigate(self, b):
+ """
+ Set whether the axes responds to navigation toolbar commands
+
+ .. ACCEPTS: bool
+
+ Parameters
+ ----------
+ b : bool
+ """
+ self._navigate = b
+
+ def get_navigate_mode(self):
+ """
+ Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
+ """
+ return self._navigate_mode
+
+ def set_navigate_mode(self, b):
+ """
+ Set the navigation toolbar button status.
+
+ .. warning::
+ this is not a user-API function.
+
+ """
+ self._navigate_mode = b
+
+ def _get_view(self):
+ """
+ Save information required to reproduce the current view.
+
+ Called before a view is changed, such as during a pan or zoom
+ initiated by the user. You may return any information you deem
+ necessary to describe the view.
+
+ .. note::
+
+ Intended to be overridden by new projection types, but if not, the
+ default implementation saves the view limits. You *must* implement
+ :meth:`_set_view` if you implement this method.
+ """
+ xmin, xmax = self.get_xlim()
+ ymin, ymax = self.get_ylim()
+ return (xmin, xmax, ymin, ymax)
+
+ def _set_view(self, view):
+ """
+ Apply a previously saved view.
+
+ Called when restoring a view, such as with the navigation buttons.
+
+ .. note::
+
+ Intended to be overridden by new projection types, but if not, the
+ default implementation restores the view limits. You *must*
+ implement :meth:`_get_view` if you implement this method.
+ """
+ xmin, xmax, ymin, ymax = view
+ self.set_xlim((xmin, xmax))
+ self.set_ylim((ymin, ymax))
+
+ def _set_view_from_bbox(self, bbox, direction='in',
+ mode=None, twinx=False, twiny=False):
+ """
+ Update view from a selection bbox.
+
+ .. note::
+
+ Intended to be overridden by new projection types, but if not, the
+ default implementation sets the view limits to the bbox directly.
+
+ Parameters
+ ----------
+
+ bbox : 4-tuple or 3-tuple
+ * If bbox is a 4-tuple, it is the selected bounding box limits,
+ in *display* coordinates.
+ * If bbox is a 3-tuple, it is an (xp, yp, scl) triple, where
+ (xp, yp) is the center of zooming and scl is the scale factor to
+ zoom by.
+
+ direction : str
+ The direction to apply the bounding box.
+ * `'in'` - The bounding box describes the view directly, i.e.,
+ it zooms in.
+ * `'out'` - The bounding box describes the size to make the
+ existing view, i.e., it zooms out.
+
+ mode : str or None
+ The selection mode, whether to apply the bounding box in only the
+ `'x'` direction, `'y'` direction or both (`None`).
+
+ twinx : bool
+ Whether this axis is twinned in the *x*-direction.
+
+ twiny : bool
+ Whether this axis is twinned in the *y*-direction.
+ """
+ Xmin, Xmax = self.get_xlim()
+ Ymin, Ymax = self.get_ylim()
+
+ if len(bbox) == 3:
+ # Zooming code
+ xp, yp, scl = bbox
+
+ # Should not happen
+ if scl == 0:
+ scl = 1.
+
+ # direction = 'in'
+ if scl > 1:
+ direction = 'in'
+ else:
+ direction = 'out'
+ scl = 1/scl
+
+ # get the limits of the axes
+ tranD2C = self.transData.transform
+ xmin, ymin = tranD2C((Xmin, Ymin))
+ xmax, ymax = tranD2C((Xmax, Ymax))
+
+ # set the range
+ xwidth = xmax - xmin
+ ywidth = ymax - ymin
+ xcen = (xmax + xmin)*.5
+ ycen = (ymax + ymin)*.5
+ xzc = (xp*(scl - 1) + xcen)/scl
+ yzc = (yp*(scl - 1) + ycen)/scl
+
+ bbox = [xzc - xwidth/2./scl, yzc - ywidth/2./scl,
+ xzc + xwidth/2./scl, yzc + ywidth/2./scl]
+ elif len(bbox) != 4:
+ # should be len 3 or 4 but nothing else
+ warnings.warn(
+ "Warning in _set_view_from_bbox: bounding box is not a tuple "
+ "of length 3 or 4. Ignoring the view change.")
+ return
+
+ # Just grab bounding box
+ lastx, lasty, x, y = bbox
+
+ # zoom to rect
+ inverse = self.transData.inverted()
+ lastx, lasty = inverse.transform_point((lastx, lasty))
+ x, y = inverse.transform_point((x, y))
+
+ if twinx:
+ x0, x1 = Xmin, Xmax
+ else:
+ if Xmin < Xmax:
+ if x < lastx:
+ x0, x1 = x, lastx
+ else:
+ x0, x1 = lastx, x
+ if x0 < Xmin:
+ x0 = Xmin
+ if x1 > Xmax:
+ x1 = Xmax
+ else:
+ if x > lastx:
+ x0, x1 = x, lastx
+ else:
+ x0, x1 = lastx, x
+ if x0 > Xmin:
+ x0 = Xmin
+ if x1 < Xmax:
+ x1 = Xmax
+
+ if twiny:
+ y0, y1 = Ymin, Ymax
+ else:
+ if Ymin < Ymax:
+ if y < lasty:
+ y0, y1 = y, lasty
+ else:
+ y0, y1 = lasty, y
+ if y0 < Ymin:
+ y0 = Ymin
+ if y1 > Ymax:
+ y1 = Ymax
+ else:
+ if y > lasty:
+ y0, y1 = y, lasty
+ else:
+ y0, y1 = lasty, y
+ if y0 > Ymin:
+ y0 = Ymin
+ if y1 < Ymax:
+ y1 = Ymax
+
+ if direction == 'in':
+ if mode == 'x':
+ self.set_xlim((x0, x1))
+ elif mode == 'y':
+ self.set_ylim((y0, y1))
+ else:
+ self.set_xlim((x0, x1))
+ self.set_ylim((y0, y1))
+ elif direction == 'out':
+ if self.get_xscale() == 'log':
+ alpha = np.log(Xmax / Xmin) / np.log(x1 / x0)
+ rx1 = pow(Xmin / x0, alpha) * Xmin
+ rx2 = pow(Xmax / x0, alpha) * Xmin
+ else:
+ alpha = (Xmax - Xmin) / (x1 - x0)
+ rx1 = alpha * (Xmin - x0) + Xmin
+ rx2 = alpha * (Xmax - x0) + Xmin
+ if self.get_yscale() == 'log':
+ alpha = np.log(Ymax / Ymin) / np.log(y1 / y0)
+ ry1 = pow(Ymin / y0, alpha) * Ymin
+ ry2 = pow(Ymax / y0, alpha) * Ymin
+ else:
+ alpha = (Ymax - Ymin) / (y1 - y0)
+ ry1 = alpha * (Ymin - y0) + Ymin
+ ry2 = alpha * (Ymax - y0) + Ymin
+
+ if mode == 'x':
+ self.set_xlim((rx1, rx2))
+ elif mode == 'y':
+ self.set_ylim((ry1, ry2))
+ else:
+ self.set_xlim((rx1, rx2))
+ self.set_ylim((ry1, ry2))
+
+ def start_pan(self, x, y, button):
+ """
+ Called when a pan operation has started.
+
+ *x*, *y* are the mouse coordinates in display coords.
+ button is the mouse button number:
+
+ * 1: LEFT
+ * 2: MIDDLE
+ * 3: RIGHT
+
+ .. note::
+
+ Intended to be overridden by new projection types.
+
+ """
+ self._pan_start = cbook.Bunch(
+ lim=self.viewLim.frozen(),
+ trans=self.transData.frozen(),
+ trans_inverse=self.transData.inverted().frozen(),
+ bbox=self.bbox.frozen(),
+ x=x,
+ y=y)
+
+ def end_pan(self):
+ """
+ Called when a pan operation completes (when the mouse button
+ is up.)
+
+ .. note::
+
+ Intended to be overridden by new projection types.
+
+ """
+ del self._pan_start
+
+ def drag_pan(self, button, key, x, y):
+ """
+ Called when the mouse moves during a pan operation.
+
+ *button* is the mouse button number:
+
+ * 1: LEFT
+ * 2: MIDDLE
+ * 3: RIGHT
+
+ *key* is a "shift" key
+
+ *x*, *y* are the mouse coordinates in display coords.
+
+ .. note::
+
+ Intended to be overridden by new projection types.
+
+ """
+ def format_deltas(key, dx, dy):
+ if key == 'control':
+ if abs(dx) > abs(dy):
+ dy = dx
+ else:
+ dx = dy
+ elif key == 'x':
+ dy = 0
+ elif key == 'y':
+ dx = 0
+ elif key == 'shift':
+ if 2 * abs(dx) < abs(dy):
+ dx = 0
+ elif 2 * abs(dy) < abs(dx):
+ dy = 0
+ elif abs(dx) > abs(dy):
+ dy = dy / abs(dy) * abs(dx)
+ else:
+ dx = dx / abs(dx) * abs(dy)
+ return dx, dy
+
+ p = self._pan_start
+ dx = x - p.x
+ dy = y - p.y
+ if dx == 0 and dy == 0:
+ return
+ if button == 1:
+ dx, dy = format_deltas(key, dx, dy)
+ result = p.bbox.translated(-dx, -dy).transformed(p.trans_inverse)
+ elif button == 3:
+ try:
+ dx = -dx / self.bbox.width
+ dy = -dy / self.bbox.height
+ dx, dy = format_deltas(key, dx, dy)
+ if self.get_aspect() != 'auto':
+ dx = dy = 0.5 * (dx + dy)
+ alpha = np.power(10.0, (dx, dy))
+ start = np.array([p.x, p.y])
+ oldpoints = p.lim.transformed(p.trans)
+ newpoints = start + alpha * (oldpoints - start)
+ result = (mtransforms.Bbox(newpoints)
+ .transformed(p.trans_inverse))
+ except OverflowError:
+ warnings.warn('Overflow while panning')
+ return
+ else:
+ # No action for other mouse buttons; otherwise ``result``
+ # below would be referenced before assignment.
+ return
+
+ valid = np.isfinite(result.transformed(p.trans))
+ points = result.get_points().astype(object)
+ # Just ignore invalid limits (typically, underflow in log-scale).
+ points[~valid] = None
+ self.set_xlim(points[:, 0])
+ self.set_ylim(points[:, 1])
+
+ @cbook.deprecated("2.1")
+ def get_cursor_props(self):
+ """
+ Return the cursor properties as a (*linewidth*, *color*)
+ tuple, where *linewidth* is a float and *color* is an RGBA
+ tuple.
+ """
+ return self._cursorProps
+
+ @cbook.deprecated("2.1")
+ def set_cursor_props(self, *args):
+ """Set the cursor property as
+
+ Call signature ::
+
+ ax.set_cursor_props(linewidth, color)
+
+ or::
+
+ ax.set_cursor_props((linewidth, color))
+
+ ACCEPTS: a (*float*, *color*) tuple
+ """
+ if len(args) == 1:
+ lw, c = args[0]
+ elif len(args) == 2:
+ lw, c = args
+ else:
+ raise ValueError('args must be a (linewidth, color) tuple')
+ c = mcolors.to_rgba(c)
+ self._cursorProps = lw, c
+
+ def get_children(self):
+ """return a list of child artists"""
+ children = []
+ children.extend(self.collections)
+ children.extend(self.patches)
+ children.extend(self.lines)
+ children.extend(self.texts)
+ children.extend(self.artists)
+ children.extend(six.itervalues(self.spines))
+ children.append(self.xaxis)
+ children.append(self.yaxis)
+ children.append(self.title)
+ children.append(self._left_title)
+ children.append(self._right_title)
+ children.extend(self.tables)
+ children.extend(self.images)
+ if self.legend_ is not None:
+ children.append(self.legend_)
+ children.append(self.patch)
+ return children
+
+ def contains(self, mouseevent):
+ """
+ Test whether the mouse event occurred in the axes.
+
+ Returns *True* / *False*, {}
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+ return self.patch.contains(mouseevent)
+
+ def contains_point(self, point):
+ """
+ Returns *True* if the point (a tuple of x, y) is inside the axes
+ (the area defined by its patch). A pixel coordinate is
+ required.
+
+ """
+ return self.patch.contains_point(point, radius=1.0)
+
+ def pick(self, *args):
+ """Trigger pick event
+
+ Call signature::
+
+ pick(mouseevent)
+
+ each child artist will fire a pick event if mouseevent is over
+ the artist and the artist has picker set
+ """
+ martist.Artist.pick(self, args[0])
+
+ def get_default_bbox_extra_artists(self):
+ return [artist for artist in self.get_children()
+ if artist.get_visible()]
+
+ def get_tightbbox(self, renderer, call_axes_locator=True):
+ """
+ Return the tight bounding box of the axes.
+ The dimensions of the Bbox are in canvas coordinates.
+
+ If *call_axes_locator* is *False*, it does not call the
+ _axes_locator attribute, which is necessary to get the correct
+ bounding box. ``call_axes_locator==False`` can be used if the
+ caller is only interested in the relative size of the tightbbox
+ compared to the axes bbox.
+ """
+
+ bb = []
+
+ if not self.get_visible():
+ return None
+
+ locator = self.get_axes_locator()
+ if locator and call_axes_locator:
+ pos = locator(self, renderer)
+ self.apply_aspect(pos)
+ else:
+ self.apply_aspect()
+
+ bb.append(self.get_window_extent(renderer))
+
+ if self.title.get_visible():
+ bb.append(self.title.get_window_extent(renderer))
+ if self._left_title.get_visible():
+ bb.append(self._left_title.get_window_extent(renderer))
+ if self._right_title.get_visible():
+ bb.append(self._right_title.get_window_extent(renderer))
+
+ bb_xaxis = self.xaxis.get_tightbbox(renderer)
+ if bb_xaxis:
+ bb.append(bb_xaxis)
+
+ bb_yaxis = self.yaxis.get_tightbbox(renderer)
+ if bb_yaxis:
+ bb.append(bb_yaxis)
+
+ for child in self.get_children():
+ if isinstance(child, OffsetBox) and child.get_visible():
+ bb.append(child.get_window_extent(renderer))
+ elif isinstance(child, Legend) and child.get_visible():
+ bb.append(child._legend_box.get_window_extent(renderer))
+
+ _bbox = mtransforms.Bbox.union(
+ [b for b in bb if b.width != 0 or b.height != 0])
+
+ return _bbox
+
+ def _make_twin_axes(self, *kl, **kwargs):
+ """
+ Make a twin Axes of *self*. This is used for twinx and twiny.
+ """
+ # Typically, SubplotBase._make_twin_axes is called instead of this.
+ # There is also an override in axes_grid1/axes_divider.py.
+ if 'sharex' in kwargs and 'sharey' in kwargs:
+ raise ValueError("Twinned Axes may share only one axis.")
+ ax2 = self.figure.add_axes(self.get_position(True), *kl, **kwargs)
+ self.set_adjustable('datalim')
+ ax2.set_adjustable('datalim')
+ self._twinned_axes.join(self, ax2)
+ return ax2
+
+ def twinx(self):
+ """
+ Create a twin Axes sharing the xaxis
+
+ Create a new Axes instance with an invisible x-axis and an independent
+ y-axis positioned opposite to the original one (i.e. at right). The
+ x-axis autoscale setting will be inherited from the original Axes.
+ To ensure that the tick marks of both y-axes align, see
+ `~matplotlib.ticker.LinearLocator`
+
+ Returns
+ -------
+ ax_twin : Axes
+ The newly created Axes instance
+
+ Notes
+ -----
+ For those who are 'picking' artists while using twinx, pick
+ events are only called for the artists in the top-most axes.
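+
+ Examples
+ --------
+ A minimal sketch (illustrative; assumes ``t``, ``s1`` and ``s2`` are
+ existing arrays)::
+
+ ax.plot(t, s1, color='C0')
+ ax2 = ax.twinx()
+ ax2.plot(t, s2, color='C1')
+ ax2.set_ylabel('second scale')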
+ """
+ ax2 = self._make_twin_axes(sharex=self)
+ ax2.yaxis.tick_right()
+ ax2.yaxis.set_label_position('right')
+ ax2.yaxis.set_offset_position('right')
+ ax2.set_autoscalex_on(self.get_autoscalex_on())
+ self.yaxis.tick_left()
+ ax2.xaxis.set_visible(False)
+ ax2.patch.set_visible(False)
+ return ax2
+
+ def twiny(self):
+ """
+        Create a twin Axes sharing the y-axis.
+
+        Create a new Axes instance with an invisible y-axis and an independent
+        x-axis positioned opposite to the original one (i.e. at top). The
+        y-axis autoscale setting will be inherited from the original Axes.
+        To ensure that the tick marks of both x-axes align, see
+        `~matplotlib.ticker.LinearLocator`.
+
+ Returns
+ -------
+ ax_twin : Axes
+ The newly created Axes instance
+
+ Notes
+ -----
+ For those who are 'picking' artists while using twiny, pick
+ events are only called for the artists in the top-most axes.
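+
+        Examples
+        --------
+        A minimal usage sketch (variable names here are illustrative only)::
+
+            import matplotlib.pyplot as plt
+
+            fig, ax_bottom = plt.subplots()
+            ax_bottom.plot([1, 2, 3], color='C0')
+            ax_top = ax_bottom.twiny()       # new x-axis on top
+            ax_top.set_xlim(0, 100)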
+ """
+
+ ax2 = self._make_twin_axes(sharey=self)
+ ax2.xaxis.tick_top()
+ ax2.xaxis.set_label_position('top')
+ ax2.set_autoscaley_on(self.get_autoscaley_on())
+ self.xaxis.tick_bottom()
+ ax2.yaxis.set_visible(False)
+ ax2.patch.set_visible(False)
+ return ax2
+
+ def get_shared_x_axes(self):
+ """Return a reference to the shared axes Grouper object for x axes."""
+ return self._shared_x_axes
+
+ def get_shared_y_axes(self):
+ """Return a reference to the shared axes Grouper object for y axes."""
+ return self._shared_y_axes
diff --git a/contrib/python/matplotlib/py2/matplotlib/axes/_subplots.py b/contrib/python/matplotlib/py2/matplotlib/axes/_subplots.py
new file mode 100644
index 00000000000..4c93ed996a1
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/axes/_subplots.py
@@ -0,0 +1,267 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import map
+
+from matplotlib.gridspec import GridSpec, SubplotSpec
+from matplotlib import docstring
+import matplotlib.artist as martist
+from matplotlib.axes._axes import Axes
+
+import matplotlib._layoutbox as layoutbox
+
+import warnings
+from matplotlib.cbook import mplDeprecation
+
+
+class SubplotBase(object):
+ """
+ Base class for subplots, which are :class:`Axes` instances with
+ additional methods to facilitate generating and manipulating a set
+ of :class:`Axes` within a figure.
+ """
+
+ def __init__(self, fig, *args, **kwargs):
+ """
+ *fig* is a :class:`matplotlib.figure.Figure` instance.
+
+ *args* is the tuple (*numRows*, *numCols*, *plotNum*), where
+ the array of subplots in the figure has dimensions *numRows*,
+ *numCols*, and where *plotNum* is the number of the subplot
+ being created. *plotNum* starts at 1 in the upper left
+ corner and increases to the right.
+
+        If *numRows*, *numCols* and *plotNum* are all single-digit numbers,
+        *args* can be passed as the single decimal integer
+        *numRows* * 100 + *numCols* * 10 + *plotNum*.
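+
+        For example, assuming *fig* is an existing
+        :class:`matplotlib.figure.Figure`, ``fig.add_subplot(2, 2, 3)`` and
+        ``fig.add_subplot(223)`` both refer to the third cell of a 2x2 grid,
+        i.e. the lower-left subplot.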
+ """
+
+ self.figure = fig
+
+ if len(args) == 1:
+ if isinstance(args[0], SubplotSpec):
+ self._subplotspec = args[0]
+ else:
+ try:
+ s = str(int(args[0]))
+ rows, cols, num = map(int, s)
+ except ValueError:
+ raise ValueError('Single argument to subplot must be '
+ 'a 3-digit integer')
+ self._subplotspec = GridSpec(rows, cols,
+ figure=self.figure)[num - 1]
+ # num - 1 for converting from MATLAB to python indexing
+ elif len(args) == 3:
+ rows, cols, num = args
+ rows = int(rows)
+ cols = int(cols)
+ if isinstance(num, tuple) and len(num) == 2:
+ num = [int(n) for n in num]
+ self._subplotspec = GridSpec(
+ rows, cols,
+ figure=self.figure)[(num[0] - 1):num[1]]
+ else:
+ if num < 1 or num > rows*cols:
+ raise ValueError(
+ ("num must be 1 <= num <= {maxn}, not {num}"
+ ).format(maxn=rows*cols, num=num))
+ self._subplotspec = GridSpec(
+ rows, cols, figure=self.figure)[int(num) - 1]
+ # num - 1 for converting from MATLAB to python indexing
+ else:
+ raise ValueError('Illegal argument(s) to subplot: %s' % (args,))
+
+ self.update_params()
+
+ # _axes_class is set in the subplot_class_factory
+ self._axes_class.__init__(self, fig, self.figbox, **kwargs)
+        # Add a layout box to this, for both the full axes and the position
+        # of the axes. We need both because the axes may become smaller
+        # due to parasitic axes and hence no longer fill the subplotspec.
+ if self._subplotspec._layoutbox is None:
+ self._layoutbox = None
+ self._poslayoutbox = None
+ else:
+ name = self._subplotspec._layoutbox.name + '.ax'
+ name = name + layoutbox.seq_id()
+ self._layoutbox = layoutbox.LayoutBox(
+ parent=self._subplotspec._layoutbox,
+ name=name,
+ artist=self)
+ self._poslayoutbox = layoutbox.LayoutBox(
+ parent=self._layoutbox,
+ name=self._layoutbox.name+'.pos',
+ pos=True, subplot=True, artist=self)
+
+ def __reduce__(self):
+ # get the first axes class which does not
+ # inherit from a subplotbase
+
+ def not_subplotbase(c):
+ return issubclass(c, Axes) and not issubclass(c, SubplotBase)
+
+ axes_class = [c for c in self.__class__.mro()
+ if not_subplotbase(c)][0]
+ r = [_PicklableSubplotClassConstructor(),
+ (axes_class,),
+ self.__getstate__()]
+ return tuple(r)
+
+ def get_geometry(self):
+ """get the subplot geometry, e.g., 2,2,3"""
+ rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
+ return rows, cols, num1 + 1 # for compatibility
+
+ # COVERAGE NOTE: Never used internally or from examples
+ def change_geometry(self, numrows, numcols, num):
+ """change subplot geometry, e.g., from 1,1,1 to 2,2,3"""
+ self._subplotspec = GridSpec(numrows, numcols,
+ figure=self.figure)[num - 1]
+ self.update_params()
+ self.set_position(self.figbox)
+
+ def get_subplotspec(self):
+ """get the SubplotSpec instance associated with the subplot"""
+ return self._subplotspec
+
+ def set_subplotspec(self, subplotspec):
+ """set the SubplotSpec instance associated with the subplot"""
+ self._subplotspec = subplotspec
+
+ def update_params(self):
+ """update the subplot position from fig.subplotpars"""
+
+ self.figbox, self.rowNum, self.colNum, self.numRows, self.numCols = \
+ self.get_subplotspec().get_position(self.figure,
+ return_all=True)
+
+ def is_first_col(self):
+ return self.colNum == 0
+
+ def is_first_row(self):
+ return self.rowNum == 0
+
+ def is_last_row(self):
+ return self.rowNum == self.numRows - 1
+
+ def is_last_col(self):
+ return self.colNum == self.numCols - 1
+
+ # COVERAGE NOTE: Never used internally.
+ def label_outer(self):
+ """Only show "outer" labels and tick labels.
+
+ x-labels are only kept for subplots on the last row; y-labels only for
+ subplots on the first column.
+ """
+ lastrow = self.is_last_row()
+ firstcol = self.is_first_col()
+ if not lastrow:
+ for label in self.get_xticklabels(which="both"):
+ label.set_visible(False)
+ self.get_xaxis().get_offset_text().set_visible(False)
+ self.set_xlabel("")
+ if not firstcol:
+ for label in self.get_yticklabels(which="both"):
+ label.set_visible(False)
+ self.get_yaxis().get_offset_text().set_visible(False)
+ self.set_ylabel("")
+
+ def _make_twin_axes(self, *kl, **kwargs):
+ """
+ Make a twinx axes of self. This is used for twinx and twiny.
+ """
+ from matplotlib.projections import process_projection_requirements
+ if 'sharex' in kwargs and 'sharey' in kwargs:
+ # The following line is added in v2.2 to avoid breaking Seaborn,
+ # which currently uses this internal API.
+ if kwargs["sharex"] is not self and kwargs["sharey"] is not self:
+ raise ValueError("Twinned Axes may share only one axis.")
+ kl = (self.get_subplotspec(),) + kl
+ projection_class, kwargs, key = process_projection_requirements(
+ self.figure, *kl, **kwargs)
+
+ ax2 = subplot_class_factory(projection_class)(self.figure,
+ *kl, **kwargs)
+ self.figure.add_subplot(ax2)
+ self.set_adjustable('datalim')
+ ax2.set_adjustable('datalim')
+
+ if self._layoutbox is not None and ax2._layoutbox is not None:
+ # make the layout boxes be explicitly the same
+ ax2._layoutbox.constrain_same(self._layoutbox)
+ ax2._poslayoutbox.constrain_same(self._poslayoutbox)
+
+ self._twinned_axes.join(self, ax2)
+ return ax2
+
+_subplot_classes = {}
+
+
+def subplot_class_factory(axes_class=None):
+ # This makes a new class that inherits from SubplotBase and the
+ # given axes_class (which is assumed to be a subclass of Axes).
+ # This is perhaps a little bit roundabout to make a new class on
+ # the fly like this, but it means that a new Subplot class does
+ # not have to be created for every type of Axes.
+ if axes_class is None:
+ axes_class = Axes
+
+ new_class = _subplot_classes.get(axes_class)
+ if new_class is None:
+ new_class = type(str("%sSubplot") % (axes_class.__name__),
+ (SubplotBase, axes_class),
+ {'_axes_class': axes_class})
+ _subplot_classes[axes_class] = new_class
+
+ return new_class
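+
+# For example (illustrative only): ``subplot_class_factory()`` returns a class
+# named ``AxesSubplot`` that inherits from both SubplotBase and Axes, and
+# repeated calls with the same axes class return the same cached class object.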
+
+# This is provided for backward compatibility
+Subplot = subplot_class_factory()
+
+
+class _PicklableSubplotClassConstructor(object):
+ """
+ This stub class exists to return the appropriate subplot
+ class when __call__-ed with an axes class. This is purely to
+ allow Pickling of Axes and Subplots.
+ """
+ def __call__(self, axes_class):
+ # create a dummy object instance
+ subplot_instance = _PicklableSubplotClassConstructor()
+ subplot_class = subplot_class_factory(axes_class)
+ # update the class to the desired subplot class
+ subplot_instance.__class__ = subplot_class
+ return subplot_instance
+
+
+docstring.interpd.update(Axes=martist.kwdoc(Axes))
+docstring.interpd.update(Subplot=martist.kwdoc(Axes))
+
+"""
+# this is some discarded code I was using to find the minimum positive
+# data point for some log scaling fixes. I realized there was a
+# cleaner way to do it, but am keeping this around as an example for
+# how to get the data out of the axes. Might want to make something
+# like this a method one day, or better yet make get_verts an Artist
+# method
+
+ minx, maxx = self.get_xlim()
+ if minx<=0 or maxx<=0:
+ # find the min pos value in the data
+ xs = []
+ for line in self.lines:
+ xs.extend(line.get_xdata(orig=False))
+ for patch in self.patches:
+ xs.extend([x for x,y in patch.get_verts()])
+ for collection in self.collections:
+ xs.extend([x for x,y in collection.get_verts()])
+ posx = [x for x in xs if x>0]
+ if len(posx):
+
+ minx = min(posx)
+ maxx = max(posx)
+ # warning, probably breaks inverted axis
+ self.set_xlim((0.1*minx, maxx))
+
+"""
diff --git a/contrib/python/matplotlib/py2/matplotlib/axis.py b/contrib/python/matplotlib/py2/matplotlib/axis.py
new file mode 100644
index 00000000000..48c31ae6c0c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/axis.py
@@ -0,0 +1,2509 @@
+"""
+Classes for the ticks and the x and y axes
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import logging
+
+from matplotlib import rcParams
+import matplotlib.artist as artist
+from matplotlib.artist import allow_rasterization
+import matplotlib.cbook as cbook
+from matplotlib.cbook import _string_to_bool
+import matplotlib.font_manager as font_manager
+import matplotlib.lines as mlines
+import matplotlib.patches as mpatches
+import matplotlib.scale as mscale
+import matplotlib.text as mtext
+import matplotlib.ticker as mticker
+import matplotlib.transforms as mtransforms
+import matplotlib.units as munits
+import numpy as np
+import warnings
+
+_log = logging.getLogger(__name__)
+
+GRIDLINE_INTERPOLATION_STEPS = 180
+
+# This list is being used for compatibility with Axes.grid, which
+# allows all Line2D kwargs.
+_line_AI = artist.ArtistInspector(mlines.Line2D)
+_line_param_names = _line_AI.get_setters()
+_line_param_aliases = [list(d.keys())[0] for d in _line_AI.aliasd.values()]
+_gridline_param_names = ['grid_' + name
+ for name in _line_param_names + _line_param_aliases]
+
+
+class Tick(artist.Artist):
+ """
+ Abstract base class for the axis ticks, grid lines and labels
+
+ 1 refers to the bottom of the plot for xticks and the left for yticks
+ 2 refers to the top of the plot for xticks and the right for yticks
+
+ Attributes
+ ----------
+ tick1line : Line2D
+
+ tick2line : Line2D
+
+ gridline : Line2D
+
+ label1 : Text
+
+ label2 : Text
+
+ gridOn : bool
+        Determines whether to draw the grid line.
+
+ tick1On : bool
+ Determines whether to draw the first tickline.
+
+ tick2On : bool
+ Determines whether to draw the second tickline.
+
+ label1On : bool
+ Determines whether to draw the first tick label.
+
+ label2On : bool
+ Determines whether to draw the second tick label.
+ """
+ def __init__(self, axes, loc, label,
+ size=None, # points
+ width=None,
+ color=None,
+ tickdir=None,
+ pad=None,
+ labelsize=None,
+ labelcolor=None,
+ zorder=None,
+ gridOn=None, # defaults to axes.grid depending on
+ # axes.grid.which
+ tick1On=True,
+ tick2On=True,
+ label1On=True,
+ label2On=False,
+ major=True,
+ labelrotation=0,
+ grid_color=None,
+ grid_linestyle=None,
+ grid_linewidth=None,
+ grid_alpha=None,
+ **kw # Other Line2D kwargs applied to gridlines.
+ ):
+ """
+        *loc* is the tick location in data coords and *size* is the tick
+        size in points.
+ """
+ artist.Artist.__init__(self)
+
+ if gridOn is None:
+ if major and (rcParams['axes.grid.which'] in ('both', 'major')):
+ gridOn = rcParams['axes.grid']
+ elif (not major) and (rcParams['axes.grid.which']
+ in ('both', 'minor')):
+ gridOn = rcParams['axes.grid']
+ else:
+ gridOn = False
+
+ self.set_figure(axes.figure)
+ self.axes = axes
+
+ name = self.__name__.lower()
+ self._name = name
+
+ self._loc = loc
+
+ if size is None:
+ if major:
+ size = rcParams['%s.major.size' % name]
+ else:
+ size = rcParams['%s.minor.size' % name]
+ self._size = size
+
+ if width is None:
+ if major:
+ width = rcParams['%s.major.width' % name]
+ else:
+ width = rcParams['%s.minor.width' % name]
+ self._width = width
+
+ if color is None:
+ color = rcParams['%s.color' % name]
+ self._color = color
+
+ if pad is None:
+ if major:
+ pad = rcParams['%s.major.pad' % name]
+ else:
+ pad = rcParams['%s.minor.pad' % name]
+ self._base_pad = pad
+
+ if labelcolor is None:
+ labelcolor = rcParams['%s.color' % name]
+ self._labelcolor = labelcolor
+
+ if labelsize is None:
+ labelsize = rcParams['%s.labelsize' % name]
+ self._labelsize = labelsize
+
+ self._set_labelrotation(labelrotation)
+
+ if zorder is None:
+ if major:
+ zorder = mlines.Line2D.zorder + 0.01
+ else:
+ zorder = mlines.Line2D.zorder
+ self._zorder = zorder
+
+ self._grid_color = (rcParams['grid.color']
+ if grid_color is None else grid_color)
+ self._grid_linestyle = (rcParams['grid.linestyle']
+ if grid_linestyle is None else grid_linestyle)
+ self._grid_linewidth = (rcParams['grid.linewidth']
+ if grid_linewidth is None else grid_linewidth)
+ self._grid_alpha = (rcParams['grid.alpha']
+ if grid_alpha is None else grid_alpha)
+
+ self._grid_kw = {k[5:]: v for k, v in kw.items()}
+
+ self.apply_tickdir(tickdir)
+
+ self.tick1line = self._get_tick1line()
+ self.tick2line = self._get_tick2line()
+ self.gridline = self._get_gridline()
+
+ self.label1 = self._get_text1()
+ self.label = self.label1 # legacy name
+ self.label2 = self._get_text2()
+
+ self.gridOn = gridOn
+ self.tick1On = tick1On
+ self.tick2On = tick2On
+ self.label1On = label1On
+ self.label2On = label2On
+
+ self.update_position(loc)
+
+ def _set_labelrotation(self, labelrotation):
+ if isinstance(labelrotation, six.string_types):
+ mode = labelrotation
+ angle = 0
+ elif isinstance(labelrotation, (tuple, list)):
+ mode, angle = labelrotation
+ else:
+ mode = 'default'
+ angle = labelrotation
+ if mode not in ('auto', 'default'):
+ raise ValueError("Label rotation mode must be 'default' or "
+ "'auto', not '{}'.".format(mode))
+ self._labelrotation = (mode, angle)
+
+ def apply_tickdir(self, tickdir):
+ """
+ Calculate self._pad and self._tickmarkers
+ """
+ pass
+
+ def get_tickdir(self):
+ return self._tickdir
+
+ def get_tick_padding(self):
+ """
+ Get the length of the tick outside of the axes.
+ """
+ padding = {
+ 'in': 0.0,
+ 'inout': 0.5,
+ 'out': 1.0
+ }
+ return self._size * padding[self._tickdir]
+
+ def get_children(self):
+ children = [self.tick1line, self.tick2line,
+ self.gridline, self.label1, self.label2]
+ return children
+
+ def set_clip_path(self, clippath, transform=None):
+ artist.Artist.set_clip_path(self, clippath, transform)
+ self.gridline.set_clip_path(clippath, transform)
+ self.stale = True
+
+ set_clip_path.__doc__ = artist.Artist.set_clip_path.__doc__
+
+ def get_pad_pixels(self):
+ return self.figure.dpi * self._base_pad / 72.0
+
+ def contains(self, mouseevent):
+ """
+ Test whether the mouse event occurred in the Tick marks.
+
+        This function always returns False. It is more useful to test whether
+        the axis as a whole contains the mouse rather than the set of tick
+        marks.
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+ return False, {}
+
+ def set_pad(self, val):
+ """
+ Set the tick label pad in points
+
+ ACCEPTS: float
+ """
+ self._apply_params(pad=val)
+ self.stale = True
+
+ def get_pad(self):
+ 'Get the value of the tick label pad in points'
+ return self._base_pad
+
+ def _get_text1(self):
+ 'Get the default Text 1 instance'
+ pass
+
+ def _get_text2(self):
+ 'Get the default Text 2 instance'
+ pass
+
+ def _get_tick1line(self):
+ 'Get the default line2D instance for tick1'
+ pass
+
+ def _get_tick2line(self):
+ 'Get the default line2D instance for tick2'
+ pass
+
+ def _get_gridline(self):
+ 'Get the default grid Line2d instance for this tick'
+ pass
+
+ def get_loc(self):
+ 'Return the tick location (data coords) as a scalar'
+ return self._loc
+
+ @allow_rasterization
+ def draw(self, renderer):
+ if not self.get_visible():
+ self.stale = False
+ return
+
+ renderer.open_group(self.__name__)
+ if self.gridOn:
+ self.gridline.draw(renderer)
+ if self.tick1On:
+ self.tick1line.draw(renderer)
+ if self.tick2On:
+ self.tick2line.draw(renderer)
+
+ if self.label1On:
+ self.label1.draw(renderer)
+ if self.label2On:
+ self.label2.draw(renderer)
+ renderer.close_group(self.__name__)
+
+ self.stale = False
+
+ def set_label1(self, s):
+ """
+        Set the text of ticklabel1
+
+ ACCEPTS: str
+ """
+ self.label1.set_text(s)
+ self.stale = True
+
+ set_label = set_label1
+
+ def set_label2(self, s):
+ """
+ Set the text of ticklabel2
+
+ ACCEPTS: str
+ """
+ self.label2.set_text(s)
+ self.stale = True
+
+ def _set_artist_props(self, a):
+ a.set_figure(self.figure)
+
+ def get_view_interval(self):
+ 'return the view Interval instance for the axis this tick is ticking'
+ raise NotImplementedError('Derived must override')
+
+ def _apply_params(self, **kw):
+ switchkw = ['gridOn', 'tick1On', 'tick2On', 'label1On', 'label2On']
+ switches = [k for k in kw if k in switchkw]
+ for k in switches:
+ setattr(self, k, kw.pop(k))
+ newmarker = [k for k in kw if k in ['size', 'width', 'pad', 'tickdir']]
+ if newmarker:
+ self._size = kw.pop('size', self._size)
+ # Width could be handled outside this block, but it is
+ # convenient to leave it here.
+ self._width = kw.pop('width', self._width)
+ self._base_pad = kw.pop('pad', self._base_pad)
+ # apply_tickdir uses _size and _base_pad to make _pad,
+ # and also makes _tickmarkers.
+ self.apply_tickdir(kw.pop('tickdir', self._tickdir))
+ self.tick1line.set_marker(self._tickmarkers[0])
+ self.tick2line.set_marker(self._tickmarkers[1])
+ for line in (self.tick1line, self.tick2line):
+ line.set_markersize(self._size)
+ line.set_markeredgewidth(self._width)
+ # _get_text1_transform uses _pad from apply_tickdir.
+ trans = self._get_text1_transform()[0]
+ self.label1.set_transform(trans)
+ trans = self._get_text2_transform()[0]
+ self.label2.set_transform(trans)
+ tick_kw = {k: v for k, v in six.iteritems(kw)
+ if k in ['color', 'zorder']}
+ if tick_kw:
+ self.tick1line.set(**tick_kw)
+ self.tick2line.set(**tick_kw)
+ for k, v in six.iteritems(tick_kw):
+ setattr(self, '_' + k, v)
+
+ if 'labelrotation' in kw:
+ self._set_labelrotation(kw.pop('labelrotation'))
+ self.label1.set(rotation=self._labelrotation[1])
+ self.label2.set(rotation=self._labelrotation[1])
+
+ label_list = [k for k in six.iteritems(kw)
+ if k[0] in ['labelsize', 'labelcolor']]
+ if label_list:
+ label_kw = {k[5:]: v for k, v in label_list}
+ self.label1.set(**label_kw)
+ self.label2.set(**label_kw)
+ for k, v in six.iteritems(label_kw):
+                # For labelsize the text objects convert str ('small')
+                # -> points; grab the numeric value from the `Text` object
+                # instead of saving the string representation.
+ v = getattr(self.label1, 'get_' + k)()
+ setattr(self, '_label' + k, v)
+
+ grid_list = [k for k in six.iteritems(kw)
+ if k[0] in _gridline_param_names]
+ if grid_list:
+ grid_kw = {k[5:]: v for k, v in grid_list}
+ self.gridline.set(**grid_kw)
+ for k, v in six.iteritems(grid_kw):
+ setattr(self, '_grid_' + k, v)
+
+ def update_position(self, loc):
+ 'Set the location of tick in data coords with scalar *loc*'
+ raise NotImplementedError('Derived must override')
+
+ def _get_text1_transform(self):
+ raise NotImplementedError('Derived must override')
+
+ def _get_text2_transform(self):
+ raise NotImplementedError('Derived must override')
+
+
+class XTick(Tick):
+ """
+ Contains all the Artists needed to make an x tick - the tick line,
+ the label text and the grid line
+ """
+ __name__ = 'xtick'
+
+ def _get_text1_transform(self):
+ return self.axes.get_xaxis_text1_transform(self._pad)
+
+ def _get_text2_transform(self):
+ return self.axes.get_xaxis_text2_transform(self._pad)
+
+ def apply_tickdir(self, tickdir):
+ if tickdir is None:
+ tickdir = rcParams['%s.direction' % self._name]
+ self._tickdir = tickdir
+
+ if self._tickdir == 'in':
+ self._tickmarkers = (mlines.TICKUP, mlines.TICKDOWN)
+ elif self._tickdir == 'inout':
+ self._tickmarkers = ('|', '|')
+ else:
+ self._tickmarkers = (mlines.TICKDOWN, mlines.TICKUP)
+ self._pad = self._base_pad + self.get_tick_padding()
+ self.stale = True
+
+ def _get_text1(self):
+ 'Get the default Text instance'
+ # the y loc is 3 points below the min of y axis
+ # get the affine as an a,b,c,d,tx,ty list
+ # x in data coords, y in axes coords
+ trans, vert, horiz = self._get_text1_transform()
+ t = mtext.Text(
+ x=0, y=0,
+ fontproperties=font_manager.FontProperties(size=self._labelsize),
+ color=self._labelcolor,
+ verticalalignment=vert,
+ horizontalalignment=horiz,
+ )
+ t.set_transform(trans)
+ self._set_artist_props(t)
+ return t
+
+ def _get_text2(self):
+
+ 'Get the default Text 2 instance'
+ # x in data coords, y in axes coords
+ trans, vert, horiz = self._get_text2_transform()
+ t = mtext.Text(
+ x=0, y=1,
+ fontproperties=font_manager.FontProperties(size=self._labelsize),
+ color=self._labelcolor,
+ verticalalignment=vert,
+ horizontalalignment=horiz,
+ )
+ t.set_transform(trans)
+ self._set_artist_props(t)
+ return t
+
+ def _get_tick1line(self):
+ 'Get the default line2D instance'
+ # x in data coords, y in axes coords
+ l = mlines.Line2D(xdata=(0,), ydata=(0,), color=self._color,
+ linestyle='None', marker=self._tickmarkers[0],
+ markersize=self._size,
+ markeredgewidth=self._width, zorder=self._zorder)
+ l.set_transform(self.axes.get_xaxis_transform(which='tick1'))
+ self._set_artist_props(l)
+ return l
+
+ def _get_tick2line(self):
+ 'Get the default line2D instance'
+ # x in data coords, y in axes coords
+ l = mlines.Line2D(xdata=(0,), ydata=(1,),
+ color=self._color,
+ linestyle='None',
+ marker=self._tickmarkers[1],
+ markersize=self._size,
+ markeredgewidth=self._width,
+ zorder=self._zorder)
+
+ l.set_transform(self.axes.get_xaxis_transform(which='tick2'))
+ self._set_artist_props(l)
+ return l
+
+ def _get_gridline(self):
+ 'Get the default line2D instance'
+ # x in data coords, y in axes coords
+ l = mlines.Line2D(xdata=(0.0, 0.0), ydata=(0, 1.0),
+ color=self._grid_color,
+ linestyle=self._grid_linestyle,
+ linewidth=self._grid_linewidth,
+ alpha=self._grid_alpha,
+ markersize=0,
+ **self._grid_kw)
+ l.set_transform(self.axes.get_xaxis_transform(which='grid'))
+ l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS
+ self._set_artist_props(l)
+
+ return l
+
+ def update_position(self, loc):
+ 'Set the location of tick in data coords with scalar *loc*'
+ if self.tick1On:
+ self.tick1line.set_xdata((loc,))
+ if self.tick2On:
+ self.tick2line.set_xdata((loc,))
+ if self.gridOn:
+ self.gridline.set_xdata((loc,))
+ if self.label1On:
+ self.label1.set_x(loc)
+ if self.label2On:
+ self.label2.set_x(loc)
+
+ self._loc = loc
+ self.stale = True
+
+ def get_view_interval(self):
+ 'return the Interval instance for this axis view limits'
+ return self.axes.viewLim.intervalx
+
+
+class YTick(Tick):
+ """
+ Contains all the Artists needed to make a Y tick - the tick line,
+ the label text and the grid line
+ """
+ __name__ = 'ytick'
+
+ def _get_text1_transform(self):
+ return self.axes.get_yaxis_text1_transform(self._pad)
+
+ def _get_text2_transform(self):
+ return self.axes.get_yaxis_text2_transform(self._pad)
+
+ def apply_tickdir(self, tickdir):
+ if tickdir is None:
+ tickdir = rcParams['%s.direction' % self._name]
+ self._tickdir = tickdir
+
+ if self._tickdir == 'in':
+ self._tickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)
+ elif self._tickdir == 'inout':
+ self._tickmarkers = ('_', '_')
+ else:
+ self._tickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)
+ self._pad = self._base_pad + self.get_tick_padding()
+ self.stale = True
+
+    # how far from the y-axis line the right edge of the ticklabels is
+ def _get_text1(self):
+ 'Get the default Text instance'
+ # x in axes coords, y in data coords
+ trans, vert, horiz = self._get_text1_transform()
+ t = mtext.Text(
+ x=0, y=0,
+ fontproperties=font_manager.FontProperties(size=self._labelsize),
+ color=self._labelcolor,
+ verticalalignment=vert,
+ horizontalalignment=horiz,
+ )
+ t.set_transform(trans)
+ self._set_artist_props(t)
+ return t
+
+ def _get_text2(self):
+ 'Get the default Text instance'
+ # x in axes coords, y in data coords
+ trans, vert, horiz = self._get_text2_transform()
+ t = mtext.Text(
+ x=1, y=0,
+ fontproperties=font_manager.FontProperties(size=self._labelsize),
+ color=self._labelcolor,
+ verticalalignment=vert,
+ horizontalalignment=horiz,
+ )
+ t.set_transform(trans)
+ self._set_artist_props(t)
+ return t
+
+ def _get_tick1line(self):
+ 'Get the default line2D instance'
+ # x in axes coords, y in data coords
+
+ l = mlines.Line2D((0,), (0,),
+ color=self._color,
+ marker=self._tickmarkers[0],
+ linestyle='None',
+ markersize=self._size,
+ markeredgewidth=self._width,
+ zorder=self._zorder)
+ l.set_transform(self.axes.get_yaxis_transform(which='tick1'))
+ self._set_artist_props(l)
+ return l
+
+ def _get_tick2line(self):
+ 'Get the default line2D instance'
+ # x in axes coords, y in data coords
+ l = mlines.Line2D((1,), (0,),
+ color=self._color,
+ marker=self._tickmarkers[1],
+ linestyle='None',
+ markersize=self._size,
+ markeredgewidth=self._width,
+ zorder=self._zorder)
+ l.set_transform(self.axes.get_yaxis_transform(which='tick2'))
+ self._set_artist_props(l)
+ return l
+
+ def _get_gridline(self):
+ 'Get the default line2D instance'
+ # x in axes coords, y in data coords
+ l = mlines.Line2D(xdata=(0, 1), ydata=(0, 0),
+ color=self._grid_color,
+ linestyle=self._grid_linestyle,
+ linewidth=self._grid_linewidth,
+ alpha=self._grid_alpha,
+ markersize=0,
+ **self._grid_kw)
+ l.set_transform(self.axes.get_yaxis_transform(which='grid'))
+ l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS
+ self._set_artist_props(l)
+ return l
+
+ def update_position(self, loc):
+ 'Set the location of tick in data coords with scalar *loc*'
+ if self.tick1On:
+ self.tick1line.set_ydata((loc,))
+ if self.tick2On:
+ self.tick2line.set_ydata((loc,))
+ if self.gridOn:
+ self.gridline.set_ydata((loc,))
+ if self.label1On:
+ self.label1.set_y(loc)
+ if self.label2On:
+ self.label2.set_y(loc)
+
+ self._loc = loc
+ self.stale = True
+
+ def get_view_interval(self):
+ 'return the Interval instance for this axis view limits'
+ return self.axes.viewLim.intervaly
+
+
+class Ticker(object):
+ locator = None
+ formatter = None
+
+
+class _LazyTickList(object):
+ """
+ A descriptor for lazy instantiation of tick lists.
+
+ See comment above definition of the ``majorTicks`` and ``minorTicks``
+ attributes.
+ """
+
+ def __init__(self, major):
+ self._major = major
+
+ def __get__(self, instance, cls):
+ if instance is None:
+ return self
+ else:
+ # instance._get_tick() can itself try to access the majorTicks
+ # attribute (e.g. in certain projection classes which override
+ # e.g. get_xaxis_text1_transform). In order to avoid infinite
+ # recursion, first set the majorTicks on the instance to an empty
+ # list, then create the tick and append it.
+ if self._major:
+ instance.majorTicks = []
+ tick = instance._get_tick(major=True)
+ instance.majorTicks.append(tick)
+ return instance.majorTicks
+ else:
+ instance.minorTicks = []
+ tick = instance._get_tick(major=False)
+ instance.minorTicks.append(tick)
+ return instance.minorTicks
+
+
+class Axis(artist.Artist):
+ """
+ Public attributes
+
+ * :attr:`axes.transData` - transform data coords to display coords
+    * :attr:`axes.transAxes` - transform axes coords to display coords
+ * :attr:`labelpad` - number of points between the axis and its label
+ """
+ OFFSETTEXTPAD = 3
+
+ def __str__(self):
+ return self.__class__.__name__ \
+ + "(%f,%f)" % tuple(self.axes.transAxes.transform_point((0, 0)))
+
+ def __init__(self, axes, pickradius=15):
+ """
+ Init the axis with the parent Axes instance
+ """
+ artist.Artist.__init__(self)
+ self.set_figure(axes.figure)
+
+ self.isDefault_label = True
+
+ self.axes = axes
+ self.major = Ticker()
+ self.minor = Ticker()
+ self.callbacks = cbook.CallbackRegistry()
+
+ self._autolabelpos = True
+ self._smart_bounds = False
+
+ self.label = self._get_label()
+ self.labelpad = rcParams['axes.labelpad']
+ self.offsetText = self._get_offset_text()
+
+ self.pickradius = pickradius
+
+ # Initialize here for testing; later add API
+ self._major_tick_kw = dict()
+ self._minor_tick_kw = dict()
+
+ self.cla()
+ self._set_scale('linear')
+
+ # During initialization, Axis objects often create ticks that are later
+ # unused; this turns out to be a very slow step. Instead, use a custom
+ # descriptor to make the tick lists lazy and instantiate them as needed.
+ majorTicks = _LazyTickList(major=True)
+ minorTicks = _LazyTickList(major=False)
+
+ def set_label_coords(self, x, y, transform=None):
+ """
+ Set the coordinates of the label. By default, the x
+ coordinate of the y label is determined by the tick label
+ bounding boxes, but this can lead to poor alignment of
+ multiple ylabels if there are multiple axes. Ditto for the y
+ coordinate of the x label.
+
+        You can also specify the coordinate system of the label with the
+        *transform* argument. If None, the default coordinate system is the
+        axes coordinate system, where (0, 0) is (left, bottom), (0.5, 0.5)
+        is the middle, etc.
+
+ """
+
+ self._autolabelpos = False
+ if transform is None:
+ transform = self.axes.transAxes
+
+ self.label.set_transform(transform)
+ self.label.set_position((x, y))
+ self.stale = True
+
+ def get_transform(self):
+ return self._scale.get_transform()
+
+ def get_scale(self):
+ return self._scale.name
+
+ def _set_scale(self, value, **kwargs):
+ self._scale = mscale.scale_factory(value, self, **kwargs)
+ self._scale.set_default_locators_and_formatters(self)
+
+ self.isDefault_majloc = True
+ self.isDefault_minloc = True
+ self.isDefault_majfmt = True
+ self.isDefault_minfmt = True
+
+ def limit_range_for_scale(self, vmin, vmax):
+ return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())
+
+ @property
+ @cbook.deprecated("2.2.0")
+ def unit_data(self):
+ return self.units
+
+ @unit_data.setter
+ @cbook.deprecated("2.2.0")
+ def unit_data(self, unit_data):
+ self.set_units(unit_data)
+
+ def get_children(self):
+ children = [self.label, self.offsetText]
+ majorticks = self.get_major_ticks()
+ minorticks = self.get_minor_ticks()
+
+ children.extend(majorticks)
+ children.extend(minorticks)
+ return children
+
+ def cla(self):
+ 'clear the current axis'
+
+ self.label.set_text('') # self.set_label_text would change isDefault_
+
+ self._set_scale('linear')
+
+ # Clear the callback registry for this axis, or it may "leak"
+ self.callbacks = cbook.CallbackRegistry()
+
+ # whether the grids are on
+ self._gridOnMajor = (rcParams['axes.grid'] and
+ rcParams['axes.grid.which'] in ('both', 'major'))
+ self._gridOnMinor = (rcParams['axes.grid'] and
+ rcParams['axes.grid.which'] in ('both', 'minor'))
+
+ self.reset_ticks()
+
+ self.converter = None
+ self.units = None
+ self.set_units(None)
+ self.stale = True
+
+ def reset_ticks(self):
+ """
+ Re-initialize the major and minor Tick lists.
+
+ Each list starts with a single fresh Tick.
+ """
+ # Restore the lazy tick lists.
+ try:
+ del self.majorTicks
+ except AttributeError:
+ pass
+ try:
+ del self.minorTicks
+ except AttributeError:
+ pass
+ try:
+ self.set_clip_path(self.axes.patch)
+ except AttributeError:
+ pass
+
+ def set_tick_params(self, which='major', reset=False, **kw):
+ """
+ Set appearance parameters for ticks, ticklabels, and gridlines.
+
+ For documentation of keyword arguments, see
+ :meth:`matplotlib.axes.Axes.tick_params`.
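+
+        A minimal sketch (assuming *ax* is an existing Axes)::
+
+            ax.xaxis.set_tick_params(which='major', length=6, direction='in',
+                                     labelcolor='gray')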
+ """
+ dicts = []
+ if which == 'major' or which == 'both':
+ dicts.append(self._major_tick_kw)
+ if which == 'minor' or which == 'both':
+ dicts.append(self._minor_tick_kw)
+ kwtrans = self._translate_tick_kw(kw, to_init_kw=True)
+ for d in dicts:
+ if reset:
+ d.clear()
+ d.update(kwtrans)
+
+ if reset:
+ self.reset_ticks()
+ else:
+ if which == 'major' or which == 'both':
+ for tick in self.majorTicks:
+ tick._apply_params(**self._major_tick_kw)
+ if which == 'minor' or which == 'both':
+ for tick in self.minorTicks:
+ tick._apply_params(**self._minor_tick_kw)
+ if 'labelcolor' in kwtrans:
+ self.offsetText.set_color(kwtrans['labelcolor'])
+ self.stale = True
+
+ @staticmethod
+ def _translate_tick_kw(kw, to_init_kw=True):
+ # The following lists may be moved to a more
+ # accessible location.
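+        # For example (illustrative), a call like
+        #   _translate_tick_kw({'length': 6, 'direction': 'in',
+        #                       'labelbottom': False})
+        # is expected to yield
+        #   {'size': 6, 'tickdir': 'in', 'label1On': False}.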
+ kwkeys0 = ['size', 'width', 'color', 'tickdir', 'pad',
+ 'labelsize', 'labelcolor', 'zorder', 'gridOn',
+ 'tick1On', 'tick2On', 'label1On', 'label2On']
+ kwkeys1 = ['length', 'direction', 'left', 'bottom', 'right', 'top',
+ 'labelleft', 'labelbottom', 'labelright', 'labeltop',
+ 'labelrotation']
+ kwkeys2 = _gridline_param_names
+ kwkeys = kwkeys0 + kwkeys1 + kwkeys2
+ kwtrans = dict()
+ if to_init_kw:
+ if 'length' in kw:
+ kwtrans['size'] = kw.pop('length')
+ if 'direction' in kw:
+ kwtrans['tickdir'] = kw.pop('direction')
+ if 'rotation' in kw:
+ kwtrans['labelrotation'] = kw.pop('rotation')
+ if 'left' in kw:
+ kwtrans['tick1On'] = _string_to_bool(kw.pop('left'))
+ if 'bottom' in kw:
+ kwtrans['tick1On'] = _string_to_bool(kw.pop('bottom'))
+ if 'right' in kw:
+ kwtrans['tick2On'] = _string_to_bool(kw.pop('right'))
+ if 'top' in kw:
+ kwtrans['tick2On'] = _string_to_bool(kw.pop('top'))
+
+ if 'labelleft' in kw:
+ kwtrans['label1On'] = _string_to_bool(kw.pop('labelleft'))
+ if 'labelbottom' in kw:
+ kwtrans['label1On'] = _string_to_bool(kw.pop('labelbottom'))
+ if 'labelright' in kw:
+ kwtrans['label2On'] = _string_to_bool(kw.pop('labelright'))
+ if 'labeltop' in kw:
+ kwtrans['label2On'] = _string_to_bool(kw.pop('labeltop'))
+ if 'colors' in kw:
+ c = kw.pop('colors')
+ kwtrans['color'] = c
+ kwtrans['labelcolor'] = c
+ # Maybe move the checking up to the caller of this method.
+ for key in kw:
+ if key not in kwkeys:
+ raise ValueError(
+ "keyword %s is not recognized; valid keywords are %s"
+ % (key, kwkeys))
+ kwtrans.update(kw)
+ else:
+ raise NotImplementedError("Inverse translation is deferred")
+ return kwtrans
+
+ def set_clip_path(self, clippath, transform=None):
+ artist.Artist.set_clip_path(self, clippath, transform)
+ for child in self.majorTicks + self.minorTicks:
+ child.set_clip_path(clippath, transform)
+ self.stale = True
+
+ def get_view_interval(self):
+ 'return the Interval instance for this axis view limits'
+ raise NotImplementedError('Derived must override')
+
+ def set_view_interval(self, vmin, vmax, ignore=False):
+ raise NotImplementedError('Derived must override')
+
+ def get_data_interval(self):
+ 'return the Interval instance for this axis data limits'
+ raise NotImplementedError('Derived must override')
+
+ def set_data_interval(self):
+ '''set the axis data limits'''
+ raise NotImplementedError('Derived must override')
+
+ def set_default_intervals(self):
+ '''set the default limits for the axis data and view interval if they
+ are not mutated'''
+
+ # this is mainly in support of custom object plotting. For
+ # example, if someone passes in a datetime object, we do not
+ # know automagically how to set the default min/max of the
+ # data and view limits. The unit conversion AxisInfo
+ # interface provides a hook for custom types to register
+ # default limits through the AxisInfo.default_limits
+ # attribute, and the derived code below will check for that
+        # and use it if it is available (else just use 0..1)
+ pass
+
+ def _set_artist_props(self, a):
+ if a is None:
+ return
+ a.set_figure(self.figure)
+
+ def iter_ticks(self):
+ """
+ Iterate through all of the major and minor ticks.
+ """
+ majorLocs = self.major.locator()
+ majorTicks = self.get_major_ticks(len(majorLocs))
+ self.major.formatter.set_locs(majorLocs)
+ majorLabels = [self.major.formatter(val, i)
+ for i, val in enumerate(majorLocs)]
+
+ minorLocs = self.minor.locator()
+ minorTicks = self.get_minor_ticks(len(minorLocs))
+ self.minor.formatter.set_locs(minorLocs)
+ minorLabels = [self.minor.formatter(val, i)
+ for i, val in enumerate(minorLocs)]
+
+ major_minor = [
+ (majorTicks, majorLocs, majorLabels),
+ (minorTicks, minorLocs, minorLabels)]
+
+ for group in major_minor:
+ for tick in zip(*group):
+ yield tick
+
+ def get_ticklabel_extents(self, renderer):
+ """
+ Get the extents of the tick labels on either side
+ of the axes.
+ """
+
+ ticks_to_draw = self._update_ticks(renderer)
+ ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
+ renderer)
+
+ if len(ticklabelBoxes):
+ bbox = mtransforms.Bbox.union(ticklabelBoxes)
+ else:
+ bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)
+ if len(ticklabelBoxes2):
+ bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)
+ else:
+ bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)
+ return bbox, bbox2
+
+ def set_smart_bounds(self, value):
+ """set the axis to have smart bounds"""
+ self._smart_bounds = value
+ self.stale = True
+
+ def get_smart_bounds(self):
+ """get whether the axis has smart bounds"""
+ return self._smart_bounds
+
+ def _update_ticks(self, renderer):
+ """
+ Update ticks (position and labels) using the current data
+ interval of the axes. Returns a list of ticks that will be
+ drawn.
+ """
+
+ interval = self.get_view_interval()
+ tick_tups = list(self.iter_ticks()) # iter_ticks calls the locator
+ if self._smart_bounds and tick_tups:
+ # handle inverted limits
+ view_low, view_high = sorted(interval)
+ data_low, data_high = sorted(self.get_data_interval())
+ locs = np.sort([ti[1] for ti in tick_tups])
+ if data_low <= view_low:
+ # data extends beyond view, take view as limit
+ ilow = view_low
+ else:
+ # data stops within view, take best tick
+ good_locs = locs[locs <= data_low]
+ if len(good_locs):
+ # last tick prior or equal to first data point
+ ilow = good_locs[-1]
+ else:
+ # No ticks (why not?), take first tick
+ ilow = locs[0]
+ if data_high >= view_high:
+ # data extends beyond view, take view as limit
+ ihigh = view_high
+ else:
+ # data stops within view, take best tick
+ good_locs = locs[locs >= data_high]
+ if len(good_locs):
+ # first tick after or equal to last data point
+ ihigh = good_locs[0]
+ else:
+ # No ticks (why not?), take last tick
+ ihigh = locs[-1]
+ tick_tups = [ti for ti in tick_tups if ilow <= ti[1] <= ihigh]
+
+        # So that we don't lose ticks at the ends, expand the interval ever
+        # so slightly. "Ever so slightly" here means the width of half a
+        # pixel; we don't want to draw a tick that lies even one pixel
+        # outside of the defined axis interval.
+ if interval[0] <= interval[1]:
+ interval_expanded = interval
+ else:
+ interval_expanded = interval[1], interval[0]
+
+ if hasattr(self, '_get_pixel_distance_along_axis'):
+ # normally, one does not want to catch all exceptions that
+ # could possibly happen, but it is not clear exactly what
+ # exceptions might arise from a user's projection (their
+ # rendition of the Axis object). So, we catch all, with
+ # the idea that one would rather potentially lose a tick
+ # from one side of the axis or another, rather than see a
+ # stack trace.
+            # We also suppress user warnings here. These result from
+            # invalid numpy calculations that may be caused by out-of-bounds
+            # values on axes with finite allowed intervals, such as geo
+            # projections (e.g. Mollweide).
+ with np.errstate(invalid='ignore'):
+ try:
+ ds1 = self._get_pixel_distance_along_axis(
+ interval_expanded[0], -0.5)
+ except:
+ warnings.warn("Unable to find pixel distance along axis "
+ "for interval padding of ticks; assuming no "
+ "interval padding needed.")
+ ds1 = 0.0
+ if np.isnan(ds1):
+ ds1 = 0.0
+ try:
+ ds2 = self._get_pixel_distance_along_axis(
+ interval_expanded[1], +0.5)
+ except:
+ warnings.warn("Unable to find pixel distance along axis "
+ "for interval padding of ticks; assuming no "
+ "interval padding needed.")
+ ds2 = 0.0
+ if np.isnan(ds2):
+ ds2 = 0.0
+ interval_expanded = (interval_expanded[0] - ds1,
+ interval_expanded[1] + ds2)
+
+ ticks_to_draw = []
+ for tick, loc, label in tick_tups:
+ if tick is None:
+ continue
+ # NB: always update labels and position to avoid issues like #9397
+ tick.update_position(loc)
+ tick.set_label1(label)
+ tick.set_label2(label)
+ if not mtransforms.interval_contains(interval_expanded, loc):
+ continue
+ ticks_to_draw.append(tick)
+
+ return ticks_to_draw
+
+ def _get_tick_bboxes(self, ticks, renderer):
+ """
+        Given the list of ticks, return two lists of bboxes: one for
+        tick label1's and another for tick label2's.
+ """
+
+ ticklabelBoxes = []
+ ticklabelBoxes2 = []
+
+ for tick in ticks:
+ if tick.label1On and tick.label1.get_visible():
+ extent = tick.label1.get_window_extent(renderer)
+ ticklabelBoxes.append(extent)
+ if tick.label2On and tick.label2.get_visible():
+ extent = tick.label2.get_window_extent(renderer)
+ ticklabelBoxes2.append(extent)
+ return ticklabelBoxes, ticklabelBoxes2
+
+ def get_tightbbox(self, renderer):
+ """
+        Return a bounding box that encloses the axis. It only accounts for
+        the tick labels, axis label, and offsetText.
+ """
+ if not self.get_visible():
+ return
+
+ ticks_to_draw = self._update_ticks(renderer)
+
+ self._update_label_position(renderer)
+
+ # go back to just this axis's tick labels
+ ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(
+ ticks_to_draw, renderer)
+
+ self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
+ self.offsetText.set_text(self.major.formatter.get_offset())
+
+ bb = []
+
+ for a in [self.label, self.offsetText]:
+ if a.get_visible():
+ bb.append(a.get_window_extent(renderer))
+
+ bb.extend(ticklabelBoxes)
+ bb.extend(ticklabelBoxes2)
+
+ bb = [b for b in bb if b.width != 0 or b.height != 0]
+ if bb:
+ _bbox = mtransforms.Bbox.union(bb)
+ return _bbox
+ else:
+ return None
+
+ def get_tick_padding(self):
+ values = []
+ if len(self.majorTicks):
+ values.append(self.majorTicks[0].get_tick_padding())
+ if len(self.minorTicks):
+ values.append(self.minorTicks[0].get_tick_padding())
+ if len(values):
+ return max(values)
+ return 0.0
+
+ @allow_rasterization
+ def draw(self, renderer, *args, **kwargs):
+ 'Draw the axis lines, grid lines, tick lines and labels'
+
+ if not self.get_visible():
+ return
+ renderer.open_group(__name__)
+
+ ticks_to_draw = self._update_ticks(renderer)
+ ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
+ renderer)
+
+ for tick in ticks_to_draw:
+ tick.draw(renderer)
+
+        # Scale up the axis label box to also find the neighbors, not
+        # just the tick labels that actually overlap. Note we need a
+        # *copy* of the axis label box because we don't want to scale
+        # the actual bbox.
+
+ self._update_label_position(renderer)
+
+ self.label.draw(renderer)
+
+ self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
+ self.offsetText.set_text(self.major.formatter.get_offset())
+ self.offsetText.draw(renderer)
+
+ if 0: # draw the bounding boxes around the text for debug
+ for tick in self.majorTicks:
+ label = tick.label1
+ mpatches.bbox_artist(label, renderer)
+ mpatches.bbox_artist(self.label, renderer)
+
+ renderer.close_group(__name__)
+ self.stale = False
+
+ def _get_label(self):
+ raise NotImplementedError('Derived must override')
+
+ def _get_offset_text(self):
+ raise NotImplementedError('Derived must override')
+
+ def get_gridlines(self):
+        'Return the grid lines as a list of Line2D instances'
+ ticks = self.get_major_ticks()
+ return cbook.silent_list('Line2D gridline',
+ [tick.gridline for tick in ticks])
+
+ def get_label(self):
+ 'Return the axis label as a Text instance'
+ return self.label
+
+ def get_offset_text(self):
+ 'Return the axis offsetText as a Text instance'
+ return self.offsetText
+
+ def get_pickradius(self):
+ 'Return the depth of the axis used by the picker'
+ return self.pickradius
+
+ def get_majorticklabels(self):
+ 'Return a list of Text instances for the major ticklabels'
+ ticks = self.get_major_ticks()
+ labels1 = [tick.label1 for tick in ticks if tick.label1On]
+ labels2 = [tick.label2 for tick in ticks if tick.label2On]
+ return cbook.silent_list('Text major ticklabel', labels1 + labels2)
+
+ def get_minorticklabels(self):
+ 'Return a list of Text instances for the minor ticklabels'
+ ticks = self.get_minor_ticks()
+ labels1 = [tick.label1 for tick in ticks if tick.label1On]
+ labels2 = [tick.label2 for tick in ticks if tick.label2On]
+ return cbook.silent_list('Text minor ticklabel', labels1 + labels2)
+
+ def get_ticklabels(self, minor=False, which=None):
+ """
+ Get the tick labels as a list of :class:`~matplotlib.text.Text`
+ instances.
+
+ Parameters
+ ----------
+ minor : bool
+ If True return the minor ticklabels,
+ else return the major ticklabels
+
+ which : None, ('minor', 'major', 'both')
+ Overrides `minor`.
+
+ Selects which ticklabels to return
+
+ Returns
+ -------
+ ret : list
+ List of :class:`~matplotlib.text.Text` instances.
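+
+        Examples
+        --------
+        A minimal sketch (assuming *ax* is an existing Axes)::
+
+            for text in ax.xaxis.get_ticklabels(which='major'):
+                text.set_color('gray')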
+ """
+
+ if which is not None:
+ if which == 'minor':
+ return self.get_minorticklabels()
+ elif which == 'major':
+ return self.get_majorticklabels()
+ elif which == 'both':
+ return self.get_majorticklabels() + self.get_minorticklabels()
+ else:
+ raise ValueError("`which` must be one of ('minor', 'major', "
+ "'both') not " + str(which))
+ if minor:
+ return self.get_minorticklabels()
+ return self.get_majorticklabels()
+
+ def get_majorticklines(self):
+ 'Return the major tick lines as a list of Line2D instances'
+ lines = []
+ ticks = self.get_major_ticks()
+ for tick in ticks:
+ lines.append(tick.tick1line)
+ lines.append(tick.tick2line)
+ return cbook.silent_list('Line2D ticklines', lines)
+
+ def get_minorticklines(self):
+ 'Return the minor tick lines as a list of Line2D instances'
+ lines = []
+ ticks = self.get_minor_ticks()
+ for tick in ticks:
+ lines.append(tick.tick1line)
+ lines.append(tick.tick2line)
+ return cbook.silent_list('Line2D ticklines', lines)
+
+ def get_ticklines(self, minor=False):
+ 'Return the tick lines as a list of Line2D instances'
+ if minor:
+ return self.get_minorticklines()
+ return self.get_majorticklines()
+
+ def get_majorticklocs(self):
+ "Get the major tick locations in data coordinates as a numpy array"
+ return self.major.locator()
+
+ def get_minorticklocs(self):
+ "Get the minor tick locations in data coordinates as a numpy array"
+ return self.minor.locator()
+
+ def get_ticklocs(self, minor=False):
+ "Get the tick locations in data coordinates as a numpy array"
+ if minor:
+ return self.minor.locator()
+ return self.major.locator()
+
+ def get_ticks_direction(self, minor=False):
+ """
+ Get the tick directions as a numpy array
+
+ Parameters
+ ----------
+        minor : bool
+            True to return the minor tick directions,
+            False to return the major tick directions.
+            Default is False.
+
+ Returns
+ -------
+ numpy array of tick directions
+ """
+ if minor:
+ return np.array(
+ [tick._tickdir for tick in self.get_minor_ticks()])
+ else:
+ return np.array(
+ [tick._tickdir for tick in self.get_major_ticks()])
+
+ def _get_tick(self, major):
+ 'return the default tick instance'
+ raise NotImplementedError('derived must override')
+
+ def _copy_tick_props(self, src, dest):
+ 'Copy the props from src tick to dest tick'
+ if src is None or dest is None:
+ return
+ dest.label1.update_from(src.label1)
+ dest.label2.update_from(src.label2)
+
+ dest.tick1line.update_from(src.tick1line)
+ dest.tick2line.update_from(src.tick2line)
+ dest.gridline.update_from(src.gridline)
+
+ dest.tick1On = src.tick1On
+ dest.tick2On = src.tick2On
+ dest.label1On = src.label1On
+ dest.label2On = src.label2On
+
+ def get_label_text(self):
+ 'Get the text of the label'
+ return self.label.get_text()
+
+ def get_major_locator(self):
+ 'Get the locator of the major ticker'
+ return self.major.locator
+
+ def get_minor_locator(self):
+ 'Get the locator of the minor ticker'
+ return self.minor.locator
+
+ def get_major_formatter(self):
+ 'Get the formatter of the major ticker'
+ return self.major.formatter
+
+ def get_minor_formatter(self):
+ 'Get the formatter of the minor ticker'
+ return self.minor.formatter
+
+ def get_major_ticks(self, numticks=None):
+ 'get the tick instances; grow as necessary'
+ if numticks is None:
+ numticks = len(self.get_major_locator()())
+
+ while len(self.majorTicks) < numticks:
+ # update the new tick label properties from the old
+ tick = self._get_tick(major=True)
+ self.majorTicks.append(tick)
+ if self._gridOnMajor:
+ tick.gridOn = True
+ self._copy_tick_props(self.majorTicks[0], tick)
+
+ return self.majorTicks[:numticks]
+
+ def get_minor_ticks(self, numticks=None):
+ 'get the minor tick instances; grow as necessary'
+ if numticks is None:
+ numticks = len(self.get_minor_locator()())
+
+ while len(self.minorTicks) < numticks:
+ # update the new tick label properties from the old
+ tick = self._get_tick(major=False)
+ self.minorTicks.append(tick)
+ if self._gridOnMinor:
+ tick.gridOn = True
+ self._copy_tick_props(self.minorTicks[0], tick)
+
+ return self.minorTicks[:numticks]
+
+ def grid(self, b=None, which='major', **kwargs):
+ """
+ Set the axis grid on or off; b is a boolean. Use *which* =
+ 'major' | 'minor' | 'both' to set the grid for major or minor ticks.
+
+ If *b* is *None* and len(kwargs)==0, toggle the grid state. If
+ *kwargs* are supplied, it is assumed you want the grid on and *b*
+ will be set to True.
+
+ *kwargs* are used to set the line properties of the grids, e.g.,
+
+ xax.grid(color='r', linestyle='-', linewidth=2)
+ """
+ if len(kwargs):
+ b = True
+ which = which.lower()
+ gridkw = {'grid_' + item[0]: item[1] for item in kwargs.items()}
+ if which in ['minor', 'both']:
+ if b is None:
+ self._gridOnMinor = not self._gridOnMinor
+ else:
+ self._gridOnMinor = b
+ self.set_tick_params(which='minor', gridOn=self._gridOnMinor,
+ **gridkw)
+ if which in ['major', 'both']:
+ if b is None:
+ self._gridOnMajor = not self._gridOnMajor
+ else:
+ self._gridOnMajor = b
+ self.set_tick_params(which='major', gridOn=self._gridOnMajor,
+ **gridkw)
+ self.stale = True
+
+ def update_units(self, data):
+ """
+ introspect *data* for units converter and update the
+ axis.converter instance if necessary. Return *True*
+ if *data* is registered for unit conversion.
+ """
+
+ converter = munits.registry.get_converter(data)
+ if converter is None:
+ return False
+
+ neednew = self.converter != converter
+ self.converter = converter
+ default = self.converter.default_units(data, self)
+ if default is not None and self.units is None:
+ self.set_units(default)
+
+ if neednew:
+ self._update_axisinfo()
+ self.stale = True
+ return True
+
+ def _update_axisinfo(self):
+ """
+ check the axis converter for the stored units to see if the
+ axis info needs to be updated
+ """
+ if self.converter is None:
+ return
+
+ info = self.converter.axisinfo(self.units, self)
+
+ if info is None:
+ return
+ if info.majloc is not None and \
+ self.major.locator != info.majloc and self.isDefault_majloc:
+ self.set_major_locator(info.majloc)
+ self.isDefault_majloc = True
+ if info.minloc is not None and \
+ self.minor.locator != info.minloc and self.isDefault_minloc:
+ self.set_minor_locator(info.minloc)
+ self.isDefault_minloc = True
+ if info.majfmt is not None and \
+ self.major.formatter != info.majfmt and self.isDefault_majfmt:
+ self.set_major_formatter(info.majfmt)
+ self.isDefault_majfmt = True
+ if info.minfmt is not None and \
+ self.minor.formatter != info.minfmt and self.isDefault_minfmt:
+ self.set_minor_formatter(info.minfmt)
+ self.isDefault_minfmt = True
+ if info.label is not None and self.isDefault_label:
+ self.set_label_text(info.label)
+ self.isDefault_label = True
+
+ self.set_default_intervals()
+
+ def have_units(self):
+ return self.converter is not None or self.units is not None
+
+ def convert_units(self, x):
+ # If x is already a number, doesn't need converting
+ if munits.ConversionInterface.is_numlike(x):
+ return x
+
+ if self.converter is None:
+ self.converter = munits.registry.get_converter(x)
+
+ if self.converter is None:
+ return x
+
+ ret = self.converter.convert(x, self.units, self)
+ return ret
+
+ def set_units(self, u):
+ """
+ set the units for axis
+
+ ACCEPTS: a units tag
+ """
+ pchanged = False
+ if u is None:
+ self.units = None
+ pchanged = True
+ else:
+ if u != self.units:
+ self.units = u
+ pchanged = True
+ if pchanged:
+ self._update_axisinfo()
+ self.callbacks.process('units')
+ self.callbacks.process('units finalize')
+ self.stale = True
+
+ def get_units(self):
+ 'return the units for axis'
+ return self.units
+
+ def set_label_text(self, label, fontdict=None, **kwargs):
+ """ Sets the text value of the axis label
+
+ ACCEPTS: A string value for the label
+ """
+ self.isDefault_label = False
+ self.label.set_text(label)
+ if fontdict is not None:
+ self.label.update(fontdict)
+ self.label.update(kwargs)
+ self.stale = True
+ return self.label
+
+ def set_major_formatter(self, formatter):
+ """
+ Set the formatter of the major ticker
+
+ ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
+ """
+ self.isDefault_majfmt = False
+ self.major.formatter = formatter
+ formatter.set_axis(self)
+ self.stale = True
+
+ def set_minor_formatter(self, formatter):
+ """
+ Set the formatter of the minor ticker
+
+ ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
+ """
+ self.isDefault_minfmt = False
+ self.minor.formatter = formatter
+ formatter.set_axis(self)
+ self.stale = True
+
+ def set_major_locator(self, locator):
+ """
+ Set the locator of the major ticker
+
+ ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
+ """
+ self.isDefault_majloc = False
+ self.major.locator = locator
+ locator.set_axis(self)
+ self.stale = True
+
+ def set_minor_locator(self, locator):
+ """
+ Set the locator of the minor ticker
+
+ ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
+ """
+ self.isDefault_minloc = False
+ self.minor.locator = locator
+ locator.set_axis(self)
+ self.stale = True
+
+ def set_pickradius(self, pickradius):
+ """
+ Set the depth of the axis used by the picker
+
+ ACCEPTS: a distance in points
+ """
+ self.pickradius = pickradius
+
+ def set_ticklabels(self, ticklabels, *args, **kwargs):
+ """
+ Set the text values of the tick labels. Return a list of Text
+ instances. Use *kwarg* *minor=True* to select minor ticks.
+ All other kwargs are used to update the text object properties.
+ As for get_ticklabels, label1 (left or bottom) is
+ affected for a given tick only if its label1On attribute
+ is True, and similarly for label2. The list of returned
+ label text objects consists of all such label1 objects followed
+ by all such label2 objects.
+
+ The input *ticklabels* is assumed to match the set of
+ tick locations, regardless of the state of label1On and
+ label2On.
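+
+        For example (assuming *ax* is an existing Axes whose tick locations
+        have already been fixed with ``set_ticks``)::
+
+            ax.xaxis.set_ticks([1, 2, 3])
+            ax.xaxis.set_ticklabels(['one', 'two', 'three'], color='gray')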
+
+ ACCEPTS: sequence of strings or Text objects
+ """
+ get_labels = []
+ for t in ticklabels:
+ # try calling get_text() to check whether it is Text object
+ # if it is Text, get label content
+ try:
+ get_labels.append(t.get_text())
+ # otherwise add the label to the list directly
+ except AttributeError:
+ get_labels.append(t)
+ # replace the ticklabels list with the processed one
+ ticklabels = get_labels
+
+ minor = kwargs.pop('minor', False)
+ if minor:
+ self.set_minor_formatter(mticker.FixedFormatter(ticklabels))
+ ticks = self.get_minor_ticks()
+ else:
+ self.set_major_formatter(mticker.FixedFormatter(ticklabels))
+ ticks = self.get_major_ticks()
+ ret = []
+ for tick_label, tick in zip(ticklabels, ticks):
+ # deal with label1
+ tick.label1.set_text(tick_label)
+ tick.label1.update(kwargs)
+ # deal with label2
+ tick.label2.set_text(tick_label)
+ tick.label2.update(kwargs)
+ # only return visible tick labels
+ if tick.label1On:
+ ret.append(tick.label1)
+ if tick.label2On:
+ ret.append(tick.label2)
+
+ self.stale = True
+ return ret
+
+ def set_ticks(self, ticks, minor=False):
+ """
+ Set the locations of the tick marks from sequence ticks
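+
+        For example (assuming *ax* is an existing Axes)::
+
+            ax.xaxis.set_ticks([0, 0.5, 1.0])
+            ax.yaxis.set_ticks([0.25, 0.75], minor=True)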
+
+ ACCEPTS: sequence of floats
+ """
+ # XXX if the user changes units, the information will be lost here
+ ticks = self.convert_units(ticks)
+ if len(ticks) > 1:
+ xleft, xright = self.get_view_interval()
+ if xright > xleft:
+ self.set_view_interval(min(ticks), max(ticks))
+ else:
+ self.set_view_interval(max(ticks), min(ticks))
+ if minor:
+ self.set_minor_locator(mticker.FixedLocator(ticks))
+ return self.get_minor_ticks(len(ticks))
+ else:
+ self.set_major_locator(mticker.FixedLocator(ticks))
+ return self.get_major_ticks(len(ticks))
+
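+ # Editor's note -- illustrative sketch, not part of the original source:
+ # fixing tick locations and then labelling them through this Axis:
+ #
+ #     fig, ax = plt.subplots()
+ #     ax.xaxis.set_ticks([0.0, 0.5, 1.0])
+ #     ax.xaxis.set_ticklabels(['low', 'mid', 'high'])
+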
+ def _get_tick_boxes_siblings(self, xdir, renderer):
+ """
+ Get the bounding boxes for this `.axis` and its siblings
+ as set by `.Figure.align_xlabels` or `.Figure.align_ylabels`.
+
+ By default it just gets bboxes for self.
+ """
+ raise NotImplementedError('Derived must override')
+
+ def _update_label_position(self, renderer):
+ """
+ Update the label position based on the bounding box enclosing
+ all the ticklabels and axis spine
+ """
+ raise NotImplementedError('Derived must override')
+
+ def _update_offset_text_position(self, bboxes, bboxes2):
+ """
+ Update the label position based on the sequence of bounding
+ boxes of all the ticklabels
+ """
+ raise NotImplementedError('Derived must override')
+
+ def pan(self, numsteps):
+ 'Pan *numsteps* (can be positive or negative)'
+ self.major.locator.pan(numsteps)
+
+ def zoom(self, direction):
+ "Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out"
+ self.major.locator.zoom(direction)
+
+ def axis_date(self, tz=None):
+ """
+ Set up axis ticks and labels to treat data along this axis as dates.
+ *tz* is a :class:`tzinfo` instance or a timezone string.
+ This timezone is used to create date labels.
+ """
+ # By providing a sample datetime instance with the desired
+ # timezone, the registered converter can be selected,
+ # and the "units" attribute, which is the timezone, can
+ # be set.
+ import datetime
+ if isinstance(tz, six.string_types):
+ import pytz
+ tz = pytz.timezone(tz)
+ self.update_units(datetime.datetime(2009, 1, 1, 0, 0, 0, 0, tz))
+
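+ # Editor's note -- illustrative sketch, not part of the original source:
+ # axis_date is normally reached through the Axes-level wrappers, e.g.
+ #
+ #     fig, ax = plt.subplots()
+ #     ax.xaxis_date(tz='US/Eastern')  # forwards to ax.xaxis.axis_date
+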
+ def get_tick_space(self):
+ """
+ Return the estimated number of ticks that can fit on the axis.
+ """
+ # Must be overridden in the subclass
+ raise NotImplementedError()
+
+ def get_label_position(self):
+ """
+ Return the label position (top or bottom)
+ """
+ return self.label_position
+
+ def set_label_position(self, position):
+ """
+ Set the label position (top or bottom)
+
+ ACCEPTS: [ 'top' | 'bottom' ]
+ """
+ raise NotImplementedError()
+
+ def get_minpos(self):
+ raise NotImplementedError()
+
+
+class XAxis(Axis):
+ __name__ = 'xaxis'
+ axis_name = 'x'
+
+ def contains(self, mouseevent):
+ """Test whether the mouse event occurred in the x axis.
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+
+ x, y = mouseevent.x, mouseevent.y
+ try:
+ trans = self.axes.transAxes.inverted()
+ xaxes, yaxes = trans.transform_point((x, y))
+ except ValueError:
+ return False, {}
+ l, b = self.axes.transAxes.transform_point((0, 0))
+ r, t = self.axes.transAxes.transform_point((1, 1))
+ inaxis = xaxes >= 0 and xaxes <= 1 and (
+ (y < b and y > b - self.pickradius) or
+ (y > t and y < t + self.pickradius))
+ return inaxis, {}
+
+ def _get_tick(self, major):
+ if major:
+ tick_kw = self._major_tick_kw
+ else:
+ tick_kw = self._minor_tick_kw
+ return XTick(self.axes, 0, '', major=major, **tick_kw)
+
+ def _get_label(self):
+ # x in axes coords, y in display coords (to be updated at draw
+ # time by _update_label_positions)
+ label = mtext.Text(x=0.5, y=0,
+ fontproperties=font_manager.FontProperties(
+ size=rcParams['axes.labelsize'],
+ weight=rcParams['axes.labelweight']),
+ color=rcParams['axes.labelcolor'],
+ verticalalignment='top',
+ horizontalalignment='center')
+
+ label.set_transform(mtransforms.blended_transform_factory(
+ self.axes.transAxes, mtransforms.IdentityTransform()))
+
+ self._set_artist_props(label)
+ self.label_position = 'bottom'
+ return label
+
+ def _get_offset_text(self):
+ # x in axes coords, y in display coords (to be updated at draw time)
+ offsetText = mtext.Text(x=1, y=0,
+ fontproperties=font_manager.FontProperties(
+ size=rcParams['xtick.labelsize']),
+ color=rcParams['xtick.color'],
+ verticalalignment='top',
+ horizontalalignment='right')
+ offsetText.set_transform(mtransforms.blended_transform_factory(
+ self.axes.transAxes, mtransforms.IdentityTransform())
+ )
+ self._set_artist_props(offsetText)
+ self.offset_text_position = 'bottom'
+ return offsetText
+
+ def _get_pixel_distance_along_axis(self, where, perturb):
+ """
+ Returns the amount, in data coordinates, that a single pixel
+ corresponds to in the locality given by "where", which is also given
+ in data coordinates, and is an x coordinate. "perturb" is the amount
+ to perturb the pixel. Usually +0.5 or -0.5.
+
+ Implementing this routine for an axis is optional; if present, it will
+ ensure that no ticks are lost due to round-off at the extreme ends of
+ an axis.
+ """
+
+ # Note that this routine does not work for a polar axis, because of
+ # the 1e-10 below. To do things correctly, we need to use rmax
+ # instead of 1e-10 for a polar axis. But since we do not have that
+ # kind of information at this point, we just don't try to pad anything
+ # for the theta axis of a polar plot.
+ if self.axes.name == 'polar':
+ return 0.0
+
+ #
+ # first figure out the pixel location of the "where" point. We use
+ # 1e-10 for the y point, so that we remain compatible with log axes.
+
+ # transformation from data coords to display coords
+ trans = self.axes.transData
+ # transformation from display coords to data coords
+ transinv = trans.inverted()
+ pix = trans.transform_point((where, 1e-10))
+ # perturb the pixel
+ ptp = transinv.transform_point((pix[0] + perturb, pix[1]))
+ dx = abs(ptp[0] - where)
+
+ return dx
+
+ def set_label_position(self, position):
+ """
+ Set the label position (top or bottom)
+
+ ACCEPTS: [ 'top' | 'bottom' ]
+ """
+ if position == 'top':
+ self.label.set_verticalalignment('baseline')
+ elif position == 'bottom':
+ self.label.set_verticalalignment('top')
+ else:
+ raise ValueError("Position accepts only 'top' or 'bottom'")
+ self.label_position = position
+ self.stale = True
+
+ def _get_tick_boxes_siblings(self, renderer):
+ """
+ Get the bounding boxes for this `.axis` and its siblings
+ as set by `.Figure.align_xlabels` or `.Figure.align_ylabels`.
+
+ By default it just gets bboxes for self.
+ """
+ bboxes = []
+ bboxes2 = []
+ # get the Grouper that keeps track of x-label groups for this figure
+ grp = self.figure._align_xlabel_grp
+ # if we want to align labels from other axes:
+ for nn, axx in enumerate(grp.get_siblings(self.axes)):
+ ticks_to_draw = axx.xaxis._update_ticks(renderer)
+ tlb, tlb2 = axx.xaxis._get_tick_bboxes(ticks_to_draw, renderer)
+ bboxes.extend(tlb)
+ bboxes2.extend(tlb2)
+ return bboxes, bboxes2
+
+ def _update_label_position(self, renderer):
+ """
+ Update the label position based on the bounding box enclosing
+ all the ticklabels and axis spine
+ """
+ if not self._autolabelpos:
+ return
+
+ # get bounding boxes for this axis and any siblings
+ # that have been set by `fig.align_xlabels()`
+ bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)
+
+ x, y = self.label.get_position()
+ if self.label_position == 'bottom':
+ try:
+ spine = self.axes.spines['bottom']
+ spinebbox = spine.get_transform().transform_path(
+ spine.get_path()).get_extents()
+ except KeyError:
+ # use axes if spine doesn't exist
+ spinebbox = self.axes.bbox
+ bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
+ bottom = bbox.y0
+
+ self.label.set_position(
+ (x, bottom - self.labelpad * self.figure.dpi / 72.0)
+ )
+
+ else:
+ try:
+ spine = self.axes.spines['top']
+ spinebbox = spine.get_transform().transform_path(
+ spine.get_path()).get_extents()
+ except KeyError:
+ # use axes if spine doesn't exist
+ spinebbox = self.axes.bbox
+ bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
+ top = bbox.y1
+
+ self.label.set_position(
+ (x, top + self.labelpad * self.figure.dpi / 72.0)
+ )
+
+ def _update_offset_text_position(self, bboxes, bboxes2):
+ """
+ Update the offset_text position based on the sequence of bounding
+ boxes of all the ticklabels
+ """
+ x, y = self.offsetText.get_position()
+ if not len(bboxes):
+ bottom = self.axes.bbox.ymin
+ else:
+ bbox = mtransforms.Bbox.union(bboxes)
+ bottom = bbox.y0
+ self.offsetText.set_position(
+ (x, bottom - self.OFFSETTEXTPAD * self.figure.dpi / 72.0)
+ )
+
+ def get_text_heights(self, renderer):
+ """
+ Returns the amount of space one should reserve for text
+ above and below the axes. Returns a tuple (above, below)
+ """
+ bbox, bbox2 = self.get_ticklabel_extents(renderer)
+ # MGDTODO: Need a better way to get the pad
+ padPixels = self.majorTicks[0].get_pad_pixels()
+
+ above = 0.0
+ if bbox2.height:
+ above += bbox2.height + padPixels
+ below = 0.0
+ if bbox.height:
+ below += bbox.height + padPixels
+
+ if self.get_label_position() == 'top':
+ above += self.label.get_window_extent(renderer).height + padPixels
+ else:
+ below += self.label.get_window_extent(renderer).height + padPixels
+ return above, below
+
+ def set_ticks_position(self, position):
+ """
+ Set the ticks position (top, bottom, both, default or none).
+ 'both' sets the ticks to appear on both positions, but does not
+ change the tick labels. 'default' resets the tick positions to
+ the default: ticks on both positions, labels at bottom. 'none'
+ can be used if you don't want any ticks. 'none' and 'both'
+ affect only the ticks, not the labels.
+
+ ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]
+ """
+ if position == 'top':
+ self.set_tick_params(which='both', top=True, labeltop=True,
+ bottom=False, labelbottom=False)
+ elif position == 'bottom':
+ self.set_tick_params(which='both', top=False, labeltop=False,
+ bottom=True, labelbottom=True)
+ elif position == 'both':
+ self.set_tick_params(which='both', top=True,
+ bottom=True)
+ elif position == 'none':
+ self.set_tick_params(which='both', top=False,
+ bottom=False)
+ elif position == 'default':
+ self.set_tick_params(which='both', top=True, labeltop=False,
+ bottom=True, labelbottom=True)
+ else:
+ raise ValueError("invalid position: %s" % position)
+ self.stale = True
+
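+ # Editor's note -- illustrative sketch, not part of the original source:
+ #
+ #     fig, ax = plt.subplots()
+ #     ax.xaxis.set_ticks_position('top')  # ticks and labels on top only
+ #     ax.xaxis.tick_bottom()              # move them back to the bottom
+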
+ def tick_top(self):
+ """
+ Move ticks and ticklabels (if present) to the top of the axes.
+ """
+ label = True
+ if 'label1On' in self._major_tick_kw:
+ label = (self._major_tick_kw['label1On']
+ or self._major_tick_kw['label2On'])
+ self.set_ticks_position('top')
+ # if labels were turned off before this was called
+ # leave them off
+ self.set_tick_params(which='both', labeltop=label)
+
+ def tick_bottom(self):
+ """
+ Move ticks and ticklabels (if present) to the bottom of the axes.
+ """
+ label = True
+ if 'label1On' in self._major_tick_kw:
+ label = (self._major_tick_kw['label1On']
+ or self._major_tick_kw['label2On'])
+ self.set_ticks_position('bottom')
+ # if labels were turned off before this was called
+ # leave them off
+ self.set_tick_params(which='both', labelbottom=label)
+
+ def get_ticks_position(self):
+ """
+ Return the ticks position (top, bottom, default or unknown)
+ """
+ majt = self.majorTicks[0]
+ mT = self.minorTicks[0]
+
+ majorTop = ((not majt.tick1On) and majt.tick2On and
+ (not majt.label1On) and majt.label2On)
+ minorTop = ((not mT.tick1On) and mT.tick2On and
+ (not mT.label1On) and mT.label2On)
+ if majorTop and minorTop:
+ return 'top'
+
+ MajorBottom = (majt.tick1On and (not majt.tick2On) and
+ majt.label1On and (not majt.label2On))
+ MinorBottom = (mT.tick1On and (not mT.tick2On) and
+ mT.label1On and (not mT.label2On))
+ if MajorBottom and MinorBottom:
+ return 'bottom'
+
+ majorDefault = (majt.tick1On and majt.tick2On and
+ majt.label1On and (not majt.label2On))
+ minorDefault = (mT.tick1On and mT.tick2On and
+ mT.label1On and (not mT.label2On))
+ if majorDefault and minorDefault:
+ return 'default'
+
+ return 'unknown'
+
+ def get_view_interval(self):
+ 'return the Interval instance for this axis view limits'
+ return self.axes.viewLim.intervalx
+
+ def set_view_interval(self, vmin, vmax, ignore=False):
+ """
+ If *ignore* is *False*, the order of vmin, vmax
+ does not matter; the original axis orientation will
+ be preserved. In addition, the view limits can be
+ expanded, but will not be reduced. This method is
+ for mpl internal use; for normal use, see
+ :meth:`~matplotlib.axes.Axes.set_xlim`.
+
+ """
+ if ignore:
+ self.axes.viewLim.intervalx = vmin, vmax
+ else:
+ Vmin, Vmax = self.get_view_interval()
+ if Vmin < Vmax:
+ self.axes.viewLim.intervalx = (min(vmin, vmax, Vmin),
+ max(vmin, vmax, Vmax))
+ else:
+ self.axes.viewLim.intervalx = (max(vmin, vmax, Vmin),
+ min(vmin, vmax, Vmax))
+
+ def get_minpos(self):
+ return self.axes.dataLim.minposx
+
+ def get_data_interval(self):
+ 'return the Interval instance for this axis data limits'
+ return self.axes.dataLim.intervalx
+
+ def set_data_interval(self, vmin, vmax, ignore=False):
+ 'set the axis data limits'
+ if ignore:
+ self.axes.dataLim.intervalx = vmin, vmax
+ else:
+ Vmin, Vmax = self.get_data_interval()
+ self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
+ self.stale = True
+
+ def set_default_intervals(self):
+ 'set the default limits for the axis interval if they are not mutated'
+ xmin, xmax = 0., 1.
+ dataMutated = self.axes.dataLim.mutatedx()
+ viewMutated = self.axes.viewLim.mutatedx()
+ if not dataMutated or not viewMutated:
+ if self.converter is not None:
+ info = self.converter.axisinfo(self.units, self)
+ if info.default_limits is not None:
+ valmin, valmax = info.default_limits
+ xmin = self.converter.convert(valmin, self.units, self)
+ xmax = self.converter.convert(valmax, self.units, self)
+ if not dataMutated:
+ self.axes.dataLim.intervalx = xmin, xmax
+ if not viewMutated:
+ self.axes.viewLim.intervalx = xmin, xmax
+ self.stale = True
+
+ def get_tick_space(self):
+ ends = self.axes.transAxes.transform([[0, 0], [1, 0]])
+ length = ((ends[1][0] - ends[0][0]) / self.axes.figure.dpi) * 72.0
+ tick = self._get_tick(True)
+ # There is a heuristic here that the aspect ratio of tick text
+ # is no more than 3:1
+ size = tick.label1.get_size() * 3
+ if size > 0:
+ return int(np.floor(length / size))
+ else:
+ return 2**31 - 1
+
+
+class YAxis(Axis):
+ __name__ = 'yaxis'
+ axis_name = 'y'
+
+ def contains(self, mouseevent):
+ """Test whether the mouse event occurred in the y axis.
+
+ Returns *True* | *False*
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+
+ x, y = mouseevent.x, mouseevent.y
+ try:
+ trans = self.axes.transAxes.inverted()
+ xaxes, yaxes = trans.transform_point((x, y))
+ except ValueError:
+ return False, {}
+ l, b = self.axes.transAxes.transform_point((0, 0))
+ r, t = self.axes.transAxes.transform_point((1, 1))
+ inaxis = yaxes >= 0 and yaxes <= 1 and (
+ (x < l and x > l - self.pickradius) or
+ (x > r and x < r + self.pickradius))
+ return inaxis, {}
+
+ def _get_tick(self, major):
+ if major:
+ tick_kw = self._major_tick_kw
+ else:
+ tick_kw = self._minor_tick_kw
+ return YTick(self.axes, 0, '', major=major, **tick_kw)
+
+ def _get_label(self):
+ # x in display coords (updated by _update_label_position)
+ # y in axes coords
+ label = mtext.Text(x=0, y=0.5,
+ # todo: get the label position
+ fontproperties=font_manager.FontProperties(
+ size=rcParams['axes.labelsize'],
+ weight=rcParams['axes.labelweight']),
+ color=rcParams['axes.labelcolor'],
+ verticalalignment='bottom',
+ horizontalalignment='center',
+ rotation='vertical',
+ rotation_mode='anchor')
+ label.set_transform(mtransforms.blended_transform_factory(
+ mtransforms.IdentityTransform(), self.axes.transAxes))
+
+ self._set_artist_props(label)
+ self.label_position = 'left'
+ return label
+
+ def _get_offset_text(self):
+ # x in display coords, y in axes coords (to be updated at draw time)
+ offsetText = mtext.Text(x=0, y=0.5,
+ fontproperties=font_manager.FontProperties(
+ size=rcParams['ytick.labelsize']
+ ),
+ color=rcParams['ytick.color'],
+ verticalalignment='baseline',
+ horizontalalignment='left')
+ offsetText.set_transform(mtransforms.blended_transform_factory(
+ self.axes.transAxes, mtransforms.IdentityTransform())
+ )
+ self._set_artist_props(offsetText)
+ self.offset_text_position = 'left'
+ return offsetText
+
+ def _get_pixel_distance_along_axis(self, where, perturb):
+ """
+ Returns the amount, in data coordinates, that a single pixel
+ corresponds to in the locality given by *where*, which is also given
+ in data coordinates, and is a y coordinate.
+
+ *perturb* is the amount to perturb the pixel. Usually +0.5 or -0.5.
+
+ Implementing this routine for an axis is optional; if present, it will
+ ensure that no ticks are lost due to round-off at the extreme ends of
+ an axis.
+ """
+
+ #
+ # first figure out the pixel location of the "where" point. We use
+ # 1e-10 for the x point, so that we remain compatible with log axes.
+
+ # transformation from data coords to display coords
+ trans = self.axes.transData
+ # transformation from display coords to data coords
+ transinv = trans.inverted()
+ pix = trans.transform_point((1e-10, where))
+ # perturb the pixel
+ ptp = transinv.transform_point((pix[0], pix[1] + perturb))
+ dy = abs(ptp[1] - where)
+ return dy
+
+ def set_label_position(self, position):
+ """
+ Set the label position (left or right)
+
+ ACCEPTS: [ 'left' | 'right' ]
+ """
+ self.label.set_rotation_mode('anchor')
+ self.label.set_horizontalalignment('center')
+ if position == 'left':
+ self.label.set_verticalalignment('bottom')
+ elif position == 'right':
+ self.label.set_verticalalignment('top')
+ else:
+ raise ValueError("Position accepts only 'left' or 'right'")
+ self.label_position = position
+ self.stale = True
+
+ def _get_tick_boxes_siblings(self, renderer):
+ """
+ Get the bounding boxes for this `.axis` and its siblings
+ as set by `.Figure.align_xlabels` or `.Figure.align_ylabels`.
+
+ By default it just gets bboxes for self.
+ """
+ bboxes = []
+ bboxes2 = []
+ # get the Grouper that keeps track of y-label groups for this figure
+ grp = self.figure._align_ylabel_grp
+ # if we want to align labels from other axes:
+ for axx in grp.get_siblings(self.axes):
+ ticks_to_draw = axx.yaxis._update_ticks(renderer)
+ tlb, tlb2 = axx.yaxis._get_tick_bboxes(ticks_to_draw, renderer)
+ bboxes.extend(tlb)
+ bboxes2.extend(tlb2)
+ return bboxes, bboxes2
+
+ def _update_label_position(self, renderer):
+ """
+ Update the label position based on the bounding box enclosing
+ all the ticklabels and axis spine
+ """
+ if not self._autolabelpos:
+ return
+
+ # get bounding boxes for this axis and any siblings
+ # that have been set by `fig.align_ylabels()`
+ bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)
+
+ x, y = self.label.get_position()
+ if self.label_position == 'left':
+ try:
+ spine = self.axes.spines['left']
+ spinebbox = spine.get_transform().transform_path(
+ spine.get_path()).get_extents()
+ except KeyError:
+ # use axes if spine doesn't exist
+ spinebbox = self.axes.bbox
+ bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
+ left = bbox.x0
+ self.label.set_position(
+ (left - self.labelpad * self.figure.dpi / 72.0, y)
+ )
+
+ else:
+ try:
+ spine = self.axes.spines['right']
+ spinebbox = spine.get_transform().transform_path(
+ spine.get_path()).get_extents()
+ except KeyError:
+ # use axes if spine doesn't exist
+ spinebbox = self.axes.bbox
+ bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
+ right = bbox.x1
+
+ self.label.set_position(
+ (right + self.labelpad * self.figure.dpi / 72.0, y)
+ )
+
+ def _update_offset_text_position(self, bboxes, bboxes2):
+ """
+ Update the offset_text position based on the sequence of bounding
+ boxes of all the ticklabels
+ """
+ x, y = self.offsetText.get_position()
+ top = self.axes.bbox.ymax
+ self.offsetText.set_position(
+ (x, top + self.OFFSETTEXTPAD * self.figure.dpi / 72.0)
+ )
+
+ def set_offset_position(self, position):
+ """
+ Set the offset text position (left or right).
+
+ .. ACCEPTS: [ 'left' | 'right' ]
+ """
+ x, y = self.offsetText.get_position()
+ if position == 'left':
+ x = 0
+ elif position == 'right':
+ x = 1
+ else:
+ raise ValueError("Position accepts only [ 'left' | 'right' ]")
+
+ self.offsetText.set_ha(position)
+ self.offsetText.set_position((x, y))
+ self.stale = True
+
+ def get_text_widths(self, renderer):
+ bbox, bbox2 = self.get_ticklabel_extents(renderer)
+ # MGDTODO: Need a better way to get the pad
+ padPixels = self.majorTicks[0].get_pad_pixels()
+
+ left = 0.0
+ if bbox.width:
+ left += bbox.width + padPixels
+ right = 0.0
+ if bbox2.width:
+ right += bbox2.width + padPixels
+
+ if self.get_label_position() == 'left':
+ left += self.label.get_window_extent(renderer).width + padPixels
+ else:
+ right += self.label.get_window_extent(renderer).width + padPixels
+ return left, right
+
+ def set_ticks_position(self, position):
+ """
+ Set the ticks position (left, right, both, default or none).
+ 'both' sets the ticks to appear on both positions, but does not
+ change the tick labels. 'default' resets the tick positions to
+ the default: ticks on both positions, labels at left. 'none'
+ can be used if you don't want any ticks. 'none' and 'both'
+ affect only the ticks, not the labels.
+
+ ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]
+ """
+ if position == 'right':
+ self.set_tick_params(which='both', right=True, labelright=True,
+ left=False, labelleft=False)
+ self.set_offset_position(position)
+ elif position == 'left':
+ self.set_tick_params(which='both', right=False, labelright=False,
+ left=True, labelleft=True)
+ self.set_offset_position(position)
+ elif position == 'both':
+ self.set_tick_params(which='both', right=True,
+ left=True)
+ elif position == 'none':
+ self.set_tick_params(which='both', right=False,
+ left=False)
+ elif position == 'default':
+ self.set_tick_params(which='both', right=True, labelright=False,
+ left=True, labelleft=True)
+ else:
+ raise ValueError("invalid position: %s" % position)
+ self.stale = True
+
+ def tick_right(self):
+ """
+ Move ticks and ticklabels (if present) to the right of the axes.
+ """
+ label = True
+ if 'label1On' in self._major_tick_kw:
+ label = (self._major_tick_kw['label1On']
+ or self._major_tick_kw['label2On'])
+ self.set_ticks_position('right')
+ # if labels were turned off before this was called
+ # leave them off
+ self.set_tick_params(which='both', labelright=label)
+
+ def tick_left(self):
+ """
+ Move ticks and ticklabels (if present) to the left of the axes.
+ """
+ label = True
+ if 'label1On' in self._major_tick_kw:
+ label = (self._major_tick_kw['label1On']
+ or self._major_tick_kw['label2On'])
+ self.set_ticks_position('left')
+ # if labels were turned off before this was called
+ # leave them off
+ self.set_tick_params(which='both', labelleft=label)
+
+ def get_ticks_position(self):
+ """
+ Return the ticks position (left, right, default or unknown)
+ """
+ majt = self.majorTicks[0]
+ mT = self.minorTicks[0]
+
+ majorRight = ((not majt.tick1On) and majt.tick2On and
+ (not majt.label1On) and majt.label2On)
+ minorRight = ((not mT.tick1On) and mT.tick2On and
+ (not mT.label1On) and mT.label2On)
+ if majorRight and minorRight:
+ return 'right'
+
+ majorLeft = (majt.tick1On and (not majt.tick2On) and
+ majt.label1On and (not majt.label2On))
+ minorLeft = (mT.tick1On and (not mT.tick2On) and
+ mT.label1On and (not mT.label2On))
+ if majorLeft and minorLeft:
+ return 'left'
+
+ majorDefault = (majt.tick1On and majt.tick2On and
+ majt.label1On and (not majt.label2On))
+ minorDefault = (mT.tick1On and mT.tick2On and
+ mT.label1On and (not mT.label2On))
+ if majorDefault and minorDefault:
+ return 'default'
+
+ return 'unknown'
+
+ def get_view_interval(self):
+ 'return the Interval instance for this axis view limits'
+ return self.axes.viewLim.intervaly
+
+ def set_view_interval(self, vmin, vmax, ignore=False):
+ """
+ If *ignore* is *False*, the order of vmin, vmax
+ does not matter; the original axis orientation will
+ be preserved. In addition, the view limits can be
+ expanded, but will not be reduced. This method is
+ for mpl internal use; for normal use, see
+ :meth:`~matplotlib.axes.Axes.set_ylim`.
+
+ """
+ if ignore:
+ self.axes.viewLim.intervaly = vmin, vmax
+ else:
+ Vmin, Vmax = self.get_view_interval()
+ if Vmin < Vmax:
+ self.axes.viewLim.intervaly = (min(vmin, vmax, Vmin),
+ max(vmin, vmax, Vmax))
+ else:
+ self.axes.viewLim.intervaly = (max(vmin, vmax, Vmin),
+ min(vmin, vmax, Vmax))
+ self.stale = True
+
+ def get_minpos(self):
+ return self.axes.dataLim.minposy
+
+ def get_data_interval(self):
+ 'return the Interval instance for this axis data limits'
+ return self.axes.dataLim.intervaly
+
+ def set_data_interval(self, vmin, vmax, ignore=False):
+ 'set the axis data limits'
+ if ignore:
+ self.axes.dataLim.intervaly = vmin, vmax
+ else:
+ Vmin, Vmax = self.get_data_interval()
+ self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
+ self.stale = True
+
+ def set_default_intervals(self):
+ 'set the default limits for the axis interval if they are not mutated'
+ ymin, ymax = 0., 1.
+ dataMutated = self.axes.dataLim.mutatedy()
+ viewMutated = self.axes.viewLim.mutatedy()
+ if not dataMutated or not viewMutated:
+ if self.converter is not None:
+ info = self.converter.axisinfo(self.units, self)
+ if info.default_limits is not None:
+ valmin, valmax = info.default_limits
+ ymin = self.converter.convert(valmin, self.units, self)
+ ymax = self.converter.convert(valmax, self.units, self)
+ if not dataMutated:
+ self.axes.dataLim.intervaly = ymin, ymax
+ if not viewMutated:
+ self.axes.viewLim.intervaly = ymin, ymax
+ self.stale = True
+
+ def get_tick_space(self):
+ ends = self.axes.transAxes.transform([[0, 0], [0, 1]])
+ length = ((ends[1][1] - ends[0][1]) / self.axes.figure.dpi) * 72.0
+ tick = self._get_tick(True)
+ # Having a spacing of at least 2 just looks good.
+ size = tick.label1.get_size() * 2.0
+ if size > 0:
+ return int(np.floor(length / size))
+ else:
+ return 2**31 - 1
diff --git a/contrib/python/matplotlib/py2/matplotlib/backend_bases.py b/contrib/python/matplotlib/py2/matplotlib/backend_bases.py
new file mode 100644
index 00000000000..136f567ebcd
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backend_bases.py
@@ -0,0 +1,3383 @@
+"""
+Abstract base classes define the primitives that renderers and
+graphics contexts must implement to serve as a matplotlib backend
+
+:class:`RendererBase`
+ An abstract base class to handle drawing/rendering operations.
+
+:class:`FigureCanvasBase`
+ The abstraction layer that separates the
+ :class:`matplotlib.figure.Figure` from the backend specific
+ details like a user interface drawing area
+
+:class:`GraphicsContextBase`
+ An abstract base class that provides color, line styles, etc...
+
+:class:`Event`
+ The base class for all of the matplotlib event
+ handling. Derived classes such as :class:`KeyEvent` and
+ :class:`MouseEvent` store the meta data like keys and buttons
+ pressed, x and y locations in pixel and
+ :class:`~matplotlib.axes.Axes` coordinates.
+
+:class:`ShowBase`
+ The base class for the Show class of each interactive backend;
+ the 'show' callable is then set to Show.__call__, inherited from
+ ShowBase.
+
+:class:`ToolContainerBase`
+ The base class for the Toolbar class of each interactive backend.
+
+:class:`StatusbarBase`
+ The base class for the messaging area.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange
+
+from contextlib import contextmanager
+from functools import partial
+import importlib
+import io
+import os
+import sys
+import time
+import warnings
+from weakref import WeakKeyDictionary
+
+import numpy as np
+
+from matplotlib import (
+ backend_tools as tools, cbook, colors, textpath, tight_bbox, transforms,
+ widgets, get_backend, is_interactive, rcParams)
+from matplotlib._pylab_helpers import Gcf
+from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
+from matplotlib.path import Path
+
+try:
+ from PIL import Image
+ _has_pil = True
+ del Image
+except ImportError:
+ _has_pil = False
+
+
+_default_filetypes = {
+ 'ps': 'Postscript',
+ 'eps': 'Encapsulated Postscript',
+ 'pdf': 'Portable Document Format',
+ 'pgf': 'PGF code for LaTeX',
+ 'png': 'Portable Network Graphics',
+ 'raw': 'Raw RGBA bitmap',
+ 'rgba': 'Raw RGBA bitmap',
+ 'svg': 'Scalable Vector Graphics',
+ 'svgz': 'Scalable Vector Graphics'
+}
+
+
+_default_backends = {
+ 'ps': 'matplotlib.backends.backend_ps',
+ 'eps': 'matplotlib.backends.backend_ps',
+ 'pdf': 'matplotlib.backends.backend_pdf',
+ 'pgf': 'matplotlib.backends.backend_pgf',
+ 'png': 'matplotlib.backends.backend_agg',
+ 'raw': 'matplotlib.backends.backend_agg',
+ 'rgba': 'matplotlib.backends.backend_agg',
+ 'svg': 'matplotlib.backends.backend_svg',
+ 'svgz': 'matplotlib.backends.backend_svg',
+}
+
+
+def register_backend(format, backend, description=None):
+ """
+ Register a backend for saving to a given file format.
+
+ Parameters
+ ----------
+ format : str
+ File extension
+
+ backend : module string or canvas class
+ Backend for handling file output
+
+ description : str, optional
+ Description of the file type. Defaults to an empty string
+ """
+ if description is None:
+ description = ''
+ _default_backends[format] = backend
+ _default_filetypes[format] = description
+
+
+def get_registered_canvas_class(format):
+ """
+ Return the registered default canvas for the given file format.
+ Handles deferred import of required backend.
+ """
+ if format not in _default_backends:
+ return None
+ backend_class = _default_backends[format]
+ if isinstance(backend_class, six.string_types):
+ backend_class = importlib.import_module(backend_class).FigureCanvas
+ _default_backends[format] = backend_class
+ return backend_class
+
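+# Editor's note -- illustrative sketch, not part of the original source.
+# Registering a backend module for a custom file extension and resolving its
+# canvas class lazily; 'my_ext' and 'my_project.backend' are hypothetical
+# names, and the module is assumed to define a FigureCanvas class:
+#
+#     register_backend('my_ext', 'my_project.backend',
+#                      description='My custom format')
+#     canvas_cls = get_registered_canvas_class('my_ext')
+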
+
+class _Backend(object):
+ # A backend can be defined by using the following pattern:
+ #
+ # @_Backend.export
+ # class FooBackend(_Backend):
+ # # override the attributes and methods documented below.
+
+ # The following attributes and methods must be overridden by subclasses.
+
+ # The `FigureCanvas` and `FigureManager` classes must be defined.
+ FigureCanvas = None
+ FigureManager = None
+
+ # The following methods must be left as None for non-interactive backends.
+ # For interactive backends, `trigger_manager_draw` should be a function
+ # taking a manager as argument and triggering a canvas draw, and `mainloop`
+ # should be a function taking no argument and starting the backend main
+ # loop.
+ trigger_manager_draw = None
+ mainloop = None
+
+ # The following methods will be automatically defined and exported, but
+ # can be overridden.
+
+ @classmethod
+ def new_figure_manager(cls, num, *args, **kwargs):
+ """Create a new figure manager instance.
+ """
+ # This import needs to happen here due to circular imports.
+ from matplotlib.figure import Figure
+ fig_cls = kwargs.pop('FigureClass', Figure)
+ fig = fig_cls(*args, **kwargs)
+ return cls.new_figure_manager_given_figure(num, fig)
+
+ @classmethod
+ def new_figure_manager_given_figure(cls, num, figure):
+ """Create a new figure manager instance for the given figure.
+ """
+ canvas = cls.FigureCanvas(figure)
+ manager = cls.FigureManager(canvas, num)
+ return manager
+
+ @classmethod
+ def draw_if_interactive(cls):
+ if cls.trigger_manager_draw is not None and is_interactive():
+ manager = Gcf.get_active()
+ if manager:
+ cls.trigger_manager_draw(manager)
+
+ @classmethod
+ def show(cls, block=None):
+ """Show all figures.
+
+ `show` blocks by calling `mainloop` if *block* is ``True``, or if it
+ is ``None`` and we are neither in IPython's ``%pylab`` mode, nor in
+ `interactive` mode.
+ """
+ if cls.mainloop is None:
+ return
+ managers = Gcf.get_all_fig_managers()
+ if not managers:
+ return
+ for manager in managers:
+ manager.show()
+ if block is None:
+ # Hack: Are we in IPython's pylab mode?
+ from matplotlib import pyplot
+ try:
+ # IPython versions >= 0.10 tack the _needmain attribute onto
+ # pyplot.show, and always set it to False, when in %pylab mode.
+ ipython_pylab = not pyplot.show._needmain
+ except AttributeError:
+ ipython_pylab = False
+ block = not ipython_pylab and not is_interactive()
+ # TODO: The above is a hack to get the WebAgg backend working with
+ # ipython's `%pylab` mode until proper integration is implemented.
+ if get_backend() == "WebAgg":
+ block = True
+ if block:
+ cls.mainloop()
+
+ # This method is the one actually exporting the required methods.
+
+ @staticmethod
+ def export(cls):
+ for name in ["FigureCanvas",
+ "FigureManager",
+ "new_figure_manager",
+ "new_figure_manager_given_figure",
+ "draw_if_interactive",
+ "show"]:
+ setattr(sys.modules[cls.__module__], name, getattr(cls, name))
+
+ # For back-compatibility, generate a shim `Show` class.
+
+ class Show(ShowBase):
+ def mainloop(self):
+ return cls.mainloop()
+
+ setattr(sys.modules[cls.__module__], "Show", Show)
+ return cls
+
+
+class ShowBase(_Backend):
+ """
+ Simple base class to generate a show() callable in backends.
+
+ Subclass must override mainloop() method.
+ """
+
+ def __call__(self, block=None):
+ return self.show(block=block)
+
+
+class RendererBase(object):
+ """An abstract base class to handle drawing/rendering operations.
+
+ The following methods must be implemented in the backend for full
+ functionality (though just implementing :meth:`draw_path` alone would
+ give a highly capable backend):
+
+ * :meth:`draw_path`
+ * :meth:`draw_image`
+ * :meth:`draw_gouraud_triangle`
+
+ The following methods *should* be implemented in the backend for
+ optimization reasons:
+
+ * :meth:`draw_text`
+ * :meth:`draw_markers`
+ * :meth:`draw_path_collection`
+ * :meth:`draw_quad_mesh`
+
+ """
+ def __init__(self):
+ self._texmanager = None
+ self._text2path = textpath.TextToPath()
+
+ def open_group(self, s, gid=None):
+ """
+ Open a grouping element with label *s*. If *gid* is given, use
+ Open a grouping element with label *s*. If *gid* is given, use
+ *gid* as the id of the group. It is currently only used by
+ """
+
+ def close_group(self, s):
+ """
+ Close a grouping element with label *s*.
+ It is currently only used by :mod:`~matplotlib.backends.backend_svg`.
+ """
+
+ def draw_path(self, gc, path, transform, rgbFace=None):
+ """
+ Draws a :class:`~matplotlib.path.Path` instance using the
+ given affine transform.
+ """
+ raise NotImplementedError
+
+ def draw_markers(self, gc, marker_path, marker_trans, path,
+ trans, rgbFace=None):
+ """
+ Draws a marker at each of the vertices in path. This includes
+ all vertices, including control points on curves. To avoid
+ that behavior, those vertices should be removed before calling
+ this function.
+
+ This provides a fallback implementation of draw_markers that
+ makes multiple calls to :meth:`draw_path`. Some backends may
+ want to override this method in order to draw the marker only
+ once and reuse it multiple times.
+
+ Parameters
+ ----------
+ gc : `GraphicsContextBase`
+ The graphics context
+
+ marker_trans : `matplotlib.transforms.Transform`
+ An affine transform applied to the marker.
+
+ trans : `matplotlib.transforms.Transform`
+ An affine transform applied to the path.
+
+ """
+ for vertices, codes in path.iter_segments(trans, simplify=False):
+ if len(vertices):
+ x, y = vertices[-2:]
+ self.draw_path(gc, marker_path,
+ marker_trans +
+ transforms.Affine2D().translate(x, y),
+ rgbFace)
+
+ def draw_path_collection(self, gc, master_transform, paths, all_transforms,
+ offsets, offsetTrans, facecolors, edgecolors,
+ linewidths, linestyles, antialiaseds, urls,
+ offset_position):
+ """
+ Draws a collection of paths selecting drawing properties from
+ the lists *facecolors*, *edgecolors*, *linewidths*,
+ *linestyles* and *antialiaseds*. *offsets* is a list of
+ offsets to apply to each of the paths. The offsets in
+ *offsets* are first transformed by *offsetTrans* before being
+ applied. *offset_position* may be either "screen" or "data"
+ depending on the space that the offsets are in.
+
+ This provides a fallback implementation of
+ :meth:`draw_path_collection` that makes multiple calls to
+ :meth:`draw_path`. Some backends may want to override this in
+ order to render each set of path data only once, and then
+ reference that path multiple times with the different offsets,
+ colors, styles etc. The generator methods
+ :meth:`_iter_collection_raw_paths` and
+ :meth:`_iter_collection` are provided to help with (and
+ standardize) the implementation across backends. It is highly
+ recommended to use those generators, so that changes to the
+ behavior of :meth:`draw_path_collection` can be made globally.
+ """
+ path_ids = []
+ for path, transform in self._iter_collection_raw_paths(
+ master_transform, paths, all_transforms):
+ path_ids.append((path, transforms.Affine2D(transform)))
+
+ for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
+ gc, master_transform, all_transforms, path_ids, offsets,
+ offsetTrans, facecolors, edgecolors, linewidths, linestyles,
+ antialiaseds, urls, offset_position):
+ path, transform = path_id
+ transform = transforms.Affine2D(
+ transform.get_matrix()).translate(xo, yo)
+ self.draw_path(gc0, path, transform, rgbFace)
+
+ def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
+ coordinates, offsets, offsetTrans, facecolors,
+ antialiased, edgecolors):
+ """
+ This provides a fallback implementation of
+ :meth:`draw_quad_mesh` that generates paths and then calls
+ :meth:`draw_path_collection`.
+ """
+
+ from matplotlib.collections import QuadMesh
+ paths = QuadMesh.convert_mesh_to_paths(
+ meshWidth, meshHeight, coordinates)
+
+ if edgecolors is None:
+ edgecolors = facecolors
+ linewidths = np.array([gc.get_linewidth()], float)
+
+ return self.draw_path_collection(
+ gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
+ edgecolors, linewidths, [], [antialiased], [None], 'screen')
+
+ def draw_gouraud_triangle(self, gc, points, colors, transform):
+ """
+ Draw a Gouraud-shaded triangle.
+
+ Parameters
+ ----------
+ points : array_like, shape=(3, 2)
+ Array of (x, y) points for the triangle.
+
+ colors : array_like, shape=(3, 4)
+ RGBA colors for each point of the triangle.
+
+ transform : `matplotlib.transforms.Transform`
+ An affine transform to apply to the points.
+
+ """
+ raise NotImplementedError
+
+ def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
+ transform):
+ """
+ Draws a series of Gouraud triangles.
+
+ Parameters
+ ----------
+ triangles_array : array_like, shape=(N, 3, 2)
+ Array of *N* (x, y) points for the triangles.
+
+ colors_array : array_like, shape=(N, 3, 4)
+ Array of *N* RGBA colors for each point of the triangles.
+
+ transform : `matplotlib.transforms.Transform`
+ An affine transform to apply to the points.
+ """
+ transform = transform.frozen()
+ for tri, col in zip(triangles_array, colors_array):
+ self.draw_gouraud_triangle(gc, tri, col, transform)
+
+ def _iter_collection_raw_paths(self, master_transform, paths,
+ all_transforms):
+ """
+ This is a helper method (along with :meth:`_iter_collection`) to make
+ it easier to write a space-efficient :meth:`draw_path_collection`
+ implementation in a backend.
+
+ This method yields all of the base path/transform
+ combinations, given a master transform, a list of paths and
+ list of transforms.
+
+ The arguments should be exactly what is passed in to
+ :meth:`draw_path_collection`.
+
+ The backend should take each yielded path and transform and
+ create an object that can be referenced (reused) later.
+ """
+ Npaths = len(paths)
+ Ntransforms = len(all_transforms)
+ N = max(Npaths, Ntransforms)
+
+ if Npaths == 0:
+ return
+
+ transform = transforms.IdentityTransform()
+ for i in xrange(N):
+ path = paths[i % Npaths]
+ if Ntransforms:
+ transform = Affine2D(all_transforms[i % Ntransforms])
+ yield path, transform + master_transform
+
+ def _iter_collection_uses_per_path(self, paths, all_transforms,
+ offsets, facecolors, edgecolors):
+ """
+ Compute how many times each raw path object returned by
+ _iter_collection_raw_paths would be used when calling
+ _iter_collection. This is intended for the backend to decide
+ on the tradeoff between using the paths in-line and storing
+ them once and reusing. Rounds up in case the number of uses
+ is not the same for every path.
+ """
+ Npaths = len(paths)
+ if Npaths == 0 or (len(facecolors) == 0 and len(edgecolors) == 0):
+ return 0
+ Npath_ids = max(Npaths, len(all_transforms))
+ N = max(Npath_ids, len(offsets))
+ return (N + Npath_ids - 1) // Npath_ids
+
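+ # Editor's note -- worked example, not part of the original source:
+ # with 3 paths, no per-path transforms and 7 offsets, Npath_ids == 3 and
+ # N == 7, so each raw path is reused (7 + 3 - 1) // 3 == 3 times, i.e.
+ # ceil(7 / 3).
+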
+ def _iter_collection(self, gc, master_transform, all_transforms,
+ path_ids, offsets, offsetTrans, facecolors,
+ edgecolors, linewidths, linestyles,
+ antialiaseds, urls, offset_position):
+ """
+ This is a helper method (along with
+ :meth:`_iter_collection_raw_paths`) to make it easier to write
+ a space-efficient :meth:`draw_path_collection` implementation in a
+ backend.
+
+ This method yields all of the path, offset and graphics
+ context combinations to draw the path collection. The caller
+ should already have looped over the results of
+ :meth:`_iter_collection_raw_paths` to draw this collection.
+
+ The arguments should be the same as that passed into
+ :meth:`draw_path_collection`, with the exception of
+ *path_ids*, which is a list of arbitrary objects that the
+ backend will use to reference one of the paths created in the
+ :meth:`_iter_collection_raw_paths` stage.
+
+ Each yielded result is of the form::
+
+ xo, yo, path_id, gc, rgbFace
+
+ where *xo*, *yo* is an offset; *path_id* is one of the elements of
+ *path_ids*; *gc* is a graphics context and *rgbFace* is a color to
+ use for filling the path.
+ """
+ Ntransforms = len(all_transforms)
+ Npaths = len(path_ids)
+ Noffsets = len(offsets)
+ N = max(Npaths, Noffsets)
+ Nfacecolors = len(facecolors)
+ Nedgecolors = len(edgecolors)
+ Nlinewidths = len(linewidths)
+ Nlinestyles = len(linestyles)
+ Naa = len(antialiaseds)
+ Nurls = len(urls)
+
+ if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
+ return
+ if Noffsets:
+ toffsets = offsetTrans.transform(offsets)
+
+ gc0 = self.new_gc()
+ gc0.copy_properties(gc)
+
+ if Nfacecolors == 0:
+ rgbFace = None
+
+ if Nedgecolors == 0:
+ gc0.set_linewidth(0.0)
+
+ xo, yo = 0, 0
+ for i in xrange(N):
+ path_id = path_ids[i % Npaths]
+ if Noffsets:
+ xo, yo = toffsets[i % Noffsets]
+ if offset_position == 'data':
+ if Ntransforms:
+ transform = (
+ Affine2D(all_transforms[i % Ntransforms]) +
+ master_transform)
+ else:
+ transform = master_transform
+ xo, yo = transform.transform_point((xo, yo))
+ xp, yp = transform.transform_point((0, 0))
+ xo = -(xp - xo)
+ yo = -(yp - yo)
+ if not (np.isfinite(xo) and np.isfinite(yo)):
+ continue
+ if Nfacecolors:
+ rgbFace = facecolors[i % Nfacecolors]
+ if Nedgecolors:
+ if Nlinewidths:
+ gc0.set_linewidth(linewidths[i % Nlinewidths])
+ if Nlinestyles:
+ gc0.set_dashes(*linestyles[i % Nlinestyles])
+ fg = edgecolors[i % Nedgecolors]
+ if len(fg) == 4:
+ if fg[3] == 0.0:
+ gc0.set_linewidth(0)
+ else:
+ gc0.set_foreground(fg)
+ else:
+ gc0.set_foreground(fg)
+ if rgbFace is not None and len(rgbFace) == 4:
+ if rgbFace[3] == 0:
+ rgbFace = None
+ gc0.set_antialiased(antialiaseds[i % Naa])
+ if Nurls:
+ gc0.set_url(urls[i % Nurls])
+
+ yield xo, yo, path_id, gc0, rgbFace
+ gc0.restore()
+
+ def get_image_magnification(self):
+ """
+ Get the factor by which to magnify images passed to :meth:`draw_image`.
+ Allows a backend to have images at a different resolution to other
+ artists.
+ """
+ return 1.0
+
+ def draw_image(self, gc, x, y, im, transform=None):
+ """
+ Draw an RGBA image.
+
+ Parameters
+ ----------
+ gc : `GraphicsContextBase`
+ a graphics context with clipping information.
+
+ x : scalar
+ the distance in physical units (i.e., dots or pixels) from the left
+ hand side of the canvas.
+
+ y : scalar
+ the distance in physical units (i.e., dots or pixels) from the
+ bottom side of the canvas.
+
+ im : array_like, shape=(N, M, 4), dtype=np.uint8
+ An array of RGBA pixels.
+
+ transform : `matplotlib.transforms.Affine2DBase`
+ If and only if the concrete backend is written such that
+ :meth:`option_scale_image` returns ``True``, an affine
+ transformation *may* be passed to :meth:`draw_image`. It takes the
+ form of a :class:`~matplotlib.transforms.Affine2DBase` instance.
+ The translation vector of the transformation is given in physical
+ units (i.e., dots or pixels). Note that the transformation does not
+ override `x` and `y`, and has to be applied *before* translating
+ the result by `x` and `y` (this can be accomplished by adding `x`
+ and `y` to the translation vector defined by `transform`).
+ """
+ raise NotImplementedError
+
+ def option_image_nocomposite(self):
+ """
+ Override this method for renderers that do not necessarily always
+ want to rescale and composite raster images (like SVG, PDF, or PS).
+ """
+ return False
+
+ def option_scale_image(self):
+ """
+ Override this method for renderers that support arbitrary affine
+ transformations in :meth:`draw_image` (most vector backends).
+ """
+ return False
+
+ def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
+ """
+ Draw a TeX string *s* at position (x, y) by rendering it as a path.
+ """
+ self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
+
+ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
+ """
+ Draw the text instance
+
+ Parameters
+ ----------
+ gc : `GraphicsContextBase`
+ the graphics context
+
+ x : scalar
+ the x location of the text in display coords
+
+ y : scalar
+ the y location of the text baseline in display coords
+
+ s : str
+ the text string
+
+ prop : `matplotlib.font_manager.FontProperties`
+ font properties
+
+ angle : scalar
+ the rotation angle in degrees
+
+ mtext : `matplotlib.text.Text`
+ the original text object to be rendered
+
+ Notes
+ -----
+ **backend implementers note**
+
+ When you are trying to determine if you have gotten your bounding box
+ right (which is what enables the text layout/alignment to work
+ properly), it helps to change the line in text.py::
+
+ if 0: bbox_artist(self, renderer)
+
+ to if 1, and then the actual bounding box will be plotted along with
+ your text.
+ """
+
+ self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
+
+ def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
+ """
+ return the text path and transform
+
+ Parameters
+ ----------
+ prop : `matplotlib.font_manager.FontProperties`
+ font property
+
+ s : str
+ text to be converted
+
+ usetex : bool
+ If True, use matplotlib usetex mode.
+
+ ismath : bool
+ If True, use mathtext parser. If "TeX", use *usetex* mode.
+ """
+
+ text2path = self._text2path
+ fontsize = self.points_to_pixels(prop.get_size_in_points())
+
+ if ismath == "TeX":
+ verts, codes = text2path.get_text_path(prop, s, ismath=False,
+ usetex=True)
+ else:
+ verts, codes = text2path.get_text_path(prop, s, ismath=ismath,
+ usetex=False)
+
+ path = Path(verts, codes)
+ angle = np.deg2rad(angle)
+ if self.flipy():
+ transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
+ fontsize / text2path.FONT_SCALE)
+ transform = transform.rotate(angle).translate(x, self.height - y)
+ else:
+ transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
+ fontsize / text2path.FONT_SCALE)
+ transform = transform.rotate(angle).translate(x, y)
+
+ return path, transform
+
+ def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
+ """
+ Draw the text by converting it to paths using the textpath module.
+
+ Parameters
+ ----------
+ prop : `matplotlib.font_manager.FontProperties`
+ font property
+
+ s : str
+ text to be converted
+
+ usetex : bool
+ If True, use matplotlib usetex mode.
+
+ ismath : bool
+ If True, use mathtext parser. If "TeX", use *usetex* mode.
+ """
+ path, transform = self._get_text_path_transform(
+ x, y, s, prop, angle, ismath)
+ color = gc.get_rgb()
+
+ gc.set_linewidth(0.0)
+ self.draw_path(gc, path, transform, rgbFace=color)
+
+ def get_text_width_height_descent(self, s, prop, ismath):
+ """
+ Get the width, height, and descent (offset from the bottom
+ to the baseline), in display coords, of the string *s* with
+ :class:`~matplotlib.font_manager.FontProperties` *prop*
+ """
+ if ismath == 'TeX':
+ # todo: handle props
+ size = prop.get_size_in_points()
+ texmanager = self._text2path.get_texmanager()
+ fontsize = prop.get_size_in_points()
+ w, h, d = texmanager.get_text_width_height_descent(
+ s, fontsize, renderer=self)
+ return w, h, d
+
+ dpi = self.points_to_pixels(72)
+ if ismath:
+ dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
+ return dims[0:3] # return width, height, descent
+
+ flags = self._text2path._get_hinting_flag()
+ font = self._text2path._get_font(prop)
+ size = prop.get_size_in_points()
+ font.set_size(size, dpi)
+ # the width and height of unrotated string
+ font.set_text(s, 0.0, flags=flags)
+ w, h = font.get_width_height()
+ d = font.get_descent()
+ w /= 64.0 # convert from subpixels
+ h /= 64.0
+ d /= 64.0
+ return w, h, d
+
+ def flipy(self):
+ """
+ Return True if small y values are at the top of the canvas (i.e.,
+ y increases downward).  Used only for drawing text
+ (:mod:`matplotlib.text`) and images (:mod:`matplotlib.image`).
+ """
+ return True
+
+ def get_canvas_width_height(self):
+ 'return the canvas width and height in display coords'
+ return 1, 1
+
+ def get_texmanager(self):
+ """
+ return the :class:`matplotlib.texmanager.TexManager` instance
+ """
+ if self._texmanager is None:
+ from matplotlib.texmanager import TexManager
+ self._texmanager = TexManager()
+ return self._texmanager
+
+ def new_gc(self):
+ """
+ Return an instance of a :class:`GraphicsContextBase`
+ """
+ return GraphicsContextBase()
+
+ def points_to_pixels(self, points):
+ """
+ Convert points to display units
+
+ You need to override this function (unless your backend
+ doesn't have a dpi, e.g., postscript or svg). Some imaging
+ systems assume some value for pixels per inch::
+
+ points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
+
+ Parameters
+ ----------
+ points : scalar or array_like
+ a float or a numpy array of float
+
+ Returns
+ -------
+ Points converted to pixels
+ """
+ return points
+
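+ # Editor's note -- illustrative sketch, not part of the original source:
+ # a raster backend with a dpi attribute would typically override this
+ # along the lines of
+ #
+ #     def points_to_pixels(self, points):
+ #         return points * self.dpi / 72.0
+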
+ def strip_math(self, s):
+ return cbook.strip_math(s)
+
+ def start_rasterizing(self):
+ """
+ Used in MixedModeRenderer. Switch to the raster renderer.
+ """
+
+ def stop_rasterizing(self):
+ """
+ Used in MixedModeRenderer. Switch back to the vector renderer
+ and draw the contents of the raster renderer as an image on
+ the vector renderer.
+ """
+
+ def start_filter(self):
+ """
+ Used in AggRenderer. Switch to a temporary renderer for image
+ filtering effects.
+ """
+
+ def stop_filter(self, filter_func):
+ """
+ Used in AggRenderer. Switch back to the original renderer.
+ The contents of the temporary renderer is processed with the
+ *filter_func* and is drawn on the original renderer as an
+ image.
+ """
+
+
+class GraphicsContextBase(object):
+ """
+ An abstract base class that provides color, line styles, etc...
+ """
+
+ def __init__(self):
+ self._alpha = 1.0
+ self._forced_alpha = False # if True, _alpha overrides A from RGBA
+ self._antialiased = 1 # use 0,1 not True, False for extension code
+ self._capstyle = 'butt'
+ self._cliprect = None
+ self._clippath = None
+ self._dashes = None, None
+ self._joinstyle = 'round'
+ self._linestyle = 'solid'
+ self._linewidth = 1
+ self._rgb = (0.0, 0.0, 0.0, 1.0)
+ self._hatch = None
+ self._hatch_color = colors.to_rgba(rcParams['hatch.color'])
+ self._hatch_linewidth = rcParams['hatch.linewidth']
+ self._url = None
+ self._gid = None
+ self._snap = None
+ self._sketch = None
+
+ def copy_properties(self, gc):
+ 'Copy properties from gc to self'
+ self._alpha = gc._alpha
+ self._forced_alpha = gc._forced_alpha
+ self._antialiased = gc._antialiased
+ self._capstyle = gc._capstyle
+ self._cliprect = gc._cliprect
+ self._clippath = gc._clippath
+ self._dashes = gc._dashes
+ self._joinstyle = gc._joinstyle
+ self._linestyle = gc._linestyle
+ self._linewidth = gc._linewidth
+ self._rgb = gc._rgb
+ self._hatch = gc._hatch
+ self._hatch_color = gc._hatch_color
+ self._hatch_linewidth = gc._hatch_linewidth
+ self._url = gc._url
+ self._gid = gc._gid
+ self._snap = gc._snap
+ self._sketch = gc._sketch
+
+ def restore(self):
+ """
+ Restore the graphics context from the stack - needed only
+ for backends that save graphics contexts on a stack
+ """
+
+ def get_alpha(self):
+ """
+ Return the alpha value used for blending - not supported on
+ all backends
+ """
+ return self._alpha
+
+ def get_antialiased(self):
+ "Return true if the object should try to do antialiased rendering"
+ return self._antialiased
+
+ def get_capstyle(self):
+ """
+ Return the capstyle as a string in ('butt', 'round', 'projecting')
+ """
+ return self._capstyle
+
+ def get_clip_rectangle(self):
+ """
+ Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox`
+ instance
+ """
+ return self._cliprect
+
+ def get_clip_path(self):
+ """
+ Return the clip path in the form (path, transform), where path
+ is a :class:`~matplotlib.path.Path` instance, and transform is
+ an affine transform to apply to the path before clipping.
+ """
+ if self._clippath is not None:
+ return self._clippath.get_transformed_path_and_affine()
+ return None, None
+
+ def get_dashes(self):
+ """
+ Return the dash information as an offset dashlist tuple.
+
+ The dash list is an even-sized list that gives the ink-on, ink-off
+ lengths in pixels.
+
+ See p. 107 of the PostScript `BLUEBOOK
+ <https://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
+ for more info.
+
+ Default value is None
+ """
+ return self._dashes
+
+ def get_forced_alpha(self):
+ """
+ Return whether the value given by get_alpha() should be used to
+ override any other alpha-channel values.
+ """
+ return self._forced_alpha
+
+ def get_joinstyle(self):
+ """
+ Return the line join style as one of ('miter', 'round', 'bevel')
+ """
+ return self._joinstyle
+
+ @cbook.deprecated("2.1")
+ def get_linestyle(self):
+ """
+ Return the linestyle: one of ('solid', 'dashed', 'dashdot',
+ 'dotted').
+ """
+ return self._linestyle
+
+ def get_linewidth(self):
+ """
+ Return the line width in points as a scalar
+ """
+ return self._linewidth
+
+ def get_rgb(self):
+ """
+ returns a tuple of three or four floats from 0-1.
+ """
+ return self._rgb
+
+ def get_url(self):
+ """
+ returns a url if one is set, None otherwise
+ """
+ return self._url
+
+ def get_gid(self):
+ """
+ Return the object identifier if one is set, None otherwise.
+ """
+ return self._gid
+
+ def get_snap(self):
+ """
+ returns the snap setting which may be:
+
+ * True: snap vertices to the nearest pixel center
+
+ * False: leave vertices as-is
+
+ * None: (auto) If the path contains only rectilinear line
+ segments, round to the nearest pixel center
+ """
+ return self._snap
+
+ def set_alpha(self, alpha):
+ """
+ Set the alpha value used for blending - not supported on all backends.
+ If ``alpha=None`` (the default), the alpha components of the
+ foreground and fill colors will be used to set their respective
+ transparencies (where applicable); otherwise, ``alpha`` will override
+ them.
+ """
+ if alpha is not None:
+ self._alpha = alpha
+ self._forced_alpha = True
+ else:
+ self._alpha = 1.0
+ self._forced_alpha = False
+ self.set_foreground(self._rgb, isRGBA=True)
+
+ def set_antialiased(self, b):
+ """
+ True if object should be drawn with antialiased rendering
+ """
+
+ # use 0, 1 to make life easier on extension code trying to read the gc
+ if b:
+ self._antialiased = 1
+ else:
+ self._antialiased = 0
+
+ def set_capstyle(self, cs):
+ """
+ Set the capstyle as a string in ('butt', 'round', 'projecting')
+ """
+ if cs in ('butt', 'round', 'projecting'):
+ self._capstyle = cs
+ else:
+ raise ValueError('Unrecognized cap style. Found %s' % cs)
+
+ def set_clip_rectangle(self, rectangle):
+ """
+ Set the clip rectangle with sequence (left, bottom, width, height)
+ """
+ self._cliprect = rectangle
+
+ def set_clip_path(self, path):
+ """
+ Set the clip path and transformation. Path should be a
+ :class:`~matplotlib.transforms.TransformedPath` instance.
+ """
+ if (path is not None
+ and not isinstance(path, transforms.TransformedPath)):
+ raise ValueError("Path should be a "
+ "matplotlib.transforms.TransformedPath instance")
+ self._clippath = path
+
+ def set_dashes(self, dash_offset, dash_list):
+ """
+ Set the dash style for the gc.
+
+ Parameters
+ ----------
+ dash_offset : float
+ is the offset (usually 0).
+
+ dash_list : array_like
+ specifies the on-off sequence as points.
+ ``(None, None)`` specifies a solid line
+
+ """
+ if dash_list is not None:
+ dl = np.asarray(dash_list)
+ if np.any(dl < 0.0):
+ raise ValueError(
+ "All values in the dash list must be non-negative")
+ self._dashes = dash_offset, dash_list
+
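+    # Illustrative usage of set_dashes above (not part of the original source;
+    # assumes a GraphicsContextBase instance ``gc``):
+    #
+    #     gc.set_dashes(0, [3.0, 1.0])    # 3 pt on, 1 pt off, no offset
+    #     gc.set_dashes(None, None)       # back to a solid line
+    #     gc.get_dashes()                 # -> (None, None)
+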
+ def set_foreground(self, fg, isRGBA=False):
+ """
+        Set the foreground color.  fg can be a MATLAB format string, an
+        html hex color string, an rgb or rgba unit tuple, or a float between 0
+ and 1. In the latter case, grayscale is used.
+
+ If you know fg is rgba, set ``isRGBA=True`` for efficiency.
+ """
+ if self._forced_alpha and isRGBA:
+ self._rgb = fg[:3] + (self._alpha,)
+ elif self._forced_alpha:
+ self._rgb = colors.to_rgba(fg, self._alpha)
+ elif isRGBA:
+ self._rgb = fg
+ else:
+ self._rgb = colors.to_rgba(fg)
+
+ def set_joinstyle(self, js):
+ """
+ Set the join style to be one of ('miter', 'round', 'bevel')
+ """
+ if js in ('miter', 'round', 'bevel'):
+ self._joinstyle = js
+ else:
+ raise ValueError('Unrecognized join style. Found %s' % js)
+
+ def set_linewidth(self, w):
+ """
+ Set the linewidth in points
+ """
+ self._linewidth = float(w)
+
+ @cbook.deprecated("2.1")
+ def set_linestyle(self, style):
+ """
+ Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
+ 'dotted'). These are defined in the rcParams
+ `lines.dashed_pattern`, `lines.dashdot_pattern` and
+ `lines.dotted_pattern`. One may also specify customized dash
+ styles by providing a tuple of (offset, dash pairs).
+ """
+ self._linestyle = style
+
+ def set_url(self, url):
+ """
+ Sets the url for links in compatible backends
+ """
+ self._url = url
+
+ def set_gid(self, id):
+ """
+        Set the object identifier (gid).
+ """
+ self._gid = id
+
+ def set_snap(self, snap):
+ """
+        Set the snap setting, which may be one of:
+
+ * True: snap vertices to the nearest pixel center
+
+ * False: leave vertices as-is
+
+ * None: (auto) If the path contains only rectilinear line
+ segments, round to the nearest pixel center
+ """
+ self._snap = snap
+
+ def set_hatch(self, hatch):
+ """
+ Sets the hatch style for filling
+ """
+ self._hatch = hatch
+
+ def get_hatch(self):
+ """
+ Gets the current hatch style
+ """
+ return self._hatch
+
+ def get_hatch_path(self, density=6.0):
+ """
+ Returns a Path for the current hatch.
+ """
+ hatch = self.get_hatch()
+ if hatch is None:
+ return None
+ return Path.hatch(hatch, density)
+
+ def get_hatch_color(self):
+ """
+ Gets the color to use for hatching.
+ """
+ return self._hatch_color
+
+ def set_hatch_color(self, hatch_color):
+ """
+        Set the color to use for hatching.
+ """
+ self._hatch_color = hatch_color
+
+ def get_hatch_linewidth(self):
+ """
+ Gets the linewidth to use for hatching.
+ """
+ return self._hatch_linewidth
+
+ def get_sketch_params(self):
+ """
+ Returns the sketch parameters for the artist.
+
+ Returns
+ -------
+ sketch_params : tuple or `None`
+
+ A 3-tuple with the following elements:
+
+ * `scale`: The amplitude of the wiggle perpendicular to the
+ source line.
+
+ * `length`: The length of the wiggle along the line.
+
+ * `randomness`: The scale factor by which the length is
+ shrunken or expanded.
+
+ May return `None` if no sketch parameters were set.
+ """
+ return self._sketch
+
+ def set_sketch_params(self, scale=None, length=None, randomness=None):
+ """
+ Sets the sketch parameters.
+
+ Parameters
+ ----------
+
+ scale : float, optional
+ The amplitude of the wiggle perpendicular to the source
+            line, in pixels.  If scale is `None`, or not provided, no
+            sketch filter will be applied.
+
+ length : float, optional
+ The length of the wiggle along the line, in pixels
+ (default 128)
+
+ randomness : float, optional
+ The scale factor by which the length is shrunken or
+ expanded (default 16)
+ """
+ self._sketch = (
+ None if scale is None
+ else (scale, length or 128., randomness or 16.))
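+    # Illustrative usage of set_sketch_params above (not part of the original
+    # source; assumes a GraphicsContextBase instance ``gc``):
+    #
+    #     gc.set_sketch_params(scale=2.0, length=100.0, randomness=10.0)
+    #     gc.get_sketch_params()   # -> (2.0, 100.0, 10.0)
+    #     gc.set_sketch_params()   # scale=None disables the sketch filter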
+
+
+class TimerBase(object):
+ '''
+    A base class for providing timer events, useful for things like animations.
+ Backends need to implement a few specific methods in order to use their
+ own timing mechanisms so that the timer events are integrated into their
+ event loops.
+
+ Mandatory functions that must be implemented:
+
+ * `_timer_start`: Contains backend-specific code for starting
+ the timer
+
+ * `_timer_stop`: Contains backend-specific code for stopping
+ the timer
+
+ Optional overrides:
+
+ * `_timer_set_single_shot`: Code for setting the timer to
+ single shot operating mode, if supported by the timer
+ object. If not, the `Timer` class itself will store the flag
+ and the `_on_timer` method should be overridden to support
+ such behavior.
+
+ * `_timer_set_interval`: Code for setting the interval on the
+ timer, if there is a method for doing so on the timer
+ object.
+
+ * `_on_timer`: This is the internal function that any timer
+ object should call, which will handle the task of running
+ all callbacks that have been set.
+
+ Attributes
+ ----------
+ interval : scalar
+ The time between timer events in milliseconds. Default is 1000 ms.
+
+ single_shot : bool
+ Boolean flag indicating whether this timer should operate as single
+ shot (run once and then stop). Defaults to `False`.
+
+ callbacks : List[Tuple[callable, Tuple, Dict]]
+ Stores list of (func, args, kwargs) tuples that will be called upon
+ timer events. This list can be manipulated directly, or the
+ functions `add_callback` and `remove_callback` can be used.
+
+ '''
+ def __init__(self, interval=None, callbacks=None):
+        # Initialize empty callbacks list and set up default settings if necessary
+ if callbacks is None:
+ self.callbacks = []
+ else:
+ self.callbacks = callbacks[:] # Create a copy
+
+ if interval is None:
+ self._interval = 1000
+ else:
+ self._interval = interval
+
+ self._single = False
+
+ # Default attribute for holding the GUI-specific timer object
+ self._timer = None
+
+ def __del__(self):
+ 'Need to stop timer and possibly disconnect timer.'
+ self._timer_stop()
+
+ def start(self, interval=None):
+ '''
+ Start the timer object. `interval` is optional and will be used
+ to reset the timer interval first if provided.
+ '''
+ if interval is not None:
+ self._set_interval(interval)
+ self._timer_start()
+
+ def stop(self):
+ '''
+ Stop the timer.
+ '''
+ self._timer_stop()
+
+ def _timer_start(self):
+ pass
+
+ def _timer_stop(self):
+ pass
+
+ def _get_interval(self):
+ return self._interval
+
+ def _set_interval(self, interval):
+ # Force to int since none of the backends actually support fractional
+        # milliseconds, and some raise errors or give warnings.
+ interval = int(interval)
+ self._interval = interval
+ self._timer_set_interval()
+
+ interval = property(_get_interval, _set_interval)
+
+ def _get_single_shot(self):
+ return self._single
+
+ def _set_single_shot(self, ss=True):
+ self._single = ss
+ self._timer_set_single_shot()
+
+ single_shot = property(_get_single_shot, _set_single_shot)
+
+ def add_callback(self, func, *args, **kwargs):
+ '''
+ Register `func` to be called by timer when the event fires. Any
+ additional arguments provided will be passed to `func`.
+ '''
+ self.callbacks.append((func, args, kwargs))
+
+ def remove_callback(self, func, *args, **kwargs):
+ '''
+ Remove `func` from list of callbacks. `args` and `kwargs` are optional
+ and used to distinguish between copies of the same function registered
+ to be called with different arguments.
+ '''
+ if args or kwargs:
+ self.callbacks.remove((func, args, kwargs))
+ else:
+ funcs = [c[0] for c in self.callbacks]
+ if func in funcs:
+ self.callbacks.pop(funcs.index(func))
+
+ def _timer_set_interval(self):
+ """Used to set interval on underlying timer object."""
+
+ def _timer_set_single_shot(self):
+ """Used to set single shot on underlying timer object."""
+
+ def _on_timer(self):
+ '''
+        Run all functions that have been registered as callbacks. Functions
+ can return False (or 0) if they should not be called any more. If there
+ are no callbacks, the timer is automatically stopped.
+ '''
+ for func, args, kwargs in self.callbacks:
+ ret = func(*args, **kwargs)
+ # docstring above explains why we use `if ret == 0` here,
+ # instead of `if not ret`.
+ # This will also catch `ret == False` as `False == 0`
+ # but does not annoy the linters
+ # https://docs.python.org/3/library/stdtypes.html#boolean-values
+ if ret == 0:
+ self.callbacks.remove((func, args, kwargs))
+
+ if len(self.callbacks) == 0:
+ self.stop()
+
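+# Illustrative usage of TimerBase (not part of the original source; in
+# practice one obtains a backend-specific subclass via
+# FigureCanvasBase.new_timer rather than instantiating TimerBase directly):
+#
+#     def tick():
+#         print('tick')                               # called every interval
+#
+#     timer = fig.canvas.new_timer(interval=500)      # assumes a Figure `fig`
+#     timer.add_callback(tick)
+#     timer.start()
+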
+
+class Event(object):
+ """
+ A matplotlib event. Attach additional attributes as defined in
+ :meth:`FigureCanvasBase.mpl_connect`. The following attributes
+ are defined and shown with their default values
+
+ Attributes
+ ----------
+ name : str
+ the event name
+
+ canvas : `FigureCanvasBase`
+ the backend-specific canvas instance generating the event
+
+ guiEvent
+ the GUI event that triggered the matplotlib event
+
+ """
+ def __init__(self, name, canvas, guiEvent=None):
+ self.name = name
+ self.canvas = canvas
+ self.guiEvent = guiEvent
+
+
+@cbook.deprecated("2.1")
+class IdleEvent(Event):
+ """
+ An event triggered by the GUI backend when it is idle -- useful
+ for passive animation
+ """
+
+
+class DrawEvent(Event):
+ """
+ An event triggered by a draw operation on the canvas
+
+    In most backends, callbacks subscribed to this event will be
+ fired after the rendering is complete but before the screen is
+ updated. Any extra artists drawn to the canvas's renderer will
+ be reflected without an explicit call to ``blit``.
+
+ .. warning ::
+
+ Calling ``canvas.draw`` and ``canvas.blit`` in these callbacks may
+ not be safe with all backends and may cause infinite recursion.
+
+ In addition to the :class:`Event` attributes, the following event
+ attributes are defined:
+
+ Attributes
+ ----------
+ renderer : `RendererBase`
+ the renderer for the draw event
+
+ """
+ def __init__(self, name, canvas, renderer):
+ Event.__init__(self, name, canvas)
+ self.renderer = renderer
+
+
+class ResizeEvent(Event):
+ """
+ An event triggered by a canvas resize
+
+ In addition to the :class:`Event` attributes, the following event
+ attributes are defined:
+
+ Attributes
+ ----------
+ width : scalar
+ width of the canvas in pixels
+
+ height : scalar
+ height of the canvas in pixels
+
+ """
+ def __init__(self, name, canvas):
+ Event.__init__(self, name, canvas)
+ self.width, self.height = canvas.get_width_height()
+
+
+class CloseEvent(Event):
+ """
+ An event triggered by a figure being closed
+
+ """
+ def __init__(self, name, canvas, guiEvent=None):
+ Event.__init__(self, name, canvas, guiEvent)
+
+
+class LocationEvent(Event):
+ """
+ An event that has a screen location
+
+ The following additional attributes are defined and shown with
+ their default values.
+
+ In addition to the :class:`Event` attributes, the following
+ event attributes are defined:
+
+ Attributes
+ ----------
+ x : scalar
+ x position - pixels from left of canvas
+
+ y : scalar
+ y position - pixels from bottom of canvas
+
+    inaxes : `~matplotlib.axes.Axes` or None
+ the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
+
+ xdata : scalar
+ x coord of mouse in data coords
+
+ ydata : scalar
+ y coord of mouse in data coords
+
+ """
+ x = None # x position - pixels from left of canvas
+    y = None       # y position - pixels from bottom of canvas
+    inaxes = None  # the Axes instance if mouse is over axes
+ xdata = None # x coord of mouse in data coords
+ ydata = None # y coord of mouse in data coords
+
+ # the last event that was triggered before this one
+ lastevent = None
+
+ def __init__(self, name, canvas, x, y, guiEvent=None):
+ """
+ *x*, *y* in figure coords, 0,0 = bottom, left
+ """
+ Event.__init__(self, name, canvas, guiEvent=guiEvent)
+ self.x = x
+ self.y = y
+
+ if x is None or y is None:
+ # cannot check if event was in axes if no x,y info
+ self.inaxes = None
+ self._update_enter_leave()
+ return
+
+ # Find all axes containing the mouse
+ if self.canvas.mouse_grabber is None:
+ axes_list = [a for a in self.canvas.figure.get_axes()
+ if a.in_axes(self)]
+ else:
+ axes_list = [self.canvas.mouse_grabber]
+
+ if axes_list:
+ self.inaxes = cbook._topmost_artist(axes_list)
+ try:
+ trans = self.inaxes.transData.inverted()
+ xdata, ydata = trans.transform_point((x, y))
+ except ValueError:
+ self.xdata = None
+ self.ydata = None
+ else:
+ self.xdata = xdata
+ self.ydata = ydata
+ else:
+ self.inaxes = None
+
+ self._update_enter_leave()
+
+ def _update_enter_leave(self):
+ 'process the figure/axes enter leave events'
+ if LocationEvent.lastevent is not None:
+ last = LocationEvent.lastevent
+ if last.inaxes != self.inaxes:
+ # process axes enter/leave events
+ try:
+ if last.inaxes is not None:
+ last.canvas.callbacks.process('axes_leave_event', last)
+ except:
+ pass
+ # See ticket 2901582.
+ # I think this is a valid exception to the rule
+ # against catching all exceptions; if anything goes
+ # wrong, we simply want to move on and process the
+ # current event.
+ if self.inaxes is not None:
+ self.canvas.callbacks.process('axes_enter_event', self)
+
+ else:
+ # process a figure enter event
+ if self.inaxes is not None:
+ self.canvas.callbacks.process('axes_enter_event', self)
+
+ LocationEvent.lastevent = self
+
+
+class MouseEvent(LocationEvent):
+ """
+ A mouse event ('button_press_event',
+ 'button_release_event',
+ 'scroll_event',
+ 'motion_notify_event').
+
+ In addition to the :class:`Event` and :class:`LocationEvent`
+ attributes, the following attributes are defined:
+
+ Attributes
+ ----------
+ button : None, scalar, or str
+ button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
+ for scroll events). Note that in the nbagg backend, both the
+ middle and right clicks return 3 since right clicking will bring
+ up the context menu in some browsers.
+
+ key : None, or str
+ the key depressed when the mouse event triggered (see
+ :class:`KeyEvent`)
+
+ step : scalar
+ number of scroll steps (positive for 'up', negative for 'down')
+
+ Examples
+ --------
+ Usage::
+
+ def on_press(event):
+ print('you pressed', event.button, event.xdata, event.ydata)
+
+ cid = fig.canvas.mpl_connect('button_press_event', on_press)
+
+ """
+ x = None # x position - pixels from left of canvas
+    y = None       # y position - pixels from bottom of canvas
+ button = None # button pressed None, 1, 2, 3
+ dblclick = None # whether or not the event is the result of a double click
+    inaxes = None  # the Axes instance if mouse is over axes
+ xdata = None # x coord of mouse in data coords
+ ydata = None # y coord of mouse in data coords
+ step = None # scroll steps for scroll events
+
+ def __init__(self, name, canvas, x, y, button=None, key=None,
+ step=0, dblclick=False, guiEvent=None):
+ """
+ x, y in figure coords, 0,0 = bottom, left
+ button pressed None, 1, 2, 3, 'up', 'down'
+ """
+ LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
+ self.button = button
+ self.key = key
+ self.step = step
+ self.dblclick = dblclick
+
+ def __str__(self):
+ return ("MPL MouseEvent: xy=(%d,%d) xydata=(%s,%s) button=%s " +
+ "dblclick=%s inaxes=%s") % (self.x, self.y, self.xdata,
+ self.ydata, self.button,
+ self.dblclick, self.inaxes)
+
+
+class PickEvent(Event):
+ """
+    A pick event, fired when the user picks a location on the canvas
+ sufficiently close to an artist.
+
+ Attrs: all the :class:`Event` attributes plus
+
+ Attributes
+ ----------
+ mouseevent : `MouseEvent`
+ the mouse event that generated the pick
+
+ artist : `matplotlib.artist.Artist`
+ the picked artist
+
+ other
+ extra class dependent attrs -- e.g., a
+ :class:`~matplotlib.lines.Line2D` pick may define different
+ extra attributes than a
+ :class:`~matplotlib.collections.PatchCollection` pick event
+
+ Examples
+ --------
+ Usage::
+
+        ax.plot(np.random.rand(100), 'o', picker=5)  # 5 points tolerance
+
+ def on_pick(event):
+ line = event.artist
+ xdata, ydata = line.get_data()
+ ind = event.ind
+ print('on pick line:', np.array([xdata[ind], ydata[ind]]).T)
+
+ cid = fig.canvas.mpl_connect('pick_event', on_pick)
+
+ """
+ def __init__(self, name, canvas, mouseevent, artist,
+ guiEvent=None, **kwargs):
+ Event.__init__(self, name, canvas, guiEvent)
+ self.mouseevent = mouseevent
+ self.artist = artist
+ self.__dict__.update(kwargs)
+
+
+class KeyEvent(LocationEvent):
+ """
+ A key event (key press, key release).
+
+ Attach additional attributes as defined in
+ :meth:`FigureCanvasBase.mpl_connect`.
+
+ In addition to the :class:`Event` and :class:`LocationEvent`
+ attributes, the following attributes are defined:
+
+ Attributes
+ ----------
+ key : None or str
+        the key(s) pressed. Could be **None**, a single case-sensitive
+        ASCII character ("g", "G", "#", etc.), a special key
+ ("control", "shift", "f1", "up", etc.) or a
+ combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G").
+
+ Notes
+ -----
+ Modifier keys will be prefixed to the pressed key and will be in the order
+ "ctrl", "alt", "super". The exception to this rule is when the pressed key
+ is itself a modifier key, therefore "ctrl+alt" and "alt+control" can both
+ be valid key values.
+
+ Examples
+ --------
+ Usage::
+
+ def on_key(event):
+ print('you pressed', event.key, event.xdata, event.ydata)
+
+ cid = fig.canvas.mpl_connect('key_press_event', on_key)
+
+ """
+ def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
+ LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
+ self.key = key
+
+
+class FigureCanvasBase(object):
+ """
+ The canvas the figure renders into.
+
+ Public attributes
+
+ Attributes
+ ----------
+ figure : `matplotlib.figure.Figure`
+ A high-level figure instance
+
+ """
+ events = [
+ 'resize_event',
+ 'draw_event',
+ 'key_press_event',
+ 'key_release_event',
+ 'button_press_event',
+ 'button_release_event',
+ 'scroll_event',
+ 'motion_notify_event',
+ 'pick_event',
+ 'idle_event',
+ 'figure_enter_event',
+ 'figure_leave_event',
+ 'axes_enter_event',
+ 'axes_leave_event',
+ 'close_event'
+ ]
+
+ supports_blit = True
+ fixed_dpi = None
+
+ filetypes = _default_filetypes
+ if _has_pil:
+ # JPEG support
+ register_backend('jpg', 'matplotlib.backends.backend_agg',
+ 'Joint Photographic Experts Group')
+ register_backend('jpeg', 'matplotlib.backends.backend_agg',
+ 'Joint Photographic Experts Group')
+ # TIFF support
+ register_backend('tif', 'matplotlib.backends.backend_agg',
+ 'Tagged Image File Format')
+ register_backend('tiff', 'matplotlib.backends.backend_agg',
+ 'Tagged Image File Format')
+
+ def __init__(self, figure):
+ self._is_idle_drawing = True
+ self._is_saving = False
+ figure.set_canvas(self)
+ self.figure = figure
+ # a dictionary from event name to a dictionary that maps cid->func
+ self.callbacks = cbook.CallbackRegistry()
+ self.widgetlock = widgets.LockDraw()
+ self._button = None # the button pressed
+ self._key = None # the key pressed
+ self._lastx, self._lasty = None, None
+ self.button_pick_id = self.mpl_connect('button_press_event', self.pick)
+ self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)
+ self.mouse_grabber = None # the axes currently grabbing mouse
+ self.toolbar = None # NavigationToolbar2 will set me
+ self._is_idle_drawing = False
+
+ @contextmanager
+ def _idle_draw_cntx(self):
+ self._is_idle_drawing = True
+ yield
+ self._is_idle_drawing = False
+
+ def is_saving(self):
+ """
+ Returns whether the renderer is in the process of saving
+ to a file, rather than rendering for an on-screen buffer.
+ """
+ return self._is_saving
+
+ @cbook.deprecated("2.2")
+ def onRemove(self, ev):
+ """
+ Mouse event processor which removes the top artist
+        under the cursor. Connect this to the 'button_press_event'
+        using::
+
+            canvas.mpl_connect('button_press_event', canvas.onRemove)
+ """
+ # Find the top artist under the cursor
+ under = cbook._topmost_artist(self.figure.hitlist(ev))
+ h = None
+ if under:
+ h = under[-1]
+
+ # Try deleting that artist, or its parent if you
+ # can't delete the artist
+ while h:
+ if h.remove():
+ self.draw_idle()
+ break
+ parent = None
+ for p in under:
+ if h in p.get_children():
+ parent = p
+ break
+ h = parent
+
+ def pick(self, mouseevent):
+ if not self.widgetlock.locked():
+ self.figure.pick(mouseevent)
+
+ def blit(self, bbox=None):
+ """Blit the canvas in bbox (default entire canvas)."""
+
+ def resize(self, w, h):
+ """Set the canvas size in pixels."""
+
+ def draw_event(self, renderer):
+ """Pass a `DrawEvent` to all functions connected to ``draw_event``."""
+ s = 'draw_event'
+ event = DrawEvent(s, self, renderer)
+ self.callbacks.process(s, event)
+
+ def resize_event(self):
+ """Pass a `ResizeEvent` to all functions connected to ``resize_event``.
+ """
+ s = 'resize_event'
+ event = ResizeEvent(s, self)
+ self.callbacks.process(s, event)
+ self.draw_idle()
+
+ def close_event(self, guiEvent=None):
+ """Pass a `CloseEvent` to all functions connected to ``close_event``.
+ """
+ s = 'close_event'
+ try:
+ event = CloseEvent(s, self, guiEvent=guiEvent)
+ self.callbacks.process(s, event)
+ except (TypeError, AttributeError):
+ pass
+ # Suppress the TypeError when the python session is being killed.
+ # It may be that a better solution would be a mechanism to
+ # disconnect all callbacks upon shutdown.
+ # AttributeError occurs on OSX with qt4agg upon exiting
+ # with an open window; 'callbacks' attribute no longer exists.
+
+ def key_press_event(self, key, guiEvent=None):
+ """Pass a `KeyEvent` to all functions connected to ``key_press_event``.
+ """
+ self._key = key
+ s = 'key_press_event'
+ event = KeyEvent(
+ s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
+ self.callbacks.process(s, event)
+
+ def key_release_event(self, key, guiEvent=None):
+ """
+ Pass a `KeyEvent` to all functions connected to ``key_release_event``.
+ """
+ s = 'key_release_event'
+ event = KeyEvent(
+ s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
+ self.callbacks.process(s, event)
+ self._key = None
+
+ def pick_event(self, mouseevent, artist, **kwargs):
+ """
+ This method will be called by artists who are picked and will
+        fire off :class:`PickEvent` callbacks to registered listeners
+ """
+ s = 'pick_event'
+ event = PickEvent(s, self, mouseevent, artist,
+ guiEvent=mouseevent.guiEvent,
+ **kwargs)
+ self.callbacks.process(s, event)
+
+ def scroll_event(self, x, y, step, guiEvent=None):
+ """
+ Backend derived classes should call this function on any
+ scroll wheel event. x,y are the canvas coords: 0,0 is lower,
+ left. button and key are as defined in MouseEvent.
+
+        This method will call all functions connected to the
+ 'scroll_event' with a :class:`MouseEvent` instance.
+ """
+ if step >= 0:
+ self._button = 'up'
+ else:
+ self._button = 'down'
+ s = 'scroll_event'
+ mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
+ step=step, guiEvent=guiEvent)
+ self.callbacks.process(s, mouseevent)
+
+ def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):
+ """
+ Backend derived classes should call this function on any mouse
+ button press. x,y are the canvas coords: 0,0 is lower, left.
+ button and key are as defined in :class:`MouseEvent`.
+
+        This method will call all functions connected to the
+ 'button_press_event' with a :class:`MouseEvent` instance.
+ """
+ self._button = button
+ s = 'button_press_event'
+ mouseevent = MouseEvent(s, self, x, y, button, self._key,
+ dblclick=dblclick, guiEvent=guiEvent)
+ self.callbacks.process(s, mouseevent)
+
+ def button_release_event(self, x, y, button, guiEvent=None):
+ """
+ Backend derived classes should call this function on any mouse
+ button release.
+
+ This method will call all functions connected to the
+ 'button_release_event' with a :class:`MouseEvent` instance.
+
+ Parameters
+ ----------
+ x : scalar
+ the canvas coordinates where 0=left
+
+ y : scalar
+ the canvas coordinates where 0=bottom
+
+ guiEvent
+ the native UI event that generated the mpl event
+
+ """
+ s = 'button_release_event'
+ event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
+ self.callbacks.process(s, event)
+ self._button = None
+
+ def motion_notify_event(self, x, y, guiEvent=None):
+ """
+ Backend derived classes should call this function on any
+ motion-notify-event.
+
+ This method will call all functions connected to the
+ 'motion_notify_event' with a :class:`MouseEvent` instance.
+
+ Parameters
+ ----------
+ x : scalar
+ the canvas coordinates where 0=left
+
+ y : scalar
+ the canvas coordinates where 0=bottom
+
+ guiEvent
+ the native UI event that generated the mpl event
+
+ """
+ self._lastx, self._lasty = x, y
+ s = 'motion_notify_event'
+ event = MouseEvent(s, self, x, y, self._button, self._key,
+ guiEvent=guiEvent)
+ self.callbacks.process(s, event)
+
+ def leave_notify_event(self, guiEvent=None):
+ """
+ Backend derived classes should call this function when leaving
+ canvas
+
+ Parameters
+ ----------
+ guiEvent
+ the native UI event that generated the mpl event
+
+ """
+
+ self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
+ LocationEvent.lastevent = None
+ self._lastx, self._lasty = None, None
+
+ def enter_notify_event(self, guiEvent=None, xy=None):
+ """
+ Backend derived classes should call this function when entering
+ canvas
+
+ Parameters
+ ----------
+ guiEvent
+ the native UI event that generated the mpl event
+ xy : tuple of 2 scalars
+ the coordinate location of the pointer when the canvas is
+ entered
+
+ """
+ if xy is not None:
+ x, y = xy
+ self._lastx, self._lasty = x, y
+
+ event = Event('figure_enter_event', self, guiEvent)
+ self.callbacks.process('figure_enter_event', event)
+
+ @cbook.deprecated("2.1")
+ def idle_event(self, guiEvent=None):
+ """Called when GUI is idle."""
+ s = 'idle_event'
+ event = IdleEvent(s, self, guiEvent=guiEvent)
+ self.callbacks.process(s, event)
+
+ def grab_mouse(self, ax):
+ """
+ Set the child axes which are currently grabbing the mouse events.
+ Usually called by the widgets themselves.
+ It is an error to call this if the mouse is already grabbed by
+ another axes.
+ """
+ if self.mouse_grabber not in (None, ax):
+ raise RuntimeError("Another Axes already grabs mouse input")
+ self.mouse_grabber = ax
+
+ def release_mouse(self, ax):
+ """
+ Release the mouse grab held by the axes, ax.
+ Usually called by the widgets.
+        It is ok to call this even if *ax* doesn't have the mouse
+ grab currently.
+ """
+ if self.mouse_grabber is ax:
+ self.mouse_grabber = None
+
+ def draw(self, *args, **kwargs):
+ """Render the :class:`~matplotlib.figure.Figure`."""
+
+ def draw_idle(self, *args, **kwargs):
+ """
+ :meth:`draw` only if idle; defaults to draw but backends can override
+ """
+ if not self._is_idle_drawing:
+ with self._idle_draw_cntx():
+ self.draw(*args, **kwargs)
+
+ def draw_cursor(self, event):
+ """
+ Draw a cursor in the event.axes if inaxes is not None. Use
+ native GUI drawing for efficiency if possible
+ """
+
+ def get_width_height(self):
+ """
+ Return the figure width and height in points or pixels
+ (depending on the backend), truncated to integers
+ """
+ return int(self.figure.bbox.width), int(self.figure.bbox.height)
+
+ @classmethod
+ def get_supported_filetypes(cls):
+ """Return dict of savefig file formats supported by this backend"""
+ return cls.filetypes
+
+ @classmethod
+ def get_supported_filetypes_grouped(cls):
+ """Return a dict of savefig file formats supported by this backend,
+ where the keys are a file type name, such as 'Joint Photographic
+ Experts Group', and the values are a list of filename extensions used
+ for that filetype, such as ['jpg', 'jpeg']."""
+ groupings = {}
+ for ext, name in six.iteritems(cls.filetypes):
+ groupings.setdefault(name, []).append(ext)
+ groupings[name].sort()
+ return groupings
+
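+    # Illustrative shape of the mapping returned by
+    # get_supported_filetypes_grouped (not part of the original source; the
+    # actual keys and extensions depend on the backend):
+    #
+    #     {'Joint Photographic Experts Group': ['jpeg', 'jpg'],
+    #      'Portable Network Graphics': ['png'],
+    #      ...}
+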
+ def _get_output_canvas(self, fmt):
+ """
+ Return a canvas suitable for saving figures to a specified file format.
+
+ If necessary, this function will switch to a registered backend that
+ supports the format.
+ """
+ method_name = 'print_%s' % fmt
+ # Return the current canvas if it supports the requested format.
+ if hasattr(self, method_name):
+ return self
+ # Return a default canvas for the requested format, if it exists.
+ canvas_class = get_registered_canvas_class(fmt)
+ if canvas_class:
+ return self.switch_backends(canvas_class)
+ # Else report error for unsupported format.
+ raise ValueError(
+ "Format {!r} is not supported (supported formats: {})"
+ .format(fmt, ", ".join(sorted(self.get_supported_filetypes()))))
+
+ def print_figure(self, filename, dpi=None, facecolor=None, edgecolor=None,
+ orientation='portrait', format=None, **kwargs):
+ """
+ Render the figure to hardcopy. Set the figure patch face and edge
+ colors. This is useful because some of the GUIs have a gray figure
+ face color background and you'll probably want to override this on
+ hardcopy.
+
+ Parameters
+ ----------
+ filename
+ can also be a file object on image backends
+
+ orientation : {'landscape', 'portrait'}, optional
+ only currently applies to PostScript printing.
+
+ dpi : scalar, optional
+ the dots per inch to save the figure in; if None, use savefig.dpi
+
+ facecolor : color spec or None, optional
+ the facecolor of the figure; if None, defaults to savefig.facecolor
+
+ edgecolor : color spec or None, optional
+ the edgecolor of the figure; if None, defaults to savefig.edgecolor
+
+ format : str, optional
+ when set, forcibly set the file format to save to
+
+ bbox_inches : str or `~matplotlib.transforms.Bbox`, optional
+ Bbox in inches. Only the given portion of the figure is
+ saved. If 'tight', try to figure out the tight bbox of
+ the figure. If None, use savefig.bbox
+
+ pad_inches : scalar, optional
+ Amount of padding around the figure when bbox_inches is
+ 'tight'. If None, use savefig.pad_inches
+
+ bbox_extra_artists : list of `~matplotlib.artist.Artist`, optional
+ A list of extra artists that will be considered when the
+ tight bbox is calculated.
+
+ """
+ self._is_saving = True
+ # Remove the figure manager, if any, to avoid resizing the GUI widget.
+ # Having *no* manager and a *None* manager are currently different (see
+ # Figure.show); should probably be normalized to None at some point.
+ _no_manager = object()
+ if hasattr(self, 'manager'):
+ manager = self.manager
+ del self.manager
+ else:
+ manager = _no_manager
+
+ if format is None:
+ # get format from filename, or from backend's default filetype
+ if isinstance(filename, getattr(os, "PathLike", ())):
+ filename = os.fspath(filename)
+ if isinstance(filename, six.string_types):
+ format = os.path.splitext(filename)[1][1:]
+ if format is None or format == '':
+ format = self.get_default_filetype()
+ if isinstance(filename, six.string_types):
+ filename = filename.rstrip('.') + '.' + format
+ format = format.lower()
+
+ # get canvas object and print method for format
+ canvas = self._get_output_canvas(format)
+ print_method = getattr(canvas, 'print_%s' % format)
+
+ if dpi is None:
+ dpi = rcParams['savefig.dpi']
+
+ if dpi == 'figure':
+ dpi = getattr(self.figure, '_original_dpi', self.figure.dpi)
+
+ if facecolor is None:
+ facecolor = rcParams['savefig.facecolor']
+ if edgecolor is None:
+ edgecolor = rcParams['savefig.edgecolor']
+
+ origDPI = self.figure.dpi
+ origfacecolor = self.figure.get_facecolor()
+ origedgecolor = self.figure.get_edgecolor()
+
+ self.figure.dpi = dpi
+ self.figure.set_facecolor(facecolor)
+ self.figure.set_edgecolor(edgecolor)
+
+ bbox_inches = kwargs.pop("bbox_inches", None)
+ if bbox_inches is None:
+ bbox_inches = rcParams['savefig.bbox']
+
+ if bbox_inches:
+ # call adjust_bbox to save only the given area
+ if bbox_inches == "tight":
+ # When bbox_inches == "tight", it saves the figure twice. The
+ # first save command (to a BytesIO) is just to estimate the
+ # bounding box of the figure.
+ result = print_method(
+ io.BytesIO(),
+ dpi=dpi,
+ facecolor=facecolor,
+ edgecolor=edgecolor,
+ orientation=orientation,
+ dryrun=True,
+ **kwargs)
+ renderer = self.figure._cachedRenderer
+ bbox_inches = self.figure.get_tightbbox(renderer)
+
+ bbox_artists = kwargs.pop("bbox_extra_artists", None)
+ if bbox_artists is None:
+ bbox_artists = self.figure.get_default_bbox_extra_artists()
+
+ bbox_filtered = []
+ for a in bbox_artists:
+ bbox = a.get_window_extent(renderer)
+ if a.get_clip_on():
+ clip_box = a.get_clip_box()
+ if clip_box is not None:
+ bbox = Bbox.intersection(bbox, clip_box)
+ clip_path = a.get_clip_path()
+ if clip_path is not None and bbox is not None:
+ clip_path = clip_path.get_fully_transformed_path()
+ bbox = Bbox.intersection(bbox,
+ clip_path.get_extents())
+ if bbox is not None and (bbox.width != 0 or
+ bbox.height != 0):
+ bbox_filtered.append(bbox)
+
+ if bbox_filtered:
+ _bbox = Bbox.union(bbox_filtered)
+ trans = Affine2D().scale(1.0 / self.figure.dpi)
+ bbox_extra = TransformedBbox(_bbox, trans)
+ bbox_inches = Bbox.union([bbox_inches, bbox_extra])
+
+ pad = kwargs.pop("pad_inches", None)
+ if pad is None:
+ pad = rcParams['savefig.pad_inches']
+
+ bbox_inches = bbox_inches.padded(pad)
+
+ restore_bbox = tight_bbox.adjust_bbox(self.figure, bbox_inches,
+ canvas.fixed_dpi)
+
+ _bbox_inches_restore = (bbox_inches, restore_bbox)
+ else:
+ _bbox_inches_restore = None
+
+ try:
+ result = print_method(
+ filename,
+ dpi=dpi,
+ facecolor=facecolor,
+ edgecolor=edgecolor,
+ orientation=orientation,
+ bbox_inches_restore=_bbox_inches_restore,
+ **kwargs)
+ finally:
+ if bbox_inches and restore_bbox:
+ restore_bbox()
+
+ self.figure.dpi = origDPI
+ self.figure.set_facecolor(origfacecolor)
+ self.figure.set_edgecolor(origedgecolor)
+ self.figure.set_canvas(self)
+ if manager is not _no_manager:
+ self.manager = manager
+ self._is_saving = False
+ return result
+
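+    # Illustrative use of print_figure above (not part of the original
+    # source); this is what Figure.savefig ultimately delegates to:
+    #
+    #     fig.canvas.print_figure('out.png', dpi=150, bbox_inches='tight')
+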
+ @classmethod
+ def get_default_filetype(cls):
+ """
+ Get the default savefig file format as specified in rcParam
+ ``savefig.format``. Returned string excludes period. Overridden
+ in backends that only support a single file type.
+ """
+ return rcParams['savefig.format']
+
+ def get_window_title(self):
+ """
+ Get the title text of the window containing the figure.
+ Return None if there is no window (e.g., a PS backend).
+ """
+ if hasattr(self, "manager"):
+ return self.manager.get_window_title()
+
+ def set_window_title(self, title):
+ """
+ Set the title text of the window containing the figure. Note that
+ this has no effect if there is no window (e.g., a PS backend).
+ """
+ if hasattr(self, "manager"):
+ self.manager.set_window_title(title)
+
+ def get_default_filename(self):
+ """
+ Return a string, which includes extension, suitable for use as
+ a default filename.
+ """
+ default_basename = self.get_window_title() or 'image'
+ default_basename = default_basename.replace(' ', '_')
+ default_filetype = self.get_default_filetype()
+ default_filename = default_basename + '.' + default_filetype
+
+ save_dir = os.path.expanduser(rcParams['savefig.directory'])
+
+ # ensure non-existing filename in save dir
+ i = 1
+ while os.path.isfile(os.path.join(save_dir, default_filename)):
+ # attach numerical count to basename
+ default_filename = (
+ '{}-{}.{}'.format(default_basename, i, default_filetype))
+ i += 1
+
+ return default_filename
+
+ def switch_backends(self, FigureCanvasClass):
+ """
+ Instantiate an instance of FigureCanvasClass
+
+ This is used for backend switching, e.g., to instantiate a
+        This is used for backend switching, e.g., to instantiate a
+        FigureCanvasPS from a FigureCanvasGTK.  Note that deep copying is
+        not done, so any changes to one of the instances (e.g., setting
+        figure size or line props) will be reflected in the other.
+ newCanvas = FigureCanvasClass(self.figure)
+ newCanvas._is_saving = self._is_saving
+ return newCanvas
+
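+    # Illustrative use of switch_backends (not part of the original source;
+    # assumes the Agg backend is importable):
+    #
+    #     from matplotlib.backends.backend_agg import FigureCanvasAgg
+    #     agg_canvas = fig.canvas.switch_backends(FigureCanvasAgg)
+    #     agg_canvas.print_png('out.png')
+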
+ def mpl_connect(self, s, func):
+ """
+ Connect event with string *s* to *func*. The signature of *func* is::
+
+ def func(event)
+
+ where event is a :class:`matplotlib.backend_bases.Event`. The
+ following events are recognized
+
+ - 'button_press_event'
+ - 'button_release_event'
+ - 'draw_event'
+ - 'key_press_event'
+ - 'key_release_event'
+ - 'motion_notify_event'
+ - 'pick_event'
+ - 'resize_event'
+ - 'scroll_event'
+        - 'figure_enter_event'
+        - 'figure_leave_event'
+        - 'axes_enter_event'
+        - 'axes_leave_event'
+ - 'close_event'
+
+ For the location events (button and key press/release), if the
+ mouse is over the axes, the variable ``event.inaxes`` will be
+        set to the :class:`~matplotlib.axes.Axes` the event occurred
+ over, and additionally, the variables ``event.xdata`` and
+ ``event.ydata`` will be defined. This is the mouse location
+ in data coords. See
+ :class:`~matplotlib.backend_bases.KeyEvent` and
+ :class:`~matplotlib.backend_bases.MouseEvent` for more info.
+
+ Return value is a connection id that can be used with
+        :meth:`~matplotlib.backend_bases.FigureCanvasBase.mpl_disconnect`.
+
+ Examples
+ --------
+ Usage::
+
+ def on_press(event):
+ print('you pressed', event.button, event.xdata, event.ydata)
+
+ cid = canvas.mpl_connect('button_press_event', on_press)
+
+ """
+ if s == 'idle_event':
+ cbook.warn_deprecated(1.5,
+ "idle_event is only implemented for the wx backend, and will "
+ "be removed in matplotlib 2.1. Use the animations module "
+ "instead.")
+
+ return self.callbacks.connect(s, func)
+
+ def mpl_disconnect(self, cid):
+ """
+ Disconnect callback id cid
+
+ Examples
+ --------
+ Usage::
+
+ cid = canvas.mpl_connect('button_press_event', on_press)
+ #...later
+ canvas.mpl_disconnect(cid)
+ """
+ return self.callbacks.disconnect(cid)
+
+ def new_timer(self, *args, **kwargs):
+ """
+ Creates a new backend-specific subclass of
+ :class:`backend_bases.Timer`. This is useful for getting periodic
+ events through the backend's native event loop. Implemented only for
+ backends with GUIs.
+
+ Other Parameters
+ ----------------
+ interval : scalar
+ Timer interval in milliseconds
+
+ callbacks : List[Tuple[callable, Tuple, Dict]]
+ Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
+ will be executed by the timer every *interval*.
+
+            Callbacks which return ``False`` or ``0`` will be removed from the
+ timer.
+
+ Examples
+ --------
+
+ >>> timer = fig.canvas.new_timer(callbacks=[(f1, (1, ), {'a': 3}),])
+
+ """
+ return TimerBase(*args, **kwargs)
+
+ def flush_events(self):
+ """Flush the GUI events for the figure.
+
+ Interactive backends need to reimplement this method.
+ """
+
+ def start_event_loop(self, timeout=0):
+ """Start a blocking event loop.
+
+ Such an event loop is used by interactive functions, such as `ginput`
+ and `waitforbuttonpress`, to wait for events.
+
+ The event loop blocks until a callback function triggers
+ `stop_event_loop`, or *timeout* is reached.
+
+ If *timeout* is negative, never timeout.
+
+ Only interactive backends need to reimplement this method and it relies
+ on `flush_events` being properly implemented.
+
+ Interactive backends should implement this in a more native way.
+ """
+ if timeout <= 0:
+ timeout = np.inf
+ timestep = 0.01
+ counter = 0
+ self._looping = True
+ while self._looping and counter * timestep < timeout:
+ self.flush_events()
+ time.sleep(timestep)
+ counter += 1
+
+ def stop_event_loop(self):
+ """Stop the current blocking event loop.
+
+ Interactive backends need to reimplement this to match
+ `start_event_loop`
+ """
+ self._looping = False
+
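+    # Illustrative use of the blocking event loop (not part of the original
+    # source; this is roughly what `ginput`/`waitforbuttonpress` rely on):
+    #
+    #     def _on_click(event):
+    #         fig.canvas.stop_event_loop()          # assumes a Figure `fig`
+    #
+    #     cid = fig.canvas.mpl_connect('button_press_event', _on_click)
+    #     fig.canvas.start_event_loop(timeout=10)   # blocks until click/timeout
+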
+ start_event_loop_default = cbook.deprecated(
+ "2.1", name="start_event_loop_default")(start_event_loop)
+ stop_event_loop_default = cbook.deprecated(
+ "2.1", name="stop_event_loop_default")(stop_event_loop)
+
+
+def key_press_handler(event, canvas, toolbar=None):
+ """
+ Implement the default mpl key bindings for the canvas and toolbar
+ described at :ref:`key-event-handling`
+
+ Parameters
+ ----------
+ event : :class:`KeyEvent`
+ a key press/release event
+ canvas : :class:`FigureCanvasBase`
+ the backend-specific canvas instance
+ toolbar : :class:`NavigationToolbar2`
+ the navigation cursor toolbar
+
+ """
+ # these bindings happen whether you are over an axes or not
+
+ if event.key is None:
+ return
+
+ # Load key-mappings from your matplotlibrc file.
+ fullscreen_keys = rcParams['keymap.fullscreen']
+ home_keys = rcParams['keymap.home']
+ back_keys = rcParams['keymap.back']
+ forward_keys = rcParams['keymap.forward']
+ pan_keys = rcParams['keymap.pan']
+ zoom_keys = rcParams['keymap.zoom']
+ save_keys = rcParams['keymap.save']
+ quit_keys = rcParams['keymap.quit']
+ grid_keys = rcParams['keymap.grid']
+ grid_minor_keys = rcParams['keymap.grid_minor']
+ toggle_yscale_keys = rcParams['keymap.yscale']
+ toggle_xscale_keys = rcParams['keymap.xscale']
+ all_keys = rcParams['keymap.all_axes']
+
+ # toggle fullscreen mode ('f', 'ctrl + f')
+ if event.key in fullscreen_keys:
+ try:
+ canvas.manager.full_screen_toggle()
+ except AttributeError:
+ pass
+
+ # quit the figure (default key 'ctrl+w')
+ if event.key in quit_keys:
+ Gcf.destroy_fig(canvas.figure)
+
+ if toolbar is not None:
+ # home or reset mnemonic (default key 'h', 'home' and 'r')
+ if event.key in home_keys:
+ toolbar.home()
+ # forward / backward keys to enable left handed quick navigation
+ # (default key for backward: 'left', 'backspace' and 'c')
+ elif event.key in back_keys:
+ toolbar.back()
+ # (default key for forward: 'right' and 'v')
+ elif event.key in forward_keys:
+ toolbar.forward()
+ # pan mnemonic (default key 'p')
+ elif event.key in pan_keys:
+ toolbar.pan()
+ toolbar._set_cursor(event)
+ # zoom mnemonic (default key 'o')
+ elif event.key in zoom_keys:
+ toolbar.zoom()
+ toolbar._set_cursor(event)
+ # saving current figure (default key 's')
+ elif event.key in save_keys:
+ toolbar.save_figure()
+
+ if event.inaxes is None:
+ return
+
+ # these bindings require the mouse to be over an axes to trigger
+ def _get_uniform_gridstate(ticks):
+ # Return True/False if all grid lines are on or off, None if they are
+ # not all in the same state.
+ if all(tick.gridOn for tick in ticks):
+ return True
+ elif not any(tick.gridOn for tick in ticks):
+ return False
+ else:
+ return None
+
+ ax = event.inaxes
+ # toggle major grids in current axes (default key 'g')
+ # Both here and below (for 'G'), we do nothing if *any* grid (major or
+ # minor, x or y) is not in a uniform state, to avoid messing up user
+ # customization.
+ if (event.key in grid_keys
+ # Exclude minor grids not in a uniform state.
+ and None not in [_get_uniform_gridstate(ax.xaxis.minorTicks),
+ _get_uniform_gridstate(ax.yaxis.minorTicks)]):
+ x_state = _get_uniform_gridstate(ax.xaxis.majorTicks)
+ y_state = _get_uniform_gridstate(ax.yaxis.majorTicks)
+ cycle = [(False, False), (True, False), (True, True), (False, True)]
+ try:
+ x_state, y_state = (
+ cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])
+ except ValueError:
+ # Exclude major grids not in a uniform state.
+ pass
+ else:
+ # If turning major grids off, also turn minor grids off.
+ ax.grid(x_state, which="major" if x_state else "both", axis="x")
+ ax.grid(y_state, which="major" if y_state else "both", axis="y")
+ canvas.draw_idle()
+ # toggle major and minor grids in current axes (default key 'G')
+ if (event.key in grid_minor_keys
+ # Exclude major grids not in a uniform state.
+ and None not in [_get_uniform_gridstate(ax.xaxis.majorTicks),
+ _get_uniform_gridstate(ax.yaxis.majorTicks)]):
+ x_state = _get_uniform_gridstate(ax.xaxis.minorTicks)
+ y_state = _get_uniform_gridstate(ax.yaxis.minorTicks)
+ cycle = [(False, False), (True, False), (True, True), (False, True)]
+ try:
+ x_state, y_state = (
+ cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])
+ except ValueError:
+ # Exclude minor grids not in a uniform state.
+ pass
+ else:
+ ax.grid(x_state, which="both", axis="x")
+ ax.grid(y_state, which="both", axis="y")
+ canvas.draw_idle()
+    # toggle scaling of y-axes between 'log' and 'linear' (default key 'l')
+ elif event.key in toggle_yscale_keys:
+ scale = ax.get_yscale()
+ if scale == 'log':
+ ax.set_yscale('linear')
+ ax.figure.canvas.draw_idle()
+ elif scale == 'linear':
+ try:
+ ax.set_yscale('log')
+ except ValueError as exc:
+ warnings.warn(str(exc))
+ ax.set_yscale('linear')
+ ax.figure.canvas.draw_idle()
+    # toggle scaling of x-axes between 'log' and 'linear' (default key 'k')
+ elif event.key in toggle_xscale_keys:
+ scalex = ax.get_xscale()
+ if scalex == 'log':
+ ax.set_xscale('linear')
+ ax.figure.canvas.draw_idle()
+ elif scalex == 'linear':
+ try:
+ ax.set_xscale('log')
+ except ValueError as exc:
+ warnings.warn(str(exc))
+ ax.set_xscale('linear')
+ ax.figure.canvas.draw_idle()
+
+ elif (event.key.isdigit() and event.key != '0') or event.key in all_keys:
+        # keys in the 'all' list enable all axes (default key 'a');
+        # otherwise, if the key is a number, only enable that particular
+        # axes if the event was raised over it
+ if not (event.key in all_keys):
+ n = int(event.key) - 1
+ for i, a in enumerate(canvas.figure.get_axes()):
+ # consider axes, in which the event was raised
+ # FIXME: Why only this axes?
+ if event.x is not None and event.y is not None \
+ and a.in_axes(event):
+ if event.key in all_keys:
+ a.set_navigate(True)
+ else:
+ a.set_navigate(i == n)
+
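+# Illustrative wiring of key_press_handler (not part of the original source);
+# interactive backends connect it roughly like this:
+#
+#     canvas.mpl_connect(
+#         'key_press_event',
+#         lambda event: key_press_handler(event, canvas, canvas.toolbar))
+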
+
+class NonGuiException(Exception):
+ pass
+
+
+class FigureManagerBase(object):
+ """
+ Helper class for pyplot mode, wraps everything up into a neat bundle
+
+ Attributes
+ ----------
+ canvas : :class:`FigureCanvasBase`
+ The backend-specific canvas instance
+
+ num : int or str
+ The figure number
+
+ key_press_handler_id : int
+ The default key handler cid, when using the toolmanager. Can be used
+ to disable default key press handling ::
+
+ figure.canvas.mpl_disconnect(
+ figure.canvas.manager.key_press_handler_id)
+ """
+ def __init__(self, canvas, num):
+ self.canvas = canvas
+ canvas.manager = self # store a pointer to parent
+ self.num = num
+
+ self.key_press_handler_id = None
+ if rcParams['toolbar'] != 'toolmanager':
+ self.key_press_handler_id = self.canvas.mpl_connect(
+ 'key_press_event',
+ self.key_press)
+
+ def show(self):
+ """
+ For GUI backends, show the figure window and redraw.
+ For non-GUI backends, raise an exception to be caught
+ by :meth:`~matplotlib.figure.Figure.show`, for an
+ optional warning.
+ """
+ raise NonGuiException()
+
+ def destroy(self):
+ pass
+
+ def full_screen_toggle(self):
+ pass
+
+ def resize(self, w, h):
+        """For GUI backends, resize the window (in pixels)."""
+
+ def key_press(self, event):
+ """
+ Implement the default mpl key bindings defined at
+ :ref:`key-event-handling`
+ """
+ if rcParams['toolbar'] != 'toolmanager':
+ key_press_handler(event, self.canvas, self.canvas.toolbar)
+
+ @cbook.deprecated("2.2")
+ def show_popup(self, msg):
+ """Display message in a popup -- GUI only."""
+
+ def get_window_title(self):
+ """Get the title text of the window containing the figure.
+
+ Return None for non-GUI (e.g., PS) backends.
+ """
+ return 'image'
+
+ def set_window_title(self, title):
+ """Set the title text of the window containing the figure.
+
+ This has no effect for non-GUI (e.g., PS) backends.
+ """
+
+
+cursors = tools.cursors
+
+
+class NavigationToolbar2(object):
+ """
+ Base class for the navigation cursor, version 2
+
+    Backends must implement a canvas that handles connections for
+ 'button_press_event' and 'button_release_event'. See
+ :meth:`FigureCanvasBase.mpl_connect` for more information
+
+
+ They must also define
+
+ :meth:`save_figure`
+ save the current figure
+
+ :meth:`set_cursor`
+ if you want the pointer icon to change
+
+ :meth:`_init_toolbar`
+ create your toolbar widget
+
+ :meth:`draw_rubberband` (optional)
+ draw the zoom to rect "rubberband" rectangle
+
+ :meth:`press` (optional)
+ whenever a mouse button is pressed, you'll be notified with
+ the event
+
+ :meth:`release` (optional)
+ whenever a mouse button is released, you'll be notified with
+ the event
+
+ :meth:`set_message` (optional)
+ display message
+
+ :meth:`set_history_buttons` (optional)
+ you can change the history back / forward buttons to
+ indicate disabled / enabled state.
+
+ That's it, we'll do the rest!
+ """
+
+ # list of toolitems to add to the toolbar, format is:
+ # (
+ # text, # the text of the button (often not visible to users)
+ # tooltip_text, # the tooltip shown on hover (where possible)
+ # image_file, # name of the image for the button (without the extension)
+ # name_of_method, # name of the method in NavigationToolbar2 to call
+ # )
+ toolitems = (
+ ('Home', 'Reset original view', 'home', 'home'),
+ ('Back', 'Back to previous view', 'back', 'back'),
+ ('Forward', 'Forward to next view', 'forward', 'forward'),
+ (None, None, None, None),
+ ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
+ ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
+ ('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
+ (None, None, None, None),
+ ('Save', 'Save the figure', 'filesave', 'save_figure'),
+ )
+
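+    # Illustrative extension of `toolitems` in a concrete backend toolbar (not
+    # part of the original source; 'SomeBackendToolbar', 'custom_icon' and
+    # 'do_something' are hypothetical names):
+    #
+    #     class MyToolbar(SomeBackendToolbar):
+    #         toolitems = NavigationToolbar2.toolitems + (
+    #             ('Custom', 'Run a custom action', 'custom_icon',
+    #              'do_something'),
+    #         )
+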
+ def __init__(self, canvas):
+ self.canvas = canvas
+ canvas.toolbar = self
+ self._nav_stack = cbook.Stack()
+ self._xypress = None # the location and axis info at the time
+ # of the press
+ self._idPress = None
+ self._idRelease = None
+ self._active = None
+ # This cursor will be set after the initial draw.
+ self._lastCursor = cursors.POINTER
+ self._init_toolbar()
+ self._idDrag = self.canvas.mpl_connect(
+ 'motion_notify_event', self.mouse_move)
+
+ self._ids_zoom = []
+ self._zoom_mode = None
+
+ self._button_pressed = None # determined by the button pressed
+ # at start
+
+ self.mode = '' # a mode string for the status bar
+ self.set_history_buttons()
+
+ def set_message(self, s):
+ """Display a message on toolbar or in status bar."""
+
+ def back(self, *args):
+        """Move back up the view lim stack."""
+ self._nav_stack.back()
+ self.set_history_buttons()
+ self._update_view()
+
+ @cbook.deprecated("2.1", alternative="canvas.draw_idle")
+ def dynamic_update(self):
+ self.canvas.draw_idle()
+
+ def draw_rubberband(self, event, x0, y0, x1, y1):
+ """Draw a rectangle rubberband to indicate zoom limits.
+
+ Note that it is not guaranteed that ``x0 <= x1`` and ``y0 <= y1``.
+ """
+
+ def remove_rubberband(self):
+ """Remove the rubberband."""
+
+ def forward(self, *args):
+ """Move forward in the view lim stack."""
+ self._nav_stack.forward()
+ self.set_history_buttons()
+ self._update_view()
+
+ def home(self, *args):
+ """Restore the original view."""
+ self._nav_stack.home()
+ self.set_history_buttons()
+ self._update_view()
+
+ def _init_toolbar(self):
+ """
+ This is where you actually build the GUI widgets (called by
+ __init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
+ ``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
+ across backends (there are ppm versions in CVS also).
+
+ You just need to set the callbacks
+
+ home : self.home
+ back : self.back
+ forward : self.forward
+ hand : self.pan
+ zoom_to_rect : self.zoom
+ filesave : self.save_figure
+
+ You only need to define the last one - the others are in the base
+ class implementation.
+
+ """
+ raise NotImplementedError
+
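+    # Minimal sketch of a backend toolbar subclass (not part of the original
+    # source; method bodies are placeholders for backend-specific code):
+    #
+    #     class MyToolbar(NavigationToolbar2):
+    #         def _init_toolbar(self):
+    #             pass                      # build the native toolbar widgets
+    #         def save_figure(self, *args):
+    #             self.canvas.print_figure('figure.png')
+    #         def set_cursor(self, cursor):
+    #             pass                      # change the native pointer icon
+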
+ def _set_cursor(self, event):
+ if not event.inaxes or not self._active:
+ if self._lastCursor != cursors.POINTER:
+ self.set_cursor(cursors.POINTER)
+ self._lastCursor = cursors.POINTER
+ else:
+ if (self._active == 'ZOOM'
+ and self._lastCursor != cursors.SELECT_REGION):
+ self.set_cursor(cursors.SELECT_REGION)
+ self._lastCursor = cursors.SELECT_REGION
+ elif (self._active == 'PAN' and
+ self._lastCursor != cursors.MOVE):
+ self.set_cursor(cursors.MOVE)
+ self._lastCursor = cursors.MOVE
+
+ def mouse_move(self, event):
+ self._set_cursor(event)
+
+ if event.inaxes and event.inaxes.get_navigate():
+
+ try:
+ s = event.inaxes.format_coord(event.xdata, event.ydata)
+ except (ValueError, OverflowError):
+ pass
+ else:
+ artists = [a for a in event.inaxes.mouseover_set
+ if a.contains(event) and a.get_visible()]
+
+ if artists:
+ a = cbook._topmost_artist(artists)
+ if a is not event.inaxes.patch:
+ data = a.get_cursor_data(event)
+ if data is not None:
+ s += ' [%s]' % a.format_cursor_data(data)
+
+ if len(self.mode):
+ self.set_message('%s, %s' % (self.mode, s))
+ else:
+ self.set_message(s)
+ else:
+ self.set_message(self.mode)
+
+ def pan(self, *args):
+        """Activate the pan/zoom tool. Pan with left button, zoom with right."""
+ # set the pointer icon and button press funcs to the
+ # appropriate callbacks
+
+ if self._active == 'PAN':
+ self._active = None
+ else:
+ self._active = 'PAN'
+ if self._idPress is not None:
+ self._idPress = self.canvas.mpl_disconnect(self._idPress)
+ self.mode = ''
+
+ if self._idRelease is not None:
+ self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
+ self.mode = ''
+
+ if self._active:
+ self._idPress = self.canvas.mpl_connect(
+ 'button_press_event', self.press_pan)
+ self._idRelease = self.canvas.mpl_connect(
+ 'button_release_event', self.release_pan)
+ self.mode = 'pan/zoom'
+ self.canvas.widgetlock(self)
+ else:
+ self.canvas.widgetlock.release(self)
+
+ for a in self.canvas.figure.get_axes():
+ a.set_navigate_mode(self._active)
+
+ self.set_message(self.mode)
+
+ def press(self, event):
+ """Called whenever a mouse button is pressed."""
+
+ def press_pan(self, event):
+ """Callback for mouse button press in pan/zoom mode."""
+
+ if event.button == 1:
+ self._button_pressed = 1
+ elif event.button == 3:
+ self._button_pressed = 3
+ else:
+ self._button_pressed = None
+ return
+
+ if self._nav_stack() is None:
+ # set the home button to this view
+ self.push_current()
+
+ x, y = event.x, event.y
+ self._xypress = []
+ for i, a in enumerate(self.canvas.figure.get_axes()):
+ if (x is not None and y is not None and a.in_axes(event) and
+ a.get_navigate() and a.can_pan()):
+ a.start_pan(x, y, event.button)
+ self._xypress.append((a, i))
+ self.canvas.mpl_disconnect(self._idDrag)
+ self._idDrag = self.canvas.mpl_connect('motion_notify_event',
+ self.drag_pan)
+
+ self.press(event)
+
+ def press_zoom(self, event):
+ """Callback for mouse button press in zoom to rect mode."""
+ # If we're already in the middle of a zoom, pressing another
+ # button works to "cancel"
+ if self._ids_zoom != []:
+ for zoom_id in self._ids_zoom:
+ self.canvas.mpl_disconnect(zoom_id)
+ self.release(event)
+ self.draw()
+ self._xypress = None
+ self._button_pressed = None
+ self._ids_zoom = []
+ return
+
+ if event.button == 1:
+ self._button_pressed = 1
+ elif event.button == 3:
+ self._button_pressed = 3
+ else:
+ self._button_pressed = None
+ return
+
+ if self._nav_stack() is None:
+ # set the home button to this view
+ self.push_current()
+
+ x, y = event.x, event.y
+ self._xypress = []
+ for i, a in enumerate(self.canvas.figure.get_axes()):
+ if (x is not None and y is not None and a.in_axes(event) and
+ a.get_navigate() and a.can_zoom()):
+ self._xypress.append((x, y, a, i, a._get_view()))
+
+ id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
+ id2 = self.canvas.mpl_connect('key_press_event',
+ self._switch_on_zoom_mode)
+ id3 = self.canvas.mpl_connect('key_release_event',
+ self._switch_off_zoom_mode)
+
+ self._ids_zoom = id1, id2, id3
+ self._zoom_mode = event.key
+
+ self.press(event)
+
+ def _switch_on_zoom_mode(self, event):
+ self._zoom_mode = event.key
+ self.mouse_move(event)
+
+ def _switch_off_zoom_mode(self, event):
+ self._zoom_mode = None
+ self.mouse_move(event)
+
+ def push_current(self):
+ """Push the current view limits and position onto the stack."""
+ self._nav_stack.push(
+ WeakKeyDictionary(
+ {ax: (ax._get_view(),
+ # Store both the original and modified positions.
+ (ax.get_position(True).frozen(),
+ ax.get_position().frozen()))
+ for ax in self.canvas.figure.axes}))
+ self.set_history_buttons()
+
+ def release(self, event):
+ """Callback for mouse button release."""
+
+ def release_pan(self, event):
+ """Callback for mouse button release in pan/zoom mode."""
+
+ if self._button_pressed is None:
+ return
+ self.canvas.mpl_disconnect(self._idDrag)
+ self._idDrag = self.canvas.mpl_connect(
+ 'motion_notify_event', self.mouse_move)
+ for a, ind in self._xypress:
+ a.end_pan()
+ if not self._xypress:
+ return
+ self._xypress = []
+ self._button_pressed = None
+ self.push_current()
+ self.release(event)
+ self.draw()
+
+ def drag_pan(self, event):
+ """Callback for dragging in pan/zoom mode."""
+ for a, ind in self._xypress:
+            # safer to use the recorded button at the press than the current
+            # button: multiple buttons can get pressed during motion...
+ a.drag_pan(self._button_pressed, event.key, event.x, event.y)
+ self.canvas.draw_idle()
+
+ def drag_zoom(self, event):
+ """Callback for dragging in zoom mode."""
+ if self._xypress:
+ x, y = event.x, event.y
+ lastx, lasty, a, ind, view = self._xypress[0]
+ (x1, y1), (x2, y2) = np.clip(
+ [[lastx, lasty], [x, y]], a.bbox.min, a.bbox.max)
+ if self._zoom_mode == "x":
+ y1, y2 = a.bbox.intervaly
+ elif self._zoom_mode == "y":
+ x1, x2 = a.bbox.intervalx
+ self.draw_rubberband(event, x1, y1, x2, y2)
+
+ def release_zoom(self, event):
+ """Callback for mouse button release in zoom to rect mode."""
+ for zoom_id in self._ids_zoom:
+ self.canvas.mpl_disconnect(zoom_id)
+ self._ids_zoom = []
+
+ self.remove_rubberband()
+
+ if not self._xypress:
+ return
+
+ last_a = []
+
+ for cur_xypress in self._xypress:
+ x, y = event.x, event.y
+ lastx, lasty, a, ind, view = cur_xypress
+ # ignore singular clicks - 5 pixels is a threshold
+ # allows the user to "cancel" a zoom action
+ # by zooming by less than 5 pixels
+ if ((abs(x - lastx) < 5 and self._zoom_mode!="y") or
+ (abs(y - lasty) < 5 and self._zoom_mode!="x")):
+ self._xypress = None
+ self.release(event)
+ self.draw()
+ return
+
+ # detect twinx,y axes and avoid double zooming
+ twinx, twiny = False, False
+ if last_a:
+ for la in last_a:
+ if a.get_shared_x_axes().joined(a, la):
+ twinx = True
+ if a.get_shared_y_axes().joined(a, la):
+ twiny = True
+ last_a.append(a)
+
+ if self._button_pressed == 1:
+ direction = 'in'
+ elif self._button_pressed == 3:
+ direction = 'out'
+ else:
+ continue
+
+ a._set_view_from_bbox((lastx, lasty, x, y), direction,
+ self._zoom_mode, twinx, twiny)
+
+ self.draw()
+ self._xypress = None
+ self._button_pressed = None
+
+ self._zoom_mode = None
+
+ self.push_current()
+ self.release(event)
+
+ def draw(self):
+ """Redraw the canvases, update the locators."""
+ for a in self.canvas.figure.get_axes():
+ xaxis = getattr(a, 'xaxis', None)
+ yaxis = getattr(a, 'yaxis', None)
+ locators = []
+ if xaxis is not None:
+ locators.append(xaxis.get_major_locator())
+ locators.append(xaxis.get_minor_locator())
+ if yaxis is not None:
+ locators.append(yaxis.get_major_locator())
+ locators.append(yaxis.get_minor_locator())
+
+ for loc in locators:
+ loc.refresh()
+ self.canvas.draw_idle()
+
+ def _update_view(self):
+ """Update the viewlim and position from the view and
+ position stack for each axes.
+ """
+ nav_info = self._nav_stack()
+ if nav_info is None:
+ return
+ # Retrieve all items at once to avoid any risk of GC deleting an Axes
+ # while in the middle of the loop below.
+ items = list(nav_info.items())
+ for ax, (view, (pos_orig, pos_active)) in items:
+ ax._set_view(view)
+ # Restore both the original and modified positions
+ ax._set_position(pos_orig, 'original')
+ ax._set_position(pos_active, 'active')
+ self.canvas.draw_idle()
+
+ def save_figure(self, *args):
+ """Save the current figure."""
+ raise NotImplementedError
+
+ def set_cursor(self, cursor):
+ """Set the current cursor to one of the :class:`Cursors` enums values.
+
+ If required by the backend, this method should trigger an update in
+ the backend event loop after the cursor is set, as this method may be
+ called e.g. before a long-running task during which the GUI is not
+ updated.
+ """
+
+ def update(self):
+ """Reset the axes stack."""
+ self._nav_stack.clear()
+ self.set_history_buttons()
+
+ def zoom(self, *args):
+ """Activate zoom to rect mode."""
+ if self._active == 'ZOOM':
+ self._active = None
+ else:
+ self._active = 'ZOOM'
+
+ if self._idPress is not None:
+ self._idPress = self.canvas.mpl_disconnect(self._idPress)
+ self.mode = ''
+
+ if self._idRelease is not None:
+ self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
+ self.mode = ''
+
+ if self._active:
+ self._idPress = self.canvas.mpl_connect('button_press_event',
+ self.press_zoom)
+ self._idRelease = self.canvas.mpl_connect('button_release_event',
+ self.release_zoom)
+ self.mode = 'zoom rect'
+ self.canvas.widgetlock(self)
+ else:
+ self.canvas.widgetlock.release(self)
+
+ for a in self.canvas.figure.get_axes():
+ a.set_navigate_mode(self._active)
+
+ self.set_message(self.mode)
+
+ def set_history_buttons(self):
+ """Enable or disable the back/forward button."""
+
+
+class ToolContainerBase(object):
+ """
+ Base class for all tool containers, e.g. toolbars.
+
+ Attributes
+ ----------
+ toolmanager : `ToolManager`
+ The tools with which this `ToolContainer` wants to communicate.
+ """
+
+ _icon_extension = '.png'
+ """
+    Filename extension for toolcontainer button icon images
+
+ **String**: Image extension
+ """
+
+ def __init__(self, toolmanager):
+ self.toolmanager = toolmanager
+ self.toolmanager.toolmanager_connect('tool_removed_event',
+ self._remove_tool_cbk)
+
+ def _tool_toggled_cbk(self, event):
+ """
+        Capture the 'tool_trigger_[name]' event.
+
+        This only gets used for toggled tools.
+ """
+ self.toggle_toolitem(event.tool.name, event.tool.toggled)
+
+ def add_tool(self, tool, group, position=-1):
+ """
+ Adds a tool to this container
+
+ Parameters
+ ----------
+ tool : tool_like
+ The tool to add, see `ToolManager.get_tool`.
+ group : str
+ The name of the group to add this tool to.
+ position : int (optional)
+ The position within the group to place this tool. Defaults to end.
+ """
+ tool = self.toolmanager.get_tool(tool)
+ image = self._get_image_filename(tool.image)
+ toggle = getattr(tool, 'toggled', None) is not None
+ self.add_toolitem(tool.name, group, position,
+ image, tool.description, toggle)
+ if toggle:
+ self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
+ self._tool_toggled_cbk)
+ # If initially toggled
+ if tool.toggled:
+ self.toggle_toolitem(tool.name, True)
+
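+    # Editor's usage sketch (not upstream code): a concrete container built on
+    # this base class is typically populated from an existing ToolManager
+    # roughly like this, where ``MyToolbar`` is a hypothetical subclass
+    # implementing ``add_toolitem``/``toggle_toolitem``/``remove_toolitem``:
+    #
+    #     toolbar = MyToolbar(toolmanager)
+    #     toolbar.add_tool('home', 'navigation')
+    #     toolbar.add_tool('zoom', 'navigation', position=-1)
+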
+ def _remove_tool_cbk(self, event):
+ """Captures the 'tool_removed_event' signal and removes the tool."""
+ self.remove_toolitem(event.tool.name)
+
+ def _get_image_filename(self, image):
+ """Find the image based on its name."""
+ if not image:
+ return None
+
+ basedir = os.path.join(rcParams['datapath'], 'images')
+ possible_images = (
+ image,
+ image + self._icon_extension,
+ os.path.join(basedir, image),
+ os.path.join(basedir, image) + self._icon_extension)
+
+ for fname in possible_images:
+ if os.path.isfile(fname):
+ return fname
+
+ def trigger_tool(self, name):
+ """
+ Trigger the tool
+
+ Parameters
+ ----------
+ name : String
+ Name (id) of the tool triggered from within the container
+ """
+ self.toolmanager.trigger_tool(name, sender=self)
+
+ def add_toolitem(self, name, group, position, image, description, toggle):
+ """
+ Add a toolitem to the container
+
+ This method must get implemented per backend
+
+        The callback associated with the button click event
+        must be **EXACTLY** `self.trigger_tool(name)`.
+
+ Parameters
+ ----------
+ name : string
+            Name of the tool to add; this gets used as the tool's ID and as
+            the default label of the buttons.
+ group : String
+ Name of the group that this tool belongs to
+ position : Int
+            Position of the tool within its group; if -1, it goes at the end.
+        image : String
+ Filename of the image for the button or `None`
+ description : String
+ Description of the tool, used for the tooltips
+ toggle : Bool
+ * `True` : The button is a toggle (change the pressed/unpressed
+ state between consecutive clicks)
+ * `False` : The button is a normal button (returns to unpressed
+ state after release)
+ """
+ raise NotImplementedError
+
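+    # Editor's sketch of what a backend override is expected to do (the GUI
+    # helper ``self._make_button`` is hypothetical); the essential contract
+    # from the docstring above is that the click callback is exactly
+    # ``self.trigger_tool(name)``:
+    #
+    #     def add_toolitem(self, name, group, position, image, description,
+    #                      toggle):
+    #         button = self._make_button(name, image, description, toggle)
+    #         button.on_click(lambda *args: self.trigger_tool(name))
+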
+ def toggle_toolitem(self, name, toggled):
+ """
+        Toggle the toolitem without firing an event.
+
+ Parameters
+ ----------
+ name : String
+ Id of the tool to toggle
+ toggled : bool
+ Whether to set this tool as toggled or not.
+ """
+ raise NotImplementedError
+
+ def remove_toolitem(self, name):
+ """
+ Remove a toolitem from the `ToolContainer`
+
+ This method must get implemented per backend
+
+ Called when `ToolManager` emits a `tool_removed_event`
+
+ Parameters
+ ----------
+ name : string
+ Name of the tool to remove
+ """
+ raise NotImplementedError
+
+
+class StatusbarBase(object):
+ """Base class for the statusbar"""
+ def __init__(self, toolmanager):
+ self.toolmanager = toolmanager
+ self.toolmanager.toolmanager_connect('tool_message_event',
+ self._message_cbk)
+
+ def _message_cbk(self, event):
+ """Captures the 'tool_message_event' and set the message"""
+ self.set_message(event.message)
+
+ def set_message(self, s):
+ """
+        Display a message on the toolbar or in the status bar
+
+ Parameters
+ ----------
+ s : str
+ Message text
+ """
+ pass
diff --git a/contrib/python/matplotlib/py2/matplotlib/backend_managers.py b/contrib/python/matplotlib/py2/matplotlib/backend_managers.py
new file mode 100644
index 00000000000..ab9a503fab8
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backend_managers.py
@@ -0,0 +1,436 @@
+"""
+`ToolManager`
+ Class that makes the bridge between user interaction (key press,
+ toolbar clicks, ..) and the actions in response to the user inputs.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+import six
+import warnings
+
+import matplotlib.cbook as cbook
+import matplotlib.widgets as widgets
+from matplotlib.rcsetup import validate_stringlist
+import matplotlib.backend_tools as tools
+
+
+class ToolEvent(object):
+ """Event for tool manipulation (add/remove)"""
+ def __init__(self, name, sender, tool, data=None):
+ self.name = name
+ self.sender = sender
+ self.tool = tool
+ self.data = data
+
+
+class ToolTriggerEvent(ToolEvent):
+ """Event to inform that a tool has been triggered"""
+ def __init__(self, name, sender, tool, canvasevent=None, data=None):
+ ToolEvent.__init__(self, name, sender, tool, data)
+ self.canvasevent = canvasevent
+
+
+class ToolManagerMessageEvent(object):
+ """
+ Event carrying messages from toolmanager
+
+ Messages usually get displayed to the user by the toolbar
+ """
+ def __init__(self, name, sender, message):
+ self.name = name
+ self.sender = sender
+ self.message = message
+
+
+class ToolManager(object):
+ """
+ Helper class that groups all the user interactions for a Figure
+
+ Attributes
+ ----------
+ figure: `Figure`
+ keypresslock: `widgets.LockDraw`
+ `LockDraw` object to know if the `canvas` key_press_event is locked
+ messagelock: `widgets.LockDraw`
+ `LockDraw` object to know if the message is available to write
+ """
+
+ def __init__(self, figure=None):
+ warnings.warn('Treat the new Tool classes introduced in v1.5 as ' +
+ 'experimental for now, the API will likely change in ' +
+ 'version 2.1 and perhaps the rcParam as well')
+
+ self._key_press_handler_id = None
+
+ self._tools = {}
+ self._keys = {}
+ self._toggled = {}
+ self._callbacks = cbook.CallbackRegistry()
+
+ # to process keypress event
+ self.keypresslock = widgets.LockDraw()
+ self.messagelock = widgets.LockDraw()
+
+ self._figure = None
+ self.set_figure(figure)
+
+ @property
+ def canvas(self):
+ """Canvas managed by FigureManager"""
+ if not self._figure:
+ return None
+ return self._figure.canvas
+
+ @property
+ def figure(self):
+ """Figure that holds the canvas"""
+ return self._figure
+
+ @figure.setter
+ def figure(self, figure):
+ self.set_figure(figure)
+
+ def set_figure(self, figure, update_tools=True):
+ """
+        Set the figure to interact with the tools
+
+        Parameters
+        ----------
+ figure: `Figure`
+ update_tools: bool
+ Force tools to update figure
+ """
+ if self._key_press_handler_id:
+ self.canvas.mpl_disconnect(self._key_press_handler_id)
+ self._figure = figure
+ if figure:
+ self._key_press_handler_id = self.canvas.mpl_connect(
+ 'key_press_event', self._key_press)
+ if update_tools:
+ for tool in self._tools.values():
+ tool.figure = figure
+
+ def toolmanager_connect(self, s, func):
+ """
+ Connect event with string *s* to *func*.
+
+ Parameters
+ ----------
+ s : String
+ Name of the event
+
+ The following events are recognized
+
+ - 'tool_message_event'
+ - 'tool_removed_event'
+ - 'tool_added_event'
+
+            For every tool added, a new event is created
+
+            - 'tool_trigger_TOOLNAME'
+ Where TOOLNAME is the id of the tool.
+
+ func : function
+ Function to be called with signature
+ def func(event)
+ """
+ return self._callbacks.connect(s, func)
+
+ def toolmanager_disconnect(self, cid):
+ """
+ Disconnect callback id *cid*
+
+ Example usage::
+
+ cid = toolmanager.toolmanager_connect('tool_trigger_zoom',
+ on_press)
+ #...later
+ toolmanager.toolmanager_disconnect(cid)
+ """
+ return self._callbacks.disconnect(cid)
+
+ def message_event(self, message, sender=None):
+ """ Emit a `ToolManagerMessageEvent`"""
+ if sender is None:
+ sender = self
+
+ s = 'tool_message_event'
+ event = ToolManagerMessageEvent(s, sender, message)
+ self._callbacks.process(s, event)
+
+ @property
+ def active_toggle(self):
+ """Currently toggled tools"""
+
+ return self._toggled
+
+ def get_tool_keymap(self, name):
+ """
+ Get the keymap associated with the specified tool
+
+ Parameters
+ ----------
+ name : string
+ Name of the Tool
+
+ Returns
+ -------
+ list : list of keys associated with the Tool
+ """
+
+ keys = [k for k, i in six.iteritems(self._keys) if i == name]
+ return keys
+
+ def _remove_keys(self, name):
+ for k in self.get_tool_keymap(name):
+ del self._keys[k]
+
+ def update_keymap(self, name, *keys):
+ """
+ Set the keymap to associate with the specified tool
+
+ Parameters
+ ----------
+ name : string
+ Name of the Tool
+ keys : keys to associate with the Tool
+ """
+
+ if name not in self._tools:
+ raise KeyError('%s not in Tools' % name)
+
+ self._remove_keys(name)
+
+ for key in keys:
+ for k in validate_stringlist(key):
+ if k in self._keys:
+ warnings.warn('Key %s changed from %s to %s' %
+ (k, self._keys[k], name))
+ self._keys[k] = name
+
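+    # Editor's note -- illustrative only: re-binding the keys of a tool that
+    # is assumed to be registered under the name 'zoom'; any key already owned
+    # by another tool is reassigned with a warning:
+    #
+    #     toolmanager.update_keymap('zoom', 'z', 'ctrl+z')
+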
+ def remove_tool(self, name):
+ """
+ Remove tool from `ToolManager`
+
+ Parameters
+ ----------
+ name : string
+ Name of the Tool
+ """
+
+ tool = self.get_tool(name)
+ tool.destroy()
+
+        # If it is a toggle tool and toggled, untoggle it
+ if getattr(tool, 'toggled', False):
+ self.trigger_tool(tool, 'toolmanager')
+
+ self._remove_keys(name)
+
+ s = 'tool_removed_event'
+ event = ToolEvent(s, self, tool)
+ self._callbacks.process(s, event)
+
+ del self._tools[name]
+
+ def add_tool(self, name, tool, *args, **kwargs):
+ """
+ Add *tool* to `ToolManager`
+
+        If successful, adds a new event `tool_trigger_name`, where **name**
+        is the **name** of the tool; this event is fired every time the tool
+        is triggered.
+
+ Parameters
+ ----------
+ name : str
+ Name of the tool, treated as the ID, has to be unique
+ tool : class_like, i.e. str or type
+            Reference to find the class of the Tool to be added.
+
+ Notes
+ -----
+ args and kwargs get passed directly to the tools constructor.
+
+ See Also
+ --------
+ matplotlib.backend_tools.ToolBase : The base class for tools.
+ """
+
+ tool_cls = self._get_cls_to_instantiate(tool)
+ if not tool_cls:
+ raise ValueError('Impossible to find class for %s' % str(tool))
+
+ if name in self._tools:
+ warnings.warn('A "Tool class" with the same name already exists, '
+ 'not added')
+ return self._tools[name]
+
+ tool_obj = tool_cls(self, name, *args, **kwargs)
+ self._tools[name] = tool_obj
+
+ if tool_cls.default_keymap is not None:
+ self.update_keymap(name, tool_cls.default_keymap)
+
+ # For toggle tools init the radio_group in self._toggled
+ if isinstance(tool_obj, tools.ToolToggleBase):
+ # None group is not mutually exclusive, a set is used to keep track
+ # of all toggled tools in this group
+ if tool_obj.radio_group is None:
+ self._toggled.setdefault(None, set())
+ else:
+ self._toggled.setdefault(tool_obj.radio_group, None)
+
+ # If initially toggled
+ if tool_obj.toggled:
+ self._handle_toggle(tool_obj, None, None, None)
+ tool_obj.set_figure(self.figure)
+
+ self._tool_added_event(tool_obj)
+ return tool_obj
+
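+    # Editor's usage sketch (assumptions flagged inline): ``GreetTool`` is a
+    # hypothetical ``backend_tools.ToolBase`` subclass defined by the caller;
+    # registering it creates the 'tool_trigger_greet' event described above.
+    #
+    #     from matplotlib.backend_tools import ToolBase
+    #
+    #     class GreetTool(ToolBase):
+    #         description = 'Print a greeting'
+    #         default_keymap = 'g'
+    #
+    #         def trigger(self, sender, event, data=None):
+    #             print('hello from', self.name)
+    #
+    #     toolmanager.add_tool('greet', GreetTool)
+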
+ def _tool_added_event(self, tool):
+ s = 'tool_added_event'
+ event = ToolEvent(s, self, tool)
+ self._callbacks.process(s, event)
+
+ def _handle_toggle(self, tool, sender, canvasevent, data):
+ """
+        Toggle a tool; any previously toggled tool in the same radio group is
+        untoggled first.  Called from trigger_tool.
+
+ Parameters
+ ----------
+ tool: Tool object
+ sender: object
+ Object that wishes to trigger the tool
+ canvasevent : Event
+ Original Canvas event or None
+ data : Object
+ Extra data to pass to the tool when triggering
+ """
+
+ radio_group = tool.radio_group
+ # radio_group None is not mutually exclusive
+ # just keep track of toggled tools in this group
+ if radio_group is None:
+ if tool.name in self._toggled[None]:
+ self._toggled[None].remove(tool.name)
+ else:
+ self._toggled[None].add(tool.name)
+ return
+
+ # If the tool already has a toggled state, untoggle it
+ if self._toggled[radio_group] == tool.name:
+ toggled = None
+ # If no tool was toggled in the radio_group
+ # toggle it
+ elif self._toggled[radio_group] is None:
+ toggled = tool.name
+ # Other tool in the radio_group is toggled
+ else:
+ # Untoggle previously toggled tool
+ self.trigger_tool(self._toggled[radio_group],
+ self,
+ canvasevent,
+ data)
+ toggled = tool.name
+
+ # Keep track of the toggled tool in the radio_group
+ self._toggled[radio_group] = toggled
+
+ def _get_cls_to_instantiate(self, callback_class):
+ # Find the class that corresponds to the tool
+ if isinstance(callback_class, six.string_types):
+ # FIXME: make more complete searching structure
+ if callback_class in globals():
+ callback_class = globals()[callback_class]
+ else:
+ mod = 'backend_tools'
+ current_module = __import__(mod,
+ globals(), locals(), [mod], 1)
+
+ callback_class = getattr(current_module, callback_class, False)
+ if callable(callback_class):
+ return callback_class
+ else:
+ return None
+
+ def trigger_tool(self, name, sender=None, canvasevent=None,
+ data=None):
+ """
+ Trigger a tool and emit the tool_trigger_[name] event
+
+ Parameters
+ ----------
+ name : string
+ Name of the tool
+ sender: object
+ Object that wishes to trigger the tool
+ canvasevent : Event
+ Original Canvas event or None
+ data : Object
+ Extra data to pass to the tool when triggering
+ """
+ tool = self.get_tool(name)
+ if tool is None:
+ return
+
+ if sender is None:
+ sender = self
+
+ self._trigger_tool(name, sender, canvasevent, data)
+
+ s = 'tool_trigger_%s' % name
+ event = ToolTriggerEvent(s, sender, tool, canvasevent, data)
+ self._callbacks.process(s, event)
+
+ def _trigger_tool(self, name, sender=None, canvasevent=None, data=None):
+ """
+        Trigger a tool
+
+ Method to actually trigger the tool
+ """
+ tool = self.get_tool(name)
+
+ if isinstance(tool, tools.ToolToggleBase):
+ self._handle_toggle(tool, sender, canvasevent, data)
+
+ # Important!!!
+ # This is where the Tool object gets triggered
+ tool.trigger(sender, canvasevent, data)
+
+ def _key_press(self, event):
+ if event.key is None or self.keypresslock.locked():
+ return
+
+ name = self._keys.get(event.key, None)
+ if name is None:
+ return
+ self.trigger_tool(name, canvasevent=event)
+
+ @property
+ def tools(self):
+ """Return the tools controlled by `ToolManager`"""
+
+ return self._tools
+
+ def get_tool(self, name, warn=True):
+ """
+ Return the tool object, also accepts the actual tool for convenience
+
+ Parameters
+ ----------
+ name : str, ToolBase
+ Name of the tool, or the tool itself
+ warn : bool, optional
+ If this method should give warnings.
+ """
+ if isinstance(name, tools.ToolBase) and name.name in self._tools:
+ return name
+ if name not in self._tools:
+ if warn:
+ warnings.warn("ToolManager does not control tool %s" % name)
+ return None
+ return self._tools[name]
diff --git a/contrib/python/matplotlib/py2/matplotlib/backend_tools.py b/contrib/python/matplotlib/py2/matplotlib/backend_tools.py
new file mode 100644
index 00000000000..6639763e417
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backend_tools.py
@@ -0,0 +1,1081 @@
+"""
+Abstract base classes define the primitives for Tools.
+These tools are used by `matplotlib.backend_managers.ToolManager`
+
+:class:`ToolBase`
+ Simple stateless tool
+
+:class:`ToolToggleBase`
+ Tool that has two states, only one Toggle tool can be
+ active at any given time for the same
+ `matplotlib.backend_managers.ToolManager`
+"""
+
+
+from matplotlib import rcParams
+from matplotlib._pylab_helpers import Gcf
+import matplotlib.cbook as cbook
+from weakref import WeakKeyDictionary
+import six
+import time
+import warnings
+import numpy as np
+
+
+class Cursors(object):
+ """Simple namespace for cursor reference"""
+ HAND, POINTER, SELECT_REGION, MOVE, WAIT = list(range(5))
+cursors = Cursors()
+
+# Views positions tool
+_views_positions = 'viewpos'
+
+
+class ToolBase(object):
+ """
+ Base tool class
+
+    A base tool only implements the `trigger` method, or no method at all.
+ The tool is instantiated by `matplotlib.backend_managers.ToolManager`
+
+ Attributes
+ ----------
+ toolmanager: `matplotlib.backend_managers.ToolManager`
+ ToolManager that controls this Tool
+    figure: `Figure`
+ Figure instance that is affected by this Tool
+ name: String
+ Used as **Id** of the tool, has to be unique among tools of the same
+ ToolManager
+ """
+
+ default_keymap = None
+ """
+ Keymap to associate with this tool
+
+ **String**: List of comma separated keys that will be used to call this
+ tool when the keypress event of *self.figure.canvas* is emitted
+ """
+
+ description = None
+ """
+ Description of the Tool
+
+ **String**: If the Tool is included in the Toolbar this text is used
+ as a Tooltip
+ """
+
+ image = None
+ """
+ Filename of the image
+
+ **String**: Filename of the image to use in the toolbar. If None, the
+ `name` is used as a label in the toolbar button
+ """
+
+ def __init__(self, toolmanager, name):
+ warnings.warn('Treat the new Tool classes introduced in v1.5 as ' +
+ 'experimental for now, the API will likely change in ' +
+ 'version 2.1, and some tools might change name')
+ self._name = name
+ self._toolmanager = toolmanager
+ self._figure = None
+
+ @property
+ def figure(self):
+ return self._figure
+
+ @figure.setter
+ def figure(self, figure):
+ self.set_figure(figure)
+
+ @property
+ def canvas(self):
+ if not self._figure:
+ return None
+ return self._figure.canvas
+
+ @property
+ def toolmanager(self):
+ return self._toolmanager
+
+ def set_figure(self, figure):
+ """
+ Assign a figure to the tool
+
+ Parameters
+ ----------
+ figure: `Figure`
+ """
+ self._figure = figure
+
+ def trigger(self, sender, event, data=None):
+ """
+ Called when this tool gets used
+
+ This method is called by
+ `matplotlib.backend_managers.ToolManager.trigger_tool`
+
+ Parameters
+ ----------
+ event: `Event`
+ The Canvas event that caused this tool to be called
+ sender: object
+ Object that requested the tool to be triggered
+ data: object
+ Extra data
+ """
+
+ pass
+
+ @property
+ def name(self):
+ """Tool Id"""
+ return self._name
+
+ def destroy(self):
+ """
+ Destroy the tool
+
+ This method is called when the tool is removed by
+ `matplotlib.backend_managers.ToolManager.remove_tool`
+ """
+ pass
+
+
+class ToolToggleBase(ToolBase):
+ """
+ Toggleable tool
+
+ Every time it is triggered, it switches between enable and disable
+
+ Parameters
+ ----------
+ ``*args``
+ Variable length argument to be used by the Tool
+ ``**kwargs``
+        `toggled`, if present and True, sets the initial state of the Tool.
+        Arbitrary keyword arguments to be consumed by the Tool.
+ """
+
+ radio_group = None
+ """Attribute to group 'radio' like tools (mutually exclusive)
+
+ **String** that identifies the group or **None** if not belonging to a
+ group
+ """
+
+ cursor = None
+ """Cursor to use when the tool is active"""
+
+ default_toggled = False
+ """Default of toggled state"""
+
+ def __init__(self, *args, **kwargs):
+ self._toggled = kwargs.pop('toggled', self.default_toggled)
+ ToolBase.__init__(self, *args, **kwargs)
+
+ def trigger(self, sender, event, data=None):
+ """Calls `enable` or `disable` based on `toggled` value"""
+ if self._toggled:
+ self.disable(event)
+ else:
+ self.enable(event)
+ self._toggled = not self._toggled
+
+ def enable(self, event=None):
+ """
+ Enable the toggle tool
+
+ `trigger` calls this method when `toggled` is False
+ """
+
+ pass
+
+ def disable(self, event=None):
+ """
+ Disable the toggle tool
+
+        `trigger` calls this method when `toggled` is True.
+
+ This can happen in different circumstances
+
+ * Click on the toolbar tool button
+ * Call to `matplotlib.backend_managers.ToolManager.trigger_tool`
+ * Another `ToolToggleBase` derived tool is triggered
+ (from the same `ToolManager`)
+ """
+
+ pass
+
+ @property
+ def toggled(self):
+ """State of the toggled tool"""
+
+ return self._toggled
+
+ def set_figure(self, figure):
+ toggled = self.toggled
+ if toggled:
+ if self.figure:
+ self.trigger(self, None)
+ else:
+ # if no figure the internal state is not changed
+ # we change it here so next call to trigger will change it back
+ self._toggled = False
+ ToolBase.set_figure(self, figure)
+ if toggled:
+ if figure:
+ self.trigger(self, None)
+ else:
+ # if there is no figure, trigger won't change the internal
+ # state we change it back
+ self._toggled = True
+
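+# Editor's sketch (not part of the module): a minimal ToolToggleBase subclass
+# only needs to override ``enable``/``disable``; toggled-state bookkeeping and
+# radio groups are handled by the base class.  ``ToolHighlight`` and the
+# colours used are purely illustrative:
+#
+#     class ToolHighlight(ToolToggleBase):
+#         description = 'Toggle a figure highlight'
+#         default_keymap = 'h'
+#
+#         def enable(self, event=None):
+#             self.figure.patch.set_facecolor('lightyellow')
+#             self.figure.canvas.draw_idle()
+#
+#         def disable(self, event=None):
+#             self.figure.patch.set_facecolor('white')
+#             self.figure.canvas.draw_idle()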
+
+class SetCursorBase(ToolBase):
+ """
+    Change the cursor while the pointer is over an axes
+
+    This tool keeps track of all `ToolToggleBase` derived tools, and calls
+    set_cursor when a tool gets triggered.
+ """
+ def __init__(self, *args, **kwargs):
+ ToolBase.__init__(self, *args, **kwargs)
+ self._idDrag = None
+ self._cursor = None
+ self._default_cursor = cursors.POINTER
+ self._last_cursor = self._default_cursor
+ self.toolmanager.toolmanager_connect('tool_added_event',
+ self._add_tool_cbk)
+
+ # process current tools
+ for tool in self.toolmanager.tools.values():
+ self._add_tool(tool)
+
+ def set_figure(self, figure):
+ if self._idDrag:
+ self.canvas.mpl_disconnect(self._idDrag)
+ ToolBase.set_figure(self, figure)
+ if figure:
+ self._idDrag = self.canvas.mpl_connect(
+ 'motion_notify_event', self._set_cursor_cbk)
+
+ def _tool_trigger_cbk(self, event):
+ if event.tool.toggled:
+ self._cursor = event.tool.cursor
+ else:
+ self._cursor = None
+
+ self._set_cursor_cbk(event.canvasevent)
+
+ def _add_tool(self, tool):
+ """set the cursor when the tool is triggered"""
+ if getattr(tool, 'cursor', None) is not None:
+ self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
+ self._tool_trigger_cbk)
+
+ def _add_tool_cbk(self, event):
+ """Process every newly added tool"""
+ if event.tool is self:
+ return
+
+ self._add_tool(event.tool)
+
+ def _set_cursor_cbk(self, event):
+ if not event:
+ return
+
+ if not getattr(event, 'inaxes', False) or not self._cursor:
+ if self._last_cursor != self._default_cursor:
+ self.set_cursor(self._default_cursor)
+ self._last_cursor = self._default_cursor
+ elif self._cursor:
+ cursor = self._cursor
+ if cursor and self._last_cursor != cursor:
+ self.set_cursor(cursor)
+ self._last_cursor = cursor
+
+ def set_cursor(self, cursor):
+ """
+ Set the cursor
+
+ This method has to be implemented per backend
+ """
+ raise NotImplementedError
+
+
+class ToolCursorPosition(ToolBase):
+ """
+ Send message with the current pointer position
+
+ This tool runs in the background reporting the position of the cursor
+ """
+ def __init__(self, *args, **kwargs):
+ self._idDrag = None
+ ToolBase.__init__(self, *args, **kwargs)
+
+ def set_figure(self, figure):
+ if self._idDrag:
+ self.canvas.mpl_disconnect(self._idDrag)
+ ToolBase.set_figure(self, figure)
+ if figure:
+ self._idDrag = self.canvas.mpl_connect(
+ 'motion_notify_event', self.send_message)
+
+ def send_message(self, event):
+ """Call `matplotlib.backend_managers.ToolManager.message_event`"""
+ if self.toolmanager.messagelock.locked():
+ return
+
+ message = ' '
+
+ if event.inaxes and event.inaxes.get_navigate():
+ try:
+ s = event.inaxes.format_coord(event.xdata, event.ydata)
+ except (ValueError, OverflowError):
+ pass
+ else:
+ artists = [a for a in event.inaxes.mouseover_set
+ if a.contains(event) and a.get_visible()]
+
+ if artists:
+ a = cbook._topmost_artist(artists)
+ if a is not event.inaxes.patch:
+ data = a.get_cursor_data(event)
+ if data is not None:
+ s += ' [%s]' % a.format_cursor_data(data)
+
+ message = s
+ self.toolmanager.message_event(message, self)
+
+
+class RubberbandBase(ToolBase):
+ """Draw and remove rubberband"""
+ def trigger(self, sender, event, data):
+ """Call `draw_rubberband` or `remove_rubberband` based on data"""
+ if not self.figure.canvas.widgetlock.available(sender):
+ return
+ if data is not None:
+ self.draw_rubberband(*data)
+ else:
+ self.remove_rubberband()
+
+ def draw_rubberband(self, *data):
+ """
+ Draw rubberband
+
+ This method must get implemented per backend
+ """
+ raise NotImplementedError
+
+ def remove_rubberband(self):
+ """
+ Remove rubberband
+
+ This method should get implemented per backend
+ """
+ pass
+
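+# Editor's sketch (hypothetical backend code): a concrete Rubberband tool
+# normally overrides only the two drawing hooks; the coordinates arrive as the
+# ``data=(x1, y1, x2, y2)`` tuple that ToolZoom passes to ``trigger_tool``:
+#
+#     class ToolRubberbandMyBackend(RubberbandBase):
+#         def draw_rubberband(self, x1, y1, x2, y2):
+#             ...  # draw the rectangle in display (pixel) coordinates
+#
+#         def remove_rubberband(self):
+#             ...  # erase the rectangle again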
+
+class ToolQuit(ToolBase):
+ """Tool to call the figure manager destroy method"""
+
+ description = 'Quit the figure'
+ default_keymap = rcParams['keymap.quit']
+
+ def trigger(self, sender, event, data=None):
+ Gcf.destroy_fig(self.figure)
+
+
+class ToolQuitAll(ToolBase):
+ """Tool to call the figure manager destroy method"""
+
+ description = 'Quit all figures'
+ default_keymap = rcParams['keymap.quit_all']
+
+ def trigger(self, sender, event, data=None):
+ Gcf.destroy_all()
+
+
+class ToolEnableAllNavigation(ToolBase):
+ """Tool to enable all axes for toolmanager interaction"""
+
+ description = 'Enables all axes toolmanager'
+ default_keymap = rcParams['keymap.all_axes']
+
+ def trigger(self, sender, event, data=None):
+ if event.inaxes is None:
+ return
+
+ for a in self.figure.get_axes():
+ if (event.x is not None and event.y is not None
+ and a.in_axes(event)):
+ a.set_navigate(True)
+
+
+class ToolEnableNavigation(ToolBase):
+ """Tool to enable a specific axes for toolmanager interaction"""
+
+ description = 'Enables one axes toolmanager'
+ default_keymap = (1, 2, 3, 4, 5, 6, 7, 8, 9)
+
+ def trigger(self, sender, event, data=None):
+ if event.inaxes is None:
+ return
+
+ n = int(event.key) - 1
+ for i, a in enumerate(self.figure.get_axes()):
+ if (event.x is not None and event.y is not None
+ and a.in_axes(event)):
+ a.set_navigate(i == n)
+
+
+class _ToolGridBase(ToolBase):
+ """Common functionality between ToolGrid and ToolMinorGrid."""
+
+ _cycle = [(False, False), (True, False), (True, True), (False, True)]
+
+ def trigger(self, sender, event, data=None):
+ ax = event.inaxes
+ if ax is None:
+ return
+ try:
+ x_state, x_which, y_state, y_which = self._get_next_grid_states(ax)
+ except ValueError:
+ pass
+ else:
+ ax.grid(x_state, which=x_which, axis="x")
+ ax.grid(y_state, which=y_which, axis="y")
+ ax.figure.canvas.draw_idle()
+
+ @staticmethod
+ def _get_uniform_grid_state(ticks):
+ """
+ Check whether all grid lines are in the same visibility state.
+
+ Returns True/False if all grid lines are on or off, None if they are
+ not all in the same state.
+ """
+ if all(tick.gridOn for tick in ticks):
+ return True
+ elif not any(tick.gridOn for tick in ticks):
+ return False
+ else:
+ return None
+
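+# Editor's note -- worked example of the ``_cycle`` table above: repeatedly
+# triggering the major-grid tool walks the (x_state, y_state) pair through
+#
+#     (False, False) -> (True, False) -> (True, True) -> (False, True)
+#
+# and then wraps around, so four presses return an axes to its initial state.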
+
+class ToolGrid(_ToolGridBase):
+ """Tool to toggle the major grids of the figure"""
+
+    description = 'Toggle major grids'
+ default_keymap = rcParams['keymap.grid']
+
+ def _get_next_grid_states(self, ax):
+ if None in map(self._get_uniform_grid_state,
+ [ax.xaxis.minorTicks, ax.yaxis.minorTicks]):
+ # Bail out if minor grids are not in a uniform state.
+ raise ValueError
+ x_state, y_state = map(self._get_uniform_grid_state,
+ [ax.xaxis.majorTicks, ax.yaxis.majorTicks])
+ cycle = self._cycle
+ # Bail out (via ValueError) if major grids are not in a uniform state.
+ x_state, y_state = (
+ cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])
+ return (x_state, "major" if x_state else "both",
+ y_state, "major" if y_state else "both")
+
+
+class ToolMinorGrid(_ToolGridBase):
+ """Tool to toggle the major and minor grids of the figure"""
+
+    description = 'Toggle major and minor grids'
+ default_keymap = rcParams['keymap.grid_minor']
+
+ def _get_next_grid_states(self, ax):
+ if None in map(self._get_uniform_grid_state,
+ [ax.xaxis.majorTicks, ax.yaxis.majorTicks]):
+ # Bail out if major grids are not in a uniform state.
+ raise ValueError
+ x_state, y_state = map(self._get_uniform_grid_state,
+ [ax.xaxis.minorTicks, ax.yaxis.minorTicks])
+ cycle = self._cycle
+ # Bail out (via ValueError) if minor grids are not in a uniform state.
+ x_state, y_state = (
+ cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])
+ return x_state, "both", y_state, "both"
+
+
+class ToolFullScreen(ToolToggleBase):
+ """Tool to toggle full screen"""
+
+    description = 'Toggle Fullscreen mode'
+ default_keymap = rcParams['keymap.fullscreen']
+
+ def enable(self, event):
+ self.figure.canvas.manager.full_screen_toggle()
+
+ def disable(self, event):
+ self.figure.canvas.manager.full_screen_toggle()
+
+
+class AxisScaleBase(ToolToggleBase):
+ """Base Tool to toggle between linear and logarithmic"""
+
+ def trigger(self, sender, event, data=None):
+ if event.inaxes is None:
+ return
+ ToolToggleBase.trigger(self, sender, event, data)
+
+ def enable(self, event):
+ self.set_scale(event.inaxes, 'log')
+ self.figure.canvas.draw_idle()
+
+ def disable(self, event):
+ self.set_scale(event.inaxes, 'linear')
+ self.figure.canvas.draw_idle()
+
+
+class ToolYScale(AxisScaleBase):
+ """Tool to toggle between linear and logarithmic scales on the Y axis"""
+
+    description = 'Toggle Scale Y axis'
+ default_keymap = rcParams['keymap.yscale']
+
+ def set_scale(self, ax, scale):
+ ax.set_yscale(scale)
+
+
+class ToolXScale(AxisScaleBase):
+ """Tool to toggle between linear and logarithmic scales on the X axis"""
+
+    description = 'Toggle Scale X axis'
+ default_keymap = rcParams['keymap.xscale']
+
+ def set_scale(self, ax, scale):
+ ax.set_xscale(scale)
+
+
+class ToolViewsPositions(ToolBase):
+ """
+ Auxiliary Tool to handle changes in views and positions
+
+ Runs in the background and should get used by all the tools that
+ need to access the figure's history of views and positions, e.g.
+
+ * `ToolZoom`
+ * `ToolPan`
+ * `ToolHome`
+ * `ToolBack`
+ * `ToolForward`
+ """
+
+ def __init__(self, *args, **kwargs):
+ self.views = WeakKeyDictionary()
+ self.positions = WeakKeyDictionary()
+ self.home_views = WeakKeyDictionary()
+ ToolBase.__init__(self, *args, **kwargs)
+
+ def add_figure(self, figure):
+ """Add the current figure to the stack of views and positions"""
+
+ if figure not in self.views:
+ self.views[figure] = cbook.Stack()
+ self.positions[figure] = cbook.Stack()
+ self.home_views[figure] = WeakKeyDictionary()
+ # Define Home
+ self.push_current(figure)
+ # Make sure we add a home view for new axes as they're added
+ figure.add_axobserver(lambda fig: self.update_home_views(fig))
+
+ def clear(self, figure):
+ """Reset the axes stack"""
+ if figure in self.views:
+ self.views[figure].clear()
+ self.positions[figure].clear()
+ self.home_views[figure].clear()
+ self.update_home_views()
+
+ def update_view(self):
+ """
+ Update the view limits and position for each axes from the current
+ stack position. If any axes are present in the figure that aren't in
+ the current stack position, use the home view limits for those axes and
+ don't update *any* positions.
+ """
+
+ views = self.views[self.figure]()
+ if views is None:
+ return
+ pos = self.positions[self.figure]()
+ if pos is None:
+ return
+ home_views = self.home_views[self.figure]
+ all_axes = self.figure.get_axes()
+ for a in all_axes:
+ if a in views:
+ cur_view = views[a]
+ else:
+ cur_view = home_views[a]
+ a._set_view(cur_view)
+
+ if set(all_axes).issubset(pos):
+ for a in all_axes:
+ # Restore both the original and modified positions
+ a._set_position(pos[a][0], 'original')
+ a._set_position(pos[a][1], 'active')
+
+ self.figure.canvas.draw_idle()
+
+ def push_current(self, figure=None):
+ """
+ Push the current view limits and position onto their respective stacks
+ """
+ if not figure:
+ figure = self.figure
+ views = WeakKeyDictionary()
+ pos = WeakKeyDictionary()
+ for a in figure.get_axes():
+ views[a] = a._get_view()
+ pos[a] = self._axes_pos(a)
+ self.views[figure].push(views)
+ self.positions[figure].push(pos)
+
+ def _axes_pos(self, ax):
+ """
+ Return the original and modified positions for the specified axes
+
+ Parameters
+ ----------
+        ax : matplotlib.axes.AxesSubplot
+ The axes to get the positions for
+
+ Returns
+ -------
+        limits : tuple
+ A tuple of the original and modified positions
+ """
+
+ return (ax.get_position(True).frozen(),
+ ax.get_position().frozen())
+
+ def update_home_views(self, figure=None):
+ """
+ Make sure that self.home_views has an entry for all axes present in the
+ figure
+ """
+
+ if not figure:
+ figure = self.figure
+ for a in figure.get_axes():
+ if a not in self.home_views[figure]:
+ self.home_views[figure][a] = a._get_view()
+
+ def refresh_locators(self):
+ """Redraw the canvases, update the locators"""
+ for a in self.figure.get_axes():
+ xaxis = getattr(a, 'xaxis', None)
+ yaxis = getattr(a, 'yaxis', None)
+ zaxis = getattr(a, 'zaxis', None)
+ locators = []
+ if xaxis is not None:
+ locators.append(xaxis.get_major_locator())
+ locators.append(xaxis.get_minor_locator())
+ if yaxis is not None:
+ locators.append(yaxis.get_major_locator())
+ locators.append(yaxis.get_minor_locator())
+ if zaxis is not None:
+ locators.append(zaxis.get_major_locator())
+ locators.append(zaxis.get_minor_locator())
+
+ for loc in locators:
+ loc.refresh()
+ self.figure.canvas.draw_idle()
+
+ def home(self):
+ """Recall the first view and position from the stack"""
+ self.views[self.figure].home()
+ self.positions[self.figure].home()
+
+ def back(self):
+ """Back one step in the stack of views and positions"""
+ self.views[self.figure].back()
+ self.positions[self.figure].back()
+
+ def forward(self):
+ """Forward one step in the stack of views and positions"""
+ self.views[self.figure].forward()
+ self.positions[self.figure].forward()
+
+
+class ViewsPositionsBase(ToolBase):
+ """Base class for `ToolHome`, `ToolBack` and `ToolForward`"""
+
+ _on_trigger = None
+
+ def trigger(self, sender, event, data=None):
+ self.toolmanager.get_tool(_views_positions).add_figure(self.figure)
+ getattr(self.toolmanager.get_tool(_views_positions),
+ self._on_trigger)()
+ self.toolmanager.get_tool(_views_positions).update_view()
+
+
+class ToolHome(ViewsPositionsBase):
+ """Restore the original view lim"""
+
+ description = 'Reset original view'
+ image = 'home'
+ default_keymap = rcParams['keymap.home']
+ _on_trigger = 'home'
+
+
+class ToolBack(ViewsPositionsBase):
+ """Move back up the view lim stack"""
+
+ description = 'Back to previous view'
+ image = 'back'
+ default_keymap = rcParams['keymap.back']
+ _on_trigger = 'back'
+
+
+class ToolForward(ViewsPositionsBase):
+ """Move forward in the view lim stack"""
+
+ description = 'Forward to next view'
+ image = 'forward'
+ default_keymap = rcParams['keymap.forward']
+ _on_trigger = 'forward'
+
+
+class ConfigureSubplotsBase(ToolBase):
+ """Base tool for the configuration of subplots"""
+
+ description = 'Configure subplots'
+ image = 'subplots'
+
+
+class SaveFigureBase(ToolBase):
+ """Base tool for figure saving"""
+
+ description = 'Save the figure'
+ image = 'filesave'
+ default_keymap = rcParams['keymap.save']
+
+
+class ZoomPanBase(ToolToggleBase):
+ """Base class for `ToolZoom` and `ToolPan`"""
+ def __init__(self, *args):
+ ToolToggleBase.__init__(self, *args)
+ self._button_pressed = None
+ self._xypress = None
+ self._idPress = None
+ self._idRelease = None
+ self._idScroll = None
+ self.base_scale = 2.
+ self.scrollthresh = .5 # .5 second scroll threshold
+ self.lastscroll = time.time()-self.scrollthresh
+
+ def enable(self, event):
+ """Connect press/release events and lock the canvas"""
+ self.figure.canvas.widgetlock(self)
+ self._idPress = self.figure.canvas.mpl_connect(
+ 'button_press_event', self._press)
+ self._idRelease = self.figure.canvas.mpl_connect(
+ 'button_release_event', self._release)
+ self._idScroll = self.figure.canvas.mpl_connect(
+ 'scroll_event', self.scroll_zoom)
+
+ def disable(self, event):
+ """Release the canvas and disconnect press/release events"""
+ self._cancel_action()
+ self.figure.canvas.widgetlock.release(self)
+ self.figure.canvas.mpl_disconnect(self._idPress)
+ self.figure.canvas.mpl_disconnect(self._idRelease)
+ self.figure.canvas.mpl_disconnect(self._idScroll)
+
+ def trigger(self, sender, event, data=None):
+ self.toolmanager.get_tool(_views_positions).add_figure(self.figure)
+ ToolToggleBase.trigger(self, sender, event, data)
+
+ def scroll_zoom(self, event):
+ # https://gist.github.com/tacaswell/3144287
+ if event.inaxes is None:
+ return
+
+ if event.button == 'up':
+ # deal with zoom in
+ scl = self.base_scale
+ elif event.button == 'down':
+ # deal with zoom out
+ scl = 1/self.base_scale
+ else:
+ # deal with something that should never happen
+ scl = 1
+
+ ax = event.inaxes
+ ax._set_view_from_bbox([event.x, event.y, scl])
+
+ # If last scroll was done within the timing threshold, delete the
+ # previous view
+ if (time.time()-self.lastscroll) < self.scrollthresh:
+ self.toolmanager.get_tool(_views_positions).back()
+
+ self.figure.canvas.draw_idle() # force re-draw
+
+ self.lastscroll = time.time()
+ self.toolmanager.get_tool(_views_positions).push_current()
+
+
+class ToolZoom(ZoomPanBase):
+ """Zoom to rectangle"""
+
+ description = 'Zoom to rectangle'
+ image = 'zoom_to_rect'
+ default_keymap = rcParams['keymap.zoom']
+ cursor = cursors.SELECT_REGION
+ radio_group = 'default'
+
+ def __init__(self, *args):
+ ZoomPanBase.__init__(self, *args)
+ self._ids_zoom = []
+
+ def _cancel_action(self):
+ for zoom_id in self._ids_zoom:
+ self.figure.canvas.mpl_disconnect(zoom_id)
+ self.toolmanager.trigger_tool('rubberband', self)
+ self.toolmanager.get_tool(_views_positions).refresh_locators()
+ self._xypress = None
+ self._button_pressed = None
+ self._ids_zoom = []
+ return
+
+ def _press(self, event):
+ """the _press mouse button in zoom to rect mode callback"""
+
+ # If we're already in the middle of a zoom, pressing another
+ # button works to "cancel"
+ if self._ids_zoom != []:
+ self._cancel_action()
+
+ if event.button == 1:
+ self._button_pressed = 1
+ elif event.button == 3:
+ self._button_pressed = 3
+ else:
+ self._cancel_action()
+ return
+
+ x, y = event.x, event.y
+
+ self._xypress = []
+ for i, a in enumerate(self.figure.get_axes()):
+ if (x is not None and y is not None and a.in_axes(event) and
+ a.get_navigate() and a.can_zoom()):
+ self._xypress.append((x, y, a, i, a._get_view()))
+
+ id1 = self.figure.canvas.mpl_connect(
+ 'motion_notify_event', self._mouse_move)
+ id2 = self.figure.canvas.mpl_connect(
+ 'key_press_event', self._switch_on_zoom_mode)
+ id3 = self.figure.canvas.mpl_connect(
+ 'key_release_event', self._switch_off_zoom_mode)
+
+ self._ids_zoom = id1, id2, id3
+ self._zoom_mode = event.key
+
+ def _switch_on_zoom_mode(self, event):
+ self._zoom_mode = event.key
+ self._mouse_move(event)
+
+ def _switch_off_zoom_mode(self, event):
+ self._zoom_mode = None
+ self._mouse_move(event)
+
+ def _mouse_move(self, event):
+ """the drag callback in zoom mode"""
+
+ if self._xypress:
+ x, y = event.x, event.y
+ lastx, lasty, a, ind, view = self._xypress[0]
+ (x1, y1), (x2, y2) = np.clip(
+ [[lastx, lasty], [x, y]], a.bbox.min, a.bbox.max)
+ if self._zoom_mode == "x":
+ y1, y2 = a.bbox.intervaly
+ elif self._zoom_mode == "y":
+ x1, x2 = a.bbox.intervalx
+ self.toolmanager.trigger_tool(
+ 'rubberband', self, data=(x1, y1, x2, y2))
+
+ def _release(self, event):
+ """the release mouse button callback in zoom to rect mode"""
+
+ for zoom_id in self._ids_zoom:
+ self.figure.canvas.mpl_disconnect(zoom_id)
+ self._ids_zoom = []
+
+ if not self._xypress:
+ self._cancel_action()
+ return
+
+ last_a = []
+
+ for cur_xypress in self._xypress:
+ x, y = event.x, event.y
+ lastx, lasty, a, _ind, view = cur_xypress
+ # ignore singular clicks - 5 pixels is a threshold
+ if abs(x - lastx) < 5 or abs(y - lasty) < 5:
+ self._cancel_action()
+ return
+
+ # detect twinx,y axes and avoid double zooming
+ twinx, twiny = False, False
+ if last_a:
+ for la in last_a:
+ if a.get_shared_x_axes().joined(a, la):
+ twinx = True
+ if a.get_shared_y_axes().joined(a, la):
+ twiny = True
+ last_a.append(a)
+
+ if self._button_pressed == 1:
+ direction = 'in'
+ elif self._button_pressed == 3:
+ direction = 'out'
+ else:
+ continue
+
+ a._set_view_from_bbox((lastx, lasty, x, y), direction,
+ self._zoom_mode, twinx, twiny)
+
+ self._zoom_mode = None
+ self.toolmanager.get_tool(_views_positions).push_current()
+ self._cancel_action()
+
+
+class ToolPan(ZoomPanBase):
+ """Pan axes with left mouse, zoom with right"""
+
+ default_keymap = rcParams['keymap.pan']
+ description = 'Pan axes with left mouse, zoom with right'
+ image = 'move'
+ cursor = cursors.MOVE
+ radio_group = 'default'
+
+ def __init__(self, *args):
+ ZoomPanBase.__init__(self, *args)
+ self._idDrag = None
+
+ def _cancel_action(self):
+ self._button_pressed = None
+ self._xypress = []
+ self.figure.canvas.mpl_disconnect(self._idDrag)
+ self.toolmanager.messagelock.release(self)
+ self.toolmanager.get_tool(_views_positions).refresh_locators()
+
+ def _press(self, event):
+ if event.button == 1:
+ self._button_pressed = 1
+ elif event.button == 3:
+ self._button_pressed = 3
+ else:
+ self._cancel_action()
+ return
+
+ x, y = event.x, event.y
+
+ self._xypress = []
+ for i, a in enumerate(self.figure.get_axes()):
+ if (x is not None and y is not None and a.in_axes(event) and
+ a.get_navigate() and a.can_pan()):
+ a.start_pan(x, y, event.button)
+ self._xypress.append((a, i))
+ self.toolmanager.messagelock(self)
+ self._idDrag = self.figure.canvas.mpl_connect(
+ 'motion_notify_event', self._mouse_move)
+
+ def _release(self, event):
+ if self._button_pressed is None:
+ self._cancel_action()
+ return
+
+ self.figure.canvas.mpl_disconnect(self._idDrag)
+ self.toolmanager.messagelock.release(self)
+
+ for a, _ind in self._xypress:
+ a.end_pan()
+ if not self._xypress:
+ self._cancel_action()
+ return
+
+ self.toolmanager.get_tool(_views_positions).push_current()
+ self._cancel_action()
+
+ def _mouse_move(self, event):
+ for a, _ind in self._xypress:
+            # safer to use the recorded button at the _press than the current
+            # button: multiple buttons can get pressed during motion...
+ a.drag_pan(self._button_pressed, event.key, event.x, event.y)
+ self.toolmanager.canvas.draw_idle()
+
+
+default_tools = {'home': ToolHome, 'back': ToolBack, 'forward': ToolForward,
+ 'zoom': ToolZoom, 'pan': ToolPan,
+ 'subplots': 'ToolConfigureSubplots',
+ 'save': 'ToolSaveFigure',
+ 'grid': ToolGrid,
+ 'grid_minor': ToolMinorGrid,
+ 'fullscreen': ToolFullScreen,
+ 'quit': ToolQuit,
+ 'quit_all': ToolQuitAll,
+ 'allnav': ToolEnableAllNavigation,
+ 'nav': ToolEnableNavigation,
+ 'xscale': ToolXScale,
+ 'yscale': ToolYScale,
+ 'position': ToolCursorPosition,
+ _views_positions: ToolViewsPositions,
+ 'cursor': 'ToolSetCursor',
+ 'rubberband': 'ToolRubberband',
+ }
+"""Default tools"""
+
+default_toolbar_tools = [['navigation', ['home', 'back', 'forward']],
+ ['zoompan', ['pan', 'zoom', 'subplots']],
+ ['io', ['save']]]
+"""Default tools in the toolbar"""
+
+
+def add_tools_to_manager(toolmanager, tools=default_tools):
+ """
+ Add multiple tools to `ToolManager`
+
+ Parameters
+ ----------
+ toolmanager: ToolManager
+ `backend_managers.ToolManager` object that will get the tools added
+ tools : {str: class_like}, optional
+ The tools to add in a {name: tool} dict, see `add_tool` for more
+ info.
+ """
+
+ for name, tool in six.iteritems(tools):
+ toolmanager.add_tool(name, tool)
+
+
+def add_tools_to_container(container, tools=default_toolbar_tools):
+ """
+ Add multiple tools to the container.
+
+ Parameters
+ ----------
+ container: Container
+ `backend_bases.ToolContainerBase` object that will get the tools added
+ tools : list, optional
+ List in the form
+ [[group1, [tool1, tool2 ...]], [group2, [...]]]
+ Where the tools given by tool1, and tool2 will display in group1.
+ See `add_tool` for details.
+ """
+
+ for group, grouptools in tools:
+ for position, tool in enumerate(grouptools):
+ container.add_tool(tool, group, position)
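+
+# Editor's end-to-end sketch: how the two helpers above are typically combined
+# when a backend wires up toolmanager-based navigation.  ``figure`` is assumed
+# to be an existing Figure and ``MyToolbar`` a hypothetical ToolContainerBase
+# subclass supplied by that backend:
+#
+#     from matplotlib.backend_managers import ToolManager
+#
+#     toolmanager = ToolManager(figure)
+#     add_tools_to_manager(toolmanager)
+#     toolbar = MyToolbar(toolmanager)
+#     add_tools_to_container(toolbar)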
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/__init__.py b/contrib/python/matplotlib/py2/matplotlib/backends/__init__.py
new file mode 100644
index 00000000000..33c60d85508
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/__init__.py
@@ -0,0 +1,97 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import matplotlib
+import inspect
+import traceback
+import warnings
+import logging
+
+_log = logging.getLogger(__name__)
+
+backend = matplotlib.get_backend()
+# the `str` calls here are to make non-ascii paths work on python2
+_backend_loading_tb = str("").join(
+ line for line in traceback.format_stack()
+ # Filter out line noise from importlib line.
+ if not line.startswith(str(' File "<frozen importlib._bootstrap')))
+
+
+def pylab_setup(name=None):
+ '''return new_figure_manager, draw_if_interactive and show for pyplot
+
+ This provides the backend-specific functions that are used by
+ pyplot to abstract away the difference between interactive backends.
+
+ Parameters
+ ----------
+ name : str, optional
+ The name of the backend to use. If `None`, falls back to
+        ``matplotlib.get_backend()`` (which returns :rc:`backend`).
+
+ Returns
+ -------
+ backend_mod : module
+ The module which contains the backend of choice
+
+ new_figure_manager : function
+ Create a new figure manager (roughly maps to GUI window)
+
+ draw_if_interactive : function
+ Redraw the current figure if pyplot is interactive
+
+ show : function
+ Show (and possibly block) any unshown figures.
+
+ '''
+ # Import the requested backend into a generic module object
+ if name is None:
+ # validates, to match all_backends
+ name = matplotlib.get_backend()
+ if name.startswith('module://'):
+ backend_name = name[9:]
+ else:
+ backend_name = 'backend_' + name
+ backend_name = backend_name.lower() # until we banish mixed case
+ backend_name = 'matplotlib.backends.%s' % backend_name.lower()
+
+    # the last argument specifies whether to use absolute or relative
+ # imports. 0 means only perform absolute imports.
+ backend_mod = __import__(backend_name, globals(), locals(),
+ [backend_name], 0)
+
+ # Things we pull in from all backends
+ new_figure_manager = backend_mod.new_figure_manager
+
+ # image backends like pdf, agg or svg do not need to do anything
+ # for "show" or "draw_if_interactive", so if they are not defined
+ # by the backend, just do nothing
+ def do_nothing_show(*args, **kwargs):
+ frame = inspect.currentframe()
+ fname = frame.f_back.f_code.co_filename
+ if fname in ('<stdin>', '<ipython console>'):
+ warnings.warn("""
+Your currently selected backend, '%s' does not support show().
+Please select a GUI backend in your matplotlibrc file ('%s')
+or with matplotlib.use()""" %
+ (name, matplotlib.matplotlib_fname()))
+
+ def do_nothing(*args, **kwargs):
+ pass
+
+ backend_version = getattr(backend_mod, 'backend_version', 'unknown')
+
+ show = getattr(backend_mod, 'show', do_nothing_show)
+
+ draw_if_interactive = getattr(backend_mod, 'draw_if_interactive',
+ do_nothing)
+
+ _log.debug('backend %s version %s', name, backend_version)
+
+ # need to keep a global reference to the backend for compatibility
+ # reasons. See https://github.com/matplotlib/matplotlib/issues/6092
+ global backend
+ backend = name
+ return backend_mod, new_figure_manager, draw_if_interactive, show
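+
+# Editor's usage note (illustrative only): pyplot consumes the returned tuple
+# roughly as follows; the local names are arbitrary:
+#
+#     backend_mod, new_figure_manager, draw_if_interactive, show = \
+#         pylab_setup()
+#     manager = new_figure_manager(1)  # create a manager for figure number 1
+#     show()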
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/_backend_tk.py b/contrib/python/matplotlib/py2/matplotlib/backends/_backend_tk.py
new file mode 100644
index 00000000000..da404b6e1bc
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/_backend_tk.py
@@ -0,0 +1,1075 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import tkinter as Tk
+
+import logging
+import os.path
+import sys
+
+# Paint image to Tk photo blitter extension
+import matplotlib.backends.tkagg as tkagg
+
+from matplotlib.backends.backend_agg import FigureCanvasAgg
+import matplotlib.backends.windowing as windowing
+
+import matplotlib
+from matplotlib import backend_tools, cbook, rcParams
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
+ StatusbarBase, TimerBase, ToolContainerBase, cursors)
+from matplotlib.backend_managers import ToolManager
+from matplotlib._pylab_helpers import Gcf
+from matplotlib.figure import Figure
+from matplotlib.widgets import SubplotTool
+
+
+_log = logging.getLogger(__name__)
+
+backend_version = Tk.TkVersion
+
+# the true dots per inch on the screen; should be display dependent
+# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
+PIXELS_PER_INCH = 75
+
+cursord = {
+ cursors.MOVE: "fleur",
+ cursors.HAND: "hand2",
+ cursors.POINTER: "arrow",
+ cursors.SELECT_REGION: "tcross",
+ cursors.WAIT: "watch",
+ }
+
+
+def raise_msg_to_str(msg):
+ """msg is a return arg from a raise. Join with new lines"""
+ if not isinstance(msg, six.string_types):
+ msg = '\n'.join(map(str, msg))
+ return msg
+
+def error_msg_tkpaint(msg, parent=None):
+ from six.moves import tkinter_messagebox as tkMessageBox
+ tkMessageBox.showerror("matplotlib", msg)
+
+
+class TimerTk(TimerBase):
+ '''
+ Subclass of :class:`backend_bases.TimerBase` that uses Tk's timer events.
+
+ Attributes
+ ----------
+ interval : int
+ The time between timer events in milliseconds. Default is 1000 ms.
+ single_shot : bool
+ Boolean flag indicating whether this timer should operate as single
+ shot (run once and then stop). Defaults to False.
+ callbacks : list
+ Stores list of (func, args) tuples that will be called upon timer
+ events. This list can be manipulated directly, or the functions
+ `add_callback` and `remove_callback` can be used.
+
+ '''
+ def __init__(self, parent, *args, **kwargs):
+ TimerBase.__init__(self, *args, **kwargs)
+ self.parent = parent
+ self._timer = None
+
+ def _timer_start(self):
+ self._timer_stop()
+ self._timer = self.parent.after(self._interval, self._on_timer)
+
+ def _timer_stop(self):
+ if self._timer is not None:
+ self.parent.after_cancel(self._timer)
+ self._timer = None
+
+ def _on_timer(self):
+ TimerBase._on_timer(self)
+
+ # Tk after() is only a single shot, so we need to add code here to
+ # reset the timer if we're not operating in single shot mode. However,
+ # if _timer is None, this means that _timer_stop has been called; so
+ # don't recreate the timer in that case.
+ if not self._single and self._timer:
+ self._timer = self.parent.after(self._interval, self._on_timer)
+ else:
+ self._timer = None
+
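+# Editor's usage sketch: driving a periodic callback from an existing Tk
+# widget; ``root`` is assumed to be a Tk widget created elsewhere:
+#
+#     def tick():
+#         print('tick')
+#
+#     timer = TimerTk(root, interval=500)
+#     timer.add_callback(tick)
+#     timer.start()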
+
+class FigureCanvasTk(FigureCanvasBase):
+ keyvald = {65507 : 'control',
+ 65505 : 'shift',
+ 65513 : 'alt',
+ 65515 : 'super',
+ 65508 : 'control',
+ 65506 : 'shift',
+ 65514 : 'alt',
+ 65361 : 'left',
+ 65362 : 'up',
+ 65363 : 'right',
+ 65364 : 'down',
+ 65307 : 'escape',
+ 65470 : 'f1',
+ 65471 : 'f2',
+ 65472 : 'f3',
+ 65473 : 'f4',
+ 65474 : 'f5',
+ 65475 : 'f6',
+ 65476 : 'f7',
+ 65477 : 'f8',
+ 65478 : 'f9',
+ 65479 : 'f10',
+ 65480 : 'f11',
+ 65481 : 'f12',
+ 65300 : 'scroll_lock',
+ 65299 : 'break',
+ 65288 : 'backspace',
+ 65293 : 'enter',
+ 65379 : 'insert',
+ 65535 : 'delete',
+ 65360 : 'home',
+ 65367 : 'end',
+ 65365 : 'pageup',
+ 65366 : 'pagedown',
+ 65438 : '0',
+ 65436 : '1',
+ 65433 : '2',
+ 65435 : '3',
+ 65430 : '4',
+ 65437 : '5',
+ 65432 : '6',
+ 65429 : '7',
+ 65431 : '8',
+ 65434 : '9',
+ 65451 : '+',
+ 65453 : '-',
+ 65450 : '*',
+ 65455 : '/',
+ 65439 : 'dec',
+ 65421 : 'enter',
+ }
+
+ _keycode_lookup = {
+ 262145: 'control',
+ 524320: 'alt',
+ 524352: 'alt',
+ 1048584: 'super',
+ 1048592: 'super',
+ 131074: 'shift',
+ 131076: 'shift',
+ }
+ """_keycode_lookup is used for badly mapped (i.e. no event.key_sym set)
+ keys on apple keyboards."""
+
+ def __init__(self, figure, master=None, resize_callback=None):
+ super(FigureCanvasTk, self).__init__(figure)
+ self._idle = True
+ self._idle_callback = None
+ t1,t2,w,h = self.figure.bbox.bounds
+ w, h = int(w), int(h)
+ self._tkcanvas = Tk.Canvas(
+ master=master, background="white",
+ width=w, height=h, borderwidth=0, highlightthickness=0)
+ self._tkphoto = Tk.PhotoImage(
+ master=self._tkcanvas, width=w, height=h)
+ self._tkcanvas.create_image(w//2, h//2, image=self._tkphoto)
+ self._resize_callback = resize_callback
+ self._tkcanvas.bind("<Configure>", self.resize)
+ self._tkcanvas.bind("<Key>", self.key_press)
+ self._tkcanvas.bind("<Motion>", self.motion_notify_event)
+ self._tkcanvas.bind("<KeyRelease>", self.key_release)
+ for name in "<Button-1>", "<Button-2>", "<Button-3>":
+ self._tkcanvas.bind(name, self.button_press_event)
+ for name in "<Double-Button-1>", "<Double-Button-2>", "<Double-Button-3>":
+ self._tkcanvas.bind(name, self.button_dblclick_event)
+ for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
+ self._tkcanvas.bind(name, self.button_release_event)
+
+ # Mouse wheel on Linux generates button 4/5 events
+ for name in "<Button-4>", "<Button-5>":
+ self._tkcanvas.bind(name, self.scroll_event)
+ # Mouse wheel for windows goes to the window with the focus.
+ # Since the canvas won't usually have the focus, bind the
+ # event to the window containing the canvas instead.
+ # See http://wiki.tcl.tk/3893 (mousewheel) for details
+ root = self._tkcanvas.winfo_toplevel()
+ root.bind("<MouseWheel>", self.scroll_event_windows, "+")
+
+ # Can't get destroy events by binding to _tkcanvas. Therefore, bind
+ # to the window and filter.
+ def filter_destroy(evt):
+ if evt.widget is self._tkcanvas:
+ self._master.update_idletasks()
+ self.close_event()
+ root.bind("<Destroy>", filter_destroy, "+")
+
+ self._master = master
+ self._tkcanvas.focus_set()
+
+ def resize(self, event):
+ width, height = event.width, event.height
+ if self._resize_callback is not None:
+ self._resize_callback(event)
+
+ # compute desired figure size in inches
+ dpival = self.figure.dpi
+ winch = width/dpival
+ hinch = height/dpival
+ self.figure.set_size_inches(winch, hinch, forward=False)
+
+
+ self._tkcanvas.delete(self._tkphoto)
+ self._tkphoto = Tk.PhotoImage(
+ master=self._tkcanvas, width=int(width), height=int(height))
+ self._tkcanvas.create_image(int(width/2),int(height/2),image=self._tkphoto)
+ self.resize_event()
+ self.draw()
+
+ # a resizing will in general move the pointer position
+ # relative to the canvas, so process it as a motion notify
+ # event. An intended side effect of this call is to allow
+ # window raises (which trigger a resize) to get the cursor
+ # position to the mpl event framework so key presses which are
+ # over the axes will work w/o clicks or explicit motion
+ self._update_pointer_position(event)
+
+ def _update_pointer_position(self, guiEvent=None):
+ """
+ Figure out if we are inside the canvas or not and update the
+ canvas enter/leave events
+ """
+        # if the pointer is over the canvas, set the lastx and lasty
+        # attrs of the canvas so it can process events w/o a mouse click
+        # or move
+
+ # the window's upper, left coords in screen coords
+ xw = self._tkcanvas.winfo_rootx()
+ yw = self._tkcanvas.winfo_rooty()
+ # the pointer's location in screen coords
+ xp, yp = self._tkcanvas.winfo_pointerxy()
+
+        # now figure out the canvas coordinates of the pointer
+ xc = xp - xw
+ yc = yp - yw
+
+ # flip top/bottom
+ yc = self.figure.bbox.height - yc
+
+ # JDH: this method was written originally to get the pointer
+ # location to the backend lastx and lasty attrs so that events
+ # like KeyEvent can be handled without mouse events. e.g., if
+ # the cursor is already above the axes, then key presses like
+ # 'g' should toggle the grid. In order for this to work in
+ # backend_bases, the canvas needs to know _lastx and _lasty.
+ # There are three ways to get this info the canvas:
+ #
+ # 1) set it explicitly
+ #
+ # 2) call enter/leave events explicitly. The downside of this
+ # in the impl below is that enter could be repeatedly
+ # triggered if the mouse is over the axes and one is
+ # resizing with the keyboard. This is not entirely bad,
+ # because the mouse position relative to the canvas is
+ # changing, but it may be surprising to get repeated entries
+ # without leaves
+ #
+ # 3) process it as a motion notify event. This also has pros
+ # and cons. The mouse is moving relative to the window, but
+        #    this may surprise an event handler writer who is getting
+ # motion_notify_events even if the mouse has not moved
+
+ # here are the three scenarios
+ if 1:
+ # just manually set it
+ self._lastx, self._lasty = xc, yc
+ elif 0:
+ # alternate implementation: process it as a motion
+ FigureCanvasBase.motion_notify_event(self, xc, yc, guiEvent)
+ elif 0:
+ # alternate implementation -- process enter/leave events
+ # instead of motion/notify
+ if self.figure.bbox.contains(xc, yc):
+ self.enter_notify_event(guiEvent, xy=(xc,yc))
+ else:
+ self.leave_notify_event(guiEvent)
+
+ show = cbook.deprecated("2.2", name="FigureCanvasTk.show",
+ alternative="FigureCanvasTk.draw")(
+ lambda self: self.draw())
+
+ def draw_idle(self):
+ 'update drawing area only if idle'
+ if self._idle is False:
+ return
+
+ self._idle = False
+
+ def idle_draw(*args):
+ try:
+ self.draw()
+ finally:
+ self._idle = True
+
+ self._idle_callback = self._tkcanvas.after_idle(idle_draw)
+
+ def get_tk_widget(self):
+ """returns the Tk widget used to implement FigureCanvasTkAgg.
+ Although the initial implementation uses a Tk canvas, this routine
+ is intended to hide that fact.
+ """
+ return self._tkcanvas
+
+ def motion_notify_event(self, event):
+ x = event.x
+ # flipy so y=0 is bottom of canvas
+ y = self.figure.bbox.height - event.y
+ FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
+
+
+ def button_press_event(self, event, dblclick=False):
+ x = event.x
+ # flipy so y=0 is bottom of canvas
+ y = self.figure.bbox.height - event.y
+ num = getattr(event, 'num', None)
+
+ if sys.platform=='darwin':
+ # 2 and 3 were reversed on the OSX platform I
+ # tested under tkagg
+ if num==2: num=3
+ elif num==3: num=2
+
+ FigureCanvasBase.button_press_event(self, x, y, num, dblclick=dblclick, guiEvent=event)
+
+ def button_dblclick_event(self,event):
+ self.button_press_event(event,dblclick=True)
+
+ def button_release_event(self, event):
+ x = event.x
+ # flipy so y=0 is bottom of canvas
+ y = self.figure.bbox.height - event.y
+
+ num = getattr(event, 'num', None)
+
+ if sys.platform=='darwin':
+ # 2 and 3 were reversed on the OSX platform I
+ # tested under tkagg
+ if num==2: num=3
+ elif num==3: num=2
+
+ FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
+
+ def scroll_event(self, event):
+ x = event.x
+ y = self.figure.bbox.height - event.y
+ num = getattr(event, 'num', None)
+ if num==4: step = +1
+ elif num==5: step = -1
+ else: step = 0
+
+ FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
+
+ def scroll_event_windows(self, event):
+ """MouseWheel event processor"""
+ # need to find the window that contains the mouse
+ w = event.widget.winfo_containing(event.x_root, event.y_root)
+ if w == self._tkcanvas:
+ x = event.x_root - w.winfo_rootx()
+ y = event.y_root - w.winfo_rooty()
+ y = self.figure.bbox.height - y
+ step = event.delta/120.
+ FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
+
+ def _get_key(self, event):
+ val = event.keysym_num
+ if val in self.keyvald:
+ key = self.keyvald[val]
+ elif val == 0 and sys.platform == 'darwin' and \
+ event.keycode in self._keycode_lookup:
+ key = self._keycode_lookup[event.keycode]
+ elif val < 256:
+ key = chr(val)
+ else:
+ key = None
+
+ # add modifier keys to the key string. Bit details originate from
+ # http://effbot.org/tkinterbook/tkinter-events-and-bindings.htm
+ # BIT_SHIFT = 0x001; BIT_CAPSLOCK = 0x002; BIT_CONTROL = 0x004;
+ # BIT_LEFT_ALT = 0x008; BIT_NUMLOCK = 0x010; BIT_RIGHT_ALT = 0x080;
+ # BIT_MB_1 = 0x100; BIT_MB_2 = 0x200; BIT_MB_3 = 0x400;
+ # In general, the modifier key is excluded from the modifier flag,
+ # however this is not the case on "darwin", so double check that
+ # we aren't adding repeat modifier flags to a modifier key.
+ if sys.platform == 'win32':
+ modifiers = [(17, 'alt', 'alt'),
+ (2, 'ctrl', 'control'),
+ ]
+ elif sys.platform == 'darwin':
+ modifiers = [(3, 'super', 'super'),
+ (4, 'alt', 'alt'),
+ (2, 'ctrl', 'control'),
+ ]
+ else:
+ modifiers = [(6, 'super', 'super'),
+ (3, 'alt', 'alt'),
+ (2, 'ctrl', 'control'),
+ ]
+
+ if key is not None:
+ # note, shift is not added to the keys as this is already accounted for
+ for bitmask, prefix, key_name in modifiers:
+ if event.state & (1 << bitmask) and key_name not in key:
+ key = '{0}+{1}'.format(prefix, key)
+
+ return key
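+
+    # A minimal sketch of the modifier handling above, using the effbot
+    # bitmask table quoted in _get_key: BIT_CONTROL is 0x004 (bit index 2),
+    # so a Tk key event for 'a' whose ``state`` has that bit set satisfies
+    #
+    #     event.state & (1 << 2)    # truthy -> key becomes 'ctrl+a'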
+
+ def key_press(self, event):
+ key = self._get_key(event)
+ FigureCanvasBase.key_press_event(self, key, guiEvent=event)
+
+ def key_release(self, event):
+ key = self._get_key(event)
+ FigureCanvasBase.key_release_event(self, key, guiEvent=event)
+
+ def new_timer(self, *args, **kwargs):
+ """
+ Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
+ This is useful for getting periodic events through the backend's native
+ event loop. Implemented only for backends with GUIs.
+
+ Other Parameters
+ ----------------
+ interval : scalar
+ Timer interval in milliseconds
+ callbacks : list
+ Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
+ will be executed by the timer every *interval*.
+
+ """
+ return TimerTk(self._tkcanvas, *args, **kwargs)
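+
+    # A minimal usage sketch for the timer machinery above, assuming a
+    # user-defined ``update`` callback (not part of this module):
+    #
+    #     timer = canvas.new_timer(interval=100)
+    #     timer.add_callback(update)   # update() is called every 100 ms
+    #     timer.start()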
+
+ def flush_events(self):
+ self._master.update()
+
+
+class FigureManagerTk(FigureManagerBase):
+ """
+ Attributes
+ ----------
+ canvas : `FigureCanvas`
+ The FigureCanvas instance
+ num : int or str
+ The Figure number
+ toolbar : tk.Toolbar
+ The tk.Toolbar
+ window : tk.Window
+ The tk.Window
+
+ """
+ def __init__(self, canvas, num, window):
+ FigureManagerBase.__init__(self, canvas, num)
+ self.window = window
+ self.window.withdraw()
+ self.set_window_title("Figure %d" % num)
+ self.canvas = canvas
+ # If using toolmanager it has to be present when initializing the toolbar
+ self.toolmanager = self._get_toolmanager()
+        # Pack the toolbar first, because when space runs low the last
+        # packed widget (i.e. the canvas) is the first to be shrunk.
+ self.toolbar = self._get_toolbar()
+ self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
+ self._num = num
+
+ self.statusbar = None
+
+ if self.toolmanager:
+ backend_tools.add_tools_to_manager(self.toolmanager)
+ if self.toolbar:
+ backend_tools.add_tools_to_container(self.toolbar)
+ self.statusbar = StatusbarTk(self.window, self.toolmanager)
+
+ self._shown = False
+
+ def notify_axes_change(fig):
+ 'this will be called whenever the current axes is changed'
+ if self.toolmanager is not None:
+ pass
+ elif self.toolbar is not None:
+ self.toolbar.update()
+ self.canvas.figure.add_axobserver(notify_axes_change)
+
+ def _get_toolbar(self):
+ if matplotlib.rcParams['toolbar'] == 'toolbar2':
+ toolbar = NavigationToolbar2Tk(self.canvas, self.window)
+ elif matplotlib.rcParams['toolbar'] == 'toolmanager':
+ toolbar = ToolbarTk(self.toolmanager, self.window)
+ else:
+ toolbar = None
+ return toolbar
+
+ def _get_toolmanager(self):
+ if rcParams['toolbar'] == 'toolmanager':
+ toolmanager = ToolManager(self.canvas.figure)
+ else:
+ toolmanager = None
+ return toolmanager
+
+ def resize(self, width, height=None):
+        # Before 09-12-22, the resize method took a single *event*
+        # parameter. On the other hand, the resize method of the other
+        # FigureManager classes takes *width* and *height* parameters,
+        # which are used to change the size of the window. For
+        # Figure.set_size_inches with forward=True to work with the Tk
+        # backend, I changed the function signature but tried to keep
+        # it backward compatible. -JJL
+
+        # when a single parameter is given, consider it as an event
+ if height is None:
+ cbook.warn_deprecated("2.2", "FigureManagerTkAgg.resize now takes "
+ "width and height as separate arguments")
+ width = width.width
+ else:
+ self.canvas._tkcanvas.master.geometry("%dx%d" % (width, height))
+
+ if self.toolbar is not None:
+ self.toolbar.configure(width=width)
+
+ def show(self):
+ """
+ this function doesn't segfault but causes the
+ PyEval_RestoreThread: NULL state bug on win32
+ """
+ _focus = windowing.FocusManager()
+ if not self._shown:
+ def destroy(*args):
+ self.window = None
+ Gcf.destroy(self._num)
+ self.canvas._tkcanvas.bind("<Destroy>", destroy)
+ self.window.deiconify()
+ else:
+ self.canvas.draw_idle()
+ # Raise the new window.
+ self.canvas.manager.window.attributes('-topmost', 1)
+ self.canvas.manager.window.attributes('-topmost', 0)
+ self._shown = True
+
+ def destroy(self, *args):
+ if self.window is not None:
+ #self.toolbar.destroy()
+ if self.canvas._idle_callback:
+ self.canvas._tkcanvas.after_cancel(self.canvas._idle_callback)
+ self.window.destroy()
+ if Gcf.get_num_fig_managers()==0:
+ if self.window is not None:
+ self.window.quit()
+ self.window = None
+
+ def get_window_title(self):
+ return self.window.wm_title()
+
+ def set_window_title(self, title):
+ self.window.wm_title(title)
+
+ def full_screen_toggle(self):
+ is_fullscreen = bool(self.window.attributes('-fullscreen'))
+ self.window.attributes('-fullscreen', not is_fullscreen)
+
+
+@cbook.deprecated("2.2")
+class AxisMenu(object):
+ def __init__(self, master, naxes):
+ self._master = master
+ self._naxes = naxes
+ self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
+ self._mbar.pack(side=Tk.LEFT)
+ self._mbutton = Tk.Menubutton(
+ master=self._mbar, text="Axes", underline=0)
+ self._mbutton.pack(side=Tk.LEFT, padx="2m")
+ self._mbutton.menu = Tk.Menu(self._mbutton)
+ self._mbutton.menu.add_command(
+ label="Select All", command=self.select_all)
+ self._mbutton.menu.add_command(
+ label="Invert All", command=self.invert_all)
+ self._axis_var = []
+ self._checkbutton = []
+ for i in range(naxes):
+ self._axis_var.append(Tk.IntVar())
+ self._axis_var[i].set(1)
+ self._checkbutton.append(self._mbutton.menu.add_checkbutton(
+ label = "Axis %d" % (i+1),
+ variable=self._axis_var[i],
+ command=self.set_active))
+ self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
+ self._mbutton['menu'] = self._mbutton.menu
+ self._mbar.tk_menuBar(self._mbutton)
+ self.set_active()
+
+ def adjust(self, naxes):
+ if self._naxes < naxes:
+ for i in range(self._naxes, naxes):
+ self._axis_var.append(Tk.IntVar())
+ self._axis_var[i].set(1)
+ self._checkbutton.append( self._mbutton.menu.add_checkbutton(
+ label = "Axis %d" % (i+1),
+ variable=self._axis_var[i],
+ command=self.set_active))
+ elif self._naxes > naxes:
+ for i in range(self._naxes-1, naxes-1, -1):
+ del self._axis_var[i]
+ self._mbutton.menu.forget(self._checkbutton[i])
+ del self._checkbutton[i]
+ self._naxes = naxes
+ self.set_active()
+
+ def get_indices(self):
+ a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
+ return a
+
+ def set_active(self):
+ self._master.set_active(self.get_indices())
+
+ def invert_all(self):
+ for a in self._axis_var:
+ a.set(not a.get())
+ self.set_active()
+
+ def select_all(self):
+ for a in self._axis_var:
+ a.set(1)
+ self.set_active()
+
+
+class NavigationToolbar2Tk(NavigationToolbar2, Tk.Frame):
+ """
+ Attributes
+ ----------
+ canvas : `FigureCanvas`
+ the figure canvas on which to operate
+ win : tk.Window
+ the tk.Window which owns this toolbar
+
+ """
+ def __init__(self, canvas, window):
+ self.canvas = canvas
+ self.window = window
+ NavigationToolbar2.__init__(self, canvas)
+
+ def destroy(self, *args):
+ del self.message
+ Tk.Frame.destroy(self, *args)
+
+ def set_message(self, s):
+ self.message.set(s)
+
+ def draw_rubberband(self, event, x0, y0, x1, y1):
+ height = self.canvas.figure.bbox.height
+ y0 = height - y0
+ y1 = height - y1
+ if hasattr(self, "lastrect"):
+ self.canvas._tkcanvas.delete(self.lastrect)
+ self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
+
+ #self.canvas.draw()
+
+ def release(self, event):
+ try: self.lastrect
+ except AttributeError: pass
+ else:
+ self.canvas._tkcanvas.delete(self.lastrect)
+ del self.lastrect
+
+ def set_cursor(self, cursor):
+ self.window.configure(cursor=cursord[cursor])
+ self.window.update_idletasks()
+
+ def _Button(self, text, file, command, extension='.gif'):
+ img_file = os.path.join(
+ rcParams['datapath'], 'images', file + extension)
+ im = Tk.PhotoImage(master=self, file=img_file)
+ b = Tk.Button(
+ master=self, text=text, padx=2, pady=2, image=im, command=command)
+ b._ntimage = im
+ b.pack(side=Tk.LEFT)
+ return b
+
+ def _Spacer(self):
+ # Buttons are 30px high, so make this 26px tall with padding to center it
+ s = Tk.Frame(
+ master=self, height=26, relief=Tk.RIDGE, pady=2, bg="DarkGray")
+ s.pack(side=Tk.LEFT, padx=5)
+ return s
+
+ def _init_toolbar(self):
+ xmin, xmax = self.canvas.figure.bbox.intervalx
+ height, width = 50, xmax-xmin
+ Tk.Frame.__init__(self, master=self.window,
+ width=int(width), height=int(height),
+ borderwidth=2)
+
+ self.update() # Make axes menu
+
+ for text, tooltip_text, image_file, callback in self.toolitems:
+ if text is None:
+ # Add a spacer; return value is unused.
+ self._Spacer()
+ else:
+ button = self._Button(text=text, file=image_file,
+ command=getattr(self, callback))
+ if tooltip_text is not None:
+ ToolTip.createToolTip(button, tooltip_text)
+
+ self.message = Tk.StringVar(master=self)
+ self._message_label = Tk.Label(master=self, textvariable=self.message)
+ self._message_label.pack(side=Tk.RIGHT)
+ self.pack(side=Tk.BOTTOM, fill=Tk.X)
+
+ def configure_subplots(self):
+ toolfig = Figure(figsize=(6,3))
+ window = Tk.Toplevel()
+ canvas = type(self.canvas)(toolfig, master=window)
+ toolfig.subplots_adjust(top=0.9)
+ canvas.tool = SubplotTool(self.canvas.figure, toolfig)
+ canvas.draw()
+ canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
+ window.grab_set()
+
+ def save_figure(self, *args):
+ from six.moves import tkinter_tkfiledialog, tkinter_messagebox
+ filetypes = self.canvas.get_supported_filetypes().copy()
+ default_filetype = self.canvas.get_default_filetype()
+
+ # Tk doesn't provide a way to choose a default filetype,
+ # so we just have to put it first
+ default_filetype_name = filetypes.pop(default_filetype)
+ sorted_filetypes = ([(default_filetype, default_filetype_name)]
+ + sorted(six.iteritems(filetypes)))
+ tk_filetypes = [(name, '*.%s' % ext) for ext, name in sorted_filetypes]
+
+ # adding a default extension seems to break the
+ # asksaveasfilename dialog when you choose various save types
+ # from the dropdown. Passing in the empty string seems to
+ # work - JDH!
+ #defaultextension = self.canvas.get_default_filetype()
+ defaultextension = ''
+ initialdir = os.path.expanduser(rcParams['savefig.directory'])
+ initialfile = self.canvas.get_default_filename()
+ fname = tkinter_tkfiledialog.asksaveasfilename(
+ master=self.window,
+ title='Save the figure',
+ filetypes=tk_filetypes,
+ defaultextension=defaultextension,
+ initialdir=initialdir,
+ initialfile=initialfile,
+ )
+
+ if fname in ["", ()]:
+ return
+ # Save dir for next time, unless empty str (i.e., use cwd).
+ if initialdir != "":
+ rcParams['savefig.directory'] = (
+ os.path.dirname(six.text_type(fname)))
+ try:
+ # This method will handle the delegation to the correct type
+ self.canvas.figure.savefig(fname)
+ except Exception as e:
+ tkinter_messagebox.showerror("Error saving file", str(e))
+
+ def set_active(self, ind):
+ self._ind = ind
+ self._active = [self._axes[i] for i in self._ind]
+
+ def update(self):
+ _focus = windowing.FocusManager()
+ self._axes = self.canvas.figure.axes
+ NavigationToolbar2.update(self)
+
+
+class ToolTip(object):
+ """
+ Tooltip recipe from
+ http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml#e387
+ """
+ @staticmethod
+ def createToolTip(widget, text):
+ toolTip = ToolTip(widget)
+ def enter(event):
+ toolTip.showtip(text)
+ def leave(event):
+ toolTip.hidetip()
+ widget.bind('<Enter>', enter)
+ widget.bind('<Leave>', leave)
+
+ def __init__(self, widget):
+ self.widget = widget
+ self.tipwindow = None
+ self.id = None
+ self.x = self.y = 0
+
+ def showtip(self, text):
+ "Display text in tooltip window"
+ self.text = text
+ if self.tipwindow or not self.text:
+ return
+ x, y, _, _ = self.widget.bbox("insert")
+ x = x + self.widget.winfo_rootx() + 27
+ y = y + self.widget.winfo_rooty()
+ self.tipwindow = tw = Tk.Toplevel(self.widget)
+ tw.wm_overrideredirect(1)
+ tw.wm_geometry("+%d+%d" % (x, y))
+ try:
+ # For Mac OS
+ tw.tk.call("::tk::unsupported::MacWindowStyle",
+ "style", tw._w,
+ "help", "noActivates")
+ except Tk.TclError:
+ pass
+ label = Tk.Label(tw, text=self.text, justify=Tk.LEFT,
+ background="#ffffe0", relief=Tk.SOLID, borderwidth=1)
+ label.pack(ipadx=1)
+
+ def hidetip(self):
+ tw = self.tipwindow
+ self.tipwindow = None
+ if tw:
+ tw.destroy()
+
+
+class RubberbandTk(backend_tools.RubberbandBase):
+ def __init__(self, *args, **kwargs):
+ backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
+
+ def draw_rubberband(self, x0, y0, x1, y1):
+ height = self.figure.canvas.figure.bbox.height
+ y0 = height - y0
+ y1 = height - y1
+ if hasattr(self, "lastrect"):
+ self.figure.canvas._tkcanvas.delete(self.lastrect)
+ self.lastrect = self.figure.canvas._tkcanvas.create_rectangle(
+ x0, y0, x1, y1)
+
+ def remove_rubberband(self):
+ if hasattr(self, "lastrect"):
+ self.figure.canvas._tkcanvas.delete(self.lastrect)
+ del self.lastrect
+
+
+class SetCursorTk(backend_tools.SetCursorBase):
+ def set_cursor(self, cursor):
+ self.figure.canvas.manager.window.configure(cursor=cursord[cursor])
+
+
+class ToolbarTk(ToolContainerBase, Tk.Frame):
+ _icon_extension = '.gif'
+ def __init__(self, toolmanager, window):
+ ToolContainerBase.__init__(self, toolmanager)
+ xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx
+ height, width = 50, xmax - xmin
+ Tk.Frame.__init__(self, master=window,
+ width=int(width), height=int(height),
+ borderwidth=2)
+ self._toolitems = {}
+ self.pack(side=Tk.TOP, fill=Tk.X)
+ self._groups = {}
+
+ def add_toolitem(
+ self, name, group, position, image_file, description, toggle):
+ frame = self._get_groupframe(group)
+ button = self._Button(name, image_file, toggle, frame)
+ if description is not None:
+ ToolTip.createToolTip(button, description)
+ self._toolitems.setdefault(name, [])
+ self._toolitems[name].append(button)
+
+ def _get_groupframe(self, group):
+ if group not in self._groups:
+ if self._groups:
+ self._add_separator()
+ frame = Tk.Frame(master=self, borderwidth=0)
+ frame.pack(side=Tk.LEFT, fill=Tk.Y)
+ self._groups[group] = frame
+ return self._groups[group]
+
+ def _add_separator(self):
+ separator = Tk.Frame(master=self, bd=5, width=1, bg='black')
+ separator.pack(side=Tk.LEFT, fill=Tk.Y, padx=2)
+
+ def _Button(self, text, image_file, toggle, frame):
+ if image_file is not None:
+ im = Tk.PhotoImage(master=self, file=image_file)
+ else:
+ im = None
+
+ if not toggle:
+ b = Tk.Button(master=frame, text=text, padx=2, pady=2, image=im,
+ command=lambda: self._button_click(text))
+ else:
+            # There is a bug in the tkinter shipped with some Python 3.6
+            # versions that, without this variable, produces a "visual"
+            # toggling of other nearby checkbuttons:
+ # https://bugs.python.org/issue29402
+ # https://bugs.python.org/issue25684
+ var = Tk.IntVar()
+ b = Tk.Checkbutton(master=frame, text=text, padx=2, pady=2,
+ image=im, indicatoron=False,
+ command=lambda: self._button_click(text),
+ variable=var)
+ b._ntimage = im
+ b.pack(side=Tk.LEFT)
+ return b
+
+ def _button_click(self, name):
+ self.trigger_tool(name)
+
+ def toggle_toolitem(self, name, toggled):
+ if name not in self._toolitems:
+ return
+ for toolitem in self._toolitems[name]:
+ if toggled:
+ toolitem.select()
+ else:
+ toolitem.deselect()
+
+ def remove_toolitem(self, name):
+ for toolitem in self._toolitems[name]:
+ toolitem.pack_forget()
+ del self._toolitems[name]
+
+
+class StatusbarTk(StatusbarBase, Tk.Frame):
+ def __init__(self, window, *args, **kwargs):
+ StatusbarBase.__init__(self, *args, **kwargs)
+ xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx
+ height, width = 50, xmax - xmin
+ Tk.Frame.__init__(self, master=window,
+ width=int(width), height=int(height),
+ borderwidth=2)
+ self._message = Tk.StringVar(master=self)
+ self._message_label = Tk.Label(master=self, textvariable=self._message)
+ self._message_label.pack(side=Tk.RIGHT)
+ self.pack(side=Tk.TOP, fill=Tk.X)
+
+ def set_message(self, s):
+ self._message.set(s)
+
+
+class SaveFigureTk(backend_tools.SaveFigureBase):
+ def trigger(self, *args):
+ from six.moves import tkinter_tkfiledialog, tkinter_messagebox
+ filetypes = self.figure.canvas.get_supported_filetypes().copy()
+ default_filetype = self.figure.canvas.get_default_filetype()
+
+ # Tk doesn't provide a way to choose a default filetype,
+ # so we just have to put it first
+ default_filetype_name = filetypes.pop(default_filetype)
+ sorted_filetypes = ([(default_filetype, default_filetype_name)]
+ + sorted(six.iteritems(filetypes)))
+ tk_filetypes = [(name, '*.%s' % ext) for ext, name in sorted_filetypes]
+
+ # adding a default extension seems to break the
+ # asksaveasfilename dialog when you choose various save types
+ # from the dropdown. Passing in the empty string seems to
+ # work - JDH!
+ # defaultextension = self.figure.canvas.get_default_filetype()
+ defaultextension = ''
+ initialdir = os.path.expanduser(rcParams['savefig.directory'])
+ initialfile = self.figure.canvas.get_default_filename()
+ fname = tkinter_tkfiledialog.asksaveasfilename(
+ master=self.figure.canvas.manager.window,
+ title='Save the figure',
+ filetypes=tk_filetypes,
+ defaultextension=defaultextension,
+ initialdir=initialdir,
+ initialfile=initialfile,
+ )
+
+ if fname == "" or fname == ():
+ return
+ else:
+ if initialdir == '':
+ # explicitly missing key or empty str signals to use cwd
+ rcParams['savefig.directory'] = initialdir
+ else:
+ # save dir for next time
+ rcParams['savefig.directory'] = os.path.dirname(
+ six.text_type(fname))
+ try:
+ # This method will handle the delegation to the correct type
+ self.figure.savefig(fname)
+ except Exception as e:
+ tkinter_messagebox.showerror("Error saving file", str(e))
+
+
+class ConfigureSubplotsTk(backend_tools.ConfigureSubplotsBase):
+ def __init__(self, *args, **kwargs):
+ backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
+ self.window = None
+
+ def trigger(self, *args):
+ self.init_window()
+ self.window.lift()
+
+ def init_window(self):
+ if self.window:
+ return
+
+ toolfig = Figure(figsize=(6, 3))
+ self.window = Tk.Tk()
+
+ canvas = type(self.canvas)(toolfig, master=self.window)
+ toolfig.subplots_adjust(top=0.9)
+ _tool = SubplotTool(self.figure, toolfig)
+ canvas.draw()
+ canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
+ self.window.protocol("WM_DELETE_WINDOW", self.destroy)
+
+ def destroy(self, *args, **kwargs):
+ self.window.destroy()
+ self.window = None
+
+
+backend_tools.ToolSaveFigure = SaveFigureTk
+backend_tools.ToolConfigureSubplots = ConfigureSubplotsTk
+backend_tools.ToolSetCursor = SetCursorTk
+backend_tools.ToolRubberband = RubberbandTk
+Toolbar = ToolbarTk
+
+
+@_Backend.export
+class _BackendTk(_Backend):
+ FigureManager = FigureManagerTk
+
+ @classmethod
+ def new_figure_manager_given_figure(cls, num, figure):
+ """
+ Create a new figure manager instance for the given figure.
+ """
+ _focus = windowing.FocusManager()
+ window = Tk.Tk(className="matplotlib")
+ window.withdraw()
+
+ # Put a mpl icon on the window rather than the default tk icon.
+ # Tkinter doesn't allow colour icons on linux systems, but tk>=8.5 has
+        # an iconphoto command which we call directly. Source:
+ # http://mail.python.org/pipermail/tkinter-discuss/2006-November/000954.html
+ icon_fname = os.path.join(
+ rcParams['datapath'], 'images', 'matplotlib.ppm')
+ icon_img = Tk.PhotoImage(file=icon_fname)
+ try:
+ window.tk.call('wm', 'iconphoto', window._w, icon_img)
+ except Exception as exc:
+ # log the failure (due e.g. to Tk version), but carry on
+ _log.info('Could not load matplotlib icon: %s', exc)
+
+ canvas = cls.FigureCanvas(figure, master=window)
+ manager = cls.FigureManager(canvas, num, window)
+ if matplotlib.is_interactive():
+ manager.show()
+ canvas.draw_idle()
+ return manager
+
+ @staticmethod
+ def trigger_manager_draw(manager):
+ manager.show()
+
+ @staticmethod
+ def mainloop():
+ Tk.mainloop()
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/_gtk3_compat.py b/contrib/python/matplotlib/py2/matplotlib/backends/_gtk3_compat.py
new file mode 100644
index 00000000000..825fa2341c8
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/_gtk3_compat.py
@@ -0,0 +1,41 @@
+"""
+GObject compatibility loader; supports ``gi`` and ``pgi``.
+
+The binding selection rules are as follows:
+- if ``gi`` has already been imported, use it; else
+- if ``pgi`` has already been imported, use it; else
+- if ``gi`` can be imported, use it; else
+- if ``pgi`` can be imported, use it; else
+- error out.
+
+Thus, to force usage of PGI when both bindings are installed, import it first.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import importlib
+import sys
+
+
+if "gi" in sys.modules:
+ import gi
+elif "pgi" in sys.modules:
+ import pgi as gi
+else:
+ try:
+ import gi
+ except ImportError:
+ try:
+ import pgi as gi
+ except ImportError:
+ raise ImportError("The Gtk3 backend requires PyGObject or pgi")
+
+
+gi.require_version("Gtk", "3.0")
+globals().update(
+ {name:
+ importlib.import_module("{}.repository.{}".format(gi.__name__, name))
+ for name in ["GLib", "GObject", "Gtk", "Gdk"]})
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_agg.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_agg.py
new file mode 100644
index 00000000000..491a9b2c5d6
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_agg.py
@@ -0,0 +1,606 @@
+"""
+An agg http://antigrain.com/ backend
+
+Features that are implemented
+
+ * capstyles and join styles
+ * dashes
+ * linewidth
+ * lines, rectangles, ellipses
+ * clipping to a rectangle
+ * output to RGBA and PNG, optionally JPEG and TIFF
+ * alpha blending
+ * DPI scaling properly - everything scales properly (dashes, linewidths, etc)
+ * draw polygon
+ * freetype2 w/ ft2font
+
+TODO:
+
+ * integrate screen dpi w/ ppi and text
+
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import numpy as np
+from collections import OrderedDict
+from math import radians, cos, sin
+from matplotlib import cbook, rcParams, __version__
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, RendererBase, cursors)
+from matplotlib.cbook import maxdict
+from matplotlib.figure import Figure
+from matplotlib.font_manager import findfont, get_font
+from matplotlib.ft2font import (LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING,
+ LOAD_DEFAULT, LOAD_NO_AUTOHINT)
+from matplotlib.mathtext import MathTextParser
+from matplotlib.path import Path
+from matplotlib.transforms import Bbox, BboxBase
+from matplotlib import colors as mcolors
+
+from matplotlib.backends._backend_agg import RendererAgg as _RendererAgg
+from matplotlib import _png
+
+try:
+ from PIL import Image
+ _has_pil = True
+except ImportError:
+ _has_pil = False
+
+backend_version = 'v2.2'
+
+def get_hinting_flag():
+ mapping = {
+ True: LOAD_FORCE_AUTOHINT,
+ False: LOAD_NO_HINTING,
+ 'either': LOAD_DEFAULT,
+ 'native': LOAD_NO_AUTOHINT,
+ 'auto': LOAD_FORCE_AUTOHINT,
+ 'none': LOAD_NO_HINTING
+ }
+ return mapping[rcParams['text.hinting']]
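+
+# A minimal sketch of how the mapping above is selected (any key of
+# ``mapping`` is a valid value):
+#
+#     import matplotlib
+#     matplotlib.rcParams['text.hinting'] = 'auto'   # -> LOAD_FORCE_AUTOHINT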
+
+
+class RendererAgg(RendererBase):
+ """
+ The renderer handles all the drawing primitives using a graphics
+ context instance that controls the colors/styles
+ """
+
+ @property
+ @cbook.deprecated("2.2")
+ def debug(self):
+ return 1
+
+ # we want to cache the fonts at the class level so that when
+ # multiple figures are created we can reuse them. This helps with
+ # a bug on windows where the creation of too many figures leads to
+ # too many open file handles. However, storing them at the class
+ # level is not thread safe. The solution here is to let the
+ # FigureCanvas acquire a lock on the fontd at the start of the
+ # draw, and release it when it is done. This allows multiple
+ # renderers to share the cached fonts, but only one figure can
+ # draw at time and so the font cache is used by only one
+ # renderer at a time.
+
+ lock = threading.RLock()
+
+ def __init__(self, width, height, dpi):
+ RendererBase.__init__(self)
+
+ self.dpi = dpi
+ self.width = width
+ self.height = height
+ self._renderer = _RendererAgg(int(width), int(height), dpi)
+ self._filter_renderers = []
+
+ self._update_methods()
+ self.mathtext_parser = MathTextParser('Agg')
+
+ self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
+
+ def __getstate__(self):
+ # We only want to preserve the init keywords of the Renderer.
+ # Anything else can be re-created.
+ return {'width': self.width, 'height': self.height, 'dpi': self.dpi}
+
+ def __setstate__(self, state):
+ self.__init__(state['width'], state['height'], state['dpi'])
+
+ def _get_hinting_flag(self):
+ if rcParams['text.hinting']:
+ return LOAD_FORCE_AUTOHINT
+ else:
+ return LOAD_NO_HINTING
+
+    # For filtering to work with rasterization, these methods need to be
+    # wrapped. Maybe there is a better way to do it.
+ def draw_markers(self, *kl, **kw):
+ return self._renderer.draw_markers(*kl, **kw)
+
+ def draw_path_collection(self, *kl, **kw):
+ return self._renderer.draw_path_collection(*kl, **kw)
+
+ def _update_methods(self):
+ self.draw_quad_mesh = self._renderer.draw_quad_mesh
+ self.draw_gouraud_triangle = self._renderer.draw_gouraud_triangle
+ self.draw_gouraud_triangles = self._renderer.draw_gouraud_triangles
+ self.draw_image = self._renderer.draw_image
+ self.copy_from_bbox = self._renderer.copy_from_bbox
+ self.get_content_extents = self._renderer.get_content_extents
+
+ def tostring_rgba_minimized(self):
+ extents = self.get_content_extents()
+ bbox = [[extents[0], self.height - (extents[1] + extents[3])],
+ [extents[0] + extents[2], self.height - extents[1]]]
+ region = self.copy_from_bbox(bbox)
+ return np.array(region), extents
+
+ def draw_path(self, gc, path, transform, rgbFace=None):
+ """
+ Draw the path
+ """
+ nmax = rcParams['agg.path.chunksize'] # here at least for testing
+ npts = path.vertices.shape[0]
+
+ if (nmax > 100 and npts > nmax and path.should_simplify and
+ rgbFace is None and gc.get_hatch() is None):
+ nch = np.ceil(npts / nmax)
+ chsize = int(np.ceil(npts / nch))
+ i0 = np.arange(0, npts, chsize)
+ i1 = np.zeros_like(i0)
+ i1[:-1] = i0[1:] - 1
+ i1[-1] = npts
+ for ii0, ii1 in zip(i0, i1):
+ v = path.vertices[ii0:ii1, :]
+ c = path.codes
+ if c is not None:
+ c = c[ii0:ii1]
+ c[0] = Path.MOVETO # move to end of last chunk
+ p = Path(v, c)
+ try:
+ self._renderer.draw_path(gc, p, transform, rgbFace)
+ except OverflowError:
+ raise OverflowError("Exceeded cell block limit (set "
+ "'agg.path.chunksize' rcparam)")
+ else:
+ try:
+ self._renderer.draw_path(gc, path, transform, rgbFace)
+ except OverflowError:
+ raise OverflowError("Exceeded cell block limit (set "
+ "'agg.path.chunksize' rcparam)")
+
+
+ def draw_mathtext(self, gc, x, y, s, prop, angle):
+ """
+ Draw the math text using matplotlib.mathtext
+ """
+ ox, oy, width, height, descent, font_image, used_characters = \
+ self.mathtext_parser.parse(s, self.dpi, prop)
+
+ xd = descent * sin(radians(angle))
+ yd = descent * cos(radians(angle))
+ x = np.round(x + ox + xd)
+ y = np.round(y - oy + yd)
+ self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
+
+ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
+ """
+ Render the text
+ """
+ if ismath:
+ return self.draw_mathtext(gc, x, y, s, prop, angle)
+
+ flags = get_hinting_flag()
+ font = self._get_agg_font(prop)
+
+ if font is None:
+ return None
+ if len(s) == 1 and ord(s) > 127:
+ font.load_char(ord(s), flags=flags)
+ else:
+ # We pass '0' for angle here, since it will be rotated (in raster
+            # space) in the following call to draw_text_image.
+ font.set_text(s, 0, flags=flags)
+ font.draw_glyphs_to_bitmap(antialiased=rcParams['text.antialiased'])
+ d = font.get_descent() / 64.0
+ # The descent needs to be adjusted for the angle.
+ xo, yo = font.get_bitmap_offset()
+ xo /= 64.0
+ yo /= 64.0
+ xd = -d * sin(radians(angle))
+ yd = d * cos(radians(angle))
+
+ self._renderer.draw_text_image(
+ font, np.round(x - xd + xo), np.round(y + yd + yo) + 1, angle, gc)
+
+ def get_text_width_height_descent(self, s, prop, ismath):
+ """
+ Get the width, height, and descent (offset from the bottom
+ to the baseline), in display coords, of the string *s* with
+ :class:`~matplotlib.font_manager.FontProperties` *prop*
+ """
+ if ismath in ["TeX", "TeX!"]:
+ # todo: handle props
+ size = prop.get_size_in_points()
+ texmanager = self.get_texmanager()
+ fontsize = prop.get_size_in_points()
+ w, h, d = texmanager.get_text_width_height_descent(
+ s, fontsize, renderer=self)
+ return w, h, d
+
+ if ismath:
+ ox, oy, width, height, descent, fonts, used_characters = \
+ self.mathtext_parser.parse(s, self.dpi, prop)
+ return width, height, descent
+
+ flags = get_hinting_flag()
+ font = self._get_agg_font(prop)
+ font.set_text(s, 0.0, flags=flags)
+ w, h = font.get_width_height() # width and height of unrotated string
+ d = font.get_descent()
+ w /= 64.0 # convert from subpixels
+ h /= 64.0
+ d /= 64.0
+ return w, h, d
+
+ def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
+ # todo, handle props, angle, origins
+ size = prop.get_size_in_points()
+
+ texmanager = self.get_texmanager()
+
+ Z = texmanager.get_grey(s, size, self.dpi)
+ Z = np.array(Z * 255.0, np.uint8)
+
+ w, h, d = self.get_text_width_height_descent(s, prop, ismath)
+ xd = d * sin(radians(angle))
+ yd = d * cos(radians(angle))
+ x = np.round(x + xd)
+ y = np.round(y + yd)
+
+ self._renderer.draw_text_image(Z, x, y, angle, gc)
+
+ def get_canvas_width_height(self):
+ 'return the canvas width and height in display coords'
+ return self.width, self.height
+
+ def _get_agg_font(self, prop):
+ """
+        Get the font for text instance t, caching for efficiency
+ """
+ fname = findfont(prop)
+ font = get_font(fname)
+
+ font.clear()
+ size = prop.get_size_in_points()
+ font.set_size(size, self.dpi)
+
+ return font
+
+ def points_to_pixels(self, points):
+ """
+        convert point measures to pixels using dpi and the pixels per
+ inch of the display
+ """
+ return points*self.dpi/72.0
+
+ def tostring_rgb(self):
+ return self._renderer.tostring_rgb()
+
+ def tostring_argb(self):
+ return self._renderer.tostring_argb()
+
+ def buffer_rgba(self):
+ return self._renderer.buffer_rgba()
+
+ def clear(self):
+ self._renderer.clear()
+
+ def option_image_nocomposite(self):
+ # It is generally faster to composite each image directly to
+ # the Figure, and there's no file size benefit to compositing
+ # with the Agg backend
+ return True
+
+ def option_scale_image(self):
+ """
+        agg backend doesn't support arbitrary scaling of images.
+ """
+ return False
+
+ def restore_region(self, region, bbox=None, xy=None):
+ """
+ Restore the saved region. If bbox (instance of BboxBase, or
+ its extents) is given, only the region specified by the bbox
+        will be restored. *xy* (a tuple of two floats) optionally
+ specifies the new position (the LLC of the original region,
+ not the LLC of the bbox) where the region will be restored.
+
+ >>> region = renderer.copy_from_bbox()
+ >>> x1, y1, x2, y2 = region.get_extents()
+ >>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2),
+ ... xy=(x1-dx, y1))
+
+ """
+ if bbox is not None or xy is not None:
+ if bbox is None:
+ x1, y1, x2, y2 = region.get_extents()
+ elif isinstance(bbox, BboxBase):
+ x1, y1, x2, y2 = bbox.extents
+ else:
+ x1, y1, x2, y2 = bbox
+
+ if xy is None:
+ ox, oy = x1, y1
+ else:
+ ox, oy = xy
+
+ # The incoming data is float, but the _renderer type-checking wants
+ # to see integers.
+ self._renderer.restore_region(region, int(x1), int(y1),
+ int(x2), int(y2), int(ox), int(oy))
+
+ else:
+ self._renderer.restore_region(region)
+
+ def start_filter(self):
+ """
+        Start filtering. It simply creates a new canvas (the old one is saved).
+ """
+ self._filter_renderers.append(self._renderer)
+ self._renderer = _RendererAgg(int(self.width), int(self.height),
+ self.dpi)
+ self._update_methods()
+
+ def stop_filter(self, post_processing):
+ """
+        Save the plot in the current canvas as an image and apply
+ the *post_processing* function.
+
+ def post_processing(image, dpi):
+ # ny, nx, depth = image.shape
+ # image (numpy array) has RGBA channels and has a depth of 4.
+ ...
+ # create a new_image (numpy array of 4 channels, size can be
+ # different). The resulting image may have offsets from
+ # lower-left corner of the original image
+ return new_image, offset_x, offset_y
+
+ The saved renderer is restored and the returned image from
+ post_processing is plotted (using draw_image) on it.
+ """
+
+        # WARNING: For agg_filter to work, the renderer's methods need to be
+        # overridden in the class. See draw_markers and draw_path_collection.
+
+ width, height = int(self.width), int(self.height)
+
+ buffer, bounds = self.tostring_rgba_minimized()
+
+ l, b, w, h = bounds
+
+ self._renderer = self._filter_renderers.pop()
+ self._update_methods()
+
+ if w > 0 and h > 0:
+ img = np.frombuffer(buffer, np.uint8)
+ img, ox, oy = post_processing(img.reshape((h, w, 4)) / 255.,
+ self.dpi)
+ gc = self.new_gc()
+ if img.dtype.kind == 'f':
+ img = np.asarray(img * 255., np.uint8)
+ img = img[::-1]
+ self._renderer.draw_image(
+ gc, l + ox, height - b - h + oy, img)
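+
+    # A minimal sketch of how start_filter/stop_filter are reached in
+    # practice, assuming an existing Axes ``ax`` and a user-defined ``blur``
+    # callable matching the post_processing signature documented above:
+    #
+    #     def blur(image, dpi):
+    #         ...                      # return new_image, offset_x, offset_y
+    #
+    #     line, = ax.plot(x, y)
+    #     line.set_agg_filter(blur)    # draw() brackets it with start/stop_filter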
+
+
+class FigureCanvasAgg(FigureCanvasBase):
+ """
+ The canvas the figure renders into. Calls the draw and print fig
+ methods, creates the renderers, etc...
+
+ Attributes
+ ----------
+ figure : `matplotlib.figure.Figure`
+ A high-level Figure instance
+
+ """
+
+ def copy_from_bbox(self, bbox):
+ renderer = self.get_renderer()
+ return renderer.copy_from_bbox(bbox)
+
+ def restore_region(self, region, bbox=None, xy=None):
+ renderer = self.get_renderer()
+ return renderer.restore_region(region, bbox, xy)
+
+ def draw(self):
+ """
+ Draw the figure using the renderer
+ """
+ self.renderer = self.get_renderer(cleared=True)
+ # acquire a lock on the shared font cache
+ RendererAgg.lock.acquire()
+
+ toolbar = self.toolbar
+ try:
+ # if toolbar:
+ # toolbar.set_cursor(cursors.WAIT)
+ self.figure.draw(self.renderer)
+            # A GUI class may need to update a window using this draw, so
+ # don't forget to call the superclass.
+ super(FigureCanvasAgg, self).draw()
+ finally:
+ # if toolbar:
+ # toolbar.set_cursor(toolbar._lastCursor)
+ RendererAgg.lock.release()
+
+ def get_renderer(self, cleared=False):
+ l, b, w, h = self.figure.bbox.bounds
+ key = w, h, self.figure.dpi
+ try: self._lastKey, self.renderer
+ except AttributeError: need_new_renderer = True
+ else: need_new_renderer = (self._lastKey != key)
+
+ if need_new_renderer:
+ self.renderer = RendererAgg(w, h, self.figure.dpi)
+ self._lastKey = key
+ elif cleared:
+ self.renderer.clear()
+ return self.renderer
+
+ def tostring_rgb(self):
+ '''Get the image as an RGB byte string
+
+ `draw` must be called at least once before this function will work and
+ to update the renderer for any subsequent changes to the Figure.
+
+ Returns
+ -------
+ bytes
+ '''
+ return self.renderer.tostring_rgb()
+
+ def tostring_argb(self):
+ '''Get the image as an ARGB byte string
+
+ `draw` must be called at least once before this function will work and
+ to update the renderer for any subsequent changes to the Figure.
+
+ Returns
+ -------
+ bytes
+
+ '''
+ return self.renderer.tostring_argb()
+
+ def buffer_rgba(self):
+ '''Get the image as an RGBA byte string
+
+ `draw` must be called at least once before this function will work and
+ to update the renderer for any subsequent changes to the Figure.
+
+ Returns
+ -------
+ bytes
+ '''
+ return self.renderer.buffer_rgba()
+
+ def print_raw(self, filename_or_obj, *args, **kwargs):
+ FigureCanvasAgg.draw(self)
+ renderer = self.get_renderer()
+ original_dpi = renderer.dpi
+ renderer.dpi = self.figure.dpi
+ if isinstance(filename_or_obj, six.string_types):
+ fileobj = open(filename_or_obj, 'wb')
+ close = True
+ else:
+ fileobj = filename_or_obj
+ close = False
+ try:
+ fileobj.write(renderer._renderer.buffer_rgba())
+ finally:
+ if close:
+ fileobj.close()
+ renderer.dpi = original_dpi
+ print_rgba = print_raw
+
+ def print_png(self, filename_or_obj, *args, **kwargs):
+ FigureCanvasAgg.draw(self)
+ renderer = self.get_renderer()
+ original_dpi = renderer.dpi
+ renderer.dpi = self.figure.dpi
+
+ version_str = 'matplotlib version ' + __version__ + \
+ ', http://matplotlib.org/'
+ metadata = OrderedDict({'Software': version_str})
+ user_metadata = kwargs.pop("metadata", None)
+ if user_metadata is not None:
+ metadata.update(user_metadata)
+
+ try:
+ with cbook.open_file_cm(filename_or_obj, "wb") as fh:
+ _png.write_png(renderer._renderer, fh,
+ self.figure.dpi, metadata=metadata)
+ finally:
+ renderer.dpi = original_dpi
+
+ def print_to_buffer(self):
+ FigureCanvasAgg.draw(self)
+ renderer = self.get_renderer()
+ original_dpi = renderer.dpi
+ renderer.dpi = self.figure.dpi
+ try:
+ result = (renderer._renderer.buffer_rgba(),
+ (int(renderer.width), int(renderer.height)))
+ finally:
+ renderer.dpi = original_dpi
+ return result
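+
+    # A minimal sketch of consuming the buffer returned above (``canvas`` is
+    # assumed to be a FigureCanvasAgg instance):
+    #
+    #     buf, (w, h) = canvas.print_to_buffer()
+    #     rgba = np.frombuffer(buf, dtype=np.uint8).reshape(h, w, 4)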
+
+ if _has_pil:
+ # add JPEG support
+ def print_jpg(self, filename_or_obj, *args, **kwargs):
+ """
+ Other Parameters
+ ----------------
+ quality : int
+ The image quality, on a scale from 1 (worst) to
+ 95 (best). The default is 95, if not given in the
+ matplotlibrc file in the savefig.jpeg_quality parameter.
+ Values above 95 should be avoided; 100 completely
+ disables the JPEG quantization stage.
+
+ optimize : bool
+ If present, indicates that the encoder should
+ make an extra pass over the image in order to select
+ optimal encoder settings.
+
+ progressive : bool
+ If present, indicates that this image
+ should be stored as a progressive JPEG file.
+ """
+ buf, size = self.print_to_buffer()
+ if kwargs.pop("dryrun", False):
+ return
+ # The image is "pasted" onto a white background image to safely
+ # handle any transparency
+ image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
+ rgba = mcolors.to_rgba(rcParams['savefig.facecolor'])
+ color = tuple([int(x * 255.0) for x in rgba[:3]])
+ background = Image.new('RGB', size, color)
+ background.paste(image, image)
+ options = {k: kwargs[k]
+ for k in ['quality', 'optimize', 'progressive', 'dpi']
+ if k in kwargs}
+ options.setdefault('quality', rcParams['savefig.jpeg_quality'])
+ if 'dpi' in options:
+ # Set the same dpi in both x and y directions
+ options['dpi'] = (options['dpi'], options['dpi'])
+
+ return background.save(filename_or_obj, format='jpeg', **options)
+ print_jpeg = print_jpg
+
+ # add TIFF support
+ def print_tif(self, filename_or_obj, *args, **kwargs):
+ buf, size = self.print_to_buffer()
+ if kwargs.pop("dryrun", False):
+ return
+ image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
+ dpi = (self.figure.dpi, self.figure.dpi)
+ return image.save(filename_or_obj, format='tiff',
+ dpi=dpi)
+ print_tiff = print_tif
+
+
+@_Backend.export
+class _BackendAgg(_Backend):
+ FigureCanvas = FigureCanvasAgg
+ FigureManager = FigureManagerBase
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_cairo.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_cairo.py
new file mode 100644
index 00000000000..c870ba60a55
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_cairo.py
@@ -0,0 +1,520 @@
+"""
+A Cairo backend for matplotlib
+==============================
+:Author: Steve Chaplin and others
+
+This backend depends on `cairo <http://cairographics.org>`_, and either on
+cairocffi, or (Python 2 only) on pycairo.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import gzip
+import sys
+import warnings
+
+import numpy as np
+
+# cairocffi is more widely compatible than pycairo (in particular pgi only
+# works with cairocffi) so try it first.
+try:
+ import cairocffi as cairo
+except ImportError:
+ try:
+ import cairo
+ except ImportError:
+ raise ImportError("cairo backend requires that cairocffi or pycairo "
+ "is installed")
+ else:
+ HAS_CAIRO_CFFI = False
+else:
+ HAS_CAIRO_CFFI = True
+
+if cairo.version_info < (1, 4, 0):
+ raise ImportError("cairo {} is installed; "
+ "cairo>=1.4.0 is required".format(cairo.version))
+backend_version = cairo.version
+
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
+ RendererBase)
+from matplotlib.mathtext import MathTextParser
+from matplotlib.path import Path
+from matplotlib.transforms import Affine2D
+from matplotlib.font_manager import ttfFontProperty
+
+
+class ArrayWrapper:
+ """Thin wrapper around numpy ndarray to expose the interface
+ expected by cairocffi. Basically replicates the
+ array.array interface.
+ """
+ def __init__(self, myarray):
+ self.__array = myarray
+ self.__data = myarray.ctypes.data
+ self.__size = len(myarray.flatten())
+ self.itemsize = myarray.itemsize
+
+ def buffer_info(self):
+ return (self.__data, self.__size)
+
+
+class RendererCairo(RendererBase):
+ fontweights = {
+ 100 : cairo.FONT_WEIGHT_NORMAL,
+ 200 : cairo.FONT_WEIGHT_NORMAL,
+ 300 : cairo.FONT_WEIGHT_NORMAL,
+ 400 : cairo.FONT_WEIGHT_NORMAL,
+ 500 : cairo.FONT_WEIGHT_NORMAL,
+ 600 : cairo.FONT_WEIGHT_BOLD,
+ 700 : cairo.FONT_WEIGHT_BOLD,
+ 800 : cairo.FONT_WEIGHT_BOLD,
+ 900 : cairo.FONT_WEIGHT_BOLD,
+ 'ultralight' : cairo.FONT_WEIGHT_NORMAL,
+ 'light' : cairo.FONT_WEIGHT_NORMAL,
+ 'normal' : cairo.FONT_WEIGHT_NORMAL,
+ 'medium' : cairo.FONT_WEIGHT_NORMAL,
+ 'regular' : cairo.FONT_WEIGHT_NORMAL,
+ 'semibold' : cairo.FONT_WEIGHT_BOLD,
+ 'bold' : cairo.FONT_WEIGHT_BOLD,
+ 'heavy' : cairo.FONT_WEIGHT_BOLD,
+ 'ultrabold' : cairo.FONT_WEIGHT_BOLD,
+ 'black' : cairo.FONT_WEIGHT_BOLD,
+ }
+ fontangles = {
+ 'italic' : cairo.FONT_SLANT_ITALIC,
+ 'normal' : cairo.FONT_SLANT_NORMAL,
+ 'oblique' : cairo.FONT_SLANT_OBLIQUE,
+ }
+
+
+ def __init__(self, dpi):
+ self.dpi = dpi
+ self.gc = GraphicsContextCairo(renderer=self)
+ self.text_ctx = cairo.Context(
+ cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1))
+ self.mathtext_parser = MathTextParser('Cairo')
+ RendererBase.__init__(self)
+
+ def set_ctx_from_surface(self, surface):
+ self.gc.ctx = cairo.Context(surface)
+ # Although it may appear natural to automatically call
+ # `self.set_width_height(surface.get_width(), surface.get_height())`
+ # here (instead of having the caller do so separately), this would fail
+ # for PDF/PS/SVG surfaces, which have no way to report their extents.
+
+ def set_width_height(self, width, height):
+ self.width = width
+ self.height = height
+
+ def _fill_and_stroke(self, ctx, fill_c, alpha, alpha_overrides):
+ if fill_c is not None:
+ ctx.save()
+ if len(fill_c) == 3 or alpha_overrides:
+ ctx.set_source_rgba(fill_c[0], fill_c[1], fill_c[2], alpha)
+ else:
+ ctx.set_source_rgba(fill_c[0], fill_c[1], fill_c[2], fill_c[3])
+ ctx.fill_preserve()
+ ctx.restore()
+ ctx.stroke()
+
+ @staticmethod
+ def convert_path(ctx, path, transform, clip=None):
+ for points, code in path.iter_segments(transform, clip=clip):
+ if code == Path.MOVETO:
+ ctx.move_to(*points)
+ elif code == Path.CLOSEPOLY:
+ ctx.close_path()
+ elif code == Path.LINETO:
+ ctx.line_to(*points)
+ elif code == Path.CURVE3:
+ ctx.curve_to(points[0], points[1],
+ points[0], points[1],
+ points[2], points[3])
+ elif code == Path.CURVE4:
+ ctx.curve_to(*points)
+
+ def draw_path(self, gc, path, transform, rgbFace=None):
+ ctx = gc.ctx
+
+ # We'll clip the path to the actual rendering extents
+ # if the path isn't filled.
+ if rgbFace is None and gc.get_hatch() is None:
+ clip = ctx.clip_extents()
+ else:
+ clip = None
+
+ transform = (transform
+ + Affine2D().scale(1.0, -1.0).translate(0, self.height))
+
+ ctx.new_path()
+ self.convert_path(ctx, path, transform, clip)
+
+ self._fill_and_stroke(
+ ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())
+
+ def draw_markers(self, gc, marker_path, marker_trans, path, transform,
+ rgbFace=None):
+ ctx = gc.ctx
+
+ ctx.new_path()
+ # Create the path for the marker; it needs to be flipped here already!
+ self.convert_path(
+ ctx, marker_path, marker_trans + Affine2D().scale(1.0, -1.0))
+ marker_path = ctx.copy_path_flat()
+
+ # Figure out whether the path has a fill
+ x1, y1, x2, y2 = ctx.fill_extents()
+ if x1 == 0 and y1 == 0 and x2 == 0 and y2 == 0:
+ filled = False
+ # No fill, just unset this (so we don't try to fill it later on)
+ rgbFace = None
+ else:
+ filled = True
+
+ transform = (transform
+ + Affine2D().scale(1.0, -1.0).translate(0, self.height))
+
+ ctx.new_path()
+ for i, (vertices, codes) in enumerate(
+ path.iter_segments(transform, simplify=False)):
+ if len(vertices):
+ x, y = vertices[-2:]
+ ctx.save()
+
+ # Translate and apply path
+ ctx.translate(x, y)
+ ctx.append_path(marker_path)
+
+ ctx.restore()
+
+ # Slower code path if there is a fill; we need to draw
+ # the fill and stroke for each marker at the same time.
+ # Also flush out the drawing every once in a while to
+ # prevent the paths from getting way too long.
+ if filled or i % 1000 == 0:
+ self._fill_and_stroke(
+ ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())
+
+ # Fast path, if there is no fill, draw everything in one step
+ if not filled:
+ self._fill_and_stroke(
+ ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())
+
+ def draw_image(self, gc, x, y, im):
+ # bbox - not currently used
+ if sys.byteorder == 'little':
+ im = im[:, :, (2, 1, 0, 3)]
+ else:
+ im = im[:, :, (3, 0, 1, 2)]
+ if HAS_CAIRO_CFFI:
+ # cairocffi tries to use the buffer_info from array.array
+ # that we replicate in ArrayWrapper and alternatively falls back
+ # on ctypes to get a pointer to the numpy array. This works
+ # correctly on a numpy array in python3 but not 2.7. We replicate
+ # the array.array functionality here to get cross version support.
+ imbuffer = ArrayWrapper(im.flatten())
+ else:
+ # pycairo uses PyObject_AsWriteBuffer to get a pointer to the
+ # numpy array; this works correctly on a regular numpy array but
+ # not on a py2 memoryview.
+ imbuffer = im.flatten()
+ surface = cairo.ImageSurface.create_for_data(
+ imbuffer, cairo.FORMAT_ARGB32,
+ im.shape[1], im.shape[0], im.shape[1]*4)
+ ctx = gc.ctx
+ y = self.height - y - im.shape[0]
+
+ ctx.save()
+ ctx.set_source_surface(surface, float(x), float(y))
+ if gc.get_alpha() != 1.0:
+ ctx.paint_with_alpha(gc.get_alpha())
+ else:
+ ctx.paint()
+ ctx.restore()
+
+ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
+ # Note: x,y are device/display coords, not user-coords, unlike other
+ # draw_* methods
+ if ismath:
+ self._draw_mathtext(gc, x, y, s, prop, angle)
+
+ else:
+ ctx = gc.ctx
+ ctx.new_path()
+ ctx.move_to(x, y)
+ ctx.select_font_face(prop.get_name(),
+ self.fontangles[prop.get_style()],
+ self.fontweights[prop.get_weight()])
+
+ size = prop.get_size_in_points() * self.dpi / 72.0
+
+ ctx.save()
+ if angle:
+ ctx.rotate(np.deg2rad(-angle))
+ ctx.set_font_size(size)
+
+ if HAS_CAIRO_CFFI:
+ if not isinstance(s, six.text_type):
+ s = six.text_type(s)
+ else:
+ if six.PY2 and isinstance(s, six.text_type):
+ s = s.encode("utf-8")
+
+ ctx.show_text(s)
+ ctx.restore()
+
+ def _draw_mathtext(self, gc, x, y, s, prop, angle):
+ ctx = gc.ctx
+ width, height, descent, glyphs, rects = self.mathtext_parser.parse(
+ s, self.dpi, prop)
+
+ ctx.save()
+ ctx.translate(x, y)
+ if angle:
+ ctx.rotate(np.deg2rad(-angle))
+
+ for font, fontsize, s, ox, oy in glyphs:
+ ctx.new_path()
+ ctx.move_to(ox, oy)
+
+ fontProp = ttfFontProperty(font)
+ ctx.save()
+ ctx.select_font_face(fontProp.name,
+ self.fontangles[fontProp.style],
+ self.fontweights[fontProp.weight])
+
+ size = fontsize * self.dpi / 72.0
+ ctx.set_font_size(size)
+ if not six.PY3 and isinstance(s, six.text_type):
+ s = s.encode("utf-8")
+ ctx.show_text(s)
+ ctx.restore()
+
+ for ox, oy, w, h in rects:
+ ctx.new_path()
+ ctx.rectangle(ox, oy, w, h)
+ ctx.set_source_rgb(0, 0, 0)
+ ctx.fill_preserve()
+
+ ctx.restore()
+
+ def get_canvas_width_height(self):
+ return self.width, self.height
+
+ def get_text_width_height_descent(self, s, prop, ismath):
+ if ismath:
+ width, height, descent, fonts, used_characters = \
+ self.mathtext_parser.parse(s, self.dpi, prop)
+ return width, height, descent
+
+ ctx = self.text_ctx
+ ctx.save()
+ ctx.select_font_face(prop.get_name(),
+ self.fontangles[prop.get_style()],
+ self.fontweights[prop.get_weight()])
+
+ # Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c
+ # but if /96.0 is used the font is too small
+ size = prop.get_size_in_points() * self.dpi / 72
+
+        # Problem: the scale remembers the last setting and the font can
+        # become enormous, causing the program to crash; the save/restore
+        # pair prevents this.
+ ctx.set_font_size(size)
+
+ y_bearing, w, h = ctx.text_extents(s)[1:4]
+ ctx.restore()
+
+ return w, h, h + y_bearing
+
+ def new_gc(self):
+ self.gc.ctx.save()
+ self.gc._alpha = 1
+ self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA
+ return self.gc
+
+ def points_to_pixels(self, points):
+ return points / 72 * self.dpi
+
+
+class GraphicsContextCairo(GraphicsContextBase):
+ _joind = {
+ 'bevel' : cairo.LINE_JOIN_BEVEL,
+ 'miter' : cairo.LINE_JOIN_MITER,
+ 'round' : cairo.LINE_JOIN_ROUND,
+ }
+
+ _capd = {
+ 'butt' : cairo.LINE_CAP_BUTT,
+ 'projecting' : cairo.LINE_CAP_SQUARE,
+ 'round' : cairo.LINE_CAP_ROUND,
+ }
+
+ def __init__(self, renderer):
+ GraphicsContextBase.__init__(self)
+ self.renderer = renderer
+
+ def restore(self):
+ self.ctx.restore()
+
+ def set_alpha(self, alpha):
+ GraphicsContextBase.set_alpha(self, alpha)
+ _alpha = self.get_alpha()
+ rgb = self._rgb
+ if self.get_forced_alpha():
+ self.ctx.set_source_rgba(rgb[0], rgb[1], rgb[2], _alpha)
+ else:
+ self.ctx.set_source_rgba(rgb[0], rgb[1], rgb[2], rgb[3])
+
+ # def set_antialiased(self, b):
+ # cairo has many antialiasing modes, we need to pick one for True and
+ # one for False.
+
+ def set_capstyle(self, cs):
+ if cs in ('butt', 'round', 'projecting'):
+ self._capstyle = cs
+ self.ctx.set_line_cap(self._capd[cs])
+ else:
+ raise ValueError('Unrecognized cap style. Found %s' % cs)
+
+ def set_clip_rectangle(self, rectangle):
+ if not rectangle:
+ return
+ x, y, w, h = np.round(rectangle.bounds)
+ ctx = self.ctx
+ ctx.new_path()
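+ # Flip y: matplotlib bboxes are measured from the bottom-left, cairo's origin is the top-left.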
+ ctx.rectangle(x, self.renderer.height - h - y, w, h)
+ ctx.clip()
+
+ def set_clip_path(self, path):
+ if not path:
+ return
+ tpath, affine = path.get_transformed_path_and_affine()
+ ctx = self.ctx
+ ctx.new_path()
+ affine = (affine
+ + Affine2D().scale(1, -1).translate(0, self.renderer.height))
+ RendererCairo.convert_path(ctx, tpath, affine)
+ ctx.clip()
+
+ def set_dashes(self, offset, dashes):
+ self._dashes = offset, dashes
+ if dashes is None:
+ self.ctx.set_dash([], 0) # switch dashes off
+ else:
+ self.ctx.set_dash(
+ list(self.renderer.points_to_pixels(np.asarray(dashes))),
+ offset)
+
+ def set_foreground(self, fg, isRGBA=None):
+ GraphicsContextBase.set_foreground(self, fg, isRGBA)
+ if len(self._rgb) == 3:
+ self.ctx.set_source_rgb(*self._rgb)
+ else:
+ self.ctx.set_source_rgba(*self._rgb)
+
+ def get_rgb(self):
+ return self.ctx.get_source().get_rgba()[:3]
+
+ def set_joinstyle(self, js):
+ if js in ('miter', 'round', 'bevel'):
+ self._joinstyle = js
+ self.ctx.set_line_join(self._joind[js])
+ else:
+ raise ValueError('Unrecognized join style. Found %s' % js)
+
+ def set_linewidth(self, w):
+ self._linewidth = float(w)
+ self.ctx.set_line_width(self.renderer.points_to_pixels(w))
+
+
+class FigureCanvasCairo(FigureCanvasBase):
+ supports_blit = False
+
+ def print_png(self, fobj, *args, **kwargs):
+ width, height = self.get_width_height()
+
+ renderer = RendererCairo(self.figure.dpi)
+ renderer.set_width_height(width, height)
+ surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
+ renderer.set_ctx_from_surface(surface)
+
+ self.figure.draw(renderer)
+ surface.write_to_png(fobj)
+
+ def print_pdf(self, fobj, *args, **kwargs):
+ return self._save(fobj, 'pdf', *args, **kwargs)
+
+ def print_ps(self, fobj, *args, **kwargs):
+ return self._save(fobj, 'ps', *args, **kwargs)
+
+ def print_svg(self, fobj, *args, **kwargs):
+ return self._save(fobj, 'svg', *args, **kwargs)
+
+ def print_svgz(self, fobj, *args, **kwargs):
+ return self._save(fobj, 'svgz', *args, **kwargs)
+
+ def _save(self, fo, fmt, **kwargs):
+ # save PDF/PS/SVG
+ orientation = kwargs.get('orientation', 'portrait')
+
+ dpi = 72
+ self.figure.dpi = dpi
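+ # Vector surfaces (PS/PDF/SVG) use points as their unit, so render the figure at 72 dpi (one point per unit).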
+ w_in, h_in = self.figure.get_size_inches()
+ width_in_points, height_in_points = w_in * dpi, h_in * dpi
+
+ if orientation == 'landscape':
+ width_in_points, height_in_points = (
+ height_in_points, width_in_points)
+
+ if fmt == 'ps':
+ if not hasattr(cairo, 'PSSurface'):
+ raise RuntimeError('cairo has not been compiled with PS '
+ 'support enabled')
+ surface = cairo.PSSurface(fo, width_in_points, height_in_points)
+ elif fmt == 'pdf':
+ if not hasattr(cairo, 'PDFSurface'):
+ raise RuntimeError('cairo has not been compiled with PDF '
+ 'support enabled')
+ surface = cairo.PDFSurface(fo, width_in_points, height_in_points)
+ elif fmt in ('svg', 'svgz'):
+ if not hasattr(cairo, 'SVGSurface'):
+ raise RuntimeError('cairo has not been compiled with SVG '
+ 'support enabled')
+ if fmt == 'svgz':
+ if isinstance(fo, six.string_types):
+ fo = gzip.GzipFile(fo, 'wb')
+ else:
+ fo = gzip.GzipFile(None, 'wb', fileobj=fo)
+ surface = cairo.SVGSurface(fo, width_in_points, height_in_points)
+ else:
+ warnings.warn("unknown format: %s" % fmt)
+ return
+
+ # surface.set_dpi() can be used
+ renderer = RendererCairo(self.figure.dpi)
+ renderer.set_width_height(width_in_points, height_in_points)
+ renderer.set_ctx_from_surface(surface)
+ ctx = renderer.gc.ctx
+
+ if orientation == 'landscape':
+ ctx.rotate(np.pi / 2)
+ ctx.translate(0, -height_in_points)
+ # Perhaps add an '%%Orientation: Landscape' comment?
+
+ self.figure.draw(renderer)
+
+ ctx.show_page()
+ surface.finish()
+ if fmt == 'svgz':
+ fo.close()
+
+
+@_Backend.export
+class _BackendCairo(_Backend):
+ FigureCanvas = FigureCanvasCairo
+ FigureManager = FigureManagerBase
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_gdk.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gdk.py
new file mode 100644
index 00000000000..7d18922fc37
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gdk.py
@@ -0,0 +1,438 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import warnings
+
+import gobject
+import gtk; gdk = gtk.gdk
+import pango
+pygtk_version_required = (2,2,0)
+if gtk.pygtk_version < pygtk_version_required:
+ raise ImportError ("PyGTK %d.%d.%d is installed\n"
+ "PyGTK %d.%d.%d or later is required"
+ % (gtk.pygtk_version + pygtk_version_required))
+del pygtk_version_required
+
+import numpy as np
+
+import matplotlib
+from matplotlib import rcParams
+from matplotlib._pylab_helpers import Gcf
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
+ RendererBase)
+from matplotlib.cbook import warn_deprecated
+from matplotlib.mathtext import MathTextParser
+from matplotlib.transforms import Affine2D
+from matplotlib.backends._backend_gdk import pixbuf_get_pixels_array
+
+backend_version = "%d.%d.%d" % gtk.pygtk_version
+
+# Image formats that this backend supports - for FileChooser and print_figure()
+IMAGE_FORMAT = sorted(['bmp', 'eps', 'jpg', 'png', 'ps', 'svg']) # 'raw', 'rgb'
+IMAGE_FORMAT_DEFAULT = 'png'
+
+
+class RendererGDK(RendererBase):
+ fontweights = {
+ 100 : pango.WEIGHT_ULTRALIGHT,
+ 200 : pango.WEIGHT_LIGHT,
+ 300 : pango.WEIGHT_LIGHT,
+ 400 : pango.WEIGHT_NORMAL,
+ 500 : pango.WEIGHT_NORMAL,
+ 600 : pango.WEIGHT_BOLD,
+ 700 : pango.WEIGHT_BOLD,
+ 800 : pango.WEIGHT_HEAVY,
+ 900 : pango.WEIGHT_ULTRABOLD,
+ 'ultralight' : pango.WEIGHT_ULTRALIGHT,
+ 'light' : pango.WEIGHT_LIGHT,
+ 'normal' : pango.WEIGHT_NORMAL,
+ 'medium' : pango.WEIGHT_NORMAL,
+ 'semibold' : pango.WEIGHT_BOLD,
+ 'bold' : pango.WEIGHT_BOLD,
+ 'heavy' : pango.WEIGHT_HEAVY,
+ 'ultrabold' : pango.WEIGHT_ULTRABOLD,
+ 'black' : pango.WEIGHT_ULTRABOLD,
+ }
+
+ # cache for efficiency, these must be at class, not instance level
+ layoutd = {} # a map from text prop tups to pango layouts
+ rotated = {} # a map from text prop tups to rotated text pixbufs
+
+ def __init__(self, gtkDA, dpi):
+ # widget gtkDA is used for:
+ # '<widget>.create_pango_layout(s)'
+ # and for '<widget>.get_colormap()' (the _cmap line below)
+ self.gtkDA = gtkDA
+ self.dpi = dpi
+ self._cmap = gtkDA.get_colormap()
+ self.mathtext_parser = MathTextParser("Agg")
+
+ def set_pixmap (self, pixmap):
+ self.gdkDrawable = pixmap
+
+ def set_width_height (self, width, height):
+ """w, h are the figure width and height, not the pixmap's
+ """
+ self.width, self.height = width, height
+
+ def draw_path(self, gc, path, transform, rgbFace=None):
+ transform = transform + Affine2D(). \
+ scale(1.0, -1.0).translate(0, self.height)
+ polygons = path.to_polygons(transform, self.width, self.height)
+ for polygon in polygons:
+ # draw_polygon won't take an arbitrary sequence -- it must be a list
+ # of tuples
+ polygon = [(int(np.round(x)), int(np.round(y))) for x, y in polygon]
+ if rgbFace is not None:
+ saveColor = gc.gdkGC.foreground
+ gc.gdkGC.foreground = gc.rgb_to_gdk_color(rgbFace)
+ self.gdkDrawable.draw_polygon(gc.gdkGC, True, polygon)
+ gc.gdkGC.foreground = saveColor
+ if gc.gdkGC.line_width > 0:
+ self.gdkDrawable.draw_lines(gc.gdkGC, polygon)
+
+ def draw_image(self, gc, x, y, im):
+ bbox = gc.get_clip_rectangle()
+
+ if bbox is not None:
+ l,b,w,h = bbox.bounds
+ #rectangle = (int(l), self.height-int(b+h),
+ # int(w), int(h))
+ # set clip rect?
+
+ rows, cols = im.shape[:2]
+
+ pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
+ has_alpha=True, bits_per_sample=8,
+ width=cols, height=rows)
+
+ array = pixbuf_get_pixels_array(pixbuf)
+ array[:, :, :] = im[::-1]
+
+ gc = self.new_gc()
+
+
+ y = self.height-y-rows
+
+ try: # new in 2.2
+ # can use None instead of gc.gdkGC, if don't need clipping
+ self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
+ int(x), int(y), cols, rows,
+ gdk.RGB_DITHER_NONE, 0, 0)
+ except AttributeError:
+ # deprecated in 2.2
+ pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
+ int(x), int(y), cols, rows,
+ gdk.RGB_DITHER_NONE, 0, 0)
+
+ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
+ x, y = int(x), int(y)
+
+ if x < 0 or y < 0: # window has shrunk and text is off the edge
+ return
+
+ if angle not in (0,90):
+ warnings.warn('backend_gdk: unable to draw text at angles ' +
+ 'other than 0 or 90')
+ elif ismath:
+ self._draw_mathtext(gc, x, y, s, prop, angle)
+
+ elif angle==90:
+ self._draw_rotated_text(gc, x, y, s, prop, angle)
+
+ else:
+ layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
+ l, b, w, h = inkRect
+ if (x + w > self.width or y + h > self.height):
+ return
+
+ self.gdkDrawable.draw_layout(gc.gdkGC, x, y-h-b, layout)
+
+ def _draw_mathtext(self, gc, x, y, s, prop, angle):
+ ox, oy, width, height, descent, font_image, used_characters = \
+ self.mathtext_parser.parse(s, self.dpi, prop)
+
+ if angle == 90:
+ width, height = height, width
+ x -= width
+ y -= height
+
+ imw = font_image.get_width()
+ imh = font_image.get_height()
+
+ pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, has_alpha=True,
+ bits_per_sample=8, width=imw, height=imh)
+
+ array = pixbuf_get_pixels_array(pixbuf)
+
+ rgb = gc.get_rgb()
+ array[:,:,0] = int(rgb[0]*255)
+ array[:,:,1] = int(rgb[1]*255)
+ array[:,:,2] = int(rgb[2]*255)
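+ # The rasterized math-text bitmap becomes the alpha channel over the
+ # solid foreground color set above.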
+ array[:,:,3] = (
+ np.fromstring(font_image.as_str(), np.uint8).reshape((imh, imw)))
+
+ # can use None instead of gc.gdkGC, if don't need clipping
+ self.gdkDrawable.draw_pixbuf(gc.gdkGC, pixbuf, 0, 0,
+ int(x), int(y), imw, imh,
+ gdk.RGB_DITHER_NONE, 0, 0)
+
+ def _draw_rotated_text(self, gc, x, y, s, prop, angle):
+ """
+ Draw the text rotated 90 degrees; other angles are not supported.
+ """
+ # this function (and its called functions) is a bottleneck
+ # Pango 1.6 supports rotated text, but pygtk 2.4.0 does not yet have
+ # wrapper functions
+ # GTK+ 2.6 pixbufs support rotation
+
+ gdrawable = self.gdkDrawable
+ ggc = gc.gdkGC
+
+ layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
+ l, b, w, h = inkRect
+ x = int(x-h)
+ y = int(y-w)
+
+ if (x < 0 or y < 0 or # window has shrunk and text is off the edge
+ x + w > self.width or y + h > self.height):
+ return
+
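+ # Rotating pixel-by-pixel below is slow, so the rotated image is cached
+ # per (position, text, angle, font) and reused on later draws.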
+ key = (x,y,s,angle,hash(prop))
+ imageVert = self.rotated.get(key)
+ if imageVert is not None:
+ gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
+ return
+
+ imageBack = gdrawable.get_image(x, y, w, h)
+ imageVert = gdrawable.get_image(x, y, h, w)
+ imageFlip = gtk.gdk.Image(type=gdk.IMAGE_FASTEST,
+ visual=gdrawable.get_visual(),
+ width=w, height=h)
+ if imageFlip is None or imageBack is None or imageVert is None:
+ warnings.warn("Could not render vertical text")
+ return
+ imageFlip.set_colormap(self._cmap)
+ for i in range(w):
+ for j in range(h):
+ imageFlip.put_pixel(i, j, imageVert.get_pixel(j,w-i-1) )
+
+ gdrawable.draw_image(ggc, imageFlip, 0, 0, x, y, w, h)
+ gdrawable.draw_layout(ggc, x, y-b, layout)
+
+ imageIn = gdrawable.get_image(x, y, w, h)
+ for i in range(w):
+ for j in range(h):
+ imageVert.put_pixel(j, i, imageIn.get_pixel(w-i-1,j) )
+
+ gdrawable.draw_image(ggc, imageBack, 0, 0, x, y, w, h)
+ gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
+ self.rotated[key] = imageVert
+
+ def _get_pango_layout(self, s, prop):
+ """
+ Create a pango layout instance for Text 's' with properties 'prop'.
+ Returns (layout, inkRect, logicalRect), from the cache if already present
+
+ Note that pango assumes a logical DPI of 96
+ Ref: pango/fonts.c/pango_font_description_set_size() manual page
+ """
+ # problem? - cache gets bigger and bigger, is never cleared out
+ # two (not one) layouts are created for every text item s (then they
+ # are cached) - why?
+
+ key = self.dpi, s, hash(prop)
+ value = self.layoutd.get(key)
+ if value is not None:
+ return value
+
+ size = prop.get_size_in_points() * self.dpi / 96.0
+ size = np.round(size)
+
+ font_str = '%s, %s %i' % (prop.get_name(), prop.get_style(), size,)
+ font = pango.FontDescription(font_str)
+
+ # later - add fontweight to font_str
+ font.set_weight(self.fontweights[prop.get_weight()])
+
+ layout = self.gtkDA.create_pango_layout(s)
+ layout.set_font_description(font)
+ inkRect, logicalRect = layout.get_pixel_extents()
+
+ self.layoutd[key] = layout, inkRect, logicalRect
+ return layout, inkRect, logicalRect
+
+ def flipy(self):
+ return True
+
+ def get_canvas_width_height(self):
+ return self.width, self.height
+
+ def get_text_width_height_descent(self, s, prop, ismath):
+ if ismath:
+ ox, oy, width, height, descent, font_image, used_characters = \
+ self.mathtext_parser.parse(s, self.dpi, prop)
+ return width, height, descent
+
+ layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
+ l, b, w, h = inkRect
+ ll, lb, lw, lh = logicalRect
+
+ return w, h + 1, h - lh
+
+ def new_gc(self):
+ return GraphicsContextGDK(renderer=self)
+
+ def points_to_pixels(self, points):
+ return points/72.0 * self.dpi
+
+
+class GraphicsContextGDK(GraphicsContextBase):
+ # a cache shared by all class instances
+ _cached = {} # map: rgb color -> gdk.Color
+
+ _joind = {
+ 'bevel' : gdk.JOIN_BEVEL,
+ 'miter' : gdk.JOIN_MITER,
+ 'round' : gdk.JOIN_ROUND,
+ }
+
+ _capd = {
+ 'butt' : gdk.CAP_BUTT,
+ 'projecting' : gdk.CAP_PROJECTING,
+ 'round' : gdk.CAP_ROUND,
+ }
+
+
+ def __init__(self, renderer):
+ GraphicsContextBase.__init__(self)
+ self.renderer = renderer
+ self.gdkGC = gtk.gdk.GC(renderer.gdkDrawable)
+ self._cmap = renderer._cmap
+
+
+ def rgb_to_gdk_color(self, rgb):
+ """
+ rgb - an RGB tuple (three 0.0-1.0 values)
+ return an allocated gtk.gdk.Color
+ """
+ try:
+ return self._cached[tuple(rgb)]
+ except KeyError:
+ color = self._cached[tuple(rgb)] = \
+ self._cmap.alloc_color(
+ int(rgb[0]*65535),int(rgb[1]*65535),int(rgb[2]*65535))
+ return color
+
+
+ #def set_antialiased(self, b):
+ # anti-aliasing is not supported by GDK
+
+ def set_capstyle(self, cs):
+ GraphicsContextBase.set_capstyle(self, cs)
+ self.gdkGC.cap_style = self._capd[self._capstyle]
+
+
+ def set_clip_rectangle(self, rectangle):
+ GraphicsContextBase.set_clip_rectangle(self, rectangle)
+ if rectangle is None:
+ return
+ l,b,w,h = rectangle.bounds
+ rectangle = (int(l), self.renderer.height-int(b+h)+1,
+ int(w), int(h))
+ #rectangle = (int(l), self.renderer.height-int(b+h),
+ # int(w+1), int(h+2))
+ self.gdkGC.set_clip_rectangle(rectangle)
+
+ def set_dashes(self, dash_offset, dash_list):
+ GraphicsContextBase.set_dashes(self, dash_offset, dash_list)
+
+ if dash_list is None:
+ self.gdkGC.line_style = gdk.LINE_SOLID
+ else:
+ pixels = self.renderer.points_to_pixels(np.asarray(dash_list))
+ dl = [max(1, int(np.round(val))) for val in pixels]
+ self.gdkGC.set_dashes(dash_offset, dl)
+ self.gdkGC.line_style = gdk.LINE_ON_OFF_DASH
+
+
+ def set_foreground(self, fg, isRGBA=False):
+ GraphicsContextBase.set_foreground(self, fg, isRGBA)
+ self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
+
+
+ def set_joinstyle(self, js):
+ GraphicsContextBase.set_joinstyle(self, js)
+ self.gdkGC.join_style = self._joind[self._joinstyle]
+
+
+ def set_linewidth(self, w):
+ GraphicsContextBase.set_linewidth(self, w)
+ if w == 0:
+ self.gdkGC.line_width = 0
+ else:
+ pixels = self.renderer.points_to_pixels(w)
+ self.gdkGC.line_width = max(1, int(np.round(pixels)))
+
+
+class FigureCanvasGDK (FigureCanvasBase):
+ def __init__(self, figure):
+ FigureCanvasBase.__init__(self, figure)
+ if self.__class__ == matplotlib.backends.backend_gdk.FigureCanvasGDK:
+ warn_deprecated('2.0', message="The GDK backend is "
+ "deprecated. It is untested, known to be "
+ "broken and will be removed in Matplotlib 3.0. "
+ "Use the Agg backend instead. "
+ "See Matplotlib usage FAQ for"
+ " more info on backends.",
+ alternative="Agg")
+ self._renderer_init()
+
+ def _renderer_init(self):
+ self._renderer = RendererGDK (gtk.DrawingArea(), self.figure.dpi)
+
+ def _render_figure(self, pixmap, width, height):
+ self._renderer.set_pixmap (pixmap)
+ self._renderer.set_width_height (width, height)
+ self.figure.draw (self._renderer)
+
+ filetypes = FigureCanvasBase.filetypes.copy()
+ filetypes['jpg'] = 'JPEG'
+ filetypes['jpeg'] = 'JPEG'
+
+ def print_jpeg(self, filename, *args, **kwargs):
+ return self._print_image(filename, 'jpeg')
+ print_jpg = print_jpeg
+
+ def print_png(self, filename, *args, **kwargs):
+ return self._print_image(filename, 'png')
+
+ def _print_image(self, filename, format, *args, **kwargs):
+ width, height = self.get_width_height()
+ pixmap = gtk.gdk.Pixmap (None, width, height, depth=24)
+ self._render_figure(pixmap, width, height)
+
+ # jpg colors don't match the display very well, png colors match
+ # better
+ pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8,
+ width, height)
+ pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
+ 0, 0, 0, 0, width, height)
+
+ # set the default quality, if we are writing a JPEG.
+ # http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html#method-gdkpixbuf--save
+ options = {k: kwargs[k] for k in ['quality'] if k in kwargs}
+ if format in ['jpg', 'jpeg']:
+ options.setdefault('quality', rcParams['savefig.jpeg_quality'])
+ options['quality'] = str(options['quality'])
+
+ pixbuf.save(filename, format, options=options)
+
+
+@_Backend.export
+class _BackendGDK(_Backend):
+ FigureCanvas = FigureCanvasGDK
+ FigureManager = FigureManagerBase
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk.py
new file mode 100644
index 00000000000..a4ae7cc28b7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk.py
@@ -0,0 +1,1037 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import logging
+import os
+import sys
+import warnings
+
+if six.PY3:
+ warnings.warn(
+ "The gtk* backends have not been tested with Python 3.x",
+ ImportWarning)
+
+try:
+ import gobject
+ import gtk; gdk = gtk.gdk
+ import pango
+except ImportError:
+ raise ImportError("Gtk* backend requires pygtk to be installed.")
+
+pygtk_version_required = (2,4,0)
+if gtk.pygtk_version < pygtk_version_required:
+ raise ImportError ("PyGTK %d.%d.%d is installed\n"
+ "PyGTK %d.%d.%d or later is required"
+ % (gtk.pygtk_version + pygtk_version_required))
+del pygtk_version_required
+
+_new_tooltip_api = (gtk.pygtk_version[1] >= 12)
+
+import matplotlib
+from matplotlib._pylab_helpers import Gcf
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
+ TimerBase, cursors)
+
+from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
+from matplotlib.cbook import is_writable_file_like, warn_deprecated
+from matplotlib.figure import Figure
+from matplotlib.widgets import SubplotTool
+
+from matplotlib import (
+ cbook, colors as mcolors, lines, markers, rcParams)
+
+_log = logging.getLogger(__name__)
+
+backend_version = "%d.%d.%d" % gtk.pygtk_version
+
+# the true dots per inch on the screen; should be display dependent
+# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
+PIXELS_PER_INCH = 96
+
+# Hide the benign warning that it can't stat a file that doesn't exist
+warnings.filterwarnings('ignore', '.*Unable to retrieve the file info for.*', gtk.Warning)
+
+cursord = {
+ cursors.MOVE : gdk.Cursor(gdk.FLEUR),
+ cursors.HAND : gdk.Cursor(gdk.HAND2),
+ cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
+ cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
+ cursors.WAIT : gdk.Cursor(gdk.WATCH),
+ }
+
+# ref gtk+/gtk/gtkwidget.h
+def GTK_WIDGET_DRAWABLE(w):
+ flags = w.flags()
+ return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
+
+
+class TimerGTK(TimerBase):
+ '''
+ Subclass of :class:`backend_bases.TimerBase` using GTK for timer events.
+
+ Attributes
+ ----------
+ interval : int
+ The time between timer events in milliseconds. Default is 1000 ms.
+ single_shot : bool
+ Boolean flag indicating whether this timer should operate as single
+ shot (run once and then stop). Defaults to False.
+ callbacks : list
+ Stores list of (func, args) tuples that will be called upon timer
+ events. This list can be manipulated directly, or the functions
+ `add_callback` and `remove_callback` can be used.
+
+ '''
+ def _timer_start(self):
+ # Need to stop it, otherwise we potentially leak a timer id that will
+ # never be stopped.
+ self._timer_stop()
+ self._timer = gobject.timeout_add(self._interval, self._on_timer)
+
+ def _timer_stop(self):
+ if self._timer is not None:
+ gobject.source_remove(self._timer)
+ self._timer = None
+
+ def _timer_set_interval(self):
+ # Only stop and restart it if the timer has already been started
+ if self._timer is not None:
+ self._timer_stop()
+ self._timer_start()
+
+ def _on_timer(self):
+ TimerBase._on_timer(self)
+
+ # Gtk timeout_add() requires that the callback returns True if it
+ # is to be called again.
+ if len(self.callbacks) > 0 and not self._single:
+ return True
+ else:
+ self._timer = None
+ return False
+
+
+class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
+ keyvald = {65507 : 'control',
+ 65505 : 'shift',
+ 65513 : 'alt',
+ 65508 : 'control',
+ 65506 : 'shift',
+ 65514 : 'alt',
+ 65361 : 'left',
+ 65362 : 'up',
+ 65363 : 'right',
+ 65364 : 'down',
+ 65307 : 'escape',
+ 65470 : 'f1',
+ 65471 : 'f2',
+ 65472 : 'f3',
+ 65473 : 'f4',
+ 65474 : 'f5',
+ 65475 : 'f6',
+ 65476 : 'f7',
+ 65477 : 'f8',
+ 65478 : 'f9',
+ 65479 : 'f10',
+ 65480 : 'f11',
+ 65481 : 'f12',
+ 65300 : 'scroll_lock',
+ 65299 : 'break',
+ 65288 : 'backspace',
+ 65293 : 'enter',
+ 65379 : 'insert',
+ 65535 : 'delete',
+ 65360 : 'home',
+ 65367 : 'end',
+ 65365 : 'pageup',
+ 65366 : 'pagedown',
+ 65438 : '0',
+ 65436 : '1',
+ 65433 : '2',
+ 65435 : '3',
+ 65430 : '4',
+ 65437 : '5',
+ 65432 : '6',
+ 65429 : '7',
+ 65431 : '8',
+ 65434 : '9',
+ 65451 : '+',
+ 65453 : '-',
+ 65450 : '*',
+ 65455 : '/',
+ 65439 : 'dec',
+ 65421 : 'enter',
+ 65511 : 'super',
+ 65512 : 'super',
+ 65406 : 'alt',
+ 65289 : 'tab',
+ }
+
+ # Setting this as a static constant prevents
+ # this resulting expression from leaking
+ event_mask = (gdk.BUTTON_PRESS_MASK |
+ gdk.BUTTON_RELEASE_MASK |
+ gdk.EXPOSURE_MASK |
+ gdk.KEY_PRESS_MASK |
+ gdk.KEY_RELEASE_MASK |
+ gdk.ENTER_NOTIFY_MASK |
+ gdk.LEAVE_NOTIFY_MASK |
+ gdk.POINTER_MOTION_MASK |
+ gdk.POINTER_MOTION_HINT_MASK)
+
+ def __init__(self, figure):
+ if self.__class__ == matplotlib.backends.backend_gtk.FigureCanvasGTK:
+ warn_deprecated('2.0', message="The GTK backend is "
+ "deprecated. It is untested, known to be "
+ "broken and will be removed in Matplotlib 3.0. "
+ "Use the GTKAgg backend instead. "
+ "See Matplotlib usage FAQ for"
+ " more info on backends.",
+ alternative="GTKAgg")
+ FigureCanvasBase.__init__(self, figure)
+ gtk.DrawingArea.__init__(self)
+
+ self._idle_draw_id = 0
+ self._need_redraw = True
+ self._pixmap_width = -1
+ self._pixmap_height = -1
+ self._lastCursor = None
+
+ self.connect('scroll_event', self.scroll_event)
+ self.connect('button_press_event', self.button_press_event)
+ self.connect('button_release_event', self.button_release_event)
+ self.connect('configure_event', self.configure_event)
+ self.connect('expose_event', self.expose_event)
+ self.connect('key_press_event', self.key_press_event)
+ self.connect('key_release_event', self.key_release_event)
+ self.connect('motion_notify_event', self.motion_notify_event)
+ self.connect('leave_notify_event', self.leave_notify_event)
+ self.connect('enter_notify_event', self.enter_notify_event)
+
+ self.set_events(self.__class__.event_mask)
+
+ self.set_double_buffered(False)
+ self.set_flags(gtk.CAN_FOCUS)
+ self._renderer_init()
+
+ self.last_downclick = {}
+
+ def destroy(self):
+ #gtk.DrawingArea.destroy(self)
+ self.close_event()
+ if self._idle_draw_id != 0:
+ gobject.source_remove(self._idle_draw_id)
+
+ def scroll_event(self, widget, event):
+ x = event.x
+ # flipy so y=0 is bottom of canvas
+ y = self.allocation.height - event.y
+ if event.direction==gdk.SCROLL_UP:
+ step = 1
+ else:
+ step = -1
+ FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
+ return False # finish event propagation?
+
+ def button_press_event(self, widget, event):
+ x = event.x
+ # flipy so y=0 is bottom of canvas
+ y = self.allocation.height - event.y
+ dblclick = (event.type == gdk._2BUTTON_PRESS)
+ if not dblclick:
+ # GTK is the only backend that generates a DOWN-UP-DOWN-DBLCLICK-UP event
+ # sequence for a double click. All other backends have a DOWN-UP-DBLCLICK-UP
+ # sequence. In order to provide consistency to matplotlib users, we will
+ # eat the extra DOWN event in the case that we detect it is part of a double
+ # click.
+ # first, get the double click time in milliseconds.
+ current_time = event.get_time()
+ last_time = self.last_downclick.get(event.button,0)
+ dblclick_time = gtk.settings_get_for_screen(gdk.screen_get_default()).get_property('gtk-double-click-time')
+ delta_time = current_time-last_time
+ if delta_time < dblclick_time:
+ del self.last_downclick[event.button] # we do not want to eat more than one event.
+ return False # eat.
+ self.last_downclick[event.button] = current_time
+ FigureCanvasBase.button_press_event(self, x, y, event.button, dblclick=dblclick, guiEvent=event)
+ return False # finish event propagation?
+
+ def button_release_event(self, widget, event):
+ x = event.x
+ # flipy so y=0 is bottom of canvas
+ y = self.allocation.height - event.y
+ FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
+ return False # finish event propagation?
+
+ def key_press_event(self, widget, event):
+ key = self._get_key(event)
+ FigureCanvasBase.key_press_event(self, key, guiEvent=event)
+ return True # stop event propagation
+
+ def key_release_event(self, widget, event):
+ key = self._get_key(event)
+ FigureCanvasBase.key_release_event(self, key, guiEvent=event)
+ return True # stop event propagation
+
+ def motion_notify_event(self, widget, event):
+ if event.is_hint:
+ x, y, state = event.window.get_pointer()
+ else:
+ x, y, state = event.x, event.y, event.state
+
+ # flipy so y=0 is bottom of canvas
+ y = self.allocation.height - y
+ FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
+ return False # finish event propagation?
+
+ def leave_notify_event(self, widget, event):
+ FigureCanvasBase.leave_notify_event(self, event)
+
+ def enter_notify_event(self, widget, event):
+ x, y, state = event.window.get_pointer()
+ FigureCanvasBase.enter_notify_event(self, event, xy=(x, y))
+
+ def _get_key(self, event):
+ if event.keyval in self.keyvald:
+ key = self.keyvald[event.keyval]
+ elif event.keyval < 256:
+ key = chr(event.keyval)
+ else:
+ key = None
+
+ for key_mask, prefix in (
+ [gdk.MOD4_MASK, 'super'],
+ [gdk.MOD1_MASK, 'alt'],
+ [gdk.CONTROL_MASK, 'ctrl'], ):
+ if event.state & key_mask:
+ key = '{0}+{1}'.format(prefix, key)
+
+ return key
+
+ def configure_event(self, widget, event):
+ if widget.window is None:
+ return
+ w, h = event.width, event.height
+ if w < 3 or h < 3:
+ return # empty fig
+
+ # resize the figure (in inches)
+ dpi = self.figure.dpi
+ self.figure.set_size_inches(w/dpi, h/dpi, forward=False)
+ self._need_redraw = True
+
+ return False # finish event propagation?
+
+ def draw(self):
+ # Note: FigureCanvasBase.draw() is inconveniently named as it clashes
+ # with the deprecated gtk.Widget.draw()
+
+ self._need_redraw = True
+ if GTK_WIDGET_DRAWABLE(self):
+ self.queue_draw()
+ # do a synchronous draw (its less efficient than an async draw,
+ # but is required if/when animation is used)
+ self.window.process_updates (False)
+
+ def draw_idle(self):
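+ # Only one idle callback is scheduled at a time; repeated calls are coalesced until it runs.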
+ if self._idle_draw_id != 0:
+ return
+ def idle_draw(*args):
+ try:
+ self.draw()
+ finally:
+ self._idle_draw_id = 0
+ return False
+ self._idle_draw_id = gobject.idle_add(idle_draw)
+
+
+ def _renderer_init(self):
+ """Override by GTK backends to select a different renderer
+ """Overridden by GTK backends to select a different renderer.
+ set_pixmap ()
+ set_width_height ()
+ that are used by
+ _render_figure() / _pixmap_prepare()
+ """
+ self._renderer = RendererGDK (self, self.figure.dpi)
+
+
+ def _pixmap_prepare(self, width, height):
+ """
+ Make sure self._pixmap is at least width x height,
+ creating a new pixmap if necessary
+ """
+ create_pixmap = False
+ if width > self._pixmap_width:
+ # increase the pixmap in 10%+ (rather than 1 pixel) steps
+ self._pixmap_width = max (int (self._pixmap_width * 1.1),
+ width)
+ create_pixmap = True
+
+ if height > self._pixmap_height:
+ self._pixmap_height = max (int (self._pixmap_height * 1.1),
+ height)
+ create_pixmap = True
+
+ if create_pixmap:
+ self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
+ self._pixmap_height)
+ self._renderer.set_pixmap (self._pixmap)
+
+
+ def _render_figure(self, pixmap, width, height):
+ """used by GTK and GTKcairo. GTKAgg overrides
+ """
+ self._renderer.set_width_height (width, height)
+ self.figure.draw (self._renderer)
+
+
+ def expose_event(self, widget, event):
+ """Expose_event for all GTK backends. Should not be overridden.
+ """
+ toolbar = self.toolbar
+ # if toolbar:
+ # toolbar.set_cursor(cursors.WAIT)
+ if GTK_WIDGET_DRAWABLE(self):
+ if self._need_redraw:
+ x, y, w, h = self.allocation
+ self._pixmap_prepare (w, h)
+ self._render_figure(self._pixmap, w, h)
+ self._need_redraw = False
+ x, y, w, h = event.area
+ self.window.draw_drawable (self.style.fg_gc[self.state],
+ self._pixmap, x, y, x, y, w, h)
+ # if toolbar:
+ # toolbar.set_cursor(toolbar._lastCursor)
+ return False # finish event propagation?
+
+ filetypes = FigureCanvasBase.filetypes.copy()
+ filetypes['jpg'] = 'JPEG'
+ filetypes['jpeg'] = 'JPEG'
+ filetypes['png'] = 'Portable Network Graphics'
+
+ def print_jpeg(self, filename, *args, **kwargs):
+ return self._print_image(filename, 'jpeg')
+ print_jpg = print_jpeg
+
+ def print_png(self, filename, *args, **kwargs):
+ return self._print_image(filename, 'png')
+
+ def _print_image(self, filename, format, *args, **kwargs):
+ if self.flags() & gtk.REALIZED == 0:
+ # realize the widget so self.window exists (needed for the pixmap); this
+ # has the side effect of altering the figure width/height (via configure-event?)
+ gtk.DrawingArea.realize(self)
+
+ width, height = self.get_width_height()
+ pixmap = gdk.Pixmap (self.window, width, height)
+ self._renderer.set_pixmap (pixmap)
+ self._render_figure(pixmap, width, height)
+
+ # jpg colors don't match the display very well, png colors match
+ # better
+ pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
+ pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
+ 0, 0, 0, 0, width, height)
+
+ # set the default quality, if we are writing a JPEG.
+ # http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html#method-gdkpixbuf--save
+ options = {k: kwargs[k] for k in ['quality'] if k in kwargs}
+ if format in ['jpg', 'jpeg']:
+ options.setdefault('quality', rcParams['savefig.jpeg_quality'])
+ options['quality'] = str(options['quality'])
+
+ if isinstance(filename, six.string_types):
+ try:
+ pixbuf.save(filename, format, options=options)
+ except gobject.GError as exc:
+ error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
+ elif is_writable_file_like(filename):
+ if hasattr(pixbuf, 'save_to_callback'):
+ def save_callback(buf, data=None):
+ data.write(buf)
+ try:
+ pixbuf.save_to_callback(save_callback, format, user_data=filename, options=options)
+ except gobject.GError as exc:
+ error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
+ else:
+ raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
+ else:
+ raise ValueError("filename must be a path or a file-like object")
+
+ def new_timer(self, *args, **kwargs):
+ """
+ Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
+ This is useful for getting periodic events through the backend's native
+ event loop. Implemented only for backends with GUIs.
+
+ Other Parameters
+ ----------------
+ interval : scalar
+ Timer interval in milliseconds
+ callbacks : list
+ Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
+ will be executed by the timer every *interval*.
+ """
+ return TimerGTK(*args, **kwargs)
+
+ def flush_events(self):
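+ # Hold the global GDK lock while pumping any pending GTK events.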
+ gtk.gdk.threads_enter()
+ while gtk.events_pending():
+ gtk.main_iteration(True)
+ gtk.gdk.flush()
+ gtk.gdk.threads_leave()
+
+
+class FigureManagerGTK(FigureManagerBase):
+ """
+ Attributes
+ ----------
+ canvas : `FigureCanvas`
+ The FigureCanvas instance
+ num : int or str
+ The Figure number
+ toolbar : gtk.Toolbar
+ The gtk.Toolbar (gtk only)
+ vbox : gtk.VBox
+ The gtk.VBox containing the canvas and toolbar (gtk only)
+ window : gtk.Window
+ The gtk.Window (gtk only)
+
+ """
+ def __init__(self, canvas, num):
+ FigureManagerBase.__init__(self, canvas, num)
+
+ self.window = gtk.Window()
+ self.window.set_wmclass("matplotlib", "Matplotlib")
+ self.set_window_title("Figure %d" % num)
+ if window_icon:
+ try:
+ self.window.set_icon_from_file(window_icon)
+ except:
+ # some versions of gtk throw a glib.GError but not
+ # all, so I am not sure how to catch it. I am unhappy
+ # doing a blanket catch here, but am not sure what a
+ # better way is - JDH
+ _log.info('Could not load matplotlib '
+ 'icon: %s', sys.exc_info()[1])
+
+ self.vbox = gtk.VBox()
+ self.window.add(self.vbox)
+ self.vbox.show()
+
+ self.canvas.show()
+
+ self.vbox.pack_start(self.canvas, True, True)
+
+ self.toolbar = self._get_toolbar(canvas)
+
+ # calculate size for window
+ w = int (self.canvas.figure.bbox.width)
+ h = int (self.canvas.figure.bbox.height)
+
+ if self.toolbar is not None:
+ self.toolbar.show()
+ self.vbox.pack_end(self.toolbar, False, False)
+
+ tb_w, tb_h = self.toolbar.size_request()
+ h += tb_h
+ self.window.set_default_size (w, h)
+
+ def destroy(*args):
+ Gcf.destroy(num)
+ self.window.connect("destroy", destroy)
+ self.window.connect("delete_event", destroy)
+ if matplotlib.is_interactive():
+ self.window.show()
+ self.canvas.draw_idle()
+
+ def notify_axes_change(fig):
+ 'this will be called whenever the current axes is changed'
+ if self.toolbar is not None: self.toolbar.update()
+ self.canvas.figure.add_axobserver(notify_axes_change)
+
+ self.canvas.grab_focus()
+
+ def destroy(self, *args):
+ if hasattr(self, 'toolbar') and self.toolbar is not None:
+ self.toolbar.destroy()
+ if hasattr(self, 'vbox'):
+ self.vbox.destroy()
+ if hasattr(self, 'window'):
+ self.window.destroy()
+ if hasattr(self, 'canvas'):
+ self.canvas.destroy()
+ self.__dict__.clear() #Is this needed? Other backends don't have it.
+
+ if Gcf.get_num_fig_managers()==0 and \
+ not matplotlib.is_interactive() and \
+ gtk.main_level() >= 1:
+ gtk.main_quit()
+
+ def show(self):
+ # show the figure window
+ self.window.show()
+ # raise the window above others and release the "above lock"
+ self.window.set_keep_above(True)
+ self.window.set_keep_above(False)
+
+ def full_screen_toggle(self):
+ self._full_screen_flag = not self._full_screen_flag
+ if self._full_screen_flag:
+ self.window.fullscreen()
+ else:
+ self.window.unfullscreen()
+ _full_screen_flag = False
+
+
+ def _get_toolbar(self, canvas):
+ # must be inited after the window, drawingArea and figure
+ # attrs are set
+ if rcParams['toolbar'] == 'toolbar2':
+ toolbar = NavigationToolbar2GTK (canvas, self.window)
+ else:
+ toolbar = None
+ return toolbar
+
+ def get_window_title(self):
+ return self.window.get_title()
+
+ def set_window_title(self, title):
+ self.window.set_title(title)
+
+ def resize(self, width, height):
+ 'set the canvas size in pixels'
+ #_, _, cw, ch = self.canvas.allocation
+ #_, _, ww, wh = self.window.allocation
+ #self.window.resize (width-cw+ww, height-ch+wh)
+ self.window.resize(width, height)
+
+
+class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
+ def __init__(self, canvas, window):
+ self.win = window
+ gtk.Toolbar.__init__(self)
+ NavigationToolbar2.__init__(self, canvas)
+
+ def set_message(self, s):
+ self.message.set_label(s)
+
+ def set_cursor(self, cursor):
+ self.canvas.window.set_cursor(cursord[cursor])
+ gtk.main_iteration()
+
+ def release(self, event):
+ try: del self._pixmapBack
+ except AttributeError: pass
+
+ def draw_rubberband(self, event, x0, y0, x1, y1):
+ 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
+ drawable = self.canvas.window
+ if drawable is None:
+ return
+
+ gc = drawable.new_gc()
+
+ height = self.canvas.figure.bbox.height
+ y1 = height - y1
+ y0 = height - y0
+
+ w = abs(x1 - x0)
+ h = abs(y1 - y0)
+
+ rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
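+ # On the first call the axes background is snapshotted into _pixmapBack;
+ # later calls restore it before drawing the new rectangle, so old rubber
+ # bands do not accumulate.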
+ try:
+ lastrect, pixmapBack = self._pixmapBack
+ except AttributeError:
+ #snap image back
+ if event.inaxes is None:
+ return
+
+ ax = event.inaxes
+ l,b,w,h = [int(val) for val in ax.bbox.bounds]
+ b = int(height)-(b+h)
+ axrect = l,b,w,h
+ self._pixmapBack = axrect, gtk.gdk.Pixmap(drawable, w, h)
+ self._pixmapBack[1].draw_drawable(gc, drawable, l, b, 0, 0, w, h)
+ else:
+ drawable.draw_drawable(gc, pixmapBack, 0, 0, *lastrect)
+ drawable.draw_rectangle(gc, False, *rect)
+
+
+ def _init_toolbar(self):
+ self.set_style(gtk.TOOLBAR_ICONS)
+ self._init_toolbar2_4()
+
+
+ def _init_toolbar2_4(self):
+ basedir = os.path.join(rcParams['datapath'],'images')
+ if not _new_tooltip_api:
+ self.tooltips = gtk.Tooltips()
+
+ for text, tooltip_text, image_file, callback in self.toolitems:
+ if text is None:
+ self.insert( gtk.SeparatorToolItem(), -1 )
+ continue
+ fname = os.path.join(basedir, image_file + '.png')
+ image = gtk.Image()
+ image.set_from_file(fname)
+ tbutton = gtk.ToolButton(image, text)
+ self.insert(tbutton, -1)
+ tbutton.connect('clicked', getattr(self, callback))
+ if _new_tooltip_api:
+ tbutton.set_tooltip_text(tooltip_text)
+ else:
+ tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
+
+ toolitem = gtk.SeparatorToolItem()
+ self.insert(toolitem, -1)
+ # set_draw() not making separator invisible,
+ # bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
+ toolitem.set_draw(False)
+ toolitem.set_expand(True)
+
+ toolitem = gtk.ToolItem()
+ self.insert(toolitem, -1)
+ self.message = gtk.Label()
+ toolitem.add(self.message)
+
+ self.show_all()
+
+ def get_filechooser(self):
+ fc = FileChooserDialog(
+ title='Save the figure',
+ parent=self.win,
+ path=os.path.expanduser(rcParams['savefig.directory']),
+ filetypes=self.canvas.get_supported_filetypes(),
+ default_filetype=self.canvas.get_default_filetype())
+ fc.set_current_name(self.canvas.get_default_filename())
+ return fc
+
+ def save_figure(self, *args):
+ chooser = self.get_filechooser()
+ fname, format = chooser.get_filename_from_user()
+ chooser.destroy()
+ if fname:
+ startpath = os.path.expanduser(rcParams['savefig.directory'])
+ # Save dir for next time, unless empty str (i.e., use cwd).
+ if startpath != "":
+ rcParams['savefig.directory'] = (
+ os.path.dirname(six.text_type(fname)))
+ try:
+ self.canvas.figure.savefig(fname, format=format)
+ except Exception as e:
+ error_msg_gtk(str(e), parent=self)
+
+ def configure_subplots(self, button):
+ toolfig = Figure(figsize=(6,3))
+ canvas = self._get_canvas(toolfig)
+ toolfig.subplots_adjust(top=0.9)
+ tool = SubplotTool(self.canvas.figure, toolfig)
+
+ w = int(toolfig.bbox.width)
+ h = int(toolfig.bbox.height)
+
+ window = gtk.Window()
+ if window_icon:
+ try:
+ window.set_icon_from_file(window_icon)
+ except:
+ # we presumably already logged a message on the
+ # failure of the main plot, don't keep reporting
+ pass
+ window.set_title("Subplot Configuration Tool")
+ window.set_default_size(w, h)
+ vbox = gtk.VBox()
+ window.add(vbox)
+ vbox.show()
+
+ canvas.show()
+ vbox.pack_start(canvas, True, True)
+ window.show()
+
+ def _get_canvas(self, fig):
+ return FigureCanvasGTK(fig)
+
+
+class FileChooserDialog(gtk.FileChooserDialog):
+ """GTK+ 2.4 file selector which presents the user with a menu
+ of supported image formats
+ """
+ def __init__ (self,
+ title = 'Save file',
+ parent = None,
+ action = gtk.FILE_CHOOSER_ACTION_SAVE,
+ buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
+ gtk.STOCK_SAVE, gtk.RESPONSE_OK),
+ path = None,
+ filetypes = [],
+ default_filetype = None
+ ):
+ super(FileChooserDialog, self).__init__(title, parent, action, buttons)
+ super(FileChooserDialog, self).set_do_overwrite_confirmation(True)
+ self.set_default_response(gtk.RESPONSE_OK)
+
+ if not path:
+ path = os.getcwd() + os.sep
+
+ # create an extra widget to list supported image formats
+ self.set_current_folder (path)
+ self.set_current_name ('image.' + default_filetype)
+
+ hbox = gtk.HBox(spacing=10)
+ hbox.pack_start(gtk.Label ("File Format:"), expand=False)
+
+ liststore = gtk.ListStore(gobject.TYPE_STRING)
+ cbox = gtk.ComboBox(liststore)
+ cell = gtk.CellRendererText()
+ cbox.pack_start(cell, True)
+ cbox.add_attribute(cell, 'text', 0)
+ hbox.pack_start(cbox)
+
+ self.filetypes = filetypes
+ self.sorted_filetypes = sorted(six.iteritems(filetypes))
+ default = 0
+ for i, (ext, name) in enumerate(self.sorted_filetypes):
+ cbox.append_text("%s (*.%s)" % (name, ext))
+ if ext == default_filetype:
+ default = i
+ cbox.set_active(default)
+ self.ext = default_filetype
+
+ def cb_cbox_changed (cbox, data=None):
+ """File extension changed"""
+ head, filename = os.path.split(self.get_filename())
+ root, ext = os.path.splitext(filename)
+ ext = ext[1:]
+ new_ext = self.sorted_filetypes[cbox.get_active()][0]
+ self.ext = new_ext
+
+ if ext in self.filetypes:
+ filename = root + '.' + new_ext
+ elif ext == '':
+ filename = filename.rstrip('.') + '.' + new_ext
+
+ self.set_current_name(filename)
+ cbox.connect("changed", cb_cbox_changed)
+
+ hbox.show_all()
+ self.set_extra_widget(hbox)
+
+ def get_filename_from_user (self):
+ while True:
+ filename = None
+ if self.run() != int(gtk.RESPONSE_OK):
+ break
+ filename = self.get_filename()
+ break
+
+ return filename, self.ext
+
+
+class DialogLineprops(object):
+ """
+ A GUI dialog for controlling lineprops
+ """
+ signals = (
+ 'on_combobox_lineprops_changed',
+ 'on_combobox_linestyle_changed',
+ 'on_combobox_marker_changed',
+ 'on_colorbutton_linestyle_color_set',
+ 'on_colorbutton_markerface_color_set',
+ 'on_dialog_lineprops_okbutton_clicked',
+ 'on_dialog_lineprops_cancelbutton_clicked',
+ )
+
+ linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
+ linestyled = {s: i for i, s in enumerate(linestyles)}
+
+ markers = [m for m in markers.MarkerStyle.markers
+ if isinstance(m, six.string_types)]
+ markerd = {s: i for i, s in enumerate(markers)}
+
+ def __init__(self, lines):
+ import gtk.glade
+
+ datadir = matplotlib.get_data_path()
+ gladefile = os.path.join(datadir, 'lineprops.glade')
+ if not os.path.exists(gladefile):
+ raise IOError(
+ 'Could not find gladefile lineprops.glade in %s' % datadir)
+
+ self._inited = False
+ self._updateson = True # set to False to suppress updates while setting widgets manually
+ self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
+ self.wtree.signal_autoconnect(
+ {s: getattr(self, s) for s in self.signals})
+
+ self.dlg = self.wtree.get_widget('dialog_lineprops')
+
+ self.lines = lines
+
+ cbox = self.wtree.get_widget('combobox_lineprops')
+ cbox.set_active(0)
+ self.cbox_lineprops = cbox
+
+ cbox = self.wtree.get_widget('combobox_linestyles')
+ for ls in self.linestyles:
+ cbox.append_text(ls)
+ cbox.set_active(0)
+ self.cbox_linestyles = cbox
+
+ cbox = self.wtree.get_widget('combobox_markers')
+ for m in self.markers:
+ cbox.append_text(m)
+ cbox.set_active(0)
+ self.cbox_markers = cbox
+ self._lastcnt = 0
+ self._inited = True
+
+ def show(self):
+ 'populate the combo box'
+ self._updateson = False
+ # flush the old
+ cbox = self.cbox_lineprops
+ for i in range(self._lastcnt-1,-1,-1):
+ cbox.remove_text(i)
+
+ # add the new
+ for line in self.lines:
+ cbox.append_text(line.get_label())
+ cbox.set_active(0)
+
+ self._updateson = True
+ self._lastcnt = len(self.lines)
+ self.dlg.show()
+
+ def get_active_line(self):
+ 'get the active line'
+ ind = self.cbox_lineprops.get_active()
+ line = self.lines[ind]
+ return line
+
+ def get_active_linestyle(self):
+ 'get the active linestyle'
+ ind = self.cbox_linestyles.get_active()
+ ls = self.linestyles[ind]
+ return ls
+
+ def get_active_marker(self):
+ 'get the active marker'
+ ind = self.cbox_markers.get_active()
+ m = self.markers[ind]
+ return m
+
+ def _update(self):
+ 'update the active line props from the widgets'
+ if not self._inited or not self._updateson: return
+ line = self.get_active_line()
+ ls = self.get_active_linestyle()
+ marker = self.get_active_marker()
+ line.set_linestyle(ls)
+ line.set_marker(marker)
+
+ button = self.wtree.get_widget('colorbutton_linestyle')
+ color = button.get_color()
+ r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
+ line.set_color((r,g,b))
+
+ button = self.wtree.get_widget('colorbutton_markerface')
+ color = button.get_color()
+ r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
+ line.set_markerfacecolor((r,g,b))
+
+ line.figure.canvas.draw()
+
+ def on_combobox_lineprops_changed(self, item):
+ 'update the widgets from the active line'
+ if not self._inited: return
+ self._updateson = False
+ line = self.get_active_line()
+
+ ls = line.get_linestyle()
+ if ls is None: ls = 'None'
+ self.cbox_linestyles.set_active(self.linestyled[ls])
+
+ marker = line.get_marker()
+ if marker is None: marker = 'None'
+ self.cbox_markers.set_active(self.markerd[marker])
+
+ rgba = mcolors.to_rgba(line.get_color())
+ color = gtk.gdk.Color(*[int(val*65535) for val in rgba[:3]])
+ button = self.wtree.get_widget('colorbutton_linestyle')
+ button.set_color(color)
+
+ rgba = mcolors.to_rgba(line.get_markerfacecolor())
+ color = gtk.gdk.Color(*[int(val*65535) for val in rgba[:3]])
+ button = self.wtree.get_widget('colorbutton_markerface')
+ button.set_color(color)
+ self._updateson = True
+
+ def on_combobox_linestyle_changed(self, item):
+ self._update()
+
+ def on_combobox_marker_changed(self, item):
+ self._update()
+
+ def on_colorbutton_linestyle_color_set(self, button):
+ self._update()
+
+ def on_colorbutton_markerface_color_set(self, button):
+ 'called colorbutton marker clicked'
+ self._update()
+
+ def on_dialog_lineprops_okbutton_clicked(self, button):
+ self._update()
+ self.dlg.hide()
+
+ def on_dialog_lineprops_cancelbutton_clicked(self, button):
+ self.dlg.hide()
+
+# set icon used when windows are minimized
+# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
+# versions of pygtk, so we have to use a PNG file instead.
+try:
+ if gtk.pygtk_version < (2, 8, 0) or sys.platform == 'win32':
+ icon_filename = 'matplotlib.png'
+ else:
+ icon_filename = 'matplotlib.svg'
+ window_icon = os.path.join(rcParams['datapath'], 'images', icon_filename)
+except:
+ window_icon = None
+ _log.info('Could not load matplotlib icon: %s', sys.exc_info()[1])
+
+def error_msg_gtk(msg, parent=None):
+ if parent is not None: # find the toplevel gtk.Window
+ parent = parent.get_toplevel()
+ if parent.flags() & gtk.TOPLEVEL == 0:
+ parent = None
+
+ if not isinstance(msg, six.string_types):
+ msg = ','.join(map(str, msg))
+
+ dialog = gtk.MessageDialog(
+ parent = parent,
+ type = gtk.MESSAGE_ERROR,
+ buttons = gtk.BUTTONS_OK,
+ message_format = msg)
+ dialog.run()
+ dialog.destroy()
+
+
+@_Backend.export
+class _BackendGTK(_Backend):
+ FigureCanvas = FigureCanvasGTK
+ FigureManager = FigureManagerGTK
+
+ @staticmethod
+ def trigger_manager_draw(manager):
+ manager.canvas.draw_idle()
+
+ @staticmethod
+ def mainloop():
+ if gtk.main_level() == 0:
+ gtk.main()
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3.py
new file mode 100644
index 00000000000..359b8fd8848
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3.py
@@ -0,0 +1,920 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import logging
+import os
+import sys
+
+import matplotlib
+from matplotlib import backend_tools, rcParams
+from matplotlib._pylab_helpers import Gcf
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
+ StatusbarBase, TimerBase, ToolContainerBase, cursors)
+from matplotlib.backend_managers import ToolManager
+from matplotlib.figure import Figure
+from matplotlib.widgets import SubplotTool
+from ._gtk3_compat import GLib, GObject, Gtk, Gdk
+
+
+_log = logging.getLogger(__name__)
+
+backend_version = "%s.%s.%s" % (
+ Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version())
+
+# the true dots per inch on the screen; should be display dependent
+# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
+PIXELS_PER_INCH = 96
+
+cursord = {
+ cursors.MOVE : Gdk.Cursor.new(Gdk.CursorType.FLEUR),
+ cursors.HAND : Gdk.Cursor.new(Gdk.CursorType.HAND2),
+ cursors.POINTER : Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR),
+ cursors.SELECT_REGION : Gdk.Cursor.new(Gdk.CursorType.TCROSS),
+ cursors.WAIT : Gdk.Cursor.new(Gdk.CursorType.WATCH),
+ }
+
+
+class TimerGTK3(TimerBase):
+ '''
+ Subclass of :class:`backend_bases.TimerBase` using GTK3 for timer events.
+
+ Attributes
+ ----------
+ interval : int
+ The time between timer events in milliseconds. Default is 1000 ms.
+ single_shot : bool
+ Boolean flag indicating whether this timer should operate as single
+ shot (run once and then stop). Defaults to False.
+ callbacks : list
+ Stores list of (func, args) tuples that will be called upon timer
+ events. This list can be manipulated directly, or the functions
+ `add_callback` and `remove_callback` can be used.
+
+ '''
+ def _timer_start(self):
+ # Need to stop it, otherwise we potentially leak a timer id that will
+ # never be stopped.
+ self._timer_stop()
+ self._timer = GLib.timeout_add(self._interval, self._on_timer)
+
+ def _timer_stop(self):
+ if self._timer is not None:
+ GLib.source_remove(self._timer)
+ self._timer = None
+
+ def _timer_set_interval(self):
+ # Only stop and restart it if the timer has already been started
+ if self._timer is not None:
+ self._timer_stop()
+ self._timer_start()
+
+ def _on_timer(self):
+ TimerBase._on_timer(self)
+
+ # Gtk timeout_add() requires that the callback returns True if it
+ # is to be called again.
+ if len(self.callbacks) > 0 and not self._single:
+ return True
+ else:
+ self._timer = None
+ return False
+
+
+class FigureCanvasGTK3(Gtk.DrawingArea, FigureCanvasBase):
+ keyvald = {65507 : 'control',
+ 65505 : 'shift',
+ 65513 : 'alt',
+ 65508 : 'control',
+ 65506 : 'shift',
+ 65514 : 'alt',
+ 65361 : 'left',
+ 65362 : 'up',
+ 65363 : 'right',
+ 65364 : 'down',
+ 65307 : 'escape',
+ 65470 : 'f1',
+ 65471 : 'f2',
+ 65472 : 'f3',
+ 65473 : 'f4',
+ 65474 : 'f5',
+ 65475 : 'f6',
+ 65476 : 'f7',
+ 65477 : 'f8',
+ 65478 : 'f9',
+ 65479 : 'f10',
+ 65480 : 'f11',
+ 65481 : 'f12',
+ 65300 : 'scroll_lock',
+ 65299 : 'break',
+ 65288 : 'backspace',
+ 65293 : 'enter',
+ 65379 : 'insert',
+ 65535 : 'delete',
+ 65360 : 'home',
+ 65367 : 'end',
+ 65365 : 'pageup',
+ 65366 : 'pagedown',
+ 65438 : '0',
+ 65436 : '1',
+ 65433 : '2',
+ 65435 : '3',
+ 65430 : '4',
+ 65437 : '5',
+ 65432 : '6',
+ 65429 : '7',
+ 65431 : '8',
+ 65434 : '9',
+ 65451 : '+',
+ 65453 : '-',
+ 65450 : '*',
+ 65455 : '/',
+ 65439 : 'dec',
+ 65421 : 'enter',
+ }
+
+ # Setting this as a static constant prevents
+ # this resulting expression from leaking
+ event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK |
+ Gdk.EventMask.BUTTON_RELEASE_MASK |
+ Gdk.EventMask.EXPOSURE_MASK |
+ Gdk.EventMask.KEY_PRESS_MASK |
+ Gdk.EventMask.KEY_RELEASE_MASK |
+ Gdk.EventMask.ENTER_NOTIFY_MASK |
+ Gdk.EventMask.LEAVE_NOTIFY_MASK |
+ Gdk.EventMask.POINTER_MOTION_MASK |
+ Gdk.EventMask.POINTER_MOTION_HINT_MASK|
+ Gdk.EventMask.SCROLL_MASK)
+
+ def __init__(self, figure):
+ FigureCanvasBase.__init__(self, figure)
+ GObject.GObject.__init__(self)
+
+ self._idle_draw_id = 0
+ self._lastCursor = None
+
+ self.connect('scroll_event', self.scroll_event)
+ self.connect('button_press_event', self.button_press_event)
+ self.connect('button_release_event', self.button_release_event)
+ self.connect('configure_event', self.configure_event)
+ self.connect('draw', self.on_draw_event)
+ self.connect('key_press_event', self.key_press_event)
+ self.connect('key_release_event', self.key_release_event)
+ self.connect('motion_notify_event', self.motion_notify_event)
+ self.connect('leave_notify_event', self.leave_notify_event)
+ self.connect('enter_notify_event', self.enter_notify_event)
+ self.connect('size_allocate', self.size_allocate)
+
+ self.set_events(self.__class__.event_mask)
+
+ self.set_double_buffered(True)
+ self.set_can_focus(True)
+ self._renderer_init()
+ default_context = GLib.main_context_get_thread_default() or GLib.main_context_default()
+
+ def destroy(self):
+ #Gtk.DrawingArea.destroy(self)
+ self.close_event()
+ if self._idle_draw_id != 0:
+ GLib.source_remove(self._idle_draw_id)
+
+ def scroll_event(self, widget, event):
+ x = event.x
+ # flipy so y=0 is bottom of canvas
+ y = self.get_allocation().height - event.y
+ if event.direction==Gdk.ScrollDirection.UP:
+ step = 1
+ else:
+ step = -1
+ FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
+ return False # finish event propagation?
+
+ def button_press_event(self, widget, event):
+ x = event.x
+ # flipy so y=0 is bottom of canvas
+ y = self.get_allocation().height - event.y
+ FigureCanvasBase.button_press_event(self, x, y, event.button, guiEvent=event)
+ return False # finish event propagation?
+
+ def button_release_event(self, widget, event):
+ x = event.x
+ # flipy so y=0 is bottom of canvas
+ y = self.get_allocation().height - event.y
+ FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
+ return False # finish event propagation?
+
+ def key_press_event(self, widget, event):
+ key = self._get_key(event)
+ FigureCanvasBase.key_press_event(self, key, guiEvent=event)
+ return True # stop event propagation
+
+ def key_release_event(self, widget, event):
+ key = self._get_key(event)
+ FigureCanvasBase.key_release_event(self, key, guiEvent=event)
+ return True # stop event propagation
+
+ def motion_notify_event(self, widget, event):
+ if event.is_hint:
+ t, x, y, state = event.window.get_pointer()
+ else:
+ x, y, state = event.x, event.y, event.get_state()
+
+ # flipy so y=0 is bottom of canvas
+ y = self.get_allocation().height - y
+ FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
+ return False # finish event propagation?
+
+ def leave_notify_event(self, widget, event):
+ FigureCanvasBase.leave_notify_event(self, event)
+
+ def enter_notify_event(self, widget, event):
+ FigureCanvasBase.enter_notify_event(self, event)
+
+ def size_allocate(self, widget, allocation):
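+ # Keep the figure size (in inches) in sync with the widget's new pixel allocation, then fire a resize event and redraw.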
+ dpival = self.figure.dpi
+ winch = allocation.width / dpival
+ hinch = allocation.height / dpival
+ self.figure.set_size_inches(winch, hinch, forward=False)
+ FigureCanvasBase.resize_event(self)
+ self.draw_idle()
+
+ def _get_key(self, event):
+ if event.keyval in self.keyvald:
+ key = self.keyvald[event.keyval]
+ elif event.keyval < 256:
+ key = chr(event.keyval)
+ else:
+ key = None
+
+ modifiers = [
+ (Gdk.ModifierType.MOD4_MASK, 'super'),
+ (Gdk.ModifierType.MOD1_MASK, 'alt'),
+ (Gdk.ModifierType.CONTROL_MASK, 'ctrl'),
+ ]
+ for key_mask, prefix in modifiers:
+ if event.state & key_mask:
+ key = '{0}+{1}'.format(prefix, key)
+
+ return key
+
+ def configure_event(self, widget, event):
+ if widget.get_property("window") is None:
+ return
+ w, h = event.width, event.height
+ if w < 3 or h < 3:
+ return # empty fig
+ # resize the figure (in inches)
+ dpi = self.figure.dpi
+ self.figure.set_size_inches(w/dpi, h/dpi, forward=False)
+ return False # finish event propagation?
+
+ def on_draw_event(self, widget, ctx):
+ # to be overwritten by GTK3Agg or GTK3Cairo
+ pass
+
+ def draw(self):
+ if self.get_visible() and self.get_mapped():
+ self.queue_draw()
+ # do a synchronous draw (it's less efficient than an async draw,
+ # but is required if/when animation is used)
+ self.get_property("window").process_updates(False)
+
+ def draw_idle(self):
+ if self._idle_draw_id != 0:
+ return
+ def idle_draw(*args):
+ try:
+ self.draw()
+ finally:
+ self._idle_draw_id = 0
+ return False
+ self._idle_draw_id = GLib.idle_add(idle_draw)
+
+ def new_timer(self, *args, **kwargs):
+ """
+ Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
+ This is useful for getting periodic events through the backend's native
+ event loop. Implemented only for backends with GUIs.
+
+ Other Parameters
+ ----------------
+ interval : scalar
+ Timer interval in milliseconds
+ callbacks : list
+ Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
+ will be executed by the timer every *interval*.
+ """
+ return TimerGTK3(*args, **kwargs)
+
+ def flush_events(self):
+ Gdk.threads_enter()
+ while Gtk.events_pending():
+ Gtk.main_iteration()
+ Gdk.flush()
+ Gdk.threads_leave()
+
+
+class FigureManagerGTK3(FigureManagerBase):
+ """
+ Attributes
+ ----------
+ canvas : `FigureCanvas`
+ The FigureCanvas instance
+ num : int or str
+ The Figure number
+ toolbar : Gtk.Toolbar
+ The Gtk.Toolbar (gtk only)
+ vbox : Gtk.VBox
+ The Gtk.VBox containing the canvas and toolbar (gtk only)
+ window : Gtk.Window
+ The Gtk.Window (gtk only)
+
+ """
+ def __init__(self, canvas, num):
+ FigureManagerBase.__init__(self, canvas, num)
+
+ self.window = Gtk.Window()
+ self.window.set_wmclass("matplotlib", "Matplotlib")
+ self.set_window_title("Figure %d" % num)
+ try:
+ self.window.set_icon_from_file(window_icon)
+ except (SystemExit, KeyboardInterrupt):
+ # re-raise exit type Exceptions
+ raise
+ except:
+ # some versions of gtk throw a glib.GError but not
+ # all, so I am not sure how to catch it. I am unhappy
+ # doing a blanket catch here, but am not sure what a
+ # better way is - JDH
+ _log.info('Could not load matplotlib icon: %s', sys.exc_info()[1])
+
+ self.vbox = Gtk.Box()
+ self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
+ self.window.add(self.vbox)
+ self.vbox.show()
+
+ self.canvas.show()
+
+ self.vbox.pack_start(self.canvas, True, True, 0)
+ # calculate size for window
+ w = int(self.canvas.figure.bbox.width)
+ h = int(self.canvas.figure.bbox.height)
+
+ self.toolmanager = self._get_toolmanager()
+ self.toolbar = self._get_toolbar()
+ self.statusbar = None
+
+ def add_widget(child, expand, fill, padding):
+ child.show()
+ self.vbox.pack_end(child, False, False, 0)
+ size_request = child.size_request()
+ return size_request.height
+
+ if self.toolmanager:
+ backend_tools.add_tools_to_manager(self.toolmanager)
+ if self.toolbar:
+ backend_tools.add_tools_to_container(self.toolbar)
+ self.statusbar = StatusbarGTK3(self.toolmanager)
+ h += add_widget(self.statusbar, False, False, 0)
+ h += add_widget(Gtk.HSeparator(), False, False, 0)
+
+ if self.toolbar is not None:
+ self.toolbar.show()
+ h += add_widget(self.toolbar, False, False, 0)
+
+ self.window.set_default_size(w, h)
+
+ def destroy(*args):
+ Gcf.destroy(num)
+ self.window.connect("destroy", destroy)
+ self.window.connect("delete_event", destroy)
+ if matplotlib.is_interactive():
+ self.window.show()
+ self.canvas.draw_idle()
+
+ def notify_axes_change(fig):
+ 'this will be called whenever the current axes is changed'
+ if self.toolmanager is not None:
+ pass
+ elif self.toolbar is not None:
+ self.toolbar.update()
+ self.canvas.figure.add_axobserver(notify_axes_change)
+
+ self.canvas.grab_focus()
+
+ def destroy(self, *args):
+ self.vbox.destroy()
+ self.window.destroy()
+ self.canvas.destroy()
+ if self.toolbar:
+ self.toolbar.destroy()
+
+ if (Gcf.get_num_fig_managers() == 0 and
+ not matplotlib.is_interactive() and
+ Gtk.main_level() >= 1):
+ Gtk.main_quit()
+
+ def show(self):
+ # show the figure window
+ self.window.show()
+ self.window.present()
+
+ def full_screen_toggle(self):
+ self._full_screen_flag = not self._full_screen_flag
+ if self._full_screen_flag:
+ self.window.fullscreen()
+ else:
+ self.window.unfullscreen()
+ _full_screen_flag = False
+
+ def _get_toolbar(self):
+ # must be inited after the window, drawingArea and figure
+ # attrs are set
+ if rcParams['toolbar'] == 'toolbar2':
+ toolbar = NavigationToolbar2GTK3(self.canvas, self.window)
+ elif rcParams['toolbar'] == 'toolmanager':
+ toolbar = ToolbarGTK3(self.toolmanager)
+ else:
+ toolbar = None
+ return toolbar
+
+ def _get_toolmanager(self):
+ # must be initialised after toolbar has been set
+ if rcParams['toolbar'] == 'toolmanager':
+ toolmanager = ToolManager(self.canvas.figure)
+ else:
+ toolmanager = None
+ return toolmanager
+
+ def get_window_title(self):
+ return self.window.get_title()
+
+ def set_window_title(self, title):
+ self.window.set_title(title)
+
+ def resize(self, width, height):
+ 'set the canvas size in pixels'
+ #_, _, cw, ch = self.canvas.allocation
+ #_, _, ww, wh = self.window.allocation
+ #self.window.resize (width-cw+ww, height-ch+wh)
+ self.window.resize(width, height)
+
+
+class NavigationToolbar2GTK3(NavigationToolbar2, Gtk.Toolbar):
+ def __init__(self, canvas, window):
+ self.win = window
+ GObject.GObject.__init__(self)
+ NavigationToolbar2.__init__(self, canvas)
+ self.ctx = None
+
+ def set_message(self, s):
+ self.message.set_label(s)
+
+ def set_cursor(self, cursor):
+ self.canvas.get_property("window").set_cursor(cursord[cursor])
+ Gtk.main_iteration()
+
+ def release(self, event):
+ try: del self._pixmapBack
+ except AttributeError: pass
+
+ def draw_rubberband(self, event, x0, y0, x1, y1):
+ 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
+ self.ctx = self.canvas.get_property("window").cairo_create()
+
+ # todo: instead of redrawing the entire figure, copy the part of
+ # the figure that was covered by the previous rubberband rectangle
+ self.canvas.draw()
+
+ height = self.canvas.figure.bbox.height
+ y1 = height - y1
+ y0 = height - y0
+ w = abs(x1 - x0)
+ h = abs(y1 - y0)
+ rect = [int(val) for val in (min(x0,x1), min(y0, y1), w, h)]
+
+ self.ctx.new_path()
+ self.ctx.set_line_width(0.5)
+ self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
+ self.ctx.set_source_rgb(0, 0, 0)
+ self.ctx.stroke()
+
+ def _init_toolbar(self):
+ self.set_style(Gtk.ToolbarStyle.ICONS)
+ basedir = os.path.join(rcParams['datapath'],'images')
+
+ for text, tooltip_text, image_file, callback in self.toolitems:
+ if text is None:
+ self.insert( Gtk.SeparatorToolItem(), -1 )
+ continue
+ fname = os.path.join(basedir, image_file + '.png')
+ image = Gtk.Image()
+ image.set_from_file(fname)
+ tbutton = Gtk.ToolButton()
+ tbutton.set_label(text)
+ tbutton.set_icon_widget(image)
+ self.insert(tbutton, -1)
+ tbutton.connect('clicked', getattr(self, callback))
+ tbutton.set_tooltip_text(tooltip_text)
+
+ toolitem = Gtk.SeparatorToolItem()
+ self.insert(toolitem, -1)
+ toolitem.set_draw(False)
+ toolitem.set_expand(True)
+
+ toolitem = Gtk.ToolItem()
+ self.insert(toolitem, -1)
+ self.message = Gtk.Label()
+ toolitem.add(self.message)
+
+ self.show_all()
+
+ def get_filechooser(self):
+ fc = FileChooserDialog(
+ title='Save the figure',
+ parent=self.win,
+ path=os.path.expanduser(rcParams['savefig.directory']),
+ filetypes=self.canvas.get_supported_filetypes(),
+ default_filetype=self.canvas.get_default_filetype())
+ fc.set_current_name(self.canvas.get_default_filename())
+ return fc
+
+ def save_figure(self, *args):
+ chooser = self.get_filechooser()
+ fname, format = chooser.get_filename_from_user()
+ chooser.destroy()
+ if fname:
+ startpath = os.path.expanduser(rcParams['savefig.directory'])
+ # Save dir for next time, unless empty str (i.e., use cwd).
+ if startpath != "":
+ rcParams['savefig.directory'] = (
+ os.path.dirname(six.text_type(fname)))
+ try:
+ self.canvas.figure.savefig(fname, format=format)
+ except Exception as e:
+ error_msg_gtk(str(e), parent=self)
+
+ def configure_subplots(self, button):
+ toolfig = Figure(figsize=(6,3))
+ canvas = self._get_canvas(toolfig)
+ toolfig.subplots_adjust(top=0.9)
+ tool = SubplotTool(self.canvas.figure, toolfig)
+
+ w = int(toolfig.bbox.width)
+ h = int(toolfig.bbox.height)
+
+ window = Gtk.Window()
+ try:
+ window.set_icon_from_file(window_icon)
+ except (SystemExit, KeyboardInterrupt):
+ # re-raise exit type Exceptions
+ raise
+ except:
+ # we presumably already logged a message on the
+ # failure of the main plot, don't keep reporting
+ pass
+ window.set_title("Subplot Configuration Tool")
+ window.set_default_size(w, h)
+ vbox = Gtk.Box()
+ vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
+ window.add(vbox)
+ vbox.show()
+
+ canvas.show()
+ vbox.pack_start(canvas, True, True, 0)
+ window.show()
+
+ def _get_canvas(self, fig):
+ return self.canvas.__class__(fig)
+
+
+class FileChooserDialog(Gtk.FileChooserDialog):
+ """GTK+ file selector which remembers the last file/directory
+ selected and presents the user with a menu of supported image formats
+ """
+ def __init__ (self,
+ title = 'Save file',
+ parent = None,
+ action = Gtk.FileChooserAction.SAVE,
+ buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
+ Gtk.STOCK_SAVE, Gtk.ResponseType.OK),
+ path = None,
+ filetypes = [],
+ default_filetype = None
+ ):
+ super (FileChooserDialog, self).__init__ (title, parent, action,
+ buttons)
+ self.set_default_response (Gtk.ResponseType.OK)
+
+ if not path: path = os.getcwd() + os.sep
+
+ # create an extra widget to list supported image formats
+ self.set_current_folder (path)
+ self.set_current_name ('image.' + default_filetype)
+
+ hbox = Gtk.Box(spacing=10)
+ hbox.pack_start(Gtk.Label(label="File Format:"), False, False, 0)
+
+ liststore = Gtk.ListStore(GObject.TYPE_STRING)
+ cbox = Gtk.ComboBox() #liststore)
+ cbox.set_model(liststore)
+ cell = Gtk.CellRendererText()
+ cbox.pack_start(cell, True)
+ cbox.add_attribute(cell, 'text', 0)
+ hbox.pack_start(cbox, False, False, 0)
+
+ self.filetypes = filetypes
+ self.sorted_filetypes = sorted(six.iteritems(filetypes))
+ default = 0
+ for i, (ext, name) in enumerate(self.sorted_filetypes):
+ liststore.append(["%s (*.%s)" % (name, ext)])
+ if ext == default_filetype:
+ default = i
+ cbox.set_active(default)
+ self.ext = default_filetype
+
+ def cb_cbox_changed (cbox, data=None):
+ """File extension changed"""
+ head, filename = os.path.split(self.get_filename())
+ root, ext = os.path.splitext(filename)
+ ext = ext[1:]
+ new_ext = self.sorted_filetypes[cbox.get_active()][0]
+ self.ext = new_ext
+
+ if ext in self.filetypes:
+ filename = root + '.' + new_ext
+ elif ext == '':
+ filename = filename.rstrip('.') + '.' + new_ext
+
+ self.set_current_name (filename)
+ cbox.connect ("changed", cb_cbox_changed)
+
+ hbox.show_all()
+ self.set_extra_widget(hbox)
+
+ def get_filename_from_user (self):
+ while True:
+ filename = None
+ if self.run() != int(Gtk.ResponseType.OK):
+ break
+ filename = self.get_filename()
+ break
+
+ return filename, self.ext
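+
+# A minimal usage sketch (not part of the upstream backend): how the
+# save-figure code above typically drives FileChooserDialog.  The helper
+# name and its ``canvas``/``parent_window`` arguments are assumptions made
+# for illustration only.
+def _example_save_dialog(canvas, parent_window=None):
+    fc = FileChooserDialog(
+        title='Save the figure',
+        parent=parent_window,
+        filetypes=canvas.get_supported_filetypes(),
+        default_filetype=canvas.get_default_filetype())
+    fc.set_current_name(canvas.get_default_filename())
+    fname, fmt = fc.get_filename_from_user()  # blocks until the dialog closes
+    fc.destroy()
+    return fname, fmt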
+
+
+class RubberbandGTK3(backend_tools.RubberbandBase):
+ def __init__(self, *args, **kwargs):
+ backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
+ self.ctx = None
+
+ def draw_rubberband(self, x0, y0, x1, y1):
+ # 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/
+ # Recipe/189744'
+ self.ctx = self.figure.canvas.get_property("window").cairo_create()
+
+ # todo: instead of redrawing the entire figure, copy the part of
+ # the figure that was covered by the previous rubberband rectangle
+ self.figure.canvas.draw()
+
+ height = self.figure.bbox.height
+ y1 = height - y1
+ y0 = height - y0
+ w = abs(x1 - x0)
+ h = abs(y1 - y0)
+ rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
+
+ self.ctx.new_path()
+ self.ctx.set_line_width(0.5)
+ self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
+ self.ctx.set_source_rgb(0, 0, 0)
+ self.ctx.stroke()
+
+
+class ToolbarGTK3(ToolContainerBase, Gtk.Box):
+ _icon_extension = '.png'
+ def __init__(self, toolmanager):
+ ToolContainerBase.__init__(self, toolmanager)
+ Gtk.Box.__init__(self)
+ self.set_property("orientation", Gtk.Orientation.VERTICAL)
+
+ self._toolarea = Gtk.Box()
+ self._toolarea.set_property('orientation', Gtk.Orientation.HORIZONTAL)
+ self.pack_start(self._toolarea, False, False, 0)
+ self._toolarea.show_all()
+ self._groups = {}
+ self._toolitems = {}
+
+ def add_toolitem(self, name, group, position, image_file, description,
+ toggle):
+ if toggle:
+ tbutton = Gtk.ToggleToolButton()
+ else:
+ tbutton = Gtk.ToolButton()
+ tbutton.set_label(name)
+
+ if image_file is not None:
+ image = Gtk.Image()
+ image.set_from_file(image_file)
+ tbutton.set_icon_widget(image)
+
+ if position is None:
+ position = -1
+
+ self._add_button(tbutton, group, position)
+ signal = tbutton.connect('clicked', self._call_tool, name)
+ tbutton.set_tooltip_text(description)
+ tbutton.show_all()
+ self._toolitems.setdefault(name, [])
+ self._toolitems[name].append((tbutton, signal))
+
+ def _add_button(self, button, group, position):
+ if group not in self._groups:
+ if self._groups:
+ self._add_separator()
+ toolbar = Gtk.Toolbar()
+ toolbar.set_style(Gtk.ToolbarStyle.ICONS)
+ self._toolarea.pack_start(toolbar, False, False, 0)
+ toolbar.show_all()
+ self._groups[group] = toolbar
+ self._groups[group].insert(button, position)
+
+ def _call_tool(self, btn, name):
+ self.trigger_tool(name)
+
+ def toggle_toolitem(self, name, toggled):
+ if name not in self._toolitems:
+ return
+ for toolitem, signal in self._toolitems[name]:
+ toolitem.handler_block(signal)
+ toolitem.set_active(toggled)
+ toolitem.handler_unblock(signal)
+
+ def remove_toolitem(self, name):
+ if name not in self._toolitems:
+ self.toolmanager.message_event('%s Not in toolbar' % name, self)
+ return
+
+ for group in self._groups:
+ for toolitem, _signal in self._toolitems[name]:
+ if toolitem in self._groups[group]:
+ self._groups[group].remove(toolitem)
+ del self._toolitems[name]
+
+ def _add_separator(self):
+ sep = Gtk.Separator()
+ sep.set_property("orientation", Gtk.Orientation.VERTICAL)
+ self._toolarea.pack_start(sep, False, True, 0)
+ sep.show_all()
+
+
+class StatusbarGTK3(StatusbarBase, Gtk.Statusbar):
+ def __init__(self, *args, **kwargs):
+ StatusbarBase.__init__(self, *args, **kwargs)
+ Gtk.Statusbar.__init__(self)
+ self._context = self.get_context_id('message')
+
+ def set_message(self, s):
+ self.pop(self._context)
+ self.push(self._context, s)
+
+
+class SaveFigureGTK3(backend_tools.SaveFigureBase):
+
+ def get_filechooser(self):
+ fc = FileChooserDialog(
+ title='Save the figure',
+ parent=self.figure.canvas.manager.window,
+ path=os.path.expanduser(rcParams['savefig.directory']),
+ filetypes=self.figure.canvas.get_supported_filetypes(),
+ default_filetype=self.figure.canvas.get_default_filetype())
+ fc.set_current_name(self.figure.canvas.get_default_filename())
+ return fc
+
+ def trigger(self, *args, **kwargs):
+ chooser = self.get_filechooser()
+ fname, format_ = chooser.get_filename_from_user()
+ chooser.destroy()
+ if fname:
+ startpath = os.path.expanduser(rcParams['savefig.directory'])
+ if startpath == '':
+ # explicitly missing key or empty str signals to use cwd
+ rcParams['savefig.directory'] = startpath
+ else:
+ # save dir for next time
+ rcParams['savefig.directory'] = os.path.dirname(
+ six.text_type(fname))
+ try:
+ self.figure.canvas.print_figure(fname, format=format_)
+ except Exception as e:
+ error_msg_gtk(str(e), parent=self)
+
+
+class SetCursorGTK3(backend_tools.SetCursorBase):
+ def set_cursor(self, cursor):
+ self.figure.canvas.get_property("window").set_cursor(cursord[cursor])
+
+
+class ConfigureSubplotsGTK3(backend_tools.ConfigureSubplotsBase, Gtk.Window):
+ def __init__(self, *args, **kwargs):
+ backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
+ self.window = None
+
+ def init_window(self):
+ if self.window:
+ return
+ self.window = Gtk.Window(title="Subplot Configuration Tool")
+
+ try:
+ self.window.window.set_icon_from_file(window_icon)
+ except (SystemExit, KeyboardInterrupt):
+ # re-raise exit type Exceptions
+ raise
+ except:
+ # we presumably already logged a message on the
+ # failure of the main plot, don't keep reporting
+ pass
+
+ self.vbox = Gtk.Box()
+ self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
+ self.window.add(self.vbox)
+ self.vbox.show()
+ self.window.connect('destroy', self.destroy)
+
+ toolfig = Figure(figsize=(6, 3))
+ canvas = self.figure.canvas.__class__(toolfig)
+
+ toolfig.subplots_adjust(top=0.9)
+ SubplotTool(self.figure, toolfig)
+
+ w = int(toolfig.bbox.width)
+ h = int(toolfig.bbox.height)
+
+ self.window.set_default_size(w, h)
+
+ canvas.show()
+ self.vbox.pack_start(canvas, True, True, 0)
+ self.window.show()
+
+ def destroy(self, *args):
+ self.window.destroy()
+ self.window = None
+
+ def _get_canvas(self, fig):
+ return self.canvas.__class__(fig)
+
+ def trigger(self, sender, event, data=None):
+ self.init_window()
+ self.window.present()
+
+
+# Define the file to use as the GTK icon
+if sys.platform == 'win32':
+ icon_filename = 'matplotlib.png'
+else:
+ icon_filename = 'matplotlib.svg'
+window_icon = os.path.join(
+ matplotlib.rcParams['datapath'], 'images', icon_filename)
+
+
+def error_msg_gtk(msg, parent=None):
+ if parent is not None: # find the toplevel Gtk.Window
+ parent = parent.get_toplevel()
+ if not parent.is_toplevel():
+ parent = None
+
+ if not isinstance(msg, six.string_types):
+ msg = ','.join(map(str, msg))
+
+ dialog = Gtk.MessageDialog(
+ parent = parent,
+ type = Gtk.MessageType.ERROR,
+ buttons = Gtk.ButtonsType.OK,
+ message_format = msg)
+ dialog.run()
+ dialog.destroy()
+
+
+backend_tools.ToolSaveFigure = SaveFigureGTK3
+backend_tools.ToolConfigureSubplots = ConfigureSubplotsGTK3
+backend_tools.ToolSetCursor = SetCursorGTK3
+backend_tools.ToolRubberband = RubberbandGTK3
+
+Toolbar = ToolbarGTK3
+
+
+@_Backend.export
+class _BackendGTK3(_Backend):
+ FigureCanvas = FigureCanvasGTK3
+ FigureManager = FigureManagerGTK3
+
+ @staticmethod
+ def trigger_manager_draw(manager):
+ manager.canvas.draw_idle()
+
+ @staticmethod
+ def mainloop():
+ if Gtk.main_level() == 0:
+ Gtk.main()
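+
+
+# A minimal usage sketch (not part of the upstream backend) for the
+# ``new_timer`` API documented above.  ``canvas`` is assumed to be an
+# instance of a concrete GTK3 canvas subclass (Agg or Cairo); the helper
+# name is hypothetical.
+def _example_new_timer(canvas):
+    def _tick():
+        sys.stdout.write('tick\n')
+    # TimerGTK3 schedules the callback on the GLib main loop.
+    timer = canvas.new_timer(interval=500)  # interval in milliseconds
+    timer.add_callback(_tick)
+    timer.start()
+    return timer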
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3agg.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3agg.py
new file mode 100644
index 00000000000..53c625b8a50
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3agg.py
@@ -0,0 +1,102 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+import warnings
+
+from . import backend_agg, backend_gtk3
+from .backend_cairo import cairo, HAS_CAIRO_CFFI
+from .backend_gtk3 import _BackendGTK3
+from matplotlib import transforms
+
+if six.PY3 and not HAS_CAIRO_CFFI:
+ warnings.warn(
+ "The Gtk3Agg backend is known to not work on Python 3.x with pycairo. "
+ "Try installing cairocffi.")
+
+
+class FigureCanvasGTK3Agg(backend_gtk3.FigureCanvasGTK3,
+ backend_agg.FigureCanvasAgg):
+ def __init__(self, figure):
+ backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
+ self._bbox_queue = []
+
+ def _renderer_init(self):
+ pass
+
+ def _render_figure(self, width, height):
+ backend_agg.FigureCanvasAgg.draw(self)
+
+ def on_draw_event(self, widget, ctx):
+ """ GtkDrawable draw event, like expose_event in GTK 2.X
+ """
+ allocation = self.get_allocation()
+ w, h = allocation.width, allocation.height
+
+ if not len(self._bbox_queue):
+ self._render_figure(w, h)
+ bbox_queue = [transforms.Bbox([[0, 0], [w, h]])]
+ else:
+ bbox_queue = self._bbox_queue
+
+ if HAS_CAIRO_CFFI and not isinstance(ctx, cairo.Context):
+ ctx = cairo.Context._from_pointer(
+ cairo.ffi.cast('cairo_t **',
+ id(ctx) + object.__basicsize__)[0],
+ incref=True)
+
+ for bbox in bbox_queue:
+ area = self.copy_from_bbox(bbox)
+ buf = np.fromstring(area.to_string_argb(), dtype='uint8')
+
+ x = int(bbox.x0)
+ y = h - int(bbox.y1)
+ width = int(bbox.x1) - int(bbox.x0)
+ height = int(bbox.y1) - int(bbox.y0)
+
+ if HAS_CAIRO_CFFI:
+ image = cairo.ImageSurface.create_for_data(
+ buf.data, cairo.FORMAT_ARGB32, width, height)
+ else:
+ image = cairo.ImageSurface.create_for_data(
+ buf, cairo.FORMAT_ARGB32, width, height)
+ ctx.set_source_surface(image, x, y)
+ ctx.paint()
+
+ if len(self._bbox_queue):
+ self._bbox_queue = []
+
+ return False
+
+ def blit(self, bbox=None):
+ # If bbox is None, blit the entire canvas to gtk. Otherwise
+ # blit only the area defined by the bbox.
+ if bbox is None:
+ bbox = self.figure.bbox
+
+ allocation = self.get_allocation()
+ w, h = allocation.width, allocation.height
+ x = int(bbox.x0)
+ y = h - int(bbox.y1)
+ width = int(bbox.x1) - int(bbox.x0)
+ height = int(bbox.y1) - int(bbox.y0)
+
+ self._bbox_queue.append(bbox)
+ self.queue_draw_area(x, y, width, height)
+
+ def print_png(self, filename, *args, **kwargs):
+ # Do this so we can save the resolution of the figure in the PNG file
+ agg = self.switch_backends(backend_agg.FigureCanvasAgg)
+ return agg.print_png(filename, *args, **kwargs)
+
+
+class FigureManagerGTK3Agg(backend_gtk3.FigureManagerGTK3):
+ pass
+
+
+@_BackendGTK3.export
+class _BackendGTK3Agg(_BackendGTK3):
+ FigureCanvas = FigureCanvasGTK3Agg
+ FigureManager = FigureManagerGTK3Agg
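+
+
+# A minimal sketch (not part of the upstream backend) of the blitting
+# pattern that ``blit`` and ``_bbox_queue`` above support: draw once, cache
+# the clean background, then redraw only the changed artist and blit its
+# bounding box.  ``canvas``, ``ax`` and ``line`` are assumed to exist.
+def _example_blit_step(canvas, ax, line):
+    canvas.draw()                                # one full draw
+    background = canvas.copy_from_bbox(ax.bbox)  # cache the clean axes region
+    line.set_ydata([y + 0.1 for y in line.get_ydata()])
+    canvas.restore_region(background)            # restore cached pixels
+    ax.draw_artist(line)                         # redraw just this artist
+    canvas.blit(ax.bbox)                         # queue only the damaged area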
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3cairo.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3cairo.py
new file mode 100644
index 00000000000..2591b112d2c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtk3cairo.py
@@ -0,0 +1,55 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from . import backend_cairo, backend_gtk3
+from .backend_cairo import cairo, HAS_CAIRO_CFFI
+from .backend_gtk3 import _BackendGTK3
+from matplotlib.backend_bases import cursors
+
+
+class RendererGTK3Cairo(backend_cairo.RendererCairo):
+ def set_context(self, ctx):
+ if HAS_CAIRO_CFFI and not isinstance(ctx, cairo.Context):
+ ctx = cairo.Context._from_pointer(
+ cairo.ffi.cast(
+ 'cairo_t **',
+ id(ctx) + object.__basicsize__)[0],
+ incref=True)
+
+ self.gc.ctx = ctx
+
+
+class FigureCanvasGTK3Cairo(backend_gtk3.FigureCanvasGTK3,
+ backend_cairo.FigureCanvasCairo):
+
+ def _renderer_init(self):
+ """Use cairo renderer."""
+ self._renderer = RendererGTK3Cairo(self.figure.dpi)
+
+ def _render_figure(self, width, height):
+ self._renderer.set_width_height(width, height)
+ self.figure.draw(self._renderer)
+
+ def on_draw_event(self, widget, ctx):
+ """GtkDrawable draw event."""
+ toolbar = self.toolbar
+ # if toolbar:
+ # toolbar.set_cursor(cursors.WAIT)
+ self._renderer.set_context(ctx)
+ allocation = self.get_allocation()
+ self._render_figure(allocation.width, allocation.height)
+ # if toolbar:
+ # toolbar.set_cursor(toolbar._lastCursor)
+ return False # finish event propagation?
+
+
+class FigureManagerGTK3Cairo(backend_gtk3.FigureManagerGTK3):
+ pass
+
+
+@_BackendGTK3.export
+class _BackendGTK3Cairo(_BackendGTK3):
+ FigureCanvas = FigureCanvasGTK3Cairo
+ FigureManager = FigureManagerGTK3Cairo
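+
+
+# A minimal sketch (not part of the upstream backend): selecting this
+# backend explicitly.  ``matplotlib.use`` must be called before pyplot is
+# imported; the helper name is hypothetical.
+def _example_use_gtk3cairo():
+    import matplotlib
+    matplotlib.use('GTK3Cairo')
+    import matplotlib.pyplot as plt
+    fig, ax = plt.subplots()
+    ax.plot([0, 1, 2], [0, 1, 4])
+    plt.show()  # enters the GTK3 main loop when non-interactive
+    return fig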
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtkagg.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtkagg.py
new file mode 100644
index 00000000000..14240647ccb
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtkagg.py
@@ -0,0 +1,96 @@
+"""
+Render to gtk from agg
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import matplotlib
+from matplotlib.cbook import warn_deprecated
+from matplotlib.backends.backend_agg import FigureCanvasAgg
+from matplotlib.backends.backend_gtk import (
+ gtk, _BackendGTK, FigureCanvasGTK, FigureManagerGTK, NavigationToolbar2GTK,
+ backend_version, error_msg_gtk, PIXELS_PER_INCH)
+from matplotlib.backends._gtkagg import agg_to_gtk_drawable
+
+
+class NavigationToolbar2GTKAgg(NavigationToolbar2GTK):
+ def _get_canvas(self, fig):
+ return FigureCanvasGTKAgg(fig)
+
+
+class FigureManagerGTKAgg(FigureManagerGTK):
+ def _get_toolbar(self, canvas):
+ # must be inited after the window, drawingArea and figure
+ # attrs are set
+ if matplotlib.rcParams['toolbar']=='toolbar2':
+ toolbar = NavigationToolbar2GTKAgg (canvas, self.window)
+ else:
+ toolbar = None
+ return toolbar
+
+
+class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg):
+ filetypes = FigureCanvasGTK.filetypes.copy()
+ filetypes.update(FigureCanvasAgg.filetypes)
+
+ def __init__(self, *args, **kwargs):
+ warn_deprecated('2.2',
+ message=('The GTKAgg backend is deprecated. It is '
+ 'untested and will be removed in Matplotlib '
+ '3.0. Use the GTK3Agg backend instead. See '
+ 'Matplotlib usage FAQ for more info on '
+ 'backends.'),
+ alternative='GTK3Agg')
+ super(FigureCanvasGTKAgg, self).__init__(*args, **kwargs)
+
+ def configure_event(self, widget, event=None):
+
+ if widget.window is None:
+ return
+ try:
+ del self.renderer
+ except AttributeError:
+ pass
+ w,h = widget.window.get_size()
+ if w==1 or h==1: return # empty fig
+
+ # compute desired figure size in inches
+ dpival = self.figure.dpi
+ winch = w/dpival
+ hinch = h/dpival
+ self.figure.set_size_inches(winch, hinch, forward=False)
+ self._need_redraw = True
+ self.resize_event()
+ return True
+
+ def _render_figure(self, pixmap, width, height):
+ FigureCanvasAgg.draw(self)
+
+ buf = self.buffer_rgba()
+ ren = self.get_renderer()
+ w = int(ren.width)
+ h = int(ren.height)
+
+ pixbuf = gtk.gdk.pixbuf_new_from_data(
+ buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4)
+ pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h,
+ gtk.gdk.RGB_DITHER_NONE, 0, 0)
+
+ def blit(self, bbox=None):
+ agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox)
+ x, y, w, h = self.allocation
+ self.window.draw_drawable(self.style.fg_gc[self.state], self._pixmap,
+ 0, 0, 0, 0, w, h)
+
+ def print_png(self, filename, *args, **kwargs):
+ # Do this so we can save the resolution of the figure in the PNG file
+ agg = self.switch_backends(FigureCanvasAgg)
+ return agg.print_png(filename, *args, **kwargs)
+
+
+@_BackendGTK.export
+class _BackendGTKAgg(_BackendGTK):
+ FigureCanvas = FigureCanvasGTKAgg
+ FigureManager = FigureManagerGTKAgg
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtkcairo.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtkcairo.py
new file mode 100644
index 00000000000..87e6debae79
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_gtkcairo.py
@@ -0,0 +1,74 @@
+"""
+GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
+Author: Steve Chaplin
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import gtk
+if gtk.pygtk_version < (2, 7, 0):
+ import cairo.gtk
+
+from matplotlib import cbook
+from matplotlib.backends import backend_cairo
+from matplotlib.backends.backend_gtk import *
+from matplotlib.backends.backend_gtk import _BackendGTK
+
+backend_version = ('PyGTK(%d.%d.%d) ' % gtk.pygtk_version
+ + 'Pycairo(%s)' % backend_cairo.backend_version)
+
+
+class RendererGTKCairo (backend_cairo.RendererCairo):
+ if gtk.pygtk_version >= (2,7,0):
+ def set_pixmap (self, pixmap):
+ self.gc.ctx = pixmap.cairo_create()
+ else:
+ def set_pixmap (self, pixmap):
+ self.gc.ctx = cairo.gtk.gdk_cairo_create (pixmap)
+
+
+class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
+ filetypes = FigureCanvasGTK.filetypes.copy()
+ filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
+
+ def __init__(self, *args, **kwargs):
+ warn_deprecated('2.2',
+ message=('The GTKCairo backend is deprecated. It is '
+ 'untested and will be removed in Matplotlib '
+ '3.0. Use the GTK3Cairo backend instead. See '
+ 'Matplotlib usage FAQ for more info on '
+ 'backends.'),
+ alternative='GTK3Cairo')
+ super(FigureCanvasGTKCairo, self).__init__(*args, **kwargs)
+
+ def _renderer_init(self):
+ """Override to use cairo (rather than GDK) renderer"""
+ self._renderer = RendererGTKCairo(self.figure.dpi)
+
+
+# This class has been unused for a while at least.
+@cbook.deprecated("2.1")
+class FigureManagerGTKCairo(FigureManagerGTK):
+ def _get_toolbar(self, canvas):
+ # must be inited after the window, drawingArea and figure
+ # attrs are set
+ if matplotlib.rcParams['toolbar']=='toolbar2':
+ toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
+ else:
+ toolbar = None
+ return toolbar
+
+
+# This class has been unused for a while at least.
+@cbook.deprecated("2.1")
+class NavigationToolbar2Cairo(NavigationToolbar2GTK):
+ def _get_canvas(self, fig):
+ return FigureCanvasGTKCairo(fig)
+
+
+@_BackendGTK.export
+class _BackendGTKCairo(_BackendGTK):
+ FigureCanvas = FigureCanvasGTKCairo
+ FigureManager = FigureManagerGTK
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_macosx.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_macosx.py
new file mode 100644
index 00000000000..4ab5d0c9077
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_macosx.py
@@ -0,0 +1,210 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import os
+
+from matplotlib._pylab_helpers import Gcf
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
+ TimerBase)
+
+from matplotlib.figure import Figure
+from matplotlib import rcParams
+
+from matplotlib.widgets import SubplotTool
+
+import matplotlib
+from matplotlib.backends import _macosx
+
+from .backend_agg import FigureCanvasAgg
+
+
+########################################################################
+#
+# The following functions and classes are for pylab and implement
+# window/figure managers, etc...
+#
+########################################################################
+
+
+class TimerMac(_macosx.Timer, TimerBase):
+ '''
+ Subclass of :class:`backend_bases.TimerBase` that uses CoreFoundation
+ run loops for timer events.
+
+ Attributes
+ ----------
+ interval : int
+ The time between timer events in milliseconds. Default is 1000 ms.
+ single_shot : bool
+ Boolean flag indicating whether this timer should operate as single
+ shot (run once and then stop). Defaults to False.
+ callbacks : list
+ Stores list of (func, args) tuples that will be called upon timer
+ events. This list can be manipulated directly, or the functions
+ `add_callback` and `remove_callback` can be used.
+
+ '''
+ # completely implemented at the C-level (in _macosx.Timer)
+
+
+class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasAgg):
+ """
+ The canvas the figure renders into. Calls the draw and print fig
+ methods, creates the renderers, etc...
+
+ Events such as button presses, mouse movements, and key presses
+ are handled in the C code and the base class methods
+ button_press_event, button_release_event, motion_notify_event,
+ key_press_event, and key_release_event are called from there.
+
+ Attributes
+ ----------
+ figure : `matplotlib.figure.Figure`
+ A high-level Figure instance
+
+ """
+
+ def __init__(self, figure):
+ FigureCanvasBase.__init__(self, figure)
+ width, height = self.get_width_height()
+ _macosx.FigureCanvas.__init__(self, width, height)
+ self._device_scale = 1.0
+
+ def _set_device_scale(self, value):
+ if self._device_scale != value:
+ self.figure.dpi = self.figure.dpi / self._device_scale * value
+ self._device_scale = value
+
+ def _draw(self):
+ renderer = self.get_renderer(cleared=self.figure.stale)
+
+ if self.figure.stale:
+ self.figure.draw(renderer)
+
+ return renderer
+
+ def draw(self):
+ self.invalidate()
+ self.flush_events()
+
+ def draw_idle(self, *args, **kwargs):
+ self.invalidate()
+
+ def blit(self, bbox):
+ self.invalidate()
+
+ def resize(self, width, height):
+ dpi = self.figure.dpi
+ width /= dpi
+ height /= dpi
+ self.figure.set_size_inches(width * self._device_scale,
+ height * self._device_scale,
+ forward=False)
+ FigureCanvasBase.resize_event(self)
+ self.draw_idle()
+
+ def new_timer(self, *args, **kwargs):
+ """
+ Creates a new backend-specific subclass of `backend_bases.Timer`.
+ This is useful for getting periodic events through the backend's native
+ event loop. Implemented only for backends with GUIs.
+
+ Other Parameters
+ ----------------
+ interval : scalar
+ Timer interval in milliseconds
+ callbacks : list
+ Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
+ will be executed by the timer every *interval*.
+ """
+ return TimerMac(*args, **kwargs)
+
+
+class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
+ """
+ Wrap everything up into a window for the pylab interface
+ """
+ def __init__(self, canvas, num):
+ FigureManagerBase.__init__(self, canvas, num)
+ title = "Figure %d" % num
+ _macosx.FigureManager.__init__(self, canvas, title)
+ if rcParams['toolbar']=='toolbar2':
+ self.toolbar = NavigationToolbar2Mac(canvas)
+ else:
+ self.toolbar = None
+ if self.toolbar is not None:
+ self.toolbar.update()
+
+ def notify_axes_change(fig):
+ 'this will be called whenever the current axes is changed'
+ if self.toolbar is not None: self.toolbar.update()
+ self.canvas.figure.add_axobserver(notify_axes_change)
+
+ if matplotlib.is_interactive():
+ self.show()
+ self.canvas.draw_idle()
+
+ def close(self):
+ Gcf.destroy(self.num)
+
+
+class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
+
+ def __init__(self, canvas):
+ NavigationToolbar2.__init__(self, canvas)
+
+ def _init_toolbar(self):
+ basedir = os.path.join(rcParams['datapath'], "images")
+ _macosx.NavigationToolbar2.__init__(self, basedir)
+
+ def draw_rubberband(self, event, x0, y0, x1, y1):
+ self.canvas.set_rubberband(int(x0), int(y0), int(x1), int(y1))
+
+ def release(self, event):
+ self.canvas.remove_rubberband()
+
+ def set_cursor(self, cursor):
+ _macosx.set_cursor(cursor)
+
+ def save_figure(self, *args):
+ filename = _macosx.choose_save_file('Save the figure',
+ self.canvas.get_default_filename())
+ if filename is None: # Cancel
+ return
+ self.canvas.figure.savefig(filename)
+
+ def prepare_configure_subplots(self):
+ toolfig = Figure(figsize=(6,3))
+ canvas = FigureCanvasMac(toolfig)
+ toolfig.subplots_adjust(top=0.9)
+ tool = SubplotTool(self.canvas.figure, toolfig)
+ return canvas
+
+ def set_message(self, message):
+ _macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
+
+
+########################################################################
+#
+# Now just provide the standard names that backend.__init__ is expecting
+#
+########################################################################
+
+@_Backend.export
+class _BackendMac(_Backend):
+ FigureCanvas = FigureCanvasMac
+ FigureManager = FigureManagerMac
+
+ @staticmethod
+ def trigger_manager_draw(manager):
+ # For performance reasons, we don't want to redraw the figure after
+ # each draw command. Instead, we mark the figure as invalid, so that it
+ # will be redrawn as soon as the event loop resumes via PyOS_InputHook.
+ # This function should be called after each draw event, even if
+ # matplotlib is not running interactively.
+ manager.canvas.invalidate()
+
+ @staticmethod
+ def mainloop():
+ _macosx.show()
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_mixed.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_mixed.py
new file mode 100644
index 00000000000..8e475bd13c9
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_mixed.py
@@ -0,0 +1,155 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import numpy as np
+
+import six
+
+from matplotlib.backends.backend_agg import RendererAgg
+from matplotlib.tight_bbox import process_figure_for_rasterizing
+
+
+class MixedModeRenderer(object):
+ """
+ A helper class to implement a renderer that switches between
+ vector and raster drawing. An example may be a PDF writer, where
+ most things are drawn with PDF vector commands, but some very
+ complex objects, such as quad meshes, are rasterised and then
+ output as images.
+ """
+ def __init__(self, figure, width, height, dpi, vector_renderer,
+ raster_renderer_class=None,
+ bbox_inches_restore=None):
+ """
+ Parameters
+ ----------
+ figure : `matplotlib.figure.Figure`
+ The figure instance.
+
+ width : scalar
+ The width of the canvas in logical units
+
+ height : scalar
+ The height of the canvas in logical units
+
+ dpi : scalar
+ The dpi of the canvas
+
+ vector_renderer : `matplotlib.backend_bases.RendererBase`
+ An instance of a subclass of
+ `~matplotlib.backend_bases.RendererBase` that will be used for the
+ vector drawing.
+
+ raster_renderer_class : `matplotlib.backend_bases.RendererBase`
+ The renderer class to use for the raster drawing. If not provided,
+ this will use the Agg backend (which is currently the only viable
+ option anyway).
+
+ """
+ if raster_renderer_class is None:
+ raster_renderer_class = RendererAgg
+
+ self._raster_renderer_class = raster_renderer_class
+ self._width = width
+ self._height = height
+ self.dpi = dpi
+
+ self._vector_renderer = vector_renderer
+
+ self._raster_renderer = None
+ self._rasterizing = 0
+
+ # A reference to the figure is needed as we need to change
+ # the figure dpi before and after the rasterization. Although
+ # this looks ugly, I couldn't find a better solution. -JJL
+ self.figure = figure
+ self._figdpi = figure.get_dpi()
+
+ self._bbox_inches_restore = bbox_inches_restore
+
+ self._set_current_renderer(vector_renderer)
+
+ _methods = """
+ close_group draw_image draw_markers draw_path
+ draw_path_collection draw_quad_mesh draw_tex draw_text
+ finalize flipy get_canvas_width_height get_image_magnification
+ get_texmanager get_text_width_height_descent new_gc open_group
+ option_image_nocomposite points_to_pixels strip_math
+ start_filter stop_filter draw_gouraud_triangle
+ draw_gouraud_triangles option_scale_image
+ _text2path _get_text_path_transform height width
+ """.split()
+
+ def _set_current_renderer(self, renderer):
+ self._renderer = renderer
+
+ for method in self._methods:
+ if hasattr(renderer, method):
+ setattr(self, method, getattr(renderer, method))
+ renderer.start_rasterizing = self.start_rasterizing
+ renderer.stop_rasterizing = self.stop_rasterizing
+
+ def start_rasterizing(self):
+ """
+ Enter "raster" mode. All subsequent drawing commands (until
+ stop_rasterizing is called) will be drawn with the raster
+ backend.
+
+ If start_rasterizing is called multiple times before
+ stop_rasterizing is called, this method has no effect.
+ """
+
+ # change the dpi of the figure temporarily.
+ self.figure.set_dpi(self.dpi)
+
+ if self._bbox_inches_restore: # when tight bbox is used
+ r = process_figure_for_rasterizing(self.figure,
+ self._bbox_inches_restore)
+ self._bbox_inches_restore = r
+
+ if self._rasterizing == 0:
+ self._raster_renderer = self._raster_renderer_class(
+ self._width*self.dpi, self._height*self.dpi, self.dpi)
+ self._set_current_renderer(self._raster_renderer)
+ self._rasterizing += 1
+
+ def stop_rasterizing(self):
+ """
+ Exit "raster" mode. All of the drawing that was done since
+ the last start_rasterizing command will be copied to the
+ vector backend by calling draw_image.
+
+ If stop_rasterizing is called multiple times before
+ start_rasterizing is called, this method has no effect.
+ """
+ self._rasterizing -= 1
+ if self._rasterizing == 0:
+ self._set_current_renderer(self._vector_renderer)
+
+ height = self._height * self.dpi
+ buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
+ l, b, w, h = bounds
+ if w > 0 and h > 0:
+ image = np.frombuffer(buffer, dtype=np.uint8)
+ image = image.reshape((h, w, 4))
+ image = image[::-1]
+ gc = self._renderer.new_gc()
+ # TODO: If the mixedmode resolution differs from the figure's
+ # dpi, the image must be scaled (dpi->_figdpi). Not all
+ # backends support this.
+ self._renderer.draw_image(
+ gc,
+ l * self._figdpi / self.dpi,
+ (height-b-h) * self._figdpi / self.dpi,
+ image)
+ self._raster_renderer = None
+ self._rasterizing = False
+
+ # restore the figure dpi.
+ self.figure.set_dpi(self._figdpi)
+
+ if self._bbox_inches_restore: # when tight bbox is used
+ r = process_figure_for_rasterizing(self.figure,
+ self._bbox_inches_restore,
+ self._figdpi)
+ self._bbox_inches_restore = r
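+
+
+# A minimal sketch (not part of the upstream module) of the raster/vector
+# switching protocol described above.  ``fig`` is a Figure,
+# ``vector_renderer`` a RendererBase subclass of the target vector backend,
+# and ``artist`` a heavy artist (e.g. a QuadMesh) worth rasterizing.
+def _example_mixed_mode(fig, vector_renderer, artist):
+    width, height = fig.get_size_inches()
+    renderer = MixedModeRenderer(fig, width, height, fig.dpi,
+                                 vector_renderer)
+    renderer.start_rasterizing()   # subsequent draws go to a RendererAgg
+    artist.draw(renderer)          # drawn into the raster buffer
+    renderer.stop_rasterizing()    # buffer composited back via draw_image
+    return renderer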
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_nbagg.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_nbagg.py
new file mode 100644
index 00000000000..429fb1e7cce
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_nbagg.py
@@ -0,0 +1,270 @@
+"""Interactive figures in the IPython notebook"""
+# Note: There is a notebook in
+# lib/matplotlib/backends/web_backend/nbagg_uat.ipynb to help verify
+# that changes made maintain expected behaviour.
+
+import six
+
+from base64 import b64encode
+import io
+import json
+import os
+import uuid
+
+from IPython.display import display, Javascript, HTML
+try:
+ # Jupyter/IPython 4.x or later
+ from ipykernel.comm import Comm
+except ImportError:
+ # Jupyter/IPython 3.x or earlier
+ from IPython.kernel.comm import Comm
+
+from matplotlib import rcParams, is_interactive
+from matplotlib._pylab_helpers import Gcf
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, NavigationToolbar2)
+from matplotlib.backends.backend_webagg_core import (
+ FigureCanvasWebAggCore, FigureManagerWebAgg, NavigationToolbar2WebAgg,
+ TimerTornado)
+
+
+def connection_info():
+ """
+ Return a string showing the figure and connection status for
+ the backend. This is intended as a diagnostic tool, and not for general
+ use.
+
+ """
+ result = []
+ for manager in Gcf.get_all_fig_managers():
+ fig = manager.canvas.figure
+ result.append('{0} - {1}'.format((fig.get_label() or
+ "Figure {0}".format(manager.num)),
+ manager.web_sockets))
+ if not is_interactive():
+ result.append('Figures pending show: {0}'.format(len(Gcf._activeQue)))
+ return '\n'.join(result)
+
+
+# Note: Version 3.2 and 4.x icons
+# http://fontawesome.io/3.2.1/icons/
+# http://fontawesome.io/
+# the `fa fa-xxx` part targets font-awesome 4 (IPython 3.x)
+# the icon-xxx part targets font-awesome 3.2.1 (IPython 2.x)
+_FONT_AWESOME_CLASSES = {
+ 'home': 'fa fa-home icon-home',
+ 'back': 'fa fa-arrow-left icon-arrow-left',
+ 'forward': 'fa fa-arrow-right icon-arrow-right',
+ 'zoom_to_rect': 'fa fa-square-o icon-check-empty',
+ 'move': 'fa fa-arrows icon-move',
+ 'download': 'fa fa-floppy-o icon-save',
+ None: None
+}
+
+
+class NavigationIPy(NavigationToolbar2WebAgg):
+
+ # Use the standard toolbar items + download button
+ toolitems = [(text, tooltip_text,
+ _FONT_AWESOME_CLASSES[image_file], name_of_method)
+ for text, tooltip_text, image_file, name_of_method
+ in (NavigationToolbar2.toolitems +
+ (('Download', 'Download plot', 'download', 'download'),))
+ if image_file in _FONT_AWESOME_CLASSES]
+
+
+class FigureManagerNbAgg(FigureManagerWebAgg):
+ ToolbarCls = NavigationIPy
+
+ def __init__(self, canvas, num):
+ self._shown = False
+ FigureManagerWebAgg.__init__(self, canvas, num)
+
+ def display_js(self):
+ # XXX How to do this just once? It has to deal with multiple
+ # browser instances using the same kernel (require.js - but the
+ # file isn't static?).
+ display(Javascript(FigureManagerNbAgg.get_javascript()))
+
+ def show(self):
+ if not self._shown:
+ self.display_js()
+ self._create_comm()
+ else:
+ self.canvas.draw_idle()
+ self._shown = True
+
+ def reshow(self):
+ """
+ A special method to re-show the figure in the notebook.
+
+ """
+ self._shown = False
+ self.show()
+
+ @property
+ def connected(self):
+ return bool(self.web_sockets)
+
+ @classmethod
+ def get_javascript(cls, stream=None):
+ if stream is None:
+ output = io.StringIO()
+ else:
+ output = stream
+ super(FigureManagerNbAgg, cls).get_javascript(stream=output)
+ with io.open(os.path.join(
+ os.path.dirname(__file__),
+ "web_backend", 'js',
+ "nbagg_mpl.js"), encoding='utf8') as fd:
+ output.write(fd.read())
+ if stream is None:
+ return output.getvalue()
+
+ def _create_comm(self):
+ comm = CommSocket(self)
+ self.add_web_socket(comm)
+ return comm
+
+ def destroy(self):
+ self._send_event('close')
+ # need to copy comms as callbacks will modify this list
+ for comm in list(self.web_sockets):
+ comm.on_close()
+ self.clearup_closed()
+
+ def clearup_closed(self):
+ """Clear up any closed Comms."""
+ self.web_sockets = set([socket for socket in self.web_sockets
+ if socket.is_open()])
+
+ if len(self.web_sockets) == 0:
+ self.canvas.close_event()
+
+ def remove_comm(self, comm_id):
+ self.web_sockets = set([socket for socket in self.web_sockets
+ if not socket.comm.comm_id == comm_id])
+
+
+class FigureCanvasNbAgg(FigureCanvasWebAggCore):
+ def new_timer(self, *args, **kwargs):
+ return TimerTornado(*args, **kwargs)
+
+
+class CommSocket(object):
+ """
+ Manages the Comm connection between IPython and the browser (client).
+
+ Comms are 2 way, with the CommSocket being able to publish a message
+ via the send_json method, and handle a message with on_message. On the
+ JS side figure.send_message and figure.ws.onmessage do the sending and
+ receiving respectively.
+
+ """
+ def __init__(self, manager):
+ self.supports_binary = None
+ self.manager = manager
+ self.uuid = str(uuid.uuid4())
+ # Publish an output area with a unique ID. The javascript can then
+ # hook into this area.
+ display(HTML("<div id=%r></div>" % self.uuid))
+ try:
+ self.comm = Comm('matplotlib', data={'id': self.uuid})
+ except AttributeError:
+ raise RuntimeError('Unable to create an IPython notebook Comm '
+ 'instance. Are you in the IPython notebook?')
+ self.comm.on_msg(self.on_message)
+
+ manager = self.manager
+ self._ext_close = False
+
+ def _on_close(close_message):
+ self._ext_close = True
+ manager.remove_comm(close_message['content']['comm_id'])
+ manager.clearup_closed()
+
+ self.comm.on_close(_on_close)
+
+ def is_open(self):
+ return not (self._ext_close or self.comm._closed)
+
+ def on_close(self):
+ # When the socket is closed, deregister the websocket with
+ # the FigureManager.
+ if self.is_open():
+ try:
+ self.comm.close()
+ except KeyError:
+ # apparently already cleaned it up?
+ pass
+
+ def send_json(self, content):
+ self.comm.send({'data': json.dumps(content)})
+
+ def send_binary(self, blob):
+ # The comm is ascii, so we always send the image in base64
+ # encoded data URL form.
+ data = b64encode(blob)
+ if six.PY3:
+ data = data.decode('ascii')
+ data_uri = "data:image/png;base64,{0}".format(data)
+ self.comm.send({'data': data_uri})
+
+ def on_message(self, message):
+ # The 'supports_binary' message is relevant to the
+ # websocket itself. The other messages get passed along
+ # to matplotlib as-is.
+
+ # Every message has a "type" and a "figure_id".
+ message = json.loads(message['content']['data'])
+ if message['type'] == 'closing':
+ self.on_close()
+ self.manager.clearup_closed()
+ elif message['type'] == 'supports_binary':
+ self.supports_binary = message['value']
+ else:
+ self.manager.handle_json(message)
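+
+
+# A minimal sketch (not part of the upstream module) of the message flow
+# described in the CommSocket docstring.  It assumes a live notebook kernel
+# and an existing ``manager`` (a FigureManagerNbAgg instance).
+def _example_comm_roundtrip(manager):
+    sock = CommSocket(manager)           # publishes an output <div> and opens a Comm
+    sock.send_json({'type': 'refresh'})  # outbound JSON goes through the Comm
+    # Inbound messages arrive via on_message; simulate a capability report:
+    sock.on_message({'content': {'data': json.dumps(
+        {'type': 'supports_binary', 'value': True})}})
+    return sock.supports_binary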
+
+
+@_Backend.export
+class _BackendNbAgg(_Backend):
+ FigureCanvas = FigureCanvasNbAgg
+ FigureManager = FigureManagerNbAgg
+
+ @staticmethod
+ def new_figure_manager_given_figure(num, figure):
+ canvas = FigureCanvasNbAgg(figure)
+ manager = FigureManagerNbAgg(canvas, num)
+ if is_interactive():
+ manager.show()
+ figure.canvas.draw_idle()
+ canvas.mpl_connect('close_event', lambda event: Gcf.destroy(num))
+ return manager
+
+ @staticmethod
+ def trigger_manager_draw(manager):
+ manager.show()
+
+ @staticmethod
+ def show(*args, **kwargs):
+ ## TODO: something to do when keyword block==False ?
+ from matplotlib._pylab_helpers import Gcf
+
+ managers = Gcf.get_all_fig_managers()
+ if not managers:
+ return
+
+ interactive = is_interactive()
+
+ for manager in managers:
+ manager.show()
+
+ # plt.figure adds an event which puts the figure in focus
+ # in the activeQue. Disable this behaviour, as it results in
+ # figures being put as the active figure after they have been
+ # shown, even in non-interactive mode.
+ if hasattr(manager, '_cidgcf'):
+ manager.canvas.mpl_disconnect(manager._cidgcf)
+
+ if not interactive and manager in Gcf._activeQue:
+ Gcf._activeQue.remove(manager)
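+
+
+# A minimal sketch (not part of the upstream module): typical notebook-side
+# usage of this backend.  In a notebook cell one would normally just run
+# ``%matplotlib nbagg``; the function form below is the script equivalent.
+def _example_nbagg_usage():
+    import matplotlib
+    matplotlib.use('nbagg')
+    import matplotlib.pyplot as plt
+    fig, ax = plt.subplots()
+    ax.plot([1, 2, 3], [1, 4, 9])
+    plt.show()                 # renders an interactive canvas in the cell
+    return connection_info()   # diagnostic figure/web-socket report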
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_pdf.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_pdf.py
new file mode 100644
index 00000000000..4f248fde9a7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_pdf.py
@@ -0,0 +1,2604 @@
+# -*- coding: utf-8 -*-
+
+"""
+A PDF matplotlib backend
+Author: Jouni K Seppänen <jks@iki.fi>
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six import unichr
+
+import codecs
+import collections
+from datetime import datetime
+from functools import total_ordering
+from io import BytesIO
+import logging
+from math import ceil, cos, floor, pi, sin
+import os
+import re
+import struct
+import sys
+import time
+import warnings
+import zlib
+
+import numpy as np
+
+from matplotlib import cbook, __version__, rcParams
+from matplotlib._pylab_helpers import Gcf
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
+ RendererBase)
+from matplotlib.backends.backend_mixed import MixedModeRenderer
+from matplotlib.cbook import (Bunch, get_realpath_and_stat,
+ is_writable_file_like, maxdict)
+from matplotlib.figure import Figure
+from matplotlib.font_manager import findfont, is_opentype_cff_font, get_font
+from matplotlib.afm import AFM
+import matplotlib.type1font as type1font
+import matplotlib.dviread as dviread
+from matplotlib.ft2font import (FIXED_WIDTH, ITALIC, LOAD_NO_SCALE,
+ LOAD_NO_HINTING, KERNING_UNFITTED)
+from matplotlib.mathtext import MathTextParser
+from matplotlib.transforms import Affine2D, BboxBase
+from matplotlib.path import Path
+from matplotlib.dates import UTC
+from matplotlib import _path
+from matplotlib import _png
+from matplotlib import ttconv
+
+_log = logging.getLogger(__name__)
+
+# Overview
+#
+# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
+# function and the classes Reference, Name, Operator, and Stream. The
+# PdfFile class knows about the overall structure of pdf documents.
+# It provides a "write" method for writing arbitrary strings in the
+# file, and an "output" method that passes objects through the pdfRepr
+# function before writing them in the file. The output method is
+# called by the RendererPdf class, which contains the various draw_foo
+# methods. RendererPdf contains a GraphicsContextPdf instance, and
+# each draw_foo calls self.check_gc before outputting commands. This
+# method checks whether the pdf graphics state needs to be modified
+# and outputs the necessary commands. GraphicsContextPdf represents
+# the graphics state, and its "delta" method returns the commands that
+# modify the state.
+
+# Add "pdf.use14corefonts: True" in your configuration file to use only
+# the 14 PDF core fonts. These fonts do not need to be embedded; every
+# PDF viewing application is required to have them. This results in very
+# light PDF files you can use directly in LaTeX or ConTeXt documents
+# generated with pdfTeX, without any conversion.
+
+# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
+# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
+# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
+# Times-BoldItalic, Symbol, ZapfDingbats.
+#
+# Some tricky points:
+#
+# 1. The clip path can only be widened by popping from the state
+# stack. Thus the state must be pushed onto the stack before narrowing
+# the clip path. This is taken care of by GraphicsContextPdf.
+#
+# 2. Sometimes it is necessary to refer to something (e.g., font,
+# image, or extended graphics state, which contains the alpha value)
+# in the page stream by a name that needs to be defined outside the
+# stream. PdfFile provides the methods fontName, imageObject, and
+# alphaState for this purpose. The implementations of these methods
+# should perhaps be generalized.
+
+# TODOs:
+#
+# * encoding of fonts, including mathtext fonts and unicode support
+# * TTF support has lots of small TODOs, e.g., how do you know if a font
+# is serif/sans-serif, or symbolic/non-symbolic?
+# * draw_markers, draw_line_collection, etc.
+
+
+def fill(strings, linelen=75):
+ """Make one string from sequence of strings, with whitespace
+ in between. The whitespace is chosen to form lines of at most
+ linelen characters, if possible."""
+ currpos = 0
+ lasti = 0
+ result = []
+ for i, s in enumerate(strings):
+ length = len(s)
+ if currpos + length < linelen:
+ currpos += length + 1
+ else:
+ result.append(b' '.join(strings[lasti:i]))
+ lasti = i
+ currpos = length
+ result.append(b' '.join(strings[lasti:]))
+ return b'\n'.join(result)
+
+# PDF strings are supposed to be able to include any eight-bit data,
+# except that unbalanced parens and backslashes must be escaped by a
+# backslash. However, sf bug #2708559 shows that the carriage return
+# character may get read as a newline; these characters correspond to
+# \gamma and \Omega in TeX's math font encoding. Escaping them fixes
+# the bug.
+_string_escape_regex = re.compile(br'([\\()\r\n])')
+
+
+def _string_escape(match):
+ m = match.group(0)
+ if m in br'\()':
+ return b'\\' + m
+ elif m == b'\n':
+ return br'\n'
+ elif m == b'\r':
+ return br'\r'
+ assert False
+
+
+def pdfRepr(obj):
+ """Map Python objects to PDF syntax."""
+
+ # Some objects defined later have their own pdfRepr method.
+ if hasattr(obj, 'pdfRepr'):
+ return obj.pdfRepr()
+
+ # Floats. PDF does not have exponential notation (1.0e-10) so we
+ # need to use %f with some precision. Perhaps the precision
+ # should adapt to the magnitude of the number?
+ elif isinstance(obj, (float, np.floating)):
+ if not np.isfinite(obj):
+ raise ValueError("Can only output finite numbers in PDF")
+ r = ("%.10f" % obj).encode('ascii')
+ return r.rstrip(b'0').rstrip(b'.')
+
+ # Booleans. Needs to be tested before integers since
+ # isinstance(True, int) is true.
+ elif isinstance(obj, bool):
+ return [b'false', b'true'][obj]
+
+ # Integers are written as such.
+ elif isinstance(obj, (six.integer_types, np.integer)):
+ return ("%d" % obj).encode('ascii')
+
+ # Unicode strings are encoded in UTF-16BE with byte-order mark.
+ elif isinstance(obj, six.text_type):
+ try:
+ # But maybe it's really ASCII?
+ s = obj.encode('ASCII')
+ return pdfRepr(s)
+ except UnicodeEncodeError:
+ s = codecs.BOM_UTF16_BE + obj.encode('UTF-16BE')
+ return pdfRepr(s)
+
+ # Strings are written in parentheses, with backslashes and parens
+ # escaped. Actually balanced parens are allowed, but it is
+ # simpler to escape them all. TODO: cut long strings into lines;
+ # I believe there is some maximum line length in PDF.
+ elif isinstance(obj, bytes):
+ return b'(' + _string_escape_regex.sub(_string_escape, obj) + b')'
+
+ # Dictionaries. The keys must be PDF names, so if we find strings
+ # there, we make Name objects from them. The values may be
+ # anything, so the caller must ensure that PDF names are
+ # represented as Name objects.
+ elif isinstance(obj, dict):
+ r = [b"<<"]
+ r.extend([Name(key).pdfRepr() + b" " + pdfRepr(obj[key])
+ for key in sorted(obj)])
+ r.append(b">>")
+ return fill(r)
+
+ # Lists.
+ elif isinstance(obj, (list, tuple)):
+ r = [b"["]
+ r.extend([pdfRepr(val) for val in obj])
+ r.append(b"]")
+ return fill(r)
+
+ # The null keyword.
+ elif obj is None:
+ return b'null'
+
+ # A date.
+ elif isinstance(obj, datetime):
+ r = obj.strftime('D:%Y%m%d%H%M%S')
+ z = obj.utcoffset()
+ if z is not None:
+ z = z.seconds
+ else:
+ if time.daylight:
+ z = time.altzone
+ else:
+ z = time.timezone
+ if z == 0:
+ r += 'Z'
+ elif z < 0:
+ r += "+%02d'%02d'" % ((-z) // 3600, (-z) % 3600)
+ else:
+ r += "-%02d'%02d'" % (z // 3600, z % 3600)
+ return pdfRepr(r)
+
+ # A bounding box
+ elif isinstance(obj, BboxBase):
+ return fill([pdfRepr(val) for val in obj.bounds])
+
+ else:
+ raise TypeError("Don't know a PDF representation for {} objects"
+ .format(type(obj)))
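+
+
+# A few illustrative mappings performed by pdfRepr (not part of the upstream
+# module), wrapped in a helper so that nothing runs at import time.
+def _example_pdfRepr():
+    assert pdfRepr(True) == b'true'
+    assert pdfRepr(42) == b'42'
+    assert pdfRepr(1.5) == b'1.5'        # trailing zeros stripped
+    assert pdfRepr(None) == b'null'
+    assert pdfRepr('abc') == b'(abc)'    # ASCII text becomes a PDF string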
+
+
+class Reference(object):
+ """PDF reference object.
+ Use PdfFile.reserveObject() to create References.
+ """
+
+ def __init__(self, id):
+ self.id = id
+
+ def __repr__(self):
+ return "<Reference %d>" % self.id
+
+ def pdfRepr(self):
+ return ("%d 0 R" % self.id).encode('ascii')
+
+ def write(self, contents, file):
+ write = file.write
+ write(("%d 0 obj\n" % self.id).encode('ascii'))
+ write(pdfRepr(contents))
+ write(b"\nendobj\n")
+
+
+@total_ordering
+class Name(object):
+ """PDF name object."""
+ __slots__ = ('name',)
+ _regex = re.compile(r'[^!-~]')
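+ # Bytes outside the printable range '!'..'~' are escaped as #xx hex
+ # pairs, e.g. Name('a b') serializes as /a#20b.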
+
+ def __init__(self, name):
+ if isinstance(name, Name):
+ self.name = name.name
+ else:
+ if isinstance(name, bytes):
+ name = name.decode('ascii')
+ self.name = self._regex.sub(Name.hexify, name).encode('ascii')
+
+ def __repr__(self):
+ return "<Name %s>" % self.name
+
+ def __str__(self):
+ return '/' + six.text_type(self.name)
+
+ def __eq__(self, other):
+ return isinstance(other, Name) and self.name == other.name
+
+ def __lt__(self, other):
+ return isinstance(other, Name) and self.name < other.name
+
+ def __hash__(self):
+ return hash(self.name)
+
+ @staticmethod
+ def hexify(match):
+ return '#%02x' % ord(match.group())
+
+ def pdfRepr(self):
+ return b'/' + self.name
+
+
+class Operator(object):
+ """PDF operator object."""
+ __slots__ = ('op',)
+
+ def __init__(self, op):
+ self.op = op
+
+ def __repr__(self):
+ return '<Operator %s>' % self.op
+
+ def pdfRepr(self):
+ return self.op
+
+
+class Verbatim(object):
+ """Store verbatim PDF command content for later inclusion in the
+ stream."""
+ def __init__(self, x):
+ self._x = x
+
+ def pdfRepr(self):
+ return self._x
+
+
+# PDF operators (not an exhaustive list)
+_pdfops = dict(
+ close_fill_stroke=b'b', fill_stroke=b'B', fill=b'f', closepath=b'h',
+ close_stroke=b's', stroke=b'S', endpath=b'n', begin_text=b'BT',
+ end_text=b'ET', curveto=b'c', rectangle=b're', lineto=b'l', moveto=b'm',
+ concat_matrix=b'cm', use_xobject=b'Do', setgray_stroke=b'G',
+ setgray_nonstroke=b'g', setrgb_stroke=b'RG', setrgb_nonstroke=b'rg',
+ setcolorspace_stroke=b'CS', setcolorspace_nonstroke=b'cs',
+ setcolor_stroke=b'SCN', setcolor_nonstroke=b'scn', setdash=b'd',
+ setlinejoin=b'j', setlinecap=b'J', setgstate=b'gs', gsave=b'q',
+ grestore=b'Q', textpos=b'Td', selectfont=b'Tf', textmatrix=b'Tm',
+ show=b'Tj', showkern=b'TJ', setlinewidth=b'w', clip=b'W', shading=b'sh')
+
+Op = Bunch(**{name: Operator(value) for name, value in six.iteritems(_pdfops)})
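+# Operators are written through PdfFile.output, e.g.
+# file.output(1, 0, 0, 1, 0, 0, Op.concat_matrix) emits the line
+# "1 0 0 1 0 0 cm".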
+
+
+def _paint_path(fill, stroke):
+ """Return the PDF operator to paint a path in the following way:
+ fill: fill the path with the fill color
+ stroke: stroke the outline of the path with the line color"""
+ if stroke:
+ if fill:
+ return Op.fill_stroke
+ else:
+ return Op.stroke
+ else:
+ if fill:
+ return Op.fill
+ else:
+ return Op.endpath
+Op.paint_path = _paint_path
+
+
+class Stream(object):
+ """PDF stream object.
+
+ This has no pdfRepr method. Instead, a stream is started through
+ PdfFile.beginStream(), its contents are written with write(), and it
+ is closed with end().
+ """
+ __slots__ = ('id', 'len', 'pdfFile', 'file', 'compressobj', 'extra', 'pos')
+
+ def __init__(self, id, len, file, extra=None, png=None):
+ """id: object id of stream; len: an unused Reference object for the
+ length of the stream, or None (to use a memory buffer); file:
+ a PdfFile; extra: a dictionary of extra key-value pairs to
+ include in the stream header; png: if the data is already
+ png compressed, the decode parameters"""
+ self.id = id # object id
+ self.len = len # id of length object
+ self.pdfFile = file
+ self.file = file.fh # file to which the stream is written
+ self.compressobj = None # compression object
+ if extra is None:
+ self.extra = dict()
+ else:
+ self.extra = extra.copy()
+ if png is not None:
+ self.extra.update({'Filter': Name('FlateDecode'),
+ 'DecodeParms': png})
+
+ self.pdfFile.recordXref(self.id)
+ if rcParams['pdf.compression'] and not png:
+ self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
+ if self.len is None:
+ self.file = BytesIO()
+ else:
+ self._writeHeader()
+ self.pos = self.file.tell()
+
+ def _writeHeader(self):
+ write = self.file.write
+ write(("%d 0 obj\n" % self.id).encode('ascii'))
+ dict = self.extra
+ dict['Length'] = self.len
+ if rcParams['pdf.compression']:
+ dict['Filter'] = Name('FlateDecode')
+
+ write(pdfRepr(dict))
+ write(b"\nstream\n")
+
+ def end(self):
+ """Finalize stream."""
+
+ self._flush()
+ if self.len is None:
+ contents = self.file.getvalue()
+ self.len = len(contents)
+ self.file = self.pdfFile.fh
+ self._writeHeader()
+ self.file.write(contents)
+ self.file.write(b"\nendstream\nendobj\n")
+ else:
+ length = self.file.tell() - self.pos
+ self.file.write(b"\nendstream\nendobj\n")
+ self.pdfFile.writeObject(self.len, length)
+
+ def write(self, data):
+ """Write some data on the stream."""
+
+ if self.compressobj is None:
+ self.file.write(data)
+ else:
+ compressed = self.compressobj.compress(data)
+ self.file.write(compressed)
+
+ def _flush(self):
+ """Flush the compression object."""
+
+ if self.compressobj is not None:
+ compressed = self.compressobj.flush()
+ self.file.write(compressed)
+ self.compressobj = None
+
+
+class PdfFile(object):
+ """PDF file object."""
+
+ def __init__(self, filename, metadata=None):
+ self.nextObject = 1 # next free object id
+ self.xrefTable = [[0, 65535, 'the zero object']]
+ self.passed_in_file_object = False
+ self.original_file_like = None
+ self.tell_base = 0
+ fh, opened = cbook.to_filehandle(filename, "wb", return_opened=True)
+ if not opened:
+ try:
+ self.tell_base = filename.tell()
+ except IOError:
+ fh = BytesIO()
+ self.original_file_like = filename
+ else:
+ fh = filename
+ self.passed_in_file_object = True
+
+ self._core14fontdir = os.path.join(
+ rcParams['datapath'], 'fonts', 'pdfcorefonts')
+ self.fh = fh
+ self.currentstream = None # stream object to write to, if any
+ fh.write(b"%PDF-1.4\n") # 1.4 is the first version to have alpha
+ # Output some eight-bit chars as a comment so various utilities
+ # recognize the file as binary by looking at the first few
+ # lines (see note in section 3.4.1 of the PDF reference).
+ fh.write(b"%\254\334 \253\272\n")
+
+ self.rootObject = self.reserveObject('root')
+ self.pagesObject = self.reserveObject('pages')
+ self.pageList = []
+ self.fontObject = self.reserveObject('fonts')
+ self.alphaStateObject = self.reserveObject('extended graphics states')
+ self.hatchObject = self.reserveObject('tiling patterns')
+ self.gouraudObject = self.reserveObject('Gouraud triangles')
+ self.XObjectObject = self.reserveObject('external objects')
+ self.resourceObject = self.reserveObject('resources')
+
+ root = {'Type': Name('Catalog'),
+ 'Pages': self.pagesObject}
+ self.writeObject(self.rootObject, root)
+
+ # get source date from SOURCE_DATE_EPOCH, if set
+ # See https://reproducible-builds.org/specs/source-date-epoch/
+ source_date_epoch = os.getenv("SOURCE_DATE_EPOCH")
+ if source_date_epoch:
+ source_date = datetime.utcfromtimestamp(int(source_date_epoch))
+ source_date = source_date.replace(tzinfo=UTC)
+ else:
+ source_date = datetime.today()
+
+ self.infoDict = {
+ 'Creator': 'matplotlib %s, http://matplotlib.org' % __version__,
+ 'Producer': 'matplotlib pdf backend %s' % __version__,
+ 'CreationDate': source_date
+ }
+ if metadata is not None:
+ self.infoDict.update(metadata)
+ self.infoDict = {k: v for (k, v) in self.infoDict.items()
+ if v is not None}
+
+ self.fontNames = {} # maps filenames to internal font names
+ self.nextFont = 1 # next free internal font name
+ self.dviFontInfo = {} # maps dvi font names to embedding information
+ self._texFontMap = None # maps TeX font names to PostScript fonts
+ # differently encoded Type-1 fonts may share the same descriptor
+ self.type1Descriptors = {}
+ self.used_characters = {}
+
+ self.alphaStates = {} # maps alpha values to graphics state objects
+ self.nextAlphaState = 1
+ # reproducible writeHatches needs an ordered dict:
+ self.hatchPatterns = collections.OrderedDict()
+ self.nextHatch = 1
+ self.gouraudTriangles = []
+
+ self._images = collections.OrderedDict() # reproducible writeImages
+ self.nextImage = 1
+
+ self.markers = collections.OrderedDict() # reproducible writeMarkers
+ self.multi_byte_charprocs = {}
+
+ self.paths = []
+
+ self.pageAnnotations = [] # A list of annotations for the
+ # current page
+
+ # The PDF spec recommends including every procset
+ procsets = [Name(x)
+ for x in "PDF Text ImageB ImageC ImageI".split()]
+
+ # Write resource dictionary.
+ # Possibly TODO: more general ExtGState (graphics state dictionaries)
+ # ColorSpace Pattern Shading Properties
+ resources = {'Font': self.fontObject,
+ 'XObject': self.XObjectObject,
+ 'ExtGState': self.alphaStateObject,
+ 'Pattern': self.hatchObject,
+ 'Shading': self.gouraudObject,
+ 'ProcSet': procsets}
+ self.writeObject(self.resourceObject, resources)
+
+ def newPage(self, width, height):
+ self.endStream()
+
+ self.width, self.height = width, height
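+ # width and height are in inches; PDF user space uses 72 units per
+ # inch, hence the scaling in the MediaBox below.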
+ contentObject = self.reserveObject('page contents')
+ thePage = {'Type': Name('Page'),
+ 'Parent': self.pagesObject,
+ 'Resources': self.resourceObject,
+ 'MediaBox': [0, 0, 72 * width, 72 * height],
+ 'Contents': contentObject,
+ 'Group': {'Type': Name('Group'),
+ 'S': Name('Transparency'),
+ 'CS': Name('DeviceRGB')},
+ 'Annots': self.pageAnnotations,
+ }
+ pageObject = self.reserveObject('page')
+ self.writeObject(pageObject, thePage)
+ self.pageList.append(pageObject)
+
+ self.beginStream(contentObject.id,
+ self.reserveObject('length of content stream'))
+ # Initialize the pdf graphics state to match the default mpl
+ # graphics context: currently only the join style needs to be set
+ self.output(GraphicsContextPdf.joinstyles['round'], Op.setlinejoin)
+
+ # Clear the list of annotations for the next page
+ self.pageAnnotations = []
+
+ def newTextnote(self, text, positionRect=[-100, -100, 0, 0]):
+ # Create a new annotation of type text
+ theNote = {'Type': Name('Annot'),
+ 'Subtype': Name('Text'),
+ 'Contents': text,
+ 'Rect': positionRect,
+ }
+ annotObject = self.reserveObject('annotation')
+ self.writeObject(annotObject, theNote)
+ self.pageAnnotations.append(annotObject)
+
+ def finalize(self):
+ "Write out the various deferred objects and the pdf end matter."
+
+ self.endStream()
+ self.writeFonts()
+ self.writeObject(
+ self.alphaStateObject,
+ {val[0]: val[1] for val in six.itervalues(self.alphaStates)})
+ self.writeHatches()
+ self.writeGouraudTriangles()
+ xobjects = {
+ name: ob for image, name, ob in six.itervalues(self._images)}
+ for tup in six.itervalues(self.markers):
+ xobjects[tup[0]] = tup[1]
+ for name, value in six.iteritems(self.multi_byte_charprocs):
+ xobjects[name] = value
+ for name, path, trans, ob, join, cap, padding, filled, stroked \
+ in self.paths:
+ xobjects[name] = ob
+ self.writeObject(self.XObjectObject, xobjects)
+ self.writeImages()
+ self.writeMarkers()
+ self.writePathCollectionTemplates()
+ self.writeObject(self.pagesObject,
+ {'Type': Name('Pages'),
+ 'Kids': self.pageList,
+ 'Count': len(self.pageList)})
+ self.writeInfoDict()
+
+ # Finalize the file
+ self.writeXref()
+ self.writeTrailer()
+
+ def close(self):
+ "Flush all buffers and free all resources."
+
+ self.endStream()
+ if self.passed_in_file_object:
+ self.fh.flush()
+ else:
+ if self.original_file_like is not None:
+ self.original_file_like.write(self.fh.getvalue())
+ self.fh.close()
+
+ def write(self, data):
+ if self.currentstream is None:
+ self.fh.write(data)
+ else:
+ self.currentstream.write(data)
+
+ def output(self, *data):
+ self.write(fill([pdfRepr(x) for x in data]))
+ self.write(b'\n')
+
+ def beginStream(self, id, len, extra=None, png=None):
+ assert self.currentstream is None
+ self.currentstream = Stream(id, len, self, extra, png)
+
+ def endStream(self):
+ if self.currentstream is not None:
+ self.currentstream.end()
+ self.currentstream = None
+
+ def fontName(self, fontprop):
+ """
+ Select a font based on fontprop and return a name suitable for
+ Op.selectfont. If fontprop is a string, it will be interpreted
+ as the filename of the font.
+ """
+
+ if isinstance(fontprop, six.string_types):
+ filename = fontprop
+ elif rcParams['pdf.use14corefonts']:
+ filename = findfont(
+ fontprop, fontext='afm', directory=self._core14fontdir)
+ if filename is None:
+ filename = findfont(
+ "Helvetica", fontext='afm', directory=self._core14fontdir)
+ else:
+ filename = findfont(fontprop)
+
+ Fx = self.fontNames.get(filename)
+ if Fx is None:
+ Fx = Name('F%d' % self.nextFont)
+ self.fontNames[filename] = Fx
+ self.nextFont += 1
+ _log.debug('Assigning font %s = %r', Fx, filename)
+
+ return Fx
+
+ @property
+ def texFontMap(self):
+ # lazy-load texFontMap, it takes a while to parse
+ # and usetex is a relatively rare use case
+ if self._texFontMap is None:
+ self._texFontMap = dviread.PsfontsMap(
+ dviread.find_tex_file('pdftex.map'))
+
+ return self._texFontMap
+
+ def dviFontName(self, dvifont):
+ """
+ Given a dvi font object, return a name suitable for Op.selectfont.
+ This registers the font information in self.dviFontInfo if not yet
+ registered.
+ """
+
+ dvi_info = self.dviFontInfo.get(dvifont.texname)
+ if dvi_info is not None:
+ return dvi_info.pdfname
+
+ psfont = self.texFontMap[dvifont.texname]
+ if psfont.filename is None:
+ raise ValueError(
+ "No usable font file found for {} (TeX: {}); "
+ "the font may lack a Type-1 version"
+ .format(psfont.psname, dvifont.texname))
+
+ pdfname = Name('F%d' % self.nextFont)
+ self.nextFont += 1
+ _log.debug('Assigning font %s = %s (dvi)', pdfname, dvifont.texname)
+ self.dviFontInfo[dvifont.texname] = Bunch(
+ dvifont=dvifont,
+ pdfname=pdfname,
+ fontfile=psfont.filename,
+ basefont=psfont.psname,
+ encodingfile=psfont.encoding,
+ effects=psfont.effects)
+ return pdfname
+
+ def writeFonts(self):
+ fonts = {}
+ for dviname, info in sorted(self.dviFontInfo.items()):
+ Fx = info.pdfname
+ _log.debug('Embedding Type-1 font %s from dvi.', dviname)
+ fonts[Fx] = self._embedTeXFont(info)
+ for filename in sorted(self.fontNames):
+ Fx = self.fontNames[filename]
+ _log.debug('Embedding font %s.', filename)
+ if filename.endswith('.afm'):
+ # from pdf.use14corefonts
+ _log.debug('Writing AFM font.')
+ fonts[Fx] = self._write_afm_font(filename)
+ else:
+ # a normal TrueType font
+ _log.debug('Writing TrueType font.')
+ realpath, stat_key = get_realpath_and_stat(filename)
+ chars = self.used_characters.get(stat_key)
+ if chars is not None and len(chars[1]):
+ fonts[Fx] = self.embedTTF(realpath, chars[1])
+ self.writeObject(self.fontObject, fonts)
+
+ def _write_afm_font(self, filename):
+ with open(filename, 'rb') as fh:
+ font = AFM(fh)
+ fontname = font.get_fontname()
+ fontdict = {'Type': Name('Font'),
+ 'Subtype': Name('Type1'),
+ 'BaseFont': Name(fontname),
+ 'Encoding': Name('WinAnsiEncoding')}
+ fontdictObject = self.reserveObject('font dictionary')
+ self.writeObject(fontdictObject, fontdict)
+ return fontdictObject
+
+ def _embedTeXFont(self, fontinfo):
+ _log.debug('Embedding TeX font %s - fontinfo=%s',
+ fontinfo.dvifont.texname, fontinfo.__dict__)
+
+ # Widths
+ widthsObject = self.reserveObject('font widths')
+ self.writeObject(widthsObject, fontinfo.dvifont.widths)
+
+ # Font dictionary
+ fontdictObject = self.reserveObject('font dictionary')
+ fontdict = {
+ 'Type': Name('Font'),
+ 'Subtype': Name('Type1'),
+ 'FirstChar': 0,
+ 'LastChar': len(fontinfo.dvifont.widths) - 1,
+ 'Widths': widthsObject,
+ }
+
+ # Encoding (if needed)
+ if fontinfo.encodingfile is not None:
+ enc = dviread.Encoding(fontinfo.encodingfile)
+ differencesArray = [Name(ch) for ch in enc]
+ differencesArray = [0] + differencesArray
+ fontdict['Encoding'] = \
+ {'Type': Name('Encoding'),
+ 'Differences': differencesArray}
+
+ # If no file is specified, stop short
+ if fontinfo.fontfile is None:
+ _log.warning(
+ "Because of TeX configuration (pdftex.map, see updmap option "
+ "pdftexDownloadBase14) the font %s is not embedded. This is "
+ "deprecated as of PDF 1.5 and it may cause the consumer "
+ "application to show something that was not intended.",
+ fontinfo.basefont)
+ fontdict['BaseFont'] = Name(fontinfo.basefont)
+ self.writeObject(fontdictObject, fontdict)
+ return fontdictObject
+
+ # We have a font file to embed - read it in and apply any effects
+ t1font = type1font.Type1Font(fontinfo.fontfile)
+ if fontinfo.effects:
+ t1font = t1font.transform(fontinfo.effects)
+ fontdict['BaseFont'] = Name(t1font.prop['FontName'])
+
+ # Font descriptors may be shared between differently encoded
+ # Type-1 fonts, so only create a new descriptor if there is no
+ # existing descriptor for this font.
+ effects = (fontinfo.effects.get('slant', 0.0),
+ fontinfo.effects.get('extend', 1.0))
+ fontdesc = self.type1Descriptors.get((fontinfo.fontfile, effects))
+ if fontdesc is None:
+ fontdesc = self.createType1Descriptor(t1font, fontinfo.fontfile)
+ self.type1Descriptors[(fontinfo.fontfile, effects)] = fontdesc
+ fontdict['FontDescriptor'] = fontdesc
+
+ self.writeObject(fontdictObject, fontdict)
+ return fontdictObject
+
+ def createType1Descriptor(self, t1font, fontfile):
+ # Create and write the font descriptor and the font file
+ # of a Type-1 font
+ fontdescObject = self.reserveObject('font descriptor')
+ fontfileObject = self.reserveObject('font file')
+
+ italic_angle = t1font.prop['ItalicAngle']
+ fixed_pitch = t1font.prop['isFixedPitch']
+
+ flags = 0
+ # fixed width
+ if fixed_pitch:
+ flags |= 1 << 0
+ # TODO: serif
+ if 0:
+ flags |= 1 << 1
+ # TODO: symbolic (most TeX fonts are)
+ if 1:
+ flags |= 1 << 2
+ # non-symbolic
+ else:
+ flags |= 1 << 5
+ # italic
+ if italic_angle:
+ flags |= 1 << 6
+ # TODO: all caps
+ if 0:
+ flags |= 1 << 16
+ # TODO: small caps
+ if 0:
+ flags |= 1 << 17
+ # TODO: force bold
+ if 0:
+ flags |= 1 << 18
+
+ ft2font = get_font(fontfile)
+
+ descriptor = {
+ 'Type': Name('FontDescriptor'),
+ 'FontName': Name(t1font.prop['FontName']),
+ 'Flags': flags,
+ 'FontBBox': ft2font.bbox,
+ 'ItalicAngle': italic_angle,
+ 'Ascent': ft2font.ascender,
+ 'Descent': ft2font.descender,
+ 'CapHeight': 1000, # TODO: find this out
+ 'XHeight': 500, # TODO: this one too
+ 'FontFile': fontfileObject,
+ 'FontFamily': t1font.prop['FamilyName'],
+ 'StemV': 50, # TODO
+ # (see also revision 3874; but not all TeX distros have AFM files!)
+ # 'FontWeight': a number where 400 = Regular, 700 = Bold
+ }
+
+ self.writeObject(fontdescObject, descriptor)
+
+ self.beginStream(fontfileObject.id, None,
+ {'Length1': len(t1font.parts[0]),
+ 'Length2': len(t1font.parts[1]),
+ 'Length3': 0})
+ self.currentstream.write(t1font.parts[0])
+ self.currentstream.write(t1font.parts[1])
+ self.endStream()
+
+ return fontdescObject
+
+ def _get_xobject_symbol_name(self, filename, symbol_name):
+ return "%s-%s" % (
+ os.path.splitext(os.path.basename(filename))[0],
+ symbol_name)
+
+ _identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin
+12 dict begin
+begincmap
+/CIDSystemInfo
+<< /Registry (Adobe)
+ /Ordering (UCS)
+ /Supplement 0
+>> def
+/CMapName /Adobe-Identity-UCS def
+/CMapType 2 def
+1 begincodespacerange
+<0000> <ffff>
+endcodespacerange
+%d beginbfrange
+%s
+endbfrange
+endcmap
+CMapName currentdict /CMap defineresource pop
+end
+end"""
+
+ def embedTTF(self, filename, characters):
+ """Embed the TTF font from the named file into the document."""
+
+ font = get_font(filename)
+ fonttype = rcParams['pdf.fonttype']
+
+ def cvt(length, upe=font.units_per_EM, nearest=True):
+ "Convert font coordinates to PDF glyph coordinates"
+ value = length / upe * 1000
+ if nearest:
+ return np.round(value)
+ # Perhaps best to round away from zero for bounding
+ # boxes and the like
+ if value < 0:
+ return floor(value)
+ else:
+ return ceil(value)
+
+ def embedTTFType3(font, characters, descriptor):
+ """The Type 3-specific part of embedding a Truetype font"""
+ widthsObject = self.reserveObject('font widths')
+ fontdescObject = self.reserveObject('font descriptor')
+ fontdictObject = self.reserveObject('font dictionary')
+ charprocsObject = self.reserveObject('character procs')
+ differencesArray = []
+ firstchar, lastchar = 0, 255
+ bbox = [cvt(x, nearest=False) for x in font.bbox]
+
+ fontdict = {
+ 'Type': Name('Font'),
+ 'BaseFont': ps_name,
+ 'FirstChar': firstchar,
+ 'LastChar': lastchar,
+ 'FontDescriptor': fontdescObject,
+ 'Subtype': Name('Type3'),
+ 'Name': descriptor['FontName'],
+ 'FontBBox': bbox,
+ 'FontMatrix': [.001, 0, 0, .001, 0, 0],
+ 'CharProcs': charprocsObject,
+ 'Encoding': {
+ 'Type': Name('Encoding'),
+ 'Differences': differencesArray},
+ 'Widths': widthsObject
+ }
+
+ # Make the "Widths" array
+ from encodings import cp1252
+ # The "decoding_map" was changed
+ # to a "decoding_table" as of Python 2.5.
+ if hasattr(cp1252, 'decoding_map'):
+ def decode_char(charcode):
+ return cp1252.decoding_map[charcode] or 0
+ else:
+ def decode_char(charcode):
+ return ord(cp1252.decoding_table[charcode])
+
+ def get_char_width(charcode):
+ s = decode_char(charcode)
+ width = font.load_char(
+ s, flags=LOAD_NO_SCALE | LOAD_NO_HINTING).horiAdvance
+ return cvt(width)
+
+ widths = [get_char_width(charcode)
+ for charcode in range(firstchar, lastchar+1)]
+ descriptor['MaxWidth'] = max(widths)
+
+ # Make the "Differences" array, sort the ccodes < 255 from
+ # the multi-byte ccodes, and build the whole set of glyph ids
+ # that we need from this font.
+ glyph_ids = []
+ differences = []
+ multi_byte_chars = set()
+ for c in characters:
+ ccode = c
+ gind = font.get_char_index(ccode)
+ glyph_ids.append(gind)
+ glyph_name = font.get_glyph_name(gind)
+ if ccode <= 255:
+ differences.append((ccode, glyph_name))
+ else:
+ multi_byte_chars.add(glyph_name)
+ differences.sort()
+
+ last_c = -2
+ for c, name in differences:
+ if c != last_c + 1:
+ differencesArray.append(c)
+ differencesArray.append(Name(name))
+ last_c = c
+
+ # Make the charprocs array (using ttconv to generate the
+ # actual outlines)
+ rawcharprocs = ttconv.get_pdf_charprocs(
+ filename.encode(sys.getfilesystemencoding()), glyph_ids)
+ charprocs = {}
+ for charname in sorted(rawcharprocs):
+ stream = rawcharprocs[charname]
+ charprocDict = {'Length': len(stream)}
+ # The 2-byte characters are used as XObjects, so they
+ # need extra info in their dictionary
+ if charname in multi_byte_chars:
+ charprocDict['Type'] = Name('XObject')
+ charprocDict['Subtype'] = Name('Form')
+ charprocDict['BBox'] = bbox
+ # Each glyph includes bounding box information,
+ # but xpdf and ghostscript can't handle it in a
+ # Form XObject (they segfault!!!), so we remove it
+ # from the stream here. It's not needed anyway,
+ # since the Form XObject includes it in its BBox
+ # value.
+ stream = stream[stream.find(b"d1") + 2:]
+ charprocObject = self.reserveObject('charProc')
+ self.beginStream(charprocObject.id, None, charprocDict)
+ self.currentstream.write(stream)
+ self.endStream()
+
+ # Send the glyphs with ccode > 255 to the XObject dictionary,
+ # and the others to the font itself
+ if charname in multi_byte_chars:
+ name = self._get_xobject_symbol_name(filename, charname)
+ self.multi_byte_charprocs[name] = charprocObject
+ else:
+ charprocs[charname] = charprocObject
+
+ # Write everything out
+ self.writeObject(fontdictObject, fontdict)
+ self.writeObject(fontdescObject, descriptor)
+ self.writeObject(widthsObject, widths)
+ self.writeObject(charprocsObject, charprocs)
+
+ return fontdictObject
+
+ def embedTTFType42(font, characters, descriptor):
+ """The Type 42-specific part of embedding a Truetype font"""
+ fontdescObject = self.reserveObject('font descriptor')
+ cidFontDictObject = self.reserveObject('CID font dictionary')
+ type0FontDictObject = self.reserveObject('Type 0 font dictionary')
+ cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
+ fontfileObject = self.reserveObject('font file stream')
+ wObject = self.reserveObject('Type 0 widths')
+ toUnicodeMapObject = self.reserveObject('ToUnicode map')
+
+ cidFontDict = {
+ 'Type': Name('Font'),
+ 'Subtype': Name('CIDFontType2'),
+ 'BaseFont': ps_name,
+ 'CIDSystemInfo': {
+ 'Registry': 'Adobe',
+ 'Ordering': 'Identity',
+ 'Supplement': 0},
+ 'FontDescriptor': fontdescObject,
+ 'W': wObject,
+ 'CIDToGIDMap': cidToGidMapObject
+ }
+
+ type0FontDict = {
+ 'Type': Name('Font'),
+ 'Subtype': Name('Type0'),
+ 'BaseFont': ps_name,
+ 'Encoding': Name('Identity-H'),
+ 'DescendantFonts': [cidFontDictObject],
+ 'ToUnicode': toUnicodeMapObject
+ }
+
+ # Make fontfile stream
+ descriptor['FontFile2'] = fontfileObject
+ length1Object = self.reserveObject('decoded length of a font')
+ self.beginStream(
+ fontfileObject.id,
+ self.reserveObject('length of font stream'),
+ {'Length1': length1Object})
+ with open(filename, 'rb') as fontfile:
+ length1 = 0
+ while True:
+ data = fontfile.read(4096)
+ if not data:
+ break
+ length1 += len(data)
+ self.currentstream.write(data)
+ self.endStream()
+ self.writeObject(length1Object, length1)
+
+ # Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
+ # at the same time
+ cid_to_gid_map = ['\0'] * 65536
+ widths = []
+ max_ccode = 0
+ for c in characters:
+ ccode = c
+ gind = font.get_char_index(ccode)
+ glyph = font.load_char(ccode,
+ flags=LOAD_NO_SCALE | LOAD_NO_HINTING)
+ widths.append((ccode, cvt(glyph.horiAdvance)))
+ if ccode < 65536:
+ cid_to_gid_map[ccode] = unichr(gind)
+ max_ccode = max(ccode, max_ccode)
+ widths.sort()
+ cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]
+
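+ # Build the CIDFont 'W' array as runs of the form
+ # "startCode [w1 w2 ...]" over consecutive character codes, and
+ # collect the contiguous ranges for the ToUnicode CMap below.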
+ last_ccode = -2
+ w = []
+ max_width = 0
+ unicode_groups = []
+ for ccode, width in widths:
+ if ccode != last_ccode + 1:
+ w.append(ccode)
+ w.append([width])
+ unicode_groups.append([ccode, ccode])
+ else:
+ w[-1].append(width)
+ unicode_groups[-1][1] = ccode
+ max_width = max(max_width, width)
+ last_ccode = ccode
+
+ unicode_bfrange = []
+ for start, end in unicode_groups:
+ unicode_bfrange.append(
+ "<%04x> <%04x> [%s]" %
+ (start, end,
+ " ".join(["<%04x>" % x for x in range(start, end+1)])))
+ unicode_cmap = (self._identityToUnicodeCMap %
+ (len(unicode_groups),
+ "\n".join(unicode_bfrange))).encode('ascii')
+
+ # CIDToGIDMap stream
+ cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
+ self.beginStream(cidToGidMapObject.id,
+ None,
+ {'Length': len(cid_to_gid_map)})
+ self.currentstream.write(cid_to_gid_map)
+ self.endStream()
+
+ # ToUnicode CMap
+ self.beginStream(toUnicodeMapObject.id,
+ None,
+ {'Length': len(unicode_cmap)})
+ self.currentstream.write(unicode_cmap)
+ self.endStream()
+
+ descriptor['MaxWidth'] = max_width
+
+ # Write everything out
+ self.writeObject(cidFontDictObject, cidFontDict)
+ self.writeObject(type0FontDictObject, type0FontDict)
+ self.writeObject(fontdescObject, descriptor)
+ self.writeObject(wObject, w)
+
+ return type0FontDictObject
+
+ # Beginning of main embedTTF function...
+
+ # You are lost in a maze of TrueType tables, all different...
+ sfnt = font.get_sfnt()
+ try:
+ ps_name = sfnt[1, 0, 0, 6].decode('mac_roman') # Macintosh scheme
+ except KeyError:
+ # Microsoft scheme:
+ ps_name = sfnt[3, 1, 0x0409, 6].decode('utf-16be')
+ # (see freetype/ttnameid.h)
+ ps_name = ps_name.encode('ascii', 'replace')
+ ps_name = Name(ps_name)
+ pclt = font.get_sfnt_table('pclt') or {'capHeight': 0, 'xHeight': 0}
+ post = font.get_sfnt_table('post') or {'italicAngle': (0, 0)}
+ ff = font.face_flags
+ sf = font.style_flags
+
+ flags = 0
+ symbolic = False # ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
+ if ff & FIXED_WIDTH:
+ flags |= 1 << 0
+ if 0: # TODO: serif
+ flags |= 1 << 1
+ if symbolic:
+ flags |= 1 << 2
+ else:
+ flags |= 1 << 5
+ if sf & ITALIC:
+ flags |= 1 << 6
+ if 0: # TODO: all caps
+ flags |= 1 << 16
+ if 0: # TODO: small caps
+ flags |= 1 << 17
+ if 0: # TODO: force bold
+ flags |= 1 << 18
+
+ descriptor = {
+ 'Type': Name('FontDescriptor'),
+ 'FontName': ps_name,
+ 'Flags': flags,
+ 'FontBBox': [cvt(x, nearest=False) for x in font.bbox],
+ 'Ascent': cvt(font.ascender, nearest=False),
+ 'Descent': cvt(font.descender, nearest=False),
+ 'CapHeight': cvt(pclt['capHeight'], nearest=False),
+ 'XHeight': cvt(pclt['xHeight']),
+ 'ItalicAngle': post['italicAngle'][1], # ???
+ 'StemV': 0 # ???
+ }
+
+ # Subsetting to a Type 3 font does not work for OpenType (.otf)
+ # fonts that embed a PostScript CFF font, so avoid that --
+ # save as a (non-subsetted) Type 42 font instead.
+ if is_opentype_cff_font(filename):
+ fonttype = 42
+ _log.warning("%r can not be subsetted into a Type 3 font. The "
+ "entire font will be embedded in the output.",
+ os.path.basename(filename))
+
+ if fonttype == 3:
+ return embedTTFType3(font, characters, descriptor)
+ elif fonttype == 42:
+ return embedTTFType42(font, characters, descriptor)
+
+ def alphaState(self, alpha):
+ """Return name of an ExtGState that sets alpha to the given value."""
+
+ state = self.alphaStates.get(alpha, None)
+ if state is not None:
+ return state[0]
+
+ name = Name('A%d' % self.nextAlphaState)
+ self.nextAlphaState += 1
+ self.alphaStates[alpha] = \
+ (name, {'Type': Name('ExtGState'),
+ 'CA': alpha[0], 'ca': alpha[1]})
+ return name
+
+ def hatchPattern(self, hatch_style):
+ # The colors may come in as numpy arrays, which aren't hashable
+ if hatch_style is not None:
+ edge, face, hatch = hatch_style
+ if edge is not None:
+ edge = tuple(edge)
+ if face is not None:
+ face = tuple(face)
+ hatch_style = (edge, face, hatch)
+
+ pattern = self.hatchPatterns.get(hatch_style, None)
+ if pattern is not None:
+ return pattern
+
+ name = Name('H%d' % self.nextHatch)
+ self.nextHatch += 1
+ self.hatchPatterns[hatch_style] = name
+ return name
+
+ def writeHatches(self):
+ hatchDict = dict()
+ sidelen = 72.0
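+ # Each hatch pattern tiles a one-inch (72 unit) square cell.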
+ for hatch_style, name in six.iteritems(self.hatchPatterns):
+ ob = self.reserveObject('hatch pattern')
+ hatchDict[name] = ob
+ res = {'Procsets':
+ [Name(x) for x in "PDF Text ImageB ImageC ImageI".split()]}
+ self.beginStream(
+ ob.id, None,
+ {'Type': Name('Pattern'),
+ 'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
+ 'BBox': [0, 0, sidelen, sidelen],
+ 'XStep': sidelen, 'YStep': sidelen,
+ 'Resources': res,
+ # Change origin to match Agg at top-left.
+ 'Matrix': [1, 0, 0, 1, 0, self.height * 72]})
+
+ stroke_rgb, fill_rgb, path = hatch_style
+ self.output(stroke_rgb[0], stroke_rgb[1], stroke_rgb[2],
+ Op.setrgb_stroke)
+ if fill_rgb is not None:
+ self.output(fill_rgb[0], fill_rgb[1], fill_rgb[2],
+ Op.setrgb_nonstroke,
+ 0, 0, sidelen, sidelen, Op.rectangle,
+ Op.fill)
+
+ self.output(rcParams['hatch.linewidth'], Op.setlinewidth)
+
+ self.output(*self.pathOperations(
+ Path.hatch(path),
+ Affine2D().scale(sidelen),
+ simplify=False))
+ self.output(Op.fill_stroke)
+
+ self.endStream()
+ self.writeObject(self.hatchObject, hatchDict)
+
+ def addGouraudTriangles(self, points, colors):
+ name = Name('GT%d' % len(self.gouraudTriangles))
+ self.gouraudTriangles.append((name, points, colors))
+ return name
+
+ def writeGouraudTriangles(self):
+ gouraudDict = dict()
+ for name, points, colors in self.gouraudTriangles:
+ ob = self.reserveObject('Gouraud triangle')
+ gouraudDict[name] = ob
+ shape = points.shape
+ flat_points = points.reshape((shape[0] * shape[1], 2))
+ flat_colors = colors.reshape((shape[0] * shape[1], 4))
+ points_min = np.min(flat_points, axis=0) - (1 << 8)
+ points_max = np.max(flat_points, axis=0) + (1 << 8)
+ factor = 0xffffffff / (points_max - points_min)
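+ # Coordinates are written as 32-bit unsigned integers; the Decode
+ # array below maps that range back onto [points_min, points_max].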
+
+ self.beginStream(
+ ob.id, None,
+ {'ShadingType': 4,
+ 'BitsPerCoordinate': 32,
+ 'BitsPerComponent': 8,
+ 'BitsPerFlag': 8,
+ 'ColorSpace': Name('DeviceRGB'),
+ 'AntiAlias': True,
+ 'Decode': [points_min[0], points_max[0],
+ points_min[1], points_max[1],
+ 0, 1, 0, 1, 0, 1]
+ })
+
+ streamarr = np.empty(
+ (shape[0] * shape[1],),
+ dtype=[(str('flags'), str('u1')),
+ (str('points'), str('>u4'), (2,)),
+ (str('colors'), str('u1'), (3,))])
+ streamarr['flags'] = 0
+ streamarr['points'] = (flat_points - points_min) * factor
+ streamarr['colors'] = flat_colors[:, :3] * 255.0
+
+ self.write(streamarr.tostring())
+ self.endStream()
+ self.writeObject(self.gouraudObject, gouraudDict)
+
+ def imageObject(self, image):
+ """Return name of an image XObject representing the given image."""
+
+ entry = self._images.get(id(image), None)
+ if entry is not None:
+ return entry[1]
+
+ name = Name('I%d' % self.nextImage)
+ ob = self.reserveObject('image %d' % self.nextImage)
+ self.nextImage += 1
+ self._images[id(image)] = (image, name, ob)
+ return name
+
+ def _unpack(self, im):
+ """
+ Unpack the image object im into height, width, data, alpha,
+ where data is a HxWx3 (RGB) or HxW (grayscale) array and alpha is a
+ HxWx1 array, or None if the image is fully opaque.
+ """
+ h, w = im.shape[:2]
+ im = im[::-1]
+ if im.ndim == 2:
+ return h, w, im, None
+ else:
+ rgb = im[:, :, :3]
+ rgb = np.array(rgb, order='C')
+ # PDF needs a separate alpha image
+ if im.shape[2] == 4:
+ alpha = im[:, :, 3][..., None]
+ if np.all(alpha == 255):
+ alpha = None
+ else:
+ alpha = np.array(alpha, order='C')
+ else:
+ alpha = None
+ return h, w, rgb, alpha
+
+ def _writePng(self, data):
+ """
+ Write the image *data* into the pdf file using png
+ predictors with Flate compression.
+ """
+
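+ # A PNG file is an 8-byte signature followed by (length, type, data,
+ # CRC) chunks; skip the signature and copy only the IDAT payloads,
+ # whose predictor-filtered Flate data is what FlateDecode expects
+ # given the DecodeParms written by _writeImg.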
+ buffer = BytesIO()
+ _png.write_png(data, buffer)
+ buffer.seek(8)
+ written = 0
+ header = bytearray(8)
+ while True:
+ n = buffer.readinto(header)
+ assert n == 8
+ length, type = struct.unpack(b'!L4s', bytes(header))
+ if type == b'IDAT':
+ data = bytearray(length)
+ n = buffer.readinto(data)
+ assert n == length
+ self.currentstream.write(bytes(data))
+ written += n
+ elif type == b'IEND':
+ break
+ else:
+ buffer.seek(length, 1)
+ buffer.seek(4, 1) # skip CRC
+
+ def _writeImg(self, data, height, width, grayscale, id, smask=None):
+ """
+ Write the image *data* of size *height* x *width*, as grayscale
+ if *grayscale* is true and RGB otherwise, as pdf object *id*
+ and with the soft mask (alpha channel) *smask*, which should be
+ either None or a *height* x *width* x 1 array.
+ """
+
+ obj = {'Type': Name('XObject'),
+ 'Subtype': Name('Image'),
+ 'Width': width,
+ 'Height': height,
+ 'ColorSpace': Name('DeviceGray' if grayscale
+ else 'DeviceRGB'),
+ 'BitsPerComponent': 8}
+ if smask:
+ obj['SMask'] = smask
+ if rcParams['pdf.compression']:
+ png = {'Predictor': 10,
+ 'Colors': 1 if grayscale else 3,
+ 'Columns': width}
+ else:
+ png = None
+ self.beginStream(
+ id,
+ self.reserveObject('length of image stream'),
+ obj,
+ png=png
+ )
+ if png:
+ self._writePng(data)
+ else:
+ self.currentstream.write(data.tostring())
+ self.endStream()
+
+ def writeImages(self):
+ for img, name, ob in six.itervalues(self._images):
+ height, width, data, adata = self._unpack(img)
+ if adata is not None:
+ smaskObject = self.reserveObject("smask")
+ self._writeImg(adata, height, width, True, smaskObject.id)
+ else:
+ smaskObject = None
+ self._writeImg(data, height, width, False,
+ ob.id, smaskObject)
+
+ def markerObject(self, path, trans, fill, stroke, lw, joinstyle,
+ capstyle):
+ """Return name of a marker XObject representing the given path."""
+ # self.markers used by markerObject, writeMarkers, close:
+ # mapping from (path operations, fill?, stroke?) to
+ # [name, object reference, bounding box, linewidth]
+ # This enables different draw_markers calls to share the XObject
+ # if the gc is sufficiently similar: colors etc can vary, but
+ # the choices of whether to fill and whether to stroke cannot.
+ # We need a bounding box enclosing all of the XObject path,
+ # but since line width may vary, we store the maximum of all
+ # occurring line widths in self.markers.
+ # close() is somewhat tightly coupled in that it expects the
+ # first two components of each value in self.markers to be the
+ # name and object reference.
+ pathops = self.pathOperations(path, trans, simplify=False)
+ key = (tuple(pathops), bool(fill), bool(stroke), joinstyle, capstyle)
+ result = self.markers.get(key)
+ if result is None:
+ name = Name('M%d' % len(self.markers))
+ ob = self.reserveObject('marker %d' % len(self.markers))
+ bbox = path.get_extents(trans)
+ self.markers[key] = [name, ob, bbox, lw]
+ else:
+ if result[-1] < lw:
+ result[-1] = lw
+ name = result[0]
+ return name
+
+ def writeMarkers(self):
+ for ((pathops, fill, stroke, joinstyle, capstyle),
+ (name, ob, bbox, lw)) in six.iteritems(self.markers):
+ bbox = bbox.padded(lw * 0.5)
+ self.beginStream(
+ ob.id, None,
+ {'Type': Name('XObject'), 'Subtype': Name('Form'),
+ 'BBox': list(bbox.extents)})
+ self.output(GraphicsContextPdf.joinstyles[joinstyle],
+ Op.setlinejoin)
+ self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
+ self.output(*pathops)
+ self.output(Op.paint_path(fill, stroke))
+ self.endStream()
+
+ def pathCollectionObject(self, gc, path, trans, padding, filled, stroked):
+ name = Name('P%d' % len(self.paths))
+ ob = self.reserveObject('path %d' % len(self.paths))
+ self.paths.append(
+ (name, path, trans, ob, gc.get_joinstyle(), gc.get_capstyle(),
+ padding, filled, stroked))
+ return name
+
+ def writePathCollectionTemplates(self):
+ for (name, path, trans, ob, joinstyle, capstyle, padding, filled,
+ stroked) in self.paths:
+ pathops = self.pathOperations(path, trans, simplify=False)
+ bbox = path.get_extents(trans)
+ if not np.all(np.isfinite(bbox.extents)):
+ extents = [0, 0, 0, 0]
+ else:
+ bbox = bbox.padded(padding)
+ extents = list(bbox.extents)
+ self.beginStream(
+ ob.id, None,
+ {'Type': Name('XObject'), 'Subtype': Name('Form'),
+ 'BBox': extents})
+ self.output(GraphicsContextPdf.joinstyles[joinstyle],
+ Op.setlinejoin)
+ self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
+ self.output(*pathops)
+ self.output(Op.paint_path(filled, stroked))
+ self.endStream()
+
+ @staticmethod
+ def pathOperations(path, transform, clip=None, simplify=None, sketch=None):
+ return [Verbatim(_path.convert_to_string(
+ path, transform, clip, simplify, sketch,
+ 6,
+ [Op.moveto.op, Op.lineto.op, b'', Op.curveto.op, Op.closepath.op],
+ True))]
+
+ def writePath(self, path, transform, clip=False, sketch=None):
+ if clip:
+ clip = (0.0, 0.0, self.width * 72, self.height * 72)
+ simplify = path.should_simplify
+ else:
+ clip = None
+ simplify = False
+ cmds = self.pathOperations(path, transform, clip, simplify=simplify,
+ sketch=sketch)
+ self.output(*cmds)
+
+ def reserveObject(self, name=''):
+ """Reserve an ID for an indirect object.
+ The name is used for debugging in case we forget to print out
+ the object with writeObject.
+ """
+
+ id = self.nextObject
+ self.nextObject += 1
+ self.xrefTable.append([None, 0, name])
+ return Reference(id)
+
+ def recordXref(self, id):
+ self.xrefTable[id][0] = self.fh.tell() - self.tell_base
+
+ def writeObject(self, object, contents):
+ self.recordXref(object.id)
+ object.write(contents, self)
+
+ def writeXref(self):
+ """Write out the xref table."""
+
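+ # Each xref entry is a fixed 20-byte line: a 10-digit byte offset,
+ # a 5-digit generation number, and 'n' (in use) or 'f' (free).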
+ self.startxref = self.fh.tell() - self.tell_base
+ self.write(("xref\n0 %d\n" % self.nextObject).encode('ascii'))
+ i = 0
+ borken = False
+ for offset, generation, name in self.xrefTable:
+ if offset is None:
+ print('No offset for object %d (%s)' % (i, name),
+ file=sys.stderr)
+ borken = True
+ else:
+ if name == 'the zero object':
+ key = "f"
+ else:
+ key = "n"
+ text = "%010d %05d %s \n" % (offset, generation, key)
+ self.write(text.encode('ascii'))
+ i += 1
+ if borken:
+ raise AssertionError('Indirect object does not exist')
+
+ def writeInfoDict(self):
+ """Write out the info dictionary, checking it for good form"""
+
+ def is_string_like(x):
+ return isinstance(x, six.string_types)
+
+ def is_date(x):
+ return isinstance(x, datetime)
+
+ check_trapped = (lambda x: isinstance(x, Name) and
+ x.name in ('True', 'False', 'Unknown'))
+
+ keywords = {'Title': is_string_like,
+ 'Author': is_string_like,
+ 'Subject': is_string_like,
+ 'Keywords': is_string_like,
+ 'Creator': is_string_like,
+ 'Producer': is_string_like,
+ 'CreationDate': is_date,
+ 'ModDate': is_date,
+ 'Trapped': check_trapped}
+ for k in self.infoDict:
+ if k not in keywords:
+ warnings.warn('Unknown infodict keyword: %s' % k)
+ else:
+ if not keywords[k](self.infoDict[k]):
+ warnings.warn('Bad value for infodict keyword %s' % k)
+
+ self.infoObject = self.reserveObject('info')
+ self.writeObject(self.infoObject, self.infoDict)
+
+ def writeTrailer(self):
+ """Write out the PDF trailer."""
+
+ self.write(b"trailer\n")
+ self.write(pdfRepr(
+ {'Size': self.nextObject,
+ 'Root': self.rootObject,
+ 'Info': self.infoObject}))
+ # Could add 'ID'
+ self.write(("\nstartxref\n%d\n%%%%EOF\n" %
+ self.startxref).encode('ascii'))
+
+
+class RendererPdf(RendererBase):
+ afm_font_cache = maxdict(50)
+
+ def __init__(self, file, image_dpi, height, width):
+ RendererBase.__init__(self)
+ self.height = height
+ self.width = width
+ self.file = file
+ self.gc = self.new_gc()
+ self.mathtext_parser = MathTextParser("Pdf")
+ self.image_dpi = image_dpi
+
+ def finalize(self):
+ self.file.output(*self.gc.finalize())
+
+ def check_gc(self, gc, fillcolor=None):
+ orig_fill = getattr(gc, '_fillcolor', (0., 0., 0.))
+ gc._fillcolor = fillcolor
+
+ orig_alphas = getattr(gc, '_effective_alphas', (1.0, 1.0))
+
+ if gc.get_rgb() is None:
+ # The color should not matter here, since the linewidth should be 0
+ # unless affected by global settings in rcParams; set a zero alpha
+ # just in case.
+ gc.set_foreground((0, 0, 0, 0), isRGBA=True)
+
+ if gc._forced_alpha:
+ gc._effective_alphas = (gc._alpha, gc._alpha)
+ elif fillcolor is None or len(fillcolor) < 4:
+ gc._effective_alphas = (gc._rgb[3], 1.0)
+ else:
+ gc._effective_alphas = (gc._rgb[3], fillcolor[3])
+
+ delta = self.gc.delta(gc)
+ if delta:
+ self.file.output(*delta)
+
+ # Restore gc to avoid unwanted side effects
+ gc._fillcolor = orig_fill
+ gc._effective_alphas = orig_alphas
+
+ def track_characters(self, font, s):
+ """Keeps track of which characters are required from
+ each font."""
+ if isinstance(font, six.string_types):
+ fname = font
+ else:
+ fname = font.fname
+ realpath, stat_key = get_realpath_and_stat(fname)
+ used_characters = self.file.used_characters.setdefault(
+ stat_key, (realpath, set()))
+ used_characters[1].update([ord(x) for x in s])
+
+ def merge_used_characters(self, other):
+ for stat_key, (realpath, charset) in six.iteritems(other):
+ used_characters = self.file.used_characters.setdefault(
+ stat_key, (realpath, set()))
+ used_characters[1].update(charset)
+
+ def get_image_magnification(self):
+ return self.image_dpi/72.0
+
+ def option_scale_image(self):
+ """
+ The pdf backend supports arbitrary scaling of images.
+ """
+ return True
+
+ def option_image_nocomposite(self):
+ """
+ Return whether to generate a composite image from multiple images on
+ a set of axes.
+ """
+ return not rcParams['image.composite_image']
+
+ def draw_image(self, gc, x, y, im, transform=None):
+ h, w = im.shape[:2]
+ if w == 0 or h == 0:
+ return
+
+ if transform is None:
+ # If there's no transform, alpha has already been applied
+ gc.set_alpha(1.0)
+
+ self.check_gc(gc)
+
+ w = 72.0 * w / self.image_dpi
+ h = 72.0 * h / self.image_dpi
+
+ imob = self.file.imageObject(im)
+
+ if transform is None:
+ self.file.output(Op.gsave,
+ w, 0, 0, h, x, y, Op.concat_matrix,
+ imob, Op.use_xobject, Op.grestore)
+ else:
+ tr1, tr2, tr3, tr4, tr5, tr6 = transform.frozen().to_values()
+
+ self.file.output(Op.gsave,
+ 1, 0, 0, 1, x, y, Op.concat_matrix,
+ tr1, tr2, tr3, tr4, tr5, tr6, Op.concat_matrix,
+ imob, Op.use_xobject, Op.grestore)
+
+ def draw_path(self, gc, path, transform, rgbFace=None):
+ self.check_gc(gc, rgbFace)
+ self.file.writePath(
+ path, transform,
+ rgbFace is None and gc.get_hatch_path() is None,
+ gc.get_sketch_params())
+ self.file.output(self.gc.paint())
+
+ def draw_path_collection(self, gc, master_transform, paths, all_transforms,
+ offsets, offsetTrans, facecolors, edgecolors,
+ linewidths, linestyles, antialiaseds, urls,
+ offset_position):
+ # We can only reuse the objects if the presence of fill and
+ # stroke (and the amount of alpha for each) is the same for
+ # all of them
+ can_do_optimization = True
+ facecolors = np.asarray(facecolors)
+ edgecolors = np.asarray(edgecolors)
+
+ if not len(facecolors):
+ filled = False
+ can_do_optimization = not gc.get_hatch()
+ else:
+ if np.all(facecolors[:, 3] == facecolors[0, 3]):
+ filled = facecolors[0, 3] != 0.0
+ else:
+ can_do_optimization = False
+
+ if not len(edgecolors):
+ stroked = False
+ else:
+ if np.all(np.asarray(linewidths) == 0.0):
+ stroked = False
+ elif np.all(edgecolors[:, 3] == edgecolors[0, 3]):
+ stroked = edgecolors[0, 3] != 0.0
+ else:
+ can_do_optimization = False
+
+ # Is the optimization worth it? Rough calculation:
+ # cost of emitting a path in-line is len_path * uses_per_path
+ # cost of XObject is len_path + 5 for the definition,
+ # uses_per_path for the uses
+ len_path = len(paths[0].vertices) if len(paths) > 0 else 0
+ uses_per_path = self._iter_collection_uses_per_path(
+ paths, all_transforms, offsets, facecolors, edgecolors)
+ should_do_optimization = \
+ len_path + uses_per_path + 5 < len_path * uses_per_path
+
+ if (not can_do_optimization) or (not should_do_optimization):
+ return RendererBase.draw_path_collection(
+ self, gc, master_transform, paths, all_transforms,
+ offsets, offsetTrans, facecolors, edgecolors,
+ linewidths, linestyles, antialiaseds, urls,
+ offset_position)
+
+ padding = np.max(linewidths)
+ path_codes = []
+ for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
+ master_transform, paths, all_transforms)):
+ name = self.file.pathCollectionObject(
+ gc, path, transform, padding, filled, stroked)
+ path_codes.append(name)
+
+ output = self.file.output
+ output(*self.gc.push())
+ lastx, lasty = 0, 0
+ for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
+ gc, master_transform, all_transforms, path_codes, offsets,
+ offsetTrans, facecolors, edgecolors, linewidths, linestyles,
+ antialiaseds, urls, offset_position):
+
+ self.check_gc(gc0, rgbFace)
+ dx, dy = xo - lastx, yo - lasty
+ output(1, 0, 0, 1, dx, dy, Op.concat_matrix, path_id,
+ Op.use_xobject)
+ lastx, lasty = xo, yo
+ output(*self.gc.pop())
+
+ def draw_markers(self, gc, marker_path, marker_trans, path, trans,
+ rgbFace=None):
+ # Same logic as in draw_path_collection
+ len_marker_path = len(marker_path)
+ uses = len(path)
+ if len_marker_path * uses < len_marker_path + uses + 5:
+ RendererBase.draw_markers(self, gc, marker_path, marker_trans,
+ path, trans, rgbFace)
+ return
+
+ self.check_gc(gc, rgbFace)
+ fill = gc.fill(rgbFace)
+ stroke = gc.stroke()
+
+ output = self.file.output
+ marker = self.file.markerObject(
+ marker_path, marker_trans, fill, stroke, self.gc._linewidth,
+ gc.get_joinstyle(), gc.get_capstyle())
+
+ output(Op.gsave)
+ lastx, lasty = 0, 0
+ for vertices, code in path.iter_segments(
+ trans,
+ clip=(0, 0, self.file.width*72, self.file.height*72),
+ simplify=False):
+ if len(vertices):
+ x, y = vertices[-2:]
+ if (x < 0 or y < 0 or
+ x > self.file.width * 72 or y > self.file.height * 72):
+ continue
+ dx, dy = x - lastx, y - lasty
+ output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
+ marker, Op.use_xobject)
+ lastx, lasty = x, y
+ output(Op.grestore)
+
+ def draw_gouraud_triangle(self, gc, points, colors, trans):
+ self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
+ colors.reshape((1, 3, 4)), trans)
+
+ def draw_gouraud_triangles(self, gc, points, colors, trans):
+ assert len(points) == len(colors)
+ assert points.ndim == 3
+ assert points.shape[1] == 3
+ assert points.shape[2] == 2
+ assert colors.ndim == 3
+ assert colors.shape[1] == 3
+ assert colors.shape[2] == 4
+
+ shape = points.shape
+ points = points.reshape((shape[0] * shape[1], 2))
+ tpoints = trans.transform(points)
+ tpoints = tpoints.reshape(shape)
+ name = self.file.addGouraudTriangles(tpoints, colors)
+ self.check_gc(gc)
+ self.file.output(name, Op.shading)
+
+ def _setup_textpos(self, x, y, angle, oldx=0, oldy=0, oldangle=0):
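+ # Unrotated text only needs a relative Td move; rotated text sets a
+ # full text matrix (Tm) carrying the rotation and absolute position.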
+ if angle == oldangle == 0:
+ self.file.output(x - oldx, y - oldy, Op.textpos)
+ else:
+ angle = angle / 180.0 * pi
+ self.file.output(cos(angle), sin(angle),
+ -sin(angle), cos(angle),
+ x, y, Op.textmatrix)
+ self.file.output(0, 0, Op.textpos)
+
+ def draw_mathtext(self, gc, x, y, s, prop, angle):
+ # TODO: fix positioning and encoding
+ width, height, descent, glyphs, rects, used_characters = \
+ self.mathtext_parser.parse(s, 72, prop)
+ self.merge_used_characters(used_characters)
+
+ # When using Type 3 fonts, we can't use character codes higher
+ # than 255, so we use the "Do" command to render those
+ # instead.
+ global_fonttype = rcParams['pdf.fonttype']
+
+ # Set up a global transformation matrix for the whole math expression
+ a = angle / 180.0 * pi
+ self.file.output(Op.gsave)
+ self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
+ Op.concat_matrix)
+
+ self.check_gc(gc, gc._rgb)
+ self.file.output(Op.begin_text)
+ prev_font = None, None
+ oldx, oldy = 0, 0
+ for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
+ if is_opentype_cff_font(fontname):
+ fonttype = 42
+ else:
+ fonttype = global_fonttype
+
+ if fonttype == 42 or num <= 255:
+ self._setup_textpos(ox, oy, 0, oldx, oldy)
+ oldx, oldy = ox, oy
+ if (fontname, fontsize) != prev_font:
+ self.file.output(self.file.fontName(fontname), fontsize,
+ Op.selectfont)
+ prev_font = fontname, fontsize
+ self.file.output(self.encode_string(unichr(num), fonttype),
+ Op.show)
+ self.file.output(Op.end_text)
+
+ # If using Type 3 fonts, render all of the multi-byte characters
+ # as XObjects using the 'Do' command.
+ if global_fonttype == 3:
+ for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
+ if is_opentype_cff_font(fontname):
+ fonttype = 42
+ else:
+ fonttype = global_fonttype
+
+ if fonttype == 3 and num > 255:
+ self.file.fontName(fontname)
+ self.file.output(Op.gsave,
+ 0.001 * fontsize, 0,
+ 0, 0.001 * fontsize,
+ ox, oy, Op.concat_matrix)
+ name = self.file._get_xobject_symbol_name(
+ fontname, symbol_name)
+ self.file.output(Name(name), Op.use_xobject)
+ self.file.output(Op.grestore)
+
+ # Draw any horizontal lines in the math layout
+ for ox, oy, width, height in rects:
+ self.file.output(Op.gsave, ox, oy, width, height,
+ Op.rectangle, Op.fill, Op.grestore)
+
+ # Pop off the global transformation
+ self.file.output(Op.grestore)
+
+ def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
+ texmanager = self.get_texmanager()
+ fontsize = prop.get_size_in_points()
+ dvifile = texmanager.make_dvi(s, fontsize)
+ with dviread.Dvi(dvifile, 72) as dvi:
+ page = next(iter(dvi))
+
+ # Gather font information and do some setup for combining
+ # characters into strings. The variable seq will contain a
+ # sequence of font and text entries. A font entry is a list
+ # ['font', name, size] where name is a Name object for the
+ # font. A text entry is ['text', x, y, glyphs, x+w] where x
+ # and y are the starting coordinates, w is the width, and
+ # glyphs is a list; in this phase it will always contain just
+ # one one-character string, but later it may have longer
+ # strings interspersed with kern amounts.
+ oldfont, seq = None, []
+ for x1, y1, dvifont, glyph, width in page.text:
+ if dvifont != oldfont:
+ pdfname = self.file.dviFontName(dvifont)
+ seq += [['font', pdfname, dvifont.size]]
+ oldfont = dvifont
+ # We need to convert the glyph numbers to bytes, and the easiest
+ # way to do this on both Python 2 and 3 is .encode('latin-1')
+ seq += [['text', x1, y1,
+ [six.unichr(glyph).encode('latin-1')], x1+width]]
+
+ # Find consecutive text strings with constant y coordinate and
+ # combine into a sequence of strings and kerns, or just one
+ # string (if any kerns would be less than 0.1 points).
+ i, curx, fontsize = 0, 0, None
+ while i < len(seq)-1:
+ elt, nxt = seq[i:i+2]
+ if elt[0] == 'font':
+ fontsize = elt[2]
+ elif elt[0] == nxt[0] == 'text' and elt[2] == nxt[2]:
+ offset = elt[4] - nxt[1]
+ if abs(offset) < 0.1:
+ elt[3][-1] += nxt[3][0]
+ elt[4] += nxt[4]-nxt[1]
+ else:
+ elt[3] += [offset*1000.0/fontsize, nxt[3][0]]
+ elt[4] = nxt[4]
+ del seq[i+1]
+ continue
+ i += 1
+
+ # Create a transform to map the dvi contents to the canvas.
+ mytrans = Affine2D().rotate_deg(angle).translate(x, y)
+
+ # Output the text.
+ self.check_gc(gc, gc._rgb)
+ self.file.output(Op.begin_text)
+ curx, cury, oldx, oldy = 0, 0, 0, 0
+ for elt in seq:
+ if elt[0] == 'font':
+ self.file.output(elt[1], elt[2], Op.selectfont)
+ elif elt[0] == 'text':
+ curx, cury = mytrans.transform_point((elt[1], elt[2]))
+ self._setup_textpos(curx, cury, angle, oldx, oldy)
+ oldx, oldy = curx, cury
+ if len(elt[3]) == 1:
+ self.file.output(elt[3][0], Op.show)
+ else:
+ self.file.output(elt[3], Op.showkern)
+ else:
+ assert False
+ self.file.output(Op.end_text)
+
+ # Then output the boxes (e.g., variable-length lines of square
+ # roots).
+ boxgc = self.new_gc()
+ boxgc.copy_properties(gc)
+ boxgc.set_linewidth(0)
+ pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.CLOSEPOLY]
+ for x1, y1, h, w in page.boxes:
+ path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
+ [0, 0]], pathops)
+ self.draw_path(boxgc, path, mytrans, gc._rgb)
+
+ def encode_string(self, s, fonttype):
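+ # Type 1 and Type 3 fonts are addressed with single-byte cp1252 codes;
+ # Type 42 fonts take two-byte codes through the Identity-H encoding.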
+ if fonttype in (1, 3):
+ return s.encode('cp1252', 'replace')
+ return s.encode('utf-16be', 'replace')
+
+ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
+ # TODO: combine consecutive texts into one BT/ET delimited section
+
+ # This function is rather complex, since there is no way to
+ # access characters of a Type 3 font with codes > 255. (Type
+ # 3 fonts can not have a CIDMap). Therefore, we break the
+ # string into chunks, where each chunk contains exclusively
+ # 1-byte or exclusively 2-byte characters, and output each
+ # chunk with a separate command. 1-byte characters use the regular
+ # text show command (Tj), whereas 2-byte characters use the
+ # use XObject command (Do). If using Type 42 fonts, all of
+ # this complication is avoided, but of course, those fonts can
+ # not be subsetted.
+
+ self.check_gc(gc, gc._rgb)
+ if ismath:
+ return self.draw_mathtext(gc, x, y, s, prop, angle)
+
+ fontsize = prop.get_size_in_points()
+
+ if rcParams['pdf.use14corefonts']:
+ font = self._get_font_afm(prop)
+ l, b, w, h = font.get_str_bbox(s)
+ fonttype = 1
+ else:
+ font = self._get_font_ttf(prop)
+ self.track_characters(font, s)
+ font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
+
+ fonttype = rcParams['pdf.fonttype']
+
+ # We can't subset all OpenType fonts, so switch to Type 42
+ # in that case.
+ if is_opentype_cff_font(font.fname):
+ fonttype = 42
+
+ def check_simple_method(s):
+ """Determine if we should use the simple or woven method
+ to output this text, and chunks the string into 1-byte and
+ 2-byte sections if necessary."""
+ use_simple_method = True
+ chunks = []
+
+ if not rcParams['pdf.use14corefonts']:
+ if fonttype == 3 and not isinstance(s, bytes) and len(s) != 0:
+ # Break the string into chunks where each chunk is either
+ # a string of chars <= 255, or a single character > 255.
+ s = six.text_type(s)
+ for c in s:
+ if ord(c) <= 255:
+ char_type = 1
+ else:
+ char_type = 2
+ if len(chunks) and chunks[-1][0] == char_type:
+ chunks[-1][1].append(c)
+ else:
+ chunks.append((char_type, [c]))
+ use_simple_method = (len(chunks) == 1 and
+ chunks[-1][0] == 1)
+ return use_simple_method, chunks
+
+ def draw_text_simple():
+ """Outputs text using the simple method."""
+ self.file.output(Op.begin_text,
+ self.file.fontName(prop),
+ fontsize,
+ Op.selectfont)
+ self._setup_textpos(x, y, angle)
+ self.file.output(self.encode_string(s, fonttype), Op.show,
+ Op.end_text)
+
+ def draw_text_woven(chunks):
+ """Outputs text using the woven method, alternating
+ between chunks of 1-byte characters and 2-byte characters.
+ Only used for Type 3 fonts."""
+ chunks = [(a, ''.join(b)) for a, b in chunks]
+
+ # Do the rotation and global translation as a single matrix
+ # concatenation up front
+ self.file.output(Op.gsave)
+ a = angle / 180.0 * pi
+ self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
+ Op.concat_matrix)
+
+ # Output all the 1-byte characters in a BT/ET group, then
+ # output all the 2-byte characters.
+ for mode in (1, 2):
+ newx = oldx = 0
+ # Output a 1-byte character chunk
+ if mode == 1:
+ self.file.output(Op.begin_text,
+ self.file.fontName(prop),
+ fontsize,
+ Op.selectfont)
+
+ for chunk_type, chunk in chunks:
+ if mode == 1 and chunk_type == 1:
+ self._setup_textpos(newx, 0, 0, oldx, 0, 0)
+ self.file.output(self.encode_string(chunk, fonttype),
+ Op.show)
+ oldx = newx
+
+ lastgind = None
+ for c in chunk:
+ ccode = ord(c)
+ gind = font.get_char_index(ccode)
+ if gind is not None:
+ if mode == 2 and chunk_type == 2:
+ glyph_name = font.get_glyph_name(gind)
+ self.file.output(Op.gsave)
+ self.file.output(0.001 * fontsize, 0,
+ 0, 0.001 * fontsize,
+ newx, 0, Op.concat_matrix)
+ name = self.file._get_xobject_symbol_name(
+ font.fname, glyph_name)
+ self.file.output(Name(name), Op.use_xobject)
+ self.file.output(Op.grestore)
+
+ # Move the pointer based on the character width
+ # and kerning
+ glyph = font.load_char(ccode,
+ flags=LOAD_NO_HINTING)
+ if lastgind is not None:
+ kern = font.get_kerning(
+ lastgind, gind, KERNING_UNFITTED)
+ else:
+ kern = 0
+ lastgind = gind
+ newx += kern/64.0 + glyph.linearHoriAdvance/65536.0
+
+ if mode == 1:
+ self.file.output(Op.end_text)
+
+ self.file.output(Op.grestore)
+
+ use_simple_method, chunks = check_simple_method(s)
+ if use_simple_method:
+ return draw_text_simple()
+ else:
+ return draw_text_woven(chunks)
+
+ def get_text_width_height_descent(self, s, prop, ismath):
+ if rcParams['text.usetex']:
+ texmanager = self.get_texmanager()
+ fontsize = prop.get_size_in_points()
+ w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
+ renderer=self)
+ return w, h, d
+
+ if ismath:
+ w, h, d, glyphs, rects, used_characters = \
+ self.mathtext_parser.parse(s, 72, prop)
+
+ elif rcParams['pdf.use14corefonts']:
+ font = self._get_font_afm(prop)
+ l, b, w, h, d = font.get_str_bbox_and_descent(s)
+ scale = prop.get_size_in_points()
+ w *= scale / 1000
+ h *= scale / 1000
+ d *= scale / 1000
+ else:
+ font = self._get_font_ttf(prop)
+ font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
+ w, h = font.get_width_height()
+ scale = (1.0 / 64.0)
+ w *= scale
+ h *= scale
+ d = font.get_descent()
+ d *= scale
+ return w, h, d
+
+ def _get_font_afm(self, prop):
+ key = hash(prop)
+ font = self.afm_font_cache.get(key)
+ if font is None:
+ filename = findfont(
+ prop, fontext='afm', directory=self.file._core14fontdir)
+ if filename is None:
+ filename = findfont(
+ "Helvetica", fontext='afm',
+ directory=self.file._core14fontdir)
+ font = self.afm_font_cache.get(filename)
+ if font is None:
+ with open(filename, 'rb') as fh:
+ font = AFM(fh)
+ self.afm_font_cache[filename] = font
+ self.afm_font_cache[key] = font
+ return font
+
+ def _get_font_ttf(self, prop):
+ filename = findfont(prop)
+ font = get_font(filename)
+ font.clear()
+ font.set_size(prop.get_size_in_points(), 72)
+ return font
+
+ def flipy(self):
+ return False
+
+ def get_canvas_width_height(self):
+ return self.file.width * 72.0, self.file.height * 72.0
+
+ def new_gc(self):
+ return GraphicsContextPdf(self.file)
+
+
+class GraphicsContextPdf(GraphicsContextBase):
+
+ def __init__(self, file):
+ GraphicsContextBase.__init__(self)
+ self._fillcolor = (0.0, 0.0, 0.0)
+ self._effective_alphas = (1.0, 1.0)
+ self.file = file
+ self.parent = None
+
+ def __repr__(self):
+ d = dict(self.__dict__)
+ del d['file']
+ del d['parent']
+ return repr(d)
+
+ def stroke(self):
+ """
+ Predicate: does the path need to be stroked (its outline drawn)?
+ This tests for the various conditions that disable stroking
+ the path, in which case it would presumably be filled.
+ """
+ # _linewidth > 0: in pdf a line of width 0 is drawn at minimum
+ # possible device width, but e.g., agg doesn't draw at all
+ return (self._linewidth > 0 and self._alpha > 0 and
+ (len(self._rgb) <= 3 or self._rgb[3] != 0.0))
+
+ def fill(self, *args):
+ """
+ Predicate: does the path need to be filled?
+
+ An optional argument can be used to specify an alternative
+ _fillcolor, as needed by RendererPdf.draw_markers.
+ """
+ if len(args):
+ _fillcolor = args[0]
+ else:
+ _fillcolor = self._fillcolor
+ return (self._hatch or
+ (_fillcolor is not None and
+ (len(_fillcolor) <= 3 or _fillcolor[3] != 0.0)))
+
+ def paint(self):
+ """
+ Return the appropriate pdf operator to cause the path to be
+ stroked, filled, or both.
+ """
+ return Op.paint_path(self.fill(), self.stroke())
+
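+ # The (fill, stroke) pair computed by fill() and stroke() above is
+ # mapped by Op.paint_path onto one of the standard PDF path-painting
+ # operators: fill only ('f'), stroke only ('S'), fill then stroke
+ # ('B'), or end the path without painting ('n').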
+ capstyles = {'butt': 0, 'round': 1, 'projecting': 2}
+ joinstyles = {'miter': 0, 'round': 1, 'bevel': 2}
+
+ def capstyle_cmd(self, style):
+ return [self.capstyles[style], Op.setlinecap]
+
+ def joinstyle_cmd(self, style):
+ return [self.joinstyles[style], Op.setlinejoin]
+
+ def linewidth_cmd(self, width):
+ return [width, Op.setlinewidth]
+
+ def dash_cmd(self, dashes):
+ offset, dash = dashes
+ if dash is None:
+ dash = []
+ offset = 0
+ return [list(dash), offset, Op.setdash]
+
+ def alpha_cmd(self, alpha, forced, effective_alphas):
+ name = self.file.alphaState(effective_alphas)
+ return [name, Op.setgstate]
+
+ def hatch_cmd(self, hatch, hatch_color):
+ if not hatch:
+ if self._fillcolor is not None:
+ return self.fillcolor_cmd(self._fillcolor)
+ else:
+ return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
+ else:
+ hatch_style = (hatch_color, self._fillcolor, hatch)
+ name = self.file.hatchPattern(hatch_style)
+ return [Name('Pattern'), Op.setcolorspace_nonstroke,
+ name, Op.setcolor_nonstroke]
+
+ def rgb_cmd(self, rgb):
+ if rcParams['pdf.inheritcolor']:
+ return []
+ if rgb[0] == rgb[1] == rgb[2]:
+ return [rgb[0], Op.setgray_stroke]
+ else:
+ return list(rgb[:3]) + [Op.setrgb_stroke]
+
+ def fillcolor_cmd(self, rgb):
+ if rgb is None or rcParams['pdf.inheritcolor']:
+ return []
+ elif rgb[0] == rgb[1] == rgb[2]:
+ return [rgb[0], Op.setgray_nonstroke]
+ else:
+ return list(rgb[:3]) + [Op.setrgb_nonstroke]
+
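+ # Each *_cmd helper above returns a flat [operand, ..., operator]
+ # list that is later written into the PDF content stream; for
+ # example (assuming pdf.inheritcolor is False):
+ #   dash_cmd((3, [6, 2])) -> [[6, 2], 3, Op.setdash]
+ #   rgb_cmd((1.0, 0.0, 0.0, 1.0)) -> [1.0, 0.0, 0.0, Op.setrgb_stroke]
+ #   fillcolor_cmd((0.5, 0.5, 0.5, 1.0)) -> [0.5, Op.setgray_nonstroke]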
+ def push(self):
+ parent = GraphicsContextPdf(self.file)
+ parent.copy_properties(self)
+ parent.parent = self.parent
+ self.parent = parent
+ return [Op.gsave]
+
+ def pop(self):
+ assert self.parent is not None
+ self.copy_properties(self.parent)
+ self.parent = self.parent.parent
+ return [Op.grestore]
+
+ def clip_cmd(self, cliprect, clippath):
+ """Set clip rectangle. Calls self.pop() and self.push()."""
+ cmds = []
+ # Pop graphics state until we hit the right one or the stack is empty
+ while ((self._cliprect, self._clippath) != (cliprect, clippath)
+ and self.parent is not None):
+ cmds.extend(self.pop())
+ # Unless we hit the right one, set the clip polygon
+ if ((self._cliprect, self._clippath) != (cliprect, clippath) or
+ self.parent is None):
+ cmds.extend(self.push())
+ if self._cliprect != cliprect:
+ cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
+ if self._clippath != clippath:
+ path, affine = clippath.get_transformed_path_and_affine()
+ cmds.extend(
+ PdfFile.pathOperations(path, affine, simplify=False) +
+ [Op.clip, Op.endpath])
+ return cmds
+
+ commands = (
+ # must come first since may pop
+ (('_cliprect', '_clippath'), clip_cmd),
+ (('_alpha', '_forced_alpha', '_effective_alphas'), alpha_cmd),
+ (('_capstyle',), capstyle_cmd),
+ (('_fillcolor',), fillcolor_cmd),
+ (('_joinstyle',), joinstyle_cmd),
+ (('_linewidth',), linewidth_cmd),
+ (('_dashes',), dash_cmd),
+ (('_rgb',), rgb_cmd),
+ # must come after fillcolor and rgb
+ (('_hatch', '_hatch_color'), hatch_cmd),
+ )
+
+ def delta(self, other):
+ """
+ Copy properties of other into self and return PDF commands
+ needed to transform self into other.
+ """
+ cmds = []
+ fill_performed = False
+ for params, cmd in self.commands:
+ different = False
+ for p in params:
+ ours = getattr(self, p)
+ theirs = getattr(other, p)
+ try:
+ if ours is None or theirs is None:
+ different = ours is not theirs
+ else:
+ different = bool(ours != theirs)
+ except ValueError:
+ ours = np.asarray(ours)
+ theirs = np.asarray(theirs)
+ different = (ours.shape != theirs.shape or
+ np.any(ours != theirs))
+ if different:
+ break
+
+ # Need to update hatching if we also updated fillcolor
+ if params == ('_hatch', '_hatch_color') and fill_performed:
+ different = True
+
+ if different:
+ if params == ('_fillcolor',):
+ fill_performed = True
+ theirs = [getattr(other, p) for p in params]
+ cmds.extend(cmd(self, *theirs))
+ for p in params:
+ setattr(self, p, getattr(other, p))
+ return cmds
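+ # For example, if the two contexts differ only in line width,
+ # delta() emits just [<new width>, Op.setlinewidth] and copies the
+ # new _linewidth onto self; unchanged properties produce no output.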
+
+ def copy_properties(self, other):
+ """
+ Copy properties of other into self.
+ """
+ GraphicsContextBase.copy_properties(self, other)
+ fillcolor = getattr(other, '_fillcolor', self._fillcolor)
+ effective_alphas = getattr(other, '_effective_alphas',
+ self._effective_alphas)
+ self._fillcolor = fillcolor
+ self._effective_alphas = effective_alphas
+
+ def finalize(self):
+ """
+ Make sure every pushed graphics state is popped.
+ """
+ cmds = []
+ while self.parent is not None:
+ cmds.extend(self.pop())
+ return cmds
+
+########################################################################
+#
+# The following functions and classes are for pylab and implement
+# window/figure managers, etc...
+#
+########################################################################
+
+
+class PdfPages(object):
+ """
+ A multi-page PDF file.
+
+ Examples
+ --------
+
+ >>> import matplotlib.pyplot as plt
+ >>> # Initialize:
+ >>> with PdfPages('foo.pdf') as pdf:
+ ... # As many times as you like, create a figure fig and save it:
+ ... fig = plt.figure()
+ ... pdf.savefig(fig)
+ ... # When no figure is specified the current figure is saved
+ ... pdf.savefig()
+
+ Notes
+ -----
+
+ In reality :class:`PdfPages` is a thin wrapper around :class:`PdfFile`, in
+ order to avoid confusion when using :func:`~matplotlib.pyplot.savefig` and
+ forgetting the format argument.
+ """
+ __slots__ = ('_file', 'keep_empty')
+
+ def __init__(self, filename, keep_empty=True, metadata=None):
+ """
+ Create a new PdfPages object.
+
+ Parameters
+ ----------
+
+ filename : str
+ Plots using :meth:`PdfPages.savefig` will be written to a file at
+ this location. The file is opened at once and any older file with
+ the same name is overwritten.
+ keep_empty : bool, optional
+ If set to False, then empty pdf files will be deleted automatically
+ when closed.
+ metadata : dictionary, optional
+ Information dictionary object (see PDF reference section 10.2.1
+ 'Document Information Dictionary'), e.g.:
+ `{'Creator': 'My software', 'Author': 'Me',
+ 'Title': 'Awesome fig'}`
+
+ The standard keys are `'Title'`, `'Author'`, `'Subject'`,
+ `'Keywords'`, `'Creator'`, `'Producer'`, `'CreationDate'`,
+ `'ModDate'`, and `'Trapped'`. Values have been predefined
+ for `'Creator'`, `'Producer'` and `'CreationDate'`. They
+ can be removed by setting them to `None`.
+
+ """
+ self._file = PdfFile(filename, metadata=metadata)
+ self.keep_empty = keep_empty
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+
+ def close(self):
+ """
+ Finalize this object, making the underlying file a complete
+ PDF file.
+ """
+ self._file.finalize()
+ self._file.close()
+ if (self.get_pagecount() == 0 and not self.keep_empty and
+ not self._file.passed_in_file_object):
+ os.remove(self._file.fh.name)
+ self._file = None
+
+ def infodict(self):
+ """
+ Return a modifiable information dictionary object
+ (see PDF reference section 10.2.1 'Document Information
+ Dictionary').
+ """
+ return self._file.infoDict
+
+ def savefig(self, figure=None, **kwargs):
+ """
+ Saves a :class:`~matplotlib.figure.Figure` to this file as a new page.
+
+ Any other keyword arguments are passed to
+ :meth:`~matplotlib.figure.Figure.savefig`.
+
+ Parameters
+ ----------
+
+ figure : :class:`~matplotlib.figure.Figure` or int, optional
+ Specifies what figure is saved to file. If not specified, the
+ active figure is saved. If a :class:`~matplotlib.figure.Figure`
+ instance is provided, this figure is saved. If an int is specified,
+ the figure instance to save is looked up by number.
+ """
+ if not isinstance(figure, Figure):
+ if figure is None:
+ manager = Gcf.get_active()
+ else:
+ manager = Gcf.get_fig_manager(figure)
+ if manager is None:
+ raise ValueError("No figure {}".format(figure))
+ figure = manager.canvas.figure
+ # Force use of pdf backend, as PdfPages is tightly coupled with it.
+ try:
+ orig_canvas = figure.canvas
+ figure.canvas = FigureCanvasPdf(figure)
+ figure.savefig(self, format="pdf", **kwargs)
+ finally:
+ figure.canvas = orig_canvas
+
+ def get_pagecount(self):
+ """
+ Returns the current number of pages in the multipage pdf file.
+ """
+ return len(self._file.pageList)
+
+ def attach_note(self, text, positionRect=[-100, -100, 0, 0]):
+ """
+ Add a new text note to the page to be saved next. The optional
+ positionRect specifies the position of the new note on the
+ page. By default it lies outside the page, to make sure it is
+ invisible on printouts.
+ """
+ self._file.newTextnote(text, positionRect)
+
+
+class FigureCanvasPdf(FigureCanvasBase):
+ """
+ The canvas the figure renders into. Calls the draw and print fig
+ methods, creates the renderers, etc...
+
+ Attributes
+ ----------
+ figure : `matplotlib.figure.Figure`
+ A high-level Figure instance
+
+ """
+
+ fixed_dpi = 72
+
+ def draw(self):
+ pass
+
+ filetypes = {'pdf': 'Portable Document Format'}
+
+ def get_default_filetype(self):
+ return 'pdf'
+
+ def print_pdf(self, filename, **kwargs):
+ image_dpi = kwargs.get('dpi', 72) # dpi to use for images
+ self.figure.set_dpi(72) # there are 72 pdf points to an inch
+ width, height = self.figure.get_size_inches()
+ if isinstance(filename, PdfPages):
+ file = filename._file
+ else:
+ file = PdfFile(filename, metadata=kwargs.pop("metadata", None))
+ try:
+ file.newPage(width, height)
+ _bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
+ renderer = MixedModeRenderer(
+ self.figure, width, height, image_dpi,
+ RendererPdf(file, image_dpi, height, width),
+ bbox_inches_restore=_bbox_inches_restore)
+ self.figure.draw(renderer)
+ renderer.finalize()
+ if not isinstance(filename, PdfPages):
+ file.finalize()
+ finally:
+ if isinstance(filename, PdfPages): # finish off this page
+ file.endStream()
+ else: # we opened the file above; now finish it off
+ file.close()
+
+
+class FigureManagerPdf(FigureManagerBase):
+ pass
+
+
+@_Backend.export
+class _BackendPdf(_Backend):
+ FigureCanvas = FigureCanvasPdf
+ FigureManager = FigureManagerPdf
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_pgf.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_pgf.py
new file mode 100644
index 00000000000..cec6358452d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_pgf.py
@@ -0,0 +1,990 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import atexit
+import codecs
+import errno
+import math
+import os
+import re
+import shutil
+import sys
+import tempfile
+import warnings
+import weakref
+
+import matplotlib as mpl
+from matplotlib import _png, rcParams
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
+ RendererBase)
+from matplotlib.backends.backend_mixed import MixedModeRenderer
+from matplotlib.cbook import is_writable_file_like
+from matplotlib.compat import subprocess
+from matplotlib.compat.subprocess import check_output
+from matplotlib.path import Path
+
+
+###############################################################################
+
+# Create a list of system fonts; all of these should work with xe/lua-latex.
+system_fonts = []
+if sys.platform.startswith('win'):
+ from matplotlib import font_manager
+ for f in font_manager.win32InstalledFonts():
+ try:
+ system_fonts.append(font_manager.get_font(str(f)).family_name)
+ except:
+ pass # unknown error, skip this font
+else:
+ # assuming fontconfig is installed and the command 'fc-list' exists
+ try:
+ # list scalable (non-bitmap) fonts
+ fc_list = check_output([str('fc-list'), ':outline,scalable', 'family'])
+ fc_list = fc_list.decode('utf8')
+ system_fonts = [f.split(',')[0] for f in fc_list.splitlines()]
+ system_fonts = list(set(system_fonts))
+ except:
+ warnings.warn('error getting fonts from fc-list', UserWarning)
+
+def get_texcommand():
+ """Get chosen TeX system from rc."""
+ texsystem_options = ["xelatex", "lualatex", "pdflatex"]
+ texsystem = rcParams["pgf.texsystem"]
+ return texsystem if texsystem in texsystem_options else "xelatex"
+
+
+def get_fontspec():
+ """Build fontspec preamble from rc."""
+ latex_fontspec = []
+ texcommand = get_texcommand()
+
+ if texcommand != "pdflatex":
+ latex_fontspec.append("\\usepackage{fontspec}")
+
+ if texcommand != "pdflatex" and rcParams["pgf.rcfonts"]:
+ # try to find fonts from rc parameters
+ families = ["serif", "sans-serif", "monospace"]
+ fontspecs = [r"\setmainfont{%s}", r"\setsansfont{%s}",
+ r"\setmonofont{%s}"]
+ for family, fontspec in zip(families, fontspecs):
+ matches = [f for f in rcParams["font." + family]
+ if f in system_fonts]
+ if matches:
+ latex_fontspec.append(fontspec % matches[0])
+ else:
+ pass # no fonts found, fall back to the LaTeX default
+
+ return "\n".join(latex_fontspec)
+
+
+def get_preamble():
+ """Get LaTeX preamble from rc."""
+ return "\n".join(rcParams["pgf.preamble"])
+
+###############################################################################
+
+# This almost made me cry!!!
+# In the end, it's better to use only one unit for all coordinates, since the
+# arithmetic in latex seems to produce inaccurate conversions.
+latex_pt_to_in = 1. / 72.27
+latex_in_to_pt = 1. / latex_pt_to_in
+mpl_pt_to_in = 1. / 72.
+mpl_in_to_pt = 1. / mpl_pt_to_in
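+# Example conversion using the factors above: a 12 pt matplotlib line
+# width corresponds to 12 * mpl_pt_to_in * latex_in_to_pt ~= 12.045
+# TeX (latex) points, which is how linewidths are emitted below.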
+
+###############################################################################
+# helper functions
+
+NO_ESCAPE = r"(?<!\\)(?:\\\\)*"
+re_mathsep = re.compile(NO_ESCAPE + r"\$")
+re_escapetext = re.compile(NO_ESCAPE + "([_^$%])")
+repl_escapetext = lambda m: "\\" + m.group(1)
+re_mathdefault = re.compile(NO_ESCAPE + r"(\\mathdefault)")
+repl_mathdefault = lambda m: m.group(0)[:-len(m.group(1))]
+
+
+def common_texification(text):
+ """
+ Do some necessary and/or useful substitutions for texts to be included in
+ LaTeX documents.
+ """
+
+ # Sometimes, matplotlib adds the unknown command \mathdefault.
+ # Not using \mathnormal instead since this looks odd for the latex cm font.
+ text = re_mathdefault.sub(repl_mathdefault, text)
+
+ # split text into normaltext and inline math parts
+ parts = re_mathsep.split(text)
+ for i, s in enumerate(parts):
+ if not i % 2:
+ # textmode replacements
+ s = re_escapetext.sub(repl_escapetext, s)
+ else:
+ # mathmode replacements
+ s = r"\(\displaystyle %s\)" % s
+ parts[i] = s
+
+ return "".join(parts)
+
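+# For illustration, common_texification("100% of $x_2$") returns
+# r"100\% of \(\displaystyle x_2\)": text-mode specials (_ ^ $ %) are
+# escaped and inline math is wrapped in \(\displaystyle ...\).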
+
+def writeln(fh, line):
+ # every line of a file included with \\input must be terminated with %
+ # if not, latex will create additional vertical spaces for some reason
+ fh.write(line)
+ fh.write("%\n")
+
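+# For example, writeln(fh, r"\pgfusepath{clip}") writes the single line
+# "\pgfusepath{clip}%" followed by a newline.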
+
+def _font_properties_str(prop):
+ # translate font properties to latex commands, return as string
+ commands = []
+
+ families = {"serif": r"\rmfamily", "sans": r"\sffamily",
+ "sans-serif": r"\sffamily", "monospace": r"\ttfamily"}
+ family = prop.get_family()[0]
+ if family in families:
+ commands.append(families[family])
+ elif family in system_fonts and get_texcommand() != "pdflatex":
+ commands.append(r"\setmainfont{%s}\rmfamily" % family)
+ else:
+ pass # print warning?
+
+ size = prop.get_size_in_points()
+ commands.append(r"\fontsize{%f}{%f}" % (size, size * 1.2))
+
+ styles = {"normal": r"", "italic": r"\itshape", "oblique": r"\slshape"}
+ commands.append(styles[prop.get_style()])
+
+ boldstyles = ["semibold", "demibold", "demi", "bold", "heavy",
+ "extra bold", "black"]
+ if prop.get_weight() in boldstyles:
+ commands.append(r"\bfseries")
+
+ commands.append(r"\selectfont")
+ return "".join(commands)
+
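+# A rough sketch of _font_properties_str output: a 10 pt italic
+# monospace FontProperties instance typically yields
+# r"\ttfamily\fontsize{10.000000}{12.000000}\itshape\selectfont".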
+
+def make_pdf_to_png_converter():
+ """
+ Returns a function that converts a pdf file to a png file.
+ """
+
+ tools_available = []
+ # check for pdftocairo
+ try:
+ check_output([str("pdftocairo"), "-v"], stderr=subprocess.STDOUT)
+ tools_available.append("pdftocairo")
+ except:
+ pass
+ # check for ghostscript
+ gs, ver = mpl.checkdep_ghostscript()
+ if gs:
+ tools_available.append("gs")
+
+ # pick converter
+ if "pdftocairo" in tools_available:
+ def cairo_convert(pdffile, pngfile, dpi):
+ cmd = [str("pdftocairo"), "-singlefile", "-png", "-r", "%d" % dpi,
+ pdffile, os.path.splitext(pngfile)[0]]
+ check_output(cmd, stderr=subprocess.STDOUT)
+ return cairo_convert
+ elif "gs" in tools_available:
+ def gs_convert(pdffile, pngfile, dpi):
+ cmd = [str(gs),
+ '-dQUIET', '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dNOPROMPT',
+ '-dUseCIEColor', '-dTextAlphaBits=4',
+ '-dGraphicsAlphaBits=4', '-dDOINTERPOLATE',
+ '-sDEVICE=png16m', '-sOutputFile=%s' % pngfile,
+ '-r%d' % dpi, pdffile]
+ check_output(cmd, stderr=subprocess.STDOUT)
+ return gs_convert
+ else:
+ raise RuntimeError("No suitable pdf to png renderer found.")
+
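+# Hypothetical usage of the converter returned above (file names are
+# only illustrative):
+#   convert = make_pdf_to_png_converter()
+#   convert("figure.pdf", "figure.png", dpi=300)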
+
+class LatexError(Exception):
+ def __init__(self, message, latex_output=""):
+ Exception.__init__(self, message)
+ self.latex_output = latex_output
+
+
+class LatexManagerFactory(object):
+ previous_instance = None
+
+ @staticmethod
+ def get_latex_manager():
+ texcommand = get_texcommand()
+ latex_header = LatexManager._build_latex_header()
+ prev = LatexManagerFactory.previous_instance
+
+ # Check if the previous instance of LatexManager can be reused.
+ if (prev and prev.latex_header == latex_header
+ and prev.texcommand == texcommand):
+ if rcParams["pgf.debug"]:
+ print("reusing LatexManager")
+ return prev
+ else:
+ if rcParams["pgf.debug"]:
+ print("creating LatexManager")
+ new_inst = LatexManager()
+ LatexManagerFactory.previous_instance = new_inst
+ return new_inst
+
+
+class LatexManager(object):
+ """
+ The LatexManager opens an instance of the LaTeX application for
+ determining the metrics of text elements. The LaTeX environment can be
+ modified by setting fonts and/or a custom preamble in the rc parameters.
+ """
+ _unclean_instances = weakref.WeakSet()
+
+ @staticmethod
+ def _build_latex_header():
+ latex_preamble = get_preamble()
+ latex_fontspec = get_fontspec()
+ # Create LaTeX header with some content, else LaTeX will load some math
+ # fonts later when we don't expect the additional output on stdout.
+ # TODO: is this sufficient?
+ latex_header = [r"\documentclass{minimal}",
+ latex_preamble,
+ latex_fontspec,
+ r"\begin{document}",
+ r"text $math \mu$", # force latex to load fonts now
+ r"\typeout{pgf_backend_query_start}"]
+ return "\n".join(latex_header)
+
+ @staticmethod
+ def _cleanup_remaining_instances():
+ unclean_instances = list(LatexManager._unclean_instances)
+ for latex_manager in unclean_instances:
+ latex_manager._cleanup()
+
+ def _stdin_writeln(self, s):
+ self.latex_stdin_utf8.write(s)
+ self.latex_stdin_utf8.write("\n")
+ self.latex_stdin_utf8.flush()
+
+ def _expect(self, s):
+ exp = s.encode("utf8")
+ buf = bytearray()
+ while True:
+ b = self.latex.stdout.read(1)
+ buf += b
+ if buf[-len(exp):] == exp:
+ break
+ if not len(b):
+ raise LatexError("LaTeX process halted", buf.decode("utf8"))
+ return buf.decode("utf8")
+
+ def _expect_prompt(self):
+ return self._expect("\n*")
+
+ def __init__(self):
+ # store references for __del__
+ self._os_path = os.path
+ self._shutil = shutil
+ self._debug = rcParams["pgf.debug"]
+
+ # create a tmp directory for running latex, remember to cleanup
+ self.tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_lm_")
+ LatexManager._unclean_instances.add(self)
+
+ # test the LaTeX setup to ensure a clean startup of the subprocess
+ self.texcommand = get_texcommand()
+ self.latex_header = LatexManager._build_latex_header()
+ latex_end = "\n\\makeatletter\n\\@@end\n"
+ try:
+ latex = subprocess.Popen([str(self.texcommand), "-halt-on-error"],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ cwd=self.tmpdir)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise RuntimeError(
+ "Latex command not found. Install %r or change "
+ "pgf.texsystem to the desired command." % self.texcommand)
+ else:
+ raise RuntimeError(
+ "Error starting process %r" % self.texcommand)
+ test_input = self.latex_header + latex_end
+ stdout, stderr = latex.communicate(test_input.encode("utf-8"))
+ if latex.returncode != 0:
+ raise LatexError("LaTeX returned an error, probably missing font "
+ "or error in preamble:\n%s" % stdout)
+
+ # open LaTeX process for real work
+ latex = subprocess.Popen([str(self.texcommand), "-halt-on-error"],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ cwd=self.tmpdir)
+ self.latex = latex
+ self.latex_stdin_utf8 = codecs.getwriter("utf8")(self.latex.stdin)
+ # write header with 'pgf_backend_query_start' token
+ self._stdin_writeln(self._build_latex_header())
+ # read all lines until our 'pgf_backend_query_start' token appears
+ self._expect("*pgf_backend_query_start")
+ self._expect_prompt()
+
+ # cache for strings already processed
+ self.str_cache = {}
+
+ def _cleanup(self):
+ if not self._os_path.isdir(self.tmpdir):
+ return
+ try:
+ self.latex.communicate()
+ self.latex_stdin_utf8.close()
+ self.latex.stdout.close()
+ except:
+ pass
+ try:
+ self._shutil.rmtree(self.tmpdir)
+ LatexManager._unclean_instances.discard(self)
+ except:
+ sys.stderr.write("error deleting tmp directory %s\n" % self.tmpdir)
+
+ def __del__(self):
+ if self._debug:
+ print("deleting LatexManager")
+ self._cleanup()
+
+ def get_width_height_descent(self, text, prop):
+ """
+ Get the width, total height and descent for a text typeset by the
+ current LaTeX environment.
+ """
+
+ # apply font properties and define textbox
+ prop_cmds = _font_properties_str(prop)
+ textbox = "\\sbox0{%s %s}" % (prop_cmds, text)
+
+ # check cache
+ if textbox in self.str_cache:
+ return self.str_cache[textbox]
+
+ # send textbox to LaTeX and wait for prompt
+ self._stdin_writeln(textbox)
+ try:
+ self._expect_prompt()
+ except LatexError as e:
+ raise ValueError("Error processing '{}'\nLaTeX Output:\n{}"
+ .format(text, e.latex_output))
+
+ # typeout width, height and text offset of the last textbox
+ self._stdin_writeln(r"\typeout{\the\wd0,\the\ht0,\the\dp0}")
+ # read answer from latex and advance to the next prompt
+ try:
+ answer = self._expect_prompt()
+ except LatexError as e:
+ raise ValueError("Error processing '{}'\nLaTeX Output:\n{}"
+ .format(text, e.latex_output))
+
+ # parse metrics from the answer string
+ try:
+ width, height, offset = answer.splitlines()[0].split(",")
+ except:
+ raise ValueError("Error processing '{}'\nLaTeX Output:\n{}"
+ .format(text, answer))
+ w, h, o = float(width[:-2]), float(height[:-2]), float(offset[:-2])
+
+ # the height returned from LaTeX goes from base to top.
+ # the height matplotlib expects goes from bottom to top.
+ self.str_cache[textbox] = (w, h + o, o)
+ return w, h + o, o
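+ # For example, an answer line of "25.0pt,6.94444pt,0.0pt" parses to
+ # width=25.0, height+descent=6.94444 and descent=0.0, all in TeX
+ # points (the trailing "pt" is stripped before float conversion).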
+
+
+class RendererPgf(RendererBase):
+
+ def __init__(self, figure, fh, dummy=False):
+ """
+ Creates a new PGF renderer that translates any drawing instruction
+ into text commands to be interpreted in a latex pgfpicture environment.
+
+ Attributes
+ ----------
+ figure : `matplotlib.figure.Figure`
+ Matplotlib figure to initialize height, width and dpi from.
+ fh : file-like
+ File handle for the output of the drawing commands.
+
+ """
+ RendererBase.__init__(self)
+ self.dpi = figure.dpi
+ self.fh = fh
+ self.figure = figure
+ self.image_counter = 0
+
+ # get LatexManager instance
+ self.latexManager = LatexManagerFactory.get_latex_manager()
+
+ if dummy:
+ # dummy==True deactivates all drawing methods
+ nop = lambda *args, **kwargs: None
+ for m in RendererPgf.__dict__:
+ if m.startswith("draw_"):
+ self.__dict__[m] = nop
+ else:
+ # if fh does not belong to a filename, deactivate draw_image
+ if not hasattr(fh, 'name') or not os.path.exists(fh.name):
+ warnings.warn("streamed pgf-code does not support raster "
+ "graphics, consider using the pgf-to-pdf option",
+ UserWarning)
+ self.__dict__["draw_image"] = lambda *args, **kwargs: None
+
+ def draw_markers(self, gc, marker_path, marker_trans, path, trans,
+ rgbFace=None):
+ writeln(self.fh, r"\begin{pgfscope}")
+
+ # convert from display units to in
+ f = 1. / self.dpi
+
+ # set style and clip
+ self._print_pgf_clip(gc)
+ self._print_pgf_path_styles(gc, rgbFace)
+
+ # build marker definition
+ bl, tr = marker_path.get_extents(marker_trans).get_points()
+ coords = bl[0] * f, bl[1] * f, tr[0] * f, tr[1] * f
+ writeln(self.fh,
+ r"\pgfsys@defobject{currentmarker}"
+ r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{" % coords)
+ self._print_pgf_path(None, marker_path, marker_trans)
+ self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
+ fill=rgbFace is not None)
+ writeln(self.fh, r"}")
+
+ # draw marker for each vertex
+ for point, code in path.iter_segments(trans, simplify=False):
+ x, y = point[0] * f, point[1] * f
+ writeln(self.fh, r"\begin{pgfscope}")
+ writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (x, y))
+ writeln(self.fh, r"\pgfsys@useobject{currentmarker}{}")
+ writeln(self.fh, r"\end{pgfscope}")
+
+ writeln(self.fh, r"\end{pgfscope}")
+
+ def draw_path(self, gc, path, transform, rgbFace=None):
+ writeln(self.fh, r"\begin{pgfscope}")
+ # draw the path
+ self._print_pgf_clip(gc)
+ self._print_pgf_path_styles(gc, rgbFace)
+ self._print_pgf_path(gc, path, transform, rgbFace)
+ self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
+ fill=rgbFace is not None)
+ writeln(self.fh, r"\end{pgfscope}")
+
+ # if present, draw pattern on top
+ if gc.get_hatch():
+ writeln(self.fh, r"\begin{pgfscope}")
+ self._print_pgf_path_styles(gc, rgbFace)
+
+ # combine clip and path for clipping
+ self._print_pgf_clip(gc)
+ self._print_pgf_path(gc, path, transform, rgbFace)
+ writeln(self.fh, r"\pgfusepath{clip}")
+
+ # build pattern definition
+ writeln(self.fh,
+ r"\pgfsys@defobject{currentpattern}"
+ r"{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}{")
+ writeln(self.fh, r"\begin{pgfscope}")
+ writeln(self.fh,
+ r"\pgfpathrectangle"
+ r"{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}")
+ writeln(self.fh, r"\pgfusepath{clip}")
+ scale = mpl.transforms.Affine2D().scale(self.dpi)
+ self._print_pgf_path(None, gc.get_hatch_path(), scale)
+ self._pgf_path_draw(stroke=True)
+ writeln(self.fh, r"\end{pgfscope}")
+ writeln(self.fh, r"}")
+ # repeat pattern, filling the bounding rect of the path
+ f = 1. / self.dpi
+ (xmin, ymin), (xmax, ymax) = \
+ path.get_extents(transform).get_points()
+ xmin, xmax = f * xmin, f * xmax
+ ymin, ymax = f * ymin, f * ymax
+ repx, repy = int(math.ceil(xmax-xmin)), int(math.ceil(ymax-ymin))
+ writeln(self.fh,
+ r"\pgfsys@transformshift{%fin}{%fin}" % (xmin, ymin))
+ for iy in range(repy):
+ for ix in range(repx):
+ writeln(self.fh, r"\pgfsys@useobject{currentpattern}{}")
+ writeln(self.fh, r"\pgfsys@transformshift{1in}{0in}")
+ writeln(self.fh, r"\pgfsys@transformshift{-%din}{0in}" % repx)
+ writeln(self.fh, r"\pgfsys@transformshift{0in}{1in}")
+
+ writeln(self.fh, r"\end{pgfscope}")
+
+ def _print_pgf_clip(self, gc):
+ f = 1. / self.dpi
+ # check for clip box
+ bbox = gc.get_clip_rectangle()
+ if bbox:
+ p1, p2 = bbox.get_points()
+ w, h = p2 - p1
+ coords = p1[0] * f, p1[1] * f, w * f, h * f
+ writeln(self.fh,
+ r"\pgfpathrectangle"
+ r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}"
+ % coords)
+ writeln(self.fh, r"\pgfusepath{clip}")
+
+ # check for clip path
+ clippath, clippath_trans = gc.get_clip_path()
+ if clippath is not None:
+ self._print_pgf_path(gc, clippath, clippath_trans)
+ writeln(self.fh, r"\pgfusepath{clip}")
+
+ def _print_pgf_path_styles(self, gc, rgbFace):
+ # cap style
+ capstyles = {"butt": r"\pgfsetbuttcap",
+ "round": r"\pgfsetroundcap",
+ "projecting": r"\pgfsetrectcap"}
+ writeln(self.fh, capstyles[gc.get_capstyle()])
+
+ # join style
+ joinstyles = {"miter": r"\pgfsetmiterjoin",
+ "round": r"\pgfsetroundjoin",
+ "bevel": r"\pgfsetbeveljoin"}
+ writeln(self.fh, joinstyles[gc.get_joinstyle()])
+
+ # filling
+ has_fill = rgbFace is not None
+
+ if gc.get_forced_alpha():
+ fillopacity = strokeopacity = gc.get_alpha()
+ else:
+ strokeopacity = gc.get_rgb()[3]
+ fillopacity = rgbFace[3] if has_fill and len(rgbFace) > 3 else 1.0
+
+ if has_fill:
+ writeln(self.fh,
+ r"\definecolor{currentfill}{rgb}{%f,%f,%f}"
+ % tuple(rgbFace[:3]))
+ writeln(self.fh, r"\pgfsetfillcolor{currentfill}")
+ if has_fill and fillopacity != 1.0:
+ writeln(self.fh, r"\pgfsetfillopacity{%f}" % fillopacity)
+
+ # linewidth and color
+ lw = gc.get_linewidth() * mpl_pt_to_in * latex_in_to_pt
+ stroke_rgba = gc.get_rgb()
+ writeln(self.fh, r"\pgfsetlinewidth{%fpt}" % lw)
+ writeln(self.fh,
+ r"\definecolor{currentstroke}{rgb}{%f,%f,%f}"
+ % stroke_rgba[:3])
+ writeln(self.fh, r"\pgfsetstrokecolor{currentstroke}")
+ if strokeopacity != 1.0:
+ writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % strokeopacity)
+
+ # line style
+ dash_offset, dash_list = gc.get_dashes()
+ if dash_list is None:
+ writeln(self.fh, r"\pgfsetdash{}{0pt}")
+ else:
+ writeln(self.fh,
+ r"\pgfsetdash{%s}{%fpt}"
+ % ("".join(r"{%fpt}" % dash for dash in dash_list),
+ dash_offset))
+
+ def _print_pgf_path(self, gc, path, transform, rgbFace=None):
+ f = 1. / self.dpi
+ # check for clip box / ignore clip for filled paths
+ bbox = gc.get_clip_rectangle() if gc else None
+ if bbox and (rgbFace is None):
+ p1, p2 = bbox.get_points()
+ clip = (p1[0], p1[1], p2[0], p2[1])
+ else:
+ clip = None
+ # build path
+ for points, code in path.iter_segments(transform, clip=clip):
+ if code == Path.MOVETO:
+ x, y = tuple(points)
+ writeln(self.fh,
+ r"\pgfpathmoveto{\pgfqpoint{%fin}{%fin}}" %
+ (f * x, f * y))
+ elif code == Path.CLOSEPOLY:
+ writeln(self.fh, r"\pgfpathclose")
+ elif code == Path.LINETO:
+ x, y = tuple(points)
+ writeln(self.fh,
+ r"\pgfpathlineto{\pgfqpoint{%fin}{%fin}}" %
+ (f * x, f * y))
+ elif code == Path.CURVE3:
+ cx, cy, px, py = tuple(points)
+ coords = cx * f, cy * f, px * f, py * f
+ writeln(self.fh,
+ r"\pgfpathquadraticcurveto"
+ r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}"
+ % coords)
+ elif code == Path.CURVE4:
+ c1x, c1y, c2x, c2y, px, py = tuple(points)
+ coords = c1x * f, c1y * f, c2x * f, c2y * f, px * f, py * f
+ writeln(self.fh,
+ r"\pgfpathcurveto"
+ r"{\pgfqpoint{%fin}{%fin}}"
+ r"{\pgfqpoint{%fin}{%fin}}"
+ r"{\pgfqpoint{%fin}{%fin}}"
+ % coords)
+
+ def _pgf_path_draw(self, stroke=True, fill=False):
+ actions = []
+ if stroke:
+ actions.append("stroke")
+ if fill:
+ actions.append("fill")
+ writeln(self.fh, r"\pgfusepath{%s}" % ",".join(actions))
+
+ def option_scale_image(self):
+ """
+ The pgf backend supports affine transforms of images.
+ """
+ return True
+
+ def option_image_nocomposite(self):
+ """
+ return whether to generate a composite image from multiple images on
+ a set of axes
+ """
+ return not rcParams['image.composite_image']
+
+ def draw_image(self, gc, x, y, im, transform=None):
+ h, w = im.shape[:2]
+ if w == 0 or h == 0:
+ return
+
+ # save the images to png files
+ path = os.path.dirname(self.fh.name)
+ fname = os.path.splitext(os.path.basename(self.fh.name))[0]
+ fname_img = "%s-img%d.png" % (fname, self.image_counter)
+ self.image_counter += 1
+ _png.write_png(im[::-1], os.path.join(path, fname_img))
+
+ # reference the image in the pgf picture
+ writeln(self.fh, r"\begin{pgfscope}")
+ self._print_pgf_clip(gc)
+ f = 1. / self.dpi # from display coords to inch
+ if transform is None:
+ writeln(self.fh,
+ r"\pgfsys@transformshift{%fin}{%fin}" % (x * f, y * f))
+ w, h = w * f, h * f
+ else:
+ tr1, tr2, tr3, tr4, tr5, tr6 = transform.frozen().to_values()
+ writeln(self.fh,
+ r"\pgfsys@transformcm{%f}{%f}{%f}{%f}{%fin}{%fin}" %
+ (tr1 * f, tr2 * f, tr3 * f, tr4 * f,
+ (tr5 + x) * f, (tr6 + y) * f))
+ w = h = 1 # scale is already included in the transform
+ interp = str(transform is None).lower() # interpolation in PDF reader
+ writeln(self.fh,
+ r"\pgftext[left,bottom]"
+ r"{\pgfimage[interpolate=%s,width=%fin,height=%fin]{%s}}" %
+ (interp, w, h, fname_img))
+ writeln(self.fh, r"\end{pgfscope}")
+
+ def draw_tex(self, gc, x, y, s, prop, angle, ismath="TeX!", mtext=None):
+ self.draw_text(gc, x, y, s, prop, angle, ismath, mtext)
+
+ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
+ # prepare string for tex
+ s = common_texification(s)
+ prop_cmds = _font_properties_str(prop)
+ s = r"%s %s" % (prop_cmds, s)
+
+ writeln(self.fh, r"\begin{pgfscope}")
+
+ alpha = gc.get_alpha()
+ if alpha != 1.0:
+ writeln(self.fh, r"\pgfsetfillopacity{%f}" % alpha)
+ writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % alpha)
+ rgb = tuple(gc.get_rgb())[:3]
+ if rgb != (0, 0, 0):
+ writeln(self.fh, r"\definecolor{textcolor}{rgb}{%f,%f,%f}" % rgb)
+ writeln(self.fh, r"\pgfsetstrokecolor{textcolor}")
+ writeln(self.fh, r"\pgfsetfillcolor{textcolor}")
+ s = r"\color{textcolor}" + s
+
+ f = 1.0 / self.figure.dpi
+ text_args = []
+ if mtext and (
+ (angle == 0 or
+ mtext.get_rotation_mode() == "anchor") and
+ mtext.get_va() != "center_baseline"):
+ # if text anchoring can be supported, get the original coordinates
+ # and add alignment information
+ x, y = mtext.get_transform().transform_point(mtext.get_position())
+ text_args.append("x=%fin" % (x * f))
+ text_args.append("y=%fin" % (y * f))
+
+ halign = {"left": "left", "right": "right", "center": ""}
+ valign = {"top": "top", "bottom": "bottom",
+ "baseline": "base", "center": ""}
+ text_args.append(halign[mtext.get_ha()])
+ text_args.append(valign[mtext.get_va()])
+ else:
+ # if not, use the text layout provided by matplotlib
+ text_args.append("x=%fin" % (x * f))
+ text_args.append("y=%fin" % (y * f))
+ text_args.append("left")
+ text_args.append("base")
+
+ if angle != 0:
+ text_args.append("rotate=%f" % angle)
+
+ writeln(self.fh, r"\pgftext[%s]{%s}" % (",".join(text_args), s))
+ writeln(self.fh, r"\end{pgfscope}")
+
+ def get_text_width_height_descent(self, s, prop, ismath):
+ # check if the math is supposed to be displaystyled
+ s = common_texification(s)
+
+ # get text metrics in units of latex pt, convert to display units
+ w, h, d = self.latexManager.get_width_height_descent(s, prop)
+ # TODO: this should be latex_pt_to_in instead of mpl_pt_to_in
+ # but having a little bit more space around the text looks better,
+ # plus the bounding box reported by LaTeX is VERY narrow
+ f = mpl_pt_to_in * self.dpi
+ return w * f, h * f, d * f
+
+ def flipy(self):
+ return False
+
+ def get_canvas_width_height(self):
+ return self.figure.get_figwidth(), self.figure.get_figheight()
+
+ def points_to_pixels(self, points):
+ return points * mpl_pt_to_in * self.dpi
+
+ def new_gc(self):
+ return GraphicsContextPgf()
+
+
+class GraphicsContextPgf(GraphicsContextBase):
+ pass
+
+########################################################################
+
+
+class TmpDirCleaner(object):
+ remaining_tmpdirs = set()
+
+ @staticmethod
+ def add(tmpdir):
+ TmpDirCleaner.remaining_tmpdirs.add(tmpdir)
+
+ @staticmethod
+ def cleanup_remaining_tmpdirs():
+ for tmpdir in TmpDirCleaner.remaining_tmpdirs:
+ try:
+ shutil.rmtree(tmpdir)
+ except:
+ sys.stderr.write("error deleting tmp directory %s\n" % tmpdir)
+
+
+class FigureCanvasPgf(FigureCanvasBase):
+ filetypes = {"pgf": "LaTeX PGF picture",
+ "pdf": "LaTeX compiled PGF picture",
+ "png": "Portable Network Graphics", }
+
+ def get_default_filetype(self):
+ return 'pdf'
+
+ def _print_pgf_to_fh(self, fh, *args, **kwargs):
+ if kwargs.get("dryrun", False):
+ renderer = RendererPgf(self.figure, None, dummy=True)
+ self.figure.draw(renderer)
+ return
+
+ header_text = """%% Creator: Matplotlib, PGF backend
+%%
+%% To include the figure in your LaTeX document, write
+%% \\input{<filename>.pgf}
+%%
+%% Make sure the required packages are loaded in your preamble
+%% \\usepackage{pgf}
+%%
+%% Figures using additional raster images can only be included by \\input if
+%% they are in the same directory as the main LaTeX file. For loading figures
+%% from other directories you can use the `import` package
+%% \\usepackage{import}
+%% and then include the figures with
+%% \\import{<path to file>}{<filename>.pgf}
+%%
+"""
+
+ # append the preamble used by the backend as a comment for debugging
+ header_info_preamble = ["%% Matplotlib used the following preamble"]
+ for line in get_preamble().splitlines():
+ header_info_preamble.append("%% " + line)
+ for line in get_fontspec().splitlines():
+ header_info_preamble.append("%% " + line)
+ header_info_preamble.append("%%")
+ header_info_preamble = "\n".join(header_info_preamble)
+
+ # get figure size in inch
+ w, h = self.figure.get_figwidth(), self.figure.get_figheight()
+ dpi = self.figure.get_dpi()
+
+ # create pgfpicture environment and write the pgf code
+ fh.write(header_text)
+ fh.write(header_info_preamble)
+ fh.write("\n")
+ writeln(fh, r"\begingroup")
+ writeln(fh, r"\makeatletter")
+ writeln(fh, r"\begin{pgfpicture}")
+ writeln(fh,
+ r"\pgfpathrectangle{\pgfpointorigin}{\pgfqpoint{%fin}{%fin}}"
+ % (w, h))
+ writeln(fh, r"\pgfusepath{use as bounding box, clip}")
+ _bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
+ renderer = MixedModeRenderer(self.figure, w, h, dpi,
+ RendererPgf(self.figure, fh),
+ bbox_inches_restore=_bbox_inches_restore)
+ self.figure.draw(renderer)
+
+ # end the pgfpicture environment
+ writeln(fh, r"\end{pgfpicture}")
+ writeln(fh, r"\makeatother")
+ writeln(fh, r"\endgroup")
+
+ def print_pgf(self, fname_or_fh, *args, **kwargs):
+ """
+ Output pgf commands for drawing the figure so it can be included and
+ rendered in latex documents.
+ """
+ if kwargs.get("dryrun", False):
+ self._print_pgf_to_fh(None, *args, **kwargs)
+ return
+
+ # figure out where the pgf is to be written to
+ if isinstance(fname_or_fh, six.string_types):
+ with codecs.open(fname_or_fh, "w", encoding="utf-8") as fh:
+ self._print_pgf_to_fh(fh, *args, **kwargs)
+ elif is_writable_file_like(fname_or_fh):
+ fh = codecs.getwriter("utf-8")(fname_or_fh)
+ self._print_pgf_to_fh(fh, *args, **kwargs)
+ else:
+ raise ValueError("filename must be a path")
+
+ def _print_pdf_to_fh(self, fh, *args, **kwargs):
+ w, h = self.figure.get_figwidth(), self.figure.get_figheight()
+
+ try:
+ # create temporary directory for compiling the figure
+ tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
+ fname_pgf = os.path.join(tmpdir, "figure.pgf")
+ fname_tex = os.path.join(tmpdir, "figure.tex")
+ fname_pdf = os.path.join(tmpdir, "figure.pdf")
+
+ # print figure to pgf and compile it with latex
+ self.print_pgf(fname_pgf, *args, **kwargs)
+
+ latex_preamble = get_preamble()
+ latex_fontspec = get_fontspec()
+ latexcode = """
+\\documentclass[12pt]{minimal}
+\\usepackage[paperwidth=%fin, paperheight=%fin, margin=0in]{geometry}
+%s
+%s
+\\usepackage{pgf}
+
+\\begin{document}
+\\centering
+\\input{figure.pgf}
+\\end{document}""" % (w, h, latex_preamble, latex_fontspec)
+ with codecs.open(fname_tex, "w", "utf-8") as fh_tex:
+ fh_tex.write(latexcode)
+
+ texcommand = get_texcommand()
+ cmdargs = [str(texcommand), "-interaction=nonstopmode",
+ "-halt-on-error", "figure.tex"]
+ try:
+ check_output(cmdargs, stderr=subprocess.STDOUT, cwd=tmpdir)
+ except subprocess.CalledProcessError as e:
+ raise RuntimeError(
+ "%s was not able to process your file.\n\nFull log:\n%s"
+ % (texcommand, e.output))
+
+ # copy file contents to target
+ with open(fname_pdf, "rb") as fh_src:
+ shutil.copyfileobj(fh_src, fh)
+ finally:
+ try:
+ shutil.rmtree(tmpdir)
+ except:
+ TmpDirCleaner.add(tmpdir)
+
+ def print_pdf(self, fname_or_fh, *args, **kwargs):
+ """
+ Use LaTeX to compile a pgf-generated figure to PDF.
+ """
+ if kwargs.get("dryrun", False):
+ self._print_pgf_to_fh(None, *args, **kwargs)
+ return
+
+ # figure out where the pdf is to be written to
+ if isinstance(fname_or_fh, six.string_types):
+ with open(fname_or_fh, "wb") as fh:
+ self._print_pdf_to_fh(fh, *args, **kwargs)
+ elif is_writable_file_like(fname_or_fh):
+ self._print_pdf_to_fh(fname_or_fh, *args, **kwargs)
+ else:
+ raise ValueError("filename must be a path or a file-like object")
+
+ def _print_png_to_fh(self, fh, *args, **kwargs):
+ converter = make_pdf_to_png_converter()
+
+ try:
+ # create temporary directory for pdf creation and png conversion
+ tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
+ fname_pdf = os.path.join(tmpdir, "figure.pdf")
+ fname_png = os.path.join(tmpdir, "figure.png")
+ # create pdf and try to convert it to png
+ self.print_pdf(fname_pdf, *args, **kwargs)
+ converter(fname_pdf, fname_png, dpi=self.figure.dpi)
+ # copy file contents to target
+ with open(fname_png, "rb") as fh_src:
+ shutil.copyfileobj(fh_src, fh)
+ finally:
+ try:
+ shutil.rmtree(tmpdir)
+ except:
+ TmpDirCleaner.add(tmpdir)
+
+ def print_png(self, fname_or_fh, *args, **kwargs):
+ """
+ Use LaTeX to compile a pgf figure to pdf and convert it to png.
+ """
+ if kwargs.get("dryrun", False):
+ self._print_pgf_to_fh(None, *args, **kwargs)
+ return
+
+ if isinstance(fname_or_fh, six.string_types):
+ with open(fname_or_fh, "wb") as fh:
+ self._print_png_to_fh(fh, *args, **kwargs)
+ elif is_writable_file_like(fname_or_fh):
+ self._print_png_to_fh(fname_or_fh, *args, **kwargs)
+ else:
+ raise ValueError("filename must be a path or a file-like object")
+
+ def get_renderer(self):
+ return RendererPgf(self.figure, None, dummy=True)
+
+
+class FigureManagerPgf(FigureManagerBase):
+ def __init__(self, *args):
+ FigureManagerBase.__init__(self, *args)
+
+
+@_Backend.export
+class _BackendPgf(_Backend):
+ FigureCanvas = FigureCanvasPgf
+ FigureManager = FigureManagerPgf
+
+
+def _cleanup_all():
+ LatexManager._cleanup_remaining_instances()
+ TmpDirCleaner.cleanup_remaining_tmpdirs()
+
+atexit.register(_cleanup_all)
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_ps.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_ps.py
new file mode 100644
index 00000000000..5e475101d3b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_ps.py
@@ -0,0 +1,1762 @@
+"""
+A PostScript backend, which can produce both PostScript .ps and .eps
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import StringIO
+
+import glob, os, shutil, sys, time, datetime
+import io
+import logging
+
+from tempfile import mkstemp
+from matplotlib import cbook, __version__, rcParams, checkdep_ghostscript
+from matplotlib.afm import AFM
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
+ RendererBase)
+
+from matplotlib.cbook import (get_realpath_and_stat, is_writable_file_like,
+ maxdict, file_requires_unicode)
+from matplotlib.compat.subprocess import subprocess
+
+from matplotlib.font_manager import findfont, is_opentype_cff_font, get_font
+from matplotlib.ft2font import KERNING_DEFAULT, LOAD_NO_HINTING
+from matplotlib.ttconv import convert_ttf_to_ps
+from matplotlib.mathtext import MathTextParser
+from matplotlib._mathtext_data import uni2type1
+from matplotlib.path import Path
+from matplotlib import _path
+from matplotlib.transforms import Affine2D
+
+from matplotlib.backends.backend_mixed import MixedModeRenderer
+
+
+import numpy as np
+import binascii
+import re
+
+_log = logging.getLogger(__name__)
+
+backend_version = 'Level II'
+
+debugPS = 0
+
+
+class PsBackendHelper(object):
+
+ def __init__(self):
+ self._cached = {}
+
+ @property
+ def gs_exe(self):
+ """
+ executable name of ghostscript.
+ """
+ try:
+ return self._cached["gs_exe"]
+ except KeyError:
+ pass
+
+ gs_exe, gs_version = checkdep_ghostscript()
+ if gs_exe is None:
+ gs_exe = 'gs'
+
+ self._cached["gs_exe"] = str(gs_exe)
+ return str(gs_exe)
+
+ @property
+ def gs_version(self):
+ """
+ version of ghostscript.
+ """
+ try:
+ return self._cached["gs_version"]
+ except KeyError:
+ pass
+
+ from matplotlib.compat.subprocess import Popen, PIPE
+ s = Popen([self.gs_exe, "--version"], stdout=PIPE)
+ pipe, stderr = s.communicate()
+ if six.PY3:
+ ver = pipe.decode('ascii')
+ else:
+ ver = pipe
+ try:
+ gs_version = tuple(map(int, ver.strip().split(".")))
+ except ValueError:
+ # if something went wrong parsing return null version number
+ gs_version = (0, 0)
+ self._cached["gs_version"] = gs_version
+ return gs_version
+
+ @property
+ def supports_ps2write(self):
+ """
+ True if the installed ghostscript supports ps2write device.
+ """
+ return self.gs_version[0] >= 9
+
+ps_backend_helper = PsBackendHelper()
+
+papersize = {'letter': (8.5,11),
+ 'legal': (8.5,14),
+ 'ledger': (11,17),
+ 'a0': (33.11,46.81),
+ 'a1': (23.39,33.11),
+ 'a2': (16.54,23.39),
+ 'a3': (11.69,16.54),
+ 'a4': (8.27,11.69),
+ 'a5': (5.83,8.27),
+ 'a6': (4.13,5.83),
+ 'a7': (2.91,4.13),
+ 'a8': (2.07,2.91),
+ 'a9': (1.457,2.05),
+ 'a10': (1.02,1.457),
+ 'b0': (40.55,57.32),
+ 'b1': (28.66,40.55),
+ 'b2': (20.27,28.66),
+ 'b3': (14.33,20.27),
+ 'b4': (10.11,14.33),
+ 'b5': (7.16,10.11),
+ 'b6': (5.04,7.16),
+ 'b7': (3.58,5.04),
+ 'b8': (2.51,3.58),
+ 'b9': (1.76,2.51),
+ 'b10': (1.26,1.76)}
+
+def _get_papertype(w, h):
+ keys = list(six.iterkeys(papersize))
+ keys.sort()
+ keys.reverse()
+ for key in keys:
+ if key.startswith('l'): continue
+ pw, ph = papersize[key]
+ if (w < pw) and (h < ph): return key
+ return 'a0'
+
+def _num_to_str(val):
+ if isinstance(val, six.string_types): return val
+
+ ival = int(val)
+ if val == ival: return str(ival)
+
+ s = "%1.3f"%val
+ s = s.rstrip("0")
+ s = s.rstrip(".")
+ return s
+
+def _nums_to_str(*args):
+ return ' '.join(map(_num_to_str,args))
+
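+# For example, _nums_to_str(72, 2.0, 0.125) returns "72 2 0.125":
+# integral values lose their trailing ".0" and floats are rendered
+# with at most three decimals, trailing zeros stripped.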
+
+def quote_ps_string(s):
+ "Quote dangerous characters of S for use in a PostScript string constant."
+ s = s.replace(b"\\", b"\\\\")
+ s = s.replace(b"(", b"\\(")
+ s = s.replace(b")", b"\\)")
+ s = s.replace(b"'", b"\\251")
+ s = s.replace(b"`", b"\\301")
+ s = re.sub(br"[^ -~\n]", lambda x: br"\%03o" % ord(x.group()), s)
+ return s.decode('ascii')
+
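+# For example, quote_ps_string(b"(50%)") returns r"\(50%\)", and bytes
+# outside the printable ASCII range are emitted as octal escapes such
+# as "\351".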
+
+def _move_path_to_path_or_stream(src, dst):
+ """Move the contents of file at *src* to path-or-filelike *dst*.
+
+ If *dst* is a path, the metadata of *src* are *not* copied.
+ """
+ if is_writable_file_like(dst):
+ fh = (io.open(src, 'r', encoding='latin-1')
+ if file_requires_unicode(dst)
+ else io.open(src, 'rb'))
+ with fh:
+ shutil.copyfileobj(fh, dst)
+ else:
+ # Py3: shutil.move(src, dst, copy_function=shutil.copyfile)
+ open(dst, 'w').close()
+ mode = os.stat(dst).st_mode
+ shutil.move(src, dst)
+ os.chmod(dst, mode)
+
+
+class RendererPS(RendererBase):
+ """
+ The renderer handles all the drawing primitives using a graphics
+ context instance that controls the colors/styles.
+ """
+
+ afmfontd = maxdict(50)
+
+ def __init__(self, width, height, pswriter, imagedpi=72):
+ """
+ Although PostScript itself is dpi independent, we need to
+ inform the image code about a requested dpi to generate
+ high-res images and then scale them before embedding them.
+ """
+ RendererBase.__init__(self)
+ self.width = width
+ self.height = height
+ self._pswriter = pswriter
+ if rcParams['text.usetex']:
+ self.textcnt = 0
+ self.psfrag = []
+ self.imagedpi = imagedpi
+
+ # current renderer state (None=uninitialised)
+ self.color = None
+ self.linewidth = None
+ self.linejoin = None
+ self.linecap = None
+ self.linedash = None
+ self.fontname = None
+ self.fontsize = None
+ self._hatches = {}
+ self.image_magnification = imagedpi/72.0
+ self._clip_paths = {}
+ self._path_collection_id = 0
+
+ self.used_characters = {}
+ self.mathtext_parser = MathTextParser("PS")
+
+ self._afm_font_dir = os.path.join(
+ rcParams['datapath'], 'fonts', 'afm')
+
+ def track_characters(self, font, s):
+ """Keeps track of which characters are required from
+ each font."""
+ realpath, stat_key = get_realpath_and_stat(font.fname)
+ used_characters = self.used_characters.setdefault(
+ stat_key, (realpath, set()))
+ used_characters[1].update([ord(x) for x in s])
+
+ def merge_used_characters(self, other):
+ for stat_key, (realpath, charset) in six.iteritems(other):
+ used_characters = self.used_characters.setdefault(
+ stat_key, (realpath, set()))
+ used_characters[1].update(charset)
+
+ def set_color(self, r, g, b, store=1):
+ if (r,g,b) != self.color:
+ if r==g and r==b:
+ self._pswriter.write("%1.3f setgray\n"%r)
+ else:
+ self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
+ if store: self.color = (r,g,b)
+
+ def set_linewidth(self, linewidth, store=1):
+ linewidth = float(linewidth)
+ if linewidth != self.linewidth:
+ self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
+ if store: self.linewidth = linewidth
+
+ def set_linejoin(self, linejoin, store=1):
+ if linejoin != self.linejoin:
+ self._pswriter.write("%d setlinejoin\n"%linejoin)
+ if store: self.linejoin = linejoin
+
+ def set_linecap(self, linecap, store=1):
+ if linecap != self.linecap:
+ self._pswriter.write("%d setlinecap\n"%linecap)
+ if store: self.linecap = linecap
+
+ def set_linedash(self, offset, seq, store=1):
+ if self.linedash is not None:
+ oldo, oldseq = self.linedash
+ if np.array_equal(seq, oldseq) and oldo == offset:
+ return
+
+ if seq is not None and len(seq):
+ s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
+ self._pswriter.write(s)
+ else:
+ self._pswriter.write("[] 0 setdash\n")
+ if store:
+ self.linedash = (offset, seq)
+
+ def set_font(self, fontname, fontsize, store=1):
+ if rcParams['ps.useafm']: return
+ if (fontname,fontsize) != (self.fontname,self.fontsize):
+ out = ("/%s findfont\n"
+ "%1.3f scalefont\n"
+ "setfont\n" % (fontname, fontsize))
+
+ self._pswriter.write(out)
+ if store: self.fontname = fontname
+ if store: self.fontsize = fontsize
+
+ def create_hatch(self, hatch):
+ sidelen = 72
+ if hatch in self._hatches:
+ return self._hatches[hatch]
+ name = 'H%d' % len(self._hatches)
+ linewidth = rcParams['hatch.linewidth']
+ pageheight = self.height * 72
+ self._pswriter.write("""\
+ << /PatternType 1
+ /PaintType 2
+ /TilingType 2
+ /BBox[0 0 %(sidelen)d %(sidelen)d]
+ /XStep %(sidelen)d
+ /YStep %(sidelen)d
+
+ /PaintProc {
+ pop
+ %(linewidth)f setlinewidth
+""" % locals())
+ self._pswriter.write(
+ self._convert_path(Path.hatch(hatch), Affine2D().scale(sidelen),
+ simplify=False))
+ self._pswriter.write("""\
+ fill
+ stroke
+ } bind
+ >>
+ matrix
+ 0.0 %(pageheight)f translate
+ makepattern
+ /%(name)s exch def
+""" % locals())
+ self._hatches[hatch] = name
+ return name
+
+ def get_canvas_width_height(self):
+ 'return the canvas width and height in display coords'
+ return self.width * 72.0, self.height * 72.0
+
+ def get_text_width_height_descent(self, s, prop, ismath):
+ """
+ Get the width and height in display coords of the string s
+ with FontProperties prop
+
+ """
+ if rcParams['text.usetex']:
+ texmanager = self.get_texmanager()
+ fontsize = prop.get_size_in_points()
+ w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
+ renderer=self)
+ return w, h, d
+
+ if ismath:
+ width, height, descent, pswriter, used_characters = \
+ self.mathtext_parser.parse(s, 72, prop)
+ return width, height, descent
+
+ if rcParams['ps.useafm']:
+ if ismath: s = s[1:-1]
+ font = self._get_font_afm(prop)
+ l,b,w,h,d = font.get_str_bbox_and_descent(s)
+
+ fontsize = prop.get_size_in_points()
+ scale = 0.001*fontsize
+ w *= scale
+ h *= scale
+ d *= scale
+ return w, h, d
+
+ font = self._get_font_ttf(prop)
+ font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
+ w, h = font.get_width_height()
+ w /= 64.0 # convert from subpixels
+ h /= 64.0
+ d = font.get_descent()
+ d /= 64.0
+ return w, h, d
+
+ def flipy(self):
+ 'return true if small y numbers are top for renderer'
+ return False
+
+ def _get_font_afm(self, prop):
+ key = hash(prop)
+ font = self.afmfontd.get(key)
+ if font is None:
+ fname = findfont(prop, fontext='afm', directory=self._afm_font_dir)
+ if fname is None:
+ fname = findfont(
+ "Helvetica", fontext='afm', directory=self._afm_font_dir)
+ font = self.afmfontd.get(fname)
+ if font is None:
+ with io.open(fname, 'rb') as fh:
+ font = AFM(fh)
+ self.afmfontd[fname] = font
+ self.afmfontd[key] = font
+ return font
+
+ def _get_font_ttf(self, prop):
+ fname = findfont(prop)
+ font = get_font(fname)
+ font.clear()
+ size = prop.get_size_in_points()
+ font.set_size(size, 72.0)
+ return font
+
+ def _rgb(self, rgba):
+ h, w = rgba.shape[:2]
+ rgb = rgba[::-1, :, :3]
+ return h, w, rgb.tostring()
+
+ def _hex_lines(self, s, chars_per_line=128):
+ s = binascii.b2a_hex(s)
+ nhex = len(s)
+ lines = []
+ for i in range(0,nhex,chars_per_line):
+ limit = min(i+chars_per_line, nhex)
+ lines.append(s[i:limit])
+ return lines
+
+ def get_image_magnification(self):
+ """
+ Get the factor by which to magnify images passed to draw_image.
+ Allows a backend to have images at a different resolution to other
+ artists.
+ """
+ return self.image_magnification
+
+ def option_scale_image(self):
+ """
+ The ps backend supports arbitrary scaling of images.
+ """
+ return True
+
+ def option_image_nocomposite(self):
+ """
+ return whether to generate a composite image from multiple images on
+ a set of axes
+ """
+ return not rcParams['image.composite_image']
+
+ def _get_image_h_w_bits_command(self, im):
+ h, w, bits = self._rgb(im)
+ imagecmd = "false 3 colorimage"
+
+ return h, w, bits, imagecmd
+
+ def draw_image(self, gc, x, y, im, transform=None):
+ """
+ Draw the Image instance into the current axes; x is the
+ distance in pixels from the left hand side of the canvas and y
+ is the distance from bottom
+ """
+
+ h, w, bits, imagecmd = self._get_image_h_w_bits_command(im)
+ hexlines = b'\n'.join(self._hex_lines(bits)).decode('ascii')
+
+ if transform is None:
+ matrix = "1 0 0 1 0 0"
+ xscale = w / self.image_magnification
+ yscale = h / self.image_magnification
+ else:
+ matrix = " ".join(map(str, transform.frozen().to_values()))
+ xscale = 1.0
+ yscale = 1.0
+
+ figh = self.height * 72
+
+ bbox = gc.get_clip_rectangle()
+ clippath, clippath_trans = gc.get_clip_path()
+
+ clip = []
+ if bbox is not None:
+ clipx,clipy,clipw,cliph = bbox.bounds
+ clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
+ if clippath is not None:
+ id = self._get_clip_path(clippath, clippath_trans)
+ clip.append('%s' % id)
+ clip = '\n'.join(clip)
+
+ ps = """gsave
+%(clip)s
+%(x)s %(y)s translate
+[%(matrix)s] concat
+%(xscale)s %(yscale)s scale
+/DataString %(w)s string def
+%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
+{
+currentfile DataString readhexstring pop
+} bind %(imagecmd)s
+%(hexlines)s
+grestore
+""" % locals()
+ self._pswriter.write(ps)
+
+ def _convert_path(self, path, transform, clip=False, simplify=None):
+ if clip:
+ clip = (0.0, 0.0, self.width * 72.0,
+ self.height * 72.0)
+ else:
+ clip = None
+ return _path.convert_to_string(
+ path, transform, clip, simplify, None,
+ 6, [b'm', b'l', b'', b'c', b'cl'], True).decode('ascii')
+
+ def _get_clip_path(self, clippath, clippath_transform):
+ key = (clippath, id(clippath_transform))
+ pid = self._clip_paths.get(key)
+ if pid is None:
+ pid = 'c%x' % len(self._clip_paths)
+ ps_cmd = ['/%s {' % pid]
+ ps_cmd.append(self._convert_path(clippath, clippath_transform,
+ simplify=False))
+ ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
+ self._pswriter.write('\n'.join(ps_cmd))
+ self._clip_paths[key] = pid
+ return pid
+
+ def draw_path(self, gc, path, transform, rgbFace=None):
+ """
+ Draws a Path instance using the given affine transform.
+ """
+ clip = rgbFace is None and gc.get_hatch_path() is None
+ simplify = path.should_simplify and clip
+ ps = self._convert_path(path, transform, clip=clip, simplify=simplify)
+ self._draw_ps(ps, gc, rgbFace)
+
+ def draw_markers(
+ self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
+ """
+ Draw the markers defined by marker_path at each of the positions
+ given by path. Marker path coordinates are in points; the positions
+ in path are transformed by trans.
+ """
+ if debugPS: self._pswriter.write('% draw_markers \n')
+
+ if rgbFace:
+ if len(rgbFace) == 4 and rgbFace[3] == 0:
+ ps_color = None
+ else:
+ if rgbFace[0] == rgbFace[1] == rgbFace[2]:
+ ps_color = '%1.3f setgray' % rgbFace[0]
+ else:
+ ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace[:3]
+
+ # construct the generic marker command:
+ ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # don't want the translate to be global
+
+ lw = gc.get_linewidth()
+ alpha = (gc.get_alpha()
+ if gc.get_forced_alpha() or len(gc.get_rgb()) == 3
+ else gc.get_rgb()[3])
+ stroke = lw > 0 and alpha > 0
+ if stroke:
+ ps_cmd.append('%.1f setlinewidth' % lw)
+ jint = gc.get_joinstyle()
+ ps_cmd.append('%d setlinejoin' % jint)
+ cint = gc.get_capstyle()
+ ps_cmd.append('%d setlinecap' % cint)
+
+ ps_cmd.append(self._convert_path(marker_path, marker_trans,
+ simplify=False))
+
+ if rgbFace:
+ if stroke:
+ ps_cmd.append('gsave')
+ if ps_color:
+ ps_cmd.extend([ps_color, 'fill'])
+ if stroke:
+ ps_cmd.append('grestore')
+
+ if stroke:
+ ps_cmd.append('stroke')
+ ps_cmd.extend(['grestore', '} bind def'])
+
+ for vertices, code in path.iter_segments(
+ trans,
+ clip=(0, 0, self.width*72, self.height*72),
+ simplify=False):
+ if len(vertices):
+ x, y = vertices[-2:]
+ ps_cmd.append("%g %g o" % (x, y))
+
+ ps = '\n'.join(ps_cmd)
+ self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
+
+ def draw_path_collection(self, gc, master_transform, paths, all_transforms,
+ offsets, offsetTrans, facecolors, edgecolors,
+ linewidths, linestyles, antialiaseds, urls,
+ offset_position):
+ # Is the optimization worth it? Rough calculation:
+ # cost of emitting a path in-line is
+ # (len_path + 2) * uses_per_path
+ # cost of definition+use is
+ # (len_path + 3) + 3 * uses_per_path
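+ # A rough worked example (illustrative numbers only): a path with
+ # 10 vertices reused 5 times costs (10 + 2) * 5 = 60 emitted in-line,
+ # but only (10 + 3) + 3 * 5 = 28 as a definition plus uses, so the
+ # definition route below would be taken.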
+ len_path = len(paths[0].vertices) if len(paths) > 0 else 0
+ uses_per_path = self._iter_collection_uses_per_path(
+ paths, all_transforms, offsets, facecolors, edgecolors)
+ should_do_optimization = \
+ len_path + 3 * uses_per_path + 3 < (len_path + 2) * uses_per_path
+ if not should_do_optimization:
+ return RendererBase.draw_path_collection(
+ self, gc, master_transform, paths, all_transforms,
+ offsets, offsetTrans, facecolors, edgecolors,
+ linewidths, linestyles, antialiaseds, urls,
+ offset_position)
+
+ write = self._pswriter.write
+
+ path_codes = []
+ for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
+ master_transform, paths, all_transforms)):
+ name = 'p%x_%x' % (self._path_collection_id, i)
+ ps_cmd = ['/%s {' % name,
+ 'newpath', 'translate']
+ ps_cmd.append(self._convert_path(path, transform, simplify=False))
+ ps_cmd.extend(['} bind def\n'])
+ write('\n'.join(ps_cmd))
+ path_codes.append(name)
+
+ for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
+ gc, master_transform, all_transforms, path_codes, offsets,
+ offsetTrans, facecolors, edgecolors, linewidths, linestyles,
+ antialiaseds, urls, offset_position):
+ ps = "%g %g %s" % (xo, yo, path_id)
+ self._draw_ps(ps, gc0, rgbFace)
+
+ self._path_collection_id += 1
+
+ def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
+ """
+ Draw a Text instance using TeX.
+ """
+ w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
+ fontsize = prop.get_size_in_points()
+ thetext = 'psmarker%d' % self.textcnt
+ color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
+ fontcmd = {'sans-serif' : r'{\sffamily %s}',
+ 'monospace' : r'{\ttfamily %s}'}.get(
+ rcParams['font.family'][0], r'{\rmfamily %s}')
+ s = fontcmd % s
+ tex = r'\color[rgb]{%s} %s' % (color, s)
+
+ corr = 0#w/2*(fontsize-10)/10
+ if rcParams['text.latex.preview']:
+ # use baseline alignment!
+ pos = _nums_to_str(x-corr, y)
+ self.psfrag.append(r'\psfrag{%s}[Bl][Bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
+ else:
+ # stick to the bottom alignment, but this may give incorrect baseline sometimes.
+ pos = _nums_to_str(x-corr, y-bl)
+ self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
+
+ ps = """\
+gsave
+%(pos)s moveto
+(%(thetext)s)
+show
+grestore
+ """ % locals()
+
+ self._pswriter.write(ps)
+ self.textcnt += 1
+
+ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
+ """
+ Draw a Text instance.
+ """
+ # local to avoid repeated attribute lookups
+ write = self._pswriter.write
+ if debugPS:
+ write("% text\n")
+
+ if len(gc.get_rgb()) == 4 and gc.get_rgb()[3] == 0:
+ return # Special handling for fully transparent.
+
+ if ismath=='TeX':
+ return self.draw_tex(gc, x, y, s, prop, angle)
+
+ elif ismath:
+ return self.draw_mathtext(gc, x, y, s, prop, angle)
+
+ elif rcParams['ps.useafm']:
+ self.set_color(*gc.get_rgb())
+
+ font = self._get_font_afm(prop)
+ fontname = font.get_fontname()
+ fontsize = prop.get_size_in_points()
+ scale = 0.001*fontsize
+
+ thisx = 0
+ thisy = font.get_str_bbox_and_descent(s)[4] * scale
+ last_name = None
+ lines = []
+ for c in s:
+ name = uni2type1.get(ord(c), 'question')
+ try:
+ width = font.get_width_from_char_name(name)
+ except KeyError:
+ name = 'question'
+ width = font.get_width_char('?')
+ if last_name is not None:
+ kern = font.get_kern_dist_from_name(last_name, name)
+ else:
+ kern = 0
+ last_name = name
+ thisx += kern * scale
+
+ lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
+
+ thisx += width * scale
+
+ thetext = "\n".join(lines)
+ ps = """\
+gsave
+/%(fontname)s findfont
+%(fontsize)s scalefont
+setfont
+%(x)f %(y)f translate
+%(angle)f rotate
+%(thetext)s
+grestore
+ """ % locals()
+ self._pswriter.write(ps)
+
+ else:
+ font = self._get_font_ttf(prop)
+ font.set_text(s, 0, flags=LOAD_NO_HINTING)
+ self.track_characters(font, s)
+
+ self.set_color(*gc.get_rgb())
+ sfnt = font.get_sfnt()
+ try:
+ ps_name = sfnt[1, 0, 0, 6].decode('mac_roman')
+ except KeyError:
+ ps_name = sfnt[3, 1, 0x0409, 6].decode('utf-16be')
+ ps_name = ps_name.encode('ascii', 'replace').decode('ascii')
+ self.set_font(ps_name, prop.get_size_in_points())
+
+ lastgind = None
+ lines = []
+ thisx = 0
+ thisy = 0
+ for c in s:
+ ccode = ord(c)
+ gind = font.get_char_index(ccode)
+ if gind is None:
+ ccode = ord('?')
+ name = '.notdef'
+ gind = 0
+ else:
+ name = font.get_glyph_name(gind)
+ glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
+
+ if lastgind is not None:
+ kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
+ else:
+ kern = 0
+ lastgind = gind
+ thisx += kern/64.0
+
+ lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
+ thisx += glyph.linearHoriAdvance/65536.0
+
+
+ thetext = '\n'.join(lines)
+ ps = """gsave
+%(x)f %(y)f translate
+%(angle)f rotate
+%(thetext)s
+grestore
+""" % locals()
+ self._pswriter.write(ps)
+
+ def new_gc(self):
+ return GraphicsContextPS()
+
+ def draw_mathtext(self, gc,
+ x, y, s, prop, angle):
+ """
+ Draw the math text using matplotlib.mathtext
+ """
+ if debugPS:
+ self._pswriter.write("% mathtext\n")
+
+ width, height, descent, pswriter, used_characters = \
+ self.mathtext_parser.parse(s, 72, prop)
+ self.merge_used_characters(used_characters)
+ self.set_color(*gc.get_rgb())
+ thetext = pswriter.getvalue()
+ ps = """gsave
+%(x)f %(y)f translate
+%(angle)f rotate
+%(thetext)s
+grestore
+""" % locals()
+ self._pswriter.write(ps)
+
+ def draw_gouraud_triangle(self, gc, points, colors, trans):
+ self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
+ colors.reshape((1, 3, 4)), trans)
+
+ def draw_gouraud_triangles(self, gc, points, colors, trans):
+ assert len(points) == len(colors)
+ assert points.ndim == 3
+ assert points.shape[1] == 3
+ assert points.shape[2] == 2
+ assert colors.ndim == 3
+ assert colors.shape[1] == 3
+ assert colors.shape[2] == 4
+
+ shape = points.shape
+ flat_points = points.reshape((shape[0] * shape[1], 2))
+ flat_points = trans.transform(flat_points)
+ flat_colors = colors.reshape((shape[0] * shape[1], 4))
+ points_min = np.min(flat_points, axis=0) - (1 << 12)
+ points_max = np.max(flat_points, axis=0) + (1 << 12)
+ factor = np.ceil((2 ** 32 - 1) / (points_max - points_min))
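+ # The vertices are stored as 32-bit fixed-point values: shifting by
+ # points_min and scaling by factor maps the padded range onto roughly
+ # [0, 2**32 - 1] (a point at points_min encodes as 0, one at points_max
+ # as about 2**32 - 1); the /Decode array in the shading dict below maps
+ # them back to [xmin, xmax] x [ymin, ymax].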
+
+ xmin, ymin = points_min
+ xmax, ymax = points_max
+
+ streamarr = np.empty(
+ (shape[0] * shape[1],),
+ dtype=[('flags', 'u1'),
+ ('points', '>u4', (2,)),
+ ('colors', 'u1', (3,))])
+ streamarr['flags'] = 0
+ streamarr['points'] = (flat_points - points_min) * factor
+ streamarr['colors'] = flat_colors[:, :3] * 255.0
+
+ stream = quote_ps_string(streamarr.tostring())
+
+ self._pswriter.write("""
+gsave
+<< /ShadingType 4
+ /ColorSpace [/DeviceRGB]
+ /BitsPerCoordinate 32
+ /BitsPerComponent 8
+ /BitsPerFlag 8
+ /AntiAlias true
+ /Decode [ %(xmin)f %(xmax)f %(ymin)f %(ymax)f 0 1 0 1 0 1 ]
+ /DataSource (%(stream)s)
+>>
+shfill
+grestore
+""" % locals())
+
+ def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
+ """
+ Emit the PostScript snippet 'ps' with all the attributes from 'gc'
+ applied. 'ps' must consist of PostScript commands to construct a path.
+
+ The fill and/or stroke kwargs can be set to False if the
+ 'ps' string already includes filling and/or stroking, in
+ which case _draw_ps is just supplying properties and
+ clipping.
+ """
+ # local variable eliminates all repeated attribute lookups
+ write = self._pswriter.write
+ if debugPS and command:
+ write("% "+command+"\n")
+ mightstroke = gc.shouldstroke()
+ stroke = stroke and mightstroke
+ fill = (fill and rgbFace is not None and
+ (len(rgbFace) <= 3 or rgbFace[3] != 0.0))
+ hatch = gc.get_hatch()
+
+ if mightstroke:
+ self.set_linewidth(gc.get_linewidth())
+ jint = gc.get_joinstyle()
+ self.set_linejoin(jint)
+ cint = gc.get_capstyle()
+ self.set_linecap(cint)
+ self.set_linedash(*gc.get_dashes())
+ self.set_color(*gc.get_rgb()[:3])
+ write('gsave\n')
+
+ cliprect = gc.get_clip_rectangle()
+ if cliprect:
+ x,y,w,h=cliprect.bounds
+ write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
+ clippath, clippath_trans = gc.get_clip_path()
+ if clippath:
+ id = self._get_clip_path(clippath, clippath_trans)
+ write('%s\n' % id)
+
+ # Jochen, is the strip necessary? - this could be a honking big string
+ write(ps.strip())
+ write("\n")
+
+ if fill:
+ if stroke or hatch:
+ write("gsave\n")
+ self.set_color(store=0, *rgbFace[:3])
+ write("fill\n")
+ if stroke or hatch:
+ write("grestore\n")
+
+ if hatch:
+ hatch_name = self.create_hatch(hatch)
+ write("gsave\n")
+ write("%f %f %f " % gc.get_hatch_color()[:3])
+ write("%s setpattern fill grestore\n" % hatch_name)
+
+ if stroke:
+ write("stroke\n")
+
+ write("grestore\n")
+
+
+class GraphicsContextPS(GraphicsContextBase):
+ def get_capstyle(self):
+ return {'butt':0,
+ 'round':1,
+ 'projecting':2}[GraphicsContextBase.get_capstyle(self)]
+
+ def get_joinstyle(self):
+ return {'miter':0,
+ 'round':1,
+ 'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
+
+ def shouldstroke(self):
+ return (self.get_linewidth() > 0.0 and
+ (len(self.get_rgb()) <= 3 or self.get_rgb()[3] != 0.0))
+
+
+class FigureCanvasPS(FigureCanvasBase):
+ _renderer_class = RendererPS
+
+ fixed_dpi = 72
+
+ def draw(self):
+ pass
+
+ filetypes = {'ps' : 'Postscript',
+ 'eps' : 'Encapsulated Postscript'}
+
+ def get_default_filetype(self):
+ return 'ps'
+
+ def print_ps(self, outfile, *args, **kwargs):
+ return self._print_ps(outfile, 'ps', *args, **kwargs)
+
+ def print_eps(self, outfile, *args, **kwargs):
+ return self._print_ps(outfile, 'eps', *args, **kwargs)
+
+ def _print_ps(self, outfile, format, *args, **kwargs):
+ papertype = kwargs.pop("papertype", rcParams['ps.papersize'])
+ papertype = papertype.lower()
+ if papertype == 'auto':
+ pass
+ elif papertype not in papersize:
+ raise RuntimeError('%s is not a valid papertype. Use one of %s' %
+ (papertype, ', '.join(papersize)))
+
+ orientation = kwargs.pop("orientation", "portrait").lower()
+ if orientation == 'landscape': isLandscape = True
+ elif orientation == 'portrait': isLandscape = False
+ else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
+
+ self.figure.set_dpi(72) # Override the dpi kwarg
+ imagedpi = kwargs.pop("dpi", 72)
+ facecolor = kwargs.pop("facecolor", "w")
+ edgecolor = kwargs.pop("edgecolor", "w")
+
+ if rcParams['text.usetex']:
+ self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
+ orientation, isLandscape, papertype,
+ **kwargs)
+ else:
+ self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
+ orientation, isLandscape, papertype,
+ **kwargs)
+
+ def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
+ orientation='portrait', isLandscape=False, papertype=None,
+ metadata=None, **kwargs):
+ """
+ Render the figure to hardcopy. Set the figure patch face and
+ edge colors. This is useful because some of the GUIs have a
+ gray figure face color background that you will probably want to
+ override on hardcopy.
+
+ If outfile is a string, it is interpreted as a file name.
+ If the extension matches .ep* write encapsulated postscript,
+ otherwise write a stand-alone PostScript file.
+
+ If outfile is a file object, a stand-alone PostScript file is
+ written into this file object.
+
+ metadata must be a dictionary. Currently, only the value for
+ the key 'Creator' is used.
+ """
+ isEPSF = format == 'eps'
+ if isinstance(outfile,
+ (six.string_types, getattr(os, "PathLike", ()),)):
+ outfile = title = getattr(os, "fspath", lambda obj: obj)(outfile)
+ passed_in_file_object = False
+ elif is_writable_file_like(outfile):
+ title = None
+ passed_in_file_object = True
+ else:
+ raise ValueError("outfile must be a path or a file-like object")
+
+ # find the appropriate papertype
+ width, height = self.figure.get_size_inches()
+ if papertype == 'auto':
+ if isLandscape: papertype = _get_papertype(height, width)
+ else: papertype = _get_papertype(width, height)
+
+ if isLandscape: paperHeight, paperWidth = papersize[papertype]
+ else: paperWidth, paperHeight = papersize[papertype]
+
+ if rcParams['ps.usedistiller'] and not papertype == 'auto':
+ # distillers will improperly clip eps files if the pagesize is
+ # too small
+ if width>paperWidth or height>paperHeight:
+ if isLandscape:
+ papertype = _get_papertype(height, width)
+ paperHeight, paperWidth = papersize[papertype]
+ else:
+ papertype = _get_papertype(width, height)
+ paperWidth, paperHeight = papersize[papertype]
+
+ # center the figure on the paper
+ xo = 72*0.5*(paperWidth - width)
+ yo = 72*0.5*(paperHeight - height)
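+ # For example (illustrative, assuming papersize['letter'] == (8.5, 11)):
+ # an 8 x 6 inch figure on letter paper is offset by
+ # xo = 72 * 0.5 * (8.5 - 8) = 18 pt and yo = 72 * 0.5 * (11 - 6) = 180 pt.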
+
+ l, b, w, h = self.figure.bbox.bounds
+ llx = xo
+ lly = yo
+ urx = llx + w
+ ury = lly + h
+ rotation = 0
+ if isLandscape:
+ llx, lly, urx, ury = lly, llx, ury, urx
+ xo, yo = 72*paperHeight - yo, xo
+ rotation = 90
+ bbox = (llx, lly, urx, ury)
+
+ # generate PostScript code for the figure and store it in a string
+ origfacecolor = self.figure.get_facecolor()
+ origedgecolor = self.figure.get_edgecolor()
+ self.figure.set_facecolor(facecolor)
+ self.figure.set_edgecolor(edgecolor)
+
+
+ dryrun = kwargs.get("dryrun", False)
+ if dryrun:
+ class NullWriter(object):
+ def write(self, *kl, **kwargs):
+ pass
+
+ self._pswriter = NullWriter()
+ else:
+ self._pswriter = io.StringIO()
+
+
+ # mixed mode rendering
+ _bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
+ ps_renderer = self._renderer_class(width, height, self._pswriter,
+ imagedpi=dpi)
+ renderer = MixedModeRenderer(self.figure,
+ width, height, dpi, ps_renderer,
+ bbox_inches_restore=_bbox_inches_restore)
+
+ self.figure.draw(renderer)
+
+ if dryrun: # return immediately if dryrun (tightbbox=True)
+ return
+
+ self.figure.set_facecolor(origfacecolor)
+ self.figure.set_edgecolor(origedgecolor)
+
+ # check for custom metadata
+ if metadata is not None and 'Creator' in metadata:
+ creator_str = metadata['Creator']
+ else:
+ creator_str = "matplotlib version " + __version__ + \
+ ", http://matplotlib.org/"
+
+ def print_figure_impl(fh):
+ # write the PostScript headers
+ if isEPSF:
+ print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
+ else:
+ print("%!PS-Adobe-3.0", file=fh)
+ if title:
+ print("%%Title: "+title, file=fh)
+ print("%%Creator: " + creator_str, file=fh)
+ # get source date from SOURCE_DATE_EPOCH, if set
+ # See https://reproducible-builds.org/specs/source-date-epoch/
+ source_date_epoch = os.getenv("SOURCE_DATE_EPOCH")
+ if source_date_epoch:
+ source_date = datetime.datetime.utcfromtimestamp(
+ int(source_date_epoch)).strftime("%a %b %d %H:%M:%S %Y")
+ else:
+ source_date = time.ctime()
+ print("%%CreationDate: "+source_date, file=fh)
+ print("%%Orientation: " + orientation, file=fh)
+ if not isEPSF:
+ print("%%DocumentPaperSizes: "+papertype, file=fh)
+ print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
+ if not isEPSF:
+ print("%%Pages: 1", file=fh)
+ print("%%EndComments", file=fh)
+
+ Ndict = len(psDefs)
+ print("%%BeginProlog", file=fh)
+ if not rcParams['ps.useafm']:
+ Ndict += len(ps_renderer.used_characters)
+ print("/mpldict %d dict def" % Ndict, file=fh)
+ print("mpldict begin", file=fh)
+ for d in psDefs:
+ d = d.strip()
+ for l in d.split('\n'):
+ print(l.strip(), file=fh)
+ if not rcParams['ps.useafm']:
+ for font_filename, chars in six.itervalues(
+ ps_renderer.used_characters):
+ if len(chars):
+ font = get_font(font_filename)
+ glyph_ids = []
+ for c in chars:
+ gind = font.get_char_index(c)
+ glyph_ids.append(gind)
+
+ fonttype = rcParams['ps.fonttype']
+
+ # Can not use more than 255 characters from a
+ # single font for Type 3
+ if len(glyph_ids) > 255:
+ fonttype = 42
+
+ # The ttf to ps (subsetting) support doesn't work for
+ # OpenType fonts that are Postscript inside (like the
+ # STIX fonts). This will simply turn that off to avoid
+ # errors.
+ if is_opentype_cff_font(font_filename):
+ raise RuntimeError(
+ "OpenType CFF fonts can not be saved using "
+ "the internal Postscript backend at this "
+ "time; consider using the Cairo backend")
+ else:
+ fh.flush()
+ convert_ttf_to_ps(
+ font_filename.encode(
+ sys.getfilesystemencoding()),
+ fh, fonttype, glyph_ids)
+ print("end", file=fh)
+ print("%%EndProlog", file=fh)
+
+ if not isEPSF:
+ print("%%Page: 1 1", file=fh)
+ print("mpldict begin", file=fh)
+
+ print("%s translate" % _nums_to_str(xo, yo), file=fh)
+ if rotation:
+ print("%d rotate" % rotation, file=fh)
+ print("%s clipbox" % _nums_to_str(width*72, height*72, 0, 0),
+ file=fh)
+
+ # write the figure
+ content = self._pswriter.getvalue()
+ if not isinstance(content, six.text_type):
+ content = content.decode('ascii')
+ print(content, file=fh)
+
+ # write the trailer
+ print("end", file=fh)
+ print("showpage", file=fh)
+ if not isEPSF:
+ print("%%EOF", file=fh)
+ fh.flush()
+
+ if rcParams['ps.usedistiller']:
+ # We are going to use an external program to process the output.
+ # Write to a temporary file.
+ fd, tmpfile = mkstemp()
+ try:
+ with io.open(fd, 'w', encoding='latin-1') as fh:
+ print_figure_impl(fh)
+ if rcParams['ps.usedistiller'] == 'ghostscript':
+ gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
+ elif rcParams['ps.usedistiller'] == 'xpdf':
+ xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
+
+ _move_path_to_path_or_stream(tmpfile, outfile)
+ finally:
+ if os.path.isfile(tmpfile):
+ os.unlink(tmpfile)
+
+ else:
+ # Write directly to outfile.
+ if passed_in_file_object:
+ requires_unicode = file_requires_unicode(outfile)
+
+ if (not requires_unicode and
+ (six.PY3 or not isinstance(outfile, StringIO))):
+ fh = io.TextIOWrapper(outfile, encoding="latin-1")
+
+ # Prevent the io.TextIOWrapper from closing the
+ # underlying file
+ def do_nothing():
+ pass
+ fh.close = do_nothing
+ else:
+ fh = outfile
+
+ print_figure_impl(fh)
+ else:
+ with io.open(outfile, 'w', encoding='latin-1') as fh:
+ print_figure_impl(fh)
+
+ def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
+ orientation, isLandscape, papertype, metadata=None,
+ **kwargs):
+ """
+ If text.usetex is True in rc, a temporary pair of tex/eps files
+ is created to allow tex to manage the text layout via the psfrag
+ package. These files are processed to yield the final ps or eps file.
+
+ metadata must be a dictionary. Currently, only the value for
+ the key 'Creator' is used.
+ """
+ isEPSF = format == 'eps'
+ if isinstance(outfile, six.string_types):
+ title = outfile
+ elif is_writable_file_like(outfile):
+ title = None
+ else:
+ raise ValueError("outfile must be a path or a file-like object")
+
+ self.figure.dpi = 72 # ignore the dpi kwarg
+ width, height = self.figure.get_size_inches()
+ xo = 0
+ yo = 0
+
+ l, b, w, h = self.figure.bbox.bounds
+ llx = xo
+ lly = yo
+ urx = llx + w
+ ury = lly + h
+ bbox = (llx, lly, urx, ury)
+
+ # generate PostScript code for the figure and store it in a string
+ origfacecolor = self.figure.get_facecolor()
+ origedgecolor = self.figure.get_edgecolor()
+ self.figure.set_facecolor(facecolor)
+ self.figure.set_edgecolor(edgecolor)
+
+ dryrun = kwargs.get("dryrun", False)
+ if dryrun:
+ class NullWriter(object):
+ def write(self, *kl, **kwargs):
+ pass
+
+ self._pswriter = NullWriter()
+ else:
+ self._pswriter = io.StringIO()
+
+ # mixed mode rendering
+ _bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
+ ps_renderer = self._renderer_class(width, height,
+ self._pswriter, imagedpi=dpi)
+ renderer = MixedModeRenderer(self.figure,
+ width, height, dpi, ps_renderer,
+ bbox_inches_restore=_bbox_inches_restore)
+
+ self.figure.draw(renderer)
+
+ if dryrun: # return immediately if dryrun (tightbbox=True)
+ return
+
+ self.figure.set_facecolor(origfacecolor)
+ self.figure.set_edgecolor(origedgecolor)
+
+ # check for custom metadata
+ if metadata is not None and 'Creator' in metadata:
+ creator_str = metadata['Creator']
+ else:
+ creator_str = "matplotlib version " + __version__ + \
+ ", http://matplotlib.org/"
+
+ # write to a temp file, we'll move it to outfile when done
+
+ fd, tmpfile = mkstemp()
+ try:
+ with io.open(fd, 'w', encoding='latin-1') as fh:
+ # write the Encapsulated PostScript headers
+ print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
+ if title:
+ print("%%Title: "+title, file=fh)
+ print("%%Creator: " + creator_str, file=fh)
+ # get source date from SOURCE_DATE_EPOCH, if set
+ # See https://reproducible-builds.org/specs/source-date-epoch/
+ source_date_epoch = os.getenv("SOURCE_DATE_EPOCH")
+ if source_date_epoch:
+ source_date = datetime.datetime.utcfromtimestamp(
+ int(source_date_epoch)).strftime(
+ "%a %b %d %H:%M:%S %Y")
+ else:
+ source_date = time.ctime()
+ print("%%CreationDate: "+source_date, file=fh)
+ print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
+ print("%%EndComments", file=fh)
+
+ Ndict = len(psDefs)
+ print("%%BeginProlog", file=fh)
+ print("/mpldict %d dict def" % Ndict, file=fh)
+ print("mpldict begin", file=fh)
+ for d in psDefs:
+ d = d.strip()
+ for l in d.split('\n'):
+ print(l.strip(), file=fh)
+ print("end", file=fh)
+ print("%%EndProlog", file=fh)
+
+ print("mpldict begin", file=fh)
+ print("%s translate" % _nums_to_str(xo, yo), file=fh)
+ print("%s clipbox" % _nums_to_str(width*72, height*72, 0, 0),
+ file=fh)
+
+ # write the figure
+ print(self._pswriter.getvalue(), file=fh)
+
+ # write the trailer
+ print("end", file=fh)
+ print("showpage", file=fh)
+ fh.flush()
+
+ if isLandscape: # now we are ready to rotate
+ isLandscape = True
+ width, height = height, width
+ bbox = (lly, llx, ury, urx)
+
+ # set the paper size to the figure size if isEPSF. The
+ # resulting ps file has the given size with correct bounding
+ # box so that there is no need to call 'pstoeps'
+ if isEPSF:
+ paperWidth, paperHeight = self.figure.get_size_inches()
+ if isLandscape:
+ paperWidth, paperHeight = paperHeight, paperWidth
+ else:
+ temp_papertype = _get_papertype(width, height)
+ if papertype == 'auto':
+ papertype = temp_papertype
+ paperWidth, paperHeight = papersize[temp_papertype]
+ else:
+ paperWidth, paperHeight = papersize[papertype]
+ if (width > paperWidth or height > paperHeight) and isEPSF:
+ paperWidth, paperHeight = papersize[temp_papertype]
+ _log.info('Your figure is too big to fit on %s paper. '
+ '%s paper will be used to prevent clipping.',
+ papertype, temp_papertype)
+
+ texmanager = ps_renderer.get_texmanager()
+ font_preamble = texmanager.get_font_preamble()
+ custom_preamble = texmanager.get_custom_preamble()
+
+ psfrag_rotated = convert_psfrags(tmpfile, ps_renderer.psfrag,
+ font_preamble,
+ custom_preamble, paperWidth,
+ paperHeight,
+ orientation)
+
+ if (rcParams['ps.usedistiller'] == 'ghostscript'
+ or rcParams['text.usetex']):
+ gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
+ rotated=psfrag_rotated)
+ elif rcParams['ps.usedistiller'] == 'xpdf':
+ xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
+ rotated=psfrag_rotated)
+
+ _move_path_to_path_or_stream(tmpfile, outfile)
+ finally:
+ if os.path.isfile(tmpfile):
+ os.unlink(tmpfile)
+
+
+def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
+ paperWidth, paperHeight, orientation):
+ """
+ When we want to use the LaTeX backend with postscript, we write PSFrag tags
+ to a temporary postscript file, each one marking a position for LaTeX to
+ render some text. convert_psfrags generates a LaTeX document containing the
+ commands to convert those tags to text. LaTeX/dvips produces the postscript
+ file that includes the actual text.
+ """
+ tmpdir = os.path.split(tmpfile)[0]
+ epsfile = tmpfile+'.eps'
+ shutil.move(tmpfile, epsfile)
+ latexfile = tmpfile+'.tex'
+ dvifile = tmpfile+'.dvi'
+ psfile = tmpfile+'.ps'
+
+ if orientation == 'landscape':
+ angle = 90
+ else:
+ angle = 0
+
+ if rcParams['text.latex.unicode']:
+ unicode_preamble = """\\usepackage{ucs}
+\\usepackage[utf8x]{inputenc}"""
+ else:
+ unicode_preamble = ''
+
+ s = """\\documentclass{article}
+%s
+%s
+%s
+\\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
+\\usepackage{psfrag}
+\\usepackage[dvips]{graphicx}
+\\usepackage{color}
+\\pagestyle{empty}
+\\begin{document}
+\\begin{figure}
+\\centering
+\\leavevmode
+%s
+\\includegraphics*[angle=%s]{%s}
+\\end{figure}
+\\end{document}
+"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
+ paperWidth, paperHeight,
+ '\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
+
+ with io.open(latexfile, 'wb') as latexh:
+ if rcParams['text.latex.unicode']:
+ latexh.write(s.encode('utf8'))
+ else:
+ try:
+ latexh.write(s.encode('ascii'))
+ except UnicodeEncodeError:
+ _log.info("You are using unicode and latex, but have "
+ "not enabled the matplotlib 'text.latex.unicode' "
+ "rcParam.")
+ raise
+
+ # Replace \\ with / so LaTeX does not think there is a function call
+ latexfile = latexfile.replace("\\", "/")
+ # Replace ~ so LaTeX does not treat it as a line break
+ latexfile = latexfile.replace("~", "\\string~")
+ command = [str("latex"), "-interaction=nonstopmode",
+ '"%s"' % latexfile]
+ _log.debug('%s', command)
+ try:
+ report = subprocess.check_output(command, cwd=tmpdir,
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ raise RuntimeError(
+ ('LaTeX was not able to process the following '
+ 'file:\n%s\n\n'
+ 'Here is the full report generated by LaTeX:\n%s '
+ '\n\n' % (latexfile,
+ exc.output.decode("utf-8"))))
+ _log.debug(report)
+
+ command = [str('dvips'), '-q', '-R0', '-o', os.path.basename(psfile),
+ os.path.basename(dvifile)]
+ _log.debug(command)
+ try:
+ report = subprocess.check_output(command, cwd=tmpdir,
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ raise RuntimeError(
+ ('dvips was not able to process the following '
+ 'file:\n%s\n\n'
+ 'Here is the full report generated by dvips:\n%s '
+ '\n\n' % (dvifile,
+ exc.output.decode("utf-8"))))
+ _log.debug(report)
+ os.remove(epsfile)
+ shutil.move(psfile, tmpfile)
+
+ # Check whether dvips created the ps in landscape orientation. For
+ # certain figure sizes (e.g., 8.3in, 5.8in, which is a5) the
+ # latex+dvips step above produces a landscape ps file and the
+ # bounding box of the final output gets messed up. We check whether
+ # the generated ps file is in landscape mode and return this
+ # information; the return value is used in the pstoeps step to
+ # recover the correct bounding box. 2010-06-05 JJL
+ with io.open(tmpfile) as fh:
+ if "Landscape" in fh.read(1000):
+ psfrag_rotated = True
+ else:
+ psfrag_rotated = False
+
+ if not debugPS:
+ for fname in glob.glob(tmpfile+'.*'):
+ os.remove(fname)
+
+ return psfrag_rotated
+
+
+def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
+ """
+ Use ghostscript's pswrite or epswrite device to distill a file.
+ This yields smaller files without illegal encapsulated postscript
+ operators. The output is low-level, converting text to outlines.
+ """
+
+ if eps:
+ paper_option = "-dEPSCrop"
+ else:
+ paper_option = "-sPAPERSIZE=%s" % ptype
+
+ psfile = tmpfile + '.ps'
+ dpi = rcParams['ps.distiller.res']
+
+ gs_exe = ps_backend_helper.gs_exe
+ if ps_backend_helper.supports_ps2write: # gs version >= 9
+ device_name = "ps2write"
+ else:
+ device_name = "pswrite"
+
+ command = [str(gs_exe), "-dBATCH", "-dNOPAUSE", "-r%d" % dpi,
+ "-sDEVICE=%s" % device_name, paper_option,
+ "-sOutputFile=%s" % psfile, tmpfile]
+ _log.debug(command)
+ try:
+ report = subprocess.check_output(command, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ raise RuntimeError(
+ ('ghostscript was not able to process your image.\n'
+ 'Here is the full report generated by ghostscript:\n%s '
+ '\n\n' % exc.output.decode("utf-8")))
+ _log.debug(report)
+ os.remove(tmpfile)
+ shutil.move(psfile, tmpfile)
+
+
+ # While it is best if above steps preserve the original bounding
+ # box, there seem to be cases when it is not. For those cases,
+ # the original bbox can be restored during the pstoeps step.
+
+ if eps:
+ # For some versions of gs, the above steps result in a ps file
+ # where the original bbox is no longer correct. Do not adjust the
+ # bbox for now.
+ if ps_backend_helper.supports_ps2write:
+ # for gs version >= 9 with the ps2write device
+ pstoeps(tmpfile, bbox, rotated=rotated)
+ else:
+ pstoeps(tmpfile)
+
+
+def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
+ """
+ Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
+ This yields smaller files without illegal encapsulated postscript
+ operators. This distiller is preferred, generating high-level postscript
+ output that treats text as text.
+ """
+ pdffile = tmpfile + '.pdf'
+ psfile = tmpfile + '.ps'
+
+ # Pass options as `-foo#bar` instead of `-foo=bar` to keep Windows happy
+ # (https://www.ghostscript.com/doc/9.22/Use.htm#MS_Windows).
+ command = [str("ps2pdf"),
+ "-dAutoFilterColorImages#false",
+ "-dAutoFilterGrayImages#false",
+ "-dAutoRotatePages#false",
+ "-sGrayImageFilter#FlateEncode",
+ "-sColorImageFilter#FlateEncode",
+ "-dEPSCrop" if eps else "-sPAPERSIZE#%s" % ptype,
+ tmpfile, pdffile]
+ _log.debug(command)
+
+ try:
+ report = subprocess.check_output(command, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ raise RuntimeError(
+ ('ps2pdf was not able to process your image.\n'
+ 'Here is the full report generated by ps2pdf:\n%s '
+ '\n\n' % exc.output.decode("utf-8")))
+ _log.debug(report)
+
+ command = [str("pdftops"), "-paper", "match", "-level2", pdffile, psfile]
+ _log.debug(command)
+ try:
+ report = subprocess.check_output(command, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ raise RuntimeError(
+ ('pdftops was not able to process your image.\n'
+ 'Here is the full report generated by pdftops:\n%s '
+ '\n\n' % exc.output.decode("utf-8")))
+ _log.debug(report)
+ os.remove(tmpfile)
+ shutil.move(psfile, tmpfile)
+
+ if eps:
+ pstoeps(tmpfile)
+
+ for fname in glob.glob(tmpfile+'.*'):
+ os.remove(fname)
+
+
+def get_bbox_header(lbrt, rotated=False):
+ """
+ Return a postscript header string for the given bbox lbrt=(l, b, r, t).
+ Optionally, return a rotate command.
+ """
+
+ l, b, r, t = lbrt
+ if rotated:
+ rotate = "%.2f %.2f translate\n90 rotate" % (l+r, 0)
+ else:
+ rotate = ""
+ bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
+ hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
+
+ return '\n'.join([bbox_info, hires_bbox_info]), rotate
+
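+# Illustrative example of the header produced by get_bbox_header above:
+#   get_bbox_header((0, 0, 612.5, 792.5)) returns
+#   ('%%BoundingBox: 0 0 613 793\n'
+#    '%%HiResBoundingBox: 0.000000 0.000000 612.500000 792.500000', '')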
+
+# get_bbox is deprecated. I don't see any reason to use ghostscript to
+# find the bounding box, as the required bounding box is already known.
+def get_bbox(tmpfile, bbox):
+ """
+ Use ghostscript's bbox device to find the center of the bounding box.
+ Return an appropriately sized bbox centered around that point. A bit of a
+ hack.
+ """
+
+ gs_exe = ps_backend_helper.gs_exe
+ command = [gs_exe, "-dBATCH", "-dNOPAUSE", "-sDEVICE=bbox", "%s" % tmpfile]
+ _log.debug(command)
+ p = subprocess.Popen(command, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ close_fds=True)
+ (stdout, stderr) = (p.stdout, p.stderr)
+ _log.debug(stdout.read())
+ bbox_info = stderr.read()
+ _log.info(bbox_info)
+ bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
+ if bbox_found:
+ bbox_info = bbox_found.group()
+ else:
+ raise RuntimeError('Ghostscript was not able to extract a bounding box.\
+Here is the Ghostscript output:\n\n%s' % bbox_info)
+ l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
+
+ # this is a hack to deal with the fact that ghostscript does not return the
+ # intended bbox, but a tight bbox. For now, we just center the ink in the
+ # intended bbox. This is not ideal, users may intend the ink to not be
+ # centered.
+ if bbox is None:
+ l, b, r, t = (l-1, b-1, r+1, t+1)
+ else:
+ x = (l+r)/2
+ y = (b+t)/2
+ dx = (bbox[2]-bbox[0])/2
+ dy = (bbox[3]-bbox[1])/2
+ l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
+
+ bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
+ hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
+
+ return '\n'.join([bbox_info, hires_bbox_info])
+
+
+def pstoeps(tmpfile, bbox=None, rotated=False):
+ """
+ Convert the postscript to encapsulated postscript. The bbox of
+ the eps file will be replaced with the given *bbox* argument. If
+ None, original bbox will be used.
+ """
+
+ # if rotated==True, the output eps file needs to be rotated
+ if bbox:
+ bbox_info, rotate = get_bbox_header(bbox, rotated=rotated)
+ else:
+ bbox_info, rotate = None, None
+
+ epsfile = tmpfile + '.eps'
+ with io.open(epsfile, 'wb') as epsh, io.open(tmpfile, 'rb') as tmph:
+ write = epsh.write
+ # Modify the header:
+ for line in tmph:
+ if line.startswith(b'%!PS'):
+ write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
+ if bbox:
+ write(bbox_info.encode('ascii') + b'\n')
+ elif line.startswith(b'%%EndComments'):
+ write(line)
+ write(b'%%BeginProlog\n'
+ b'save\n'
+ b'countdictstack\n'
+ b'mark\n'
+ b'newpath\n'
+ b'/showpage {} def\n'
+ b'/setpagedevice {pop} def\n'
+ b'%%EndProlog\n'
+ b'%%Page 1 1\n')
+ if rotate:
+ write(rotate.encode('ascii') + b'\n')
+ break
+ elif bbox and line.startswith((b'%%Bound', b'%%HiResBound',
+ b'%%DocumentMedia', b'%%Pages')):
+ pass
+ else:
+ write(line)
+ # Now rewrite the rest of the file, and modify the trailer.
+ # This is done in a second loop such that the header of the embedded
+ # eps file is not modified.
+ for line in tmph:
+ if line.startswith(b'%%EOF'):
+ write(b'cleartomark\n'
+ b'countdictstack\n'
+ b'exch sub { end } repeat\n'
+ b'restore\n'
+ b'showpage\n'
+ b'%%EOF\n')
+ elif line.startswith(b'%%PageBoundingBox'):
+ pass
+ else:
+ write(line)
+
+ os.remove(tmpfile)
+ shutil.move(epsfile, tmpfile)
+
+
+class FigureManagerPS(FigureManagerBase):
+ pass
+
+
+# The following Python dictionary psDefs contains the entries for the
+# PostScript dictionary mpldict. This dictionary implements most of
+# the matplotlib primitives and some abbreviations.
+#
+# References:
+# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
+# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
+# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
+#
+
+# The usage comments use the notation of the operator summary
+# in the PostScript Language reference manual.
+psDefs = [
+ # x y *m* -
+ "/m { moveto } bind def",
+ # x y *l* -
+ "/l { lineto } bind def",
+ # x y *r* -
+ "/r { rlineto } bind def",
+ # x1 y1 x2 y2 x y *c* -
+ "/c { curveto } bind def",
+ # *closepath* -
+ "/cl { closepath } bind def",
+ # w h x y *box* -
+ """/box {
+ m
+ 1 index 0 r
+ 0 exch r
+ neg 0 r
+ cl
+ } bind def""",
+ # w h x y *clipbox* -
+ """/clipbox {
+ box
+ clip
+ newpath
+ } bind def""",
+]
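+
+# With the prolog above, the body of a PostScript file emitted by this
+# backend can write paths with the short forms defined in psDefs, e.g.
+# (illustrative): "72 72 m 144 72 l 144 144 l cl" moves to (72, 72), draws
+# two line segments and closes the path, and "200 100 10 20 clipbox" clips
+# to a 200 x 100 point box whose lower-left corner is at (10, 20).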
+
+
+@_Backend.export
+class _BackendPS(_Backend):
+ FigureCanvas = FigureCanvasPS
+ FigureManager = FigureManagerPS
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4.py
new file mode 100644
index 00000000000..92463a6573a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4.py
@@ -0,0 +1,15 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from .backend_qt5 import (
+ backend_version, SPECIAL_KEYS, SUPER, ALT, CTRL, SHIFT, MODIFIER_KEYS,
+ cursord, _create_qApp, _BackendQT5, TimerQT, MainWindow, FigureManagerQT,
+ NavigationToolbar2QT, SubplotToolQt, error_msg_qt, exception_handler)
+from .backend_qt5 import FigureCanvasQT as FigureCanvasQT5
+
+
+@_BackendQT5.export
+class _BackendQT4(_BackendQT5):
+ pass
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4agg.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4agg.py
new file mode 100644
index 00000000000..7e90a09bf35
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4agg.py
@@ -0,0 +1,15 @@
+"""
+Render to Qt from Agg.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from .backend_qt5agg import (
+ _BackendQT5Agg, FigureCanvasQTAgg, FigureManagerQT, NavigationToolbar2QT)
+
+
+@_BackendQT5Agg.export
+class _BackendQT4Agg(_BackendQT5Agg):
+ pass
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4cairo.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4cairo.py
new file mode 100644
index 00000000000..f94851da382
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt4cairo.py
@@ -0,0 +1,6 @@
+from .backend_qt5cairo import _BackendQT5Cairo
+
+
+@_BackendQT5Cairo.export
+class _BackendQT4Cairo(_BackendQT5Cairo):
+ pass
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5.py
new file mode 100644
index 00000000000..f70bf6f1dc5
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5.py
@@ -0,0 +1,1118 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+import six
+
+import functools
+import os
+import re
+import signal
+import sys
+from six import unichr
+import traceback
+
+import matplotlib
+
+from matplotlib._pylab_helpers import Gcf
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
+ TimerBase, cursors, ToolContainerBase, StatusbarBase)
+import matplotlib.backends.qt_editor.figureoptions as figureoptions
+from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
+from matplotlib.figure import Figure
+from matplotlib.backend_managers import ToolManager
+from matplotlib import backend_tools
+
+from .qt_compat import (
+ QtCore, QtGui, QtWidgets, _getSaveFileName, is_pyqt5, __version__, QT_API)
+
+backend_version = __version__
+
+# SPECIAL_KEYS are keys that do *not* return their unicode name;
+# instead they have manually specified names
+SPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',
+ QtCore.Qt.Key_Shift: 'shift',
+ QtCore.Qt.Key_Alt: 'alt',
+ QtCore.Qt.Key_Meta: 'super',
+ QtCore.Qt.Key_Return: 'enter',
+ QtCore.Qt.Key_Left: 'left',
+ QtCore.Qt.Key_Up: 'up',
+ QtCore.Qt.Key_Right: 'right',
+ QtCore.Qt.Key_Down: 'down',
+ QtCore.Qt.Key_Escape: 'escape',
+ QtCore.Qt.Key_F1: 'f1',
+ QtCore.Qt.Key_F2: 'f2',
+ QtCore.Qt.Key_F3: 'f3',
+ QtCore.Qt.Key_F4: 'f4',
+ QtCore.Qt.Key_F5: 'f5',
+ QtCore.Qt.Key_F6: 'f6',
+ QtCore.Qt.Key_F7: 'f7',
+ QtCore.Qt.Key_F8: 'f8',
+ QtCore.Qt.Key_F9: 'f9',
+ QtCore.Qt.Key_F10: 'f10',
+ QtCore.Qt.Key_F11: 'f11',
+ QtCore.Qt.Key_F12: 'f12',
+ QtCore.Qt.Key_Home: 'home',
+ QtCore.Qt.Key_End: 'end',
+ QtCore.Qt.Key_PageUp: 'pageup',
+ QtCore.Qt.Key_PageDown: 'pagedown',
+ QtCore.Qt.Key_Tab: 'tab',
+ QtCore.Qt.Key_Backspace: 'backspace',
+ QtCore.Qt.Key_Enter: 'enter',
+ QtCore.Qt.Key_Insert: 'insert',
+ QtCore.Qt.Key_Delete: 'delete',
+ QtCore.Qt.Key_Pause: 'pause',
+ QtCore.Qt.Key_SysReq: 'sysreq',
+ QtCore.Qt.Key_Clear: 'clear', }
+
+# define which modifier keys are collected on keyboard events.
+# elements are (mpl names, Modifier Flag, Qt Key) tuples
+SUPER = 0
+ALT = 1
+CTRL = 2
+SHIFT = 3
+MODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),
+ ('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),
+ ('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),
+ ('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),
+ ]
+
+if sys.platform == 'darwin':
+ # in OSX, the control and super (aka cmd/apple) keys are switched, so
+ # switch them back.
+ SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'cmd', # cmd/apple key
+ QtCore.Qt.Key_Meta: 'control',
+ })
+ MODIFIER_KEYS[0] = ('cmd', QtCore.Qt.ControlModifier,
+ QtCore.Qt.Key_Control)
+ MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,
+ QtCore.Qt.Key_Meta)
+
+
+cursord = {
+ cursors.MOVE: QtCore.Qt.SizeAllCursor,
+ cursors.HAND: QtCore.Qt.PointingHandCursor,
+ cursors.POINTER: QtCore.Qt.ArrowCursor,
+ cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
+ cursors.WAIT: QtCore.Qt.WaitCursor,
+ }
+
+
+# placeholder for the global QApplication instance
+qApp = None
+
+
+def _create_qApp():
+ """
+ Only one qApp can exist at a time, so check before creating one.
+ """
+ global qApp
+
+ if qApp is None:
+ app = QtWidgets.QApplication.instance()
+ if app is None:
+ # check for DISPLAY env variable on X11 build of Qt
+ if is_pyqt5():
+ try:
+ from PyQt5 import QtX11Extras
+ is_x11_build = True
+ except ImportError:
+ is_x11_build = False
+ else:
+ is_x11_build = hasattr(QtGui, "QX11Info")
+ if is_x11_build:
+ display = os.environ.get('DISPLAY')
+ if display is None or not re.search(r':\d', display):
+ raise RuntimeError('Invalid DISPLAY variable')
+
+ qApp = QtWidgets.QApplication([b"matplotlib"])
+ qApp.lastWindowClosed.connect(qApp.quit)
+ else:
+ qApp = app
+
+ if is_pyqt5():
+ try:
+ qApp.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
+ qApp.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
+ except AttributeError:
+ pass
+
+
+def _allow_super_init(__init__):
+ """
+ Decorator for ``__init__`` to allow ``super().__init__`` on PyQt4/PySide2.
+ """
+
+ if QT_API == "PyQt5":
+
+ return __init__
+
+ else:
+ # To work around lack of cooperative inheritance in PyQt4, PySide,
+ # and PySide2, when calling FigureCanvasQT.__init__, we temporarily
+ # patch QWidget.__init__ by a cooperative version, that first calls
+ # QWidget.__init__ with no additional arguments, and then finds the
+ # next class in the MRO with an __init__ that does support cooperative
+ # inheritance (i.e., not defined by the PyQt4, PySide, PySide2, sip
+ # or Shiboken packages), and manually call its `__init__`, once again
+ # passing the additional arguments.
+
+ qwidget_init = QtWidgets.QWidget.__init__
+
+ def cooperative_qwidget_init(self, *args, **kwargs):
+ qwidget_init(self)
+ mro = type(self).__mro__
+ next_coop_init = next(
+ cls for cls in mro[mro.index(QtWidgets.QWidget) + 1:]
+ if cls.__module__.split(".")[0] not in [
+ "PyQt4", "sip", "PySide", "PySide2", "Shiboken"])
+ next_coop_init.__init__(self, *args, **kwargs)
+
+ @functools.wraps(__init__)
+ def wrapper(self, **kwargs):
+ try:
+ QtWidgets.QWidget.__init__ = cooperative_qwidget_init
+ __init__(self, **kwargs)
+ finally:
+ # Restore __init__
+ QtWidgets.QWidget.__init__ = qwidget_init
+
+ return wrapper
+
+
+class TimerQT(TimerBase):
+ '''
+ Subclass of :class:`backend_bases.TimerBase` that uses Qt timer events.
+
+ Attributes
+ ----------
+ interval : int
+ The time between timer events in milliseconds. Default is 1000 ms.
+ single_shot : bool
+ Boolean flag indicating whether this timer should
+ operate as single shot (run once and then stop). Defaults to False.
+ callbacks : list
+ Stores list of (func, args) tuples that will be called upon timer
+ events. This list can be manipulated directly, or the functions
+ `add_callback` and `remove_callback` can be used.
+
+ '''
+
+ def __init__(self, *args, **kwargs):
+ TimerBase.__init__(self, *args, **kwargs)
+
+ # Create a new timer and connect the timeout() signal to the
+ # _on_timer method.
+ self._timer = QtCore.QTimer()
+ self._timer.timeout.connect(self._on_timer)
+ self._timer_set_interval()
+
+ def _timer_set_single_shot(self):
+ self._timer.setSingleShot(self._single)
+
+ def _timer_set_interval(self):
+ self._timer.setInterval(self._interval)
+
+ def _timer_start(self):
+ self._timer.start()
+
+ def _timer_stop(self):
+ self._timer.stop()
+
+
+class FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):
+
+ # map Qt button codes to MouseEvent's ones:
+ buttond = {QtCore.Qt.LeftButton: 1,
+ QtCore.Qt.MidButton: 2,
+ QtCore.Qt.RightButton: 3,
+ # QtCore.Qt.XButton1: None,
+ # QtCore.Qt.XButton2: None,
+ }
+
+ @_allow_super_init
+ def __init__(self, figure):
+ _create_qApp()
+ super(FigureCanvasQT, self).__init__(figure=figure)
+
+ self.figure = figure
+ # We don't want to scale up the figure DPI more than once.
+ # Note, we don't handle a signal for changing DPI yet.
+ figure._original_dpi = figure.dpi
+ self._update_figure_dpi()
+ # In cases with mixed resolution displays, we need to be careful if the
+ # dpi_ratio changes - in this case we need to resize the canvas
+ # accordingly. We could watch for screenChanged events from Qt, but
+ # the issue is that we can't guarantee this will be emitted *before*
+ # the first paintEvent for the canvas, so instead we keep track of the
+ # dpi_ratio value here and in paintEvent we resize the canvas if
+ # needed.
+ self._dpi_ratio_prev = None
+
+ self._draw_pending = False
+ self._is_drawing = False
+ self._draw_rect_callback = lambda painter: None
+
+ self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
+ self.setMouseTracking(True)
+ self.resize(*self.get_width_height())
+ # Key auto-repeat enabled by default
+ self._keyautorepeat = True
+
+ palette = QtGui.QPalette(QtCore.Qt.white)
+ self.setPalette(palette)
+
+ def _update_figure_dpi(self):
+ dpi = self._dpi_ratio * self.figure._original_dpi
+ self.figure._set_dpi(dpi, forward=False)
+
+ @property
+ def _dpi_ratio(self):
+ # Not available on Qt4 or some older Qt5.
+ try:
+ # self.devicePixelRatio() returns 0 in rare cases
+ return self.devicePixelRatio() or 1
+ except AttributeError:
+ return 1
+
+ def _update_dpi(self):
+ # As described in __init__ above, we need to be careful in cases with
+ # mixed resolution displays if dpi_ratio is changing between painting
+ # events.
+ # Return whether we triggered a resizeEvent (and thus a paintEvent)
+ # from within this function.
+ if self._dpi_ratio != self._dpi_ratio_prev:
+ # We need to update the figure DPI.
+ self._update_figure_dpi()
+ self._dpi_ratio_prev = self._dpi_ratio
+ # The easiest way to resize the canvas is to emit a resizeEvent
+ # since we implement all the logic for resizing the canvas for
+ # that event.
+ event = QtGui.QResizeEvent(self.size(), self.size())
+ self.resizeEvent(event)
+ # resizeEvent triggers a paintEvent itself, so we exit this one
+ # (after making sure that the event is immediately handled).
+ return True
+ return False
+
+ def get_width_height(self):
+ w, h = FigureCanvasBase.get_width_height(self)
+ return int(w / self._dpi_ratio), int(h / self._dpi_ratio)
+
+ def enterEvent(self, event):
+ FigureCanvasBase.enter_notify_event(self, guiEvent=event)
+
+ def leaveEvent(self, event):
+ QtWidgets.QApplication.restoreOverrideCursor()
+ FigureCanvasBase.leave_notify_event(self, guiEvent=event)
+
+ def mouseEventCoords(self, pos):
+ """Calculate mouse coordinates in physical pixels
+
+ Qt5 uses logical pixels, but the figure is scaled to physical
+ pixels for rendering. Transform to physical pixels so that
+ all of the downstream transforms work as expected.
+
+ Also, the origin is different and needs to be corrected.
+
+ """
+ dpi_ratio = self._dpi_ratio
+ x = pos.x()
+ # flip y so y=0 is bottom of canvas
+ y = self.figure.bbox.height / dpi_ratio - pos.y()
+ return x * dpi_ratio, y * dpi_ratio
+
+ def mousePressEvent(self, event):
+ x, y = self.mouseEventCoords(event.pos())
+ button = self.buttond.get(event.button())
+ if button is not None:
+ FigureCanvasBase.button_press_event(self, x, y, button,
+ guiEvent=event)
+
+ def mouseDoubleClickEvent(self, event):
+ x, y = self.mouseEventCoords(event.pos())
+ button = self.buttond.get(event.button())
+ if button is not None:
+ FigureCanvasBase.button_press_event(self, x, y,
+ button, dblclick=True,
+ guiEvent=event)
+
+ def mouseMoveEvent(self, event):
+ x, y = self.mouseEventCoords(event)
+ FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
+
+ def mouseReleaseEvent(self, event):
+ x, y = self.mouseEventCoords(event)
+ button = self.buttond.get(event.button())
+ if button is not None:
+ FigureCanvasBase.button_release_event(self, x, y, button,
+ guiEvent=event)
+
+ if is_pyqt5():
+ def wheelEvent(self, event):
+ x, y = self.mouseEventCoords(event)
+ # from QWheelEvent::delta doc
+ if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:
+ steps = event.angleDelta().y() / 120
+ else:
+ steps = event.pixelDelta().y()
+ if steps:
+ FigureCanvasBase.scroll_event(
+ self, x, y, steps, guiEvent=event)
+ else:
+ def wheelEvent(self, event):
+ x = event.x()
+ # flip y so y=0 is bottom of canvas
+ y = self.figure.bbox.height - event.y()
+ # from QWheelEvent::delta doc
+ steps = event.delta() / 120
+ if event.orientation() == QtCore.Qt.Vertical:
+ FigureCanvasBase.scroll_event(
+ self, x, y, steps, guiEvent=event)
+
+ def keyPressEvent(self, event):
+ key = self._get_key(event)
+ if key is not None:
+ FigureCanvasBase.key_press_event(self, key, guiEvent=event)
+
+ def keyReleaseEvent(self, event):
+ key = self._get_key(event)
+ if key is not None:
+ FigureCanvasBase.key_release_event(self, key, guiEvent=event)
+
+ @property
+ def keyAutoRepeat(self):
+ """
+ If True, enable auto-repeat for key events.
+ """
+ return self._keyautorepeat
+
+ @keyAutoRepeat.setter
+ def keyAutoRepeat(self, val):
+ self._keyautorepeat = bool(val)
+
+ def resizeEvent(self, event):
+ # _dpi_ratio_prev will be set the first time the canvas is painted, and
+ # the rendered buffer is useless before then anyway.
+ if self._dpi_ratio_prev is None:
+ return
+ w = event.size().width() * self._dpi_ratio
+ h = event.size().height() * self._dpi_ratio
+ dpival = self.figure.dpi
+ winch = w / dpival
+ hinch = h / dpival
+ self.figure.set_size_inches(winch, hinch, forward=False)
+ # pass back into Qt to let it finish
+ QtWidgets.QWidget.resizeEvent(self, event)
+ # emit our resize events
+ FigureCanvasBase.resize_event(self)
+
+ def sizeHint(self):
+ w, h = self.get_width_height()
+ return QtCore.QSize(w, h)
+
+ def minimumSizeHint(self):
+ return QtCore.QSize(10, 10)
+
+ def _get_key(self, event):
+ if not self._keyautorepeat and event.isAutoRepeat():
+ return None
+
+ event_key = event.key()
+ event_mods = int(event.modifiers()) # actually a bitmask
+
+ # get names of the pressed modifier keys
+ # bit twiddling to pick out modifier keys from event_mods bitmask,
+ # if event_key is a MODIFIER, it should not be duplicated in mods
+ mods = [name for name, mod_key, qt_key in MODIFIER_KEYS
+ if event_key != qt_key and (event_mods & mod_key) == mod_key]
+ try:
+ # for certain keys (enter, left, backspace, etc) use a word for the
+ # key, rather than unicode
+ key = SPECIAL_KEYS[event_key]
+ except KeyError:
+ # unicode defines code points up to 0x0010ffff
+ # QT will use Key_Codes larger than that for keyboard keys that
+ # are not unicode characters (like multimedia keys)
+ # skip these
+ # if you really want them, you should add them to SPECIAL_KEYS
+ MAX_UNICODE = 0x10ffff
+ if event_key > MAX_UNICODE:
+ return None
+
+ key = unichr(event_key)
+ # Qt delivers capitalized letters; fix capitalization
+ # note that capslock is ignored
+ if 'shift' in mods:
+ mods.remove('shift')
+ else:
+ key = key.lower()
+
+ mods.reverse()
+ return '+'.join(mods + [key])
+
+ def new_timer(self, *args, **kwargs):
+ """
+ Creates a new backend-specific subclass of
+ :class:`backend_bases.Timer`. This is useful for getting
+ periodic events through the backend's native event
+ loop. Implemented only for backends with GUIs.
+
+ Other Parameters
+ ----------------
+ interval : scalar
+ Timer interval in milliseconds
+
+ callbacks : list
+ Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
+ will be executed by the timer every *interval*.
+
+ """
+ return TimerQT(*args, **kwargs)
+
+ def flush_events(self):
+ qApp.processEvents()
+
+ def start_event_loop(self, timeout=0):
+ if hasattr(self, "_event_loop") and self._event_loop.isRunning():
+ raise RuntimeError("Event loop already running")
+ self._event_loop = event_loop = QtCore.QEventLoop()
+ if timeout:
+ timer = QtCore.QTimer.singleShot(timeout * 1000, event_loop.quit)
+ event_loop.exec_()
+
+ def stop_event_loop(self, event=None):
+ if hasattr(self, "_event_loop"):
+ self._event_loop.quit()
+
+ def draw(self):
+ """Render the figure, and queue a request for a Qt draw.
+ """
+ # The renderer draw is done here; delaying causes problems with code
+ # that uses the result of the draw() to update plot elements.
+ if self._is_drawing:
+ return
+ self._is_drawing = True
+ try:
+ super(FigureCanvasQT, self).draw()
+ finally:
+ self._is_drawing = False
+ self.update()
+
+ def draw_idle(self):
+ """Queue redraw of the Agg buffer and request Qt paintEvent.
+ """
+ # The Agg draw needs to be handled by the same thread matplotlib
+ # modifies the scene graph from. Post Agg draw request to the
+ # current event loop in order to ensure thread affinity and to
+ # accumulate multiple draw requests from event handling.
+ # TODO: queued signal connection might be safer than singleShot
+ if not (self._draw_pending or self._is_drawing):
+ self._draw_pending = True
+ QtCore.QTimer.singleShot(0, self._draw_idle)
+
+ def _draw_idle(self):
+ if self.height() < 0 or self.width() < 0:
+ self._draw_pending = False
+ if not self._draw_pending:
+ return
+ try:
+ self.draw()
+ except Exception:
+ # Uncaught exceptions are fatal for PyQt5, so catch them instead.
+ traceback.print_exc()
+ finally:
+ self._draw_pending = False
+
+ def drawRectangle(self, rect):
+ # Draw the zoom rectangle to the QPainter. _draw_rect_callback needs
+ # to be called at the end of paintEvent.
+ if rect is not None:
+ def _draw_rect_callback(painter):
+ pen = QtGui.QPen(QtCore.Qt.black, 1 / self._dpi_ratio,
+ QtCore.Qt.DotLine)
+ painter.setPen(pen)
+ painter.drawRect(*(pt / self._dpi_ratio for pt in rect))
+ else:
+ def _draw_rect_callback(painter):
+ return
+ self._draw_rect_callback = _draw_rect_callback
+ self.update()
+
+
+class MainWindow(QtWidgets.QMainWindow):
+ closing = QtCore.Signal()
+
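+    # Emit ``closing`` before the default close handling so that connected
+    # slots (canvas close_event, figure manager cleanup) run first.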
+ def closeEvent(self, event):
+ self.closing.emit()
+ QtWidgets.QMainWindow.closeEvent(self, event)
+
+
+class FigureManagerQT(FigureManagerBase):
+ """
+ Attributes
+ ----------
+ canvas : `FigureCanvas`
+ The FigureCanvas instance
+ num : int or str
+ The Figure number
+ toolbar : qt.QToolBar
+ The qt.QToolBar
+ window : qt.QMainWindow
+ The qt.QMainWindow
+
+ """
+
+ def __init__(self, canvas, num):
+ FigureManagerBase.__init__(self, canvas, num)
+ self.canvas = canvas
+ self.window = MainWindow()
+ self.window.closing.connect(canvas.close_event)
+ self.window.closing.connect(self._widgetclosed)
+
+ self.window.setWindowTitle("Figure %d" % num)
+ image = os.path.join(matplotlib.rcParams['datapath'],
+ 'images', 'matplotlib.svg')
+ self.window.setWindowIcon(QtGui.QIcon(image))
+
+        # Give the keyboard focus to the figure instead of the
+        # manager; StrongFocus accepts both tab and click to focus and
+        # will enable the canvas to process events without clicking.
+        # ClickFocus only takes the focus if the window has been
+        # clicked on.
+        # http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
+        # http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
+ self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
+ self.canvas.setFocus()
+
+ self.window._destroying = False
+
+ self.toolmanager = self._get_toolmanager()
+ self.toolbar = self._get_toolbar(self.canvas, self.window)
+ self.statusbar = None
+
+ if self.toolmanager:
+ backend_tools.add_tools_to_manager(self.toolmanager)
+ if self.toolbar:
+ backend_tools.add_tools_to_container(self.toolbar)
+ self.statusbar = StatusbarQt(self.window, self.toolmanager)
+
+ if self.toolbar is not None:
+ self.window.addToolBar(self.toolbar)
+ if not self.toolmanager:
+ # add text label to status bar
+ statusbar_label = QtWidgets.QLabel()
+ self.window.statusBar().addWidget(statusbar_label)
+ self.toolbar.message.connect(statusbar_label.setText)
+ tbs_height = self.toolbar.sizeHint().height()
+ else:
+ tbs_height = 0
+
+ # resize the main window so it will display the canvas with the
+ # requested size:
+ cs = canvas.sizeHint()
+ sbs = self.window.statusBar().sizeHint()
+ self._status_and_tool_height = tbs_height + sbs.height()
+ height = cs.height() + self._status_and_tool_height
+ self.window.resize(cs.width(), height)
+
+ self.window.setCentralWidget(self.canvas)
+
+ if matplotlib.is_interactive():
+ self.window.show()
+ self.canvas.draw_idle()
+
+ def notify_axes_change(fig):
+ # This will be called whenever the current axes is changed
+ if self.toolbar is not None:
+ self.toolbar.update()
+ self.canvas.figure.add_axobserver(notify_axes_change)
+ self.window.raise_()
+
+ def full_screen_toggle(self):
+ if self.window.isFullScreen():
+ self.window.showNormal()
+ else:
+ self.window.showFullScreen()
+
+ def _widgetclosed(self):
+ if self.window._destroying:
+ return
+ self.window._destroying = True
+ try:
+ Gcf.destroy(self.num)
+ except AttributeError:
+ pass
+ # It seems that when the python session is killed,
+ # Gcf can get destroyed before the Gcf.destroy
+ # line is run, leading to a useless AttributeError.
+
+ def _get_toolbar(self, canvas, parent):
+        # must be initialized after the window, drawingArea and figure
+        # attributes are set
+ if matplotlib.rcParams['toolbar'] == 'toolbar2':
+ toolbar = NavigationToolbar2QT(canvas, parent, False)
+ elif matplotlib.rcParams['toolbar'] == 'toolmanager':
+ toolbar = ToolbarQt(self.toolmanager, self.window)
+ else:
+ toolbar = None
+ return toolbar
+
+ def _get_toolmanager(self):
+ if matplotlib.rcParams['toolbar'] == 'toolmanager':
+ toolmanager = ToolManager(self.canvas.figure)
+ else:
+ toolmanager = None
+ return toolmanager
+
+ def resize(self, width, height):
+ 'set the canvas size in pixels'
+ self.window.resize(width, height + self._status_and_tool_height)
+
+ def show(self):
+ self.window.show()
+ self.window.activateWindow()
+ self.window.raise_()
+
+ def destroy(self, *args):
+ # check for qApp first, as PySide deletes it in its atexit handler
+ if QtWidgets.QApplication.instance() is None:
+ return
+ if self.window._destroying:
+ return
+ self.window._destroying = True
+ if self.toolbar:
+ self.toolbar.destroy()
+ self.window.close()
+
+ def get_window_title(self):
+ return six.text_type(self.window.windowTitle())
+
+ def set_window_title(self, title):
+ self.window.setWindowTitle(title)
+
+
+class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):
+ message = QtCore.Signal(str)
+
+ def __init__(self, canvas, parent, coordinates=True):
+ """ coordinates: should we show the coordinates on the right? """
+ self.canvas = canvas
+ self.parent = parent
+ self.coordinates = coordinates
+ self._actions = {}
+ """A mapping of toolitem method names to their QActions"""
+
+ QtWidgets.QToolBar.__init__(self, parent)
+ NavigationToolbar2.__init__(self, canvas)
+
+ def _icon(self, name):
+ if is_pyqt5():
+ name = name.replace('.png', '_large.png')
+ pm = QtGui.QPixmap(os.path.join(self.basedir, name))
+ if hasattr(pm, 'setDevicePixelRatio'):
+ pm.setDevicePixelRatio(self.canvas._dpi_ratio)
+ return QtGui.QIcon(pm)
+
+ def _init_toolbar(self):
+ self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
+
+ for text, tooltip_text, image_file, callback in self.toolitems:
+ if text is None:
+ self.addSeparator()
+ else:
+ a = self.addAction(self._icon(image_file + '.png'),
+ text, getattr(self, callback))
+ self._actions[callback] = a
+ if callback in ['zoom', 'pan']:
+ a.setCheckable(True)
+ if tooltip_text is not None:
+ a.setToolTip(tooltip_text)
+ if text == 'Subplots':
+ a = self.addAction(self._icon("qt4_editor_options.png"),
+ 'Customize', self.edit_parameters)
+ a.setToolTip('Edit axis, curve and image parameters')
+
+ self.buttons = {}
+
+        # Add the x,y location widget at the right side of the toolbar.
+        # The stretch factor is 1, which means any resizing of the toolbar
+        # will resize this label instead of the buttons.
+ if self.coordinates:
+ self.locLabel = QtWidgets.QLabel("", self)
+ self.locLabel.setAlignment(
+ QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
+ self.locLabel.setSizePolicy(
+ QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
+ QtWidgets.QSizePolicy.Ignored))
+ labelAction = self.addWidget(self.locLabel)
+ labelAction.setVisible(True)
+
+ # reference holder for subplots_adjust window
+ self.adj_window = None
+
+        # Aesthetic adjustments - we need to set these explicitly in PyQt5,
+        # otherwise the layout looks different - but we don't want to set it
+        # if not using HiDPI icons, otherwise they look worse than before.
+ if is_pyqt5():
+ self.setIconSize(QtCore.QSize(24, 24))
+ self.layout().setSpacing(12)
+
+ if is_pyqt5():
+ # For some reason, self.setMinimumHeight doesn't seem to carry over to
+ # the actual sizeHint, so override it instead in order to make the
+ # aesthetic adjustments noted above.
+ def sizeHint(self):
+ size = super(NavigationToolbar2QT, self).sizeHint()
+ size.setHeight(max(48, size.height()))
+ return size
+
+ def edit_parameters(self):
+ allaxes = self.canvas.figure.get_axes()
+ if not allaxes:
+ QtWidgets.QMessageBox.warning(
+ self.parent, "Error", "There are no axes to edit.")
+ return
+ elif len(allaxes) == 1:
+ axes, = allaxes
+ else:
+ titles = []
+ for axes in allaxes:
+ name = (axes.get_title() or
+ " - ".join(filter(None, [axes.get_xlabel(),
+ axes.get_ylabel()])) or
+ "<anonymous {} (id: {:#x})>".format(
+ type(axes).__name__, id(axes)))
+ titles.append(name)
+ item, ok = QtWidgets.QInputDialog.getItem(
+ self.parent, 'Customize', 'Select axes:', titles, 0, False)
+ if ok:
+ axes = allaxes[titles.index(six.text_type(item))]
+ else:
+ return
+
+ figureoptions.figure_edit(axes, self)
+
+ def _update_buttons_checked(self):
+ # sync button checkstates to match active mode
+ self._actions['pan'].setChecked(self._active == 'PAN')
+ self._actions['zoom'].setChecked(self._active == 'ZOOM')
+
+ def pan(self, *args):
+ super(NavigationToolbar2QT, self).pan(*args)
+ self._update_buttons_checked()
+
+ def zoom(self, *args):
+ super(NavigationToolbar2QT, self).zoom(*args)
+ self._update_buttons_checked()
+
+ def set_message(self, s):
+ self.message.emit(s)
+ if self.coordinates:
+ self.locLabel.setText(s)
+
+ def set_cursor(self, cursor):
+ self.canvas.setCursor(cursord[cursor])
+
+ def draw_rubberband(self, event, x0, y0, x1, y1):
+ height = self.canvas.figure.bbox.height
+ y1 = height - y1
+ y0 = height - y0
+ rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]
+ self.canvas.drawRectangle(rect)
+
+ def remove_rubberband(self):
+ self.canvas.drawRectangle(None)
+
+ def configure_subplots(self):
+ image = os.path.join(matplotlib.rcParams['datapath'],
+ 'images', 'matplotlib.png')
+ dia = SubplotToolQt(self.canvas.figure, self.parent)
+ dia.setWindowIcon(QtGui.QIcon(image))
+ dia.exec_()
+
+ def save_figure(self, *args):
+ filetypes = self.canvas.get_supported_filetypes_grouped()
+ sorted_filetypes = sorted(six.iteritems(filetypes))
+ default_filetype = self.canvas.get_default_filetype()
+
+ startpath = os.path.expanduser(
+ matplotlib.rcParams['savefig.directory'])
+ start = os.path.join(startpath, self.canvas.get_default_filename())
+ filters = []
+ selectedFilter = None
+ for name, exts in sorted_filetypes:
+ exts_list = " ".join(['*.%s' % ext for ext in exts])
+ filter = '%s (%s)' % (name, exts_list)
+ if default_filetype in exts:
+ selectedFilter = filter
+ filters.append(filter)
+ filters = ';;'.join(filters)
+
+ fname, filter = _getSaveFileName(self.parent,
+ "Choose a filename to save to",
+ start, filters, selectedFilter)
+ if fname:
+ # Save dir for next time, unless empty str (i.e., use cwd).
+ if startpath != "":
+ matplotlib.rcParams['savefig.directory'] = (
+ os.path.dirname(six.text_type(fname)))
+ try:
+ self.canvas.figure.savefig(six.text_type(fname))
+ except Exception as e:
+ QtWidgets.QMessageBox.critical(
+ self, "Error saving file", six.text_type(e),
+ QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
+
+
+class SubplotToolQt(UiSubplotTool):
+ def __init__(self, targetfig, parent):
+ UiSubplotTool.__init__(self, None)
+
+ self._figure = targetfig
+
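+        # Couple the paired spinboxes so that, e.g., "bottom" can never be
+        # raised above "top" (a gap of 0.001 is kept between them).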
+ for lower, higher in [("bottom", "top"), ("left", "right")]:
+ self._widgets[lower].valueChanged.connect(
+ lambda val: self._widgets[higher].setMinimum(val + .001))
+ self._widgets[higher].valueChanged.connect(
+ lambda val: self._widgets[lower].setMaximum(val - .001))
+
+ self._attrs = ["top", "bottom", "left", "right", "hspace", "wspace"]
+ self._defaults = {attr: vars(self._figure.subplotpars)[attr]
+ for attr in self._attrs}
+
+ # Set values after setting the range callbacks, but before setting up
+ # the redraw callbacks.
+ self._reset()
+
+ for attr in self._attrs:
+ self._widgets[attr].valueChanged.connect(self._on_value_changed)
+ for action, method in [("Export values", self._export_values),
+ ("Tight layout", self._tight_layout),
+ ("Reset", self._reset),
+ ("Close", self.close)]:
+ self._widgets[action].clicked.connect(method)
+
+ def _export_values(self):
+ # Explicitly round to 3 decimals (which is also the spinbox precision)
+ # to avoid numbers of the form 0.100...001.
+ dialog = QtWidgets.QDialog()
+ layout = QtWidgets.QVBoxLayout()
+ dialog.setLayout(layout)
+ text = QtWidgets.QPlainTextEdit()
+ text.setReadOnly(True)
+ layout.addWidget(text)
+ text.setPlainText(
+ ",\n".join("{}={:.3}".format(attr, self._widgets[attr].value())
+ for attr in self._attrs))
+ # Adjust the height of the text widget to fit the whole text, plus
+ # some padding.
+ size = text.maximumSize()
+ size.setHeight(
+ QtGui.QFontMetrics(text.document().defaultFont())
+ .size(0, text.toPlainText()).height() + 20)
+ text.setMaximumSize(size)
+ dialog.exec_()
+
+ def _on_value_changed(self):
+ self._figure.subplots_adjust(**{attr: self._widgets[attr].value()
+ for attr in self._attrs})
+ self._figure.canvas.draw_idle()
+
+ def _tight_layout(self):
+ self._figure.tight_layout()
+ for attr in self._attrs:
+ widget = self._widgets[attr]
+ widget.blockSignals(True)
+ widget.setValue(vars(self._figure.subplotpars)[attr])
+ widget.blockSignals(False)
+ self._figure.canvas.draw_idle()
+
+ def _reset(self):
+ for attr, value in self._defaults.items():
+ self._widgets[attr].setValue(value)
+
+
+class ToolbarQt(ToolContainerBase, QtWidgets.QToolBar):
+ def __init__(self, toolmanager, parent):
+ ToolContainerBase.__init__(self, toolmanager)
+ QtWidgets.QToolBar.__init__(self, parent)
+ self._toolitems = {}
+ self._groups = {}
+ self._last = None
+
+ @property
+ def _icon_extension(self):
+ if is_pyqt5():
+ return '_large.png'
+ return '.png'
+
+ def add_toolitem(
+ self, name, group, position, image_file, description, toggle):
+
+ button = QtWidgets.QToolButton(self)
+ button.setIcon(self._icon(image_file))
+ button.setText(name)
+ if description:
+ button.setToolTip(description)
+
+ def handler():
+ self.trigger_tool(name)
+ if toggle:
+ button.setCheckable(True)
+ button.toggled.connect(handler)
+ else:
+ button.clicked.connect(handler)
+
+ self._last = button
+ self._toolitems.setdefault(name, [])
+ self._add_to_group(group, name, button, position)
+ self._toolitems[name].append((button, handler))
+
+ def _add_to_group(self, group, name, button, position):
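+        # A separator is created the first time a group is seen and stays at
+        # the end of the group; new buttons are inserted before the entry
+        # currently at *position*.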
+ gr = self._groups.get(group, [])
+ if not gr:
+ sep = self.addSeparator()
+ gr.append(sep)
+ before = gr[position]
+ widget = self.insertWidget(before, button)
+ gr.insert(position, widget)
+ self._groups[group] = gr
+
+ def _icon(self, name):
+ pm = QtGui.QPixmap(name)
+ if hasattr(pm, 'setDevicePixelRatio'):
+ pm.setDevicePixelRatio(self.toolmanager.canvas._dpi_ratio)
+ return QtGui.QIcon(pm)
+
+ def toggle_toolitem(self, name, toggled):
+ if name not in self._toolitems:
+ return
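+        # Temporarily disconnect the handler so that setting the checked
+        # state programmatically does not re-trigger the tool.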
+ for button, handler in self._toolitems[name]:
+ button.toggled.disconnect(handler)
+ button.setChecked(toggled)
+ button.toggled.connect(handler)
+
+ def remove_toolitem(self, name):
+ for button, handler in self._toolitems[name]:
+ button.setParent(None)
+ del self._toolitems[name]
+
+
+class StatusbarQt(StatusbarBase, QtWidgets.QLabel):
+ def __init__(self, window, *args, **kwargs):
+ StatusbarBase.__init__(self, *args, **kwargs)
+ QtWidgets.QLabel.__init__(self)
+ window.statusBar().addWidget(self)
+
+ def set_message(self, s):
+ self.setText(s)
+
+
+class ConfigureSubplotsQt(backend_tools.ConfigureSubplotsBase):
+ def trigger(self, *args):
+ image = os.path.join(matplotlib.rcParams['datapath'],
+ 'images', 'matplotlib.png')
+ parent = self.canvas.manager.window
+ dia = SubplotToolQt(self.figure, parent)
+ dia.setWindowIcon(QtGui.QIcon(image))
+ dia.exec_()
+
+
+class SaveFigureQt(backend_tools.SaveFigureBase):
+ def trigger(self, *args):
+ filetypes = self.canvas.get_supported_filetypes_grouped()
+ sorted_filetypes = sorted(six.iteritems(filetypes))
+ default_filetype = self.canvas.get_default_filetype()
+
+ startpath = os.path.expanduser(
+ matplotlib.rcParams['savefig.directory'])
+ start = os.path.join(startpath, self.canvas.get_default_filename())
+ filters = []
+ selectedFilter = None
+ for name, exts in sorted_filetypes:
+ exts_list = " ".join(['*.%s' % ext for ext in exts])
+ filter = '%s (%s)' % (name, exts_list)
+ if default_filetype in exts:
+ selectedFilter = filter
+ filters.append(filter)
+ filters = ';;'.join(filters)
+
+ parent = self.canvas.manager.window
+ fname, filter = _getSaveFileName(parent,
+ "Choose a filename to save to",
+ start, filters, selectedFilter)
+ if fname:
+ # Save dir for next time, unless empty str (i.e., use cwd).
+ if startpath != "":
+ matplotlib.rcParams['savefig.directory'] = (
+ os.path.dirname(six.text_type(fname)))
+ try:
+ self.canvas.figure.savefig(six.text_type(fname))
+ except Exception as e:
+ QtWidgets.QMessageBox.critical(
+                    parent, "Error saving file", six.text_type(e),
+ QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
+
+
+class SetCursorQt(backend_tools.SetCursorBase):
+ def set_cursor(self, cursor):
+ self.canvas.setCursor(cursord[cursor])
+
+
+class RubberbandQt(backend_tools.RubberbandBase):
+ def draw_rubberband(self, x0, y0, x1, y1):
+ height = self.canvas.figure.bbox.height
+ y1 = height - y1
+ y0 = height - y0
+ rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]
+ self.canvas.drawRectangle(rect)
+
+ def remove_rubberband(self):
+ self.canvas.drawRectangle(None)
+
+
+backend_tools.ToolSaveFigure = SaveFigureQt
+backend_tools.ToolConfigureSubplots = ConfigureSubplotsQt
+backend_tools.ToolSetCursor = SetCursorQt
+backend_tools.ToolRubberband = RubberbandQt
+
+
+def error_msg_qt(msg, parent=None):
+ if not isinstance(msg, six.string_types):
+ msg = ','.join(map(str, msg))
+
+ QtWidgets.QMessageBox.warning(None, "Matplotlib",
+                                  msg, QtWidgets.QMessageBox.Ok)
+
+
+def exception_handler(type, value, tb):
+    """Handle uncaught exceptions.
+
+    It does not catch SystemExit.
+    """
+ msg = ''
+ # get the filename attribute if available (for IOError)
+ if hasattr(value, 'filename') and value.filename is not None:
+ msg = value.filename + ': '
+ if hasattr(value, 'strerror') and value.strerror is not None:
+ msg += value.strerror
+ else:
+ msg += six.text_type(value)
+
+ if len(msg):
+ error_msg_qt(msg)
+
+
+@_Backend.export
+class _BackendQT5(_Backend):
+ FigureCanvas = FigureCanvasQT
+ FigureManager = FigureManagerQT
+
+ @staticmethod
+ def trigger_manager_draw(manager):
+ manager.canvas.draw_idle()
+
+ @staticmethod
+ def mainloop():
+ # allow KeyboardInterrupt exceptions to close the plot window.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ qApp.exec_()
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5agg.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5agg.py
new file mode 100644
index 00000000000..826156e6784
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5agg.py
@@ -0,0 +1,105 @@
+"""
+Render to qt from agg
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import ctypes
+
+from matplotlib import cbook
+from matplotlib.transforms import Bbox
+
+from .backend_agg import FigureCanvasAgg
+from .backend_qt5 import (
+ QtCore, QtGui, QtWidgets, _BackendQT5, FigureCanvasQT, FigureManagerQT,
+ NavigationToolbar2QT, backend_version)
+from .qt_compat import QT_API
+
+
+class FigureCanvasQTAgg(FigureCanvasAgg, FigureCanvasQT):
+
+ def __init__(self, figure):
+ super(FigureCanvasQTAgg, self).__init__(figure=figure)
+ self._bbox_queue = []
+
+ @property
+ @cbook.deprecated("2.1")
+ def blitbox(self):
+ return self._bbox_queue
+
+ def paintEvent(self, e):
+        """Copy the image from the Agg canvas to the Qt drawable.
+
+        In Qt, all drawing should be done inside this method while the
+        widget is shown onscreen.
+ """
+ if self._update_dpi():
+ # The dpi update triggered its own paintEvent.
+ return
+ self._draw_idle() # Only does something if a draw is pending.
+
+ # if the canvas does not have a renderer, then give up and wait for
+ # FigureCanvasAgg.draw(self) to be called
+ if not hasattr(self, 'renderer'):
+ return
+
+ painter = QtGui.QPainter(self)
+
+ if self._bbox_queue:
+ bbox_queue = self._bbox_queue
+ else:
+ painter.eraseRect(self.rect())
+ bbox_queue = [
+ Bbox([[0, 0], [self.renderer.width, self.renderer.height]])]
+ self._bbox_queue = []
+ for bbox in bbox_queue:
+ l, b, r, t = map(int, bbox.extents)
+ w = r - l
+ h = t - b
+ reg = self.copy_from_bbox(bbox)
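+            # to_string_argb gives the region's pixels in the byte order
+            # expected by QImage.Format_ARGB32.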
+ buf = reg.to_string_argb()
+ qimage = QtGui.QImage(buf, w, h, QtGui.QImage.Format_ARGB32)
+ # Adjust the buf reference count to work around a memory leak bug
+ # in QImage under PySide on Python 3.
+ if QT_API in ('PySide', 'PySide2') and six.PY3:
+ ctypes.c_long.from_address(id(buf)).value = 1
+ if hasattr(qimage, 'setDevicePixelRatio'):
+ # Not available on Qt4 or some older Qt5.
+ qimage.setDevicePixelRatio(self._dpi_ratio)
+ origin = QtCore.QPoint(l, self.renderer.height - t)
+ painter.drawImage(origin / self._dpi_ratio, qimage)
+
+ self._draw_rect_callback(painter)
+
+ painter.end()
+
+ def blit(self, bbox=None):
+ """Blit the region in bbox.
+ """
+ # If bbox is None, blit the entire canvas. Otherwise
+ # blit only the area defined by the bbox.
+ if bbox is None and self.figure:
+ bbox = self.figure.bbox
+
+ self._bbox_queue.append(bbox)
+
+ # repaint uses logical pixels, not physical pixels like the renderer.
+ l, b, w, h = [pt / self._dpi_ratio for pt in bbox.bounds]
+ t = b + h
+ self.repaint(l, self.renderer.height / self._dpi_ratio - t, w, h)
+
+ def print_figure(self, *args, **kwargs):
+ super(FigureCanvasQTAgg, self).print_figure(*args, **kwargs)
+ self.draw()
+
+
+@cbook.deprecated("2.2")
+class FigureCanvasQTAggBase(FigureCanvasQTAgg):
+ pass
+
+
+@_BackendQT5.export
+class _BackendQT5Agg(_BackendQT5):
+ FigureCanvas = FigureCanvasQTAgg
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5cairo.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5cairo.py
new file mode 100644
index 00000000000..1108707c3a0
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_qt5cairo.py
@@ -0,0 +1,49 @@
+
+import six
+
+from .backend_cairo import cairo, FigureCanvasCairo, RendererCairo
+from .backend_qt5 import QtCore, QtGui, _BackendQT5, FigureCanvasQT
+from .qt_compat import QT_API
+
+
+class FigureCanvasQTCairo(FigureCanvasQT, FigureCanvasCairo):
+ def __init__(self, figure):
+ super(FigureCanvasQTCairo, self).__init__(figure=figure)
+ self._renderer = RendererCairo(self.figure.dpi)
+ self._renderer.set_width_height(-1, -1) # Invalid values.
+
+ def draw(self):
+ if hasattr(self._renderer.gc, "ctx"):
+ self.figure.draw(self._renderer)
+ super(FigureCanvasQTCairo, self).draw()
+
+ def paintEvent(self, event):
+ self._update_dpi()
+ dpi_ratio = self._dpi_ratio
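+        # The cairo surface is sized in physical pixels: scale the logical
+        # widget size by the device pixel ratio.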
+ width = dpi_ratio * self.width()
+ height = dpi_ratio * self.height()
+ if (width, height) != self._renderer.get_canvas_width_height():
+ surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
+ self._renderer.set_ctx_from_surface(surface)
+ self._renderer.set_width_height(width, height)
+ self.figure.draw(self._renderer)
+ buf = self._renderer.gc.ctx.get_target().get_data()
+ qimage = QtGui.QImage(buf, width, height,
+ QtGui.QImage.Format_ARGB32_Premultiplied)
+ # Adjust the buf reference count to work around a memory leak bug in
+ # QImage under PySide on Python 3.
+ if QT_API == 'PySide' and six.PY3:
+ import ctypes
+ ctypes.c_long.from_address(id(buf)).value = 1
+ if hasattr(qimage, 'setDevicePixelRatio'):
+ # Not available on Qt4 or some older Qt5.
+ qimage.setDevicePixelRatio(dpi_ratio)
+ painter = QtGui.QPainter(self)
+ painter.drawImage(0, 0, qimage)
+ self._draw_rect_callback(painter)
+ painter.end()
+
+
+@_BackendQT5.export
+class _BackendQT5Cairo(_BackendQT5):
+ FigureCanvas = FigureCanvasQTCairo
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_svg.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_svg.py
new file mode 100644
index 00000000000..b38c6850dad
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_svg.py
@@ -0,0 +1,1261 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from collections import OrderedDict
+
+import six
+from six import unichr
+from six.moves import xrange
+
+import base64
+import codecs
+import gzip
+import hashlib
+import io
+import logging
+import re
+import uuid
+
+import numpy as np
+
+from matplotlib import cbook, __version__, rcParams
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
+from matplotlib.backends.backend_mixed import MixedModeRenderer
+from matplotlib.cbook import is_writable_file_like, maxdict
+from matplotlib.colors import rgb2hex
+from matplotlib.font_manager import findfont, get_font
+from matplotlib.ft2font import LOAD_NO_HINTING
+from matplotlib.mathtext import MathTextParser
+from matplotlib.path import Path
+from matplotlib import _path
+from matplotlib.transforms import Affine2D, Affine2DBase
+from matplotlib import _png
+
+_log = logging.getLogger(__name__)
+
+backend_version = __version__
+
+# ----------------------------------------------------------------------
+# SimpleXMLWriter class
+#
+# Based on an original by Fredrik Lundh, but modified here to:
+# 1. Support modern Python idioms
+# 2. Remove encoding support (it's handled by the file writer instead)
+# 3. Support proper indentation
+# 4. Minify things a little bit
+
+# --------------------------------------------------------------------
+# The SimpleXMLWriter module is
+#
+# Copyright (c) 2001-2004 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+def escape_cdata(s):
+ s = s.replace("&", "&amp;")
+ s = s.replace("<", "&lt;")
+ s = s.replace(">", "&gt;")
+ return s
+
+_escape_xml_comment = re.compile(r'-(?=-)')
+def escape_comment(s):
+ s = escape_cdata(s)
+ return _escape_xml_comment.sub('- ', s)
+
+def escape_attrib(s):
+ s = s.replace("&", "&amp;")
+ s = s.replace("'", "&apos;")
+ s = s.replace("\"", "&quot;")
+ s = s.replace("<", "&lt;")
+ s = s.replace(">", "&gt;")
+ return s
+
+def short_float_fmt(x):
+ """
+ Create a short string representation of a float, which is %f
+    Create a short string representation of a float: %f formatting with
+    trailing zeros stripped (and the decimal point, if it ends up trailing).
+ return '{0:f}'.format(x).rstrip('0').rstrip('.')
+
+##
+# XML writer class.
+#
+# @param file A file or file-like object. This object must implement
+# a <b>write</b> method that takes an 8-bit string.
+
+class XMLWriter(object):
+ def __init__(self, file):
+ self.__write = file.write
+ if hasattr(file, "flush"):
+ self.flush = file.flush
+ self.__open = 0 # true if start tag is open
+ self.__tags = []
+ self.__data = []
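+        # one space of indentation per nesting level, up to 64 levels deep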
+ self.__indentation = " " * 64
+
+ def __flush(self, indent=True):
+ # flush internal buffers
+ if self.__open:
+ if indent:
+ self.__write(">\n")
+ else:
+ self.__write(">")
+ self.__open = 0
+ if self.__data:
+ data = ''.join(self.__data)
+ self.__write(escape_cdata(data))
+ self.__data = []
+
+ ## Opens a new element. Attributes can be given as keyword
+ # arguments, or as a string/string dictionary. The method returns
+ # an opaque identifier that can be passed to the <b>close</b>
+ # method, to close all open elements up to and including this one.
+ #
+ # @param tag Element tag.
+ # @param attrib Attribute dictionary. Alternatively, attributes
+ # can be given as keyword arguments.
+ # @return An element identifier.
+
+ def start(self, tag, attrib={}, **extra):
+ self.__flush()
+ tag = escape_cdata(tag)
+ self.__data = []
+ self.__tags.append(tag)
+ self.__write(self.__indentation[:len(self.__tags) - 1])
+ self.__write("<%s" % tag)
+ if attrib or extra:
+ attrib = attrib.copy()
+ attrib.update(extra)
+ attrib = sorted(six.iteritems(attrib))
+ for k, v in attrib:
+ if not v == '':
+ k = escape_cdata(k)
+ v = escape_attrib(v)
+ self.__write(" %s=\"%s\"" % (k, v))
+ self.__open = 1
+ return len(self.__tags)-1
+
+ ##
+ # Adds a comment to the output stream.
+ #
+ # @param comment Comment text, as a Unicode string.
+
+ def comment(self, comment):
+ self.__flush()
+ self.__write(self.__indentation[:len(self.__tags)])
+ self.__write("<!-- %s -->\n" % escape_comment(comment))
+
+ ##
+ # Adds character data to the output stream.
+ #
+ # @param text Character data, as a Unicode string.
+
+ def data(self, text):
+ self.__data.append(text)
+
+ ##
+ # Closes the current element (opened by the most recent call to
+ # <b>start</b>).
+ #
+ # @param tag Element tag. If given, the tag must match the start
+ # tag. If omitted, the current element is closed.
+
+ def end(self, tag=None, indent=True):
+ if tag:
+ assert self.__tags, "unbalanced end(%s)" % tag
+ assert escape_cdata(tag) == self.__tags[-1],\
+ "expected end(%s), got %s" % (self.__tags[-1], tag)
+ else:
+ assert self.__tags, "unbalanced end()"
+ tag = self.__tags.pop()
+ if self.__data:
+ self.__flush(indent)
+ elif self.__open:
+ self.__open = 0
+ self.__write("/>\n")
+ return
+ if indent:
+ self.__write(self.__indentation[:len(self.__tags)])
+ self.__write("</%s>\n" % tag)
+
+ ##
+ # Closes open elements, up to (and including) the element identified
+ # by the given identifier.
+ #
+ # @param id Element identifier, as returned by the <b>start</b> method.
+
+ def close(self, id):
+ while len(self.__tags) > id:
+ self.end()
+
+ ##
+ # Adds an entire element. This is the same as calling <b>start</b>,
+ # <b>data</b>, and <b>end</b> in sequence. The <b>text</b> argument
+ # can be omitted.
+
+ def element(self, tag, text=None, attrib={}, **extra):
+ self.start(*(tag, attrib), **extra)
+ if text:
+ self.data(text)
+ self.end(indent=False)
+
+ ##
+ # Flushes the output stream.
+
+ def flush(self):
+ pass # replaced by the constructor
+
+# ----------------------------------------------------------------------
+
+def generate_transform(transform_list=[]):
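+    # Build the value of an SVG transform attribute, skipping entries that
+    # are no-ops (identity scale/translate/rotate).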
+ if len(transform_list):
+ output = io.StringIO()
+ for type, value in transform_list:
+ if type == 'scale' and (value == (1.0,) or value == (1.0, 1.0)):
+ continue
+ if type == 'translate' and value == (0.0, 0.0):
+ continue
+ if type == 'rotate' and value == (0.0,):
+ continue
+ if type == 'matrix' and isinstance(value, Affine2DBase):
+ value = value.to_values()
+
+ output.write('%s(%s)' % (
+ type, ' '.join(short_float_fmt(x) for x in value)))
+ return output.getvalue()
+ return ''
+
+def generate_css(attrib={}):
+ if attrib:
+ output = io.StringIO()
+ attrib = sorted(six.iteritems(attrib))
+ for k, v in attrib:
+ k = escape_attrib(k)
+ v = escape_attrib(v)
+ output.write("%s:%s;" % (k, v))
+ return output.getvalue()
+ return ''
+
+_capstyle_d = {'projecting' : 'square', 'butt' : 'butt', 'round': 'round',}
+class RendererSVG(RendererBase):
+ FONT_SCALE = 100.0
+ fontd = maxdict(50)
+
+ def __init__(self, width, height, svgwriter, basename=None, image_dpi=72):
+ self.width = width
+ self.height = height
+ self.writer = XMLWriter(svgwriter)
+ self.image_dpi = image_dpi # the actual dpi we want to rasterize stuff with
+
+ self._groupd = {}
+ if not rcParams['svg.image_inline']:
+ assert basename is not None
+ self.basename = basename
+ self._imaged = {}
+ self._clipd = OrderedDict()
+ self._char_defs = {}
+ self._markers = {}
+ self._path_collection_id = 0
+ self._imaged = {}
+ self._hatchd = OrderedDict()
+ self._has_gouraud = False
+ self._n_gradients = 0
+ self._fonts = OrderedDict()
+ self.mathtext_parser = MathTextParser('SVG')
+
+ RendererBase.__init__(self)
+ self._glyph_map = dict()
+ str_height = short_float_fmt(height)
+ str_width = short_float_fmt(width)
+ svgwriter.write(svgProlog)
+ self._start_id = self.writer.start(
+ 'svg',
+ width='%spt' % str_width,
+ height='%spt' % str_height,
+ viewBox='0 0 %s %s' % (str_width, str_height),
+ xmlns="http://www.w3.org/2000/svg",
+ version="1.1",
+ attrib={'xmlns:xlink': "http://www.w3.org/1999/xlink"})
+ self._write_default_style()
+
+ def finalize(self):
+ self._write_clips()
+ self._write_hatches()
+ self._write_svgfonts()
+ self.writer.close(self._start_id)
+ self.writer.flush()
+
+ def _write_default_style(self):
+ writer = self.writer
+ default_style = generate_css({
+ 'stroke-linejoin': 'round',
+ 'stroke-linecap': 'butt'})
+ writer.start('defs')
+ writer.start('style', type='text/css')
+ writer.data('*{%s}\n' % default_style)
+ writer.end('style')
+ writer.end('defs')
+
+ def _make_id(self, type, content):
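+        # Generate a short, stable id by hashing the salt (svg.hashsalt, or
+        # a random uuid when unset) together with the content.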
+ content = str(content)
+ if rcParams['svg.hashsalt'] is None:
+ salt = str(uuid.uuid4())
+ else:
+ salt = rcParams['svg.hashsalt']
+ if six.PY3:
+ content = content.encode('utf8')
+ salt = salt.encode('utf8')
+ m = hashlib.md5()
+ m.update(salt)
+ m.update(content)
+ return '%s%s' % (type, m.hexdigest()[:10])
+
+ def _make_flip_transform(self, transform):
+ return (transform +
+ Affine2D()
+ .scale(1.0, -1.0)
+ .translate(0.0, self.height))
+
+ def _get_font(self, prop):
+ fname = findfont(prop)
+ font = get_font(fname)
+ font.clear()
+ size = prop.get_size_in_points()
+ font.set_size(size, 72.0)
+ return font
+
+ def _get_hatch(self, gc, rgbFace):
+ """
+ Create a new hatch pattern
+ """
+ if rgbFace is not None:
+ rgbFace = tuple(rgbFace)
+ edge = gc.get_hatch_color()
+ if edge is not None:
+ edge = tuple(edge)
+ dictkey = (gc.get_hatch(), rgbFace, edge)
+ oid = self._hatchd.get(dictkey)
+ if oid is None:
+ oid = self._make_id('h', dictkey)
+ self._hatchd[dictkey] = ((gc.get_hatch_path(), rgbFace, edge), oid)
+ else:
+ _, oid = oid
+ return oid
+
+ def _write_hatches(self):
+ if not len(self._hatchd):
+ return
+ HATCH_SIZE = 72
+ writer = self.writer
+ writer.start('defs')
+ for ((path, face, stroke), oid) in six.itervalues(self._hatchd):
+ writer.start(
+ 'pattern',
+ id=oid,
+ patternUnits="userSpaceOnUse",
+ x="0", y="0", width=six.text_type(HATCH_SIZE),
+ height=six.text_type(HATCH_SIZE))
+ path_data = self._convert_path(
+ path,
+ Affine2D().scale(HATCH_SIZE).scale(1.0, -1.0).translate(0, HATCH_SIZE),
+ simplify=False)
+ if face is None:
+ fill = 'none'
+ else:
+ fill = rgb2hex(face)
+ writer.element(
+ 'rect',
+ x="0", y="0", width=six.text_type(HATCH_SIZE+1),
+ height=six.text_type(HATCH_SIZE+1),
+ fill=fill)
+ writer.element(
+ 'path',
+ d=path_data,
+ style=generate_css({
+ 'fill': rgb2hex(stroke),
+ 'stroke': rgb2hex(stroke),
+ 'stroke-width': six.text_type(rcParams['hatch.linewidth']),
+ 'stroke-linecap': 'butt',
+ 'stroke-linejoin': 'miter'
+ })
+ )
+ writer.end('pattern')
+ writer.end('defs')
+
+ def _get_style_dict(self, gc, rgbFace):
+ """
+        Return the style dictionary generated from the
+        GraphicsContext and rgbFace.
+ """
+ attrib = {}
+
+ forced_alpha = gc.get_forced_alpha()
+
+ if gc.get_hatch() is not None:
+ attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace)
+ if rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha:
+ attrib['fill-opacity'] = short_float_fmt(rgbFace[3])
+ else:
+ if rgbFace is None:
+ attrib['fill'] = 'none'
+ else:
+ if tuple(rgbFace[:3]) != (0, 0, 0):
+ attrib['fill'] = rgb2hex(rgbFace)
+ if len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha:
+ attrib['fill-opacity'] = short_float_fmt(rgbFace[3])
+
+ if forced_alpha and gc.get_alpha() != 1.0:
+ attrib['opacity'] = short_float_fmt(gc.get_alpha())
+
+ offset, seq = gc.get_dashes()
+ if seq is not None:
+ attrib['stroke-dasharray'] = ','.join([short_float_fmt(val) for val in seq])
+ attrib['stroke-dashoffset'] = short_float_fmt(float(offset))
+
+ linewidth = gc.get_linewidth()
+ if linewidth:
+ rgb = gc.get_rgb()
+ attrib['stroke'] = rgb2hex(rgb)
+ if not forced_alpha and rgb[3] != 1.0:
+ attrib['stroke-opacity'] = short_float_fmt(rgb[3])
+ if linewidth != 1.0:
+ attrib['stroke-width'] = short_float_fmt(linewidth)
+ if gc.get_joinstyle() != 'round':
+ attrib['stroke-linejoin'] = gc.get_joinstyle()
+ if gc.get_capstyle() != 'butt':
+ attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()]
+
+ return attrib
+
+ def _get_style(self, gc, rgbFace):
+ return generate_css(self._get_style_dict(gc, rgbFace))
+
+ def _get_clip(self, gc):
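+        # Return the id of a shared <clipPath> element for this graphics
+        # context, registering it on first use; the actual elements are
+        # written out later by _write_clips.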
+ cliprect = gc.get_clip_rectangle()
+ clippath, clippath_trans = gc.get_clip_path()
+ if clippath is not None:
+ clippath_trans = self._make_flip_transform(clippath_trans)
+ dictkey = (id(clippath), str(clippath_trans))
+ elif cliprect is not None:
+ x, y, w, h = cliprect.bounds
+ y = self.height-(y+h)
+ dictkey = (x, y, w, h)
+ else:
+ return None
+
+ clip = self._clipd.get(dictkey)
+ if clip is None:
+ oid = self._make_id('p', dictkey)
+ if clippath is not None:
+ self._clipd[dictkey] = ((clippath, clippath_trans), oid)
+ else:
+ self._clipd[dictkey] = (dictkey, oid)
+ else:
+ clip, oid = clip
+ return oid
+
+ def _write_clips(self):
+ if not len(self._clipd):
+ return
+ writer = self.writer
+ writer.start('defs')
+ for clip, oid in six.itervalues(self._clipd):
+ writer.start('clipPath', id=oid)
+ if len(clip) == 2:
+ clippath, clippath_trans = clip
+ path_data = self._convert_path(clippath, clippath_trans, simplify=False)
+ writer.element('path', d=path_data)
+ else:
+ x, y, w, h = clip
+ writer.element(
+ 'rect',
+ x=short_float_fmt(x),
+ y=short_float_fmt(y),
+ width=short_float_fmt(w),
+ height=short_float_fmt(h))
+ writer.end('clipPath')
+ writer.end('defs')
+
+ def _write_svgfonts(self):
+ if not rcParams['svg.fonttype'] == 'svgfont':
+ return
+
+ writer = self.writer
+ writer.start('defs')
+ for font_fname, chars in six.iteritems(self._fonts):
+ font = get_font(font_fname)
+ font.set_size(72, 72)
+ sfnt = font.get_sfnt()
+ writer.start('font', id=sfnt[1, 0, 0, 4].decode("mac_roman"))
+ writer.element(
+ 'font-face',
+ attrib={
+ 'font-family': font.family_name,
+ 'font-style': font.style_name.lower(),
+ 'units-per-em': '72',
+ 'bbox': ' '.join(
+ short_float_fmt(x / 64.0) for x in font.bbox)})
+ for char in chars:
+ glyph = font.load_char(char, flags=LOAD_NO_HINTING)
+ verts, codes = font.get_path()
+ path = Path(verts, codes)
+ path_data = self._convert_path(path)
+ # name = font.get_glyph_name(char)
+ writer.element(
+ 'glyph',
+ d=path_data,
+ attrib={
+ # 'glyph-name': name,
+ 'unicode': unichr(char),
+ 'horiz-adv-x':
+ short_float_fmt(glyph.linearHoriAdvance / 65536.0)})
+ writer.end('font')
+ writer.end('defs')
+
+ def open_group(self, s, gid=None):
+ """
+ Open a grouping element with label *s*. If *gid* is given, use
+ *gid* as the id of the group.
+ """
+ if gid:
+ self.writer.start('g', id=gid)
+ else:
+ self._groupd[s] = self._groupd.get(s, 0) + 1
+ self.writer.start('g', id="%s_%d" % (s, self._groupd[s]))
+
+ def close_group(self, s):
+ self.writer.end('g')
+
+ def option_image_nocomposite(self):
+ """
+ return whether to generate a composite image from multiple images on
+ a set of axes
+ """
+ return not rcParams['image.composite_image']
+
+ def _convert_path(self, path, transform=None, clip=None, simplify=None,
+ sketch=None):
+ if clip:
+ clip = (0.0, 0.0, self.width, self.height)
+ else:
+ clip = None
+ return _path.convert_to_string(
+ path, transform, clip, simplify, sketch, 6,
+ [b'M', b'L', b'Q', b'C', b'z'], False).decode('ascii')
+
+ def draw_path(self, gc, path, transform, rgbFace=None):
+ trans_and_flip = self._make_flip_transform(transform)
+ clip = (rgbFace is None and gc.get_hatch_path() is None)
+ simplify = path.should_simplify and clip
+ path_data = self._convert_path(
+ path, trans_and_flip, clip=clip, simplify=simplify,
+ sketch=gc.get_sketch_params())
+
+ attrib = {}
+ attrib['style'] = self._get_style(gc, rgbFace)
+
+ clipid = self._get_clip(gc)
+ if clipid is not None:
+ attrib['clip-path'] = 'url(#%s)' % clipid
+
+ if gc.get_url() is not None:
+ self.writer.start('a', {'xlink:href': gc.get_url()})
+ self.writer.element('path', d=path_data, attrib=attrib)
+ if gc.get_url() is not None:
+ self.writer.end('a')
+
+ def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
+ if not len(path.vertices):
+ return
+
+ writer = self.writer
+ path_data = self._convert_path(
+ marker_path,
+ marker_trans + Affine2D().scale(1.0, -1.0),
+ simplify=False)
+ style = self._get_style_dict(gc, rgbFace)
+ dictkey = (path_data, generate_css(style))
+ oid = self._markers.get(dictkey)
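+        # Only the stroke-related properties go on the shared marker
+        # definition; the full style (including fill) is applied on each
+        # <use> element below.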
+ style = generate_css({k: v for k, v in six.iteritems(style)
+ if k.startswith('stroke')})
+
+ if oid is None:
+ oid = self._make_id('m', dictkey)
+ writer.start('defs')
+ writer.element('path', id=oid, d=path_data, style=style)
+ writer.end('defs')
+ self._markers[dictkey] = oid
+
+ attrib = {}
+ clipid = self._get_clip(gc)
+ if clipid is not None:
+ attrib['clip-path'] = 'url(#%s)' % clipid
+ writer.start('g', attrib=attrib)
+
+ trans_and_flip = self._make_flip_transform(trans)
+ attrib = {'xlink:href': '#%s' % oid}
+ clip = (0, 0, self.width*72, self.height*72)
+ for vertices, code in path.iter_segments(
+ trans_and_flip, clip=clip, simplify=False):
+ if len(vertices):
+ x, y = vertices[-2:]
+ attrib['x'] = short_float_fmt(x)
+ attrib['y'] = short_float_fmt(y)
+ attrib['style'] = self._get_style(gc, rgbFace)
+ writer.element('use', attrib=attrib)
+ writer.end('g')
+
+ def draw_path_collection(self, gc, master_transform, paths, all_transforms,
+ offsets, offsetTrans, facecolors, edgecolors,
+ linewidths, linestyles, antialiaseds, urls,
+ offset_position):
+ # Is the optimization worth it? Rough calculation:
+ # cost of emitting a path in-line is
+ # (len_path + 5) * uses_per_path
+ # cost of definition+use is
+ # (len_path + 3) + 9 * uses_per_path
+ len_path = len(paths[0].vertices) if len(paths) > 0 else 0
+ uses_per_path = self._iter_collection_uses_per_path(
+ paths, all_transforms, offsets, facecolors, edgecolors)
+ should_do_optimization = \
+ len_path + 9 * uses_per_path + 3 < (len_path + 5) * uses_per_path
+ if not should_do_optimization:
+ return RendererBase.draw_path_collection(
+ self, gc, master_transform, paths, all_transforms,
+ offsets, offsetTrans, facecolors, edgecolors,
+ linewidths, linestyles, antialiaseds, urls,
+ offset_position)
+
+ writer = self.writer
+ path_codes = []
+ writer.start('defs')
+ for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
+ master_transform, paths, all_transforms)):
+ transform = Affine2D(transform.get_matrix()).scale(1.0, -1.0)
+ d = self._convert_path(path, transform, simplify=False)
+ oid = 'C%x_%x_%s' % (self._path_collection_id, i,
+ self._make_id('', d))
+ writer.element('path', id=oid, d=d)
+ path_codes.append(oid)
+ writer.end('defs')
+
+ for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
+ gc, master_transform, all_transforms, path_codes, offsets,
+ offsetTrans, facecolors, edgecolors, linewidths, linestyles,
+ antialiaseds, urls, offset_position):
+ clipid = self._get_clip(gc0)
+ url = gc0.get_url()
+ if url is not None:
+ writer.start('a', attrib={'xlink:href': url})
+ if clipid is not None:
+ writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
+ attrib = {
+ 'xlink:href': '#%s' % path_id,
+ 'x': short_float_fmt(xo),
+ 'y': short_float_fmt(self.height - yo),
+ 'style': self._get_style(gc0, rgbFace)
+ }
+ writer.element('use', attrib=attrib)
+ if clipid is not None:
+ writer.end('g')
+ if url is not None:
+ writer.end('a')
+
+ self._path_collection_id += 1
+
+ def draw_gouraud_triangle(self, gc, points, colors, trans):
+ # This uses a method described here:
+ #
+ # http://www.svgopen.org/2005/papers/Converting3DFaceToSVG/index.html
+ #
+ # that uses three overlapping linear gradients to simulate a
+ # Gouraud triangle. Each gradient goes from fully opaque in
+ # one corner to fully transparent along the opposite edge.
+ # The line between the stop points is perpendicular to the
+ # opposite edge. Underlying these three gradients is a solid
+ # triangle whose color is the average of all three points.
+
+ writer = self.writer
+ if not self._has_gouraud:
+ self._has_gouraud = True
+ writer.start(
+ 'filter',
+ id='colorAdd')
+ writer.element(
+ 'feComposite',
+ attrib={'in': 'SourceGraphic'},
+ in2='BackgroundImage',
+ operator='arithmetic',
+ k2="1", k3="1")
+ writer.end('filter')
+
+ avg_color = np.sum(colors[:, :], axis=0) / 3.0
+ # Just skip fully-transparent triangles
+ if avg_color[-1] == 0.0:
+ return
+
+ trans_and_flip = self._make_flip_transform(trans)
+ tpoints = trans_and_flip.transform(points)
+
+ writer.start('defs')
+ for i in range(3):
+ x1, y1 = tpoints[i]
+ x2, y2 = tpoints[(i + 1) % 3]
+ x3, y3 = tpoints[(i + 2) % 3]
+ c = colors[i][:]
+
+ if x2 == x3:
+ xb = x2
+ yb = y1
+ elif y2 == y3:
+ xb = x1
+ yb = y2
+ else:
+ m1 = (y2 - y3) / (x2 - x3)
+ b1 = y2 - (m1 * x2)
+ m2 = -(1.0 / m1)
+ b2 = y1 - (m2 * x1)
+ xb = (-b1 + b2) / (m1 - m2)
+ yb = m2 * xb + b2
+
+ writer.start(
+ 'linearGradient',
+ id="GR%x_%d" % (self._n_gradients, i),
+ x1=short_float_fmt(x1), y1=short_float_fmt(y1),
+ x2=short_float_fmt(xb), y2=short_float_fmt(yb))
+ writer.element(
+ 'stop',
+ offset='0',
+ style=generate_css({'stop-color': rgb2hex(c),
+ 'stop-opacity': short_float_fmt(c[-1])}))
+ writer.element(
+ 'stop',
+ offset='1',
+ style=generate_css({'stop-color': rgb2hex(c),
+ 'stop-opacity': "0"}))
+ writer.end('linearGradient')
+
+ writer.element(
+ 'polygon',
+ id='GT%x' % self._n_gradients,
+ points=" ".join([short_float_fmt(x)
+ for x in (x1, y1, x2, y2, x3, y3)]))
+ writer.end('defs')
+
+ avg_color = np.sum(colors[:, :], axis=0) / 3.0
+ href = '#GT%x' % self._n_gradients
+ writer.element(
+ 'use',
+ attrib={'xlink:href': href,
+ 'fill': rgb2hex(avg_color),
+ 'fill-opacity': short_float_fmt(avg_color[-1])})
+ for i in range(3):
+ writer.element(
+ 'use',
+ attrib={'xlink:href': href,
+ 'fill': 'url(#GR%x_%d)' % (self._n_gradients, i),
+ 'fill-opacity': '1',
+ 'filter': 'url(#colorAdd)'})
+
+ self._n_gradients += 1
+
+ def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
+ transform):
+ attrib = {}
+ clipid = self._get_clip(gc)
+ if clipid is not None:
+ attrib['clip-path'] = 'url(#%s)' % clipid
+
+ self.writer.start('g', attrib=attrib)
+
+ transform = transform.frozen()
+ for tri, col in zip(triangles_array, colors_array):
+ self.draw_gouraud_triangle(gc, tri, col, transform)
+
+ self.writer.end('g')
+
+ def option_scale_image(self):
+ return True
+
+ def get_image_magnification(self):
+ return self.image_dpi / 72.0
+
+ def draw_image(self, gc, x, y, im, transform=None):
+ h, w = im.shape[:2]
+
+ if w == 0 or h == 0:
+ return
+
+ attrib = {}
+ clipid = self._get_clip(gc)
+ if clipid is not None:
+ # Can't apply clip-path directly to the image because the
+ # image has a transformation, which would also be applied
+ # to the clip-path
+ self.writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
+
+ oid = gc.get_gid()
+ url = gc.get_url()
+ if url is not None:
+ self.writer.start('a', attrib={'xlink:href': url})
+ if rcParams['svg.image_inline']:
+ bytesio = io.BytesIO()
+ _png.write_png(im, bytesio)
+ oid = oid or self._make_id('image', bytesio.getvalue())
+ attrib['xlink:href'] = (
+ "data:image/png;base64,\n" +
+ base64.b64encode(bytesio.getvalue()).decode('ascii'))
+ else:
+ self._imaged[self.basename] = self._imaged.get(self.basename, 0) + 1
+ filename = '%s.image%d.png'%(self.basename, self._imaged[self.basename])
+ _log.info('Writing image file for inclusion: %s', filename)
+ _png.write_png(im, filename)
+ oid = oid or 'Im_' + self._make_id('image', filename)
+ attrib['xlink:href'] = filename
+
+ attrib['id'] = oid
+
+ if transform is None:
+ w = 72.0 * w / self.image_dpi
+ h = 72.0 * h / self.image_dpi
+
+ self.writer.element(
+ 'image',
+ transform=generate_transform([
+ ('scale', (1, -1)), ('translate', (0, -h))]),
+ x=short_float_fmt(x),
+ y=short_float_fmt(-(self.height - y - h)),
+ width=short_float_fmt(w), height=short_float_fmt(h),
+ attrib=attrib)
+ else:
+ alpha = gc.get_alpha()
+ if alpha != 1.0:
+ attrib['opacity'] = short_float_fmt(alpha)
+
+ flipped = (
+ Affine2D().scale(1.0 / w, 1.0 / h) +
+ transform +
+ Affine2D()
+ .translate(x, y)
+ .scale(1.0, -1.0)
+ .translate(0.0, self.height))
+
+ attrib['transform'] = generate_transform(
+ [('matrix', flipped.frozen())])
+ self.writer.element(
+ 'image',
+ width=short_float_fmt(w), height=short_float_fmt(h),
+ attrib=attrib)
+
+ if url is not None:
+ self.writer.end('a')
+ if clipid is not None:
+ self.writer.end('g')
+
+ def _adjust_char_id(self, char_id):
+ return char_id.replace("%20", "_")
+
+ def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath, mtext=None):
+ """
+        Draw the text by converting it to paths using the textpath module.
+
+ Parameters
+ ----------
+ prop : `matplotlib.font_manager.FontProperties`
+ font property
+
+ s : str
+ text to be converted
+
+ ismath : bool
+ If True, use mathtext parser. If "TeX", use *usetex* mode.
+
+ """
+ writer = self.writer
+
+ writer.comment(s)
+
+ glyph_map=self._glyph_map
+
+ text2path = self._text2path
+ color = rgb2hex(gc.get_rgb())
+ fontsize = prop.get_size_in_points()
+
+ style = {}
+ if color != '#000000':
+ style['fill'] = color
+ if gc.get_alpha() != 1.0:
+ style['opacity'] = short_float_fmt(gc.get_alpha())
+
+ if not ismath:
+ font = text2path._get_font(prop)
+ _glyphs = text2path.get_glyphs_with_font(
+ font, s, glyph_map=glyph_map, return_new_glyphs_only=True)
+ glyph_info, glyph_map_new, rects = _glyphs
+
+ if glyph_map_new:
+ writer.start('defs')
+ for char_id, glyph_path in six.iteritems(glyph_map_new):
+ path = Path(*glyph_path)
+ path_data = self._convert_path(path, simplify=False)
+ writer.element('path', id=char_id, d=path_data)
+ writer.end('defs')
+
+ glyph_map.update(glyph_map_new)
+
+ attrib = {}
+ attrib['style'] = generate_css(style)
+ font_scale = fontsize / text2path.FONT_SCALE
+ attrib['transform'] = generate_transform([
+ ('translate', (x, y)),
+ ('rotate', (-angle,)),
+ ('scale', (font_scale, -font_scale))])
+
+ writer.start('g', attrib=attrib)
+ for glyph_id, xposition, yposition, scale in glyph_info:
+ attrib={'xlink:href': '#%s' % glyph_id}
+ if xposition != 0.0:
+ attrib['x'] = short_float_fmt(xposition)
+ if yposition != 0.0:
+ attrib['y'] = short_float_fmt(yposition)
+ writer.element(
+ 'use',
+ attrib=attrib)
+
+ writer.end('g')
+ else:
+ if ismath == "TeX":
+ _glyphs = text2path.get_glyphs_tex(prop, s, glyph_map=glyph_map,
+ return_new_glyphs_only=True)
+ else:
+ _glyphs = text2path.get_glyphs_mathtext(prop, s, glyph_map=glyph_map,
+ return_new_glyphs_only=True)
+
+ glyph_info, glyph_map_new, rects = _glyphs
+
+            # We store the character glyphs without flipping; instead, the
+            # coordinates are flipped when these characters are used.
+ if glyph_map_new:
+ writer.start('defs')
+ for char_id, glyph_path in six.iteritems(glyph_map_new):
+ char_id = self._adjust_char_id(char_id)
+ # Some characters are blank
+ if not len(glyph_path[0]):
+ path_data = ""
+ else:
+ path = Path(*glyph_path)
+ path_data = self._convert_path(path, simplify=False)
+ writer.element('path', id=char_id, d=path_data)
+ writer.end('defs')
+
+ glyph_map.update(glyph_map_new)
+
+ attrib = {}
+ font_scale = fontsize / text2path.FONT_SCALE
+ attrib['style'] = generate_css(style)
+ attrib['transform'] = generate_transform([
+ ('translate', (x, y)),
+ ('rotate', (-angle,)),
+ ('scale', (font_scale, -font_scale))])
+
+ writer.start('g', attrib=attrib)
+ for char_id, xposition, yposition, scale in glyph_info:
+ char_id = self._adjust_char_id(char_id)
+
+ writer.element(
+ 'use',
+ transform=generate_transform([
+ ('translate', (xposition, yposition)),
+ ('scale', (scale,)),
+ ]),
+ attrib={'xlink:href': '#%s' % char_id})
+
+ for verts, codes in rects:
+ path = Path(verts, codes)
+ path_data = self._convert_path(path, simplify=False)
+ writer.element('path', d=path_data)
+
+ writer.end('g')
+
+ def _draw_text_as_text(self, gc, x, y, s, prop, angle, ismath, mtext=None):
+ writer = self.writer
+
+ color = rgb2hex(gc.get_rgb())
+ style = {}
+ if color != '#000000':
+ style['fill'] = color
+ if gc.get_alpha() != 1.0:
+ style['opacity'] = short_float_fmt(gc.get_alpha())
+
+ if not ismath:
+ font = self._get_font(prop)
+ font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
+
+ fontsize = prop.get_size_in_points()
+
+ fontfamily = font.family_name
+ fontstyle = prop.get_style()
+
+ attrib = {}
+            # Must add "px" to work around a Firefox bug
+ style['font-size'] = short_float_fmt(fontsize) + 'px'
+ style['font-family'] = six.text_type(fontfamily)
+ style['font-style'] = prop.get_style().lower()
+ style['font-weight'] = six.text_type(prop.get_weight()).lower()
+ attrib['style'] = generate_css(style)
+
+ if mtext and (angle == 0 or mtext.get_rotation_mode() == "anchor"):
+ # If text anchoring can be supported, get the original
+ # coordinates and add alignment information.
+
+ # Get anchor coordinates.
+ transform = mtext.get_transform()
+ ax, ay = transform.transform_point(mtext.get_position())
+ ay = self.height - ay
+
+ # Don't do vertical anchor alignment. Most applications do not
+ # support 'alignment-baseline' yet. Apply the vertical layout
+ # to the anchor point manually for now.
+ angle_rad = np.deg2rad(angle)
+ dir_vert = np.array([np.sin(angle_rad), np.cos(angle_rad)])
+ v_offset = np.dot(dir_vert, [(x - ax), (y - ay)])
+ ax = ax + v_offset * dir_vert[0]
+ ay = ay + v_offset * dir_vert[1]
+
+ ha_mpl_to_svg = {'left': 'start', 'right': 'end',
+ 'center': 'middle'}
+ style['text-anchor'] = ha_mpl_to_svg[mtext.get_ha()]
+
+ attrib['x'] = short_float_fmt(ax)
+ attrib['y'] = short_float_fmt(ay)
+ attrib['style'] = generate_css(style)
+ attrib['transform'] = "rotate(%s, %s, %s)" % (
+ short_float_fmt(-angle),
+ short_float_fmt(ax),
+ short_float_fmt(ay))
+ writer.element('text', s, attrib=attrib)
+ else:
+ attrib['transform'] = generate_transform([
+ ('translate', (x, y)),
+ ('rotate', (-angle,))])
+
+ writer.element('text', s, attrib=attrib)
+
+ if rcParams['svg.fonttype'] == 'svgfont':
+ fontset = self._fonts.setdefault(font.fname, set())
+ for c in s:
+ fontset.add(ord(c))
+ else:
+ writer.comment(s)
+
+ width, height, descent, svg_elements, used_characters = \
+ self.mathtext_parser.parse(s, 72, prop)
+ svg_glyphs = svg_elements.svg_glyphs
+ svg_rects = svg_elements.svg_rects
+
+ attrib = {}
+ attrib['style'] = generate_css(style)
+ attrib['transform'] = generate_transform([
+ ('translate', (x, y)),
+ ('rotate', (-angle,))])
+
+ # Apply attributes to 'g', not 'text', because we likely
+ # have some rectangles as well with the same style and
+ # transformation
+ writer.start('g', attrib=attrib)
+
+ writer.start('text')
+
+ # Sort the characters by font, and output one tspan for
+ # each
+ spans = OrderedDict()
+ for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
+ style = generate_css({
+ 'font-size': short_float_fmt(fontsize) + 'px',
+ 'font-family': font.family_name,
+ 'font-style': font.style_name.lower(),
+ 'font-weight': font.style_name.lower()})
+ if thetext == 32:
+ thetext = 0xa0 # non-breaking space
+ spans.setdefault(style, []).append((new_x, -new_y, thetext))
+
+ if rcParams['svg.fonttype'] == 'svgfont':
+ for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
+ fontset = self._fonts.setdefault(font.fname, set())
+ fontset.add(thetext)
+
+ for style, chars in six.iteritems(spans):
+ chars.sort()
+
+ same_y = True
+ if len(chars) > 1:
+ last_y = chars[0][1]
+ for i in xrange(1, len(chars)):
+ if chars[i][1] != last_y:
+ same_y = False
+ break
+ if same_y:
+ ys = six.text_type(chars[0][1])
+ else:
+ ys = ' '.join(six.text_type(c[1]) for c in chars)
+
+ attrib = {
+ 'style': style,
+ 'x': ' '.join(short_float_fmt(c[0]) for c in chars),
+ 'y': ys
+ }
+
+ writer.element(
+ 'tspan',
+ ''.join(unichr(c[2]) for c in chars),
+ attrib=attrib)
+
+ writer.end('text')
+
+ if len(svg_rects):
+ for x, y, width, height in svg_rects:
+ writer.element(
+ 'rect',
+ x=short_float_fmt(x),
+ y=short_float_fmt(-y + height),
+ width=short_float_fmt(width),
+ height=short_float_fmt(height)
+ )
+
+ writer.end('g')
+
+ def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
+ self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
+
+ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
+ clipid = self._get_clip(gc)
+ if clipid is not None:
+            # Cannot apply clip-path directly to the text, because
+            # it has a transformation
+ self.writer.start(
+ 'g', attrib={'clip-path': 'url(#%s)' % clipid})
+
+ if gc.get_url() is not None:
+ self.writer.start('a', {'xlink:href': gc.get_url()})
+
+ if rcParams['svg.fonttype'] == 'path':
+ self._draw_text_as_path(gc, x, y, s, prop, angle, ismath, mtext)
+ else:
+ self._draw_text_as_text(gc, x, y, s, prop, angle, ismath, mtext)
+
+ if gc.get_url() is not None:
+ self.writer.end('a')
+
+ if clipid is not None:
+ self.writer.end('g')
+
+ def flipy(self):
+ return True
+
+ def get_canvas_width_height(self):
+ return self.width, self.height
+
+ def get_text_width_height_descent(self, s, prop, ismath):
+ return self._text2path.get_text_width_height_descent(s, prop, ismath)
+
+
+class FigureCanvasSVG(FigureCanvasBase):
+ filetypes = {'svg': 'Scalable Vector Graphics',
+ 'svgz': 'Scalable Vector Graphics'}
+
+ fixed_dpi = 72
+
+ def print_svg(self, filename, *args, **kwargs):
+ with cbook.open_file_cm(filename, "w", encoding="utf-8") as fh:
+
+ filename = getattr(fh, 'name', '')
+ if not isinstance(filename, six.string_types):
+ filename = ''
+
+ if cbook.file_requires_unicode(fh):
+ detach = False
+ else:
+ if six.PY3:
+ fh = io.TextIOWrapper(fh, 'utf-8')
+ else:
+ fh = codecs.getwriter('utf-8')(fh)
+ detach = True
+
+ result = self._print_svg(filename, fh, **kwargs)
+
+ # Detach underlying stream from wrapper so that it remains open in
+ # the caller.
+ if detach:
+ if six.PY3:
+ fh.detach()
+ else:
+ fh.reset()
+ fh.stream = io.BytesIO()
+
+ return result
+
+ def print_svgz(self, filename, *args, **kwargs):
+ with cbook.open_file_cm(filename, "wb") as fh, \
+ gzip.GzipFile(mode='w', fileobj=fh) as gzipwriter:
+ return self.print_svg(gzipwriter)
+
+ def _print_svg(self, filename, fh, **kwargs):
+ image_dpi = kwargs.pop("dpi", 72)
+ self.figure.set_dpi(72.0)
+ width, height = self.figure.get_size_inches()
+ w, h = width * 72, height * 72
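+        # e.g. an 8 x 6 inch figure yields w, h = 576, 432: the SVG canvas
+        # is always written in 72-units-per-inch coordinates, while
+        # image_dpi only affects rasterized artists embedded by the mixed
+        # renderer below.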
+
+ _bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
+ renderer = MixedModeRenderer(
+ self.figure, width, height, image_dpi,
+ RendererSVG(w, h, fh, filename, image_dpi),
+ bbox_inches_restore=_bbox_inches_restore)
+
+ self.figure.draw(renderer)
+ renderer.finalize()
+
+ def get_default_filetype(self):
+ return 'svg'
+
+
+class FigureManagerSVG(FigureManagerBase):
+ pass
+
+
+svgProlog = """\
+<?xml version="1.0" encoding="utf-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
+ "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Created with matplotlib (http://matplotlib.org/) -->
+"""
+
+
+@_Backend.export
+class _BackendSVG(_Backend):
+ FigureCanvas = FigureCanvasSVG
+ FigureManager = FigureManagerSVG
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_template.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_template.py
new file mode 100644
index 00000000000..524ca73285c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_template.py
@@ -0,0 +1,278 @@
+"""
+This is a fully functional do-nothing backend to provide a template to
+backend writers. It is fully functional in that you can select it as
+a backend with
+
+ import matplotlib
+ matplotlib.use('Template')
+
+and your matplotlib scripts will (should!) run without error, though
+no output is produced. This provides a nice starting point for
+backend writers because you can selectively implement methods
+(draw_rectangle, draw_lines, etc...) and slowly see your figure come
+to life without needing a full-blown implementation before getting
+any results.
+
+Copy this to backend_xxx.py and replace all instances of 'template'
+with 'xxx'. Then implement the class methods and functions below, and
+add 'xxx' to the switchyard in matplotlib/backends/__init__.py and
+'xxx' to the backends list in the validate_backend method in
+matplotlib/__init__.py and you're off. You can use your backend with::
+
+ import matplotlib
+ matplotlib.use('xxx')
+ from pylab import *
+ plot([1,2,3])
+ show()
+
+matplotlib also supports external backends, so you can use any module
+in your PYTHONPATH with the syntax::
+
+ import matplotlib
+ matplotlib.use('module://my_backend')
+
+where my_backend.py is your module name. This syntax is also
+recognized in the rc file and in the -d argument in pylab, e.g.,::
+
+ python simple_plot.py -dmodule://my_backend
+
+If your backend implements support for saving figures (i.e. has a print_xyz()
+method), you can register it as the default handler for a given file type
+
+ from matplotlib.backend_bases import register_backend
+ register_backend('xyz', 'my_backend', 'XYZ File Format')
+ ...
+ plt.savefig("figure.xyz")
+
+The files that are most relevant to backend writers are
+
+ matplotlib/backends/backend_your_backend.py
+ matplotlib/backend_bases.py
+ matplotlib/backends/__init__.py
+ matplotlib/__init__.py
+ matplotlib/_pylab_helpers.py
+
+Naming Conventions
+
+ * classes Upper or MixedUpperCase
+
+ * variables lower or lowerUpper
+
+ * functions lower or underscore_separated
+
+"""
+
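+# A minimal smoke test of this template (a sketch; 'foo' is simply the
+# placeholder file format registered by FigureCanvasTemplate below, and no
+# file is actually written):
+#
+#     import matplotlib
+#     matplotlib.use('Template')
+#     import matplotlib.pyplot as plt
+#     plt.plot([1, 2, 3])
+#     plt.savefig('figure.foo')  # dispatched to FigureCanvasTemplate.print_foo
+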
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from matplotlib._pylab_helpers import Gcf
+from matplotlib.backend_bases import (
+ FigureCanvasBase, FigureManagerBase, GraphicsContextBase, RendererBase)
+from matplotlib.figure import Figure
+
+
+class RendererTemplate(RendererBase):
+ """
+ The renderer handles drawing/rendering operations.
+
+ This is a minimal do-nothing class that can be used to get started when
+ writing a new backend. Refer to backend_bases.RendererBase for
+    documentation of the class's methods.
+ """
+ def __init__(self, dpi):
+ self.dpi = dpi
+
+ def draw_path(self, gc, path, transform, rgbFace=None):
+ pass
+
+ # draw_markers is optional, and we get more correct relative
+ # timings by leaving it out. backend implementers concerned with
+ # performance will probably want to implement it
+# def draw_markers(self, gc, marker_path, marker_trans, path, trans,
+# rgbFace=None):
+# pass
+
+ # draw_path_collection is optional, and we get more correct
+ # relative timings by leaving it out. backend implementers concerned with
+ # performance will probably want to implement it
+# def draw_path_collection(self, gc, master_transform, paths,
+# all_transforms, offsets, offsetTrans,
+# facecolors, edgecolors, linewidths, linestyles,
+# antialiaseds):
+# pass
+
+ # draw_quad_mesh is optional, and we get more correct
+ # relative timings by leaving it out. backend implementers concerned with
+ # performance will probably want to implement it
+# def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
+# coordinates, offsets, offsetTrans, facecolors,
+# antialiased, edgecolors):
+# pass
+
+ def draw_image(self, gc, x, y, im):
+ pass
+
+ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
+ pass
+
+ def flipy(self):
+ return True
+
+ def get_canvas_width_height(self):
+ return 100, 100
+
+ def get_text_width_height_descent(self, s, prop, ismath):
+ return 1, 1, 1
+
+ def new_gc(self):
+ return GraphicsContextTemplate()
+
+ def points_to_pixels(self, points):
+ # if backend doesn't have dpi, e.g., postscript or svg
+ return points
+ # elif backend assumes a value for pixels_per_inch
+ #return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
+ # else
+ #return points/72.0 * self.dpi.get()
+
+
+class GraphicsContextTemplate(GraphicsContextBase):
+ """
+ The graphics context provides the color, line styles, etc... See the gtk
+ and postscript backends for examples of mapping the graphics context
+ attributes (cap styles, join styles, line widths, colors) to a particular
+ backend. In GTK this is done by wrapping a gtk.gdk.GC object and
+ forwarding the appropriate calls to it using a dictionary mapping styles
+ to gdk constants. In Postscript, all the work is done by the renderer,
+ mapping line styles to postscript calls.
+
+ If it's more appropriate to do the mapping at the renderer level (as in
+ the postscript backend), you don't need to override any of the GC methods.
+ If it's more appropriate to wrap an instance (as in the GTK backend) and
+ do the mapping here, you'll need to override several of the setter
+ methods.
+
+    The base GraphicsContext stores colors as an RGB tuple on the unit
+ interval, e.g., (0.5, 0.0, 1.0). You may need to map this to colors
+ appropriate for your backend.
+ """
+ pass
+
+
+
+########################################################################
+#
+# The following functions and classes are for pylab and implement
+# window/figure managers, etc...
+#
+########################################################################
+
+def draw_if_interactive():
+ """
+    For image backends - this is not required.
+ For GUI backends - this should be overridden if drawing should be done in
+ interactive python mode
+ """
+
+
+def show(block=None):
+ """
+    For image backends - this is not required.
+ For GUI backends - show() is usually the last line of a pylab script and
+ tells the backend that it is time to draw. In interactive mode, this may
+ be a do nothing func. See the GTK backend for an example of how to handle
+ interactive versus batch mode
+ """
+ for manager in Gcf.get_all_fig_managers():
+ # do something to display the GUI
+ pass
+
+
+def new_figure_manager(num, *args, **kwargs):
+ """
+ Create a new figure manager instance
+ """
+ # May be implemented via the `_new_figure_manager_template` helper.
+ # If a main-level app must be created, this (and
+ # new_figure_manager_given_figure) is the usual place to do it -- see
+ # backend_wx, backend_wxagg and backend_tkagg for examples. Not all GUIs
+    # require explicit instantiation of a main-level app (e.g., backend_gtk,
+ # backend_gtkagg) for pylab.
+ FigureClass = kwargs.pop('FigureClass', Figure)
+ thisFig = FigureClass(*args, **kwargs)
+ return new_figure_manager_given_figure(num, thisFig)
+
+
+def new_figure_manager_given_figure(num, figure):
+ """
+ Create a new figure manager instance for the given figure.
+ """
+ # May be implemented via the `_new_figure_manager_template` helper.
+ canvas = FigureCanvasTemplate(figure)
+ manager = FigureManagerTemplate(canvas, num)
+ return manager
+
+
+class FigureCanvasTemplate(FigureCanvasBase):
+ """
+ The canvas the figure renders into. Calls the draw and print fig
+ methods, creates the renderers, etc...
+
+ Note GUI templates will want to connect events for button presses,
+ mouse movements and key presses to functions that call the base
+ class methods button_press_event, button_release_event,
+ motion_notify_event, key_press_event, and key_release_event. See,
+ e.g., backend_gtk.py, backend_wx.py and backend_tkagg.py
+
+ Attributes
+ ----------
+ figure : `matplotlib.figure.Figure`
+ A high-level Figure instance
+
+ """
+
+ def draw(self):
+ """
+ Draw the figure using the renderer
+ """
+ renderer = RendererTemplate(self.figure.dpi)
+ self.figure.draw(renderer)
+
+ # You should provide a print_xxx function for every file format
+ # you can write.
+
+ # If the file type is not in the base set of filetypes,
+ # you should add it to the class-scope filetypes dictionary as follows:
+ filetypes = FigureCanvasBase.filetypes.copy()
+ filetypes['foo'] = 'My magic Foo format'
+
+ def print_foo(self, filename, *args, **kwargs):
+ """
+ Write out format foo. The dpi, facecolor and edgecolor are restored
+ to their original values after this call, so you don't need to
+ save and restore them.
+ """
+ pass
+
+ def get_default_filetype(self):
+ return 'foo'
+
+
+class FigureManagerTemplate(FigureManagerBase):
+ """
+ Wrap everything up into a window for the pylab interface
+
+    For non-interactive backends, the base class does all the work.
+ """
+ pass
+
+########################################################################
+#
+# Now just provide the standard names that backend.__init__ is expecting
+#
+########################################################################
+
+FigureCanvas = FigureCanvasTemplate
+FigureManager = FigureManagerTemplate
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_tkagg.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_tkagg.py
new file mode 100644
index 00000000000..9511326e4a5
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_tkagg.py
@@ -0,0 +1,34 @@
+from __future__ import absolute_import, division, print_function
+
+from .. import cbook
+from . import tkagg # Paint image to Tk photo blitter extension.
+from .backend_agg import FigureCanvasAgg
+from ._backend_tk import (
+ _BackendTk, FigureCanvasTk, FigureManagerTk, NavigationToolbar2Tk)
+
+
+class FigureCanvasTkAgg(FigureCanvasAgg, FigureCanvasTk):
+ def draw(self):
+ super(FigureCanvasTkAgg, self).draw()
+ tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
+ self._master.update_idletasks()
+
+ def blit(self, bbox=None):
+ tkagg.blit(
+ self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
+ self._master.update_idletasks()
+
+
+@cbook.deprecated("2.2")
+class FigureManagerTkAgg(FigureManagerTk):
+ pass
+
+
+@cbook.deprecated("2.2")
+class NavigationToolbar2TkAgg(NavigationToolbar2Tk):
+ pass
+
+
+@_BackendTk.export
+class _BackendTkAgg(_BackendTk):
+ FigureCanvas = FigureCanvasTkAgg
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_tkcairo.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_tkcairo.py
new file mode 100644
index 00000000000..c4edfb97ed1
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_tkcairo.py
@@ -0,0 +1,37 @@
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+import numpy as np
+
+from . import tkagg # Paint image to Tk photo blitter extension.
+from .backend_cairo import cairo, FigureCanvasCairo, RendererCairo
+from ._backend_tk import _BackendTk, FigureCanvasTk
+
+
+class FigureCanvasTkCairo(FigureCanvasCairo, FigureCanvasTk):
+ def __init__(self, *args, **kwargs):
+ super(FigureCanvasTkCairo, self).__init__(*args, **kwargs)
+ self._renderer = RendererCairo(self.figure.dpi)
+
+ def draw(self):
+ width = int(self.figure.bbox.width)
+ height = int(self.figure.bbox.height)
+ surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
+ self._renderer.set_ctx_from_surface(surface)
+ self._renderer.set_width_height(width, height)
+ self.figure.draw(self._renderer)
+ buf = np.reshape(surface.get_data(), (height, width, 4))
+ # Convert from ARGB32 to RGBA8888. Using .take() instead of directly
+ # indexing ensures C-contiguity of the result, which is needed by
+ # tkagg.
+ buf = buf.take(
+ [2, 1, 0, 3] if sys.byteorder == "little" else [1, 2, 3, 0],
+ axis=2)
+ tkagg.blit(self._tkphoto, buf, colormode=2)
+ self._master.update_idletasks()
+
+
+@_BackendTk.export
+class _BackendTkCairo(_BackendTk):
+ FigureCanvas = FigureCanvasTkCairo
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_webagg.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_webagg.py
new file mode 100644
index 00000000000..c917a162ab1
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_webagg.py
@@ -0,0 +1,350 @@
+"""
+Displays Agg images in the browser, with interactivity
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+# The WebAgg backend is divided into two modules:
+#
+# - `backend_webagg_core.py` contains code necessary to embed a WebAgg
+# plot inside of a web application, and communicate in an abstract
+# way over a web socket.
+#
+# - `backend_webagg.py` contains a concrete implementation of a basic
+# application, implemented with tornado.
+
+import six
+
+from contextlib import contextmanager
+import errno
+import json
+import os
+import random
+import sys
+import signal
+import socket
+import threading
+
+try:
+ import tornado
+except ImportError:
+ raise RuntimeError("The WebAgg backend requires Tornado.")
+
+import tornado.web
+import tornado.ioloop
+import tornado.websocket
+
+from matplotlib import rcParams
+from matplotlib.backend_bases import _Backend
+from matplotlib._pylab_helpers import Gcf
+from . import backend_webagg_core as core
+from .backend_webagg_core import TimerTornado
+
+
+class ServerThread(threading.Thread):
+ def run(self):
+ tornado.ioloop.IOLoop.instance().start()
+
+webagg_server_thread = ServerThread()
+
+
+class FigureCanvasWebAgg(core.FigureCanvasWebAggCore):
+ def show(self):
+ # show the figure window
+ show()
+
+ def new_timer(self, *args, **kwargs):
+ return TimerTornado(*args, **kwargs)
+
+
+class WebAggApplication(tornado.web.Application):
+ initialized = False
+ started = False
+
+ class FavIcon(tornado.web.RequestHandler):
+ def get(self):
+ image_path = os.path.join(
+ os.path.dirname(os.path.dirname(__file__)),
+ 'mpl-data', 'images')
+
+ self.set_header('Content-Type', 'image/png')
+ with open(os.path.join(image_path,
+ 'matplotlib.png'), 'rb') as fd:
+ self.write(fd.read())
+
+ class SingleFigurePage(tornado.web.RequestHandler):
+ def __init__(self, application, request, **kwargs):
+ self.url_prefix = kwargs.pop('url_prefix', '')
+ tornado.web.RequestHandler.__init__(self, application,
+ request, **kwargs)
+
+ def get(self, fignum):
+ fignum = int(fignum)
+ manager = Gcf.get_fig_manager(fignum)
+
+ ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request,
+ prefix=self.url_prefix)
+ self.render(
+ "single_figure.html",
+ prefix=self.url_prefix,
+ ws_uri=ws_uri,
+ fig_id=fignum,
+ toolitems=core.NavigationToolbar2WebAgg.toolitems,
+ canvas=manager.canvas)
+
+ class AllFiguresPage(tornado.web.RequestHandler):
+ def __init__(self, application, request, **kwargs):
+ self.url_prefix = kwargs.pop('url_prefix', '')
+ tornado.web.RequestHandler.__init__(self, application,
+ request, **kwargs)
+
+ def get(self):
+ ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request,
+ prefix=self.url_prefix)
+ self.render(
+ "all_figures.html",
+ prefix=self.url_prefix,
+ ws_uri=ws_uri,
+ figures=sorted(Gcf.figs.items()),
+ toolitems=core.NavigationToolbar2WebAgg.toolitems)
+
+ class MplJs(tornado.web.RequestHandler):
+ def get(self):
+ self.set_header('Content-Type', 'application/javascript')
+
+ js_content = core.FigureManagerWebAgg.get_javascript()
+
+ self.write(js_content)
+
+ class Download(tornado.web.RequestHandler):
+ def get(self, fignum, fmt):
+ fignum = int(fignum)
+ manager = Gcf.get_fig_manager(fignum)
+
+ # TODO: Move this to a central location
+ mimetypes = {
+ 'ps': 'application/postscript',
+ 'eps': 'application/postscript',
+ 'pdf': 'application/pdf',
+ 'svg': 'image/svg+xml',
+ 'png': 'image/png',
+ 'jpeg': 'image/jpeg',
+ 'tif': 'image/tiff',
+ 'emf': 'application/emf'
+ }
+
+ self.set_header('Content-Type', mimetypes.get(fmt, 'binary'))
+
+ buff = six.BytesIO()
+ manager.canvas.figure.savefig(buff, format=fmt)
+ self.write(buff.getvalue())
+
+ class WebSocket(tornado.websocket.WebSocketHandler):
+ supports_binary = True
+
+ def open(self, fignum):
+ self.fignum = int(fignum)
+ self.manager = Gcf.get_fig_manager(self.fignum)
+ self.manager.add_web_socket(self)
+ if hasattr(self, 'set_nodelay'):
+ self.set_nodelay(True)
+
+ def on_close(self):
+ self.manager.remove_web_socket(self)
+
+ def on_message(self, message):
+ message = json.loads(message)
+ # The 'supports_binary' message is on a client-by-client
+ # basis. The others affect the (shared) canvas as a
+ # whole.
+ if message['type'] == 'supports_binary':
+ self.supports_binary = message['value']
+ else:
+ manager = Gcf.get_fig_manager(self.fignum)
+ # It is possible for a figure to be closed,
+ # but a stale figure UI is still sending messages
+ # from the browser.
+ if manager is not None:
+ manager.handle_json(message)
+
+ def send_json(self, content):
+ self.write_message(json.dumps(content))
+
+ def send_binary(self, blob):
+ if self.supports_binary:
+ self.write_message(blob, binary=True)
+ else:
+ data_uri = "data:image/png;base64,{0}".format(
+ blob.encode('base64').replace('\n', ''))
+ self.write_message(data_uri)
+
+ def __init__(self, url_prefix=''):
+ if url_prefix:
+ assert url_prefix[0] == '/' and url_prefix[-1] != '/', \
+ 'url_prefix must start with a "/" and not end with one.'
+
+ super(WebAggApplication, self).__init__(
+ [
+ # Static files for the CSS and JS
+ (url_prefix + r'/_static/(.*)',
+ tornado.web.StaticFileHandler,
+ {'path': core.FigureManagerWebAgg.get_static_file_path()}),
+
+ # An MPL favicon
+ (url_prefix + r'/favicon.ico', self.FavIcon),
+
+ # The page that contains all of the pieces
+ (url_prefix + r'/([0-9]+)', self.SingleFigurePage,
+ {'url_prefix': url_prefix}),
+
+ # The page that contains all of the figures
+ (url_prefix + r'/?', self.AllFiguresPage,
+ {'url_prefix': url_prefix}),
+
+ (url_prefix + r'/js/mpl.js', self.MplJs),
+
+ # Sends images and events to the browser, and receives
+ # events from the browser
+ (url_prefix + r'/([0-9]+)/ws', self.WebSocket),
+
+ # Handles the downloading (i.e., saving) of static images
+ (url_prefix + r'/([0-9]+)/download.([a-z0-9.]+)',
+ self.Download),
+ ],
+ template_path=core.FigureManagerWebAgg.get_static_file_path())
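+        # With url_prefix='' the handlers above serve, for example,
+        # '/_static/...', '/favicon.ico', '/1' (single-figure page), '/'
+        # (all figures), '/js/mpl.js', '/1/ws' (the websocket), and
+        # '/1/download.png' (save figure 1 as PNG).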
+
+ @classmethod
+ def initialize(cls, url_prefix='', port=None, address=None):
+ if cls.initialized:
+ return
+
+ # Create the class instance
+ app = cls(url_prefix=url_prefix)
+
+ cls.url_prefix = url_prefix
+
+ # This port selection algorithm is borrowed, more or less
+ # verbatim, from IPython.
+ def random_ports(port, n):
+ """
+ Generate a list of n random ports near the given port.
+
+ The first 5 ports will be sequential, and the remaining n-5 will be
+ randomly selected in the range [port-2*n, port+2*n].
+ """
+ for i in range(min(5, n)):
+ yield port + i
+ for i in range(n - 5):
+ yield port + random.randint(-2 * n, 2 * n)
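+        # For instance, random_ports(8988, 50) yields 8988..8992 first and
+        # then 45 more ports drawn at random from the range [8888, 9088].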
+
+ success = None
+
+ if address is None:
+ cls.address = rcParams['webagg.address']
+ else:
+ cls.address = address
+ cls.port = rcParams['webagg.port']
+ for port in random_ports(cls.port, rcParams['webagg.port_retries']):
+ try:
+ app.listen(port, cls.address)
+ except socket.error as e:
+ if e.errno != errno.EADDRINUSE:
+ raise
+ else:
+ cls.port = port
+ success = True
+ break
+
+ if not success:
+ raise SystemExit(
+ "The webagg server could not be started because an available "
+ "port could not be found")
+
+ cls.initialized = True
+
+ @classmethod
+ def start(cls):
+ if cls.started:
+ return
+
+ """
+ IOLoop.running() was removed as of Tornado 2.4; see for example
+ https://groups.google.com/forum/#!topic/python-tornado/QLMzkpQBGOY
+ Thus there is no correct way to check if the loop has already been
+ launched. We may end up with two concurrently running loops in that
+ unlucky case with all the expected consequences.
+ """
+ ioloop = tornado.ioloop.IOLoop.instance()
+
+ def shutdown():
+ ioloop.stop()
+ print("Server is stopped")
+ sys.stdout.flush()
+ cls.started = False
+
+ @contextmanager
+ def catch_sigint():
+ old_handler = signal.signal(
+ signal.SIGINT,
+ lambda sig, frame: ioloop.add_callback_from_signal(shutdown))
+ try:
+ yield
+ finally:
+ signal.signal(signal.SIGINT, old_handler)
+
+ # Set the flag to True *before* blocking on ioloop.start()
+ cls.started = True
+
+ print("Press Ctrl+C to stop WebAgg server")
+ sys.stdout.flush()
+ with catch_sigint():
+ ioloop.start()
+
+
+def ipython_inline_display(figure):
+ import tornado.template
+
+ WebAggApplication.initialize()
+ if not webagg_server_thread.is_alive():
+ webagg_server_thread.start()
+
+ with open(os.path.join(
+ core.FigureManagerWebAgg.get_static_file_path(),
+ 'ipython_inline_figure.html')) as fd:
+ tpl = fd.read()
+
+ fignum = figure.number
+
+ t = tornado.template.Template(tpl)
+ return t.generate(
+ prefix=WebAggApplication.url_prefix,
+ fig_id=fignum,
+ toolitems=core.NavigationToolbar2WebAgg.toolitems,
+ canvas=figure.canvas,
+ port=WebAggApplication.port).decode('utf-8')
+
+
+@_Backend.export
+class _BackendWebAgg(_Backend):
+ FigureCanvas = FigureCanvasWebAgg
+ FigureManager = core.FigureManagerWebAgg
+
+ @staticmethod
+ def trigger_manager_draw(manager):
+ manager.canvas.draw_idle()
+
+ @staticmethod
+ def show():
+ WebAggApplication.initialize()
+
+ url = "http://127.0.0.1:{port}{prefix}".format(
+ port=WebAggApplication.port,
+ prefix=WebAggApplication.url_prefix)
+
+ if rcParams['webagg.open_in_browser']:
+ import webbrowser
+ webbrowser.open(url)
+ else:
+ print("To view figure, visit {0}".format(url))
+
+ WebAggApplication.start()
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_webagg_core.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_webagg_core.py
new file mode 100644
index 00000000000..e75014b1e63
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_webagg_core.py
@@ -0,0 +1,543 @@
+"""
+Displays Agg images in the browser, with interactivity
+"""
+# The WebAgg backend is divided into two modules:
+#
+# - `backend_webagg_core.py` contains code necessary to embed a WebAgg
+# plot inside of a web application, and communicate in an abstract
+# way over a web socket.
+#
+# - `backend_webagg.py` contains a concrete implementation of a basic
+# application, implemented with tornado.
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import datetime
+import io
+import json
+import os
+import warnings
+
+import numpy as np
+import tornado
+
+from matplotlib.backends import backend_agg
+from matplotlib.backend_bases import _Backend
+from matplotlib import backend_bases
+from matplotlib import _png
+
+
+# http://www.cambiaresearch.com/articles/15/javascript-char-codes-key-codes
+_SHIFT_LUT = {59: ':',
+ 61: '+',
+ 173: '_',
+ 186: ':',
+ 187: '+',
+ 188: '<',
+ 189: '_',
+ 190: '>',
+ 191: '?',
+ 192: '~',
+ 219: '{',
+ 220: '|',
+ 221: '}',
+ 222: '"'}
+
+_LUT = {8: 'backspace',
+ 9: 'tab',
+ 13: 'enter',
+ 16: 'shift',
+ 17: 'control',
+ 18: 'alt',
+ 19: 'pause',
+ 20: 'caps',
+ 27: 'escape',
+ 32: ' ',
+ 33: 'pageup',
+ 34: 'pagedown',
+ 35: 'end',
+ 36: 'home',
+ 37: 'left',
+ 38: 'up',
+ 39: 'right',
+ 40: 'down',
+ 45: 'insert',
+ 46: 'delete',
+ 91: 'super',
+ 92: 'super',
+ 93: 'select',
+ 106: '*',
+ 107: '+',
+ 109: '-',
+ 110: '.',
+ 111: '/',
+ 144: 'num_lock',
+ 145: 'scroll_lock',
+ 186: ':',
+ 187: '=',
+ 188: ',',
+ 189: '-',
+ 190: '.',
+ 191: '/',
+ 192: '`',
+ 219: '[',
+ 220: '\\',
+ 221: ']',
+ 222: "'"}
+
+
+def _handle_key(key):
+ """Handle key codes"""
+ code = int(key[key.index('k') + 1:])
+ value = chr(code)
+ # letter keys
+ if code >= 65 and code <= 90:
+ if 'shift+' in key:
+ key = key.replace('shift+', '')
+ else:
+ value = value.lower()
+ # number keys
+ elif code >= 48 and code <= 57:
+ if 'shift+' in key:
+ value = ')!@#$%^&*('[int(value)]
+ key = key.replace('shift+', '')
+ # function keys
+ elif code >= 112 and code <= 123:
+ value = 'f%s' % (code - 111)
+ # number pad keys
+ elif code >= 96 and code <= 105:
+ value = '%s' % (code - 96)
+ # keys with shift alternatives
+ elif code in _SHIFT_LUT and 'shift+' in key:
+ key = key.replace('shift+', '')
+ value = _SHIFT_LUT[code]
+ elif code in _LUT:
+ value = _LUT[code]
+ key = key[:key.index('k')] + value
+ return key
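+
+# For example (a sketch of the mapping, not additional API):
+#   _handle_key('k65')        -> 'a'    (plain letter key)
+#   _handle_key('shift+k65')  -> 'A'
+#   _handle_key('k112')       -> 'f1'   (function key)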
+
+
+class FigureCanvasWebAggCore(backend_agg.FigureCanvasAgg):
+ supports_blit = False
+
+ def __init__(self, *args, **kwargs):
+ backend_agg.FigureCanvasAgg.__init__(self, *args, **kwargs)
+
+ # Set to True when the renderer contains data that is newer
+ # than the PNG buffer.
+ self._png_is_old = True
+
+ # Set to True by the `refresh` message so that the next frame
+ # sent to the clients will be a full frame.
+ self._force_full = True
+
+ # Store the current image mode so that at any point, clients can
+ # request the information. This should be changed by calling
+ # self.set_image_mode(mode) so that the notification can be given
+ # to the connected clients.
+ self._current_image_mode = 'full'
+
+ # Store the DPI ratio of the browser. This is the scaling that
+ # occurs automatically for all images on a HiDPI display.
+ self._dpi_ratio = 1
+
+ def show(self):
+ # show the figure window
+ from matplotlib.pyplot import show
+ show()
+
+ def draw(self):
+ renderer = self.get_renderer(cleared=True)
+
+ self._png_is_old = True
+
+ backend_agg.RendererAgg.lock.acquire()
+ try:
+ self.figure.draw(renderer)
+ finally:
+ backend_agg.RendererAgg.lock.release()
+ # Swap the frames
+ self.manager.refresh_all()
+
+ def draw_idle(self):
+ self.send_event("draw")
+
+ def set_image_mode(self, mode):
+ """
+ Set the image mode for any subsequent images which will be sent
+ to the clients. The modes may currently be either 'full' or 'diff'.
+
+        Note: diff images may not contain transparency, so upon draw this
+        mode may be changed if the resulting image has any transparent
+        component.
+
+ """
+ if mode not in ['full', 'diff']:
+ raise ValueError('image mode must be either full or diff.')
+ if self._current_image_mode != mode:
+ self._current_image_mode = mode
+ self.handle_send_image_mode(None)
+
+ def get_diff_image(self):
+ if self._png_is_old:
+ renderer = self.get_renderer()
+
+ # The buffer is created as type uint32 so that entire
+ # pixels can be compared in one numpy call, rather than
+ # needing to compare each plane separately.
+ buff = (np.frombuffer(renderer.buffer_rgba(), dtype=np.uint32)
+ .reshape((renderer.height, renderer.width)))
+
+ # If any pixels have transparency, we need to force a full
+ # draw as we cannot overlay new on top of old.
+ pixels = buff.view(dtype=np.uint8).reshape(buff.shape + (4,))
+
+ if self._force_full or np.any(pixels[:, :, 3] != 255):
+ self.set_image_mode('full')
+ output = buff
+ else:
+ self.set_image_mode('diff')
+ last_buffer = (np.frombuffer(self._last_renderer.buffer_rgba(),
+ dtype=np.uint32)
+ .reshape((renderer.height, renderer.width)))
+ diff = buff != last_buffer
+ output = np.where(diff, buff, 0)
+
+ # TODO: We should write a new version of write_png that
+ # handles the differencing inline
+ buff = _png.write_png(
+ output.view(dtype=np.uint8).reshape(output.shape + (4,)),
+ None, compression=6, filter=_png.PNG_FILTER_NONE)
+
+ # Swap the renderer frames
+ self._renderer, self._last_renderer = (
+ self._last_renderer, renderer)
+ self._force_full = False
+ self._png_is_old = False
+ return buff
+
+ def get_renderer(self, cleared=None):
+ # Mirrors super.get_renderer, but caches the old one
+ # so that we can do things such as produce a diff image
+ # in get_diff_image
+ _, _, w, h = self.figure.bbox.bounds
+ w, h = int(w), int(h)
+ key = w, h, self.figure.dpi
+ try:
+ self._lastKey, self._renderer
+ except AttributeError:
+ need_new_renderer = True
+ else:
+ need_new_renderer = (self._lastKey != key)
+
+ if need_new_renderer:
+ self._renderer = backend_agg.RendererAgg(
+ w, h, self.figure.dpi)
+ self._last_renderer = backend_agg.RendererAgg(
+ w, h, self.figure.dpi)
+ self._lastKey = key
+
+ elif cleared:
+ self._renderer.clear()
+
+ return self._renderer
+
+ def handle_event(self, event):
+ e_type = event['type']
+ handler = getattr(self, 'handle_{0}'.format(e_type),
+ self.handle_unknown_event)
+ return handler(event)
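+
+    # For example, a payload like {'type': 'resize', 'width': 640,
+    # 'height': 480} is routed to self.handle_resize(event); an
+    # unrecognized 'type' falls back to handle_unknown_event, which
+    # only emits a warning.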
+
+ def handle_unknown_event(self, event):
+ warnings.warn('Unhandled message type {0}. {1}'.format(
+ event['type'], event))
+
+ def handle_ack(self, event):
+ # Network latency tends to decrease if traffic is flowing
+ # in both directions. Therefore, the browser sends back
+ # an "ack" message after each image frame is received.
+ # This could also be used as a simple sanity check in the
+ # future, but for now the performance increase is enough
+ # to justify it, even if the server does nothing with it.
+ pass
+
+ def handle_draw(self, event):
+ self.draw()
+
+ def _handle_mouse(self, event):
+ x = event['x']
+ y = event['y']
+ y = self.get_renderer().height - y
+
+ # Javascript button numbers and matplotlib button numbers are
+ # off by 1
+ button = event['button'] + 1
+
+ # The right mouse button pops up a context menu, which
+ # doesn't work very well, so use the middle mouse button
+ # instead. It doesn't seem that it's possible to disable
+ # the context menu in recent versions of Chrome. If this
+ # is resolved, please also adjust the docstring in MouseEvent.
+ if button == 2:
+ button = 3
+
+ e_type = event['type']
+ guiEvent = event.get('guiEvent', None)
+ if e_type == 'button_press':
+ self.button_press_event(x, y, button, guiEvent=guiEvent)
+ elif e_type == 'button_release':
+ self.button_release_event(x, y, button, guiEvent=guiEvent)
+ elif e_type == 'motion_notify':
+ self.motion_notify_event(x, y, guiEvent=guiEvent)
+ elif e_type == 'figure_enter':
+ self.enter_notify_event(xy=(x, y), guiEvent=guiEvent)
+ elif e_type == 'figure_leave':
+ self.leave_notify_event()
+ elif e_type == 'scroll':
+ self.scroll_event(x, y, event['step'], guiEvent=guiEvent)
+ handle_button_press = handle_button_release = handle_motion_notify = \
+ handle_figure_enter = handle_figure_leave = handle_scroll = \
+ _handle_mouse
+
+ def _handle_key(self, event):
+ key = _handle_key(event['key'])
+ e_type = event['type']
+ guiEvent = event.get('guiEvent', None)
+ if e_type == 'key_press':
+ self.key_press_event(key, guiEvent=guiEvent)
+ elif e_type == 'key_release':
+ self.key_release_event(key, guiEvent=guiEvent)
+ handle_key_press = handle_key_release = _handle_key
+
+ def handle_toolbar_button(self, event):
+ # TODO: Be more suspicious of the input
+ getattr(self.toolbar, event['name'])()
+
+ def handle_refresh(self, event):
+ figure_label = self.figure.get_label()
+ if not figure_label:
+ figure_label = "Figure {0}".format(self.manager.num)
+ self.send_event('figure_label', label=figure_label)
+ self._force_full = True
+ self.draw_idle()
+
+ def handle_resize(self, event):
+ x, y = event.get('width', 800), event.get('height', 800)
+ x, y = int(x) * self._dpi_ratio, int(y) * self._dpi_ratio
+ fig = self.figure
+ # An attempt at approximating the figure size in pixels.
+ fig.set_size_inches(x / fig.dpi, y / fig.dpi, forward=False)
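+        # e.g. a 640 x 480 CSS-pixel canvas reported by the browser with
+        # _dpi_ratio == 2 becomes 1280 x 960 device pixels, i.e. a
+        # (1280 / dpi) x (960 / dpi) inch figure.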
+
+ _, _, w, h = self.figure.bbox.bounds
+ # Acknowledge the resize, and force the viewer to update the
+ # canvas size to the figure's new size (which is hopefully
+ # identical or within a pixel or so).
+ self._png_is_old = True
+ self.manager.resize(w, h)
+ self.resize_event()
+
+ def handle_send_image_mode(self, event):
+ # The client requests notification of what the current image mode is.
+ self.send_event('image_mode', mode=self._current_image_mode)
+
+ def handle_set_dpi_ratio(self, event):
+ dpi_ratio = event.get('dpi_ratio', 1)
+ if dpi_ratio != self._dpi_ratio:
+ # We don't want to scale up the figure dpi more than once.
+ if not hasattr(self.figure, '_original_dpi'):
+ self.figure._original_dpi = self.figure.dpi
+ self.figure.dpi = dpi_ratio * self.figure._original_dpi
+ self._dpi_ratio = dpi_ratio
+ self._force_full = True
+ self.draw_idle()
+
+ def send_event(self, event_type, **kwargs):
+ self.manager._send_event(event_type, **kwargs)
+
+
+_JQUERY_ICON_CLASSES = {
+ 'home': 'ui-icon ui-icon-home',
+ 'back': 'ui-icon ui-icon-circle-arrow-w',
+ 'forward': 'ui-icon ui-icon-circle-arrow-e',
+ 'zoom_to_rect': 'ui-icon ui-icon-search',
+ 'move': 'ui-icon ui-icon-arrow-4',
+ 'download': 'ui-icon ui-icon-disk',
+ None: None,
+}
+
+
+class NavigationToolbar2WebAgg(backend_bases.NavigationToolbar2):
+
+ # Use the standard toolbar items + download button
+ toolitems = [(text, tooltip_text, _JQUERY_ICON_CLASSES[image_file],
+ name_of_method)
+ for text, tooltip_text, image_file, name_of_method
+ in (backend_bases.NavigationToolbar2.toolitems +
+ (('Download', 'Download plot', 'download', 'download'),))
+ if image_file in _JQUERY_ICON_CLASSES]
+
+ def _init_toolbar(self):
+ self.message = ''
+ self.cursor = 0
+
+ def set_message(self, message):
+ if message != self.message:
+ self.canvas.send_event("message", message=message)
+ self.message = message
+
+ def set_cursor(self, cursor):
+ if cursor != self.cursor:
+ self.canvas.send_event("cursor", cursor=cursor)
+ self.cursor = cursor
+
+ def draw_rubberband(self, event, x0, y0, x1, y1):
+ self.canvas.send_event(
+ "rubberband", x0=x0, y0=y0, x1=x1, y1=y1)
+
+ def release_zoom(self, event):
+ backend_bases.NavigationToolbar2.release_zoom(self, event)
+ self.canvas.send_event(
+ "rubberband", x0=-1, y0=-1, x1=-1, y1=-1)
+
+ def save_figure(self, *args):
+ """Save the current figure"""
+ self.canvas.send_event('save')
+
+
+class FigureManagerWebAgg(backend_bases.FigureManagerBase):
+ ToolbarCls = NavigationToolbar2WebAgg
+
+ def __init__(self, canvas, num):
+ backend_bases.FigureManagerBase.__init__(self, canvas, num)
+
+ self.web_sockets = set()
+
+ self.toolbar = self._get_toolbar(canvas)
+
+ def show(self):
+ pass
+
+ def _get_toolbar(self, canvas):
+ toolbar = self.ToolbarCls(canvas)
+ return toolbar
+
+ def resize(self, w, h):
+ self._send_event(
+ 'resize',
+ size=(w / self.canvas._dpi_ratio, h / self.canvas._dpi_ratio))
+
+ def set_window_title(self, title):
+ self._send_event('figure_label', label=title)
+
+ # The following methods are specific to FigureManagerWebAgg
+
+ def add_web_socket(self, web_socket):
+ assert hasattr(web_socket, 'send_binary')
+ assert hasattr(web_socket, 'send_json')
+
+ self.web_sockets.add(web_socket)
+
+ _, _, w, h = self.canvas.figure.bbox.bounds
+ self.resize(w, h)
+ self._send_event('refresh')
+
+ def remove_web_socket(self, web_socket):
+ self.web_sockets.remove(web_socket)
+
+ def handle_json(self, content):
+ self.canvas.handle_event(content)
+
+ def refresh_all(self):
+ if self.web_sockets:
+ diff = self.canvas.get_diff_image()
+ if diff is not None:
+ for s in self.web_sockets:
+ s.send_binary(diff)
+
+ @classmethod
+ def get_javascript(cls, stream=None):
+ if stream is None:
+ output = io.StringIO()
+ else:
+ output = stream
+
+ with io.open(os.path.join(
+ os.path.dirname(__file__),
+ "web_backend", "js",
+ "mpl.js"), encoding='utf8') as fd:
+ output.write(fd.read())
+
+ toolitems = []
+ for name, tooltip, image, method in cls.ToolbarCls.toolitems:
+ if name is None:
+ toolitems.append(['', '', '', ''])
+ else:
+ toolitems.append([name, tooltip, image, method])
+ output.write("mpl.toolbar_items = {0};\n\n".format(
+ json.dumps(toolitems)))
+
+ extensions = []
+ for filetype, ext in sorted(FigureCanvasWebAggCore.
+ get_supported_filetypes_grouped().
+ items()):
+ if not ext[0] == 'pgf': # pgf does not support BytesIO
+ extensions.append(ext[0])
+ output.write("mpl.extensions = {0};\n\n".format(
+ json.dumps(extensions)))
+
+ output.write("mpl.default_extension = {0};".format(
+ json.dumps(FigureCanvasWebAggCore.get_default_filetype())))
+
+ if stream is None:
+ return output.getvalue()
+
+ @classmethod
+ def get_static_file_path(cls):
+ return os.path.join(os.path.dirname(__file__), 'web_backend')
+
+ def _send_event(self, event_type, **kwargs):
+ payload = {'type': event_type}
+ payload.update(kwargs)
+ for s in self.web_sockets:
+ s.send_json(payload)
+
+
+class TimerTornado(backend_bases.TimerBase):
+ def _timer_start(self):
+ self._timer_stop()
+ if self._single:
+ ioloop = tornado.ioloop.IOLoop.instance()
+ self._timer = ioloop.add_timeout(
+ datetime.timedelta(milliseconds=self.interval),
+ self._on_timer)
+ else:
+ self._timer = tornado.ioloop.PeriodicCallback(
+ self._on_timer,
+ self.interval)
+ self._timer.start()
+
+ def _timer_stop(self):
+ if self._timer is None:
+ return
+ elif self._single:
+ ioloop = tornado.ioloop.IOLoop.instance()
+ ioloop.remove_timeout(self._timer)
+ else:
+ self._timer.stop()
+
+ self._timer = None
+
+ def _timer_set_interval(self):
+ # Only stop and restart it if the timer has already been started
+ if self._timer is not None:
+ self._timer_stop()
+ self._timer_start()
+
+
+@_Backend.export
+class _BackendWebAggCoreAgg(_Backend):
+ FigureCanvas = FigureCanvasWebAggCore
+ FigureManager = FigureManagerWebAgg
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_wx.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_wx.py
new file mode 100644
index 00000000000..2ecc1a4b3b4
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_wx.py
@@ -0,0 +1,2002 @@
+"""
+ A wxPython backend for matplotlib, based (very heavily) on
+ backend_template.py and backend_gtk.py
+
+ Author: Jeremy O'Donoghue (jeremy@o-donoghue.com)
+
+ Derived from original copyright work by John Hunter
+ (jdhunter@ace.bsd.uchicago.edu)
+
+ Copyright (C) Jeremy O'Donoghue & John Hunter, 2003-4
+
+ License: This work is licensed under a PSF compatible license. A copy
+ should be included with this source code.
+
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange
+
+import sys
+import os
+import os.path
+import math
+import weakref
+import warnings
+
+import matplotlib
+from matplotlib.backend_bases import (
+ _Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
+ NavigationToolbar2, RendererBase, TimerBase, cursors)
+from matplotlib.backend_bases import _has_pil
+
+from matplotlib._pylab_helpers import Gcf
+from matplotlib.cbook import is_writable_file_like, warn_deprecated
+from matplotlib.figure import Figure
+from matplotlib.path import Path
+from matplotlib.transforms import Affine2D
+from matplotlib.widgets import SubplotTool
+from matplotlib import cbook, rcParams, backend_tools
+
+from . import wx_compat as wxc
+import wx
+
+# Debugging settings here...
+# Debug level set here. If the debug level is less than 5, information
+# messages (progressively more info for lower value) are printed. In addition,
+# traceback is performed, and pdb activated, for all uncaught exceptions in
+# this case
+_DEBUG = 5
+if _DEBUG < 5:
+ import traceback
+ import pdb
+_DEBUG_lvls = {1: 'Low ', 2: 'Med ', 3: 'High', 4: 'Error'}
+
+
+def DEBUG_MSG(string, lvl=3, o=None):
+ if lvl >= _DEBUG:
+ cls = o.__class__
+ # Jeremy, often times the commented line won't print but the
+ # one below does. I think WX is redefining stderr, damned
+ # beast
+ # print("%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls),
+ # file=sys.stderr)
+ print("%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls))
+
+
+def debug_on_error(type, value, tb):
+ """Code due to Thomas Heller - published in Python Cookbook (O'Reilley)"""
+ traceback.print_exception(type, value, tb)
+ print()
+ pdb.pm() # jdh uncomment
+
+
+class fake_stderr(object):
+ """
+ Wx does strange things with stderr, as it makes the assumption that
+ there is probably no console. This redirects stderr to the console, since
+ we know that there is one!
+ """
+
+ def write(self, msg):
+ print("Stderr: %s\n\r" % msg)
+
+
+# the True dots per inch on the screen; should be display dependent
+# see
+# http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5
+# for some info about screen dpi
+PIXELS_PER_INCH = 75
+
+# Delay time for idle checks
+IDLE_DELAY = 5
+
+
+def error_msg_wx(msg, parent=None):
+ """
+    Signal an error condition -- in a GUI, pop up an error dialog
+ """
+ dialog = wx.MessageDialog(parent=parent,
+ message=msg,
+ caption='Matplotlib backend_wx error',
+ style=wx.OK | wx.CENTRE)
+ dialog.ShowModal()
+ dialog.Destroy()
+ return None
+
+
+def raise_msg_to_str(msg):
+ """msg is a return arg from a raise. Join with new lines."""
+ if not isinstance(msg, six.string_types):
+ msg = '\n'.join(map(str, msg))
+ return msg
+
+
+class TimerWx(TimerBase):
+ '''
+ Subclass of :class:`backend_bases.TimerBase` that uses WxTimer events.
+
+ Attributes
+ ----------
+ interval : int
+ The time between timer events in milliseconds. Default is 1000 ms.
+ single_shot : bool
+ Boolean flag indicating whether this timer should operate as single
+ shot (run once and then stop). Defaults to False.
+ callbacks : list
+ Stores list of (func, args) tuples that will be called upon timer
+ events. This list can be manipulated directly, or the functions
+ `add_callback` and `remove_callback` can be used.
+
+ '''
+
+ def __init__(self, parent, *args, **kwargs):
+ TimerBase.__init__(self, *args, **kwargs)
+
+ # Create a new timer and connect the timer event to our handler.
+ # For WX, the events have to use a widget for binding.
+ self.parent = parent
+ self._timer = wx.Timer(self.parent, wx.NewId())
+ self.parent.Bind(wx.EVT_TIMER, self._on_timer, self._timer)
+
+ # Unbinding causes Wx to stop for some reason. Disabling for now.
+# def __del__(self):
+# TimerBase.__del__(self)
+# self.parent.Bind(wx.EVT_TIMER, None, self._timer)
+
+ def _timer_start(self):
+ self._timer.Start(self._interval, self._single)
+
+ def _timer_stop(self):
+ self._timer.Stop()
+
+ def _timer_set_interval(self):
+ self._timer_start()
+
+ def _timer_set_single_shot(self):
+ self._timer.Start()
+
+ def _on_timer(self, *args):
+ TimerBase._on_timer(self)
+
+
+class RendererWx(RendererBase):
+ """
+ The renderer handles all the drawing primitives using a graphics
+ context instance that controls the colors/styles. It acts as the
+ 'renderer' instance used by many classes in the hierarchy.
+ """
+ # In wxPython, drawing is performed on a wxDC instance, which will
+    # generally be mapped to the client area of the window displaying
+ # the plot. Under wxPython, the wxDC instance has a wx.Pen which
+ # describes the colour and weight of any lines drawn, and a wxBrush
+ # which describes the fill colour of any closed polygon.
+
+ fontweights = wxc.fontweights
+ fontangles = wxc.fontangles
+
+ # wxPython allows for portable font styles, choosing them appropriately
+ # for the target platform. Map some standard font names to the portable
+ # styles
+    # QUESTION: Is it wise to agree on standard font names across all backends?
+ fontnames = wxc.fontnames
+
+ def __init__(self, bitmap, dpi):
+ """
+ Initialise a wxWindows renderer instance.
+ """
+ warn_deprecated('2.0', message="The WX backend is "
+ "deprecated. It's untested "
+ "and will be removed in Matplotlib 3.0. "
+ "Use the WXAgg backend instead. "
+ "See Matplotlib usage FAQ for more info on backends.",
+ alternative='WXAgg')
+ RendererBase.__init__(self)
+ DEBUG_MSG("__init__()", 1, self)
+ self.width = bitmap.GetWidth()
+ self.height = bitmap.GetHeight()
+ self.bitmap = bitmap
+ self.fontd = {}
+ self.dpi = dpi
+ self.gc = None
+
+ def flipy(self):
+ return True
+
+ def offset_text_height(self):
+ return True
+
+ def get_text_width_height_descent(self, s, prop, ismath):
+ """
+ get the width and height in display coords of the string s
+        with FontProperties prop
+ """
+ # return 1, 1
+ if ismath:
+ s = self.strip_math(s)
+
+ if self.gc is None:
+ gc = self.new_gc()
+ else:
+ gc = self.gc
+ gfx_ctx = gc.gfx_ctx
+ font = self.get_wx_font(s, prop)
+ gfx_ctx.SetFont(font, wx.BLACK)
+ w, h, descent, leading = gfx_ctx.GetFullTextExtent(s)
+
+ return w, h, descent
+
+ def get_canvas_width_height(self):
+ 'return the canvas width and height in display coords'
+ return self.width, self.height
+
+ def handle_clip_rectangle(self, gc):
+ new_bounds = gc.get_clip_rectangle()
+ if new_bounds is not None:
+ new_bounds = new_bounds.bounds
+ gfx_ctx = gc.gfx_ctx
+ if gfx_ctx._lastcliprect != new_bounds:
+ gfx_ctx._lastcliprect = new_bounds
+ if new_bounds is None:
+ gfx_ctx.ResetClip()
+ else:
+ gfx_ctx.Clip(new_bounds[0],
+ self.height - new_bounds[1] - new_bounds[3],
+ new_bounds[2], new_bounds[3])
+
+ @staticmethod
+ def convert_path(gfx_ctx, path, transform):
+ wxpath = gfx_ctx.CreatePath()
+ for points, code in path.iter_segments(transform):
+ if code == Path.MOVETO:
+ wxpath.MoveToPoint(*points)
+ elif code == Path.LINETO:
+ wxpath.AddLineToPoint(*points)
+ elif code == Path.CURVE3:
+ wxpath.AddQuadCurveToPoint(*points)
+ elif code == Path.CURVE4:
+ wxpath.AddCurveToPoint(*points)
+ elif code == Path.CLOSEPOLY:
+ wxpath.CloseSubpath()
+ return wxpath
+
+ def draw_path(self, gc, path, transform, rgbFace=None):
+ gc.select()
+ self.handle_clip_rectangle(gc)
+ gfx_ctx = gc.gfx_ctx
+ transform = transform + \
+ Affine2D().scale(1.0, -1.0).translate(0.0, self.height)
+ wxpath = self.convert_path(gfx_ctx, path, transform)
+ if rgbFace is not None:
+ gfx_ctx.SetBrush(wx.Brush(gc.get_wxcolour(rgbFace)))
+ gfx_ctx.DrawPath(wxpath)
+ else:
+ gfx_ctx.StrokePath(wxpath)
+ gc.unselect()
+
+ def draw_image(self, gc, x, y, im):
+ bbox = gc.get_clip_rectangle()
+ if bbox is not None:
+ l, b, w, h = bbox.bounds
+ else:
+ l = 0
+ b = 0
+ w = self.width
+ h = self.height
+ rows, cols = im.shape[:2]
+ bitmap = wxc.BitmapFromBuffer(cols, rows, im.tostring())
+ gc = self.get_gc()
+ gc.select()
+ gc.gfx_ctx.DrawBitmap(bitmap, int(l), int(self.height - b),
+ int(w), int(-h))
+ gc.unselect()
+
+ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
+ if ismath:
+ s = self.strip_math(s)
+ DEBUG_MSG("draw_text()", 1, self)
+ gc.select()
+ self.handle_clip_rectangle(gc)
+ gfx_ctx = gc.gfx_ctx
+
+ font = self.get_wx_font(s, prop)
+ color = gc.get_wxcolour(gc.get_rgb())
+ gfx_ctx.SetFont(font, color)
+
+ w, h, d = self.get_text_width_height_descent(s, prop, ismath)
+ x = int(x)
+ y = int(y - h)
+
+ if angle == 0.0:
+ gfx_ctx.DrawText(s, x, y)
+ else:
+ rads = math.radians(angle)
+ xo = h * math.sin(rads)
+ yo = h * math.cos(rads)
+ gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads)
+
+ gc.unselect()
+
+ def new_gc(self):
+ """
+        Return an instance of GraphicsContextWx and store it as the current gc.
+ """
+ DEBUG_MSG('new_gc()', 2, self)
+ self.gc = GraphicsContextWx(self.bitmap, self)
+ self.gc.select()
+ self.gc.unselect()
+ return self.gc
+
+ def get_gc(self):
+ """
+ Fetch the locally cached gc.
+ """
+ # This is a dirty hack to allow anything with access to a renderer to
+ # access the current graphics context
+ assert self.gc is not None, "gc must be defined"
+ return self.gc
+
+ def get_wx_font(self, s, prop):
+ """
+ Return a wx font. Cache instances in a font dictionary for
+ efficiency
+ """
+ DEBUG_MSG("get_wx_font()", 1, self)
+
+ key = hash(prop)
+ fontprop = prop
+ fontname = fontprop.get_name()
+
+ font = self.fontd.get(key)
+ if font is not None:
+ return font
+
+ # Allow use of platform independent and dependent font names
+ wxFontname = self.fontnames.get(fontname, wx.ROMAN)
+ wxFacename = '' # Empty => wxPython chooses based on wx_fontname
+
+ # Font colour is determined by the active wx.Pen
+ # TODO: It may be wise to cache font information
+ size = self.points_to_pixels(fontprop.get_size_in_points())
+
+ font = wx.Font(int(size + 0.5), # Size
+ wxFontname, # 'Generic' name
+ self.fontangles[fontprop.get_style()], # Angle
+ self.fontweights[fontprop.get_weight()], # Weight
+ False, # Underline
+ wxFacename) # Platform font name
+
+ # cache the font and gc and return it
+ self.fontd[key] = font
+
+ return font
+
+ def points_to_pixels(self, points):
+ """
+        convert point measures to pixels using dpi and the pixels per
+ inch of the display
+ """
+ return points * (PIXELS_PER_INCH / 72.0 * self.dpi / 72.0)
+
+
+class GraphicsContextWx(GraphicsContextBase):
+ """
+ The graphics context provides the color, line styles, etc...
+
+ This class stores a reference to a wxMemoryDC, and a
+ wxGraphicsContext that draws to it. Creating a wxGraphicsContext
+ seems to be fairly heavy, so these objects are cached based on the
+ bitmap object that is passed in.
+
+    The base GraphicsContext stores colors as an RGB tuple on the unit
+ interval, e.g., (0.5, 0.0, 1.0). wxPython uses an int interval, but
+ since wxPython colour management is rather simple, I have not chosen
+ to implement a separate colour manager class.
+ """
+ _capd = {'butt': wx.CAP_BUTT,
+ 'projecting': wx.CAP_PROJECTING,
+ 'round': wx.CAP_ROUND}
+
+ _joind = {'bevel': wx.JOIN_BEVEL,
+ 'miter': wx.JOIN_MITER,
+ 'round': wx.JOIN_ROUND}
+
+ _cache = weakref.WeakKeyDictionary()
+
+ def __init__(self, bitmap, renderer):
+ GraphicsContextBase.__init__(self)
+ # assert self.Ok(), "wxMemoryDC not OK to use"
+ DEBUG_MSG("__init__()", 1, self)
+ DEBUG_MSG("__init__() 2: %s" % bitmap, 1, self)
+
+ dc, gfx_ctx = self._cache.get(bitmap, (None, None))
+ if dc is None:
+ dc = wx.MemoryDC()
+ dc.SelectObject(bitmap)
+ gfx_ctx = wx.GraphicsContext.Create(dc)
+ gfx_ctx._lastcliprect = None
+ self._cache[bitmap] = dc, gfx_ctx
+
+ self.bitmap = bitmap
+ self.dc = dc
+ self.gfx_ctx = gfx_ctx
+ self._pen = wx.Pen('BLACK', 1, wx.SOLID)
+ gfx_ctx.SetPen(self._pen)
+ self._style = wx.SOLID
+ self.renderer = renderer
+
+ def select(self):
+ """
+ Select the current bitmap into this wxDC instance
+ """
+
+ if sys.platform == 'win32':
+ self.dc.SelectObject(self.bitmap)
+ self.IsSelected = True
+
+ def unselect(self):
+ """
+        Select a Null bitmap into this wxDC instance
+ """
+ if sys.platform == 'win32':
+ self.dc.SelectObject(wx.NullBitmap)
+ self.IsSelected = False
+
+ def set_foreground(self, fg, isRGBA=None):
+ """
+        Set the foreground color. fg can be a MATLAB format string, an
+        html hex color string, an RGB unit tuple, or a float between 0
+ and 1. In the latter case, grayscale is used.
+ """
+ # Implementation note: wxPython has a separate concept of pen and
+ # brush - the brush fills any outline trace left by the pen.
+ # Here we set both to the same colour - if a figure is not to be
+ # filled, the renderer will set the brush to be transparent
+ # Same goes for text foreground...
+ DEBUG_MSG("set_foreground()", 1, self)
+ self.select()
+ GraphicsContextBase.set_foreground(self, fg, isRGBA)
+
+ self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
+ self.gfx_ctx.SetPen(self._pen)
+ self.unselect()
+
+ def set_linewidth(self, w):
+ """
+ Set the line width.
+ """
+ w = float(w)
+ DEBUG_MSG("set_linewidth()", 1, self)
+ self.select()
+ if w > 0 and w < 1:
+ w = 1
+ GraphicsContextBase.set_linewidth(self, w)
+ lw = int(self.renderer.points_to_pixels(self._linewidth))
+ if lw == 0:
+ lw = 1
+ self._pen.SetWidth(lw)
+ self.gfx_ctx.SetPen(self._pen)
+ self.unselect()
+
+ def set_capstyle(self, cs):
+ """
+ Set the capstyle as a string in ('butt', 'round', 'projecting')
+ """
+ DEBUG_MSG("set_capstyle()", 1, self)
+ self.select()
+ GraphicsContextBase.set_capstyle(self, cs)
+ self._pen.SetCap(GraphicsContextWx._capd[self._capstyle])
+ self.gfx_ctx.SetPen(self._pen)
+ self.unselect()
+
+ def set_joinstyle(self, js):
+ """
+ Set the join style to be one of ('miter', 'round', 'bevel')
+ """
+ DEBUG_MSG("set_joinstyle()", 1, self)
+ self.select()
+ GraphicsContextBase.set_joinstyle(self, js)
+ self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])
+ self.gfx_ctx.SetPen(self._pen)
+ self.unselect()
+
+ @cbook.deprecated("2.1")
+ def set_linestyle(self, ls):
+ """
+        Set the line style to be one of the dash styles in wxc.dashd_wx.
+ """
+ DEBUG_MSG("set_linestyle()", 1, self)
+ self.select()
+ GraphicsContextBase.set_linestyle(self, ls)
+ try:
+ self._style = wxc.dashd_wx[ls]
+ except KeyError:
+ self._style = wx.LONG_DASH # Style not used elsewhere...
+
+ # On MS Windows platform, only line width of 1 allowed for dash lines
+ if wx.Platform == '__WXMSW__':
+ self.set_linewidth(1)
+
+ self._pen.SetStyle(self._style)
+ self.gfx_ctx.SetPen(self._pen)
+ self.unselect()
+
+ def get_wxcolour(self, color):
+ """return a wx.Colour from RGB format"""
+ DEBUG_MSG("get_wx_color()", 1, self)
+ if len(color) == 3:
+ r, g, b = color
+ r *= 255
+ g *= 255
+ b *= 255
+ return wx.Colour(red=int(r), green=int(g), blue=int(b))
+ else:
+ r, g, b, a = color
+ r *= 255
+ g *= 255
+ b *= 255
+ a *= 255
+ return wx.Colour(
+ red=int(r),
+ green=int(g),
+ blue=int(b),
+ alpha=int(a))
+
+
+class _FigureCanvasWxBase(FigureCanvasBase, wx.Panel):
+ """
+ The FigureCanvas contains the figure and does event handling.
+
+ In the wxPython backend, it is derived from wxPanel, and (usually) lives
+ inside a frame instantiated by a FigureManagerWx. The parent window
+ probably implements a wx.Sizer to control the displayed control size - but
+ we give a hint as to our preferred minimum size.
+ """
+
+ keyvald = {
+ wx.WXK_CONTROL: 'control',
+ wx.WXK_SHIFT: 'shift',
+ wx.WXK_ALT: 'alt',
+ wx.WXK_LEFT: 'left',
+ wx.WXK_UP: 'up',
+ wx.WXK_RIGHT: 'right',
+ wx.WXK_DOWN: 'down',
+ wx.WXK_ESCAPE: 'escape',
+ wx.WXK_F1: 'f1',
+ wx.WXK_F2: 'f2',
+ wx.WXK_F3: 'f3',
+ wx.WXK_F4: 'f4',
+ wx.WXK_F5: 'f5',
+ wx.WXK_F6: 'f6',
+ wx.WXK_F7: 'f7',
+ wx.WXK_F8: 'f8',
+ wx.WXK_F9: 'f9',
+ wx.WXK_F10: 'f10',
+ wx.WXK_F11: 'f11',
+ wx.WXK_F12: 'f12',
+ wx.WXK_SCROLL: 'scroll_lock',
+ wx.WXK_PAUSE: 'break',
+ wx.WXK_BACK: 'backspace',
+ wx.WXK_RETURN: 'enter',
+ wx.WXK_INSERT: 'insert',
+ wx.WXK_DELETE: 'delete',
+ wx.WXK_HOME: 'home',
+ wx.WXK_END: 'end',
+ wx.WXK_PAGEUP: 'pageup',
+ wx.WXK_PAGEDOWN: 'pagedown',
+ wx.WXK_NUMPAD0: '0',
+ wx.WXK_NUMPAD1: '1',
+ wx.WXK_NUMPAD2: '2',
+ wx.WXK_NUMPAD3: '3',
+ wx.WXK_NUMPAD4: '4',
+ wx.WXK_NUMPAD5: '5',
+ wx.WXK_NUMPAD6: '6',
+ wx.WXK_NUMPAD7: '7',
+ wx.WXK_NUMPAD8: '8',
+ wx.WXK_NUMPAD9: '9',
+ wx.WXK_NUMPAD_ADD: '+',
+ wx.WXK_NUMPAD_SUBTRACT: '-',
+ wx.WXK_NUMPAD_MULTIPLY: '*',
+ wx.WXK_NUMPAD_DIVIDE: '/',
+ wx.WXK_NUMPAD_DECIMAL: 'dec',
+ wx.WXK_NUMPAD_ENTER: 'enter',
+ wx.WXK_NUMPAD_UP: 'up',
+ wx.WXK_NUMPAD_RIGHT: 'right',
+ wx.WXK_NUMPAD_DOWN: 'down',
+ wx.WXK_NUMPAD_LEFT: 'left',
+ wx.WXK_NUMPAD_PAGEUP: 'pageup',
+ wx.WXK_NUMPAD_PAGEDOWN: 'pagedown',
+ wx.WXK_NUMPAD_HOME: 'home',
+ wx.WXK_NUMPAD_END: 'end',
+ wx.WXK_NUMPAD_INSERT: 'insert',
+ wx.WXK_NUMPAD_DELETE: 'delete',
+ }
+
+ def __init__(self, parent, id, figure):
+ """
+ Initialise a figure canvas (wx.Panel) instance.
+
+ - Initialise the FigureCanvasBase and wxPanel parents.
+ - Set event handlers for:
+ EVT_SIZE (Resize event)
+ EVT_PAINT (Paint event)
+ """
+
+ FigureCanvasBase.__init__(self, figure)
+ # Set preferred window size hint - helps the sizer (if one is
+ # connected)
+ l, b, w, h = figure.bbox.bounds
+ w = int(math.ceil(w))
+ h = int(math.ceil(h))
+
+ wx.Panel.__init__(self, parent, id, size=wx.Size(w, h))
+
+ def do_nothing(*args, **kwargs):
+ warnings.warn(
+ "could not find a setinitialsize function for backend_wx; "
+ "please report your wxpython version=%s "
+ "to the matplotlib developers list" %
+ wxc.backend_version)
+ pass
+
+ # try to find the set size func across wx versions
+ try:
+ getattr(self, 'SetInitialSize')
+ except AttributeError:
+ self.SetInitialSize = getattr(self, 'SetBestFittingSize',
+ do_nothing)
+
+ if not hasattr(self, 'IsShownOnScreen'):
+ self.IsShownOnScreen = getattr(self, 'IsVisible',
+ lambda *args: True)
+
+ # Create the drawing bitmap
+ self.bitmap = wxc.EmptyBitmap(w, h)
+ DEBUG_MSG("__init__() - bitmap w:%d h:%d" % (w, h), 2, self)
+ # TODO: Add support for 'point' inspection and plot navigation.
+ self._isDrawn = False
+
+ self.Bind(wx.EVT_SIZE, self._onSize)
+ self.Bind(wx.EVT_PAINT, self._onPaint)
+ self.Bind(wx.EVT_KEY_DOWN, self._onKeyDown)
+ self.Bind(wx.EVT_KEY_UP, self._onKeyUp)
+ self.Bind(wx.EVT_RIGHT_DOWN, self._onRightButtonDown)
+ self.Bind(wx.EVT_RIGHT_DCLICK, self._onRightButtonDClick)
+ self.Bind(wx.EVT_RIGHT_UP, self._onRightButtonUp)
+ self.Bind(wx.EVT_MOUSEWHEEL, self._onMouseWheel)
+ self.Bind(wx.EVT_LEFT_DOWN, self._onLeftButtonDown)
+ self.Bind(wx.EVT_LEFT_DCLICK, self._onLeftButtonDClick)
+ self.Bind(wx.EVT_LEFT_UP, self._onLeftButtonUp)
+ self.Bind(wx.EVT_MOTION, self._onMotion)
+ self.Bind(wx.EVT_LEAVE_WINDOW, self._onLeave)
+ self.Bind(wx.EVT_ENTER_WINDOW, self._onEnter)
+ # Add middle button events
+ self.Bind(wx.EVT_MIDDLE_DOWN, self._onMiddleButtonDown)
+ self.Bind(wx.EVT_MIDDLE_DCLICK, self._onMiddleButtonDClick)
+ self.Bind(wx.EVT_MIDDLE_UP, self._onMiddleButtonUp)
+
+ self.Bind(wx.EVT_MOUSE_CAPTURE_CHANGED, self._onCaptureLost)
+ self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self._onCaptureLost)
+
+ self.SetBackgroundStyle(wx.BG_STYLE_PAINT) # Reduce flicker.
+ self.SetBackgroundColour(wx.WHITE)
+
+ self.macros = {} # dict from wx id to seq of macros
+
+ def Destroy(self, *args, **kwargs):
+ wx.Panel.Destroy(self, *args, **kwargs)
+
+ def Copy_to_Clipboard(self, event=None):
+ "copy bitmap of canvas to system clipboard"
+ bmp_obj = wx.BitmapDataObject()
+ bmp_obj.SetBitmap(self.bitmap)
+
+ if not wx.TheClipboard.IsOpened():
+ open_success = wx.TheClipboard.Open()
+ if open_success:
+ wx.TheClipboard.SetData(bmp_obj)
+ wx.TheClipboard.Close()
+ wx.TheClipboard.Flush()
+
+ def draw_idle(self):
+ """
+ Delay rendering until the GUI is idle.
+ """
+ DEBUG_MSG("draw_idle()", 1, self)
+ self._isDrawn = False # Force redraw
+ # Triggering a paint event is all that is needed to defer drawing
+ # until later. The platform will send the event when it thinks it is
+ # a good time (usually as soon as there are no other events pending).
+ self.Refresh(eraseBackground=False)
+
+ def new_timer(self, *args, **kwargs):
+ """
+ Creates a new backend-specific subclass of
+ :class:`backend_bases.Timer`. This is useful for getting periodic
+ events through the backend's native event loop. Implemented only
+ for backends with GUIs.
+
+ Other Parameters
+ ----------------
+ interval : scalar
+ Timer interval in milliseconds
+ callbacks : list
+ Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
+ will be executed by the timer every *interval*.
+
+ """
+ return TimerWx(self, *args, **kwargs)
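+ # Hypothetical usage sketch (assumes an existing canvas and a running wx
+ # event loop; the print callback is purely illustrative):
+ # timer = canvas.new_timer(interval=500)
+ # timer.add_callback(lambda: print("tick"))
+ # timer.start()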
+
+ def flush_events(self):
+ wx.Yield()
+
+ def start_event_loop(self, timeout=0):
+ """
+ Start an event loop. This is used to start a blocking event
+ loop so that interactive functions, such as ginput and
+ waitforbuttonpress, can wait for events. This should not be
+ confused with the main GUI event loop, which is always running
+ and has nothing to do with this.
+
+ This call blocks until a callback function triggers
+ stop_event_loop() or *timeout* is reached. If *timeout* is
+ <= 0, the loop never times out.
+
+ Raises RuntimeError if event loop is already running.
+ """
+ if hasattr(self, '_event_loop'):
+ raise RuntimeError("Event loop already running")
+ id = wx.NewId()
+ timer = wx.Timer(self, id=id)
+ if timeout > 0:
+ timer.Start(timeout * 1000, oneShot=True)
+ self.Bind(wx.EVT_TIMER, self.stop_event_loop, id=id)
+
+ # Event loop handler for start/stop event loop
+ self._event_loop = wxc.EventLoop()
+ self._event_loop.Run()
+ timer.Stop()
+
+ def stop_event_loop(self, event=None):
+ """
+ Stop an event loop. This is used to stop a blocking event
+ loop so that interactive functions, such as ginput and
+ waitforbuttonpress, can wait for events.
+
+ """
+ if hasattr(self, '_event_loop'):
+ if self._event_loop.IsRunning():
+ self._event_loop.Exit()
+ del self._event_loop
+
+ def _get_imagesave_wildcards(self):
+ 'return the wildcard string for the filesave dialog'
+ default_filetype = self.get_default_filetype()
+ filetypes = self.get_supported_filetypes_grouped()
+ sorted_filetypes = sorted(filetypes.items())
+ wildcards = []
+ extensions = []
+ filter_index = 0
+ for i, (name, exts) in enumerate(sorted_filetypes):
+ ext_list = ';'.join(['*.%s' % ext for ext in exts])
+ extensions.append(exts[0])
+ wildcard = '%s (%s)|%s' % (name, ext_list, ext_list)
+ if default_filetype in exts:
+ filter_index = i
+ wildcards.append(wildcard)
+ wildcards = '|'.join(wildcards)
+ return wildcards, extensions, filter_index
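+ # Illustrative shape of the result (names depend on registered filetypes):
+ # wildcards -> 'Portable Network Graphics (*.png)|*.png|...'
+ # extensions -> ['png', ...] (first extension of each group)
+ # filter_index -> position of the default filetype's group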
+
+ def gui_repaint(self, drawDC=None, origin='WX'):
+ """
+ Update the displayed image on the GUI canvas, using the supplied
+ wx.PaintDC device context.
+
+ The 'WXAgg' backend sets origin accordingly.
+ """
+ DEBUG_MSG("gui_repaint()", 1, self)
+ if self.IsShownOnScreen():
+ if not drawDC:
+ # not called from OnPaint; use a ClientDC instead
+ drawDC = wx.ClientDC(self)
+
+ # following is for 'WX' backend on Windows
+ # the bitmap can not be in use by another DC,
+ # see GraphicsContextWx._cache
+ if wx.Platform == '__WXMSW__' and origin == 'WX':
+ img = self.bitmap.ConvertToImage()
+ bmp = img.ConvertToBitmap()
+ drawDC.DrawBitmap(bmp, 0, 0)
+ else:
+ drawDC.DrawBitmap(self.bitmap, 0, 0)
+
+ filetypes = FigureCanvasBase.filetypes.copy()
+ filetypes['bmp'] = 'Windows bitmap'
+ filetypes['jpeg'] = 'JPEG'
+ filetypes['jpg'] = 'JPEG'
+ filetypes['pcx'] = 'PCX'
+ filetypes['png'] = 'Portable Network Graphics'
+ filetypes['tif'] = 'Tagged Image Format File'
+ filetypes['tiff'] = 'Tagged Image Format File'
+ filetypes['xpm'] = 'X pixmap'
+
+ def print_figure(self, filename, *args, **kwargs):
+ super(_FigureCanvasWxBase, self).print_figure(
+ filename, *args, **kwargs)
+ # Restore the current view; this is needed because the artist
+ # contains() methods rely on particular attributes of the rendered
+ # figure for determining things like bounding boxes.
+ if self._isDrawn:
+ self.draw()
+
+ def _onPaint(self, evt):
+ """
+ Called when a wx.PaintEvent (wx.EVT_PAINT) is generated.
+ """
+
+ DEBUG_MSG("_onPaint()", 1, self)
+ drawDC = wx.PaintDC(self)
+ if not self._isDrawn:
+ self.draw(drawDC=drawDC)
+ else:
+ self.gui_repaint(drawDC=drawDC)
+ drawDC.Destroy()
+
+ def _onSize(self, evt):
+ """
+ Called when a wx.SizeEvent (wx.EVT_SIZE) is generated.
+
+ In this application we attempt to resize to fit the window, so it
+ is better to take the performance hit and redraw the whole window.
+ """
+
+ DEBUG_MSG("_onSize()", 2, self)
+ sz = self.GetParent().GetSizer()
+ if sz:
+ si = sz.GetItem(self)
+ if sz and si and not si.Proportion and not si.Flag & wx.EXPAND:
+ # managed by a sizer, but with a fixed size
+ size = self.GetMinSize()
+ else:
+ # variable size
+ size = self.GetClientSize()
+ if getattr(self, "_width", None):
+ if size == (self._width, self._height):
+ # no change in size
+ return
+ self._width, self._height = size
+ # Create a new, correctly sized bitmap
+ self.bitmap = wxc.EmptyBitmap(self._width, self._height)
+
+ self._isDrawn = False
+
+ if self._width <= 1 or self._height <= 1:
+ return # Empty figure
+
+ dpival = self.figure.dpi
+ winch = self._width / dpival
+ hinch = self._height / dpival
+ self.figure.set_size_inches(winch, hinch, forward=False)
+
+ # Rendering will happen on the associated paint event
+ # so no need to do anything here except to make sure
+ # the whole background is repainted.
+ self.Refresh(eraseBackground=False)
+ FigureCanvasBase.resize_event(self)
+
+ def _get_key(self, evt):
+
+ keyval = evt.KeyCode
+ if keyval in self.keyvald:
+ key = self.keyvald[keyval]
+ elif keyval < 256:
+ key = chr(keyval)
+ # wx always returns an uppercase character, so make it lowercase if
+ # the shift key is not pressed (NOTE: this will not handle Caps Lock)
+ if not evt.ShiftDown():
+ key = key.lower()
+ else:
+ key = None
+
+ for meth, prefix in (
+ [evt.AltDown, 'alt'],
+ [evt.ControlDown, 'ctrl'], ):
+ if meth():
+ key = '{0}+{1}'.format(prefix, key)
+
+ return key
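+ # Illustrative mappings produced above: plain 'a' -> 'a', Shift+A -> 'A',
+ # Ctrl+S -> 'ctrl+s', Alt+F1 -> 'alt+f1'.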
+
+ def _onKeyDown(self, evt):
+ """Capture key press."""
+ key = self._get_key(evt)
+ FigureCanvasBase.key_press_event(self, key, guiEvent=evt)
+ if self:
+ evt.Skip()
+
+ def _onKeyUp(self, evt):
+ """Release key."""
+ key = self._get_key(evt)
+ FigureCanvasBase.key_release_event(self, key, guiEvent=evt)
+ if self:
+ evt.Skip()
+
+ def _set_capture(self, capture=True):
+ """control wx mouse capture """
+ if self.HasCapture():
+ self.ReleaseMouse()
+ if capture:
+ self.CaptureMouse()
+
+ def _onCaptureLost(self, evt):
+ """Capture changed or lost"""
+ self._set_capture(False)
+
+ def _onRightButtonDown(self, evt):
+ """Start measuring on an axis."""
+ x = evt.GetX()
+ y = self.figure.bbox.height - evt.GetY()
+ evt.Skip()
+ self._set_capture(True)
+ FigureCanvasBase.button_press_event(self, x, y, 3, guiEvent=evt)
+
+ def _onRightButtonDClick(self, evt):
+ """Start measuring on an axis."""
+ x = evt.GetX()
+ y = self.figure.bbox.height - evt.GetY()
+ evt.Skip()
+ self._set_capture(True)
+ FigureCanvasBase.button_press_event(self, x, y, 3,
+ dblclick=True, guiEvent=evt)
+
+ def _onRightButtonUp(self, evt):
+ """End measuring on an axis."""
+ x = evt.GetX()
+ y = self.figure.bbox.height - evt.GetY()
+ evt.Skip()
+ self._set_capture(False)
+ FigureCanvasBase.button_release_event(self, x, y, 3, guiEvent=evt)
+
+ def _onLeftButtonDown(self, evt):
+ """Start measuring on an axis."""
+ x = evt.GetX()
+ y = self.figure.bbox.height - evt.GetY()
+ evt.Skip()
+ self._set_capture(True)
+ FigureCanvasBase.button_press_event(self, x, y, 1, guiEvent=evt)
+
+ def _onLeftButtonDClick(self, evt):
+ """Start measuring on an axis."""
+ x = evt.GetX()
+ y = self.figure.bbox.height - evt.GetY()
+ evt.Skip()
+ self._set_capture(True)
+ FigureCanvasBase.button_press_event(self, x, y, 1,
+ dblclick=True, guiEvent=evt)
+
+ def _onLeftButtonUp(self, evt):
+ """End measuring on an axis."""
+ x = evt.GetX()
+ y = self.figure.bbox.height - evt.GetY()
+ evt.Skip()
+ self._set_capture(False)
+ FigureCanvasBase.button_release_event(self, x, y, 1, guiEvent=evt)
+
+ # Add middle button events
+ def _onMiddleButtonDown(self, evt):
+ """Start measuring on an axis."""
+ x = evt.GetX()
+ y = self.figure.bbox.height - evt.GetY()
+ evt.Skip()
+ self._set_capture(True)
+ FigureCanvasBase.button_press_event(self, x, y, 2, guiEvent=evt)
+
+ def _onMiddleButtonDClick(self, evt):
+ """Start measuring on an axis."""
+ x = evt.GetX()
+ y = self.figure.bbox.height - evt.GetY()
+ evt.Skip()
+ self._set_capture(True)
+ FigureCanvasBase.button_press_event(self, x, y, 2,
+ dblclick=True, guiEvent=evt)
+
+ def _onMiddleButtonUp(self, evt):
+ """End measuring on an axis."""
+ x = evt.GetX()
+ y = self.figure.bbox.height - evt.GetY()
+ evt.Skip()
+ self._set_capture(False)
+ FigureCanvasBase.button_release_event(self, x, y, 2, guiEvent=evt)
+
+ def _onMouseWheel(self, evt):
+ """Translate mouse wheel events into matplotlib events"""
+
+ # Determine mouse location
+ x = evt.GetX()
+ y = self.figure.bbox.height - evt.GetY()
+
+ # Convert delta/rotation/rate into a floating point step size
+ delta = evt.GetWheelDelta()
+ rotation = evt.GetWheelRotation()
+ rate = evt.GetLinesPerAction()
+ step = rate * rotation / delta
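+ # e.g. one standard wheel notch (rotation == delta == 120) with three
+ # lines per action yields step == 3.0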
+
+ # Done handling event
+ evt.Skip()
+
+ # macOS delivers two events for every wheel event,
+ # so we need to skip every second one.
+ if wx.Platform == '__WXMAC__':
+ if not hasattr(self, '_skipwheelevent'):
+ self._skipwheelevent = True
+ elif self._skipwheelevent:
+ self._skipwheelevent = False
+ return # Return without processing event
+ else:
+ self._skipwheelevent = True
+
+ # Convert to mpl event
+ FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt)
+
+ def _onMotion(self, evt):
+ """Start measuring on an axis."""
+
+ x = evt.GetX()
+ y = self.figure.bbox.height - evt.GetY()
+ evt.Skip()
+ FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt)
+
+ def _onLeave(self, evt):
+ """Mouse has left the window."""
+
+ evt.Skip()
+ FigureCanvasBase.leave_notify_event(self, guiEvent=evt)
+
+ def _onEnter(self, evt):
+ """Mouse has entered the window."""
+ FigureCanvasBase.enter_notify_event(self, guiEvent=evt)
+
+
+class FigureCanvasWx(_FigureCanvasWxBase):
+ # Rendering to a Wx canvas using the deprecated Wx renderer.
+
+ def draw(self, drawDC=None):
+ """
+ Render the figure to the canvas bitmap using a RendererWx instance
+ and repaint the GUI canvas.
+ """
+ DEBUG_MSG("draw()", 1, self)
+ self.renderer = RendererWx(self.bitmap, self.figure.dpi)
+ self.figure.draw(self.renderer)
+ self._isDrawn = True
+ self.gui_repaint(drawDC=drawDC)
+
+ def print_bmp(self, filename, *args, **kwargs):
+ return self._print_image(filename, wx.BITMAP_TYPE_BMP, *args, **kwargs)
+
+ if not _has_pil:
+ def print_jpeg(self, filename, *args, **kwargs):
+ return self._print_image(filename, wx.BITMAP_TYPE_JPEG,
+ *args, **kwargs)
+ print_jpg = print_jpeg
+
+ def print_pcx(self, filename, *args, **kwargs):
+ return self._print_image(filename, wx.BITMAP_TYPE_PCX, *args, **kwargs)
+
+ def print_png(self, filename, *args, **kwargs):
+ return self._print_image(filename, wx.BITMAP_TYPE_PNG, *args, **kwargs)
+
+ if not _has_pil:
+ def print_tiff(self, filename, *args, **kwargs):
+ return self._print_image(filename, wx.BITMAP_TYPE_TIF,
+ *args, **kwargs)
+ print_tif = print_tiff
+
+ def print_xpm(self, filename, *args, **kwargs):
+ return self._print_image(filename, wx.BITMAP_TYPE_XPM, *args, **kwargs)
+
+ def _print_image(self, filename, filetype, *args, **kwargs):
+ origBitmap = self.bitmap
+
+ l, b, width, height = self.figure.bbox.bounds
+ width = int(math.ceil(width))
+ height = int(math.ceil(height))
+
+ self.bitmap = wxc.EmptyBitmap(width, height)
+
+ renderer = RendererWx(self.bitmap, self.figure.dpi)
+
+ gc = renderer.new_gc()
+
+ self.figure.draw(renderer)
+
+ # image is the object that we call SaveFile on.
+ image = self.bitmap
+ # set the JPEG quality appropriately. Unfortunately, it is only
+ # possible to set the quality on a wx.Image object. So if we
+ # are saving a JPEG, convert the wx.Bitmap to a wx.Image,
+ # and set the quality.
+ if filetype == wx.BITMAP_TYPE_JPEG:
+ jpeg_quality = kwargs.get('quality',
+ rcParams['savefig.jpeg_quality'])
+ image = self.bitmap.ConvertToImage()
+ image.SetOption(wx.IMAGE_OPTION_QUALITY, str(jpeg_quality))
+
+ # Now that we have rendered into the bitmap, save it
+ # to the appropriate file type and clean up
+ if isinstance(filename, six.string_types):
+ if not image.SaveFile(filename, filetype):
+ DEBUG_MSG('print_figure() file save error', 4, self)
+ raise RuntimeError(
+ 'Could not save figure to %s\n' %
+ (filename))
+ elif is_writable_file_like(filename):
+ if not isinstance(image, wx.Image):
+ image = image.ConvertToImage()
+ if not image.SaveStream(filename, filetype):
+ DEBUG_MSG('print_figure() file save error', 4, self)
+ raise RuntimeError(
+ 'Could not save figure to %s\n' %
+ (filename))
+
+ # Restore everything to normal
+ self.bitmap = origBitmap
+
+ # Note: draw is required here since bits of state about the
+ # last renderer are strewn about the artist draw methods. Do
+ # not remove the draw without first verifying that these have
+ # been cleaned up. The artist contains() methods will fail
+ # otherwise.
+ if self._isDrawn:
+ self.draw()
+ self.Refresh()
+
+
+########################################################################
+#
+# The following functions and classes are for pylab compatibility
+# mode (matplotlib.pylab) and implement figure managers, etc...
+#
+########################################################################
+
+
+class FigureFrameWx(wx.Frame):
+ def __init__(self, num, fig):
+ # On non-Windows platform, explicitly set the position - fix
+ # positioning bug on some Linux platforms
+ if wx.Platform == '__WXMSW__':
+ pos = wx.DefaultPosition
+ else:
+ pos = wx.Point(20, 20)
+ l, b, w, h = fig.bbox.bounds
+ wx.Frame.__init__(self, parent=None, id=-1, pos=pos,
+ title="Figure %d" % num)
+ # Frame will be sized later by the Fit method
+ DEBUG_MSG("__init__()", 1, self)
+ self.num = num
+
+ statbar = StatusBarWx(self)
+ self.SetStatusBar(statbar)
+ self.canvas = self.get_canvas(fig)
+ self.canvas.SetInitialSize(wx.Size(fig.bbox.width, fig.bbox.height))
+ self.canvas.SetFocus()
+ self.sizer = wx.BoxSizer(wx.VERTICAL)
+ self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
+ # By adding toolbar in sizer, we are able to put it at the bottom
+ # of the frame - so appearance is closer to GTK version
+
+ self.toolbar = self._get_toolbar(statbar)
+
+ if self.toolbar is not None:
+ self.toolbar.Realize()
+ # On Windows platform, default window size is incorrect, so set
+ # toolbar width to figure width.
+ if wxc.is_phoenix:
+ tw, th = self.toolbar.GetSize()
+ fw, fh = self.canvas.GetSize()
+ else:
+ tw, th = self.toolbar.GetSizeTuple()
+ fw, fh = self.canvas.GetSizeTuple()
+ # By adding toolbar in sizer, we are able to put it at the bottom
+ # of the frame - so appearance is closer to GTK version.
+ self.toolbar.SetSize(wx.Size(fw, th))
+ self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
+ self.SetSizer(self.sizer)
+ self.Fit()
+
+ self.canvas.SetMinSize((2, 2))
+
+ # give the window a matplotlib icon rather than the stock one.
+ # This is not currently working on Linux and is untested elsewhere.
+ # icon_path = os.path.join(matplotlib.rcParams['datapath'],
+ # 'images', 'matplotlib.png')
+ # icon = wx.IconFromBitmap(wx.Bitmap(icon_path))
+ # for xpm type icons try:
+ # icon = wx.Icon(icon_path, wx.BITMAP_TYPE_XPM)
+ # self.SetIcon(icon)
+
+ self.figmgr = FigureManagerWx(self.canvas, num, self)
+
+ self.Bind(wx.EVT_CLOSE, self._onClose)
+
+ def _get_toolbar(self, statbar):
+ if rcParams['toolbar'] == 'toolbar2':
+ toolbar = NavigationToolbar2Wx(self.canvas)
+ toolbar.set_status_bar(statbar)
+ else:
+ toolbar = None
+ return toolbar
+
+ def get_canvas(self, fig):
+ return FigureCanvasWx(self, -1, fig)
+
+ def get_figure_manager(self):
+ DEBUG_MSG("get_figure_manager()", 1, self)
+ return self.figmgr
+
+ def _onClose(self, evt):
+ DEBUG_MSG("onClose()", 1, self)
+ self.canvas.close_event()
+ self.canvas.stop_event_loop()
+ Gcf.destroy(self.num)
+ # self.Destroy()
+
+ def GetToolBar(self):
+ """Override wxFrame::GetToolBar as we don't have a managed toolbar."""
+ return self.toolbar
+
+ def Destroy(self, *args, **kwargs):
+ try:
+ self.canvas.mpl_disconnect(self.toolbar._idDrag)
+ # Rationale for line above: see issue 2941338.
+ except AttributeError:
+ pass # classic toolbar lacks the attribute
+ if not self.IsBeingDeleted():
+ wx.Frame.Destroy(self, *args, **kwargs)
+ if self.toolbar is not None:
+ self.toolbar.Destroy()
+ wxapp = wx.GetApp()
+ if wxapp:
+ wxapp.Yield()
+ return True
+
+
+class FigureManagerWx(FigureManagerBase):
+ """
+ This class contains the FigureCanvas and GUI frame
+
+ It is instantiated by GcfWx whenever a new figure is created. GcfWx is
+ responsible for managing multiple instances of FigureManagerWx.
+
+ Attributes
+ ----------
+ canvas : `FigureCanvas`
+ a FigureCanvasWx(wx.Panel) instance
+ window : wxFrame
+ a wxFrame instance - wxpython.org/Phoenix/docs/html/Frame.html
+
+ """
+
+ def __init__(self, canvas, num, frame):
+ DEBUG_MSG("__init__()", 1, self)
+ FigureManagerBase.__init__(self, canvas, num)
+ self.frame = frame
+ self.window = frame
+
+ self.tb = frame.GetToolBar()
+ self.toolbar = self.tb # consistent with other backends
+
+ def notify_axes_change(fig):
+ 'this will be called whenever the current axes is changed'
+ if self.tb is not None:
+ self.tb.update()
+ self.canvas.figure.add_axobserver(notify_axes_change)
+
+ def show(self):
+ self.frame.Show()
+ self.canvas.draw()
+
+ def destroy(self, *args):
+ DEBUG_MSG("destroy()", 1, self)
+ self.frame.Destroy()
+ wxapp = wx.GetApp()
+ if wxapp:
+ wxapp.Yield()
+
+ def get_window_title(self):
+ return self.window.GetTitle()
+
+ def set_window_title(self, title):
+ self.window.SetTitle(title)
+
+ def resize(self, width, height):
+ 'Set the canvas size in pixels'
+ self.canvas.SetInitialSize(wx.Size(width, height))
+ self.window.GetSizer().Fit(self.window)
+
+# Identifiers for toolbar controls - images_wx contains bitmaps for the images
+# used in the controls. wxWindows does not provide any stock images, so I've
+# 'stolen' those from GTK2, and transformed them into the appropriate format.
+# import images_wx
+
+
+_NTB_AXISMENU = wx.NewId()
+_NTB_AXISMENU_BUTTON = wx.NewId()
+_NTB_X_PAN_LEFT = wx.NewId()
+_NTB_X_PAN_RIGHT = wx.NewId()
+_NTB_X_ZOOMIN = wx.NewId()
+_NTB_X_ZOOMOUT = wx.NewId()
+_NTB_Y_PAN_UP = wx.NewId()
+_NTB_Y_PAN_DOWN = wx.NewId()
+_NTB_Y_ZOOMIN = wx.NewId()
+_NTB_Y_ZOOMOUT = wx.NewId()
+# _NTB_SUBPLOT =wx.NewId()
+_NTB_SAVE = wx.NewId()
+_NTB_CLOSE = wx.NewId()
+
+
+def _load_bitmap(filename):
+ """
+ Load a bitmap file from the backends/images subdirectory in which the
+ matplotlib library is installed. The filename parameter should not
+ contain any path information as this is determined automatically.
+
+ Returns a wx.Bitmap object
+ """
+
+ basedir = os.path.join(rcParams['datapath'], 'images')
+
+ bmpFilename = os.path.normpath(os.path.join(basedir, filename))
+ if not os.path.exists(bmpFilename):
+ raise IOError('Could not find bitmap file "%s"; dying' % bmpFilename)
+
+ bmp = wx.Bitmap(bmpFilename)
+ return bmp
+
+
+class MenuButtonWx(wx.Button):
+ """
+ wxPython does not permit a menu to be incorporated directly into a toolbar.
+ This class simulates the effect by associating a pop-up menu with a button
+ in the toolbar, and managing this as though it were a menu.
+ """
+
+ def __init__(self, parent):
+
+ wx.Button.__init__(self, parent, _NTB_AXISMENU_BUTTON, "Axes: ",
+ style=wx.BU_EXACTFIT)
+ self._toolbar = parent
+ self._menu = wx.Menu()
+ self._axisId = []
+ # First two menu items never change...
+ self._allId = wx.NewId()
+ self._invertId = wx.NewId()
+ self._menu.Append(self._allId, "All", "Select all axes", False)
+ self._menu.Append(self._invertId, "Invert", "Invert axes selected",
+ False)
+ self._menu.AppendSeparator()
+
+ self.Bind(wx.EVT_BUTTON, self._onMenuButton, id=_NTB_AXISMENU_BUTTON)
+ self.Bind(wx.EVT_MENU, self._handleSelectAllAxes, id=self._allId)
+ self.Bind(wx.EVT_MENU, self._handleInvertAxesSelected,
+ id=self._invertId)
+
+ def Destroy(self):
+ self._menu.Destroy()
+ # Destroy the button itself via the base class; calling self.Destroy()
+ # here would recurse indefinitely.
+ wx.Button.Destroy(self)
+
+ def _onMenuButton(self, evt):
+ """Handle menu button pressed."""
+ if wxc.is_phoenix:
+ x, y = self.GetPosition()
+ w, h = self.GetSize()
+ else:
+ x, y = self.GetPositionTuple()
+ w, h = self.GetSizeTuple()
+ self.PopupMenuXY(self._menu, x, y + h - 4)
+ # When the menu returns, indicate the selection in the button
+ evt.Skip()
+
+ def _handleSelectAllAxes(self, evt):
+ """Called when the 'select all axes' menu item is selected."""
+ if len(self._axisId) == 0:
+ return
+ for i in range(len(self._axisId)):
+ self._menu.Check(self._axisId[i], True)
+ self._toolbar.set_active(self.getActiveAxes())
+ evt.Skip()
+
+ def _handleInvertAxesSelected(self, evt):
+ """Called when the invert all menu item is selected"""
+ if len(self._axisId) == 0:
+ return
+ for i in range(len(self._axisId)):
+ if self._menu.IsChecked(self._axisId[i]):
+ self._menu.Check(self._axisId[i], False)
+ else:
+ self._menu.Check(self._axisId[i], True)
+ self._toolbar.set_active(self.getActiveAxes())
+ evt.Skip()
+
+ def _onMenuItemSelected(self, evt):
+ """Called whenever one of the specific axis menu items is selected"""
+ current = self._menu.IsChecked(evt.GetId())
+ if current:
+ new = False
+ else:
+ new = True
+ self._menu.Check(evt.GetId(), new)
+ # Lines above would be deleted based on svn tracker ID 2841525;
+ # not clear whether this matters or not.
+ self._toolbar.set_active(self.getActiveAxes())
+ evt.Skip()
+
+ def updateAxes(self, maxAxis):
+ """Ensure that there are entries for *maxAxis* axes in the menu
+ (selected by default)."""
+ if maxAxis > len(self._axisId):
+ for i in range(len(self._axisId) + 1, maxAxis + 1, 1):
+ menuId = wx.NewId()
+ self._axisId.append(menuId)
+ self._menu.Append(menuId, "Axis %d" % i,
+ "Select axis %d" % i,
+ True)
+ self._menu.Check(menuId, True)
+ self.Bind(wx.EVT_MENU, self._onMenuItemSelected, id=menuId)
+ elif maxAxis < len(self._axisId):
+ for menuId in self._axisId[maxAxis:]:
+ self._menu.Delete(menuId)
+ self._axisId = self._axisId[:maxAxis]
+ self._toolbar.set_active(list(xrange(maxAxis)))
+
+ def getActiveAxes(self):
+ """Return a list of the selected axes."""
+ active = []
+ for i in range(len(self._axisId)):
+ if self._menu.IsChecked(self._axisId[i]):
+ active.append(i)
+ return active
+
+ def updateButtonText(self, lst):
+ """Update the list of selected axes in the menu button."""
+ self.SetLabel(
+ 'Axes: ' + ','.join('%d' % (e + 1) for e in lst))
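+ # Illustrative example: updateButtonText([0, 2]) sets the label to
+ # 'Axes: 1,3'.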
+
+
+cursord = {
+ cursors.MOVE: wx.CURSOR_HAND,
+ cursors.HAND: wx.CURSOR_HAND,
+ cursors.POINTER: wx.CURSOR_ARROW,
+ cursors.SELECT_REGION: wx.CURSOR_CROSS,
+ cursors.WAIT: wx.CURSOR_WAIT,
+}
+
+
+@cbook.deprecated("2.2")
+class SubplotToolWX(wx.Frame):
+ def __init__(self, targetfig):
+ wx.Frame.__init__(self, None, -1, "Configure subplots")
+
+ toolfig = Figure((6, 3))
+ canvas = FigureCanvasWx(self, -1, toolfig)
+
+ # Create a figure manager to manage things
+ figmgr = FigureManager(canvas, 1, self)
+
+ # Now put all into a sizer
+ sizer = wx.BoxSizer(wx.VERTICAL)
+ # This way of adding to sizer allows resizing
+ sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
+ self.SetSizer(sizer)
+ self.Fit()
+ tool = SubplotTool(targetfig, toolfig)
+
+
+class NavigationToolbar2Wx(NavigationToolbar2, wx.ToolBar):
+ def __init__(self, canvas):
+ wx.ToolBar.__init__(self, canvas.GetParent(), -1)
+ NavigationToolbar2.__init__(self, canvas)
+ self.canvas = canvas
+ self._idle = True
+ self.statbar = None
+ self.prevZoomRect = None
+ # for now, use alternate zoom-rectangle drawing on all
+ # Macs. N.B. In future versions of wx it may be possible to
+ # detect Retina displays with window.GetContentScaleFactor()
+ # and/or dc.GetContentScaleFactor()
+ self.retinaFix = 'wxMac' in wx.PlatformInfo
+
+ def get_canvas(self, frame, fig):
+ return type(self.canvas)(frame, -1, fig)
+
+ def _init_toolbar(self):
+ DEBUG_MSG("_init_toolbar", 1, self)
+
+ self._parent = self.canvas.GetParent()
+
+ self.wx_ids = {}
+ for text, tooltip_text, image_file, callback in self.toolitems:
+ if text is None:
+ self.AddSeparator()
+ continue
+ self.wx_ids[text] = wx.NewId()
+ wxc._AddTool(self, self.wx_ids, text,
+ _load_bitmap(image_file + '.png'),
+ tooltip_text)
+
+ self.Bind(wx.EVT_TOOL, getattr(self, callback),
+ id=self.wx_ids[text])
+
+ self.Realize()
+
+ def zoom(self, *args):
+ self.ToggleTool(self.wx_ids['Pan'], False)
+ NavigationToolbar2.zoom(self, *args)
+
+ def pan(self, *args):
+ self.ToggleTool(self.wx_ids['Zoom'], False)
+ NavigationToolbar2.pan(self, *args)
+
+ def configure_subplots(self, evt):
+ frame = wx.Frame(None, -1, "Configure subplots")
+
+ toolfig = Figure((6, 3))
+ canvas = self.get_canvas(frame, toolfig)
+
+ # Create a figure manager to manage things
+ figmgr = FigureManager(canvas, 1, frame)
+
+ # Now put all into a sizer
+ sizer = wx.BoxSizer(wx.VERTICAL)
+ # This way of adding to sizer allows resizing
+ sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
+ frame.SetSizer(sizer)
+ frame.Fit()
+ tool = SubplotTool(self.canvas.figure, toolfig)
+ frame.Show()
+
+ def save_figure(self, *args):
+ # Fetch the required filename and file type.
+ filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards()
+ default_file = self.canvas.get_default_filename()
+ dlg = wx.FileDialog(self._parent, "Save to file", "", default_file,
+ filetypes,
+ wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
+ dlg.SetFilterIndex(filter_index)
+ if dlg.ShowModal() == wx.ID_OK:
+ dirname = dlg.GetDirectory()
+ filename = dlg.GetFilename()
+ DEBUG_MSG(
+ 'Save file dir:%s name:%s' %
+ (dirname, filename), 3, self)
+ format = exts[dlg.GetFilterIndex()]
+ basename, ext = os.path.splitext(filename)
+ if ext.startswith('.'):
+ ext = ext[1:]
+ if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format != ext:
+ # looks like they forgot to set the image type drop
+ # down, going with the extension.
+ warnings.warn(
+ 'extension %s did not match the selected '
+ 'image type %s; going with %s' %
+ (ext, format, ext), stacklevel=0)
+ format = ext
+ try:
+ self.canvas.figure.savefig(
+ os.path.join(dirname, filename), format=format)
+ except Exception as e:
+ error_msg_wx(str(e))
+
+ def set_cursor(self, cursor):
+ cursor = wxc.Cursor(cursord[cursor])
+ self.canvas.SetCursor(cursor)
+ self.canvas.Update()
+
+ @cbook.deprecated("2.1", alternative="canvas.draw_idle")
+ def dynamic_update(self):
+ d = self._idle
+ self._idle = False
+ if d:
+ self.canvas.draw()
+ self._idle = True
+
+ def press(self, event):
+ if self._active == 'ZOOM':
+ if not self.retinaFix:
+ self.wxoverlay = wx.Overlay()
+ else:
+ if event.inaxes is not None:
+ self.savedRetinaImage = self.canvas.copy_from_bbox(
+ event.inaxes.bbox)
+ self.zoomStartX = event.xdata
+ self.zoomStartY = event.ydata
+ self.zoomAxes = event.inaxes
+
+ def release(self, event):
+ if self._active == 'ZOOM':
+ # When the mouse is released we reset the overlay and it
+ # restores the former content to the window.
+ if not self.retinaFix:
+ self.wxoverlay.Reset()
+ del self.wxoverlay
+ else:
+ del self.savedRetinaImage
+ if self.prevZoomRect:
+ self.prevZoomRect.pop(0).remove()
+ self.prevZoomRect = None
+ if self.zoomAxes:
+ self.zoomAxes = None
+
+ def draw_rubberband(self, event, x0, y0, x1, y1):
+ if self.retinaFix: # On Macs, use the following code
+ # wx.DCOverlay does not work properly on Retina displays.
+ rubberBandColor = '#C0C0FF'
+ if self.prevZoomRect:
+ self.prevZoomRect.pop(0).remove()
+ self.canvas.restore_region(self.savedRetinaImage)
+ X0, X1 = self.zoomStartX, event.xdata
+ Y0, Y1 = self.zoomStartY, event.ydata
+ lineX = (X0, X0, X1, X1, X0)
+ lineY = (Y0, Y1, Y1, Y0, Y0)
+ self.prevZoomRect = self.zoomAxes.plot(
+ lineX, lineY, '-', color=rubberBandColor)
+ self.zoomAxes.draw_artist(self.prevZoomRect[0])
+ self.canvas.blit(self.zoomAxes.bbox)
+ return
+
+ # Use an Overlay to draw a rubberband-like bounding box.
+
+ dc = wx.ClientDC(self.canvas)
+ odc = wx.DCOverlay(self.wxoverlay, dc)
+ odc.Clear()
+
+ # Mac's DC is already the same as a GCDC, and it causes
+ # problems with the overlay if we try to use an actual
+ # wx.GCDC so don't try it.
+ if 'wxMac' not in wx.PlatformInfo:
+ dc = wx.GCDC(dc)
+
+ height = self.canvas.figure.bbox.height
+ y1 = height - y1
+ y0 = height - y0
+
+ if y1 < y0:
+ y0, y1 = y1, y0
+ if x1 < x0:
+ x0, x1 = x1, x0
+
+ w = x1 - x0
+ h = y1 - y0
+ rect = wx.Rect(x0, y0, w, h)
+
+ rubberBandColor = '#C0C0FF' # or load from config?
+
+ # Set a pen for the border
+ color = wxc.NamedColour(rubberBandColor)
+ dc.SetPen(wx.Pen(color, 1))
+
+ # use the same color, plus alpha for the brush
+ r, g, b, a = color.Get(True)
+ color.Set(r, g, b, 0x60)
+ dc.SetBrush(wx.Brush(color))
+ if wxc.is_phoenix:
+ dc.DrawRectangle(rect)
+ else:
+ dc.DrawRectangleRect(rect)
+
+ def set_status_bar(self, statbar):
+ self.statbar = statbar
+
+ def set_message(self, s):
+ if self.statbar is not None:
+ self.statbar.set_function(s)
+
+ def set_history_buttons(self):
+ can_backward = self._nav_stack._pos > 0
+ can_forward = self._nav_stack._pos < len(self._nav_stack._elements) - 1
+ self.EnableTool(self.wx_ids['Back'], can_backward)
+ self.EnableTool(self.wx_ids['Forward'], can_forward)
+
+
+@cbook.deprecated("2.2", alternative="NavigationToolbar2Wx")
+class Toolbar(NavigationToolbar2Wx):
+ pass
+
+
+class StatusBarWx(wx.StatusBar):
+ """
+ A status bar is added to _FigureFrame to allow measurements and the
+ previously selected scroll function to be displayed as a user
+ convenience.
+ """
+
+ def __init__(self, parent):
+ wx.StatusBar.__init__(self, parent, -1)
+ self.SetFieldsCount(2)
+ self.SetStatusText("None", 1)
+ # self.SetStatusText("Measurement: None", 2)
+ # self.Reposition()
+
+ def set_function(self, string):
+ self.SetStatusText("%s" % string, 1)
+
+ # def set_measurement(self, string):
+ # self.SetStatusText("Measurement: %s" % string, 2)
+
+
+# Tools for matplotlib.backend_managers.ToolManager:
+# for now only SaveFigure, SetCursor and Rubberband are implemented.
+# Once a ToolbarWx is implemented, FigureManagerWx will also need to be
+# modified, similar to pull request #9934.
+
+class SaveFigureWx(backend_tools.SaveFigureBase):
+ def trigger(self, *args):
+ # Fetch the required filename and file type.
+ filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards()
+ default_dir = os.path.expanduser(
+ matplotlib.rcParams['savefig.directory'])
+ default_file = self.canvas.get_default_filename()
+ dlg = wx.FileDialog(self.canvas.GetTopLevelParent(), "Save to file",
+ default_dir, default_file, filetypes,
+ wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
+ dlg.SetFilterIndex(filter_index)
+ if dlg.ShowModal() != wx.ID_OK:
+ return
+
+ dirname = dlg.GetDirectory()
+ filename = dlg.GetFilename()
+ DEBUG_MSG('Save file dir:%s name:%s' % (dirname, filename), 3, self)
+ format = exts[dlg.GetFilterIndex()]
+ basename, ext = os.path.splitext(filename)
+ if ext.startswith('.'):
+ ext = ext[1:]
+ if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format != ext:
+ # looks like they forgot to set the image type drop
+ # down, going with the extension.
+ warnings.warn(
+ 'extension %s did not match the selected '
+ 'image type %s; going with %s' %
+ (ext, format, ext), stacklevel=0)
+ format = ext
+ if default_dir != "":
+ matplotlib.rcParams['savefig.directory'] = dirname
+ try:
+ self.canvas.figure.savefig(
+ os.path.join(dirname, filename), format=format)
+ except Exception as e:
+ error_msg_wx(str(e))
+
+
+class SetCursorWx(backend_tools.SetCursorBase):
+ def set_cursor(self, cursor):
+ cursor = wxc.Cursor(cursord[cursor])
+ self.canvas.SetCursor(cursor)
+ self.canvas.Update()
+
+
+if 'wxMac' not in wx.PlatformInfo:
+ # on most platforms, use overlay
+ class RubberbandWx(backend_tools.RubberbandBase):
+ def __init__(self, *args, **kwargs):
+ backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
+ self.wxoverlay = None
+
+ def draw_rubberband(self, x0, y0, x1, y1):
+ # Use an Overlay to draw a rubberband-like bounding box.
+ if self.wxoverlay is None:
+ self.wxoverlay = wx.Overlay()
+ dc = wx.ClientDC(self.canvas)
+ odc = wx.DCOverlay(self.wxoverlay, dc)
+ odc.Clear()
+
+ dc = wx.GCDC(dc)
+
+ height = self.canvas.figure.bbox.height
+ y1 = height - y1
+ y0 = height - y0
+
+ if y1 < y0:
+ y0, y1 = y1, y0
+ if x1 < x0:
+ x0, x1 = x1, x0
+
+ w = x1 - x0
+ h = y1 - y0
+ rect = wx.Rect(x0, y0, w, h)
+
+ rubberBandColor = '#C0C0FF' # or load from config?
+
+ # Set a pen for the border
+ color = wxc.NamedColour(rubberBandColor)
+ dc.SetPen(wx.Pen(color, 1))
+
+ # use the same color, plus alpha for the brush
+ r, g, b, a = color.Get(True)
+ color.Set(r, g, b, 0x60)
+ dc.SetBrush(wx.Brush(color))
+ if wxc.is_phoenix:
+ dc.DrawRectangle(rect)
+ else:
+ dc.DrawRectangleRect(rect)
+
+ def remove_rubberband(self):
+ if self.wxoverlay is None:
+ return
+ self.wxoverlay.Reset()
+ self.wxoverlay = None
+
+else:
+ # on Mac OS retina displays DCOverlay does not work
+ # and dc.SetLogicalFunction does not have an effect on any display
+ # the workaround is to blit the full image for remove_rubberband
+ class RubberbandWx(backend_tools.RubberbandBase):
+ def __init__(self, *args, **kwargs):
+ backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
+ self._rect = None
+
+ def draw_rubberband(self, x0, y0, x1, y1):
+ dc = wx.ClientDC(self.canvas)
+ # this would be required if the Canvas is a ScrolledWindow,
+ # which is not the case for now
+ # self.PrepareDC(dc)
+
+ # delete old rubberband
+ if self._rect:
+ self.remove_rubberband(dc)
+
+ # draw new rubberband
+ dc.SetPen(wx.Pen(wx.BLACK, 1, wx.SOLID))
+ dc.SetBrush(wx.TRANSPARENT_BRUSH)
+ self._rect = (x0, self.canvas._height-y0, x1-x0, -y1+y0)
+ if wxc.is_phoenix:
+ dc.DrawRectangle(self._rect)
+ else:
+ dc.DrawRectangleRect(self._rect)
+
+ def remove_rubberband(self, dc=None):
+ if not self._rect:
+ return
+ if self.canvas.bitmap:
+ if dc is None:
+ dc = wx.ClientDC(self.canvas)
+ dc.DrawBitmap(self.canvas.bitmap, 0, 0)
+ # for testing the method on Windows, use this code instead:
+ # img = self.canvas.bitmap.ConvertToImage()
+ # bmp = img.ConvertToBitmap()
+ # dc.DrawBitmap(bmp, 0, 0)
+ self._rect = None
+
+
+backend_tools.ToolSaveFigure = SaveFigureWx
+backend_tools.ToolSetCursor = SetCursorWx
+backend_tools.ToolRubberband = RubberbandWx
+
+
+# < Additions for printing support: Matt Newville
+
+class PrintoutWx(wx.Printout):
+ """
+ Simple wrapper around wx Printout class -- all the real work
+ here is scaling the matplotlib canvas bitmap to the current
+ printer's definition.
+ """
+
+ def __init__(self, canvas, width=5.5, margin=0.5, title='matplotlib'):
+ wx.Printout.__init__(self, title=title)
+ self.canvas = canvas
+ # width, in inches of output figure (approximate)
+ self.width = width
+ self.margin = margin
+
+ def HasPage(self, page):
+ # currently only supports a single-page print
+ return page == 1
+
+ def GetPageInfo(self):
+ return (1, 1, 1, 1)
+
+ def OnPrintPage(self, page):
+ self.canvas.draw()
+
+ dc = self.GetDC()
+ (ppw, pph) = self.GetPPIPrinter() # printer's pixels per in
+ (pgw, pgh) = self.GetPageSizePixels() # page size in pixels
+ (dcw, dch) = dc.GetSize()
+ if wxc.is_phoenix:
+ (grw, grh) = self.canvas.GetSize()
+ else:
+ (grw, grh) = self.canvas.GetSizeTuple()
+
+ # save current figure dpi resolution and bg color,
+ # so that we can temporarily set them to the dpi of
+ # the printer, and the bg color to white
+ bgcolor = self.canvas.figure.get_facecolor()
+ fig_dpi = self.canvas.figure.dpi
+
+ # draw the bitmap, scaled appropriately
+ vscale = float(ppw) / fig_dpi
+
+ # set figure resolution,bg color for printer
+ self.canvas.figure.dpi = ppw
+ self.canvas.figure.set_facecolor('#FFFFFF')
+
+ renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi)
+ self.canvas.figure.draw(renderer)
+ self.canvas.bitmap.SetWidth(
+ int(self.canvas.bitmap.GetWidth() * vscale))
+ self.canvas.bitmap.SetHeight(
+ int(self.canvas.bitmap.GetHeight() * vscale))
+ self.canvas.draw()
+
+ # page may need additional scaling on preview
+ page_scale = 1.0
+ if self.IsPreview():
+ page_scale = float(dcw) / pgw
+
+ # get margin in pixels = (margin in inches) * (pixels/inch)
+ top_margin = int(self.margin * pph * page_scale)
+ left_margin = int(self.margin * ppw * page_scale)
+
+ # set scale so that width of output is self.width inches
+ # (assuming grw is size of graph in inches....)
+ user_scale = (self.width * fig_dpi * page_scale) / float(grw)
+
+ dc.SetDeviceOrigin(left_margin, top_margin)
+ dc.SetUserScale(user_scale, user_scale)
+
+ # this cute little number avoids API inconsistencies in wx
+ try:
+ dc.DrawBitmap(self.canvas.bitmap, 0, 0)
+ except Exception:
+ try:
+ dc.DrawBitmap(self.canvas.bitmap, (0, 0))
+ except Exception:
+ pass
+
+ # restore original figure resolution
+ self.canvas.figure.set_facecolor(bgcolor)
+ self.canvas.figure.dpi = fig_dpi
+ self.canvas.draw()
+ return True
+# >
+
+
+@_Backend.export
+class _BackendWx(_Backend):
+ FigureCanvas = FigureCanvasWx
+ FigureManager = FigureManagerWx
+ _frame_class = FigureFrameWx
+
+ @staticmethod
+ def trigger_manager_draw(manager):
+ manager.canvas.draw_idle()
+
+ @classmethod
+ def new_figure_manager(cls, num, *args, **kwargs):
+ # Create a wx.App instance if one has not been created so far.
+ wxapp = wx.GetApp()
+ if wxapp is None:
+ wxapp = wx.App(False)
+ wxapp.SetExitOnFrameDelete(True)
+ # Retain a reference to the app object so that it does not get
+ # garbage collected.
+ _BackendWx._theWxApp = wxapp
+ return super(_BackendWx, cls).new_figure_manager(num, *args, **kwargs)
+
+ @classmethod
+ def new_figure_manager_given_figure(cls, num, figure):
+ frame = cls._frame_class(num, figure)
+ figmgr = frame.get_figure_manager()
+ if matplotlib.is_interactive():
+ figmgr.frame.Show()
+ figure.canvas.draw_idle()
+ return figmgr
+
+ @staticmethod
+ def mainloop():
+ if not wx.App.IsMainLoopRunning():
+ wxapp = wx.GetApp()
+ if wxapp is not None:
+ wxapp.MainLoop()
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_wxagg.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_wxagg.py
new file mode 100644
index 00000000000..041f274a78b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_wxagg.py
@@ -0,0 +1,147 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import wx
+
+import matplotlib
+from matplotlib import cbook
+from . import wx_compat as wxc
+from .backend_agg import FigureCanvasAgg
+from .backend_wx import (
+ _BackendWx, _FigureCanvasWxBase, FigureFrameWx,
+ NavigationToolbar2Wx as NavigationToolbar2WxAgg)
+
+
+class FigureFrameWxAgg(FigureFrameWx):
+ def get_canvas(self, fig):
+ return FigureCanvasWxAgg(self, -1, fig)
+
+
+class FigureCanvasWxAgg(FigureCanvasAgg, _FigureCanvasWxBase):
+ """
+ The FigureCanvas contains the figure and does event handling.
+
+ In the wxPython backend, it is derived from wxPanel, and (usually)
+ lives inside a frame instantiated by a FigureManagerWx. The parent
+ window probably implements a wxSizer to control the displayed
+ control size - but we give a hint as to our preferred minimum
+ size.
+ """
+
+ def draw(self, drawDC=None):
+ """
+ Render the figure using agg.
+ """
+ FigureCanvasAgg.draw(self)
+
+ self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
+ self._isDrawn = True
+ self.gui_repaint(drawDC=drawDC, origin='WXAgg')
+
+ def blit(self, bbox=None):
+ """
+ Transfer the region of the agg buffer defined by bbox to the display.
+ If bbox is None, the entire buffer is transferred.
+ """
+ if bbox is None:
+ self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
+ self.gui_repaint()
+ return
+
+ l, b, w, h = bbox.bounds
+ r = l + w
+ t = b + h
+ x = int(l)
+ y = int(self.bitmap.GetHeight() - t)
+
+ srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
+ srcDC = wx.MemoryDC()
+ srcDC.SelectObject(srcBmp)
+
+ destDC = wx.MemoryDC()
+ destDC.SelectObject(self.bitmap)
+
+ destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
+
+ destDC.SelectObject(wx.NullBitmap)
+ srcDC.SelectObject(wx.NullBitmap)
+ self.gui_repaint()
+
+ filetypes = FigureCanvasAgg.filetypes
+
+
+@cbook.deprecated("2.2", alternative="NavigationToolbar2WxAgg")
+class Toolbar(NavigationToolbar2WxAgg):
+ pass
+
+
+# agg/wxPython image conversion functions (wxPython >= 2.8)
+
+def _convert_agg_to_wx_image(agg, bbox):
+ """
+ Convert the region of the agg buffer bounded by bbox to a wx.Image. If
+ bbox is None, the entire buffer is converted.
+
+ Note: agg must be a backend_agg.RendererAgg instance.
+ """
+ if bbox is None:
+ # agg => rgb -> image
+ image = wxc.EmptyImage(int(agg.width), int(agg.height))
+ image.SetData(agg.tostring_rgb())
+ return image
+ else:
+ # agg => rgba buffer -> bitmap => clipped bitmap => image
+ return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
+
+
+def _convert_agg_to_wx_bitmap(agg, bbox):
+ """
+ Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
+ bbox is None, the entire buffer is converted.
+
+ Note: agg must be a backend_agg.RendererAgg instance.
+ """
+ if bbox is None:
+ # agg => rgba buffer -> bitmap
+ return wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
+ agg.buffer_rgba())
+ else:
+ # agg => rgba buffer -> bitmap => clipped bitmap
+ return _WX28_clipped_agg_as_bitmap(agg, bbox)
+
+
+def _WX28_clipped_agg_as_bitmap(agg, bbox):
+ """
+ Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
+
+ Note: agg must be a backend_agg.RendererAgg instance.
+ """
+ l, b, width, height = bbox.bounds
+ r = l + width
+ t = b + height
+
+ srcBmp = wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
+ agg.buffer_rgba())
+ srcDC = wx.MemoryDC()
+ srcDC.SelectObject(srcBmp)
+
+ destBmp = wxc.EmptyBitmap(int(width), int(height))
+ destDC = wx.MemoryDC()
+ destDC.SelectObject(destBmp)
+
+ x = int(l)
+ y = int(int(agg.height) - t)
+ destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
+
+ srcDC.SelectObject(wx.NullBitmap)
+ destDC.SelectObject(wx.NullBitmap)
+
+ return destBmp
+
+
+@_BackendWx.export
+class _BackendWxAgg(_BackendWx):
+ FigureCanvas = FigureCanvasWxAgg
+ _frame_class = FigureFrameWxAgg
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/backend_wxcairo.py b/contrib/python/matplotlib/py2/matplotlib/backends/backend_wxcairo.py
new file mode 100644
index 00000000000..fb3290f2bbc
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/backend_wxcairo.py
@@ -0,0 +1,53 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import wx
+
+from .backend_cairo import cairo, FigureCanvasCairo, RendererCairo
+from .backend_wx import (
+ _BackendWx, _FigureCanvasWxBase, FigureFrameWx,
+ NavigationToolbar2Wx as NavigationToolbar2WxCairo)
+import wx.lib.wxcairo as wxcairo
+
+
+class FigureFrameWxCairo(FigureFrameWx):
+ def get_canvas(self, fig):
+ return FigureCanvasWxCairo(self, -1, fig)
+
+
+class FigureCanvasWxCairo(_FigureCanvasWxBase, FigureCanvasCairo):
+ """
+ The FigureCanvas contains the figure and does event handling.
+
+ In the wxPython backend, it is derived from wxPanel, and (usually) lives
+ inside a frame instantiated by a FigureManagerWx. The parent window
+ probably implements a wxSizer to control the displayed control size - but
+ we give a hint as to our preferred minimum size.
+ """
+
+ def __init__(self, parent, id, figure):
+ # _FigureCanvasWxBase should be fixed to have the same signature as
+ # every other FigureCanvas and use cooperative inheritance, but in the
+ # meantime the following will make do.
+ _FigureCanvasWxBase.__init__(self, parent, id, figure)
+ FigureCanvasCairo.__init__(self, figure)
+ self._renderer = RendererCairo(self.figure.dpi)
+
+ def draw(self, drawDC=None):
+ width = int(self.figure.bbox.width)
+ height = int(self.figure.bbox.height)
+ surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
+ self._renderer.set_ctx_from_surface(surface)
+ self._renderer.set_width_height(width, height)
+ self.figure.draw(self._renderer)
+ self.bitmap = wxcairo.BitmapFromImageSurface(surface)
+ self._isDrawn = True
+ self.gui_repaint(drawDC=drawDC, origin='WXCairo')
+
+
+@_BackendWx.export
+class _BackendWxCairo(_BackendWx):
+ FigureCanvas = FigureCanvasWxCairo
+ _frame_class = FigureFrameWxCairo
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/qt_compat.py b/contrib/python/matplotlib/py2/matplotlib/backends/qt_compat.py
new file mode 100644
index 00000000000..6b386143a30
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/qt_compat.py
@@ -0,0 +1,176 @@
+"""
+Qt binding and backend selector.
+
+The selection logic is as follows:
+- if any of PyQt5, PySide2, PyQt4 or PySide have already been imported
+ (checked in that order), use it;
+- otherwise, if the QT_API environment variable (used by Enthought) is
+ set, use it to determine which binding to use (but do not change the
+ backend based on it; i.e. if the Qt4Agg backend is requested but QT_API
+ is set to "pyqt5", then actually use Qt4 with the binding specified by
+ ``rcParams["backend.qt4"]``);
+- otherwise, use whatever the rcParams indicate.
+"""
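+# Illustrative examples of the selection logic (hypothetical sessions, not
+# exercised by this module itself):
+# - importing PySide2 before matplotlib makes QT_API become "PySide2";
+# - with no binding imported, QT_API=pyside in the environment and a Qt4
+#   backend in rcParams selects the PySide binding.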
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from distutils.version import LooseVersion
+import os
+import sys
+
+from matplotlib import rcParams
+
+QT_API_PYQT5 = "PyQt5"
+QT_API_PYSIDE2 = "PySide2"
+QT_API_PYQTv2 = "PyQt4v2"
+QT_API_PYSIDE = "PySide"
+QT_API_PYQT = "PyQt4" # Use the old sip v1 API (Py3 defaults to v2).
+QT_API_ENV = os.environ.get("QT_API")
+# Mapping of QT_API_ENV to requested binding. ETS does not support PyQt4v1.
+# (https://github.com/enthought/pyface/blob/master/pyface/qt/__init__.py)
+_ETS = {"pyqt5": QT_API_PYQT5, "pyside2": QT_API_PYSIDE2,
+ "pyqt": QT_API_PYQTv2, "pyside": QT_API_PYSIDE,
+ None: None}
+# First, check if anything is already imported.
+if "PyQt5" in sys.modules:
+ QT_API = QT_API_PYQT5
+ dict.__setitem__(rcParams, "backend.qt5", QT_API)
+elif "PySide2" in sys.modules:
+ QT_API = QT_API_PYSIDE2
+ dict.__setitem__(rcParams, "backend.qt5", QT_API)
+elif "PyQt4" in sys.modules:
+ QT_API = QT_API_PYQTv2
+ dict.__setitem__(rcParams, "backend.qt4", QT_API)
+elif "PySide" in sys.modules:
+ QT_API = QT_API_PYSIDE
+ dict.__setitem__(rcParams, "backend.qt4", QT_API)
+# Otherwise, check the QT_API environment variable (from Enthought). This can
+# only override the binding, not the backend (in other words, we check that the
+# requested backend actually matches).
+elif rcParams["backend"] in ["Qt5Agg", "Qt5Cairo"]:
+ if QT_API_ENV == "pyqt5":
+ dict.__setitem__(rcParams, "backend.qt5", QT_API_PYQT5)
+ elif QT_API_ENV == "pyside2":
+ dict.__setitem__(rcParams, "backend.qt5", QT_API_PYSIDE2)
+ QT_API = dict.__getitem__(rcParams, "backend.qt5")
+elif rcParams["backend"] in ["Qt4Agg", "Qt4Cairo"]:
+ if QT_API_ENV == "pyqt4":
+ dict.__setitem__(rcParams, "backend.qt4", QT_API_PYQTv2)
+ elif QT_API_ENV == "pyside":
+ dict.__setitem__(rcParams, "backend.qt4", QT_API_PYSIDE)
+ QT_API = dict.__getitem__(rcParams, "backend.qt4")
+# A non-Qt backend was selected but we still got here (possible, e.g., when
+# fully manually embedding Matplotlib in a Qt app without using pyplot).
+else:
+ try:
+ QT_API = _ETS[QT_API_ENV]
+ except KeyError:
+ raise RuntimeError(
+ "The environment variable QT_API has the unrecognized value {!r}; "
+ "valid values are 'pyqt5', 'pyside2', 'pyqt', and 'pyside'"
+ .format(QT_API_ENV))
+
+
+def _setup_pyqt5():
+ global QtCore, QtGui, QtWidgets, __version__, is_pyqt5, _getSaveFileName
+
+ if QT_API == QT_API_PYQT5:
+ from PyQt5 import QtCore, QtGui, QtWidgets
+ __version__ = QtCore.PYQT_VERSION_STR
+ QtCore.Signal = QtCore.pyqtSignal
+ QtCore.Slot = QtCore.pyqtSlot
+ QtCore.Property = QtCore.pyqtProperty
+ elif QT_API == QT_API_PYSIDE2:
+ from PySide2 import QtCore, QtGui, QtWidgets, __version__
+ else:
+ raise ValueError("Unexpected value for the 'backend.qt5' rcparam")
+ _getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
+
+ def is_pyqt5():
+ return True
+
+
+def _setup_pyqt4():
+ global QtCore, QtGui, QtWidgets, __version__, is_pyqt5, _getSaveFileName
+
+ def _setup_pyqt4_internal(api):
+ global QtCore, QtGui, QtWidgets, \
+ __version__, is_pyqt5, _getSaveFileName
+ # List of incompatible APIs:
+ # http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
+ _sip_apis = ["QDate", "QDateTime", "QString", "QTextStream", "QTime",
+ "QUrl", "QVariant"]
+ try:
+ import sip
+ except ImportError:
+ pass
+ else:
+ for _sip_api in _sip_apis:
+ try:
+ sip.setapi(_sip_api, api)
+ except ValueError:
+ pass
+ from PyQt4 import QtCore, QtGui
+ __version__ = QtCore.PYQT_VERSION_STR
+ # PyQt 4.6 introduced getSaveFileNameAndFilter:
+ # https://riverbankcomputing.com/news/pyqt-46
+ if __version__ < LooseVersion("4.6"):
+ raise ImportError("PyQt<4.6 is not supported")
+ QtCore.Signal = QtCore.pyqtSignal
+ QtCore.Slot = QtCore.pyqtSlot
+ QtCore.Property = QtCore.pyqtProperty
+ _getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
+
+ if QT_API == QT_API_PYQTv2:
+ _setup_pyqt4_internal(api=2)
+ elif QT_API == QT_API_PYSIDE:
+ from PySide import QtCore, QtGui, __version__, __version_info__
+ # PySide 1.0.3 fixed the following:
+ # https://srinikom.github.io/pyside-bz-archive/809.html
+ if __version_info__ < (1, 0, 3):
+ raise ImportError("PySide<1.0.3 is not supported")
+ _getSaveFileName = QtGui.QFileDialog.getSaveFileName
+ elif QT_API == QT_API_PYQT:
+ _setup_pyqt4_internal(api=1)
+ else:
+ raise ValueError("Unexpected value for the 'backend.qt4' rcparam")
+ QtWidgets = QtGui
+
+ def is_pyqt5():
+ return False
+
+
+if QT_API in [QT_API_PYQT5, QT_API_PYSIDE2]:
+ _setup_pyqt5()
+elif QT_API in [QT_API_PYQTv2, QT_API_PYSIDE, QT_API_PYQT]:
+ _setup_pyqt4()
+elif QT_API is None:
+ if rcParams["backend"] == "Qt4Agg":
+ _candidates = [(_setup_pyqt4, QT_API_PYQTv2),
+ (_setup_pyqt4, QT_API_PYSIDE),
+ (_setup_pyqt4, QT_API_PYQT),
+ (_setup_pyqt5, QT_API_PYQT5),
+ (_setup_pyqt5, QT_API_PYSIDE2)]
+ else:
+ _candidates = [(_setup_pyqt5, QT_API_PYQT5),
+ (_setup_pyqt5, QT_API_PYSIDE2),
+ (_setup_pyqt4, QT_API_PYQTv2),
+ (_setup_pyqt4, QT_API_PYSIDE),
+ (_setup_pyqt4, QT_API_PYQT)]
+ for _setup, QT_API in _candidates:
+ try:
+ _setup()
+ except ImportError:
+ continue
+ break
+ else:
+ raise ImportError("Failed to import any qt binding")
+else:  # We should not get here.
+ raise AssertionError("Unexpected QT_API: {}".format(QT_API))
+
+
+# These globals are only defined for backward-compatibility purposes.
+ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
+ pyqt5=(QT_API_PYQT5, 5), pyside2=(QT_API_PYSIDE2, 5))
+QT_RC_MAJOR_VERSION = 5 if is_pyqt5() else 4
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/__init__.py b/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/__init__.py
new file mode 100644
index 00000000000..800d82e7ee0
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/__init__.py
@@ -0,0 +1,2 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/figureoptions.py b/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/figureoptions.py
new file mode 100644
index 00000000000..40572c8bd82
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/figureoptions.py
@@ -0,0 +1,262 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright © 2009 Pierre Raybaut
+# Licensed under the terms of the MIT License
+# see the mpl licenses directory for a copy of the license
+
+
+"""Module that provides a GUI-based editor for matplotlib's figure options"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import os.path as osp
+import re
+
+import matplotlib
+from matplotlib import cm, colors as mcolors, markers, image as mimage
+import matplotlib.backends.qt_editor.formlayout as formlayout
+from matplotlib.backends.qt_compat import QtGui
+
+
+def get_icon(name):
+ basedir = osp.join(matplotlib.rcParams['datapath'], 'images')
+ return QtGui.QIcon(osp.join(basedir, name))
+
+
+LINESTYLES = {'-': 'Solid',
+ '--': 'Dashed',
+ '-.': 'DashDot',
+ ':': 'Dotted',
+ 'None': 'None',
+ }
+
+DRAWSTYLES = {
+ 'default': 'Default',
+ 'steps-pre': 'Steps (Pre)', 'steps': 'Steps (Pre)',
+ 'steps-mid': 'Steps (Mid)',
+ 'steps-post': 'Steps (Post)'}
+
+MARKERS = markers.MarkerStyle.markers
+
+
+def figure_edit(axes, parent=None):
+ """Edit matplotlib figure options"""
+ sep = (None, None) # separator
+
+ # Get / General
+ # Cast to builtin floats as they have nicer reprs.
+ xmin, xmax = map(float, axes.get_xlim())
+ ymin, ymax = map(float, axes.get_ylim())
+ general = [('Title', axes.get_title()),
+ sep,
+ (None, "<b>X-Axis</b>"),
+ ('Left', xmin), ('Right', xmax),
+ ('Label', axes.get_xlabel()),
+ ('Scale', [axes.get_xscale(), 'linear', 'log', 'logit']),
+ sep,
+ (None, "<b>Y-Axis</b>"),
+ ('Bottom', ymin), ('Top', ymax),
+ ('Label', axes.get_ylabel()),
+ ('Scale', [axes.get_yscale(), 'linear', 'log', 'logit']),
+ sep,
+ ('(Re-)Generate automatic legend', False),
+ ]
+
+ # Save the unit data
+ xconverter = axes.xaxis.converter
+ yconverter = axes.yaxis.converter
+ xunits = axes.xaxis.get_units()
+ yunits = axes.yaxis.get_units()
+
+ # Sorting for default labels (_lineXXX, _imageXXX).
+ def cmp_key(label):
+ match = re.match(r"(_line|_image)(\d+)", label)
+ if match:
+ return match.group(1), int(match.group(2))
+ else:
+ return label, 0
+
+ # Get / Curves
+ linedict = {}
+ for line in axes.get_lines():
+ label = line.get_label()
+ if label == '_nolegend_':
+ continue
+ linedict[label] = line
+ curves = []
+
+ def prepare_data(d, init):
+ """Prepare entry for FormLayout.
+
+ `d` is a mapping of shorthands to style names (a single style may
+ have multiple shorthands, in particular the shorthands `None`,
+ `"None"`, `"none"` and `""` are synonyms); `init` is one shorthand
+ of the initial style.
+
+        This function returns a list suitable for initializing a
+ FormLayout combobox, namely `[initial_name, (shorthand,
+ style_name), (shorthand, style_name), ...]`.
+ """
+ # Drop duplicate shorthands from dict (by overwriting them during
+ # the dict comprehension).
+ name2short = {name: short for short, name in d.items()}
+ # Convert back to {shorthand: name}.
+ short2name = {short: name for name, short in name2short.items()}
+ # Find the kept shorthand for the style specified by init.
+ canonical_init = name2short[d[init]]
+ # Sort by representation and prepend the initial value.
+ return ([canonical_init] +
+ sorted(short2name.items(),
+ key=lambda short_and_name: short_and_name[1]))
+
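+    # Illustrative note (added comment, not in the original source):
+    # prepare_data(LINESTYLES, '-') would yield something like
+    #     ['-', ('-.', 'DashDot'), ('--', 'Dashed'), (':', 'Dotted'),
+    #      ('None', 'None'), ('-', 'Solid')]
+    # i.e. the canonical shorthand of the initial style followed by
+    # (shorthand, name) pairs sorted by display name.
+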
+ curvelabels = sorted(linedict, key=cmp_key)
+ for label in curvelabels:
+ line = linedict[label]
+ color = mcolors.to_hex(
+ mcolors.to_rgba(line.get_color(), line.get_alpha()),
+ keep_alpha=True)
+ ec = mcolors.to_hex(
+ mcolors.to_rgba(line.get_markeredgecolor(), line.get_alpha()),
+ keep_alpha=True)
+ fc = mcolors.to_hex(
+ mcolors.to_rgba(line.get_markerfacecolor(), line.get_alpha()),
+ keep_alpha=True)
+ curvedata = [
+ ('Label', label),
+ sep,
+ (None, '<b>Line</b>'),
+ ('Line style', prepare_data(LINESTYLES, line.get_linestyle())),
+ ('Draw style', prepare_data(DRAWSTYLES, line.get_drawstyle())),
+ ('Width', line.get_linewidth()),
+ ('Color (RGBA)', color),
+ sep,
+ (None, '<b>Marker</b>'),
+ ('Style', prepare_data(MARKERS, line.get_marker())),
+ ('Size', line.get_markersize()),
+ ('Face color (RGBA)', fc),
+ ('Edge color (RGBA)', ec)]
+ curves.append([curvedata, label, ""])
+ # Is there a curve displayed?
+ has_curve = bool(curves)
+
+ # Get / Images
+ imagedict = {}
+ for image in axes.get_images():
+ label = image.get_label()
+ if label == '_nolegend_':
+ continue
+ imagedict[label] = image
+ imagelabels = sorted(imagedict, key=cmp_key)
+ images = []
+ cmaps = [(cmap, name) for name, cmap in sorted(cm.cmap_d.items())]
+ for label in imagelabels:
+ image = imagedict[label]
+ cmap = image.get_cmap()
+ if cmap not in cm.cmap_d.values():
+ cmaps = [(cmap, cmap.name)] + cmaps
+ low, high = image.get_clim()
+ imagedata = [
+ ('Label', label),
+ ('Colormap', [cmap.name] + cmaps),
+ ('Min. value', low),
+ ('Max. value', high),
+ ('Interpolation',
+ [image.get_interpolation()]
+ + [(name, name) for name in sorted(mimage.interpolations_names)])]
+ images.append([imagedata, label, ""])
+ # Is there an image displayed?
+ has_image = bool(images)
+
+ datalist = [(general, "Axes", "")]
+ if curves:
+ datalist.append((curves, "Curves", ""))
+ if images:
+ datalist.append((images, "Images", ""))
+
+ def apply_callback(data):
+ """This function will be called to apply changes"""
+ orig_xlim = axes.get_xlim()
+ orig_ylim = axes.get_ylim()
+
+ general = data.pop(0)
+ curves = data.pop(0) if has_curve else []
+ images = data.pop(0) if has_image else []
+ if data:
+ raise ValueError("Unexpected field")
+
+ # Set / General
+ (title, xmin, xmax, xlabel, xscale, ymin, ymax, ylabel, yscale,
+ generate_legend) = general
+
+ if axes.get_xscale() != xscale:
+ axes.set_xscale(xscale)
+ if axes.get_yscale() != yscale:
+ axes.set_yscale(yscale)
+
+ axes.set_title(title)
+ axes.set_xlim(xmin, xmax)
+ axes.set_xlabel(xlabel)
+ axes.set_ylim(ymin, ymax)
+ axes.set_ylabel(ylabel)
+
+ # Restore the unit data
+ axes.xaxis.converter = xconverter
+ axes.yaxis.converter = yconverter
+ axes.xaxis.set_units(xunits)
+ axes.yaxis.set_units(yunits)
+ axes.xaxis._update_axisinfo()
+ axes.yaxis._update_axisinfo()
+
+ # Set / Curves
+ for index, curve in enumerate(curves):
+ line = linedict[curvelabels[index]]
+ (label, linestyle, drawstyle, linewidth, color, marker, markersize,
+ markerfacecolor, markeredgecolor) = curve
+ line.set_label(label)
+ line.set_linestyle(linestyle)
+ line.set_drawstyle(drawstyle)
+ line.set_linewidth(linewidth)
+ rgba = mcolors.to_rgba(color)
+ line.set_alpha(None)
+ line.set_color(rgba)
+            if marker != 'none':
+ line.set_marker(marker)
+ line.set_markersize(markersize)
+ line.set_markerfacecolor(markerfacecolor)
+ line.set_markeredgecolor(markeredgecolor)
+
+ # Set / Images
+ for index, image_settings in enumerate(images):
+ image = imagedict[imagelabels[index]]
+ label, cmap, low, high, interpolation = image_settings
+ image.set_label(label)
+ image.set_cmap(cm.get_cmap(cmap))
+ image.set_clim(*sorted([low, high]))
+ image.set_interpolation(interpolation)
+
+ # re-generate legend, if checkbox is checked
+ if generate_legend:
+ draggable = None
+ ncol = 1
+ if axes.legend_ is not None:
+ old_legend = axes.get_legend()
+ draggable = old_legend._draggable is not None
+ ncol = old_legend._ncol
+ new_legend = axes.legend(ncol=ncol)
+ if new_legend:
+ new_legend.draggable(draggable)
+
+ # Redraw
+ figure = axes.get_figure()
+ figure.canvas.draw()
+ if not (axes.get_xlim() == orig_xlim and axes.get_ylim() == orig_ylim):
+ figure.canvas.toolbar.push_current()
+
+ data = formlayout.fedit(datalist, title="Figure options", parent=parent,
+ icon=get_icon('qt4_editor_options.svg'),
+ apply=apply_callback)
+ if data is not None:
+ apply_callback(data)
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/formlayout.py b/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/formlayout.py
new file mode 100644
index 00000000000..d5fcdfc901d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/formlayout.py
@@ -0,0 +1,544 @@
+# -*- coding: utf-8 -*-
+"""
+formlayout
+==========
+
+Module creating Qt form dialogs/layouts to edit various types of parameters
+
+
+formlayout License Agreement (MIT License)
+------------------------------------------
+
+Copyright (c) 2009 Pierre Raybaut
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+"""
+
+# History:
+# 1.0.10: added float validator (disable "Ok" and "Apply" button when not valid)
+# 1.0.7: added support for "Apply" button
+# 1.0.6: code cleaning
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+__version__ = '1.0.10'
+__license__ = __doc__
+
+import copy
+import datetime
+import warnings
+
+import six
+
+from matplotlib import colors as mcolors
+from matplotlib.backends.qt_compat import QtGui, QtWidgets, QtCore
+
+
+BLACKLIST = {"title", "label"}
+
+
+class ColorButton(QtWidgets.QPushButton):
+ """
+ Color choosing push button
+ """
+ colorChanged = QtCore.Signal(QtGui.QColor)
+
+ def __init__(self, parent=None):
+ QtWidgets.QPushButton.__init__(self, parent)
+ self.setFixedSize(20, 20)
+ self.setIconSize(QtCore.QSize(12, 12))
+ self.clicked.connect(self.choose_color)
+ self._color = QtGui.QColor()
+
+ def choose_color(self):
+ color = QtWidgets.QColorDialog.getColor(
+ self._color, self.parentWidget(), "",
+ QtWidgets.QColorDialog.ShowAlphaChannel)
+ if color.isValid():
+ self.set_color(color)
+
+ def get_color(self):
+ return self._color
+
+ @QtCore.Slot(QtGui.QColor)
+ def set_color(self, color):
+ if color != self._color:
+ self._color = color
+ self.colorChanged.emit(self._color)
+ pixmap = QtGui.QPixmap(self.iconSize())
+ pixmap.fill(color)
+ self.setIcon(QtGui.QIcon(pixmap))
+
+ color = QtCore.Property(QtGui.QColor, get_color, set_color)
+
+
+def to_qcolor(color):
+ """Create a QColor from a matplotlib color"""
+ qcolor = QtGui.QColor()
+ try:
+ rgba = mcolors.to_rgba(color)
+ except ValueError:
+ warnings.warn('Ignoring invalid color %r' % color)
+ return qcolor # return invalid QColor
+ qcolor.setRgbF(*rgba)
+ return qcolor
+
+
+class ColorLayout(QtWidgets.QHBoxLayout):
+ """Color-specialized QLineEdit layout"""
+ def __init__(self, color, parent=None):
+ QtWidgets.QHBoxLayout.__init__(self)
+ assert isinstance(color, QtGui.QColor)
+ self.lineedit = QtWidgets.QLineEdit(
+ mcolors.to_hex(color.getRgbF(), keep_alpha=True), parent)
+ self.lineedit.editingFinished.connect(self.update_color)
+ self.addWidget(self.lineedit)
+ self.colorbtn = ColorButton(parent)
+ self.colorbtn.color = color
+ self.colorbtn.colorChanged.connect(self.update_text)
+ self.addWidget(self.colorbtn)
+
+ def update_color(self):
+ color = self.text()
+ qcolor = to_qcolor(color)
+ self.colorbtn.color = qcolor # defaults to black if not qcolor.isValid()
+
+ def update_text(self, color):
+ self.lineedit.setText(mcolors.to_hex(color.getRgbF(), keep_alpha=True))
+
+ def text(self):
+ return self.lineedit.text()
+
+
+def font_is_installed(font):
+ """Check if font is installed"""
+ return [fam for fam in QtGui.QFontDatabase().families()
+ if six.text_type(fam) == font]
+
+
+def tuple_to_qfont(tup):
+ """
+ Create a QFont from tuple:
+ (family [string], size [int], italic [bool], bold [bool])
+ """
+ if not (isinstance(tup, tuple) and len(tup) == 4
+ and font_is_installed(tup[0])
+ and isinstance(tup[1], int)
+ and isinstance(tup[2], bool)
+ and isinstance(tup[3], bool)):
+ return None
+ font = QtGui.QFont()
+ family, size, italic, bold = tup
+ font.setFamily(family)
+ font.setPointSize(size)
+ font.setItalic(italic)
+ font.setBold(bold)
+ return font
+
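+# Illustrative note (added comment, not in the original source):
+# tuple_to_qfont(('Arial', 10, False, True)) would return a QFont for 10pt,
+# non-italic, bold Arial, provided the family is installed; any malformed
+# tuple (wrong length, wrong types, unknown family) yields None instead.
+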
+
+def qfont_to_tuple(font):
+ return (six.text_type(font.family()), int(font.pointSize()),
+ font.italic(), font.bold())
+
+
+class FontLayout(QtWidgets.QGridLayout):
+ """Font selection"""
+ def __init__(self, value, parent=None):
+ QtWidgets.QGridLayout.__init__(self)
+ font = tuple_to_qfont(value)
+ assert font is not None
+
+ # Font family
+ self.family = QtWidgets.QFontComboBox(parent)
+ self.family.setCurrentFont(font)
+ self.addWidget(self.family, 0, 0, 1, -1)
+
+ # Font size
+ self.size = QtWidgets.QComboBox(parent)
+ self.size.setEditable(True)
+ sizelist = list(range(6, 12)) + list(range(12, 30, 2)) + [36, 48, 72]
+ size = font.pointSize()
+ if size not in sizelist:
+ sizelist.append(size)
+ sizelist.sort()
+ self.size.addItems([str(s) for s in sizelist])
+ self.size.setCurrentIndex(sizelist.index(size))
+ self.addWidget(self.size, 1, 0)
+
+ # Italic or not
+ self.italic = QtWidgets.QCheckBox(self.tr("Italic"), parent)
+ self.italic.setChecked(font.italic())
+ self.addWidget(self.italic, 1, 1)
+
+ # Bold or not
+ self.bold = QtWidgets.QCheckBox(self.tr("Bold"), parent)
+ self.bold.setChecked(font.bold())
+ self.addWidget(self.bold, 1, 2)
+
+ def get_font(self):
+ font = self.family.currentFont()
+ font.setItalic(self.italic.isChecked())
+ font.setBold(self.bold.isChecked())
+ font.setPointSize(int(self.size.currentText()))
+ return qfont_to_tuple(font)
+
+
+def is_edit_valid(edit):
+ text = edit.text()
+ state = edit.validator().validate(text, 0)[0]
+
+ return state == QtGui.QDoubleValidator.Acceptable
+
+
+class FormWidget(QtWidgets.QWidget):
+ update_buttons = QtCore.Signal()
+ def __init__(self, data, comment="", parent=None):
+ QtWidgets.QWidget.__init__(self, parent)
+ self.data = copy.deepcopy(data)
+ self.widgets = []
+ self.formlayout = QtWidgets.QFormLayout(self)
+ if comment:
+ self.formlayout.addRow(QtWidgets.QLabel(comment))
+ self.formlayout.addRow(QtWidgets.QLabel(" "))
+
+ def get_dialog(self):
+ """Return FormDialog instance"""
+ dialog = self.parent()
+ while not isinstance(dialog, QtWidgets.QDialog):
+ dialog = dialog.parent()
+ return dialog
+
+ def setup(self):
+ for label, value in self.data:
+ if label is None and value is None:
+ # Separator: (None, None)
+ self.formlayout.addRow(QtWidgets.QLabel(" "), QtWidgets.QLabel(" "))
+ self.widgets.append(None)
+ continue
+ elif label is None:
+ # Comment
+ self.formlayout.addRow(QtWidgets.QLabel(value))
+ self.widgets.append(None)
+ continue
+ elif tuple_to_qfont(value) is not None:
+ field = FontLayout(value, self)
+ elif (label.lower() not in BLACKLIST
+ and mcolors.is_color_like(value)):
+ field = ColorLayout(to_qcolor(value), self)
+ elif isinstance(value, six.string_types):
+ field = QtWidgets.QLineEdit(value, self)
+ elif isinstance(value, (list, tuple)):
+ if isinstance(value, tuple):
+ value = list(value)
+ selindex = value.pop(0)
+ field = QtWidgets.QComboBox(self)
+ if isinstance(value[0], (list, tuple)):
+ keys = [key for key, _val in value]
+ value = [val for _key, val in value]
+ else:
+ keys = value
+ field.addItems(value)
+ if selindex in value:
+ selindex = value.index(selindex)
+ elif selindex in keys:
+ selindex = keys.index(selindex)
+ elif not isinstance(selindex, int):
+ warnings.warn(
+ "index '%s' is invalid (label: %s, value: %s)" %
+ (selindex, label, value))
+ selindex = 0
+ field.setCurrentIndex(selindex)
+ elif isinstance(value, bool):
+ field = QtWidgets.QCheckBox(self)
+ if value:
+ field.setCheckState(QtCore.Qt.Checked)
+ else:
+ field.setCheckState(QtCore.Qt.Unchecked)
+ elif isinstance(value, float):
+ field = QtWidgets.QLineEdit(repr(value), self)
+ field.setCursorPosition(0)
+ field.setValidator(QtGui.QDoubleValidator(field))
+ field.validator().setLocale(QtCore.QLocale("C"))
+ dialog = self.get_dialog()
+ dialog.register_float_field(field)
+ field.textChanged.connect(lambda text: dialog.update_buttons())
+ elif isinstance(value, int):
+ field = QtWidgets.QSpinBox(self)
+ field.setRange(-1e9, 1e9)
+ field.setValue(value)
+ elif isinstance(value, datetime.datetime):
+ field = QtWidgets.QDateTimeEdit(self)
+ field.setDateTime(value)
+ elif isinstance(value, datetime.date):
+ field = QtWidgets.QDateEdit(self)
+ field.setDate(value)
+ else:
+ field = QtWidgets.QLineEdit(repr(value), self)
+ self.formlayout.addRow(label, field)
+ self.widgets.append(field)
+
+ def get(self):
+ valuelist = []
+ for index, (label, value) in enumerate(self.data):
+ field = self.widgets[index]
+ if label is None:
+ # Separator / Comment
+ continue
+ elif tuple_to_qfont(value) is not None:
+ value = field.get_font()
+ elif (isinstance(value, six.string_types)
+ or mcolors.is_color_like(value)):
+ value = six.text_type(field.text())
+ elif isinstance(value, (list, tuple)):
+ index = int(field.currentIndex())
+ if isinstance(value[0], (list, tuple)):
+ value = value[index][0]
+ else:
+ value = value[index]
+ elif isinstance(value, bool):
+ value = field.checkState() == QtCore.Qt.Checked
+ elif isinstance(value, float):
+ value = float(str(field.text()))
+ elif isinstance(value, int):
+ value = int(field.value())
+ elif isinstance(value, datetime.datetime):
+ value = field.dateTime().toPyDateTime()
+ elif isinstance(value, datetime.date):
+ value = field.date().toPyDate()
+ else:
+ value = eval(str(field.text()))
+ valuelist.append(value)
+ return valuelist
+
+
+class FormComboWidget(QtWidgets.QWidget):
+ update_buttons = QtCore.Signal()
+
+ def __init__(self, datalist, comment="", parent=None):
+ QtWidgets.QWidget.__init__(self, parent)
+ layout = QtWidgets.QVBoxLayout()
+ self.setLayout(layout)
+ self.combobox = QtWidgets.QComboBox()
+ layout.addWidget(self.combobox)
+
+ self.stackwidget = QtWidgets.QStackedWidget(self)
+ layout.addWidget(self.stackwidget)
+ self.combobox.currentIndexChanged.connect(self.stackwidget.setCurrentIndex)
+
+ self.widgetlist = []
+ for data, title, comment in datalist:
+ self.combobox.addItem(title)
+ widget = FormWidget(data, comment=comment, parent=self)
+ self.stackwidget.addWidget(widget)
+ self.widgetlist.append(widget)
+
+ def setup(self):
+ for widget in self.widgetlist:
+ widget.setup()
+
+ def get(self):
+ return [widget.get() for widget in self.widgetlist]
+
+
+class FormTabWidget(QtWidgets.QWidget):
+ update_buttons = QtCore.Signal()
+
+ def __init__(self, datalist, comment="", parent=None):
+ QtWidgets.QWidget.__init__(self, parent)
+ layout = QtWidgets.QVBoxLayout()
+ self.tabwidget = QtWidgets.QTabWidget()
+ layout.addWidget(self.tabwidget)
+ self.setLayout(layout)
+ self.widgetlist = []
+ for data, title, comment in datalist:
+ if len(data[0]) == 3:
+ widget = FormComboWidget(data, comment=comment, parent=self)
+ else:
+ widget = FormWidget(data, comment=comment, parent=self)
+ index = self.tabwidget.addTab(widget, title)
+ self.tabwidget.setTabToolTip(index, comment)
+ self.widgetlist.append(widget)
+
+ def setup(self):
+ for widget in self.widgetlist:
+ widget.setup()
+
+ def get(self):
+ return [widget.get() for widget in self.widgetlist]
+
+
+class FormDialog(QtWidgets.QDialog):
+ """Form Dialog"""
+ def __init__(self, data, title="", comment="",
+ icon=None, parent=None, apply=None):
+ QtWidgets.QDialog.__init__(self, parent)
+
+ self.apply_callback = apply
+
+ # Form
+ if isinstance(data[0][0], (list, tuple)):
+ self.formwidget = FormTabWidget(data, comment=comment,
+ parent=self)
+ elif len(data[0]) == 3:
+ self.formwidget = FormComboWidget(data, comment=comment,
+ parent=self)
+ else:
+ self.formwidget = FormWidget(data, comment=comment,
+ parent=self)
+ layout = QtWidgets.QVBoxLayout()
+ layout.addWidget(self.formwidget)
+
+ self.float_fields = []
+ self.formwidget.setup()
+
+ # Button box
+ self.bbox = bbox = QtWidgets.QDialogButtonBox(
+ QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
+ self.formwidget.update_buttons.connect(self.update_buttons)
+ if self.apply_callback is not None:
+ apply_btn = bbox.addButton(QtWidgets.QDialogButtonBox.Apply)
+ apply_btn.clicked.connect(self.apply)
+
+ bbox.accepted.connect(self.accept)
+ bbox.rejected.connect(self.reject)
+ layout.addWidget(bbox)
+
+ self.setLayout(layout)
+
+ self.setWindowTitle(title)
+ if not isinstance(icon, QtGui.QIcon):
+ icon = QtWidgets.QWidget().style().standardIcon(QtWidgets.QStyle.SP_MessageBoxQuestion)
+ self.setWindowIcon(icon)
+
+ def register_float_field(self, field):
+ self.float_fields.append(field)
+
+ def update_buttons(self):
+ valid = True
+ for field in self.float_fields:
+ if not is_edit_valid(field):
+ valid = False
+ for btn_type in (QtWidgets.QDialogButtonBox.Ok,
+ QtWidgets.QDialogButtonBox.Apply):
+ btn = self.bbox.button(btn_type)
+ if btn is not None:
+ btn.setEnabled(valid)
+
+ def accept(self):
+ self.data = self.formwidget.get()
+ QtWidgets.QDialog.accept(self)
+
+ def reject(self):
+ self.data = None
+ QtWidgets.QDialog.reject(self)
+
+ def apply(self):
+ self.apply_callback(self.formwidget.get())
+
+ def get(self):
+ """Return form result"""
+ return self.data
+
+
+def fedit(data, title="", comment="", icon=None, parent=None, apply=None):
+ """
+ Create form dialog and return result
+ (if Cancel button is pressed, return None)
+
+ data: datalist, datagroup
+ title: string
+ comment: string
+ icon: QIcon instance
+ parent: parent QWidget
+ apply: apply callback (function)
+
+ datalist: list/tuple of (field_name, field_value)
+ datagroup: list/tuple of (datalist *or* datagroup, title, comment)
+
+ -> one field for each member of a datalist
+ -> one tab for each member of a top-level datagroup
+ -> one page (of a multipage widget, each page can be selected with a combo
+ box) for each member of a datagroup inside a datagroup
+
+ Supported types for field_value:
+ - int, float, str, unicode, bool
+ - colors: in Qt-compatible text form, i.e. in hex format or name (red,...)
+ (automatically detected from a string)
+ - list/tuple:
+ * the first element will be the selected index (or value)
+ * the other elements can be couples (key, value) or only values
+ """
+
+ # Create a QApplication instance if no instance currently exists
+ # (e.g., if the module is used directly from the interpreter)
+ if QtWidgets.QApplication.startingUp():
+ _app = QtWidgets.QApplication([])
+ dialog = FormDialog(data, title, comment, icon, parent, apply)
+ if dialog.exec_():
+ return dialog.get()
+
+
+if __name__ == "__main__":
+
+ def create_datalist_example():
+ return [('str', 'this is a string'),
+ ('list', [0, '1', '3', '4']),
+ ('list2', ['--', ('none', 'None'), ('--', 'Dashed'),
+ ('-.', 'DashDot'), ('-', 'Solid'),
+ ('steps', 'Steps'), (':', 'Dotted')]),
+ ('float', 1.2),
+ (None, 'Other:'),
+ ('int', 12),
+ ('font', ('Arial', 10, False, True)),
+ ('color', '#123409'),
+ ('bool', True),
+ ('date', datetime.date(2010, 10, 10)),
+ ('datetime', datetime.datetime(2010, 10, 10)),
+ ]
+
+ def create_datagroup_example():
+ datalist = create_datalist_example()
+ return ((datalist, "Category 1", "Category 1 comment"),
+ (datalist, "Category 2", "Category 2 comment"),
+ (datalist, "Category 3", "Category 3 comment"))
+
+ #--------- datalist example
+ datalist = create_datalist_example()
+
+ def apply_test(data):
+ print("data:", data)
+ print("result:", fedit(datalist, title="Example",
+ comment="This is just an <b>example</b>.",
+ apply=apply_test))
+
+ #--------- datagroup example
+ datagroup = create_datagroup_example()
+ print("result:", fedit(datagroup, "Global title"))
+
+ #--------- datagroup inside a datagroup example
+ datalist = create_datalist_example()
+ datagroup = create_datagroup_example()
+ print("result:", fedit(((datagroup, "Title 1", "Tab 1 comment"),
+ (datalist, "Title 2", "Tab 2 comment"),
+ (datalist, "Title 3", "Tab 3 comment")),
+ "Global title"))
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/formsubplottool.py b/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/formsubplottool.py
new file mode 100644
index 00000000000..4906af588a7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/qt_editor/formsubplottool.py
@@ -0,0 +1,56 @@
+from matplotlib.backends.qt_compat import QtWidgets
+
+
+class UiSubplotTool(QtWidgets.QDialog):
+
+ def __init__(self, *args, **kwargs):
+ super(UiSubplotTool, self).__init__(*args, **kwargs)
+ self.setObjectName("SubplotTool")
+ self._widgets = {}
+
+ layout = QtWidgets.QHBoxLayout()
+ self.setLayout(layout)
+
+ left = QtWidgets.QVBoxLayout()
+ layout.addLayout(left)
+ right = QtWidgets.QVBoxLayout()
+ layout.addLayout(right)
+
+ box = QtWidgets.QGroupBox("Borders")
+ left.addWidget(box)
+ inner = QtWidgets.QFormLayout(box)
+ for side in ["top", "bottom", "left", "right"]:
+ self._widgets[side] = widget = QtWidgets.QDoubleSpinBox()
+ widget.setMinimum(0)
+ widget.setMaximum(1)
+ widget.setDecimals(3)
+ widget.setSingleStep(.005)
+ widget.setKeyboardTracking(False)
+ inner.addRow(side, widget)
+ left.addStretch(1)
+
+ box = QtWidgets.QGroupBox("Spacings")
+ right.addWidget(box)
+ inner = QtWidgets.QFormLayout(box)
+ for side in ["hspace", "wspace"]:
+ self._widgets[side] = widget = QtWidgets.QDoubleSpinBox()
+ widget.setMinimum(0)
+ widget.setMaximum(1)
+ widget.setDecimals(3)
+ widget.setSingleStep(.005)
+ widget.setKeyboardTracking(False)
+ inner.addRow(side, widget)
+ right.addStretch(1)
+
+ widget = QtWidgets.QPushButton("Export values")
+ self._widgets["Export values"] = widget
+ # Don't trigger on <enter>, which is used to input values.
+ widget.setAutoDefault(False)
+ left.addWidget(widget)
+
+ for action in ["Tight layout", "Reset", "Close"]:
+ self._widgets[action] = widget = QtWidgets.QPushButton(action)
+ widget.setAutoDefault(False)
+ right.addWidget(widget)
+
+ self._widgets["Close"].setFocus()
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/tkagg.py b/contrib/python/matplotlib/py2/matplotlib/backends/tkagg.py
new file mode 100644
index 00000000000..072fcb48fee
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/tkagg.py
@@ -0,0 +1,44 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import tkinter as Tk
+
+import numpy as np
+
+from matplotlib.backends import _tkagg
+
+def blit(photoimage, aggimage, bbox=None, colormode=1):
+ tk = photoimage.tk
+
+ if bbox is not None:
+ bbox_array = bbox.__array__()
+ # x1, x2, y1, y2
+ bboxptr = (bbox_array[0, 0], bbox_array[1, 0],
+ bbox_array[0, 1], bbox_array[1, 1])
+ else:
+ bboxptr = 0
+ data = np.asarray(aggimage)
+ dataptr = (data.shape[0], data.shape[1], data.ctypes.data)
+ try:
+ tk.call(
+ "PyAggImagePhoto", photoimage,
+ dataptr, colormode, bboxptr)
+ except Tk.TclError:
+ if hasattr(tk, 'interpaddr'):
+ _tkagg.tkinit(tk.interpaddr(), 1)
+ else:
+ # very old python?
+ _tkagg.tkinit(tk, 0)
+ tk.call("PyAggImagePhoto", photoimage,
+ dataptr, colormode, bboxptr)
+
+def test(aggimage):
+ r = Tk.Tk()
+ c = Tk.Canvas(r, width=aggimage.width, height=aggimage.height)
+ c.pack()
+ p = Tk.PhotoImage(width=aggimage.width, height=aggimage.height)
+ blit(p, aggimage)
+ c.create_image(aggimage.width,aggimage.height,image=p)
+ blit(p, aggimage)
+ while True: r.update_idletasks()
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/windowing.py b/contrib/python/matplotlib/py2/matplotlib/backends/windowing.py
new file mode 100644
index 00000000000..6c2e495906c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/windowing.py
@@ -0,0 +1,31 @@
+"""
+MS Windows-specific helper for the TkAgg backend.
+
+With the default rcParams['tk.window_focus'] value of False, it is
+effectively disabled.
+
+It uses a tiny C++ extension module to access MS Win functions.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from matplotlib import rcParams
+
+try:
+ if not rcParams['tk.window_focus']:
+ raise ImportError
+ from matplotlib._windowing import GetForegroundWindow, SetForegroundWindow
+except ImportError:
+ def GetForegroundWindow():
+ return 0
+ def SetForegroundWindow(hwnd):
+ pass
+
+class FocusManager(object):
+ def __init__(self):
+ self._shellWindow = GetForegroundWindow()
+
+ def __del__(self):
+ SetForegroundWindow(self._shellWindow)
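+
+# Illustrative usage sketch (added comment, not in the original source):
+#     manager = FocusManager()   # remember the currently focused window
+#     ...                        # create and raise the Tk figure window
+#     del manager                # __del__ restores focus to the shell window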
diff --git a/contrib/python/matplotlib/py2/matplotlib/backends/wx_compat.py b/contrib/python/matplotlib/py2/matplotlib/backends/wx_compat.py
new file mode 100644
index 00000000000..e8467fc15d0
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/backends/wx_compat.py
@@ -0,0 +1,177 @@
+"""
+A wx API adapter to hide differences between wxPython classic and phoenix.
+
+It is assumed that the user code selects which version it wants to use;
+here we just ensure that it meets the minimum required by matplotlib.
+
+For an example see embedding_in_wx2.py
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from distutils.version import StrictVersion, LooseVersion
+
+missingwx = "Matplotlib backend_wx and backend_wxagg require wxPython>=2.9"
+
+
+try:
+ import wx
+ backend_version = wx.VERSION_STRING
+ is_phoenix = 'phoenix' in wx.PlatformInfo
+except ImportError:
+ raise ImportError(missingwx)
+
+try:
+ wx_version = StrictVersion(wx.VERSION_STRING)
+except ValueError:
+ wx_version = LooseVersion(wx.VERSION_STRING)
+
+# Ensure we have the correct version imported
+if wx_version < str("2.9"):
+ raise ImportError(missingwx)
+
+if is_phoenix:
+ # define all the wxPython phoenix stuff
+
+ # font styles, families and weight
+ fontweights = {
+ 100: wx.FONTWEIGHT_LIGHT,
+ 200: wx.FONTWEIGHT_LIGHT,
+ 300: wx.FONTWEIGHT_LIGHT,
+ 400: wx.FONTWEIGHT_NORMAL,
+ 500: wx.FONTWEIGHT_NORMAL,
+ 600: wx.FONTWEIGHT_NORMAL,
+ 700: wx.FONTWEIGHT_BOLD,
+ 800: wx.FONTWEIGHT_BOLD,
+ 900: wx.FONTWEIGHT_BOLD,
+ 'ultralight': wx.FONTWEIGHT_LIGHT,
+ 'light': wx.FONTWEIGHT_LIGHT,
+ 'normal': wx.FONTWEIGHT_NORMAL,
+ 'medium': wx.FONTWEIGHT_NORMAL,
+ 'semibold': wx.FONTWEIGHT_NORMAL,
+ 'bold': wx.FONTWEIGHT_BOLD,
+ 'heavy': wx.FONTWEIGHT_BOLD,
+ 'ultrabold': wx.FONTWEIGHT_BOLD,
+ 'black': wx.FONTWEIGHT_BOLD
+ }
+ fontangles = {
+ 'italic': wx.FONTSTYLE_ITALIC,
+ 'normal': wx.FONTSTYLE_NORMAL,
+ 'oblique': wx.FONTSTYLE_SLANT}
+
+ # wxPython allows for portable font styles, choosing them appropriately
+ # for the target platform. Map some standard font names to the portable
+ # styles
+    # QUESTION: Is it wise to agree on standard fontnames across all backends?
+ fontnames = {'Sans': wx.FONTFAMILY_SWISS,
+ 'Roman': wx.FONTFAMILY_ROMAN,
+ 'Script': wx.FONTFAMILY_SCRIPT,
+ 'Decorative': wx.FONTFAMILY_DECORATIVE,
+ 'Modern': wx.FONTFAMILY_MODERN,
+ 'Courier': wx.FONTFAMILY_MODERN,
+ 'courier': wx.FONTFAMILY_MODERN}
+
+ dashd_wx = {'solid': wx.PENSTYLE_SOLID,
+ 'dashed': wx.PENSTYLE_SHORT_DASH,
+ 'dashdot': wx.PENSTYLE_DOT_DASH,
+ 'dotted': wx.PENSTYLE_DOT}
+
+ # functions changes
+ BitmapFromBuffer = wx.Bitmap.FromBufferRGBA
+ EmptyBitmap = wx.Bitmap
+ EmptyImage = wx.Image
+ Cursor = wx.Cursor
+ EventLoop = wx.GUIEventLoop
+ NamedColour = wx.Colour
+ StockCursor = wx.Cursor
+
+else:
+ # define all the wxPython classic stuff
+
+ # font styles, families and weight
+ fontweights = {
+ 100: wx.LIGHT,
+ 200: wx.LIGHT,
+ 300: wx.LIGHT,
+ 400: wx.NORMAL,
+ 500: wx.NORMAL,
+ 600: wx.NORMAL,
+ 700: wx.BOLD,
+ 800: wx.BOLD,
+ 900: wx.BOLD,
+ 'ultralight': wx.LIGHT,
+ 'light': wx.LIGHT,
+ 'normal': wx.NORMAL,
+ 'medium': wx.NORMAL,
+ 'semibold': wx.NORMAL,
+ 'bold': wx.BOLD,
+ 'heavy': wx.BOLD,
+ 'ultrabold': wx.BOLD,
+ 'black': wx.BOLD
+ }
+ fontangles = {
+ 'italic': wx.ITALIC,
+ 'normal': wx.NORMAL,
+ 'oblique': wx.SLANT}
+
+ # wxPython allows for portable font styles, choosing them appropriately
+ # for the target platform. Map some standard font names to the portable
+ # styles
+    # QUESTION: Is it wise to agree on standard fontnames across all backends?
+ fontnames = {'Sans': wx.SWISS,
+ 'Roman': wx.ROMAN,
+ 'Script': wx.SCRIPT,
+ 'Decorative': wx.DECORATIVE,
+ 'Modern': wx.MODERN,
+ 'Courier': wx.MODERN,
+ 'courier': wx.MODERN}
+
+ dashd_wx = {'solid': wx.SOLID,
+ 'dashed': wx.SHORT_DASH,
+ 'dashdot': wx.DOT_DASH,
+ 'dotted': wx.DOT}
+
+ # functions changes
+ BitmapFromBuffer = wx.BitmapFromBufferRGBA
+ EmptyBitmap = wx.EmptyBitmap
+ EmptyImage = wx.EmptyImage
+ Cursor = wx.StockCursor
+ EventLoop = wx.EventLoop
+ NamedColour = wx.NamedColour
+ StockCursor = wx.StockCursor
+
+
+# wxPython Classic's DoAddTool has become AddTool in Phoenix. Otherwise
+# they are the same, except for early betas and prerelease builds of
+# Phoenix. This function provides a shim that does the RightThing based on
+# which wxPython is in use.
+def _AddTool(parent, wx_ids, text, bmp, tooltip_text):
+ if text in ['Pan', 'Zoom']:
+ kind = wx.ITEM_CHECK
+ else:
+ kind = wx.ITEM_NORMAL
+ if is_phoenix:
+ add_tool = parent.AddTool
+ else:
+ add_tool = parent.DoAddTool
+
+ if not is_phoenix or wx_version >= str("4.0.0b2"):
+ # NOTE: when support for Phoenix prior to 4.0.0b2 is dropped then
+ # all that is needed is this clause, and the if and else clause can
+ # be removed.
+ kwargs = dict(label=text,
+ bitmap=bmp,
+ bmpDisabled=wx.NullBitmap,
+ shortHelp=text,
+ longHelp=tooltip_text,
+ kind=kind)
+ else:
+ kwargs = dict(label=text,
+ bitmap=bmp,
+ bmpDisabled=wx.NullBitmap,
+ shortHelpString=text,
+ longHelpString=tooltip_text,
+ kind=kind)
+
+ return add_tool(wx_ids[text], **kwargs)
diff --git a/contrib/python/matplotlib/py2/matplotlib/bezier.py b/contrib/python/matplotlib/py2/matplotlib/bezier.py
new file mode 100644
index 00000000000..80fbd6137ef
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/bezier.py
@@ -0,0 +1,495 @@
+"""
+A module providing some utility functions regarding bezier path manipulation.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+from matplotlib.path import Path
+
+from operator import xor
+import warnings
+
+
+class NonIntersectingPathException(ValueError):
+ pass
+
+# some functions
+
+
+def get_intersection(cx1, cy1, cos_t1, sin_t1,
+ cx2, cy2, cos_t2, sin_t2):
+ """ return a intersecting point between a line through (cx1, cy1)
+ and having angle t1 and a line through (cx2, cy2) and angle t2.
+ """
+
+ # line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.
+    # line1 => sin_t1 * x - cos_t1 * y = sin_t1*cx1 - cos_t1*cy1
+
+ line1_rhs = sin_t1 * cx1 - cos_t1 * cy1
+ line2_rhs = sin_t2 * cx2 - cos_t2 * cy2
+
+ # rhs matrix
+ a, b = sin_t1, -cos_t1
+ c, d = sin_t2, -cos_t2
+
+ ad_bc = a * d - b * c
+ if ad_bc == 0.:
+ raise ValueError("Given lines do not intersect")
+
+ # rhs_inverse
+ a_, b_ = d, -b
+ c_, d_ = -c, a
+ a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]
+
+ x = a_ * line1_rhs + b_ * line2_rhs
+ y = c_ * line1_rhs + d_ * line2_rhs
+
+ return x, y
+
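+# Illustrative worked example (added comment, not in the original source):
+# a horizontal line through (0, 0) (cos_t1=1, sin_t1=0) and a vertical line
+# through (1, 1) (cos_t2=0, sin_t2=1) intersect at (1, 0), i.e.
+#     get_intersection(0, 0, 1, 0, 1, 1, 0, 1) == (1.0, 0.0)
+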
+
+def get_normal_points(cx, cy, cos_t, sin_t, length):
+ """
+    For a line passing through (*cx*, *cy*) and having an angle *t*, return
+ locations of the two points located along its perpendicular line at the
+ distance of *length*.
+ """
+
+ if length == 0.:
+ return cx, cy, cx, cy
+
+ cos_t1, sin_t1 = sin_t, -cos_t
+ cos_t2, sin_t2 = -sin_t, cos_t
+
+ x1, y1 = length * cos_t1 + cx, length * sin_t1 + cy
+ x2, y2 = length * cos_t2 + cx, length * sin_t2 + cy
+
+ return x1, y1, x2, y2
+
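+# Illustrative worked example (added comment, not in the original source):
+# for a horizontal line through the origin (cos_t=1, sin_t=0) and length 2,
+#     get_normal_points(0, 0, 1, 0, 2) == (0, -2, 0, 2)
+# i.e. the two points lie on the perpendicular, 2 units below and above.
+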
+
+# BEZIER routines
+
+# subdividing bezier curve
+# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
+
+
+def _de_casteljau1(beta, t):
+ next_beta = beta[:-1] * (1 - t) + beta[1:] * t
+ return next_beta
+
+
+def split_de_casteljau(beta, t):
+ """split a bezier segment defined by its controlpoints *beta*
+ into two separate segment divided at *t* and return their control points.
+
+ """
+ beta = np.asarray(beta)
+ beta_list = [beta]
+ while True:
+ beta = _de_casteljau1(beta, t)
+ beta_list.append(beta)
+ if len(beta) == 1:
+ break
+ left_beta = [beta[0] for beta in beta_list]
+ right_beta = [beta[-1] for beta in reversed(beta_list)]
+
+ return left_beta, right_beta
+
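+# Illustrative worked example (added comment, not in the original source):
+# splitting the quadratic segment [(0, 0), (1, 2), (2, 0)] at t=0.5 gives
+#     left  = [(0, 0), (0.5, 1), (1, 1)]
+#     right = [(1, 1), (1.5, 1), (2, 0)]
+# so both halves share the on-curve point (1, 1) at the split parameter.
+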
+
+# FIXME spelling mistake in the name of the parameter ``tolerence``
+def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
+ inside_closedpath,
+ t0=0., t1=1., tolerence=0.01):
+ """ Find a parameter t0 and t1 of the given bezier path which
+ bounds the intersecting points with a provided closed
+ path(*inside_closedpath*). Search starts from *t0* and *t1* and it
+ uses a simple bisecting algorithm therefore one of the end point
+ must be inside the path while the orther doesn't. The search stop
+ when |t0-t1| gets smaller than the given tolerence.
+ value for
+
+ - bezier_point_at_t : a function which returns x, y coordinates at *t*
+
+ - inside_closedpath : return True if the point is inside the path
+
+ """
+ # inside_closedpath : function
+
+ start = bezier_point_at_t(t0)
+ end = bezier_point_at_t(t1)
+
+ start_inside = inside_closedpath(start)
+ end_inside = inside_closedpath(end)
+
+ if start_inside == end_inside and start != end:
+ raise NonIntersectingPathException(
+ "Both points are on the same side of the closed path")
+
+ while True:
+
+ # return if the distance is smaller than the tolerence
+ if np.hypot(start[0] - end[0], start[1] - end[1]) < tolerence:
+ return t0, t1
+
+ # calculate the middle point
+ middle_t = 0.5 * (t0 + t1)
+ middle = bezier_point_at_t(middle_t)
+ middle_inside = inside_closedpath(middle)
+
+ if xor(start_inside, middle_inside):
+ t1 = middle_t
+ end = middle
+ end_inside = middle_inside
+ else:
+ t0 = middle_t
+ start = middle
+ start_inside = middle_inside
+
+
+class BezierSegment(object):
+ """
+    A simple class representing a 2-dimensional bezier segment
+ """
+
+    # Higher order bezier lines can be supported by simply adding
+ # corresponding values.
+ _binom_coeff = {1: np.array([1., 1.]),
+ 2: np.array([1., 2., 1.]),
+ 3: np.array([1., 3., 3., 1.])}
+
+ def __init__(self, control_points):
+ """
+        *control_points* : location of control points. It needs to have a
+        shape of n * 2, where n is the order of the bezier line. 1 <=
+        n <= 3 is supported.
+ """
+ _o = len(control_points)
+ self._orders = np.arange(_o)
+
+ _coeff = BezierSegment._binom_coeff[_o - 1]
+ xx, yy = np.asarray(control_points).T
+ self._px = xx * _coeff
+ self._py = yy * _coeff
+
+ def point_at_t(self, t):
+ "evaluate a point at t"
+ tt = ((1 - t) ** self._orders)[::-1] * t ** self._orders
+ _x = np.dot(tt, self._px)
+ _y = np.dot(tt, self._py)
+ return _x, _y
+
+
+def split_bezier_intersecting_with_closedpath(bezier,
+ inside_closedpath,
+ tolerence=0.01):
+
+ """
+ bezier : control points of the bezier segment
+ inside_closedpath : a function which returns true if the point is inside
+ the path
+ """
+
+ bz = BezierSegment(bezier)
+ bezier_point_at_t = bz.point_at_t
+
+ t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
+ inside_closedpath,
+ tolerence=tolerence)
+
+ _left, _right = split_de_casteljau(bezier, (t0 + t1) / 2.)
+ return _left, _right
+
+
+def find_r_to_boundary_of_closedpath(inside_closedpath, xy,
+ cos_t, sin_t,
+ rmin=0., rmax=1., tolerence=0.01):
+ """
+ Find a radius r (centered at *xy*) between *rmin* and *rmax* at
+    which it intersects the path.
+
+ inside_closedpath : function
+ cx, cy : center
+ cos_t, sin_t : cosine and sine for the angle
+ rmin, rmax :
+ """
+
+ cx, cy = xy
+
+ def _f(r):
+ return cos_t * r + cx, sin_t * r + cy
+
+ find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath,
+ t0=rmin, t1=rmax,
+ tolerence=tolerence)
+
+# matplotlib specific
+
+
+def split_path_inout(path, inside, tolerence=0.01, reorder_inout=False):
+ """ divide a path into two segment at the point where inside(x, y)
+ becomes False.
+ """
+
+ path_iter = path.iter_segments()
+
+ ctl_points, command = next(path_iter)
+ begin_inside = inside(ctl_points[-2:]) # true if begin point is inside
+
+ ctl_points_old = ctl_points
+
+ concat = np.concatenate
+
+ iold = 0
+ i = 1
+
+ for ctl_points, command in path_iter:
+ iold = i
+ i += len(ctl_points) // 2
+ if inside(ctl_points[-2:]) != begin_inside:
+ bezier_path = concat([ctl_points_old[-2:], ctl_points])
+ break
+ ctl_points_old = ctl_points
+ else:
+ raise ValueError("The path does not intersect with the patch")
+
+ bp = bezier_path.reshape((-1, 2))
+ left, right = split_bezier_intersecting_with_closedpath(
+ bp, inside, tolerence)
+ if len(left) == 2:
+ codes_left = [Path.LINETO]
+ codes_right = [Path.MOVETO, Path.LINETO]
+ elif len(left) == 3:
+ codes_left = [Path.CURVE3, Path.CURVE3]
+ codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
+ elif len(left) == 4:
+ codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
+ codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
+ else:
+ raise AssertionError("This should never be reached")
+
+ verts_left = left[1:]
+ verts_right = right[:]
+
+ if path.codes is None:
+ path_in = Path(concat([path.vertices[:i], verts_left]))
+ path_out = Path(concat([verts_right, path.vertices[i:]]))
+
+ else:
+ path_in = Path(concat([path.vertices[:iold], verts_left]),
+ concat([path.codes[:iold], codes_left]))
+
+ path_out = Path(concat([verts_right, path.vertices[i:]]),
+ concat([codes_right, path.codes[i:]]))
+
+ if reorder_inout and begin_inside is False:
+ path_in, path_out = path_out, path_in
+
+ return path_in, path_out
+
+
+def inside_circle(cx, cy, r):
+ r2 = r ** 2
+
+ def _f(xy):
+ x, y = xy
+ return (x - cx) ** 2 + (y - cy) ** 2 < r2
+ return _f
+
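+# Illustrative note (added comment, not in the original source):
+#     inside_circle(0, 0, 1)((0.5, 0.5)) -> True   (squared distance 0.5 < 1)
+#     inside_circle(0, 0, 1)((1.0, 1.0)) -> False  (squared distance 2.0 >= 1)
+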
+
+# quadratic bezier lines
+
+def get_cos_sin(x0, y0, x1, y1):
+ dx, dy = x1 - x0, y1 - y0
+ d = (dx * dx + dy * dy) ** .5
+ # Account for divide by zero
+ if d == 0:
+ return 0.0, 0.0
+ return dx / d, dy / d
+
+
+def check_if_parallel(dx1, dy1, dx2, dy2, tolerence=1.e-5):
+ """ returns
+    * 1 if the two lines are parallel in the same direction
+    * -1 if the two lines are parallel in opposite directions
+ * 0 otherwise
+ """
+ theta1 = np.arctan2(dx1, dy1)
+ theta2 = np.arctan2(dx2, dy2)
+ dtheta = np.abs(theta1 - theta2)
+ if dtheta < tolerence:
+ return 1
+ elif np.abs(dtheta - np.pi) < tolerence:
+ return -1
+ else:
+ return False
+
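+# Illustrative note (added comment, not in the original source):
+#     check_if_parallel(1, 0, 2, 0)  -> 1      (same direction)
+#     check_if_parallel(1, 0, -3, 0) -> -1     (opposite direction)
+#     check_if_parallel(1, 0, 0, 1)  -> False  (not parallel)
+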
+
+def get_parallels(bezier2, width):
+ """
+ Given the quadratic bezier control points *bezier2*, returns
+ control points of quadratic bezier lines roughly parallel to given
+ one separated by *width*.
+ """
+
+    # The parallel bezier lines are constructed in the following way.
+    # c1 and c2 are control points representing the beginning and end of the
+ # bezier line.
+ # cm is the middle point
+
+ c1x, c1y = bezier2[0]
+ cmx, cmy = bezier2[1]
+ c2x, c2y = bezier2[2]
+
+ parallel_test = check_if_parallel(c1x - cmx, c1y - cmy,
+ cmx - c2x, cmy - c2y)
+
+ if parallel_test == -1:
+ warnings.warn(
+ "Lines do not intersect. A straight line is used instead.")
+ cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y)
+ cos_t2, sin_t2 = cos_t1, sin_t1
+ else:
+        # t1 and t2 are the angles between c1 and cm and between cm and c2.
+        # They are also the angles of the tangential lines of the path at
+        # c1 and c2.
+ cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
+ cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
+
+ # find c1_left, c1_right which are located along the lines
+ # through c1 and perpendicular to the tangential lines of the
+ # bezier path at a distance of width. Same thing for c2_left and
+ # c2_right with respect to c2.
+ c1x_left, c1y_left, c1x_right, c1y_right = (
+ get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
+ )
+ c2x_left, c2y_left, c2x_right, c2y_right = (
+ get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
+ )
+
+    # find cm_left which is the intersecting point of a line through
+ # c1_left with angle t1 and a line through c2_left with angle
+ # t2. Same with cm_right.
+ if parallel_test != 0:
+        # a special case for a straight line, i.e., the angle between the
+        # two lines is smaller than some (arbitrary) value.
+ cmx_left, cmy_left = (
+ 0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left)
+ )
+ cmx_right, cmy_right = (
+ 0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right)
+ )
+ else:
+ cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1,
+ sin_t1, c2x_left, c2y_left,
+ cos_t2, sin_t2)
+
+ cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1,
+ sin_t1, c2x_right, c2y_right,
+ cos_t2, sin_t2)
+
+ # the parallel bezier lines are created with control points of
+ # [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
+ path_left = [(c1x_left, c1y_left),
+ (cmx_left, cmy_left),
+ (c2x_left, c2y_left)]
+ path_right = [(c1x_right, c1y_right),
+ (cmx_right, cmy_right),
+ (c2x_right, c2y_right)]
+
+ return path_left, path_right
+
+
+def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
+ """ Find control points of the bezier line through c1, mm, c2. We
+    simply assume that c1, mm, c2 have parametric values 0, 0.5, and 1.
+ """
+
+ cmx = .5 * (4 * mmx - (c1x + c2x))
+ cmy = .5 * (4 * mmy - (c1y + c2y))
+
+ return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
+
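+# Illustrative worked example (added comment, not in the original source):
+#     find_control_points(0, 0, 1, 1, 2, 0) == [(0, 0), (1.0, 2.0), (2, 0)]
+# The quadratic bezier with these control points indeed passes through the
+# requested midpoint: 0.25*(0, 0) + 0.5*(1, 2) + 0.25*(2, 0) == (1.0, 1.0).
+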
+
+def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
+ """
+    Similar to get_parallels, returns control points of two quadratic
+    bezier lines having a width roughly parallel to the given one,
+    separated by *width*.
+ """
+
+ # c1, cm, c2
+ c1x, c1y = bezier2[0]
+ cmx, cmy = bezier2[1]
+ c3x, c3y = bezier2[2]
+
+    # t1 and t2 are the angles between c1 and cm and between cm and c3.
+    # They are also the angles of the tangential lines of the path at c1 and c3
+ cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
+ cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
+
+ # find c1_left, c1_right which are located along the lines
+ # through c1 and perpendicular to the tangential lines of the
+ # bezier path at a distance of width. Same thing for c3_left and
+ # c3_right with respect to c3.
+ c1x_left, c1y_left, c1x_right, c1y_right = (
+ get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1)
+ )
+ c3x_left, c3y_left, c3x_right, c3y_right = (
+ get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2)
+ )
+
+ # find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and
+ # c12-c23
+ c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5
+ c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5
+ c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5
+
+ # tangential angle of c123 (angle between c12 and c23)
+ cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
+
+ c123x_left, c123y_left, c123x_right, c123y_right = (
+ get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm)
+ )
+
+ path_left = find_control_points(c1x_left, c1y_left,
+ c123x_left, c123y_left,
+ c3x_left, c3y_left)
+ path_right = find_control_points(c1x_right, c1y_right,
+ c123x_right, c123y_right,
+ c3x_right, c3y_right)
+
+ return path_left, path_right
+
+
+def make_path_regular(p):
+ """
+ fill in the codes if None.
+ """
+ c = p.codes
+ if c is None:
+ c = np.empty(p.vertices.shape[:1], "i")
+ c.fill(Path.LINETO)
+ c[0] = Path.MOVETO
+
+ return Path(p.vertices, c)
+ else:
+ return p
+
+
+def concatenate_paths(paths):
+ """
+ concatenate list of paths into a single path.
+ """
+
+ vertices = []
+ codes = []
+ for p in paths:
+ p = make_path_regular(p)
+ vertices.append(p.vertices)
+ codes.append(p.codes)
+
+ _path = Path(np.concatenate(vertices),
+ np.concatenate(codes))
+ return _path
diff --git a/contrib/python/matplotlib/py2/matplotlib/blocking_input.py b/contrib/python/matplotlib/py2/matplotlib/blocking_input.py
new file mode 100644
index 00000000000..090ffdb8647
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/blocking_input.py
@@ -0,0 +1,375 @@
+"""
+This provides several classes used for blocking interaction with figure
+windows:
+
+:class:`BlockingInput`
+ creates a callable object to retrieve events in a blocking way for
+ interactive sessions
+
+:class:`BlockingKeyMouseInput`
+ creates a callable object to retrieve key or mouse clicks in a blocking
+ way for interactive sessions.
+ Note: Subclass of BlockingInput. Used by waitforbuttonpress
+
+:class:`BlockingMouseInput`
+ creates a callable object to retrieve mouse clicks in a blocking way for
+ interactive sessions.
+ Note: Subclass of BlockingInput. Used by ginput
+
+:class:`BlockingContourLabeler`
+ creates a callable object to retrieve mouse clicks in a blocking way that
+ will then be used to place labels on a ContourSet
+ Note: Subclass of BlockingMouseInput. Used by clabel
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+import matplotlib.lines as mlines
+
+import logging
+
+_log = logging.getLogger(__name__)
+
+
+class BlockingInput(object):
+ """
+ Class that creates a callable object to retrieve events in a
+ blocking way.
+ """
+ def __init__(self, fig, eventslist=()):
+ self.fig = fig
+ self.eventslist = eventslist
+
+ def on_event(self, event):
+ """
+ Event handler that will be passed to the current figure to
+ retrieve events.
+ """
+ # Add a new event to list - using a separate function is
+ # overkill for the base class, but this is consistent with
+ # subclasses
+ self.add_event(event)
+ _log.info("Event %i", len(self.events))
+
+ # This will extract info from events
+ self.post_event()
+
+ # Check if we have enough events already
+ if len(self.events) >= self.n and self.n > 0:
+ self.fig.canvas.stop_event_loop()
+
+ def post_event(self):
+ """For baseclass, do nothing but collect events"""
+ pass
+
+ def cleanup(self):
+ """Disconnect all callbacks"""
+ for cb in self.callbacks:
+ self.fig.canvas.mpl_disconnect(cb)
+
+ self.callbacks = []
+
+ def add_event(self, event):
+ """For base class, this just appends an event to events."""
+ self.events.append(event)
+
+ def pop_event(self, index=-1):
+ """
+ This removes an event from the event list. Defaults to
+        removing the last event, but an index can be supplied. Note that
+        this does not check that there are events, much like the
+        normal pop method. If no events exist, this will throw an
+ exception.
+ """
+ self.events.pop(index)
+
+ def pop(self, index=-1):
+ self.pop_event(index)
+ pop.__doc__ = pop_event.__doc__
+
+ def __call__(self, n=1, timeout=30):
+ """
+ Blocking call to retrieve n events
+ """
+
+ if not isinstance(n, int):
+ raise ValueError("Requires an integer argument")
+ self.n = n
+
+ self.events = []
+ self.callbacks = []
+
+ if hasattr(self.fig.canvas, "manager"):
+ # Ensure that the figure is shown, if we are managing it.
+ self.fig.show()
+
+ # connect the events to the on_event function call
+ for n in self.eventslist:
+ self.callbacks.append(
+ self.fig.canvas.mpl_connect(n, self.on_event))
+
+ try:
+ # Start event loop
+ self.fig.canvas.start_event_loop(timeout=timeout)
+ finally: # Run even on exception like ctrl-c
+ # Disconnect the callbacks
+ self.cleanup()
+
+ # Return the events in this case
+ return self.events
+
+
+class BlockingMouseInput(BlockingInput):
+ """
+ Class that creates a callable object to retrieve mouse clicks in a
+ blocking way.
+
+ This class will also retrieve keyboard clicks and treat them like
+ appropriate mouse clicks (delete and backspace are like mouse button 3,
+ enter is like mouse button 2 and all others are like mouse button 1).
+ """
+
+ button_add = 1
+ button_pop = 3
+ button_stop = 2
+
+ def __init__(self, fig, mouse_add=1, mouse_pop=3, mouse_stop=2):
+ BlockingInput.__init__(self, fig=fig,
+ eventslist=('button_press_event',
+ 'key_press_event'))
+ self.button_add = mouse_add
+ self.button_pop = mouse_pop
+ self.button_stop = mouse_stop
+
+ def post_event(self):
+ """
+ This will be called to process events
+ """
+ if len(self.events) == 0:
+ _log.warning("No events yet")
+ elif self.events[-1].name == 'key_press_event':
+ self.key_event()
+ else:
+ self.mouse_event()
+
+ def mouse_event(self):
+ '''Process a mouse click event'''
+
+ event = self.events[-1]
+ button = event.button
+
+ if button == self.button_pop:
+ self.mouse_event_pop(event)
+ elif button == self.button_stop:
+ self.mouse_event_stop(event)
+ else:
+ self.mouse_event_add(event)
+
+ def key_event(self):
+ '''
+ Process a key click event. This maps certain keys to appropriate
+ mouse click events.
+ '''
+
+ event = self.events[-1]
+ if event.key is None:
+            # at least in the macOS gtk backend some keys return None.
+ return
+
+ key = event.key.lower()
+
+ if key in ['backspace', 'delete']:
+ self.mouse_event_pop(event)
+ elif key in ['escape', 'enter']:
+ # on windows XP and wxAgg, the enter key doesn't seem to register
+ self.mouse_event_stop(event)
+ else:
+ self.mouse_event_add(event)
+
+ def mouse_event_add(self, event):
+ """
+ Will be called for any event involving a button other than
+ button 2 or 3. This will add a click if it is inside axes.
+ """
+ if event.inaxes:
+ self.add_click(event)
+ else: # If not a valid click, remove from event list
+ BlockingInput.pop(self, -1)
+
+ def mouse_event_stop(self, event):
+ """
+ Will be called for any event involving button 2.
+ Button 2 ends blocking input.
+ """
+
+ # Remove last event just for cleanliness
+ BlockingInput.pop(self, -1)
+
+ # This will exit even if not in infinite mode. This is
+ # consistent with MATLAB and sometimes quite useful, but will
+ # require the user to test how many points were actually
+ # returned before using data.
+ self.fig.canvas.stop_event_loop()
+
+ def mouse_event_pop(self, event):
+ """
+ Will be called for any event involving button 3.
+ Button 3 removes the last click.
+ """
+ # Remove this last event
+ BlockingInput.pop(self, -1)
+
+ # Now remove any existing clicks if possible
+ if len(self.events) > 0:
+ self.pop(event, -1)
+
+ def add_click(self, event):
+ """
+        This adds the coordinates of an event to the list of clicks
+ """
+ self.clicks.append((event.xdata, event.ydata))
+ _log.info("input %i: %f,%f" %
+ (len(self.clicks), event.xdata, event.ydata))
+
+ # If desired plot up click
+ if self.show_clicks:
+ line = mlines.Line2D([event.xdata], [event.ydata],
+ marker='+', color='r')
+ event.inaxes.add_line(line)
+ self.marks.append(line)
+ self.fig.canvas.draw()
+
+ def pop_click(self, event, index=-1):
+ """
+ This removes a click from the list of clicks. Defaults to
+ removing the last click.
+ """
+ self.clicks.pop(index)
+
+ if self.show_clicks:
+
+ mark = self.marks.pop(index)
+ mark.remove()
+
+ self.fig.canvas.draw()
+            # NOTE: I do NOT understand why the above 3 lines do not work
+ # for the keyboard backspace event on windows XP wxAgg.
+ # maybe event.inaxes here is a COPY of the actual axes?
+
+ def pop(self, event, index=-1):
+ """
+ This removes a click and the associated event from the object.
+ Defaults to removing the last click, but any index can be
+ supplied.
+ """
+ self.pop_click(event, index)
+ BlockingInput.pop(self, index)
+
+ def cleanup(self, event=None):
+ # clean the figure
+ if self.show_clicks:
+
+ for mark in self.marks:
+ mark.remove()
+ self.marks = []
+
+ self.fig.canvas.draw()
+
+ # Call base class to remove callbacks
+ BlockingInput.cleanup(self)
+
+ def __call__(self, n=1, timeout=30, show_clicks=True):
+ """
+ Blocking call to retrieve n coordinate pairs through mouse
+ clicks.
+ """
+ self.show_clicks = show_clicks
+ self.clicks = []
+ self.marks = []
+ BlockingInput.__call__(self, n=n, timeout=timeout)
+
+ return self.clicks
+
+
+class BlockingContourLabeler(BlockingMouseInput):
+ """
+ Class that creates a callable object that uses mouse clicks or key
+ clicks on a figure window to place contour labels.
+ """
+ def __init__(self, cs):
+ self.cs = cs
+ BlockingMouseInput.__init__(self, fig=cs.ax.figure)
+
+ def add_click(self, event):
+ self.button1(event)
+
+ def pop_click(self, event, index=-1):
+ self.button3(event)
+
+ def button1(self, event):
+ """
+ This will be called if an event involving a button other than
+        2 or 3 occurs. This will add a label to a contour.
+ """
+
+ # Shorthand
+ if event.inaxes == self.cs.ax:
+ self.cs.add_label_near(event.x, event.y, self.inline,
+ inline_spacing=self.inline_spacing,
+ transform=False)
+ self.fig.canvas.draw()
+ else: # Remove event if not valid
+ BlockingInput.pop(self)
+
+ def button3(self, event):
+ """
+ This will be called if button 3 is clicked. This will remove
+ a label if not in inline mode. Unfortunately, if one is doing
+ inline labels, then there is currently no way to fix the
+ broken contour - once humpty-dumpty is broken, he can't be put
+ back together. In inline mode, this does nothing.
+ """
+
+ if self.inline:
+ pass
+ else:
+ self.cs.pop_label()
+ self.cs.ax.figure.canvas.draw()
+
+ def __call__(self, inline, inline_spacing=5, n=-1, timeout=-1):
+ self.inline = inline
+ self.inline_spacing = inline_spacing
+
+ BlockingMouseInput.__call__(self, n=n, timeout=timeout,
+ show_clicks=False)
+
+
+class BlockingKeyMouseInput(BlockingInput):
+ """
+ Class that creates a callable object to retrieve a single mouse or
+ keyboard click
+ """
+ def __init__(self, fig):
+ BlockingInput.__init__(self, fig=fig, eventslist=(
+ 'button_press_event', 'key_press_event'))
+
+ def post_event(self):
+ """
+ Determines if it is a key event
+ """
+ if len(self.events) == 0:
+ _log.warning("No events yet")
+ else:
+ self.keyormouse = self.events[-1].name == 'key_press_event'
+
+ def __call__(self, timeout=30):
+ """
+ Blocking call to retrieve a single mouse or key click
+ Returns True if key click, False if mouse, or None if timeout
+ """
+ self.keyormouse = None
+ BlockingInput.__call__(self, n=1, timeout=timeout)
+
+ return self.keyormouse
diff --git a/contrib/python/matplotlib/py2/matplotlib/category.py b/contrib/python/matplotlib/py2/matplotlib/category.py
new file mode 100644
index 00000000000..b135bff1ccf
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/category.py
@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+"""
+Module that allows plotting of string "category" data. For example,
+``plot(['d', 'f', 'a'], [1, 2, 3])`` will plot three points with x-axis
+values of 'd', 'f', 'a'.
+
+See :doc:`/gallery/lines_bars_and_markers/categorical_variables` for an
+example.
+
+The module uses Matplotlib's `matplotlib.units` mechanism to convert from
+strings to integers, provides a tick locator and formatter, and the
+:class:`.UnitData` that creates and stores the string-to-integer mapping.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from collections import OrderedDict
+import itertools
+
+import six
+
+
+import numpy as np
+
+import matplotlib.units as units
+import matplotlib.ticker as ticker
+
+# np 1.6/1.7 support
+from distutils.version import LooseVersion
+
+VALID_TYPES = tuple(set(six.string_types +
+ (bytes, six.text_type, np.str_, np.bytes_)))
+
+
+class StrCategoryConverter(units.ConversionInterface):
+ @staticmethod
+ def convert(value, unit, axis):
+ """Converts strings in value to floats using
+        mapping information stored in the unit object
+
+ Parameters
+ ----------
+ value : string or iterable
+ value or list of values to be converted
+ unit : :class:`.UnitData`
+ object string unit information for value
+ axis : :class:`~matplotlib.Axis.axis`
+ axis on which the converted value is plotted
+
+ Returns
+ -------
+        mapped_value : float or ndarray[float]
+
+ .. note:: axis is not used in this function
+ """
+ # dtype = object preserves numerical pass throughs
+ values = np.atleast_1d(np.array(value, dtype=object))
+
+ # pass through sequence of non binary numbers
+ if all((units.ConversionInterface.is_numlike(v) and
+ not isinstance(v, VALID_TYPES)) for v in values):
+ return np.asarray(values, dtype=float)
+
+ # force an update so it also does type checking
+ unit.update(values)
+
+ str2idx = np.vectorize(unit._mapping.__getitem__,
+ otypes=[float])
+
+ mapped_value = str2idx(values)
+ return mapped_value
+
+ @staticmethod
+ def axisinfo(unit, axis):
+ """Sets the default axis ticks and labels
+
+ Parameters
+        ----------
+ unit : :class:`.UnitData`
+ object string unit information for value
+ axis : :class:`~matplotlib.Axis.axis`
+ axis for which information is being set
+
+ Returns
+ -------
+        :class:`~matplotlib.units.AxisInfo`
+ Information to support default tick labeling
+
+        .. note:: axis is not used
+ """
+ # locator and formatter take mapping dict because
+        # args need to be passed by reference for updates
+ majloc = StrCategoryLocator(unit._mapping)
+ majfmt = StrCategoryFormatter(unit._mapping)
+ return units.AxisInfo(majloc=majloc, majfmt=majfmt)
+
+ @staticmethod
+ def default_units(data, axis):
+        """ Sets and updates the :class:`~matplotlib.Axis.axis` units
+
+ Parameters
+ ----------
+ data : string or iterable of strings
+ axis : :class:`~matplotlib.Axis.axis`
+ axis on which the data is plotted
+
+ Returns
+ -------
+        :class:`~.UnitData`
+ object storing string to integer mapping
+ """
+ # the conversion call stack is supposed to be
+ # default_units->axis_info->convert
+ if axis.units is None:
+ axis.set_units(UnitData(data))
+ else:
+ axis.units.update(data)
+ return axis.units
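+
+# A minimal sketch of how the converter behaves, for illustration.  Plain
+# numbers pass through unchanged, while strings are mapped to the integer
+# codes stored in a UnitData instance (defined later in this module):
+#
+#     u = UnitData(['apple', 'pear'])
+#     StrCategoryConverter.convert(['pear', 'apple', 'pear'], u, None)
+#     # -> array([1., 0., 1.])
+#     StrCategoryConverter.convert(3.0, u, None)
+#     # -> array([3.])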
+
+
+class StrCategoryLocator(ticker.Locator):
+ """tick at every integer mapping of the string data"""
+ def __init__(self, units_mapping):
+ """
+ Parameters
+        ----------
+ units_mapping : Dict[str, int]
+ string:integer mapping
+ """
+ self._units = units_mapping
+
+ def __call__(self):
+ return list(self._units.values())
+
+ def tick_values(self, vmin, vmax):
+ return self()
+
+
+class StrCategoryFormatter(ticker.Formatter):
+ """String representation of the data at every tick"""
+ def __init__(self, units_mapping):
+ """
+ Parameters
+ ----------
+        units_mapping : Dict[str, int]
+ string:integer mapping
+ """
+ self._units = units_mapping
+
+ def __call__(self, x, pos=None):
+ if pos is None:
+ return ""
+ r_mapping = {v: StrCategoryFormatter._text(k)
+ for k, v in self._units.items()}
+ return r_mapping.get(int(np.round(x)), '')
+
+ @staticmethod
+ def _text(value):
+ """Converts text values into `utf-8` or `ascii` strings
+ """
+ if LooseVersion(np.__version__) < LooseVersion('1.7.0'):
+ if (isinstance(value, (six.text_type, np.unicode))):
+ value = value.encode('utf-8', 'ignore').decode('utf-8')
+ if isinstance(value, (np.bytes_, six.binary_type)):
+ value = value.decode(encoding='utf-8')
+ elif not isinstance(value, (np.str_, six.string_types)):
+ value = str(value)
+ return value
+
+
+class UnitData(object):
+ def __init__(self, data=None):
+        """Create mapping between unique categorical values
+        and integer identifiers.
+
+        Parameters
+        ----------
+        data : iterable
+ sequence of string values
+ """
+ self._mapping = OrderedDict()
+ self._counter = itertools.count(start=0)
+ if data is not None:
+ self.update(data)
+
+ def update(self, data):
+ """Maps new values to integer identifiers.
+
+        Parameters
+        ----------
+ data: iterable
+ sequence of string values
+
+ Raises
+ ------
+ TypeError
+            If the value in data is not a string, unicode, or bytes type
+ """
+ data = np.atleast_1d(np.array(data, dtype=object))
+
+ for val in OrderedDict.fromkeys(data):
+ if not isinstance(val, VALID_TYPES):
+ raise TypeError("{val!r} is not a string".format(val=val))
+ if val not in self._mapping:
+ self._mapping[val] = next(self._counter)
+
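+# A short sketch of the mapping UnitData maintains, for illustration (the
+# private ``_mapping`` attribute is shown only to make the behaviour
+# concrete):
+#
+#     u = UnitData(['apple', 'pear'])
+#     list(u._mapping.items())   # -> [('apple', 0), ('pear', 1)]
+#     u.update(['plum'])
+#     u._mapping['plum']         # -> 2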
+
+# Connects the converter to matplotlib
+units.registry[str] = StrCategoryConverter()
+units.registry[np.str_] = StrCategoryConverter()
+units.registry[six.text_type] = StrCategoryConverter()
+units.registry[bytes] = StrCategoryConverter()
+units.registry[np.bytes_] = StrCategoryConverter()
diff --git a/contrib/python/matplotlib/py2/matplotlib/cbook/__init__.py b/contrib/python/matplotlib/py2/matplotlib/cbook/__init__.py
new file mode 100644
index 00000000000..dcb2d0549de
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/cbook/__init__.py
@@ -0,0 +1,2852 @@
+"""
+A collection of utility functions and classes. Originally, many
+(but not all) were from the Python Cookbook -- hence the name cbook.
+
+This module is safe to import from anywhere within matplotlib;
+it imports matplotlib only at runtime.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import six
+from six.moves import xrange, zip
+import collections
+import contextlib
+import datetime
+import errno
+import functools
+import glob
+import gzip
+import io
+from itertools import repeat
+import locale
+import numbers
+import operator
+import os
+import re
+import sys
+import time
+import traceback
+import types
+import warnings
+from weakref import ref, WeakKeyDictionary
+
+import numpy as np
+
+import matplotlib
+from .deprecation import deprecated, warn_deprecated
+from .deprecation import mplDeprecation, MatplotlibDeprecationWarning
+
+
+def unicode_safe(s):
+
+ if isinstance(s, bytes):
+ try:
+ # On some systems, locale.getpreferredencoding returns None,
+ # which can break unicode; and the sage project reports that
+ # some systems have incorrect locale specifications, e.g.,
+ # an encoding instead of a valid locale name. Another
+ # pathological case that has been reported is an empty string.
+ # On some systems, getpreferredencoding sets the locale, which has
+ # side effects. Passing False eliminates those side effects.
+ preferredencoding = locale.getpreferredencoding(
+ matplotlib.rcParams['axes.formatter.use_locale']).strip()
+ if not preferredencoding:
+ preferredencoding = None
+ except (ValueError, ImportError, AttributeError):
+ preferredencoding = None
+
+ if preferredencoding is None:
+ return six.text_type(s)
+ else:
+ return six.text_type(s, preferredencoding)
+ return s
+
+
+@deprecated('2.1')
+class converter(object):
+ """
+ Base class for handling string -> python type with support for
+ missing values
+ """
+ def __init__(self, missing='Null', missingval=None):
+ self.missing = missing
+ self.missingval = missingval
+
+ def __call__(self, s):
+ if s == self.missing:
+ return self.missingval
+ return s
+
+ def is_missing(self, s):
+ return not s.strip() or s == self.missing
+
+
+@deprecated('2.1')
+class tostr(converter):
+ """convert to string or None"""
+ def __init__(self, missing='Null', missingval=''):
+ converter.__init__(self, missing=missing, missingval=missingval)
+
+
+@deprecated('2.1')
+class todatetime(converter):
+ """convert to a datetime or None"""
+ def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
+ 'use a :func:`time.strptime` format string for conversion'
+ converter.__init__(self, missing, missingval)
+ self.fmt = fmt
+
+ def __call__(self, s):
+ if self.is_missing(s):
+ return self.missingval
+ tup = time.strptime(s, self.fmt)
+ return datetime.datetime(*tup[:6])
+
+
+@deprecated('2.1')
+class todate(converter):
+ """convert to a date or None"""
+ def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
+ """use a :func:`time.strptime` format string for conversion"""
+ converter.__init__(self, missing, missingval)
+ self.fmt = fmt
+
+ def __call__(self, s):
+ if self.is_missing(s):
+ return self.missingval
+ tup = time.strptime(s, self.fmt)
+ return datetime.date(*tup[:3])
+
+
+@deprecated('2.1')
+class tofloat(converter):
+ """convert to a float or None"""
+ def __init__(self, missing='Null', missingval=None):
+ converter.__init__(self, missing)
+ self.missingval = missingval
+
+ def __call__(self, s):
+ if self.is_missing(s):
+ return self.missingval
+ return float(s)
+
+
+@deprecated('2.1')
+class toint(converter):
+ """convert to an int or None"""
+ def __init__(self, missing='Null', missingval=None):
+        converter.__init__(self, missing)
+        self.missingval = missingval
+
+ def __call__(self, s):
+ if self.is_missing(s):
+ return self.missingval
+ return int(s)
+
+
+class _BoundMethodProxy(object):
+ """
+ Our own proxy object which enables weak references to bound and unbound
+ methods and arbitrary callables. Pulls information about the function,
+ class, and instance out of a bound method. Stores a weak reference to the
+ instance to support garbage collection.
+
+ @organization: IBM Corporation
+ @copyright: Copyright (c) 2005, 2006 IBM Corporation
+ @license: The BSD License
+
+ Minor bugfixes by Michael Droettboom
+ """
+ def __init__(self, cb):
+ self._hash = hash(cb)
+ self._destroy_callbacks = []
+ try:
+ try:
+ if six.PY3:
+ self.inst = ref(cb.__self__, self._destroy)
+ else:
+ self.inst = ref(cb.im_self, self._destroy)
+ except TypeError:
+ self.inst = None
+ if six.PY3:
+ self.func = cb.__func__
+ self.klass = cb.__self__.__class__
+ else:
+ self.func = cb.im_func
+ self.klass = cb.im_class
+ except AttributeError:
+ self.inst = None
+ self.func = cb
+ self.klass = None
+
+ def add_destroy_callback(self, callback):
+ self._destroy_callbacks.append(_BoundMethodProxy(callback))
+
+ def _destroy(self, wk):
+ for callback in self._destroy_callbacks:
+ try:
+ callback(self)
+ except ReferenceError:
+ pass
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ # de-weak reference inst
+ inst = d['inst']
+ if inst is not None:
+ d['inst'] = inst()
+ return d
+
+ def __setstate__(self, statedict):
+ self.__dict__ = statedict
+ inst = statedict['inst']
+ # turn inst back into a weakref
+ if inst is not None:
+ self.inst = ref(inst)
+
+ def __call__(self, *args, **kwargs):
+ """
+ Proxy for a call to the weak referenced object. Take
+ arbitrary params to pass to the callable.
+
+ Raises `ReferenceError`: When the weak reference refers to
+ a dead object
+ """
+ if self.inst is not None and self.inst() is None:
+ raise ReferenceError
+ elif self.inst is not None:
+ # build a new instance method with a strong reference to the
+ # instance
+
+ mtd = types.MethodType(self.func, self.inst())
+
+ else:
+ # not a bound method, just return the func
+ mtd = self.func
+ # invoke the callable and return the result
+ return mtd(*args, **kwargs)
+
+ def __eq__(self, other):
+ """
+ Compare the held function and instance with that held by
+ another proxy.
+ """
+ try:
+ if self.inst is None:
+ return self.func == other.func and other.inst is None
+ else:
+ return self.func == other.func and self.inst() == other.inst()
+ except Exception:
+ return False
+
+ def __ne__(self, other):
+ """
+ Inverse of __eq__.
+ """
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return self._hash
+
+
+def _exception_printer(exc):
+ traceback.print_exc()
+
+
+class CallbackRegistry(object):
+ """Handle registering and disconnecting for a set of signals and callbacks:
+
+ >>> def oneat(x):
+ ... print('eat', x)
+ >>> def ondrink(x):
+ ... print('drink', x)
+
+ >>> from matplotlib.cbook import CallbackRegistry
+ >>> callbacks = CallbackRegistry()
+
+ >>> id_eat = callbacks.connect('eat', oneat)
+ >>> id_drink = callbacks.connect('drink', ondrink)
+
+ >>> callbacks.process('drink', 123)
+ drink 123
+ >>> callbacks.process('eat', 456)
+ eat 456
+ >>> callbacks.process('be merry', 456) # nothing will be called
+ >>> callbacks.disconnect(id_eat)
+ >>> callbacks.process('eat', 456) # nothing will be called
+
+ In practice, one should always disconnect all callbacks when they
+ are no longer needed to avoid dangling references (and thus memory
+ leaks). However, real code in matplotlib rarely does so, and due
+ to its design, it is rather difficult to place this kind of code.
+ To get around this, and prevent this class of memory leaks, we
+ instead store weak references to bound methods only, so when the
+ destination object needs to die, the CallbackRegistry won't keep
+    it alive.  The Python stdlib weakref module cannot create weak
+ references to bound methods directly, so we need to create a proxy
+ object to handle weak references to bound methods (or regular free
+ functions). This technique was shared by Peter Parente on his
+ `"Mindtrove" blog
+ <http://mindtrove.info/python-weak-references/>`_.
+
+
+ Parameters
+ ----------
+ exception_handler : callable, optional
+ If provided must have signature ::
+
+ def handler(exc: Exception) -> None:
+
+ If not None this function will be called with any `Exception`
+ subclass raised by the callbacks in `CallbackRegistry.process`.
+ The handler may either consume the exception or re-raise.
+
+ The callable must be pickle-able.
+
+ The default handler is ::
+
+ def h(exc):
+ traceback.print_exc()
+
+ """
+ def __init__(self, exception_handler=_exception_printer):
+ self.exception_handler = exception_handler
+ self.callbacks = dict()
+ self._cid = 0
+ self._func_cid_map = {}
+
+ # In general, callbacks may not be pickled; thus, we simply recreate an
+ # empty dictionary at unpickling. In order to ensure that `__setstate__`
+ # (which just defers to `__init__`) is called, `__getstate__` must
+ # return a truthy value (for pickle protocol>=3, i.e. Py3, the
+ # *actual* behavior is that `__setstate__` will be called as long as
+ # `__getstate__` does not return `None`, but this is undocumented -- see
+ # http://bugs.python.org/issue12290).
+
+ def __getstate__(self):
+ return {'exception_handler': self.exception_handler}
+
+ def __setstate__(self, state):
+ self.__init__(**state)
+
+ def connect(self, s, func):
+ """Register *func* to be called when signal *s* is generated.
+ """
+ self._func_cid_map.setdefault(s, WeakKeyDictionary())
+ # Note proxy not needed in python 3.
+ # TODO rewrite this when support for python2.x gets dropped.
+ proxy = _BoundMethodProxy(func)
+ if proxy in self._func_cid_map[s]:
+ return self._func_cid_map[s][proxy]
+
+ proxy.add_destroy_callback(self._remove_proxy)
+ self._cid += 1
+ cid = self._cid
+ self._func_cid_map[s][proxy] = cid
+ self.callbacks.setdefault(s, dict())
+ self.callbacks[s][cid] = proxy
+ return cid
+
+ def _remove_proxy(self, proxy):
+ for signal, proxies in list(six.iteritems(self._func_cid_map)):
+ try:
+ del self.callbacks[signal][proxies[proxy]]
+ except KeyError:
+ pass
+
+ if len(self.callbacks[signal]) == 0:
+ del self.callbacks[signal]
+ del self._func_cid_map[signal]
+
+ def disconnect(self, cid):
+ """Disconnect the callback registered with callback id *cid*.
+ """
+ for eventname, callbackd in list(six.iteritems(self.callbacks)):
+ try:
+ del callbackd[cid]
+ except KeyError:
+ continue
+ else:
+ for signal, functions in list(
+ six.iteritems(self._func_cid_map)):
+ for function, value in list(six.iteritems(functions)):
+ if value == cid:
+ del functions[function]
+ return
+
+ def process(self, s, *args, **kwargs):
+ """
+ Process signal *s*.
+
+ All of the functions registered to receive callbacks on *s* will be
+ called with ``*args`` and ``**kwargs``.
+ """
+ if s in self.callbacks:
+ for cid, proxy in list(six.iteritems(self.callbacks[s])):
+ try:
+ proxy(*args, **kwargs)
+ except ReferenceError:
+ self._remove_proxy(proxy)
+ # this does not capture KeyboardInterrupt, SystemExit,
+ # and GeneratorExit
+ except Exception as exc:
+ if self.exception_handler is not None:
+ self.exception_handler(exc)
+ else:
+ raise
+
+
+class silent_list(list):
+ """
+ override repr when returning a list of matplotlib artists to
+ prevent long, meaningless output. This is meant to be used for a
+ homogeneous list of a given type
+ """
+ def __init__(self, type, seq=None):
+ self.type = type
+ if seq is not None:
+ self.extend(seq)
+
+ def __repr__(self):
+ return '<a list of %d %s objects>' % (len(self), self.type)
+
+ def __str__(self):
+ return repr(self)
+
+ def __getstate__(self):
+ # store a dictionary of this SilentList's state
+ return {'type': self.type, 'seq': self[:]}
+
+ def __setstate__(self, state):
+ self.type = state['type']
+ self.extend(state['seq'])
+
+
+class IgnoredKeywordWarning(UserWarning):
+ """
+ A class for issuing warnings about keyword arguments that will be ignored
+ by matplotlib
+ """
+ pass
+
+
+def local_over_kwdict(local_var, kwargs, *keys):
+ """
+ Enforces the priority of a local variable over potentially conflicting
+ argument(s) from a kwargs dict. The following possible output values are
+ considered in order of priority:
+
+ local_var > kwargs[keys[0]] > ... > kwargs[keys[-1]]
+
+ The first of these whose value is not None will be returned. If all are
+ None then None will be returned. Each key in keys will be removed from the
+ kwargs dict in place.
+
+ Parameters
+ ----------
+ local_var: any object
+ The local variable (highest priority)
+
+ kwargs: dict
+ Dictionary of keyword arguments; modified in place
+
+ keys: str(s)
+ Name(s) of keyword arguments to process, in descending order of
+ priority
+
+ Returns
+ -------
+ out: any object
+ Either local_var or one of kwargs[key] for key in keys
+
+ Raises
+ ------
+ IgnoredKeywordWarning
+ For each key in keys that is removed from kwargs but not used as
+ the output value
+
+ """
+ out = local_var
+ for key in keys:
+ kwarg_val = kwargs.pop(key, None)
+ if kwarg_val is not None:
+ if out is None:
+ out = kwarg_val
+ else:
+ warnings.warn('"%s" keyword argument will be ignored' % key,
+ IgnoredKeywordWarning)
+ return out
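+
+# A minimal sketch of the priority rule above, for illustration:
+#
+#     kw = {'lw': 2}
+#     local_over_kwdict(None, kw, 'lw')       # -> 2 (no local value: kwarg wins)
+#     local_over_kwdict(5, {'lw': 2}, 'lw')   # -> 5 (local wins; 'lw' is popped
+#                                             #    and IgnoredKeywordWarning issued)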
+
+
+def strip_math(s):
+ """remove latex formatting from mathtext"""
+ remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
+ s = s[1:-1]
+ for r in remove:
+ s = s.replace(r, '')
+ return s
+
+
+class Bunch(object):
+ """
+ Often we want to just collect a bunch of stuff together, naming each
+    item of the bunch; a dictionary's OK for that, but a small do-nothing
+ class is even handier, and prettier to use. Whenever you want to
+ group a few variables::
+
+ >>> point = Bunch(datum=2, squared=4, coord=12)
+ >>> point.datum
+
+ By: Alex Martelli
+ From: https://code.activestate.com/recipes/121294/
+ """
+ def __init__(self, **kwds):
+ self.__dict__.update(kwds)
+
+ def __repr__(self):
+ return 'Bunch(%s)' % ', '.join(
+ '%s=%s' % kv for kv in six.iteritems(vars(self)))
+
+
+@deprecated('2.1')
+def unique(x):
+ """Return a list of unique elements of *x*"""
+ return list(set(x))
+
+
+def iterable(obj):
+ """return true if *obj* is iterable"""
+ try:
+ iter(obj)
+ except TypeError:
+ return False
+ return True
+
+
+@deprecated('2.1')
+def is_string_like(obj):
+ """Return True if *obj* looks like a string"""
+ # (np.str_ == np.unicode_ on Py3).
+ return isinstance(obj, (six.string_types, np.str_, np.unicode_))
+
+
+@deprecated('2.1')
+def is_sequence_of_strings(obj):
+ """Returns true if *obj* is iterable and contains strings"""
+ if not iterable(obj):
+ return False
+ if is_string_like(obj) and not isinstance(obj, np.ndarray):
+ try:
+ obj = obj.values
+ except AttributeError:
+ # not pandas
+ return False
+ for o in obj:
+ if not is_string_like(o):
+ return False
+ return True
+
+
+def is_hashable(obj):
+ """Returns true if *obj* can be hashed"""
+ try:
+ hash(obj)
+ except TypeError:
+ return False
+ return True
+
+
+def is_writable_file_like(obj):
+ """return true if *obj* looks like a file object with a *write* method"""
+ return callable(getattr(obj, 'write', None))
+
+
+def file_requires_unicode(x):
+ """
+ Returns `True` if the given writable file-like object requires Unicode
+ to be written to it.
+ """
+ try:
+ x.write(b'')
+ except TypeError:
+ return True
+ else:
+ return False
+
+
+@deprecated('2.1')
+def is_scalar(obj):
+ """return true if *obj* is not string like and is not iterable"""
+ return not isinstance(obj, six.string_types) and not iterable(obj)
+
+
+def is_numlike(obj):
+ """return true if *obj* looks like a number"""
+ return isinstance(obj, (numbers.Number, np.number))
+
+
+def to_filehandle(fname, flag='rU', return_opened=False, encoding=None):
+ """
+ *fname* can be an `os.PathLike` or a file handle. Support for gzipped
+ files is automatic, if the filename ends in .gz. *flag* is a
+ read/write flag for :func:`file`
+ """
+ if hasattr(os, "PathLike") and isinstance(fname, os.PathLike):
+ return to_filehandle(
+ os.fspath(fname),
+ flag=flag, return_opened=return_opened, encoding=encoding)
+ if isinstance(fname, six.string_types):
+ if fname.endswith('.gz'):
+ # get rid of 'U' in flag for gzipped files.
+ flag = flag.replace('U', '')
+ fh = gzip.open(fname, flag)
+ elif fname.endswith('.bz2'):
+            # python may not be compiled with bz2 support,
+ # bury import until we need it
+ import bz2
+ # get rid of 'U' in flag for bz2 files
+ flag = flag.replace('U', '')
+ fh = bz2.BZ2File(fname, flag)
+ else:
+ fh = io.open(fname, flag, encoding=encoding)
+ opened = True
+ elif hasattr(fname, 'seek'):
+ fh = fname
+ opened = False
+ else:
+ raise ValueError('fname must be a PathLike or file handle')
+ if return_opened:
+ return fh, opened
+ return fh
+
+
+@contextlib.contextmanager
+def open_file_cm(path_or_file, mode="r", encoding=None):
+ r"""Pass through file objects and context-manage `.PathLike`\s."""
+ fh, opened = to_filehandle(path_or_file, mode, True, encoding)
+ if opened:
+ with fh:
+ yield fh
+ else:
+ yield fh
+
+
+def is_scalar_or_string(val):
+ """Return whether the given object is a scalar or string like."""
+ return isinstance(val, six.string_types) or not iterable(val)
+
+
+def _string_to_bool(s):
+ """Parses the string argument as a boolean"""
+ if not isinstance(s, six.string_types):
+ return bool(s)
+ warn_deprecated("2.2", "Passing one of 'on', 'true', 'off', 'false' as a "
+ "boolean is deprecated; use an actual boolean "
+ "(True/False) instead.")
+ if s.lower() in ['on', 'true']:
+ return True
+ if s.lower() in ['off', 'false']:
+ return False
+ raise ValueError('String "%s" must be one of: '
+ '"on", "off", "true", or "false"' % s)
+
+
+def get_sample_data(fname, asfileobj=True):
+ """
+ Return a sample data file. *fname* is a path relative to the
+ `mpl-data/sample_data` directory. If *asfileobj* is `True`
+ return a file object, otherwise just a file path.
+
+    Set the rc parameter examples.directory to the directory where we should
+    look, if sample_data files are stored in a location different from the
+    default (which is 'mpl-data/sample_data' at the same level as the
+    'matplotlib' Python module files).
+
+ If the filename ends in .gz, the file is implicitly ungzipped.
+ """
+ if matplotlib.rcParams['examples.directory']:
+ root = matplotlib.rcParams['examples.directory']
+ else:
+ root = os.path.join(matplotlib._get_data_path(), 'sample_data')
+ path = os.path.join(root, fname)
+
+ if asfileobj:
+ if (os.path.splitext(fname)[-1].lower() in
+ ('.csv', '.xrc', '.txt')):
+ mode = 'r'
+ else:
+ mode = 'rb'
+
+ base, ext = os.path.splitext(fname)
+ if ext == '.gz':
+ return gzip.open(path, mode)
+ else:
+ return open(path, mode)
+ else:
+ return path
+
+
+def flatten(seq, scalarp=is_scalar_or_string):
+ """
+ Returns a generator of flattened nested containers
+
+ For example:
+
+ >>> from matplotlib.cbook import flatten
+ >>> l = (('John', ['Hunter']), (1, 23), [[([42, (5, 23)], )]])
+ >>> print(list(flatten(l)))
+ ['John', 'Hunter', 1, 23, 42, 5, 23]
+
+ By: Composite of Holger Krekel and Luther Blissett
+ From: https://code.activestate.com/recipes/121294/
+ and Recipe 1.12 in cookbook
+ """
+ for item in seq:
+ if scalarp(item) or item is None:
+ yield item
+ else:
+ for subitem in flatten(item, scalarp):
+ yield subitem
+
+
+@deprecated('2.1', "sorted(..., key=itemgetter(...))")
+class Sorter(object):
+ """
+ Sort by attribute or item
+
+ Example usage::
+
+ sort = Sorter()
+
+ list = [(1, 2), (4, 8), (0, 3)]
+ dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
+ {'a': 9, 'b': 9}]
+
+
+ sort(list) # default sort
+ sort(list, 1) # sort by index 1
+ sort(dict, 'a') # sort a list of dicts by key 'a'
+
+ """
+
+ def _helper(self, data, aux, inplace):
+ aux.sort()
+ result = [data[i] for junk, i in aux]
+ if inplace:
+ data[:] = result
+ return result
+
+ def byItem(self, data, itemindex=None, inplace=1):
+ if itemindex is None:
+ if inplace:
+ data.sort()
+ result = data
+ else:
+ result = sorted(data)
+ return result
+ else:
+ aux = [(data[i][itemindex], i) for i in range(len(data))]
+ return self._helper(data, aux, inplace)
+
+ def byAttribute(self, data, attributename, inplace=1):
+ aux = [(getattr(data[i], attributename), i) for i in range(len(data))]
+ return self._helper(data, aux, inplace)
+
+ # a couple of handy synonyms
+ sort = byItem
+ __call__ = byItem
+
+
+@deprecated('2.1')
+class Xlator(dict):
+ """
+ All-in-one multiple-string-substitution class
+
+ Example usage::
+
+ text = "Larry Wall is the creator of Perl"
+ adict = {
+ "Larry Wall" : "Guido van Rossum",
+ "creator" : "Benevolent Dictator for Life",
+ "Perl" : "Python",
+ }
+
+ print(multiple_replace(adict, text))
+
+ xlat = Xlator(adict)
+ print(xlat.xlat(text))
+ """
+
+ def _make_regex(self):
+ """ Build re object based on the keys of the current dictionary """
+ return re.compile("|".join(map(re.escape, self)))
+
+ def __call__(self, match):
+ """ Handler invoked for each regex *match* """
+ return self[match.group(0)]
+
+ def xlat(self, text):
+ """ Translate *text*, returns the modified text. """
+ return self._make_regex().sub(self, text)
+
+
+@deprecated('2.1')
+def soundex(name, len=4):
+ """ soundex module conforming to Odell-Russell algorithm """
+
+ # digits holds the soundex values for the alphabet
+ soundex_digits = '01230120022455012623010202'
+ sndx = ''
+ fc = ''
+
+ # Translate letters in name to soundex digits
+ for c in name.upper():
+ if c.isalpha():
+ if not fc:
+ fc = c # Remember first letter
+ d = soundex_digits[ord(c) - ord('A')]
+ # Duplicate consecutive soundex digits are skipped
+ if not sndx or (d != sndx[-1]):
+ sndx += d
+
+ # Replace first digit with first letter
+ sndx = fc + sndx[1:]
+
+ # Remove all 0s from the soundex code
+ sndx = sndx.replace('0', '')
+
+ # Return soundex code truncated or 0-padded to len characters
+ return (sndx + (len * '0'))[:len]
+
+
+@deprecated('2.1')
+class Null(object):
+ """ Null objects always and reliably "do nothing." """
+
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __call__(self, *args, **kwargs):
+ return self
+
+ def __str__(self):
+ return "Null()"
+
+ def __repr__(self):
+ return "Null()"
+
+ if six.PY3:
+ def __bool__(self):
+            return False
+ else:
+ def __nonzero__(self):
+ return 0
+
+ def __getattr__(self, name):
+ return self
+
+ def __setattr__(self, name, value):
+ return self
+
+ def __delattr__(self, name):
+ return self
+
+
+def mkdirs(newdir, mode=0o777):
+ """
+ make directory *newdir* recursively, and set *mode*. Equivalent to ::
+
+ > mkdir -p NEWDIR
+ > chmod MODE NEWDIR
+ """
+ # this functionality is now in core python as of 3.2
+ # LPY DROP
+ if six.PY3:
+ os.makedirs(newdir, mode=mode, exist_ok=True)
+ else:
+ try:
+ os.makedirs(newdir, mode=mode)
+ except OSError as exception:
+ if exception.errno != errno.EEXIST:
+ raise
+
+
+class GetRealpathAndStat(object):
+ def __init__(self):
+ self._cache = {}
+
+ def __call__(self, path):
+ result = self._cache.get(path)
+ if result is None:
+ realpath = os.path.realpath(path)
+ if sys.platform == 'win32':
+ stat_key = realpath
+ else:
+ stat = os.stat(realpath)
+ stat_key = (stat.st_ino, stat.st_dev)
+ result = realpath, stat_key
+ self._cache[path] = result
+ return result
+
+
+get_realpath_and_stat = GetRealpathAndStat()
+
+
+@deprecated('2.1')
+def dict_delall(d, keys):
+ """delete all of the *keys* from the :class:`dict` *d*"""
+ for key in keys:
+ try:
+ del d[key]
+ except KeyError:
+ pass
+
+
+@deprecated('2.1')
+class RingBuffer(object):
+ """ class that implements a not-yet-full buffer """
+ def __init__(self, size_max):
+ self.max = size_max
+ self.data = []
+
+ class __Full:
+ """ class that implements a full buffer """
+ def append(self, x):
+ """ Append an element overwriting the oldest one. """
+ self.data[self.cur] = x
+ self.cur = (self.cur + 1) % self.max
+
+ def get(self):
+ """ return list of elements in correct order """
+ return self.data[self.cur:] + self.data[:self.cur]
+
+ def append(self, x):
+ """append an element at the end of the buffer"""
+ self.data.append(x)
+ if len(self.data) == self.max:
+ self.cur = 0
+ # Permanently change self's class from non-full to full
+ self.__class__ = __Full
+
+ def get(self):
+ """ Return a list of elements from the oldest to the newest. """
+ return self.data
+
+    def __getitem__(self, i):
+ return self.data[i % len(self.data)]
+
+
+@deprecated('2.1')
+def get_split_ind(seq, N):
+ """
+ *seq* is a list of words. Return the index into seq such that::
+
+        len(' '.join(seq[:ind])) <= N
+
+ """
+
+ s_len = 0
+ # todo: use Alex's xrange pattern from the cbook for efficiency
+ for (word, ind) in zip(seq, xrange(len(seq))):
+ s_len += len(word) + 1 # +1 to account for the len(' ')
+ if s_len >= N:
+ return ind
+ return len(seq)
+
+
+@deprecated('2.1', alternative='textwrap.TextWrapper')
+def wrap(prefix, text, cols):
+ """wrap *text* with *prefix* at length *cols*"""
+ pad = ' ' * len(prefix.expandtabs())
+ available = cols - len(pad)
+
+ seq = text.split(' ')
+ Nseq = len(seq)
+ ind = 0
+ lines = []
+ while ind < Nseq:
+ lastInd = ind
+ ind += get_split_ind(seq[ind:], available)
+ lines.append(seq[lastInd:ind])
+
+ # add the prefix to the first line, pad with spaces otherwise
+ ret = prefix + ' '.join(lines[0]) + '\n'
+ for line in lines[1:]:
+ ret += pad + ' '.join(line) + '\n'
+ return ret
+
+
+# A regular expression used to determine the amount of space to
+# remove. It looks for the first sequence of spaces immediately
+# following the first newline, or at the beginning of the string.
+_find_dedent_regex = re.compile(r"(?:(?:\n\r?)|^)( *)\S")
+# A cache to hold the regexs that actually remove the indent.
+_dedent_regex = {}
+
+
+def dedent(s):
+ """
+ Remove excess indentation from docstring *s*.
+
+ Discards any leading blank lines, then removes up to n whitespace
+ characters from each line, where n is the number of leading
+ whitespace characters in the first line. It differs from
+ textwrap.dedent in its deletion of leading blank lines and its use
+ of the first non-blank line to determine the indentation.
+
+ It is also faster in most cases.
+ """
+ # This implementation has a somewhat obtuse use of regular
+ # expressions. However, this function accounted for almost 30% of
+ # matplotlib startup time, so it is worthy of optimization at all
+ # costs.
+
+ if not s: # includes case of s is None
+ return ''
+
+ match = _find_dedent_regex.match(s)
+ if match is None:
+ return s
+
+ # This is the number of spaces to remove from the left-hand side.
+ nshift = match.end(1) - match.start(1)
+ if nshift == 0:
+ return s
+
+ # Get a regex that will remove *up to* nshift spaces from the
+ # beginning of each line. If it isn't in the cache, generate it.
+ unindent = _dedent_regex.get(nshift, None)
+ if unindent is None:
+ unindent = re.compile("\n\r? {0,%d}" % nshift)
+ _dedent_regex[nshift] = unindent
+
+ result = unindent.sub("\n", s).strip()
+ return result
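+
+# A minimal sketch of dedent's behaviour, for illustration; unlike
+# textwrap.dedent, the indent of the *first* non-blank line decides how much
+# is stripped from the lines that follow:
+#
+#     dedent('\n    first line sets the indent\n      second keeps extra\n')
+#     # -> 'first line sets the indent\n  second keeps extra'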
+
+
+def listFiles(root, patterns='*', recurse=1, return_folders=0):
+ """
+ Recursively list files
+
+ from Parmar and Martelli in the Python Cookbook
+ """
+ import os.path
+ import fnmatch
+ # Expand patterns from semicolon-separated string to list
+ pattern_list = patterns.split(';')
+ results = []
+
+ for dirname, dirs, files in os.walk(root):
+ # Append to results all relevant files (and perhaps folders)
+ for name in files:
+ fullname = os.path.normpath(os.path.join(dirname, name))
+ if return_folders or os.path.isfile(fullname):
+ for pattern in pattern_list:
+ if fnmatch.fnmatch(name, pattern):
+ results.append(fullname)
+ break
+ # Block recursion if recursion was disallowed
+ if not recurse:
+ break
+
+ return results
+
+
+@deprecated('2.1')
+def get_recursive_filelist(args):
+ """
+ Recurse all the files and dirs in *args* ignoring symbolic links
+ and return the files as a list of strings
+ """
+ files = []
+
+ for arg in args:
+ if os.path.isfile(arg):
+ files.append(arg)
+ continue
+ if os.path.isdir(arg):
+ newfiles = listFiles(arg, recurse=1, return_folders=1)
+ files.extend(newfiles)
+
+ return [f for f in files if not os.path.islink(f)]
+
+
+@deprecated('2.1')
+def pieces(seq, num=2):
+ """Break up the *seq* into *num* tuples"""
+ start = 0
+ while 1:
+ item = seq[start:start + num]
+ if not len(item):
+ break
+ yield item
+ start += num
+
+
+@deprecated('2.1')
+def exception_to_str(s=None):
+ if six.PY3:
+ sh = io.StringIO()
+ else:
+ sh = io.BytesIO()
+ if s is not None:
+ print(s, file=sh)
+ traceback.print_exc(file=sh)
+ return sh.getvalue()
+
+
+@deprecated('2.1')
+def allequal(seq):
+ """
+ Return *True* if all elements of *seq* compare equal. If *seq* is
+ 0 or 1 length, return *True*
+ """
+ if len(seq) < 2:
+ return True
+ val = seq[0]
+ for i in xrange(1, len(seq)):
+ thisval = seq[i]
+ if thisval != val:
+ return False
+ return True
+
+
+@deprecated('2.1')
+def alltrue(seq):
+ """
+ Return *True* if all elements of *seq* evaluate to *True*. If
+ *seq* is empty, return *False*.
+ """
+ if not len(seq):
+ return False
+ for val in seq:
+ if not val:
+ return False
+ return True
+
+
+@deprecated('2.1')
+def onetrue(seq):
+ """
+    Return *True* if one element of *seq* is *True*.  If *seq* is
+ empty, return *False*.
+ """
+ if not len(seq):
+ return False
+ for val in seq:
+ if val:
+ return True
+ return False
+
+
+@deprecated('2.1')
+def allpairs(x):
+ """
+ return all possible pairs in sequence *x*
+ """
+ return [(s, f) for i, f in enumerate(x) for s in x[i + 1:]]
+
+
+class maxdict(dict):
+ """
+ A dictionary with a maximum size; this doesn't override all the
+ relevant methods to constrain the size, just setitem, so use with
+ caution
+ """
+ def __init__(self, maxsize):
+ dict.__init__(self)
+ self.maxsize = maxsize
+ self._killkeys = []
+
+ def __setitem__(self, k, v):
+ if k not in self:
+ if len(self) >= self.maxsize:
+ del self[self._killkeys[0]]
+ del self._killkeys[0]
+ self._killkeys.append(k)
+ dict.__setitem__(self, k, v)
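+
+# A minimal usage sketch, for illustration; once the size limit is reached,
+# the oldest key is evicted:
+#
+#     d = maxdict(2)
+#     d['a'] = 1; d['b'] = 2; d['c'] = 3
+#     sorted(d)   # -> ['b', 'c']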
+
+
+class Stack(object):
+ """
+ Implement a stack where elements can be pushed on and you can move
+ back and forth. But no pop. Should mimic home / back / forward
+ in a browser
+ """
+
+ def __init__(self, default=None):
+ self.clear()
+ self._default = default
+
+ def __call__(self):
+ """return the current element, or None"""
+ if not len(self._elements):
+ return self._default
+ else:
+ return self._elements[self._pos]
+
+ def __len__(self):
+ return self._elements.__len__()
+
+ def __getitem__(self, ind):
+ return self._elements.__getitem__(ind)
+
+ def forward(self):
+ """move the position forward and return the current element"""
+ n = len(self._elements)
+ if self._pos < n - 1:
+ self._pos += 1
+ return self()
+
+ def back(self):
+ """move the position back and return the current element"""
+ if self._pos > 0:
+ self._pos -= 1
+ return self()
+
+ def push(self, o):
+ """
+ push object onto stack at current position - all elements
+ occurring later than the current position are discarded
+ """
+ self._elements = self._elements[:self._pos + 1]
+ self._elements.append(o)
+ self._pos = len(self._elements) - 1
+ return self()
+
+ def home(self):
+ """push the first element onto the top of the stack"""
+ if not len(self._elements):
+ return
+ self.push(self._elements[0])
+ return self()
+
+ def empty(self):
+ return len(self._elements) == 0
+
+ def clear(self):
+ """empty the stack"""
+ self._pos = -1
+ self._elements = []
+
+ def bubble(self, o):
+ """
+ raise *o* to the top of the stack and return *o*. *o* must be
+ in the stack
+ """
+
+ if o not in self._elements:
+ raise ValueError('Unknown element o')
+ old = self._elements[:]
+ self.clear()
+ bubbles = []
+ for thiso in old:
+ if thiso == o:
+ bubbles.append(thiso)
+ else:
+ self.push(thiso)
+ for thiso in bubbles:
+ self.push(o)
+ return o
+
+ def remove(self, o):
+ 'remove element *o* from the stack'
+ if o not in self._elements:
+ raise ValueError('Unknown element o')
+ old = self._elements[:]
+ self.clear()
+ for thiso in old:
+ if thiso == o:
+ continue
+ else:
+ self.push(thiso)
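+
+# A minimal usage sketch, for illustration; pushing after moving back discards
+# the "forward" entries, as in browser history:
+#
+#     s = Stack()
+#     s.push('home'); s.push('page1'); s.push('page2')
+#     s.back()          # -> 'page1'
+#     s.push('page3')   # 'page2' is discarded
+#     s()               # -> 'page3'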
+
+
+@deprecated('2.1')
+def finddir(o, match, case=False):
+ """
+    Return all attributes of *o* which match the string in *match*.  If
+    *case* is True, require an exact case match.
+ """
+ if case:
+ names = [(name, name) for name in dir(o)
+ if isinstance(name, six.string_types)]
+ else:
+ names = [(name.lower(), name) for name in dir(o)
+ if isinstance(name, six.string_types)]
+ match = match.lower()
+ return [orig for name, orig in names if name.find(match) >= 0]
+
+
+@deprecated('2.1')
+def reverse_dict(d):
+ """reverse the dictionary -- may lose data if values are not unique!"""
+ return {v: k for k, v in six.iteritems(d)}
+
+
+@deprecated('2.1')
+def restrict_dict(d, keys):
+ """
+ Return a dictionary that contains those keys that appear in both
+ d and keys, with values from d.
+ """
+ return {k: v for k, v in six.iteritems(d) if k in keys}
+
+
+def report_memory(i=0): # argument may go away
+ """return the memory consumed by process"""
+ from matplotlib.compat.subprocess import Popen, PIPE
+ pid = os.getpid()
+ if sys.platform == 'sunos5':
+ try:
+ a2 = Popen(['ps', '-p', '%d' % pid, '-o', 'osz'],
+ stdout=PIPE).stdout.readlines()
+ except OSError:
+ raise NotImplementedError(
+ "report_memory works on Sun OS only if "
+ "the 'ps' program is found")
+ mem = int(a2[-1].strip())
+ elif sys.platform.startswith('linux'):
+ try:
+ a2 = Popen(['ps', '-p', '%d' % pid, '-o', 'rss,sz'],
+ stdout=PIPE).stdout.readlines()
+ except OSError:
+ raise NotImplementedError(
+ "report_memory works on Linux only if "
+ "the 'ps' program is found")
+ mem = int(a2[1].split()[1])
+ elif sys.platform.startswith('darwin'):
+ try:
+ a2 = Popen(['ps', '-p', '%d' % pid, '-o', 'rss,vsz'],
+ stdout=PIPE).stdout.readlines()
+ except OSError:
+ raise NotImplementedError(
+ "report_memory works on Mac OS only if "
+ "the 'ps' program is found")
+ mem = int(a2[1].split()[0])
+ elif sys.platform.startswith('win'):
+ try:
+ a2 = Popen([str("tasklist"), "/nh", "/fi", "pid eq %d" % pid],
+ stdout=PIPE).stdout.read()
+ except OSError:
+ raise NotImplementedError(
+ "report_memory works on Windows only if "
+ "the 'tasklist' program is found")
+ mem = int(a2.strip().split()[-2].replace(',', ''))
+ else:
+ raise NotImplementedError(
+ "We don't have a memory monitor for %s" % sys.platform)
+ return mem
+
+
+_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
+
+
+def safezip(*args):
+ """make sure *args* are equal len before zipping"""
+ Nx = len(args[0])
+ for i, arg in enumerate(args[1:]):
+ if len(arg) != Nx:
+ raise ValueError(_safezip_msg % (Nx, i + 1, len(arg)))
+ return list(zip(*args))
+
+
+@deprecated('2.1')
+def issubclass_safe(x, klass):
+ """return issubclass(x, klass) and return False on a TypeError"""
+
+ try:
+ return issubclass(x, klass)
+ except TypeError:
+ return False
+
+
+def safe_masked_invalid(x, copy=False):
+ x = np.array(x, subok=True, copy=copy)
+ if not x.dtype.isnative:
+ # Note that the argument to `byteswap` is 'inplace',
+ # thus if we have already made a copy, do the byteswap in
+ # place, else make a copy with the byte order swapped.
+ # Be explicit that we are swapping the byte order of the dtype
+ x = x.byteswap(copy).newbyteorder('S')
+
+ try:
+ xm = np.ma.masked_invalid(x, copy=False)
+ xm.shrink_mask()
+ except TypeError:
+ return x
+ return xm
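+
+# A minimal sketch, for illustration; non-finite entries come back masked:
+#
+#     safe_masked_invalid([1.0, np.nan, 3.0])
+#     # -> masked_array(data=[1.0, --, 3.0], mask=[False, True, False], ...)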
+
+
+def print_cycles(objects, outstream=sys.stdout, show_progress=False):
+ """
+ *objects*
+ A list of objects to find cycles in. It is often useful to
+ pass in gc.garbage to find the cycles that are preventing some
+ objects from being garbage collected.
+
+ *outstream*
+ The stream for output.
+
+ *show_progress*
+ If True, print the number of objects reached as they are found.
+ """
+ import gc
+ from types import FrameType
+
+ def print_path(path):
+ for i, step in enumerate(path):
+ # next "wraps around"
+ next = path[(i + 1) % len(path)]
+
+ outstream.write(" %s -- " % str(type(step)))
+ if isinstance(step, dict):
+ for key, val in six.iteritems(step):
+ if val is next:
+ outstream.write("[%s]" % repr(key))
+ break
+ if key is next:
+ outstream.write("[key] = %s" % repr(val))
+ break
+ elif isinstance(step, list):
+ outstream.write("[%d]" % step.index(next))
+ elif isinstance(step, tuple):
+ outstream.write("( tuple )")
+ else:
+ outstream.write(repr(step))
+ outstream.write(" ->\n")
+ outstream.write("\n")
+
+ def recurse(obj, start, all, current_path):
+ if show_progress:
+ outstream.write("%d\r" % len(all))
+
+ all[id(obj)] = None
+
+ referents = gc.get_referents(obj)
+ for referent in referents:
+ # If we've found our way back to the start, this is
+ # a cycle, so print it out
+ if referent is start:
+ print_path(current_path)
+
+ # Don't go back through the original list of objects, or
+ # through temporary references to the object, since those
+ # are just an artifact of the cycle detector itself.
+ elif referent is objects or isinstance(referent, FrameType):
+ continue
+
+ # We haven't seen this object before, so recurse
+ elif id(referent) not in all:
+ recurse(referent, start, all, current_path + [obj])
+
+ for obj in objects:
+ outstream.write("Examining: %r\n" % (obj,))
+ recurse(obj, obj, {}, [])
+
+
+class Grouper(object):
+ """
+ This class provides a lightweight way to group arbitrary objects
+ together into disjoint sets when a full-blown graph data structure
+ would be overkill.
+
+ Objects can be joined using :meth:`join`, tested for connectedness
+ using :meth:`joined`, and all disjoint sets can be retrieved by
+ using the object as an iterator.
+
+ The objects being joined must be hashable and weak-referenceable.
+
+ For example:
+
+ >>> from matplotlib.cbook import Grouper
+ >>> class Foo(object):
+ ... def __init__(self, s):
+ ... self.s = s
+ ... def __repr__(self):
+ ... return self.s
+ ...
+ >>> a, b, c, d, e, f = [Foo(x) for x in 'abcdef']
+ >>> grp = Grouper()
+ >>> grp.join(a, b)
+ >>> grp.join(b, c)
+ >>> grp.join(d, e)
+ >>> sorted(map(tuple, grp))
+ [(a, b, c), (d, e)]
+ >>> grp.joined(a, b)
+ True
+ >>> grp.joined(a, c)
+ True
+ >>> grp.joined(a, d)
+ False
+
+ """
+ def __init__(self, init=()):
+ mapping = self._mapping = {}
+ for x in init:
+ mapping[ref(x)] = [ref(x)]
+
+ def __contains__(self, item):
+ return ref(item) in self._mapping
+
+ def clean(self):
+ """
+ Clean dead weak references from the dictionary
+ """
+ mapping = self._mapping
+ to_drop = [key for key in mapping if key() is None]
+ for key in to_drop:
+ val = mapping.pop(key)
+ val.remove(key)
+
+ def join(self, a, *args):
+ """
+ Join given arguments into the same set. Accepts one or more
+ arguments.
+ """
+ mapping = self._mapping
+ set_a = mapping.setdefault(ref(a), [ref(a)])
+
+ for arg in args:
+ set_b = mapping.get(ref(arg))
+ if set_b is None:
+ set_a.append(ref(arg))
+ mapping[ref(arg)] = set_a
+ elif set_b is not set_a:
+ if len(set_b) > len(set_a):
+ set_a, set_b = set_b, set_a
+ set_a.extend(set_b)
+ for elem in set_b:
+ mapping[elem] = set_a
+
+ self.clean()
+
+ def joined(self, a, b):
+ """
+ Returns True if *a* and *b* are members of the same set.
+ """
+ self.clean()
+
+ mapping = self._mapping
+ try:
+ return mapping[ref(a)] is mapping[ref(b)]
+ except KeyError:
+ return False
+
+ def remove(self, a):
+ self.clean()
+
+ mapping = self._mapping
+ seta = mapping.pop(ref(a), None)
+ if seta is not None:
+ seta.remove(ref(a))
+
+ def __iter__(self):
+ """
+ Iterate over each of the disjoint sets as a list.
+
+ The iterator is invalid if interleaved with calls to join().
+ """
+ self.clean()
+ token = object()
+
+ # Mark each group as we come across if by appending a token,
+ # and don't yield it twice
+ for group in six.itervalues(self._mapping):
+ if group[-1] is not token:
+ yield [x() for x in group]
+ group.append(token)
+
+ # Cleanup the tokens
+ for group in six.itervalues(self._mapping):
+ if group[-1] is token:
+ del group[-1]
+
+ def get_siblings(self, a):
+ """
+ Returns all of the items joined with *a*, including itself.
+ """
+ self.clean()
+
+ siblings = self._mapping.get(ref(a), [ref(a)])
+ return [x() for x in siblings]
+
+
+def simple_linear_interpolation(a, steps):
+ """
+ Resample an array with ``steps - 1`` points between original point pairs.
+
+ Parameters
+ ----------
+ a : array, shape (n, ...)
+ steps : int
+
+ Returns
+ -------
+ array, shape ``((n - 1) * steps + 1, ...)``
+
+ Along each column of *a*, ``(steps - 1)`` points are introduced between
+    each pair of original values; the values are linearly interpolated.
+ """
+ fps = a.reshape((len(a), -1))
+ xp = np.arange(len(a)) * steps
+ x = np.arange((len(a) - 1) * steps + 1)
+ return (np.column_stack([np.interp(x, xp, fp) for fp in fps.T])
+ .reshape((len(x),) + a.shape[1:]))
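+
+# A worked example, for illustration; with steps=2, one interpolated point is
+# inserted between each pair of original values:
+#
+#     simple_linear_interpolation(np.array([0., 2., 4.]), 2)
+#     # -> array([0., 1., 2., 3., 4.])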
+
+
+@deprecated('2.1', alternative='shutil.rmtree')
+def recursive_remove(path):
+ if os.path.isdir(path):
+ for fname in (glob.glob(os.path.join(path, '*')) +
+ glob.glob(os.path.join(path, '.*'))):
+ if os.path.isdir(fname):
+ recursive_remove(fname)
+ os.removedirs(fname)
+ else:
+ os.remove(fname)
+ # os.removedirs(path)
+ else:
+ os.remove(path)
+
+
+def delete_masked_points(*args):
+ """
+ Find all masked and/or non-finite points in a set of arguments,
+ and return the arguments with only the unmasked points remaining.
+
+ Arguments can be in any of 5 categories:
+
+ 1) 1-D masked arrays
+ 2) 1-D ndarrays
+ 3) ndarrays with more than one dimension
+ 4) other non-string iterables
+ 5) anything else
+
+ The first argument must be in one of the first four categories;
+ any argument with a length differing from that of the first
+    argument (and hence anything in category 5) will then be
+ passed through unchanged.
+
+ Masks are obtained from all arguments of the correct length
+ in categories 1, 2, and 4; a point is bad if masked in a masked
+ array or if it is a nan or inf. No attempt is made to
+ extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
+ does not yield a Boolean array.
+
+ All input arguments that are not passed unchanged are returned
+ as ndarrays after removing the points or rows corresponding to
+ masks in any of the arguments.
+
+ A vastly simpler version of this function was originally
+ written as a helper for Axes.scatter().
+
+ """
+ if not len(args):
+ return ()
+ if (isinstance(args[0], six.string_types) or not iterable(args[0])):
+ raise ValueError("First argument must be a sequence")
+ nrecs = len(args[0])
+ margs = []
+ seqlist = [False] * len(args)
+ for i, x in enumerate(args):
+ if (not isinstance(x, six.string_types) and iterable(x)
+ and len(x) == nrecs):
+ seqlist[i] = True
+ if isinstance(x, np.ma.MaskedArray):
+ if x.ndim > 1:
+ raise ValueError("Masked arrays must be 1-D")
+ else:
+ x = np.asarray(x)
+ margs.append(x)
+ masks = [] # list of masks that are True where good
+ for i, x in enumerate(margs):
+ if seqlist[i]:
+ if x.ndim > 1:
+ continue # Don't try to get nan locations unless 1-D.
+ if isinstance(x, np.ma.MaskedArray):
+ masks.append(~np.ma.getmaskarray(x)) # invert the mask
+ xd = x.data
+ else:
+ xd = x
+ try:
+ mask = np.isfinite(xd)
+ if isinstance(mask, np.ndarray):
+ masks.append(mask)
+ except: # Fixme: put in tuple of possible exceptions?
+ pass
+ if len(masks):
+ mask = np.logical_and.reduce(masks)
+ igood = mask.nonzero()[0]
+ if len(igood) < nrecs:
+ for i, x in enumerate(margs):
+ if seqlist[i]:
+ margs[i] = x.take(igood, axis=0)
+ for i, x in enumerate(margs):
+ if seqlist[i] and isinstance(x, np.ma.MaskedArray):
+ margs[i] = x.filled()
+ return margs
+
+
+def boxplot_stats(X, whis=1.5, bootstrap=None, labels=None,
+ autorange=False):
+ """
+ Returns list of dictionaries of statistics used to draw a series
+ of box and whisker plots. The `Returns` section enumerates the
+ required keys of the dictionary. Users can skip this function and
+ pass a user-defined set of dictionaries to the new `axes.bxp` method
+ instead of relying on MPL to do the calculations.
+
+ Parameters
+ ----------
+ X : array-like
+ Data that will be represented in the boxplots. Should have 2 or
+ fewer dimensions.
+
+ whis : float, string, or sequence (default = 1.5)
+        As a float, determines the reach of the whiskers beyond the
+        first and third quartiles. In other words, where IQR is the
+        interquartile range (`Q3-Q1`), the upper whisker will extend to the
+        last datum less than `Q3 + whis*IQR`. Similarly, the lower whisker
+        will extend to the first datum greater than `Q1 - whis*IQR`.
+        Beyond the whiskers, data are considered outliers
+        and are plotted as individual points. This can be set to an
+        ascending sequence of percentiles (e.g., [5, 95]) to set the
+ whiskers at specific percentiles of the data. Finally, `whis`
+ can be the string ``'range'`` to force the whiskers to the
+ minimum and maximum of the data. In the edge case that the 25th
+ and 75th percentiles are equivalent, `whis` can be automatically
+ set to ``'range'`` via the `autorange` option.
+
+ bootstrap : int, optional
+ Number of times the confidence intervals around the median
+ should be bootstrapped (percentile method).
+
+ labels : array-like, optional
+ Labels for each dataset. Length must be compatible with
+ dimensions of `X`.
+
+ autorange : bool, optional (False)
+ When `True` and the data are distributed such that the 25th and
+ 75th percentiles are equal, ``whis`` is set to ``'range'`` such
+ that the whisker ends are at the minimum and maximum of the
+ data.
+
+ Returns
+ -------
+ bxpstats : list of dict
+ A list of dictionaries containing the results for each column
+ of data. Keys of each dictionary are the following:
+
+ ======== ===================================
+ Key Value Description
+ ======== ===================================
+ label tick label for the boxplot
+        mean     arithmetic mean value
+ med 50th percentile
+ q1 first quartile (25th percentile)
+ q3 third quartile (75th percentile)
+ cilo lower notch around the median
+ cihi upper notch around the median
+ whislo end of the lower whisker
+ whishi end of the upper whisker
+ fliers outliers
+ ======== ===================================
+
+ Notes
+ -----
+ Non-bootstrapping approach to confidence interval uses Gaussian-
+ based asymptotic approximation:
+
+ .. math::
+
+ \\mathrm{med} \\pm 1.57 \\times \\frac{\\mathrm{iqr}}{\\sqrt{N}}
+
+ General approach from:
+ McGill, R., Tukey, J.W., and Larsen, W.A. (1978) "Variations of
+ Boxplots", The American Statistician, 32:12-16.
+
+ """
+
+ def _bootstrap_median(data, N=5000):
+ # determine 95% confidence intervals of the median
+ M = len(data)
+ percentiles = [2.5, 97.5]
+
+ bs_index = np.random.randint(M, size=(N, M))
+ bsData = data[bs_index]
+ estimate = np.median(bsData, axis=1, overwrite_input=True)
+
+ CI = np.percentile(estimate, percentiles)
+ return CI
+
+ def _compute_conf_interval(data, med, iqr, bootstrap):
+ if bootstrap is not None:
+ # Do a bootstrap estimate of notch locations.
+ # get conf. intervals around median
+ CI = _bootstrap_median(data, N=bootstrap)
+ notch_min = CI[0]
+ notch_max = CI[1]
+ else:
+
+ N = len(data)
+ notch_min = med - 1.57 * iqr / np.sqrt(N)
+ notch_max = med + 1.57 * iqr / np.sqrt(N)
+
+ return notch_min, notch_max
+
+ # output is a list of dicts
+ bxpstats = []
+
+ # convert X to a list of lists
+ X = _reshape_2D(X, "X")
+
+ ncols = len(X)
+ if labels is None:
+ labels = repeat(None)
+ elif len(labels) != ncols:
+ raise ValueError("Dimensions of labels and X must be compatible")
+
+ input_whis = whis
+ for ii, (x, label) in enumerate(zip(X, labels), start=0):
+
+ # empty dict
+ stats = {}
+ if label is not None:
+ stats['label'] = label
+
+ # restore whis to the input values in case it got changed in the loop
+ whis = input_whis
+
+ # note tricksyness, append up here and then mutate below
+ bxpstats.append(stats)
+
+ # if empty, bail
+ if len(x) == 0:
+ stats['fliers'] = np.array([])
+ stats['mean'] = np.nan
+ stats['med'] = np.nan
+ stats['q1'] = np.nan
+ stats['q3'] = np.nan
+ stats['cilo'] = np.nan
+ stats['cihi'] = np.nan
+ stats['whislo'] = np.nan
+ stats['whishi'] = np.nan
+ stats['med'] = np.nan
+ continue
+
+ # up-convert to an array, just to be safe
+ x = np.asarray(x)
+
+ # arithmetic mean
+ stats['mean'] = np.mean(x)
+
+ # medians and quartiles
+ q1, med, q3 = np.percentile(x, [25, 50, 75])
+
+ # interquartile range
+ stats['iqr'] = q3 - q1
+ if stats['iqr'] == 0 and autorange:
+ whis = 'range'
+
+ # conf. interval around median
+ stats['cilo'], stats['cihi'] = _compute_conf_interval(
+ x, med, stats['iqr'], bootstrap
+ )
+
+ # lowest/highest non-outliers
+ if np.isscalar(whis):
+ if np.isreal(whis):
+ loval = q1 - whis * stats['iqr']
+ hival = q3 + whis * stats['iqr']
+ elif whis in ['range', 'limit', 'limits', 'min/max']:
+ loval = np.min(x)
+ hival = np.max(x)
+ else:
+ raise ValueError('whis must be a float, valid string, or list '
+ 'of percentiles')
+ else:
+ loval = np.percentile(x, whis[0])
+ hival = np.percentile(x, whis[1])
+
+ # get high extreme
+ wiskhi = np.compress(x <= hival, x)
+ if len(wiskhi) == 0 or np.max(wiskhi) < q3:
+ stats['whishi'] = q3
+ else:
+ stats['whishi'] = np.max(wiskhi)
+
+ # get low extreme
+ wisklo = np.compress(x >= loval, x)
+ if len(wisklo) == 0 or np.min(wisklo) > q1:
+ stats['whislo'] = q1
+ else:
+ stats['whislo'] = np.min(wisklo)
+
+ # compute a single array of outliers
+ stats['fliers'] = np.hstack([
+ np.compress(x < stats['whislo'], x),
+ np.compress(x > stats['whishi'], x)
+ ])
+
+ # add in the remaining stats
+ stats['q1'], stats['med'], stats['q3'] = q1, med, q3
+
+ return bxpstats
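+
+# A minimal usage sketch, for illustration (the data below are made up for
+# the example):
+#
+#     data = np.random.lognormal(size=100)
+#     bxp, = boxplot_stats(data, whis=[5, 95])   # whiskers at the 5th/95th percentiles
+#     sorted(bxp)   # -> the keys from the table above (minus 'label', plus 'iqr')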
+
+
+# FIXME I don't think this is used anywhere
+@deprecated('2.1')
+def unmasked_index_ranges(mask, compressed=True):
+ """
+ Find index ranges where *mask* is *False*.
+
+ *mask* will be flattened if it is not already 1-D.
+
+ Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
+ indices for slices of the compressed :class:`numpy.ndarray`
+ corresponding to each of *N* uninterrupted runs of unmasked
+ values. If optional argument *compressed* is *False*, it returns
+ the start and stop indices into the original :class:`numpy.ndarray`,
+ not the compressed :class:`numpy.ndarray`. Returns *None* if there
+ are no unmasked values.
+
+ Example::
+
+ y = ma.array(np.arange(5), mask = [0,0,1,0,0])
+ ii = unmasked_index_ranges(ma.getmaskarray(y))
+ # returns array [[0,2,] [2,4,]]
+
+ y.compressed()[ii[1,0]:ii[1,1]]
+ # returns array [3,4,]
+
+ ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
+ # returns array [[0, 2], [3, 5]]
+
+ y.filled()[ii[1,0]:ii[1,1]]
+ # returns array [3,4,]
+
+ Prior to the transforms refactoring, this was used to support
+ masked arrays in Line2D.
+ """
+ mask = mask.reshape(mask.size)
+ m = np.concatenate(((1,), mask, (1,)))
+ indices = np.arange(len(mask) + 1)
+ mdif = m[1:] - m[:-1]
+ i0 = np.compress(mdif == -1, indices)
+ i1 = np.compress(mdif == 1, indices)
+ assert len(i0) == len(i1)
+ if len(i1) == 0:
+ return None # Maybe this should be np.zeros((0,2), dtype=int)
+ if not compressed:
+ return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
+ seglengths = i1 - i0
+ breakpoints = np.cumsum(seglengths)
+ ic0 = np.concatenate(((0,), breakpoints[:-1]))
+ ic1 = breakpoints
+ return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
+
+
+# The ls_mapper maps short codes for line style to their full name used by
+# backends; the reverse mapper is for mapping full names to short ones.
+ls_mapper = {'-': 'solid', '--': 'dashed', '-.': 'dashdot', ':': 'dotted'}
+ls_mapper_r = {v: k for k, v in six.iteritems(ls_mapper)}
+
+
+@deprecated('2.2')
+def align_iterators(func, *iterables):
+ """
+    This generator takes a bunch of iterables that are ordered by *func*.
+ It sends out ordered tuples::
+
+ (func(row), [rows from all iterators matching func(row)])
+
+ It is used by :func:`matplotlib.mlab.recs_join` to join record arrays
+ """
+ class myiter:
+ def __init__(self, it):
+ self.it = it
+ self.key = self.value = None
+ self.iternext()
+
+ def iternext(self):
+ try:
+ self.value = next(self.it)
+ self.key = func(self.value)
+ except StopIteration:
+ self.value = self.key = None
+
+ def __call__(self, key):
+ retval = None
+ if key == self.key:
+ retval = self.value
+ self.iternext()
+ elif self.key and key > self.key:
+ raise ValueError("Iterator has been left behind")
+ return retval
+
+ # This can be made more efficient by not computing the minimum key for each
+ # iteration
+ iters = [myiter(it) for it in iterables]
+ minvals = minkey = True
+ while True:
+ minvals = ([_f for _f in [it.key for it in iters] if _f])
+ if minvals:
+ minkey = min(minvals)
+ yield (minkey, [it(minkey) for it in iters])
+ else:
+ break
+
+
+def contiguous_regions(mask):
+ """
+ Return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
+ True and we cover all such regions
+ """
+ mask = np.asarray(mask, dtype=bool)
+
+ if not mask.size:
+ return []
+
+ # Find the indices of region changes, and correct offset
+ idx, = np.nonzero(mask[:-1] != mask[1:])
+ idx += 1
+
+ # List operations are faster for moderately sized arrays
+ idx = idx.tolist()
+
+ # Add first and/or last index if needed
+ if mask[0]:
+ idx = [0] + idx
+ if mask[-1]:
+ idx.append(len(mask))
+
+ return list(zip(idx[::2], idx[1::2]))
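+
+# Illustrative example (sketch, not from the original source): each pair is a
+# half-open slice over one run of True values.
+#
+#     contiguous_regions([False, True, True, False, True])
+#     # -> [(1, 3), (4, 5)]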
+
+
+def is_math_text(s):
+ # Did we find an even number of non-escaped dollar signs?
+ # If so, treat it as math text.
+ try:
+ s = six.text_type(s)
+ except UnicodeDecodeError:
+ raise ValueError(
+ "matplotlib display text must have all code points < 128 or use "
+ "Unicode strings")
+
+ dollar_count = s.count(r'$') - s.count(r'\$')
+ even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
+
+ return even_dollars
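+
+# Illustrative examples (sketch, not from the original source):
+#
+#     is_math_text(r'$\alpha > \beta$')   # -> True  (two unescaped '$')
+#     is_math_text(r'price: \$5')         # -> False (the only '$' is escaped)
+#     is_math_text('plain text')          # -> False (no '$' at all)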
+
+
+def _to_unmasked_float_array(x):
+ """
+ Convert a sequence to a float array; if input was a masked array, masked
+ values are converted to nans.
+ """
+ if hasattr(x, 'mask'):
+ return np.ma.asarray(x, float).filled(np.nan)
+ else:
+ return np.asarray(x, float)
+
+
+def _check_1d(x):
+ '''
+ Convert a sequence with fewer than 1 dimension to a 1-dimensional
+ array; leave everything else untouched.
+ '''
+ if not hasattr(x, 'shape') or len(x.shape) < 1:
+ return np.atleast_1d(x)
+ else:
+ try:
+ x[:, None]
+ return x
+ except (IndexError, TypeError):
+ return np.atleast_1d(x)
+
+
+def _reshape_2D(X, name):
+ """
+ Use Fortran ordering to convert ndarrays and lists of iterables to lists of
+ 1D arrays.
+
+ Lists of iterables are converted by applying `np.asarray` to each of their
+ elements. 1D ndarrays are returned in a singleton list containing them.
+ 2D ndarrays are converted to the list of their *columns*.
+
+ *name* is used to generate the error message for invalid inputs.
+ """
+ # Iterate over columns for ndarrays, over rows otherwise.
+ X = np.atleast_1d(X.T if isinstance(X, np.ndarray) else np.asarray(X))
+ if X.ndim == 1 and X.dtype.type != np.object_:
+ # 1D array of scalars: directly return it.
+ return [X]
+ elif X.ndim in [1, 2]:
+ # 2D array, or 1D array of iterables: flatten them first.
+ return [np.reshape(x, -1) for x in X]
+ else:
+ raise ValueError("{} must have 2 or fewer dimensions".format(name))
+
+
+def violin_stats(X, method, points=100):
+ """
+ Returns a list of dictionaries of data which can be used to draw a series
+ of violin plots. See the `Returns` section below to view the required keys
+ of the dictionary. Users can skip this function and pass a user-defined set
+ of dictionaries to the `Axes.violin` method instead of using MPL to do the
+ calculations.
+
+ Parameters
+ ----------
+ X : array-like
+ Sample data that will be used to produce the gaussian kernel density
+ estimates. Must have 2 or fewer dimensions.
+
+ method : callable
+ The method used to calculate the kernel density estimate for each
+ column of data. When called via `method(v, coords)`, it should
+ return a vector of the values of the KDE evaluated at the values
+ specified in coords.
+
+ points : scalar, default = 100
+ Defines the number of points to evaluate each of the gaussian kernel
+ density estimates at.
+
+ Returns
+ -------
+
+ A list of dictionaries containing the results for each column of data.
+ The dictionaries contain at least the following:
+
+ - coords: A list of scalars containing the coordinates this particular
+ kernel density estimate was evaluated at.
+ - vals: A list of scalars containing the values of the kernel density
+ estimate at each of the coordinates given in `coords`.
+ - mean: The mean value for this column of data.
+ - median: The median value for this column of data.
+ - min: The minimum value for this column of data.
+ - max: The maximum value for this column of data.
+ """
+
+ # List of dictionaries describing each of the violins.
+ vpstats = []
+
+ # Want X to be a list of data sequences
+ X = _reshape_2D(X, "X")
+
+ for x in X:
+ # Dictionary of results for this distribution
+ stats = {}
+
+ # Calculate basic stats for the distribution
+ min_val = np.min(x)
+ max_val = np.max(x)
+
+ # Evaluate the kernel density estimate
+ coords = np.linspace(min_val, max_val, points)
+ stats['vals'] = method(x, coords)
+ stats['coords'] = coords
+
+ # Store additional statistics for this distribution
+ stats['mean'] = np.mean(x)
+ stats['median'] = np.median(x)
+ stats['min'] = min_val
+ stats['max'] = max_val
+
+ # Append to output
+ vpstats.append(stats)
+
+ return vpstats
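+
+# Illustrative sketch (not part of the original module): a minimal
+# fixed-bandwidth Gaussian KDE matching the ``method(v, coords)`` signature
+# expected by ``violin_stats``; the helper name and bandwidth are hypothetical
+# and chosen for the example only.
+#
+#     def _toy_gaussian_kde(v, coords, bw=0.3):
+#         v = np.asarray(v, float)
+#         d = (np.asarray(coords)[:, None] - v[None, :]) / bw
+#         return (np.exp(-0.5 * d * d).sum(axis=1)
+#                 / (len(v) * bw * np.sqrt(2 * np.pi)))
+#
+#     vpstats = violin_stats(np.random.normal(size=(100, 3)), _toy_gaussian_kde)
+#     # -> 3 dicts (one per column), each with 'coords', 'vals', 'mean',
+#     #    'median', 'min' and 'max'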
+
+
+class _NestedClassGetter(object):
+ # recipe from http://stackoverflow.com/a/11493777/741316
+ """
+ When called with the containing class as the first argument,
+ and the name of the nested class as the second argument,
+ returns an instance of the nested class.
+ """
+ def __call__(self, containing_class, class_name):
+ nested_class = getattr(containing_class, class_name)
+
+ # make an instance of a simple object (this one will do), for which we
+ # can change the __class__ later on.
+ nested_instance = _NestedClassGetter()
+
+ # set the class of the instance, the __init__ will never be called on
+ # the class but the original state will be set later on by pickle.
+ nested_instance.__class__ = nested_class
+ return nested_instance
+
+
+class _InstanceMethodPickler(object):
+ """
+ Pickle cannot handle instancemethod saving. _InstanceMethodPickler
+ provides a solution to this.
+ """
+ def __init__(self, instancemethod):
+ """Takes an instancemethod as its only argument."""
+ if six.PY3:
+ self.parent_obj = instancemethod.__self__
+ self.instancemethod_name = instancemethod.__func__.__name__
+ else:
+ self.parent_obj = instancemethod.im_self
+ self.instancemethod_name = instancemethod.im_func.__name__
+
+ def get_instancemethod(self):
+ return getattr(self.parent_obj, self.instancemethod_name)
+
+
+def pts_to_prestep(x, *args):
+ """
+ Convert continuous line to pre-steps.
+
+ Given a set of ``N`` points, convert to ``2N - 1`` points, which when
+ connected linearly give a step function which changes values at the
+ beginning of the intervals.
+
+ Parameters
+ ----------
+ x : array
+ The x location of the steps. May be empty.
+
+ y1, ..., yp : array
+ y arrays to be turned into steps; all must be the same length as ``x``.
+
+ Returns
+ -------
+ out : array
+ The x and y values converted to steps in the same order as the input;
+ can be unpacked as ``x_out, y1_out, ..., yp_out``. If the input is
+ length ``N``, each of these arrays will be length ``2N - 1``. For
+ ``N=0``, the length will be 0.
+
+ Examples
+ --------
+ >> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)
+ """
+ steps = np.zeros((1 + len(args), max(2 * len(x) - 1, 0)))
+ # In all `pts_to_*step` functions, only assign *once* using `x` and `args`,
+ # as converting to an array may be expensive.
+ steps[0, 0::2] = x
+ steps[0, 1::2] = steps[0, 0:-2:2]
+ steps[1:, 0::2] = args
+ steps[1:, 1::2] = steps[1:, 2::2]
+ return steps
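+
+# Illustrative example (sketch, not from the original source): each x value is
+# duplicated and the jump to the next y happens at the left edge of the
+# interval.
+#
+#     pts_to_prestep([1, 2, 3], [4, 5, 6])
+#     # -> array([[1., 1., 2., 2., 3.],
+#     #           [4., 5., 5., 6., 6.]])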
+
+
+def pts_to_poststep(x, *args):
+ """
+ Convert continuous line to post-steps.
+
+ Given a set of ``N`` points, convert to ``2N - 1`` points, which when
+ connected linearly give a step function which changes values at the end of
+ the intervals.
+
+ Parameters
+ ----------
+ x : array
+ The x location of the steps. May be empty.
+
+ y1, ..., yp : array
+ y arrays to be turned into steps; all must be the same length as ``x``.
+
+ Returns
+ -------
+ out : array
+ The x and y values converted to steps in the same order as the input;
+ can be unpacked as ``x_out, y1_out, ..., yp_out``. If the input is
+ length ``N``, each of these arrays will be length ``2N - 1``. For
+ ``N=0``, the length will be 0.
+
+ Examples
+ --------
+ >> x_s, y1_s, y2_s = pts_to_poststep(x, y1, y2)
+ """
+ steps = np.zeros((1 + len(args), max(2 * len(x) - 1, 0)))
+ steps[0, 0::2] = x
+ steps[0, 1::2] = steps[0, 2::2]
+ steps[1:, 0::2] = args
+ steps[1:, 1::2] = steps[1:, 0:-2:2]
+ return steps
+
+
+def pts_to_midstep(x, *args):
+ """
+ Convert continuous line to mid-steps.
+
+ Given a set of ``N`` points convert to ``2N`` points which when connected
+ linearly give a step function which changes values at the middle of the
+ intervals.
+
+ Parameters
+ ----------
+ x : array
+ The x location of the steps. May be empty.
+
+ y1, ..., yp : array
+ y arrays to be turned into steps; all must be the same length as ``x``.
+
+ Returns
+ -------
+ out : array
+ The x and y values converted to steps in the same order as the input;
+ can be unpacked as ``x_out, y1_out, ..., yp_out``. If the input is
+ length ``N``, each of these arrays will be length ``2N``.
+
+ Examples
+ --------
+ >> x_s, y1_s, y2_s = pts_to_midstep(x, y1, y2)
+ """
+ steps = np.zeros((1 + len(args), 2 * len(x)))
+ x = np.asanyarray(x)
+ steps[0, 1:-1:2] = steps[0, 2::2] = (x[:-1] + x[1:]) / 2
+ steps[0, :1] = x[:1] # Also works for zero-sized input.
+ steps[0, -1:] = x[-1:]
+ steps[1:, 0::2] = args
+ steps[1:, 1::2] = steps[1:, 0::2]
+ return steps
+
+
+STEP_LOOKUP_MAP = {'default': lambda x, y: (x, y),
+ 'steps': pts_to_prestep,
+ 'steps-pre': pts_to_prestep,
+ 'steps-post': pts_to_poststep,
+ 'steps-mid': pts_to_midstep}
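+
+# Illustrative dispatch sketch (assumption: this mirrors how drawstyle strings
+# are resolved before rendering; *x* and *y* are placeholder data arrays, not
+# names from this module):
+#
+#     convert = STEP_LOOKUP_MAP['steps-post']
+#     x_draw, y_draw = convert(x, y)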
+
+
+def index_of(y):
+ """
+ A helper function to get the index of an input to plot
+ against if x values are not explicitly given.
+
+ Tries to get `y.index` (works if this is a pd.Series), if that
+ fails, return np.arange(y.shape[0]).
+
+ This will be extended in the future to deal with more types of
+ labeled data.
+
+ Parameters
+ ----------
+ y : scalar or array-like
+ The proposed y-value
+
+ Returns
+ -------
+ x, y : ndarray
+ The x and y values to plot.
+ """
+ try:
+ return y.index.values, y.values
+ except AttributeError:
+ y = _check_1d(y)
+ return np.arange(y.shape[0], dtype=float), y
+
+
+def safe_first_element(obj):
+ if isinstance(obj, collections.Iterator):
+ # needed to accept `array.flat` as input.
+ # np.flatiter reports as an instance of collections.Iterator
+ # but can still be indexed via [].
+ # This has the side effect of re-setting the iterator, but
+ # that is acceptable.
+ try:
+ return obj[0]
+ except TypeError:
+ pass
+ raise RuntimeError("matplotlib does not support generators "
+ "as input")
+ return next(iter(obj))
+
+
+def sanitize_sequence(data):
+ """Converts dictview object to list"""
+ return list(data) if isinstance(data, collections.MappingView) else data
+
+
+def normalize_kwargs(kw, alias_mapping=None, required=(), forbidden=(),
+ allowed=None):
+ """Helper function to normalize kwarg inputs
+
+ The order in which they are resolved is:
+
+ 1. aliasing
+ 2. required
+ 3. forbidden
+ 4. allowed
+
+ This order means that only the canonical names need to appear in
+ `allowed`, `forbidden`, and `required`.
+
+ Parameters
+ ----------
+
+ alias_mapping : dict, optional
+ A mapping between a canonical name to a list of
+ aliases, in order of precedence from lowest to highest.
+
+ If the canonical value is not in the list it is assumed to have
+ the highest priority.
+
+ required : iterable, optional
+ A tuple of fields that must be in kwargs.
+
+ forbidden : iterable, optional
+ A list of keys which may not be in kwargs.
+
+ allowed : tuple, optional
+ A tuple of allowed fields. If this is not None, then raise if
+ `kw` contains any keys not in the union of `required`
+ and `allowed`. To allow only the required fields pass in
+ ``()`` for `allowed`
+
+ Raises
+ ------
+ TypeError
+ To match what python raises if invalid args/kwargs are passed to
+ a callable.
+
+ """
+ # deal with default value of alias_mapping
+ if alias_mapping is None:
+ alias_mapping = dict()
+
+ # make a local so we can pop
+ kw = dict(kw)
+ # output dictionary
+ ret = dict()
+
+ # hit all alias mappings
+ for canonical, alias_list in six.iteritems(alias_mapping):
+
+ # the alias lists are ordered from lowest to highest priority
+ # so we know to use the last value in this list
+ tmp = []
+ seen = []
+ for a in alias_list:
+ try:
+ tmp.append(kw.pop(a))
+ seen.append(a)
+ except KeyError:
+ pass
+ # if canonical is not in the alias_list assume highest priority
+ if canonical not in alias_list:
+ try:
+ tmp.append(kw.pop(canonical))
+ seen.append(canonical)
+ except KeyError:
+ pass
+ # if we found anything in this set of aliases put it in the return
+ # dict
+ if tmp:
+ ret[canonical] = tmp[-1]
+ if len(tmp) > 1:
+ warnings.warn("Saw kwargs {seen!r} which are all aliases for "
+ "{canon!r}. Kept value from {used!r}".format(
+ seen=seen, canon=canonical, used=seen[-1]))
+
+ # at this point we know that all keys which are aliased are removed, update
+ # the return dictionary from the cleaned local copy of the input
+ ret.update(kw)
+
+ fail_keys = [k for k in required if k not in ret]
+ if fail_keys:
+ raise TypeError("The required keys {keys!r} "
+ "are not in kwargs".format(keys=fail_keys))
+
+ fail_keys = [k for k in forbidden if k in ret]
+ if fail_keys:
+ raise TypeError("The forbidden keys {keys!r} "
+ "are in kwargs".format(keys=fail_keys))
+
+ if allowed is not None:
+ allowed_set = set(required) | set(allowed)
+ fail_keys = [k for k in ret if k not in allowed_set]
+ if fail_keys:
+ raise TypeError("kwargs contains {keys!r} which are not in "
+ "the required {req!r} or "
+ "allowed {allow!r} keys".format(
+ keys=fail_keys, req=required,
+ allow=allowed))
+
+ return ret
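+
+# Illustrative example (sketch; the alias table here is hypothetical, not
+# taken from this module):
+#
+#     normalize_kwargs({'lw': 2, 'c': 'r'},
+#                      alias_mapping={'linewidth': ['lw'], 'color': ['c']})
+#     # -> {'linewidth': 2, 'color': 'r'}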
+
+
+def get_label(y, default_name):
+ try:
+ return y.name
+ except AttributeError:
+ return default_name
+
+
+_lockstr = """\
+LOCKERROR: matplotlib is trying to acquire the lock
+ {!r}
+and has failed. This may be due to another process holding this
+lock. If you are sure no other matplotlib process is running, try
+removing these folders and trying again.
+"""
+
+
+class Locked(object):
+ """
+ Context manager to handle locks.
+
+ Based on code from conda.
+
+ (c) 2012-2013 Continuum Analytics, Inc. / https://www.continuum.io/
+ All Rights Reserved
+
+ conda is distributed under the terms of the BSD 3-clause license.
+ Consult LICENSE_CONDA or https://opensource.org/licenses/BSD-3-Clause.
+ """
+ LOCKFN = '.matplotlib_lock'
+
+ class TimeoutError(RuntimeError):
+ pass
+
+ def __init__(self, path):
+ self.path = path
+ self.end = "-" + str(os.getpid())
+ self.lock_path = os.path.join(self.path, self.LOCKFN + self.end)
+ self.pattern = os.path.join(self.path, self.LOCKFN + '-*')
+ self.remove = True
+
+ def __enter__(self):
+ retries = 50
+ sleeptime = 0.1
+ while retries:
+ files = glob.glob(self.pattern)
+ if files and not files[0].endswith(self.end):
+ time.sleep(sleeptime)
+ retries -= 1
+ else:
+ break
+ else:
+ err_str = _lockstr.format(self.pattern)
+ raise self.TimeoutError(err_str)
+
+ if not files:
+ try:
+ os.makedirs(self.lock_path)
+ except OSError:
+ pass
+ else: # PID lock already here --- someone else will remove it.
+ self.remove = False
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if self.remove:
+ for path in self.lock_path, self.path:
+ try:
+ os.rmdir(path)
+ except OSError:
+ pass
+
+
+class _FuncInfo(object):
+ """
+ Class used to store a function, its inverse, and whether the function
+ is bounded in the [0, 1] interval.
+
+ """
+
+ def __init__(self, function, inverse, bounded_0_1=True, check_params=None):
+ """
+ Parameters
+ ----------
+
+ function : callable
+ A callable implementing the function receiving the variable as
+ first argument and any additional parameters in a list as second
+ argument.
+ inverse : callable
+ A callable implementing the inverse function receiving the variable
+ as first argument and any additional parameters in a list as
+ second argument. It must satisfy 'inverse(function(x, p), p) == x'.
+ bounded_0_1: bool or callable
+ A boolean indicating whether the function is bounded in the [0,1]
+ interval, or a callable taking a list of values for the additional
+ parameters, and returning a boolean indicating whether the function
+ is bounded in the [0,1] interval for that combination of
+ parameters. Default True.
+ check_params: callable or None
+ A callable taking a list of values for the additional parameters
+ and returning a boolean indicating whether that combination of
+ parameters is valid. It is only required if the function has
+ additional parameters and some of them are restricted.
+ Default None.
+
+ """
+
+ self.function = function
+ self.inverse = inverse
+
+ if callable(bounded_0_1):
+ self._bounded_0_1 = bounded_0_1
+ else:
+ self._bounded_0_1 = lambda x: bounded_0_1
+
+ if check_params is None:
+ self._check_params = lambda x: True
+ elif callable(check_params):
+ self._check_params = check_params
+ else:
+ raise ValueError("Invalid 'check_params' argument.")
+
+ def is_bounded_0_1(self, params=None):
+ """
+ Returns a boolean indicating if the function is bounded in the [0,1]
+ interval for a particular set of additional parameters.
+
+ Parameters
+ ----------
+
+ params : list
+ The list of additional parameters. Default None.
+
+ Returns
+ -------
+
+ out : bool
+ True if the function is bounded in the [0,1] interval for
+ parameters 'params'. Otherwise False.
+
+ """
+
+ return self._bounded_0_1(params)
+
+ def check_params(self, params=None):
+ """
+ Returns a boolean indicating if the set of additional parameters is
+ valid.
+
+ Parameters
+ ----------
+
+ params : list
+ The list of additional parameters. Default None.
+
+ Returns
+ -------
+
+ out : bool
+ True if 'params' is a valid set of additional parameters for the
+ function. Otherwise False.
+
+ """
+
+ return self._check_params(params)
+
+
+class _StringFuncParser(object):
+ """
+ A class used to convert predefined strings into
+ _FuncInfo objects, or to directly obtain _FuncInfo
+ properties.
+
+ """
+
+ _funcs = {}
+ _funcs['linear'] = _FuncInfo(lambda x: x,
+ lambda x: x,
+ True)
+ _funcs['quadratic'] = _FuncInfo(np.square,
+ np.sqrt,
+ True)
+ _funcs['cubic'] = _FuncInfo(lambda x: x**3,
+ lambda x: x**(1. / 3),
+ True)
+ _funcs['sqrt'] = _FuncInfo(np.sqrt,
+ np.square,
+ True)
+ _funcs['cbrt'] = _FuncInfo(lambda x: x**(1. / 3),
+ lambda x: x**3,
+ True)
+ _funcs['log10'] = _FuncInfo(np.log10,
+ lambda x: (10**(x)),
+ False)
+ _funcs['log'] = _FuncInfo(np.log,
+ np.exp,
+ False)
+ _funcs['log2'] = _FuncInfo(np.log2,
+ lambda x: (2**x),
+ False)
+ _funcs['x**{p}'] = _FuncInfo(lambda x, p: x**p[0],
+ lambda x, p: x**(1. / p[0]),
+ True)
+ _funcs['root{p}(x)'] = _FuncInfo(lambda x, p: x**(1. / p[0]),
+ lambda x, p: x**p[0],
+ True)
+ _funcs['log{p}(x)'] = _FuncInfo(lambda x, p: (np.log(x) /
+ np.log(p[0])),
+ lambda x, p: p[0]**(x),
+ False,
+ lambda p: p[0] > 0)
+ _funcs['log10(x+{p})'] = _FuncInfo(lambda x, p: np.log10(x + p[0]),
+ lambda x, p: 10**x - p[0],
+ lambda p: p[0] > 0)
+ _funcs['log(x+{p})'] = _FuncInfo(lambda x, p: np.log(x + p[0]),
+ lambda x, p: np.exp(x) - p[0],
+ lambda p: p[0] > 0)
+ _funcs['log{p}(x+{p})'] = _FuncInfo(lambda x, p: (np.log(x + p[1]) /
+ np.log(p[0])),
+ lambda x, p: p[0]**(x) - p[1],
+ lambda p: p[1] > 0,
+ lambda p: p[0] > 0)
+
+ def __init__(self, str_func):
+ """
+ Parameters
+ ----------
+ str_func : string
+ String to be parsed.
+
+ """
+
+ if not isinstance(str_func, six.string_types):
+ raise ValueError("'%s' must be a string." % str_func)
+ self._str_func = six.text_type(str_func)
+ self._key, self._params = self._get_key_params()
+ self._func = self._parse_func()
+
+ def _parse_func(self):
+ """
+ Parses the parameters to build a new _FuncInfo object,
+ replacing the relevant parameters if necessary in the lambda
+ functions.
+
+ """
+
+ func = self._funcs[self._key]
+
+ if not self._params:
+ func = _FuncInfo(func.function, func.inverse,
+ func.is_bounded_0_1())
+ else:
+ m = func.function
+ function = (lambda x, m=m: m(x, self._params))
+
+ m = func.inverse
+ inverse = (lambda x, m=m: m(x, self._params))
+
+ is_bounded_0_1 = func.is_bounded_0_1(self._params)
+
+ func = _FuncInfo(function, inverse,
+ is_bounded_0_1)
+ return func
+
+ @property
+ def func_info(self):
+ """
+ Returns the _FuncInfo object.
+
+ """
+ return self._func
+
+ @property
+ def function(self):
+ """
+ Returns the callable for the direct function.
+
+ """
+ return self._func.function
+
+ @property
+ def inverse(self):
+ """
+ Returns the callable for the inverse function.
+
+ """
+ return self._func.inverse
+
+ @property
+ def is_bounded_0_1(self):
+ """
+ Returns a boolean indicating if the function is bounded
+ in the [0, 1] interval.
+
+ """
+ return self._func.is_bounded_0_1()
+
+ def _get_key_params(self):
+ str_func = self._str_func
+ # Checking if it comes with parameters
+ regex = r'\{(.*?)\}'
+ params = re.findall(regex, str_func)
+
+ for i, param in enumerate(params):
+ try:
+ params[i] = float(param)
+ except ValueError:
+ raise ValueError("Parameter %i is '%s', which is "
+ "not a number." %
+ (i, param))
+
+ str_func = re.sub(regex, '{p}', str_func)
+
+ try:
+ func = self._funcs[str_func]
+ except (ValueError, KeyError):
+ raise ValueError("'%s' is an invalid string. The only strings "
+ "recognized as functions are %s." %
+ (str_func, list(self._funcs)))
+
+ # Checking that the parameters are valid
+ if not func.check_params(params):
+ raise ValueError("%s are invalid values for the parameters "
+ "in %s." %
+ (params, str_func))
+
+ return str_func, params
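+
+# Illustrative example (sketch, not from the original source): parameters in
+# braces are parsed out and baked into the returned callables.
+#
+#     parser = _StringFuncParser('log10(x+{2.0})')
+#     parser.function(np.array([8.0]))   # -> array([1.]), i.e. log10(8 + 2)
+#     parser.inverse(np.array([1.0]))    # -> array([8.])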
+
+
+def _topmost_artist(
+ artists,
+ _cached_max=functools.partial(max, key=operator.attrgetter("zorder"))):
+ """Get the topmost artist of a list.
+
+ In case of a tie, return the *last* of the tied artists, as it will be
+ drawn on top of the others. `max` returns the first maximum in case of ties
+ (on Py2 this is undocumented but true), so we need to iterate over the list
+ in reverse order.
+ """
+ return _cached_max(reversed(artists))
+
+
+def _str_equal(obj, s):
+ """Return whether *obj* is a string equal to string *s*.
+
+ This helper solely exists to handle the case where *obj* is a numpy array,
+ because in such cases, a naive ``obj == s`` would yield an array, which
+ cannot be used in a boolean context.
+ """
+ return isinstance(obj, six.string_types) and obj == s
+
+
+def _str_lower_equal(obj, s):
+ """Return whether *obj* is a string equal, when lowercased, to string *s*.
+
+ This helper solely exists to handle the case where *obj* is a numpy array,
+ because in such cases, a naive ``obj == s`` would yield an array, which
+ cannot be used in a boolean context.
+ """
+ return isinstance(obj, six.string_types) and obj.lower() == s
+
+
+@contextlib.contextmanager
+def _setattr_cm(obj, **kwargs):
+ """Temporarily set some attributes; restore original state at context exit.
+ """
+ sentinel = object()
+ origs = [(attr, getattr(obj, attr, sentinel)) for attr in kwargs]
+ try:
+ for attr, val in kwargs.items():
+ setattr(obj, attr, val)
+ yield
+ finally:
+ for attr, orig in origs:
+ if orig is sentinel:
+ delattr(obj, attr)
+ else:
+ setattr(obj, attr, orig)
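+
+# Illustrative usage sketch (``some_renderer`` and ``dpi`` are hypothetical,
+# for the example only): the original attribute values are restored, or the
+# attributes removed if they did not exist, when the block exits.
+#
+#     with _setattr_cm(some_renderer, dpi=72):
+#         ...   # some_renderer.dpi == 72 inside the block
+#     # original dpi restored (or deleted) here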
diff --git a/contrib/python/matplotlib/py2/matplotlib/cbook/_backports.py b/contrib/python/matplotlib/py2/matplotlib/cbook/_backports.py
new file mode 100644
index 00000000000..83833258551
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/cbook/_backports.py
@@ -0,0 +1,147 @@
+from __future__ import absolute_import
+
+import os
+import sys
+
+import numpy as np
+
+
+# Copy-pasted from Python 3.4's shutil.
+def which(cmd, mode=os.F_OK | os.X_OK, path=None):
+ """Given a command, mode, and a PATH string, return the path which
+ conforms to the given mode on the PATH, or None if there is no such
+ file.
+
+ `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
+ of os.environ.get("PATH"), or can be overridden with a custom search
+ path.
+
+ """
+ # Check that a given file can be accessed with the correct mode.
+ # Additionally check that `file` is not a directory, as on Windows
+ # directories pass the os.access check.
+ def _access_check(fn, mode):
+ return (os.path.exists(fn) and os.access(fn, mode)
+ and not os.path.isdir(fn))
+
+ # If we're given a path with a directory part, look it up directly rather
+ # than referring to PATH directories. This includes checking relative to the
+ # current directory, e.g. ./script
+ if os.path.dirname(cmd):
+ if _access_check(cmd, mode):
+ return cmd
+ return None
+
+ if path is None:
+ path = os.environ.get("PATH", os.defpath)
+ if not path:
+ return None
+ path = path.split(os.pathsep)
+
+ if sys.platform == "win32":
+ # The current directory takes precedence on Windows.
+ if not os.curdir in path:
+ path.insert(0, os.curdir)
+
+ # PATHEXT is necessary to check on Windows.
+ pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
+ # See if the given file matches any of the expected path extensions.
+ # This will allow us to short circuit when given "python.exe".
+ # If it does match, only test that one, otherwise we have to try
+ # others.
+ if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
+ files = [cmd]
+ else:
+ files = [cmd + ext for ext in pathext]
+ else:
+ # On other platforms you don't have things like PATHEXT to tell you
+ # what file suffixes are executable, so just pass on cmd as-is.
+ files = [cmd]
+
+ seen = set()
+ for dir in path:
+ normdir = os.path.normcase(dir)
+ if not normdir in seen:
+ seen.add(normdir)
+ for thefile in files:
+ name = os.path.join(dir, thefile)
+ if _access_check(name, mode):
+ return name
+ return None
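+
+# Illustrative usage (sketch; the result depends on the local PATH):
+#
+#     which('python')                         # e.g. '/usr/bin/python' or None
+#     which('python', path='/nonexistent')    # -> None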
+
+
+# Copy-pasted from numpy.lib.stride_tricks 1.11.2.
+def _maybe_view_as_subclass(original_array, new_array):
+ if type(original_array) is not type(new_array):
+ # if input was an ndarray subclass and subclasses were OK,
+ # then view the result as that subclass.
+ new_array = new_array.view(type=type(original_array))
+ # Since we have done something akin to a view from original_array, we
+ # should let the subclass finalize (if it has it implemented, i.e., is
+ # not None).
+ if new_array.__array_finalize__:
+ new_array.__array_finalize__(original_array)
+ return new_array
+
+
+# Copy-pasted from numpy.lib.stride_tricks 1.11.2.
+def _broadcast_to(array, shape, subok, readonly):
+ shape = tuple(shape) if np.iterable(shape) else (shape,)
+ array = np.array(array, copy=False, subok=subok)
+ if not shape and array.shape:
+ raise ValueError('cannot broadcast a non-scalar to a scalar array')
+ if any(size < 0 for size in shape):
+ raise ValueError('all elements of broadcast shape must be non-'
+ 'negative')
+ needs_writeable = not readonly and array.flags.writeable
+ extras = ['reduce_ok'] if needs_writeable else []
+ op_flag = 'readwrite' if needs_writeable else 'readonly'
+ broadcast = np.nditer(
+ (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
+ op_flags=[op_flag], itershape=shape, order='C').itviews[0]
+ result = _maybe_view_as_subclass(array, broadcast)
+ if needs_writeable and not result.flags.writeable:
+ result.flags.writeable = True
+ return result
+
+
+# Copy-pasted from numpy.lib.stride_tricks 1.11.2.
+def broadcast_to(array, shape, subok=False):
+ """Broadcast an array to a new shape.
+
+ Parameters
+ ----------
+ array : array_like
+ The array to broadcast.
+ shape : tuple
+ The shape of the desired array.
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise
+ the returned array will be forced to be a base-class array (default).
+
+ Returns
+ -------
+ broadcast : array
+ A readonly view on the original array with the given shape. It is
+ typically not contiguous. Furthermore, more than one element of a
+ broadcasted array may refer to a single memory location.
+
+ Raises
+ ------
+ ValueError
+ If the array is not compatible with the new shape according to NumPy's
+ broadcasting rules.
+
+ Notes
+ -----
+ .. versionadded:: 1.10.0
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> np.broadcast_to(x, (3, 3))
+ array([[1, 2, 3],
+ [1, 2, 3],
+ [1, 2, 3]])
+ """
+ return _broadcast_to(array, shape, subok=subok, readonly=True)
diff --git a/contrib/python/matplotlib/py2/matplotlib/cbook/deprecation.py b/contrib/python/matplotlib/py2/matplotlib/cbook/deprecation.py
new file mode 100644
index 00000000000..ca7ae333f27
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/cbook/deprecation.py
@@ -0,0 +1,222 @@
+import functools
+import textwrap
+import warnings
+
+
+class MatplotlibDeprecationWarning(UserWarning):
+ """
+ A class for issuing deprecation warnings for Matplotlib users.
+
+ In light of the fact that Python builtin DeprecationWarnings are ignored
+ by default as of Python 2.7 (see link below), this class was put in to
+ allow for the signaling of deprecation, but via UserWarnings which are not
+ ignored by default.
+
+ https://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
+ """
+ pass
+
+
+mplDeprecation = MatplotlibDeprecationWarning
+
+
+def _generate_deprecation_message(since, message='', name='',
+ alternative='', pending=False,
+ obj_type='attribute',
+ addendum=''):
+
+ if not message:
+
+ if pending:
+ message = (
+ 'The %(name)s %(obj_type)s will be deprecated in a '
+ 'future version.')
+ else:
+ message = (
+ 'The %(name)s %(obj_type)s was deprecated in version '
+ '%(since)s.')
+
+ altmessage = ''
+ if alternative:
+ altmessage = ' Use %s instead.' % alternative
+
+ message = ((message % {
+ 'func': name,
+ 'name': name,
+ 'alternative': alternative,
+ 'obj_type': obj_type,
+ 'since': since}) +
+ altmessage)
+
+ if addendum:
+ message += addendum
+
+ return message
+
+
+def warn_deprecated(
+ since, message='', name='', alternative='', pending=False,
+ obj_type='attribute', addendum=''):
+ """
+ Used to display deprecation warning in a standard way.
+
+ Parameters
+ ----------
+ since : str
+ The release at which this API became deprecated.
+
+ message : str, optional
+ Override the default deprecation message. The format
+ specifier `%(name)s` may be used for the name of the function,
+ and `%(alternative)s` may be used in the deprecation message
+ to insert the name of an alternative to the deprecated
+ function. `%(obj_type)s` may be used to insert a friendly name
+ for the type of object being deprecated.
+
+ name : str, optional
+ The name of the deprecated object.
+
+ alternative : str, optional
+ An alternative function that the user may use in place of the
+ deprecated function. The deprecation warning will tell the user
+ about this alternative if provided.
+
+ pending : bool, optional
+ If True, uses a PendingDeprecationWarning instead of a
+ DeprecationWarning.
+
+ obj_type : str, optional
+ The object type being deprecated.
+
+ addendum : str, optional
+ Additional text appended directly to the final message.
+
+ Examples
+ --------
+
+ Basic example::
+
+ # To warn of the deprecation of "matplotlib.name_of_module"
+ warn_deprecated('1.4.0', name='matplotlib.name_of_module',
+ obj_type='module')
+
+ """
+ message = _generate_deprecation_message(
+ since, message, name, alternative, pending, obj_type)
+
+ warnings.warn(message, mplDeprecation, stacklevel=1)
+
+
+def deprecated(since, message='', name='', alternative='', pending=False,
+ obj_type=None, addendum=''):
+ """
+ Decorator to mark a function or a class as deprecated.
+
+ Parameters
+ ----------
+ since : str
+ The release at which this API became deprecated. This is
+ required.
+
+ message : str, optional
+ Override the default deprecation message. The format
+ specifier `%(name)s` may be used for the name of the object,
+ and `%(alternative)s` may be used in the deprecation message
+ to insert the name of an alternative to the deprecated
+ object. `%(obj_type)s` may be used to insert a friendly name
+ for the type of object being deprecated.
+
+ name : str, optional
+ The name of the deprecated object; if not provided, the name
+ is automatically determined from the passed-in object. Passing
+ it explicitly is useful in the case of renamed functions, where
+ the new function is just assigned to the name of the
+ deprecated function. For example::
+
+ def new_function():
+ ...
+ oldFunction = new_function
+
+ alternative : str, optional
+ An alternative object that the user may use in place of the
+ deprecated object. The deprecation warning will tell the user
+ about this alternative if provided.
+
+ pending : bool, optional
+ If True, uses a PendingDeprecationWarning instead of a
+ DeprecationWarning.
+
+ addendum : str, optional
+ Additional text appended directly to the final message.
+
+ Examples
+ --------
+
+ Basic example::
+
+ @deprecated('1.4.0')
+ def the_function_to_deprecate():
+ pass
+
+ """
+
+ def deprecate(obj, message=message, name=name, alternative=alternative,
+ pending=pending, addendum=addendum):
+
+ if not name:
+ name = obj.__name__
+
+ if isinstance(obj, type):
+ obj_type = "class"
+ old_doc = obj.__doc__
+ func = obj.__init__
+
+ def finalize(wrapper, new_doc):
+ try:
+ obj.__doc__ = new_doc
+ except (AttributeError, TypeError):
+ # cls.__doc__ is not writeable on Py2.
+ # TypeError occurs on PyPy
+ pass
+ obj.__init__ = wrapper
+ return obj
+ else:
+ obj_type = "function"
+ if isinstance(obj, classmethod):
+ func = obj.__func__
+ old_doc = func.__doc__
+
+ def finalize(wrapper, new_doc):
+ wrapper = functools.wraps(func)(wrapper)
+ wrapper.__doc__ = new_doc
+ return classmethod(wrapper)
+ else:
+ func = obj
+ old_doc = func.__doc__
+
+ def finalize(wrapper, new_doc):
+ wrapper = functools.wraps(func)(wrapper)
+ wrapper.__doc__ = new_doc
+ return wrapper
+
+ message = _generate_deprecation_message(
+ since, message, name, alternative, pending,
+ obj_type, addendum)
+
+ def wrapper(*args, **kwargs):
+ warnings.warn(message, mplDeprecation, stacklevel=2)
+ return func(*args, **kwargs)
+
+ old_doc = textwrap.dedent(old_doc or '').strip('\n')
+ message = message.strip()
+ new_doc = (('\n.. deprecated:: %(since)s'
+ '\n %(message)s\n\n' %
+ {'since': since, 'message': message}) + old_doc)
+ if not old_doc:
+ # This is to prevent a spurious 'unexpected unindent' warning from
+ # docutils when the original docstring was blank.
+ new_doc += r'\ '
+
+ return finalize(wrapper, new_doc)
+
+ return deprecate
diff --git a/contrib/python/matplotlib/py2/matplotlib/cm.py b/contrib/python/matplotlib/py2/matplotlib/cm.py
new file mode 100644
index 00000000000..e4e3dca5025
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/cm.py
@@ -0,0 +1,392 @@
+"""
+Builtin colormaps, colormap handling utilities, and the `ScalarMappable` mixin.
+
+See :doc:`/gallery/color/colormap_reference` for a list of builtin colormaps.
+See :doc:`/tutorials/colors/colormaps` for an in-depth discussion of colormaps.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+from numpy import ma
+import matplotlib as mpl
+import matplotlib.colors as colors
+import matplotlib.cbook as cbook
+from matplotlib._cm import datad
+from matplotlib._cm_listed import cmaps as cmaps_listed
+
+
+cmap_d = {}
+
+
+# reverse all the colormaps.
+# reversed colormaps have '_r' appended to the name.
+
+
+def _reverser(f):
+ def freversed(x):
+ return f(1 - x)
+ return freversed
+
+
+def revcmap(data):
+ """Can only handle specification *data* in dictionary format."""
+ data_r = {}
+ for key, val in six.iteritems(data):
+ if callable(val):
+ valnew = _reverser(val)
+ # This doesn't work: lambda x: val(1-x)
+ # The same "val" (the first one) is used
+ # each time, so the colors are identical
+ # and the result is shades of gray.
+ else:
+ # Flip x and exchange the y values facing x = 0 and x = 1.
+ valnew = [(1.0 - x, y1, y0) for x, y0, y1 in reversed(val)]
+ data_r[key] = valnew
+ return data_r
+
+
+def _reverse_cmap_spec(spec):
+ """Reverses cmap specification *spec*, can handle both dict and tuple
+ type specs."""
+
+ if 'listed' in spec:
+ return {'listed': spec['listed'][::-1]}
+
+ if 'red' in spec:
+ return revcmap(spec)
+ else:
+ revspec = list(reversed(spec))
+ if len(revspec[0]) == 2: # e.g., (1, (1.0, 0.0, 1.0))
+ revspec = [(1.0 - a, b) for a, b in revspec]
+ return revspec
+
+
+def _generate_cmap(name, lutsize):
+ """Generates the requested cmap from its *name*. The lut size is
+ *lutsize*."""
+
+ spec = datad[name]
+
+ # Generate the colormap object.
+ if 'red' in spec:
+ return colors.LinearSegmentedColormap(name, spec, lutsize)
+ elif 'listed' in spec:
+ return colors.ListedColormap(spec['listed'], name)
+ else:
+ return colors.LinearSegmentedColormap.from_list(name, spec, lutsize)
+
+LUTSIZE = mpl.rcParams['image.lut']
+
+# Generate the reversed specifications (all at once, to avoid
+# modify-when-iterating).
+datad.update({cmapname + '_r': _reverse_cmap_spec(spec)
+ for cmapname, spec in six.iteritems(datad)})
+
+# Precache the cmaps with ``lutsize = LUTSIZE``.
+# Also add the reversed ones added in the section above:
+for cmapname in datad:
+ cmap_d[cmapname] = _generate_cmap(cmapname, LUTSIZE)
+
+cmap_d.update(cmaps_listed)
+
+locals().update(cmap_d)
+
+
+# Continue with definitions ...
+
+
+def register_cmap(name=None, cmap=None, data=None, lut=None):
+ """
+ Add a colormap to the set recognized by :func:`get_cmap`.
+
+ It can be used in two ways::
+
+ register_cmap(name='swirly', cmap=swirly_cmap)
+
+ register_cmap(name='choppy', data=choppydata, lut=128)
+
+ In the first case, *cmap* must be a :class:`matplotlib.colors.Colormap`
+ instance. The *name* is optional; if absent, the name will
+ be the :attr:`~matplotlib.colors.Colormap.name` attribute of the *cmap*.
+
+ In the second case, the three arguments are passed to
+ the :class:`~matplotlib.colors.LinearSegmentedColormap` initializer,
+ and the resulting colormap is registered.
+
+ """
+ if name is None:
+ try:
+ name = cmap.name
+ except AttributeError:
+ raise ValueError("Arguments must include a name or a Colormap")
+
+ if not isinstance(name, six.string_types):
+ raise ValueError("Colormap name must be a string")
+
+ if isinstance(cmap, colors.Colormap):
+ cmap_d[name] = cmap
+ return
+
+ # For the remainder, let exceptions propagate.
+ if lut is None:
+ lut = mpl.rcParams['image.lut']
+ cmap = colors.LinearSegmentedColormap(name, data, lut)
+ cmap_d[name] = cmap
+
+
+def get_cmap(name=None, lut=None):
+ """
+ Get a colormap instance, defaulting to rc values if *name* is None.
+
+ Colormaps added with :func:`register_cmap` take precedence over
+ built-in colormaps.
+
+ If *name* is a :class:`matplotlib.colors.Colormap` instance, it will be
+ returned.
+
+ If *lut* is not None it must be an integer giving the number of
+ entries desired in the lookup table, and *name* must be a standard
+ mpl colormap name.
+ """
+ if name is None:
+ name = mpl.rcParams['image.cmap']
+
+ if isinstance(name, colors.Colormap):
+ return name
+
+ if name in cmap_d:
+ if lut is None:
+ return cmap_d[name]
+ else:
+ return cmap_d[name]._resample(lut)
+ else:
+ raise ValueError(
+ "Colormap %s is not recognized. Possible values are: %s"
+ % (name, ', '.join(sorted(cmap_d))))
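+
+# Illustrative usage (sketch, not from the original source; 'viridis' is one
+# of the listed colormaps registered into cmap_d above):
+#
+#     cmap = get_cmap('viridis')
+#     cmap(0.5)                  # -> an RGBA tuple for the midpoint
+#     get_cmap('viridis', 8)     # -> a resampled copy with 8 lookup entries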
+
+
+class ScalarMappable(object):
+ """
+ This is a mixin class to support scalar data to RGBA mapping.
+ The ScalarMappable makes use of data normalization before returning
+ RGBA colors from the given colormap.
+
+ """
+ def __init__(self, norm=None, cmap=None):
+ r"""
+
+ Parameters
+ ----------
+ norm : :class:`matplotlib.colors.Normalize` instance
+ The normalizing object which scales data, typically into the
+ interval ``[0, 1]``.
+ If *None*, *norm* defaults to a *colors.Normalize* object which
+ initializes its scaling based on the first data processed.
+ cmap : str or :class:`~matplotlib.colors.Colormap` instance
+ The colormap used to map normalized data values to RGBA colors.
+ """
+
+ self.callbacksSM = cbook.CallbackRegistry()
+
+ if cmap is None:
+ cmap = get_cmap()
+ if norm is None:
+ norm = colors.Normalize()
+
+ self._A = None
+ #: The Normalization instance of this ScalarMappable.
+ self.norm = norm
+ #: The Colormap instance of this ScalarMappable.
+ self.cmap = get_cmap(cmap)
+ #: The last colorbar associated with this ScalarMappable. May be None.
+ self.colorbar = None
+ self.update_dict = {'array': False}
+
+ def to_rgba(self, x, alpha=None, bytes=False, norm=True):
+ """
+ Return a normalized rgba array corresponding to *x*.
+
+ In the normal case, *x* is a 1-D or 2-D sequence of scalars, and
+ the corresponding ndarray of rgba values will be returned,
+ based on the norm and colormap set for this ScalarMappable.
+
+ There is one special case, for handling images that are already
+ rgb or rgba, such as might have been read from an image file.
+ If *x* is an ndarray with 3 dimensions,
+ and the last dimension is either 3 or 4, then it will be
+ treated as an rgb or rgba array, and no mapping will be done.
+ The array can be uint8, or it can be floating point with
+ values in the 0-1 range; otherwise a ValueError will be raised.
+ If it is a masked array, the mask will be ignored.
+ If the last dimension is 3, the *alpha* kwarg (defaulting to 1)
+ will be used to fill in the transparency. If the last dimension
+ is 4, the *alpha* kwarg is ignored; it does not
+ replace the pre-existing alpha. A ValueError will be raised
+ if the third dimension is other than 3 or 4.
+
+ In either case, if *bytes* is *False* (default), the rgba
+ array will be floats in the 0-1 range; if it is *True*,
+ the returned rgba array will be uint8 in the 0 to 255 range.
+
+ If norm is False, no normalization of the input data is
+ performed, and it is assumed to be in the range (0-1).
+
+ """
+ # First check for special case, image input:
+ try:
+ if x.ndim == 3:
+ if x.shape[2] == 3:
+ if alpha is None:
+ alpha = 1
+ if x.dtype == np.uint8:
+ alpha = np.uint8(alpha * 255)
+ m, n = x.shape[:2]
+ xx = np.empty(shape=(m, n, 4), dtype=x.dtype)
+ xx[:, :, :3] = x
+ xx[:, :, 3] = alpha
+ elif x.shape[2] == 4:
+ xx = x
+ else:
+ raise ValueError("third dimension must be 3 or 4")
+ if xx.dtype.kind == 'f':
+ if norm and (xx.max() > 1 or xx.min() < 0):
+ raise ValueError("Floating point image RGB values "
+ "must be in the 0..1 range.")
+ if bytes:
+ xx = (xx * 255).astype(np.uint8)
+ elif xx.dtype == np.uint8:
+ if not bytes:
+ xx = xx.astype(np.float32) / 255
+ else:
+ raise ValueError("Image RGB array must be uint8 or "
+ "floating point; found %s" % xx.dtype)
+ return xx
+ except AttributeError:
+ # e.g., x is not an ndarray; so try mapping it
+ pass
+
+ # This is the normal case, mapping a scalar array:
+ x = ma.asarray(x)
+ if norm:
+ x = self.norm(x)
+ rgba = self.cmap(x, alpha=alpha, bytes=bytes)
+ return rgba
+
+ def set_array(self, A):
+ """Set the image array from numpy array *A*.
+
+ .. ACCEPTS: ndarray
+
+ Parameters
+ ----------
+ A : ndarray
+ """
+ self._A = A
+ self.update_dict['array'] = True
+
+ def get_array(self):
+ 'Return the array'
+ return self._A
+
+ def get_cmap(self):
+ 'return the colormap'
+ return self.cmap
+
+ def get_clim(self):
+ 'return the min, max of the color limits for image scaling'
+ return self.norm.vmin, self.norm.vmax
+
+ def set_clim(self, vmin=None, vmax=None):
+ """
+ Set the norm limits for image scaling; if *vmin* is a length-2
+ sequence, interpret it as ``(vmin, vmax)``, which is used to
+ support setp.
+
+ ACCEPTS: a length 2 sequence of floats; may be overridden in methods
+ that have ``vmin`` and ``vmax`` kwargs.
+ """
+ if vmax is None:
+ try:
+ vmin, vmax = vmin
+ except (TypeError, ValueError):
+ pass
+ if vmin is not None:
+ self.norm.vmin = colors._sanitize_extrema(vmin)
+ if vmax is not None:
+ self.norm.vmax = colors._sanitize_extrema(vmax)
+ self.changed()
+
+ def set_cmap(self, cmap):
+ """
+ set the colormap for luminance data
+
+ ACCEPTS: a colormap or registered colormap name
+ """
+ cmap = get_cmap(cmap)
+ self.cmap = cmap
+ self.changed()
+
+ def set_norm(self, norm):
+ """Set the normalization instance.
+
+ .. ACCEPTS: `.Normalize`
+
+ Parameters
+ ----------
+ norm : `.Normalize`
+ """
+ if norm is None:
+ norm = colors.Normalize()
+ self.norm = norm
+ self.changed()
+
+ def autoscale(self):
+ """
+ Autoscale the scalar limits on the norm instance using the
+ current array
+ """
+ if self._A is None:
+ raise TypeError('You must first set_array for mappable')
+ self.norm.autoscale(self._A)
+ self.changed()
+
+ def autoscale_None(self):
+ """
+ Autoscale the scalar limits on the norm instance using the
+ current array, changing only limits that are None
+ """
+ if self._A is None:
+ raise TypeError('You must first set_array for mappable')
+ self.norm.autoscale_None(self._A)
+ self.changed()
+
+ def add_checker(self, checker):
+ """
+ Add an entry to a dictionary of boolean flags
+ that are set to True when the mappable is changed.
+ """
+ self.update_dict[checker] = False
+
+ def check_update(self, checker):
+ """
+ If mappable has changed since the last check,
+ return True; else return False
+ """
+ if self.update_dict[checker]:
+ self.update_dict[checker] = False
+ return True
+ return False
+
+ def changed(self):
+ """
+ Call this whenever the mappable is changed to notify all the
+ callbackSM listeners to the 'changed' signal
+ """
+ self.callbacksSM.process('changed', self)
+
+ for key in self.update_dict:
+ self.update_dict[key] = True
+ self.stale = True
diff --git a/contrib/python/matplotlib/py2/matplotlib/collections.py b/contrib/python/matplotlib/py2/matplotlib/collections.py
new file mode 100644
index 00000000000..9e124cdf479
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/collections.py
@@ -0,0 +1,1994 @@
+"""
+Classes for the efficient drawing of large collections of objects that
+share most properties, e.g., a large number of line segments or
+polygons.
+
+The classes are not meant to be as flexible as their single element
+counterparts (e.g., you may not be able to select all line styles) but
+they are meant to be fast for common use cases (e.g., a large set of solid
+line segments).
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import warnings
+
+import six
+from six.moves import zip
+try:
+ from math import gcd
+except ImportError:
+ # LPy workaround
+ from fractions import gcd
+
+import numpy as np
+import matplotlib as mpl
+from . import (_path, artist, cbook, cm, colors as mcolors, docstring,
+ lines as mlines, path as mpath, transforms)
+
+CIRCLE_AREA_FACTOR = 1.0 / np.sqrt(np.pi)
+
+
+_color_aliases = {'facecolors': ['facecolor'],
+ 'edgecolors': ['edgecolor']}
+
+
+class Collection(artist.Artist, cm.ScalarMappable):
+ """
+ Base class for Collections. Must be subclassed to be usable.
+
+ All properties in a collection must be sequences or scalars;
+ if scalars, they will be converted to sequences. The
+ property of the ith element of the collection is::
+
+ prop[i % len(props)]
+
+ Exceptions are *capstyle* and *joinstyle* properties, these can
+ only be set globally for the whole collection.
+
+ Keyword arguments and default values:
+
+ * *edgecolors*: None
+ * *facecolors*: None
+ * *linewidths*: None
+ * *capstyle*: None
+ * *joinstyle*: None
+ * *antialiaseds*: None
+ * *offsets*: None
+ * *transOffset*: transforms.IdentityTransform()
+ * *offset_position*: 'screen' (default) or 'data'
+ * *norm*: None (optional for
+ :class:`matplotlib.cm.ScalarMappable`)
+ * *cmap*: None (optional for
+ :class:`matplotlib.cm.ScalarMappable`)
+ * *hatch*: None
+ * *zorder*: 1
+
+
+ *offsets* and *transOffset* are used to translate the patch after
+ rendering (default no offsets). If offset_position is 'screen'
+ (default) the offset is applied after the master transform has
+ been applied, that is, the offsets are in screen coordinates. If
+ offset_position is 'data', the offset is applied before the master
+ transform, i.e., the offsets are in data coordinates.
+
+ If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
+ are None, they default to their :data:`matplotlib.rcParams` patch
+ setting, in sequence form.
+
+ The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
+ the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
+ (i.e., a call to set_array has been made), at draw time a call to
+ scalar mappable will be made to set the face colors.
+ """
+ _offsets = np.zeros((0, 2))
+ _transOffset = transforms.IdentityTransform()
+ #: Either a list of 3x3 arrays or an Nx3x3 array of transforms, suitable
+ #: for the `all_transforms` argument to
+ #: :meth:`~matplotlib.backend_bases.RendererBase.draw_path_collection`;
+ #: each 3x3 array is used to initialize an
+ #: :class:`~matplotlib.transforms.Affine2D` object.
+ #: Each kind of collection defines this based on its arguments.
+ _transforms = np.empty((0, 3, 3))
+
+ # Whether to draw an edge by default. Set on a
+ # subclass-by-subclass basis.
+ _edge_default = False
+
+ def __init__(self,
+ edgecolors=None,
+ facecolors=None,
+ linewidths=None,
+ linestyles='solid',
+ capstyle=None,
+ joinstyle=None,
+ antialiaseds=None,
+ offsets=None,
+ transOffset=None,
+ norm=None, # optional for ScalarMappable
+ cmap=None, # ditto
+ pickradius=5.0,
+ hatch=None,
+ urls=None,
+ offset_position='screen',
+ zorder=1,
+ **kwargs
+ ):
+ """
+ Create a Collection
+
+ %(Collection)s
+ """
+ artist.Artist.__init__(self)
+ cm.ScalarMappable.__init__(self, norm, cmap)
+ # list of un-scaled dash patterns
+ # this is needed for scaling the dash pattern by linewidth
+ self._us_linestyles = [(None, None)]
+ # list of dash patterns
+ self._linestyles = [(None, None)]
+ # list of unbroadcast/scaled linewidths
+ self._us_lw = [0]
+ self._linewidths = [0]
+ self._is_filled = True # May be modified by set_facecolor().
+
+ self._hatch_color = mcolors.to_rgba(mpl.rcParams['hatch.color'])
+ self.set_facecolor(facecolors)
+ self.set_edgecolor(edgecolors)
+ self.set_linewidth(linewidths)
+ self.set_linestyle(linestyles)
+ self.set_antialiased(antialiaseds)
+ self.set_pickradius(pickradius)
+ self.set_urls(urls)
+ self.set_hatch(hatch)
+ self.set_offset_position(offset_position)
+ self.set_zorder(zorder)
+
+ if capstyle:
+ self.set_capstyle(capstyle)
+ else:
+ self._capstyle = None
+
+ if joinstyle:
+ self.set_joinstyle(joinstyle)
+ else:
+ self._joinstyle = None
+
+ self._offsets = np.zeros((1, 2))
+ self._uniform_offsets = None
+ if offsets is not None:
+ offsets = np.asanyarray(offsets, float)
+ # Broadcast (2,) -> (1, 2) but nothing else.
+ if offsets.shape == (2,):
+ offsets = offsets[None, :]
+ if transOffset is not None:
+ self._offsets = offsets
+ self._transOffset = transOffset
+ else:
+ self._uniform_offsets = offsets
+
+ self._path_effects = None
+ self.update(kwargs)
+ self._paths = None
+
+ def get_paths(self):
+ return self._paths
+
+ def set_paths(self):
+ raise NotImplementedError
+
+ def get_transforms(self):
+ return self._transforms
+
+ def get_offset_transform(self):
+ t = self._transOffset
+ if (not isinstance(t, transforms.Transform)
+ and hasattr(t, '_as_mpl_transform')):
+ t = t._as_mpl_transform(self.axes)
+ return t
+
+ def get_datalim(self, transData):
+ transform = self.get_transform()
+ transOffset = self.get_offset_transform()
+ offsets = self._offsets
+ paths = self.get_paths()
+
+ if not transform.is_affine:
+ paths = [transform.transform_path_non_affine(p) for p in paths]
+ transform = transform.get_affine()
+ if not transOffset.is_affine:
+ offsets = transOffset.transform_non_affine(offsets)
+ transOffset = transOffset.get_affine()
+
+ if isinstance(offsets, np.ma.MaskedArray):
+ offsets = offsets.filled(np.nan)
+ # get_path_collection_extents handles nan but not masked arrays
+
+ if len(paths) and len(offsets):
+ result = mpath.get_path_collection_extents(
+ transform.frozen(), paths, self.get_transforms(),
+ offsets, transOffset.frozen())
+ result = result.inverse_transformed(transData)
+ else:
+ result = transforms.Bbox.null()
+ return result
+
+ def get_window_extent(self, renderer):
+ # TODO: check to ensure that this does not fail for
+ # cases other than scatter plot legend
+ return self.get_datalim(transforms.IdentityTransform())
+
+ def _prepare_points(self):
+ """Point prep for drawing and hit testing"""
+
+ transform = self.get_transform()
+ transOffset = self.get_offset_transform()
+ offsets = self._offsets
+ paths = self.get_paths()
+
+ if self.have_units():
+ paths = []
+ for path in self.get_paths():
+ vertices = path.vertices
+ xs, ys = vertices[:, 0], vertices[:, 1]
+ xs = self.convert_xunits(xs)
+ ys = self.convert_yunits(ys)
+ paths.append(mpath.Path(np.column_stack([xs, ys]), path.codes))
+
+ if offsets.size > 0:
+ xs = self.convert_xunits(offsets[:, 0])
+ ys = self.convert_yunits(offsets[:, 1])
+ offsets = np.column_stack([xs, ys])
+
+ if not transform.is_affine:
+ paths = [transform.transform_path_non_affine(path)
+ for path in paths]
+ transform = transform.get_affine()
+ if not transOffset.is_affine:
+ offsets = transOffset.transform_non_affine(offsets)
+ # This might have changed an ndarray into a masked array.
+ transOffset = transOffset.get_affine()
+
+ if isinstance(offsets, np.ma.MaskedArray):
+ offsets = offsets.filled(np.nan)
+ # Changing from a masked array to nan-filled ndarray
+ # is probably most efficient at this point.
+
+ return transform, transOffset, offsets, paths
+
+ @artist.allow_rasterization
+ def draw(self, renderer):
+ if not self.get_visible():
+ return
+ renderer.open_group(self.__class__.__name__, self.get_gid())
+
+ self.update_scalarmappable()
+
+ transform, transOffset, offsets, paths = self._prepare_points()
+
+ gc = renderer.new_gc()
+ self._set_gc_clip(gc)
+ gc.set_snap(self.get_snap())
+
+ if self._hatch:
+ gc.set_hatch(self._hatch)
+ try:
+ gc.set_hatch_color(self._hatch_color)
+ except AttributeError:
+ # if we end up with a GC that does not have this method
+ warnings.warn("Your backend does not support setting the "
+ "hatch color.")
+
+ if self.get_sketch_params() is not None:
+ gc.set_sketch_params(*self.get_sketch_params())
+
+ if self.get_path_effects():
+ from matplotlib.patheffects import PathEffectRenderer
+ renderer = PathEffectRenderer(self.get_path_effects(), renderer)
+
+ # If the collection is made up of a single shape/color/stroke,
+ # it can be rendered once and blitted multiple times, using
+ # `draw_markers` rather than `draw_path_collection`. This is
+ # *much* faster for Agg, and results in smaller file sizes in
+ # PDF/SVG/PS.
+
+ trans = self.get_transforms()
+ facecolors = self.get_facecolor()
+ edgecolors = self.get_edgecolor()
+ do_single_path_optimization = False
+ if (len(paths) == 1 and len(trans) <= 1 and
+ len(facecolors) == 1 and len(edgecolors) == 1 and
+ len(self._linewidths) == 1 and
+ self._linestyles == [(None, None)] and
+ len(self._antialiaseds) == 1 and len(self._urls) == 1 and
+ self.get_hatch() is None):
+ if len(trans):
+ combined_transform = (transforms.Affine2D(trans[0]) +
+ transform)
+ else:
+ combined_transform = transform
+ extents = paths[0].get_extents(combined_transform)
+ width, height = renderer.get_canvas_width_height()
+ if (extents.width < width and
+ extents.height < height):
+ do_single_path_optimization = True
+
+ if self._joinstyle:
+ gc.set_joinstyle(self._joinstyle)
+
+ if self._capstyle:
+ gc.set_capstyle(self._capstyle)
+
+ if do_single_path_optimization:
+ gc.set_foreground(tuple(edgecolors[0]))
+ gc.set_linewidth(self._linewidths[0])
+ gc.set_dashes(*self._linestyles[0])
+ gc.set_antialiased(self._antialiaseds[0])
+ gc.set_url(self._urls[0])
+ renderer.draw_markers(
+ gc, paths[0], combined_transform.frozen(),
+ mpath.Path(offsets), transOffset, tuple(facecolors[0]))
+ else:
+ renderer.draw_path_collection(
+ gc, transform.frozen(), paths,
+ self.get_transforms(), offsets, transOffset,
+ self.get_facecolor(), self.get_edgecolor(),
+ self._linewidths, self._linestyles,
+ self._antialiaseds, self._urls,
+ self._offset_position)
+
+ gc.restore()
+ renderer.close_group(self.__class__.__name__)
+ self.stale = False
+
+ def set_pickradius(self, pr):
+ """Set the pick radius used for containment tests.
+
+ .. ACCEPTS: float distance in points
+
+ Parameters
+ ----------
+ d : float
+ Pick radius, in points.
+ """
+ self._pickradius = pr
+
+ def get_pickradius(self):
+ return self._pickradius
+
+ def contains(self, mouseevent):
+ """
+ Test whether the mouse event occurred in the collection.
+
+ Returns True | False, ``dict(ind=itemlist)``, where every
+ item in itemlist contains the event.
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+
+ if not self.get_visible():
+ return False, {}
+
+ pickradius = (
+ float(self._picker)
+ if cbook.is_numlike(self._picker) and
+ self._picker is not True # the bool, not just nonzero or 1
+ else self._pickradius)
+
+ transform, transOffset, offsets, paths = self._prepare_points()
+
+ ind = _path.point_in_path_collection(
+ mouseevent.x, mouseevent.y, pickradius,
+ transform.frozen(), paths, self.get_transforms(),
+ offsets, transOffset, pickradius <= 0,
+ self.get_offset_position())
+
+ return len(ind) > 0, dict(ind=ind)
+
+ def set_urls(self, urls):
+ """
+ Parameters
+ ----------
+ urls : List[str] or None
+ .. ACCEPTS: List[str] or None
+ """
+ self._urls = urls if urls is not None else [None]
+ self.stale = True
+
+ def get_urls(self):
+ return self._urls
+
+ def set_hatch(self, hatch):
+ r"""
+ Set the hatching pattern
+
+ *hatch* can be one of::
+
+ / - diagonal hatching
+ \ - back diagonal
+ | - vertical
+ - - horizontal
+ + - crossed
+ x - crossed diagonal
+ o - small circle
+ O - large circle
+ . - dots
+ * - stars
+
+ Letters can be combined, in which case all the specified
+ hatchings are done. If the same letter repeats, it increases the
+ density of hatching of that pattern.
+
+ Hatching is supported in the PostScript, PDF, SVG and Agg
+ backends only.
+
+ Unlike other properties such as linewidth and colors, hatching
+ can only be specified for the collection as a whole, not separately
+ for each member.
+
+ ACCEPTS: [ '/' | '\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*' ]
+ """
+ self._hatch = hatch
+ self.stale = True
+
+ def get_hatch(self):
+ """Return the current hatching pattern."""
+ return self._hatch
+
+ def set_offsets(self, offsets):
+ """
+ Set the offsets for the collection. *offsets* can be a scalar
+ or a sequence.
+
+ ACCEPTS: float or sequence of floats
+ """
+ offsets = np.asanyarray(offsets, float)
+ if offsets.shape == (2,): # Broadcast (2,) -> (1, 2) but nothing else.
+ offsets = offsets[None, :]
+ # This decision is based on how they are initialized above in __init__.
+ if self._uniform_offsets is None:
+ self._offsets = offsets
+ else:
+ self._uniform_offsets = offsets
+ self.stale = True
+
+ def get_offsets(self):
+ """Return the offsets for the collection."""
+ # This decision is based on how they are initialized above in __init__.
+ if self._uniform_offsets is None:
+ return self._offsets
+ else:
+ return self._uniform_offsets
+
+ def set_offset_position(self, offset_position):
+ """
+ Set how offsets are applied. If *offset_position* is 'screen'
+ (default) the offset is applied after the master transform has
+ been applied, that is, the offsets are in screen coordinates.
+ If offset_position is 'data', the offset is applied before the
+ master transform, i.e., the offsets are in data coordinates.
+
+ .. ACCEPTS: [ 'screen' | 'data' ]
+ """
+ if offset_position not in ('screen', 'data'):
+ raise ValueError("offset_position must be 'screen' or 'data'")
+ self._offset_position = offset_position
+ self.stale = True
+
+ def get_offset_position(self):
+ """
+ Returns how offsets are applied for the collection. If
+ *offset_position* is 'screen', the offset is applied after the
+ master transform has been applied, that is, the offsets are in
+ screen coordinates. If offset_position is 'data', the offset
+ is applied before the master transform, i.e., the offsets are
+ in data coordinates.
+ """
+ return self._offset_position
+
+ def set_linewidth(self, lw):
+ """
+ Set the linewidth(s) for the collection. *lw* can be a scalar
+ or a sequence; if it is a sequence the patches will cycle
+ through the sequence.
+
+ ACCEPTS: float or sequence of floats
+ """
+ if lw is None:
+ lw = mpl.rcParams['patch.linewidth']
+ if lw is None:
+ lw = mpl.rcParams['lines.linewidth']
+ # get the un-scaled/broadcast lw
+ self._us_lw = np.atleast_1d(np.asarray(lw))
+
+ # scale all of the dash patterns.
+ self._linewidths, self._linestyles = self._bcast_lwls(
+ self._us_lw, self._us_linestyles)
+ self.stale = True
+
+ def set_linewidths(self, lw):
+ """alias for set_linewidth"""
+ return self.set_linewidth(lw)
+
+ def set_lw(self, lw):
+ """alias for set_linewidth"""
+ return self.set_linewidth(lw)
+
+ def set_linestyle(self, ls):
+ """
+ Set the linestyle(s) for the collection.
+
+ =========================== =================
+ linestyle description
+ =========================== =================
+ ``'-'`` or ``'solid'`` solid line
+ ``'--'`` or ``'dashed'`` dashed line
+ ``'-.'`` or ``'dashdot'`` dash-dotted line
+ ``':'`` or ``'dotted'`` dotted line
+ =========================== =================
+
+ Alternatively a dash tuple of the following form can be provided::
+
+ (offset, onoffseq),
+
+ where ``onoffseq`` is an even length tuple of on and off ink
+ in points.
+
+ ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted' |
+ (offset, on-off-dash-seq) |
+ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` |
+ ``' '`` | ``''``]
+
+ Parameters
+ ----------
+ ls : {'-', '--', '-.', ':'} and more; see description above
+ The line style.
+ """
+ try:
+ if isinstance(ls, six.string_types):
+ ls = cbook.ls_mapper.get(ls, ls)
+ dashes = [mlines._get_dash_pattern(ls)]
+ else:
+ try:
+ dashes = [mlines._get_dash_pattern(ls)]
+ except ValueError:
+ dashes = [mlines._get_dash_pattern(x) for x in ls]
+
+ except ValueError:
+ raise ValueError(
+ 'Do not know how to convert {!r} to dashes'.format(ls))
+
+ # get the list of raw 'unscaled' dash patterns
+ self._us_linestyles = dashes
+
+ # broadcast and scale the lw and dash patterns
+ self._linewidths, self._linestyles = self._bcast_lwls(
+ self._us_lw, self._us_linestyles)
+
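+ # A small sketch of the accepted linestyle forms, assuming ``col`` is any
+ # Collection instance (``col`` is illustrative, not part of this module):
+ #
+ #     col.set_linestyle('dashed')           # named style
+ #     col.set_linestyle((0, (6.0, 2.0)))    # (offset, on-off sequence) in points
+ #     col.set_linestyle(['-', ':'])         # per-element styles, cycled
+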
+ def set_capstyle(self, cs):
+ """
+ Set the capstyle for the collection. The capstyle can
+ only be set globally for all elements in the collection
+
+ Parameters
+ ----------
+ cs : ['butt' | 'round' | 'projecting']
+ The capstyle
+ """
+ if cs in ('butt', 'round', 'projecting'):
+ self._capstyle = cs
+ else:
+ raise ValueError('Unrecognized cap style. Found %s' % cs)
+
+ def get_capstyle(self):
+ return self._capstyle
+
+ def set_joinstyle(self, js):
+ """
+ Set the joinstyle for the collection. The joinstyle can only be
+ set globally for all elements in the collection.
+
+ Parameters
+ ----------
+ js : ['miter' | 'round' | 'bevel']
+ The joinstyle
+ """
+ if js in ('miter', 'round', 'bevel'):
+ self._joinstyle = js
+ else:
+ raise ValueError('Unrecognized join style. Found %s' % js)
+
+ def get_joinstyle(self):
+ return self._joinstyle
+
+ @staticmethod
+ def _bcast_lwls(linewidths, dashes):
+ '''Internal helper function to broadcast + scale ls/lw
+
+ In the collection drawing code the linewidth and linestyle are
+ cycled through as circular buffers (via v[i % len(v)]). Thus,
+ if we are going to scale the dash pattern at set time (not
+ draw time) we need to do the broadcasting now and expand both
+ lists to be the same length.
+
+ Parameters
+ ----------
+ linewidths : list
+ line widths of collection
+
+ dashes : list
+ dash specification (offset, (dash pattern tuple))
+
+ Returns
+ -------
+ linewidths, dashes : list
+ Will be the same length, dashes are scaled by paired linewidth
+
+ '''
+ if mpl.rcParams['_internal.classic_mode']:
+ return linewidths, dashes
+ # make sure they are the same length so we can zip them
+ if len(dashes) != len(linewidths):
+ l_dashes = len(dashes)
+ l_lw = len(linewidths)
+ GCD = gcd(l_dashes, l_lw)
+ dashes = list(dashes) * (l_lw // GCD)
+ linewidths = list(linewidths) * (l_dashes // GCD)
+
+ # scale the dash patterns
+ dashes = [mlines._scale_dashes(o, d, lw)
+ for (o, d), lw in zip(dashes, linewidths)]
+
+ return linewidths, dashes
+
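+ # A worked example of the broadcasting above (a sketch; the exact scaled dash
+ # values depend on mlines._scale_dashes and the current rcParams):
+ #
+ #     lws = [1.0, 2.0, 3.0]                   # len 3
+ #     dashes = [(0, None), (0, (4.0, 2.0))]   # len 2, gcd(2, 3) == 1
+ #     lws2, dashes2 = Collection._bcast_lwls(lws, dashes)
+ #     # both outputs now have length 6 (outside classic mode) and can be
+ #     # zipped pairwise for scaling
+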
+ def set_linestyles(self, ls):
+ """alias for set_linestyle"""
+ return self.set_linestyle(ls)
+
+ def set_dashes(self, ls):
+ """alias for set_linestyle"""
+ return self.set_linestyle(ls)
+
+ def set_antialiased(self, aa):
+ """
+ Set the antialiasing state for rendering.
+
+ ACCEPTS: Boolean or sequence of booleans
+ """
+ if aa is None:
+ aa = mpl.rcParams['patch.antialiased']
+ self._antialiaseds = np.atleast_1d(np.asarray(aa, bool))
+ self.stale = True
+
+ def set_antialiaseds(self, aa):
+ """alias for set_antialiased"""
+ return self.set_antialiased(aa)
+
+ def set_color(self, c):
+ """
+ Set both the edgecolor and the facecolor.
+
+ ACCEPTS: matplotlib color arg or sequence of rgba tuples
+
+ .. seealso::
+
+ :meth:`set_facecolor`, :meth:`set_edgecolor`
+ For setting the edge or face color individually.
+ """
+ self.set_facecolor(c)
+ self.set_edgecolor(c)
+
+ def _set_facecolor(self, c):
+ if c is None:
+ c = mpl.rcParams['patch.facecolor']
+
+ self._is_filled = True
+ try:
+ if c.lower() == 'none':
+ self._is_filled = False
+ except AttributeError:
+ pass
+ self._facecolors = mcolors.to_rgba_array(c, self._alpha)
+ self.stale = True
+
+ def set_facecolor(self, c):
+ """
+ Set the facecolor(s) of the collection. *c* can be a
+ matplotlib color spec (all patches have same color), or a
+ sequence of specs; if it is a sequence the patches will
+ cycle through the sequence.
+
+ If *c* is 'none', the patch will not be filled.
+
+ ACCEPTS: matplotlib color spec or sequence of specs
+ """
+ self._original_facecolor = c
+ self._set_facecolor(c)
+
+ def set_facecolors(self, c):
+ """alias for set_facecolor"""
+ return self.set_facecolor(c)
+
+ def get_facecolor(self):
+ return self._facecolors
+ get_facecolors = get_facecolor
+
+ def get_edgecolor(self):
+ if (isinstance(self._edgecolors, six.string_types)
+ and self._edgecolors == str('face')):
+ return self.get_facecolors()
+ else:
+ return self._edgecolors
+ get_edgecolors = get_edgecolor
+
+ def _set_edgecolor(self, c):
+ set_hatch_color = True
+ if c is None:
+ if (mpl.rcParams['patch.force_edgecolor'] or
+ not self._is_filled or self._edge_default):
+ c = mpl.rcParams['patch.edgecolor']
+ else:
+ c = 'none'
+ set_hatch_color = False
+
+ self._is_stroked = True
+ try:
+ if c.lower() == 'none':
+ self._is_stroked = False
+ except AttributeError:
+ pass
+
+ try:
+ if c.lower() == 'face': # Special case: lookup in "get" method.
+ self._edgecolors = 'face'
+ return
+ except AttributeError:
+ pass
+ self._edgecolors = mcolors.to_rgba_array(c, self._alpha)
+ if set_hatch_color and len(self._edgecolors):
+ self._hatch_color = tuple(self._edgecolors[0])
+ self.stale = True
+
+ def set_edgecolor(self, c):
+ """
+ Set the edgecolor(s) of the collection. *c* can be a
+ matplotlib color spec (all patches have same color), or a
+ sequence of specs; if it is a sequence the patches will
+ cycle through the sequence.
+
+ If *c* is 'face', the edge color will always be the same as
+ the face color. If it is 'none', the patch boundary will not
+ be drawn.
+
+ ACCEPTS: matplotlib color spec or sequence of specs
+ """
+ self._original_edgecolor = c
+ self._set_edgecolor(c)
+
+ def set_edgecolors(self, c):
+ """alias for set_edgecolor"""
+ return self.set_edgecolor(c)
+
+ def set_alpha(self, alpha):
+ """
+ Set the alpha transparencies of the collection. *alpha* must be
+ a float or *None*.
+
+ ACCEPTS: float or None
+ """
+ if alpha is not None:
+ try:
+ float(alpha)
+ except TypeError:
+ raise TypeError('alpha must be a float or None')
+ self.update_dict['array'] = True
+ artist.Artist.set_alpha(self, alpha)
+ self._set_facecolor(self._original_facecolor)
+ self._set_edgecolor(self._original_edgecolor)
+
+ def get_linewidths(self):
+ return self._linewidths
+ get_linewidth = get_linewidths
+
+ def get_linestyles(self):
+ return self._linestyles
+ get_dashes = get_linestyle = get_linestyles
+
+ def update_scalarmappable(self):
+ """
+ If the scalar mappable array is not None, update colors
+ from scalar data
+ """
+ if self._A is None:
+ return
+ if self._A.ndim > 1:
+ raise ValueError('Collections can only map rank 1 arrays')
+ if not self.check_update("array"):
+ return
+ if self._is_filled:
+ self._facecolors = self.to_rgba(self._A, self._alpha)
+ elif self._is_stroked:
+ self._edgecolors = self.to_rgba(self._A, self._alpha)
+ self.stale = True
+
+ def get_fill(self):
+ 'return whether fill is set'
+ return self._is_filled
+
+ def update_from(self, other):
+ 'copy properties from other to self'
+
+ artist.Artist.update_from(self, other)
+ self._antialiaseds = other._antialiaseds
+ self._original_edgecolor = other._original_edgecolor
+ self._edgecolors = other._edgecolors
+ self._original_facecolor = other._original_facecolor
+ self._facecolors = other._facecolors
+ self._linewidths = other._linewidths
+ self._linestyles = other._linestyles
+ self._us_linestyles = other._us_linestyles
+ self._pickradius = other._pickradius
+ self._hatch = other._hatch
+
+ # update_from for scalarmappable
+ self._A = other._A
+ self.norm = other.norm
+ self.cmap = other.cmap
+ # self.update_dict = other.update_dict # do we need to copy this? -JJL
+ self.stale = True
+
+# these are not available for the object inspector until after the
+# class is built so we define an initial set here for the init
+# function and they will be overridden after object defn
+docstring.interpd.update(Collection="""\
+ Valid Collection keyword arguments:
+
+ * *edgecolors*: None
+ * *facecolors*: None
+ * *linewidths*: None
+ * *antialiaseds*: None
+ * *offsets*: None
+ * *transOffset*: transforms.IdentityTransform()
+ * *norm*: None (optional for
+ :class:`matplotlib.cm.ScalarMappable`)
+ * *cmap*: None (optional for
+ :class:`matplotlib.cm.ScalarMappable`)
+
+ *offsets* and *transOffset* are used to translate the patch after
+ rendering (default no offsets)
+
+ If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
+ are None, they default to their :data:`matplotlib.rcParams` patch
+ setting, in sequence form.
+""")
+
+
+class _CollectionWithSizes(Collection):
+ """
+ Base class for collections that have an array of sizes.
+ """
+ _factor = 1.0
+
+ def get_sizes(self):
+ """
+ Returns the sizes of the elements in the collection. The
+ value represents the 'area' of the element.
+
+ Returns
+ -------
+ sizes : array
+ The 'area' of each element.
+ """
+ return self._sizes
+
+ def set_sizes(self, sizes, dpi=72.0):
+ """
+ Set the sizes of each member of the collection.
+
+ Parameters
+ ----------
+ sizes : ndarray or None
+ The size to set for each element of the collection. The
+ value is the 'area' of the element.
+
+ dpi : float
+ The dpi of the canvas. Defaults to 72.0.
+ """
+ if sizes is None:
+ self._sizes = np.array([])
+ self._transforms = np.empty((0, 3, 3))
+ else:
+ self._sizes = np.asarray(sizes)
+ self._transforms = np.zeros((len(self._sizes), 3, 3))
+ scale = np.sqrt(self._sizes) * dpi / 72.0 * self._factor
+ self._transforms[:, 0, 0] = scale
+ self._transforms[:, 1, 1] = scale
+ self._transforms[:, 2, 2] = 1.0
+ self.stale = True
+
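+ # Note on the scaling above: a size of s points^2 gives a linear scale
+ # factor of sqrt(s) * dpi / 72 * _factor, so e.g. sizes=[36.0] at dpi=72
+ # with _factor=1.0 scales the unit path by 6 in each direction.
+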
+ @artist.allow_rasterization
+ def draw(self, renderer):
+ self.set_sizes(self._sizes, self.figure.dpi)
+ Collection.draw(self, renderer)
+
+
+class PathCollection(_CollectionWithSizes):
+ """
+ This is the most basic :class:`Collection` subclass.
+ """
+ @docstring.dedent_interpd
+ def __init__(self, paths, sizes=None, **kwargs):
+ """
+ *paths* is a sequence of :class:`matplotlib.path.Path`
+ instances.
+
+ %(Collection)s
+ """
+
+ Collection.__init__(self, **kwargs)
+ self.set_paths(paths)
+ self.set_sizes(sizes)
+ self.stale = True
+
+ def set_paths(self, paths):
+ self._paths = paths
+ self.stale = True
+
+ def get_paths(self):
+ return self._paths
+
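+# A minimal usage sketch for PathCollection (``ax`` is assumed to be an
+# existing Axes created elsewhere):
+#
+#     import matplotlib.path as mpath
+#     from matplotlib.collections import PathCollection
+#     pc = PathCollection([mpath.Path.unit_circle()], sizes=(100,),
+#                         offsets=[(0, 0), (1, 1)], transOffset=ax.transData,
+#                         facecolors='C0')
+#     ax.add_collection(pc)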
+
+class PolyCollection(_CollectionWithSizes):
+ @docstring.dedent_interpd
+ def __init__(self, verts, sizes=None, closed=True, **kwargs):
+ """
+ *verts* is a sequence of ( *verts0*, *verts1*, ...) where
+ *verts_i* is a sequence of *xy* tuples of vertices, or an
+ equivalent :mod:`numpy` array of shape (*nv*, 2).
+
+ *sizes* is *None* (default) or a sequence of floats that
+ scale the corresponding *verts_i*. The scaling is applied
+ before the Artist master transform; if the latter is an identity
+ transform, then the overall scaling is such that if
+ *verts_i* specify a unit square, then *sizes_i* is the area
+ of that square in points^2.
+ If len(*sizes*) < *nv*, the additional values will be
+ taken cyclically from the array.
+
+ *closed*, when *True*, will explicitly close the polygon.
+
+ %(Collection)s
+ """
+ Collection.__init__(self, **kwargs)
+ self.set_sizes(sizes)
+ self.set_verts(verts, closed)
+ self.stale = True
+
+ def set_verts(self, verts, closed=True):
+ '''This allows one to delay initialization of the vertices.'''
+ if isinstance(verts, np.ma.MaskedArray):
+ verts = verts.astype(float).filled(np.nan)
+ # This is much faster than having Path do it one at a time.
+ if closed:
+ self._paths = []
+ for xy in verts:
+ if len(xy):
+ if isinstance(xy, np.ma.MaskedArray):
+ xy = np.ma.concatenate([xy, xy[0:1]])
+ else:
+ xy = np.asarray(xy)
+ xy = np.concatenate([xy, xy[0:1]])
+ codes = np.empty(xy.shape[0], dtype=mpath.Path.code_type)
+ codes[:] = mpath.Path.LINETO
+ codes[0] = mpath.Path.MOVETO
+ codes[-1] = mpath.Path.CLOSEPOLY
+ self._paths.append(mpath.Path(xy, codes))
+ else:
+ self._paths.append(mpath.Path(xy))
+ else:
+ self._paths = [mpath.Path(xy) for xy in verts]
+ self.stale = True
+
+ set_paths = set_verts
+
+ def set_verts_and_codes(self, verts, codes):
+ '''This allows one to initialize vertices with path codes.'''
+ if (len(verts) != len(codes)):
+ raise ValueError("'codes' must be a 1D list or array "
+ "with the same length of 'verts'")
+ self._paths = []
+ for xy, cds in zip(verts, codes):
+ if len(xy):
+ self._paths.append(mpath.Path(xy, cds))
+ else:
+ self._paths.append(mpath.Path(xy))
+ self.stale = True
+
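+# A minimal usage sketch for PolyCollection (``ax`` is assumed to be an
+# existing Axes):
+#
+#     from matplotlib.collections import PolyCollection
+#     verts = [[(0, 0), (1, 0), (0.5, 1)],    # first triangle
+#              [(2, 0), (3, 0), (2.5, 1)]]    # second triangle
+#     poly = PolyCollection(verts, closed=True, facecolors=['C0', 'C1'])
+#     ax.add_collection(poly)
+#     ax.autoscale_view()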
+
+class BrokenBarHCollection(PolyCollection):
+ """
+ A collection of horizontal bars spanning *yrange* with a sequence of
+ *xranges*.
+ """
+ @docstring.dedent_interpd
+ def __init__(self, xranges, yrange, **kwargs):
+ """
+ *xranges*
+ sequence of (*xmin*, *xwidth*)
+
+ *yrange*
+ *ymin*, *ywidth*
+
+ %(Collection)s
+ """
+ ymin, ywidth = yrange
+ ymax = ymin + ywidth
+ verts = [[(xmin, ymin),
+ (xmin, ymax),
+ (xmin + xwidth, ymax),
+ (xmin + xwidth, ymin),
+ (xmin, ymin)] for xmin, xwidth in xranges]
+ PolyCollection.__init__(self, verts, **kwargs)
+
+ @staticmethod
+ def span_where(x, ymin, ymax, where, **kwargs):
+ """
+ Create a BrokenBarHCollection to plot horizontal bars over
+ the regions in *x* where *where* is True. The bars range
+ on the y-axis from *ymin* to *ymax*.
+
+ A :class:`BrokenBarHCollection` is returned. *kwargs* are
+ passed on to the collection.
+ """
+ xranges = []
+ for ind0, ind1 in cbook.contiguous_regions(where):
+ xslice = x[ind0:ind1]
+ if not len(xslice):
+ continue
+ xranges.append((xslice[0], xslice[-1] - xslice[0]))
+
+ collection = BrokenBarHCollection(
+ xranges, [ymin, ymax - ymin], **kwargs)
+ return collection
+
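+# A minimal span_where sketch (``ax`` is assumed to be an existing Axes):
+#
+#     import numpy as np
+#     from matplotlib.collections import BrokenBarHCollection
+#     x = np.linspace(0, 2 * np.pi, 200)
+#     y = np.sin(x)
+#     bars = BrokenBarHCollection.span_where(
+#         x, ymin=-1, ymax=1, where=y > 0, facecolor='C2', alpha=0.3)
+#     ax.add_collection(bars)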
+
+class RegularPolyCollection(_CollectionWithSizes):
+ """Draw a collection of regular polygons with *numsides*."""
+ _path_generator = mpath.Path.unit_regular_polygon
+
+ _factor = CIRCLE_AREA_FACTOR
+
+ @docstring.dedent_interpd
+ def __init__(self,
+ numsides,
+ rotation=0,
+ sizes=(1,),
+ **kwargs):
+ """
+ *numsides*
+ the number of sides of the polygon
+
+ *rotation*
+ the rotation of the polygon in radians
+
+ *sizes*
+ gives the area of the circle circumscribing the
+ regular polygon in points^2
+
+ %(Collection)s
+
+ Example: see :file:`examples/dynamic_collection.py` for
+ complete example::
+
+ offsets = np.random.rand(20,2)
+ facecolors = [cm.jet(x) for x in np.random.rand(20)]
+ black = (0,0,0,1)
+
+ collection = RegularPolyCollection(
+ numsides=5, # a pentagon
+ rotation=0, sizes=(50,),
+ facecolors = facecolors,
+ edgecolors = (black,),
+ linewidths = (1,),
+ offsets = offsets,
+ transOffset = ax.transData,
+ )
+ """
+ Collection.__init__(self, **kwargs)
+ self.set_sizes(sizes)
+ self._numsides = numsides
+ self._paths = [self._path_generator(numsides)]
+ self._rotation = rotation
+ self.set_transform(transforms.IdentityTransform())
+
+ def get_numsides(self):
+ return self._numsides
+
+ def get_rotation(self):
+ return self._rotation
+
+ @artist.allow_rasterization
+ def draw(self, renderer):
+ self.set_sizes(self._sizes, self.figure.dpi)
+ self._transforms = [
+ transforms.Affine2D(x).rotate(-self._rotation).get_matrix()
+ for x in self._transforms
+ ]
+ Collection.draw(self, renderer)
+
+
+class StarPolygonCollection(RegularPolyCollection):
+ """
+ Draw a collection of regular stars with *numsides* points."""
+
+ _path_generator = mpath.Path.unit_regular_star
+
+
+class AsteriskPolygonCollection(RegularPolyCollection):
+ """
+ Draw a collection of regular asterisks with *numsides* points."""
+
+ _path_generator = mpath.Path.unit_regular_asterisk
+
+
+class LineCollection(Collection):
+ """
+ All parameters must be sequences or scalars; if scalars, they will
+ be converted to sequences. The property of the ith line
+ segment is::
+
+ prop[i % len(props)]
+
+ i.e., the properties cycle if the ``len`` of props is less than the
+ number of segments.
+ """
+
+ _edge_default = True
+
+ def __init__(self, segments, # Can be None.
+ linewidths=None,
+ colors=None,
+ antialiaseds=None,
+ linestyles='solid',
+ offsets=None,
+ transOffset=None,
+ norm=None,
+ cmap=None,
+ pickradius=5,
+ zorder=2,
+ facecolors='none',
+ **kwargs
+ ):
+ """
+ Parameters
+ ----------
+ segments :
+ A sequence of (*line0*, *line1*, *line2*), where::
+
+ linen = (x0, y0), (x1, y1), ... (xm, ym)
+
+ or the equivalent numpy array with two columns. Each line
+ can be a different length.
+
+ colors : sequence, optional
+ A sequence of RGBA tuples; arbitrary color strings and
+ other color specs are not allowed.
+
+ antialiaseds : sequence, optional
+ A sequence of ones or zeros.
+
+ linestyles : string, tuple, optional
+ Either one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ], or
+ a dash tuple. The dash tuple is::
+
+ (offset, onoffseq)
+
+ where ``onoffseq`` is an even length tuple of on and off ink
+ in points.
+
+ norm : Normalize, optional
+ `~.colors.Normalize` instance.
+
+ cmap : string or Colormap, optional
+ Colormap name or `~.colors.Colormap` instance.
+
+ pickradius : float, optional
+ The tolerance in points for mouse clicks picking a line.
+ Default is 5 pt.
+
+ zorder : int, optional
+ zorder of the LineCollection. Default is 2.
+
+ facecolors : optional
+ The facecolors of the LineCollection. Default is 'none'.
+ Setting to a value other than 'none' will lead to a filled
+ polygon being drawn between points on each line.
+
+ Notes
+ -----
+ If *linewidths*, *colors*, or *antialiaseds* is None, they
+ default to their rcParams setting, in sequence form.
+
+ If *offsets* and *transOffset* are not None, then
+ *offsets* are transformed by *transOffset* and applied after
+ the segments have been transformed to display coordinates.
+
+ If *offsets* is not None but *transOffset* is None, then the
+ *offsets* are added to the segments before any transformation.
+ In this case, a single offset can be specified as::
+
+ offsets=(xo,yo)
+
+ and this value will be added cumulatively to each successive
+ segment, so as to produce a set of successively offset curves.
+
+ The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
+ If the :class:`~matplotlib.cm.ScalarMappable` array
+ :attr:`~matplotlib.cm.ScalarMappable._A` is not None (i.e., a call to
+ :meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
+ draw time a call to scalar mappable will be made to set the colors.
+ """
+ if colors is None:
+ colors = mpl.rcParams['lines.color']
+ if linewidths is None:
+ linewidths = (mpl.rcParams['lines.linewidth'],)
+ if antialiaseds is None:
+ antialiaseds = (mpl.rcParams['lines.antialiased'],)
+
+ colors = mcolors.to_rgba_array(colors)
+
+ Collection.__init__(
+ self,
+ edgecolors=colors,
+ facecolors=facecolors,
+ linewidths=linewidths,
+ linestyles=linestyles,
+ antialiaseds=antialiaseds,
+ offsets=offsets,
+ transOffset=transOffset,
+ norm=norm,
+ cmap=cmap,
+ pickradius=pickradius,
+ zorder=zorder,
+ **kwargs)
+
+ self.set_segments(segments)
+
+ def set_segments(self, segments):
+ if segments is None:
+ return
+ _segments = []
+
+ for seg in segments:
+ if not isinstance(seg, np.ma.MaskedArray):
+ seg = np.asarray(seg, float)
+ _segments.append(seg)
+
+ if self._uniform_offsets is not None:
+ _segments = self._add_offsets(_segments)
+
+ self._paths = [mpath.Path(_seg) for _seg in _segments]
+ self.stale = True
+
+ set_verts = set_segments # for compatibility with PolyCollection
+ set_paths = set_segments
+
+ def get_segments(self):
+ """
+ Returns
+ -------
+ segments : list
+ List of segments in the LineCollection. Each list item contains an
+ array of vertices.
+ """
+ segments = []
+
+ for path in self._paths:
+ vertices = [vertex for vertex, _ in path.iter_segments()]
+ vertices = np.asarray(vertices)
+ segments.append(vertices)
+
+ return segments
+
+ def _add_offsets(self, segs):
+ offsets = self._uniform_offsets
+ Nsegs = len(segs)
+ Noffs = offsets.shape[0]
+ if Noffs == 1:
+ for i in range(Nsegs):
+ segs[i] = segs[i] + i * offsets
+ else:
+ for i in range(Nsegs):
+ io = i % Noffs
+ segs[i] = segs[i] + offsets[io:io + 1]
+ return segs
+
+ def set_color(self, c):
+ """
+ Set the color(s) of the LineCollection.
+
+ Parameters
+ ----------
+ c :
+ Matplotlib color argument (all patches have same color), or a
+ sequence of rgba tuples; if it is a sequence the patches will
+ cycle through the sequence.
+ """
+ self.set_edgecolor(c)
+ self.stale = True
+
+ def get_color(self):
+ return self._edgecolors
+
+ get_colors = get_color # for compatibility with old versions
+
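+# A minimal LineCollection sketch with scalar-mapped colors (``ax`` is
+# assumed to be an existing Axes):
+#
+#     import numpy as np
+#     from matplotlib.collections import LineCollection
+#     x = np.linspace(0, 1, 50)
+#     segments = [np.column_stack([x, x ** k]) for k in (1, 2, 3)]
+#     lc = LineCollection(segments, cmap='viridis', linewidths=2)
+#     lc.set_array(np.array([1.0, 2.0, 3.0]))   # one value per segment
+#     ax.add_collection(lc)
+#     ax.autoscale_view()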
+
+class EventCollection(LineCollection):
+ '''
+ A collection of discrete events.
+
+ The events are given by a 1-dimensional array, usually the position of
+ something along an axis, such as time or length. They do not have an
+ amplitude and are displayed as vertical or horizontal parallel bars.
+ '''
+
+ _edge_default = True
+
+ def __init__(self,
+ positions, # Cannot be None.
+ orientation=None,
+ lineoffset=0,
+ linelength=1,
+ linewidth=None,
+ color=None,
+ linestyle='solid',
+ antialiased=None,
+ **kwargs
+ ):
+ """
+ Parameters
+ ----------
+ positions : 1D array-like object
+ Each value is an event.
+
+ orientation : {None, 'horizontal', 'vertical'}, optional
+ The orientation of the **collection** (the event bars are along
+ the orthogonal direction). Defaults to 'horizontal' if not
+ specified or None.
+
+ lineoffset : scalar, optional, default: 0
+ The offset of the center of the markers from the origin, in the
+ direction orthogonal to *orientation*.
+
+ linelength : scalar, optional, default: 1
+ The total height of the marker (i.e. the marker stretches from
+ ``lineoffset - linelength/2`` to ``lineoffset + linelength/2``).
+
+ linewidth : scalar or None, optional, default: None
+ If it is None, defaults to its rcParams setting, in sequence form.
+
+ color : color, sequence of colors or None, optional, default: None
+ If it is None, defaults to its rcParams setting, in sequence form.
+
+ linestyle : str or tuple, optional, default: 'solid'
+ Valid strings are ['solid', 'dashed', 'dashdot', 'dotted',
+ '-', '--', '-.', ':']. Dash tuples should be of the form::
+
+ (offset, onoffseq),
+
+ where *onoffseq* is an even length tuple of on and off ink
+ in points.
+
+ antialiased : {None, 1, 2}, optional
+ If it is None, defaults to its rcParams setting, in sequence form.
+
+ **kwargs : optional
+ Other keyword arguments are line collection properties. See
+ :class:`~matplotlib.collections.LineCollection` for a list of
+ the valid properties.
+
+ Examples
+ --------
+
+ .. plot:: gallery/lines_bars_and_markers/eventcollection_demo.py
+ """
+
+ segment = (lineoffset + linelength / 2.,
+ lineoffset - linelength / 2.)
+ if positions is None or len(positions) == 0:
+ segments = []
+ elif hasattr(positions, 'ndim') and positions.ndim > 1:
+ raise ValueError('positions cannot be an array with more than '
+ 'one dimension.')
+ elif (orientation is None or orientation.lower() == 'none' or
+ orientation.lower() == 'horizontal'):
+ positions.sort()
+ segments = [[(coord1, coord2) for coord2 in segment] for
+ coord1 in positions]
+ self._is_horizontal = True
+ elif orientation.lower() == 'vertical':
+ positions.sort()
+ segments = [[(coord2, coord1) for coord2 in segment] for
+ coord1 in positions]
+ self._is_horizontal = False
+ else:
+ raise ValueError("orientation must be 'horizontal' or 'vertical'")
+
+ LineCollection.__init__(self,
+ segments,
+ linewidths=linewidth,
+ colors=color,
+ antialiaseds=antialiased,
+ linestyles=linestyle,
+ **kwargs)
+
+ self._linelength = linelength
+ self._lineoffset = lineoffset
+
+ def get_positions(self):
+ '''
+ return an array containing the floating-point values of the positions
+ '''
+ segments = self.get_segments()
+ pos = 0 if self.is_horizontal() else 1
+ positions = []
+ for segment in segments:
+ positions.append(segment[0, pos])
+ return positions
+
+ def set_positions(self, positions):
+ '''
+ set the positions of the events to the specified value
+ '''
+ if positions is None or (hasattr(positions, 'len') and
+ len(positions) == 0):
+ self.set_segments([])
+ return
+
+ lineoffset = self.get_lineoffset()
+ linelength = self.get_linelength()
+ segment = (lineoffset + linelength / 2.,
+ lineoffset - linelength / 2.)
+ positions = np.asanyarray(positions)
+ positions.sort()
+ if self.is_horizontal():
+ segments = [[(coord1, coord2) for coord2 in segment] for
+ coord1 in positions]
+ else:
+ segments = [[(coord2, coord1) for coord2 in segment] for
+ coord1 in positions]
+ self.set_segments(segments)
+
+ def add_positions(self, position):
+ '''
+ add one or more events at the specified positions
+ '''
+ if position is None or (hasattr(position, 'len') and
+ len(position) == 0):
+ return
+ positions = self.get_positions()
+ positions = np.hstack([positions, np.asanyarray(position)])
+ self.set_positions(positions)
+ extend_positions = append_positions = add_positions
+
+ def is_horizontal(self):
+ '''
+ True if the eventcollection is horizontal, False if vertical
+ '''
+ return self._is_horizontal
+
+ def get_orientation(self):
+ '''
+ get the orientation of the event line, may be:
+ [ 'horizontal' | 'vertical' ]
+ '''
+ return 'horizontal' if self.is_horizontal() else 'vertical'
+
+ def switch_orientation(self):
+ '''
+ switch the orientation of the event line, either from vertical to
+ horizontal or vice versa
+ '''
+ segments = self.get_segments()
+ for i, segment in enumerate(segments):
+ segments[i] = np.fliplr(segment)
+ self.set_segments(segments)
+ self._is_horizontal = not self.is_horizontal()
+ self.stale = True
+
+ def set_orientation(self, orientation=None):
+ '''
+ set the orientation of the event line
+ [ 'horizontal' | 'vertical' | None ]
+ defaults to 'horizontal' if not specified or None
+ '''
+ if (orientation is None or orientation.lower() == 'none' or
+ orientation.lower() == 'horizontal'):
+ is_horizontal = True
+ elif orientation.lower() == 'vertical':
+ is_horizontal = False
+ else:
+ raise ValueError("orientation must be 'horizontal' or 'vertical'")
+
+ if is_horizontal == self.is_horizontal():
+ return
+ self.switch_orientation()
+
+ def get_linelength(self):
+ '''
+ get the length of the lines used to mark each event
+ '''
+ return self._linelength
+
+ def set_linelength(self, linelength):
+ '''
+ set the length of the lines used to mark each event
+ '''
+ if linelength == self.get_linelength():
+ return
+ lineoffset = self.get_lineoffset()
+ segments = self.get_segments()
+ pos = 1 if self.is_horizontal() else 0
+ for segment in segments:
+ segment[0, pos] = lineoffset + linelength / 2.
+ segment[1, pos] = lineoffset - linelength / 2.
+ self.set_segments(segments)
+ self._linelength = linelength
+
+ def get_lineoffset(self):
+ '''
+ get the offset of the lines used to mark each event
+ '''
+ return self._lineoffset
+
+ def set_lineoffset(self, lineoffset):
+ '''
+ set the offset of the lines used to mark each event
+ '''
+ if lineoffset == self.get_lineoffset():
+ return
+ linelength = self.get_linelength()
+ segments = self.get_segments()
+ pos = 1 if self.is_horizontal() else 0
+ for segment in segments:
+ segment[0, pos] = lineoffset + linelength / 2.
+ segment[1, pos] = lineoffset - linelength / 2.
+ self.set_segments(segments)
+ self._lineoffset = lineoffset
+
+ def get_linewidth(self):
+ '''
+ get the width of the lines used to mark each event
+ '''
+ return self.get_linewidths()[0]
+
+ def get_linestyle(self):
+ '''
+ get the style of the lines used to mark each event
+ [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
+ '''
+ return self.get_linestyles()
+
+ def get_color(self):
+ '''
+ get the color of the lines used to mark each event
+ '''
+ return self.get_colors()[0]
+
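+# A minimal EventCollection sketch (``ax`` is assumed to be an existing Axes):
+#
+#     import numpy as np
+#     from matplotlib.collections import EventCollection
+#     events = EventCollection(np.random.rand(20), orientation='horizontal',
+#                              lineoffset=0.5, linelength=0.2, color='C3')
+#     ax.add_collection(events)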
+
+class CircleCollection(_CollectionWithSizes):
+ """
+ A collection of circles, drawn using splines.
+ """
+ _factor = CIRCLE_AREA_FACTOR
+
+ @docstring.dedent_interpd
+ def __init__(self, sizes, **kwargs):
+ """
+ *sizes*
+ Gives the area of the circle in points^2
+
+ %(Collection)s
+ """
+ Collection.__init__(self, **kwargs)
+ self.set_sizes(sizes)
+ self.set_transform(transforms.IdentityTransform())
+ self._paths = [mpath.Path.unit_circle()]
+
+
+class EllipseCollection(Collection):
+ """
+ A collection of ellipses, drawn using splines.
+ """
+ @docstring.dedent_interpd
+ def __init__(self, widths, heights, angles, units='points', **kwargs):
+ """
+ *widths*: sequence
+ lengths of first axes (e.g., major axis lengths)
+
+ *heights*: sequence
+ lengths of second axes
+
+ *angles*: sequence
+ angles of first axes, degrees CCW from the X-axis
+
+ *units*: ['points' | 'inches' | 'dots' | 'width' | 'height'
+ | 'x' | 'y' | 'xy']
+
+ units in which majors and minors are given; 'width' and
+ 'height' refer to the dimensions of the axes, while 'x'
+ and 'y' refer to the *offsets* data units. 'xy' differs
+ from all others in that the angle as plotted varies with
+ the aspect ratio, and equals the specified angle only when
+ the aspect ratio is unity. Hence it behaves the same as
+ the :class:`~matplotlib.patches.Ellipse` with
+ axes.transData as its transform.
+
+ Additional kwargs inherited from the base :class:`Collection`:
+
+ %(Collection)s
+ """
+ Collection.__init__(self, **kwargs)
+ self._widths = 0.5 * np.asarray(widths).ravel()
+ self._heights = 0.5 * np.asarray(heights).ravel()
+ self._angles = np.deg2rad(angles).ravel()
+ self._units = units
+ self.set_transform(transforms.IdentityTransform())
+ self._transforms = np.empty((0, 3, 3))
+ self._paths = [mpath.Path.unit_circle()]
+
+ def _set_transforms(self):
+ """
+ Calculate transforms immediately before drawing.
+ """
+ ax = self.axes
+ fig = self.figure
+
+ if self._units == 'xy':
+ sc = 1
+ elif self._units == 'x':
+ sc = ax.bbox.width / ax.viewLim.width
+ elif self._units == 'y':
+ sc = ax.bbox.height / ax.viewLim.height
+ elif self._units == 'inches':
+ sc = fig.dpi
+ elif self._units == 'points':
+ sc = fig.dpi / 72.0
+ elif self._units == 'width':
+ sc = ax.bbox.width
+ elif self._units == 'height':
+ sc = ax.bbox.height
+ elif self._units == 'dots':
+ sc = 1.0
+ else:
+ raise ValueError('unrecognized units: %s' % self._units)
+
+ self._transforms = np.zeros((len(self._widths), 3, 3))
+ widths = self._widths * sc
+ heights = self._heights * sc
+ sin_angle = np.sin(self._angles)
+ cos_angle = np.cos(self._angles)
+ self._transforms[:, 0, 0] = widths * cos_angle
+ self._transforms[:, 0, 1] = heights * -sin_angle
+ self._transforms[:, 1, 0] = widths * sin_angle
+ self._transforms[:, 1, 1] = heights * cos_angle
+ self._transforms[:, 2, 2] = 1.0
+
+ _affine = transforms.Affine2D
+ if self._units == 'xy':
+ m = ax.transData.get_affine().get_matrix().copy()
+ m[:2, 2:] = 0
+ self.set_transform(_affine(m))
+
+ @artist.allow_rasterization
+ def draw(self, renderer):
+ self._set_transforms()
+ Collection.draw(self, renderer)
+
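+# A minimal EllipseCollection sketch (``ax`` is assumed to be an existing
+# Axes); widths, heights and angles are given per ellipse, while offsets are
+# interpreted in data coordinates:
+#
+#     import numpy as np
+#     from matplotlib.collections import EllipseCollection
+#     offsets = np.column_stack([np.arange(5), np.arange(5)])
+#     ec = EllipseCollection(widths=np.full(5, 10.0), heights=np.full(5, 5.0),
+#                            angles=np.arange(5) * 30.0, units='points',
+#                            offsets=offsets, transOffset=ax.transData)
+#     ax.add_collection(ec)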
+
+class PatchCollection(Collection):
+ """
+ A generic collection of patches.
+
+ This makes it easier to assign a color map to a heterogeneous
+ collection of patches.
+
+ This also may improve plotting speed, since PatchCollection will
+ draw faster than a large number of patches.
+ """
+
+ def __init__(self, patches, match_original=False, **kwargs):
+ """
+ *patches*
+ a sequence of Patch objects. This list may include
+ a heterogeneous assortment of different patch types.
+
+ *match_original*
+ If True, use the colors and linewidths of the original
+ patches. If False, new colors may be assigned by
+ providing the standard collection arguments, facecolor,
+ edgecolor, linewidths, norm or cmap.
+
+ If any of *edgecolors*, *facecolors*, *linewidths*,
+ *antialiaseds* are None, they default to their
+ :data:`matplotlib.rcParams` patch setting, in sequence form.
+
+ The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
+ If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
+ None (i.e., a call to set_array has been made), at draw time a
+ call to scalar mappable will be made to set the face colors.
+ """
+
+ if match_original:
+ def determine_facecolor(patch):
+ if patch.get_fill():
+ return patch.get_facecolor()
+ return [0, 0, 0, 0]
+
+ kwargs['facecolors'] = [determine_facecolor(p) for p in patches]
+ kwargs['edgecolors'] = [p.get_edgecolor() for p in patches]
+ kwargs['linewidths'] = [p.get_linewidth() for p in patches]
+ kwargs['linestyles'] = [p.get_linestyle() for p in patches]
+ kwargs['antialiaseds'] = [p.get_antialiased() for p in patches]
+
+ Collection.__init__(self, **kwargs)
+
+ self.set_paths(patches)
+
+ def set_paths(self, patches):
+ paths = [p.get_transform().transform_path(p.get_path())
+ for p in patches]
+ self._paths = paths
+
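+# A minimal PatchCollection sketch with colormapping (``ax`` is assumed to be
+# an existing Axes):
+#
+#     import numpy as np
+#     from matplotlib.patches import Rectangle
+#     from matplotlib.collections import PatchCollection
+#     patches = [Rectangle((i, 0), 0.8, 1 + i) for i in range(4)]
+#     pc = PatchCollection(patches, cmap='plasma')
+#     pc.set_array(np.arange(4))   # one scalar per patch drives its facecolor
+#     ax.add_collection(pc)
+#     ax.autoscale_view()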
+
+class TriMesh(Collection):
+ """
+ Class for the efficient drawing of a triangular mesh using
+ Gouraud shading.
+
+ A triangular mesh is a :class:`~matplotlib.tri.Triangulation`
+ object.
+ """
+ def __init__(self, triangulation, **kwargs):
+ Collection.__init__(self, **kwargs)
+ self._triangulation = triangulation
+ self._shading = 'gouraud'
+ self._is_filled = True
+
+ self._bbox = transforms.Bbox.unit()
+
+ # Unfortunately this requires a copy, unless Triangulation
+ # was rewritten.
+ xy = np.hstack((triangulation.x.reshape(-1, 1),
+ triangulation.y.reshape(-1, 1)))
+ self._bbox.update_from_data_xy(xy)
+
+ def get_paths(self):
+ if self._paths is None:
+ self.set_paths()
+ return self._paths
+
+ def set_paths(self):
+ self._paths = self.convert_mesh_to_paths(self._triangulation)
+
+ @staticmethod
+ def convert_mesh_to_paths(tri):
+ """
+ Converts a given mesh into a sequence of
+ :class:`matplotlib.path.Path` objects for easier rendering by
+ backends that do not directly support meshes.
+
+ This function is primarily of use to backend implementers.
+ """
+ Path = mpath.Path
+ triangles = tri.get_masked_triangles()
+ verts = np.concatenate((tri.x[triangles][..., np.newaxis],
+ tri.y[triangles][..., np.newaxis]), axis=2)
+ return [Path(x) for x in verts]
+
+ @artist.allow_rasterization
+ def draw(self, renderer):
+ if not self.get_visible():
+ return
+ renderer.open_group(self.__class__.__name__)
+ transform = self.get_transform()
+
+ # Get a list of triangles and the color at each vertex.
+ tri = self._triangulation
+ triangles = tri.get_masked_triangles()
+
+ verts = np.concatenate((tri.x[triangles][..., np.newaxis],
+ tri.y[triangles][..., np.newaxis]), axis=2)
+
+ self.update_scalarmappable()
+ colors = self._facecolors[triangles]
+
+ gc = renderer.new_gc()
+ self._set_gc_clip(gc)
+ gc.set_linewidth(self.get_linewidth()[0])
+ renderer.draw_gouraud_triangles(gc, verts, colors, transform.frozen())
+ gc.restore()
+ renderer.close_group(self.__class__.__name__)
+
+
+class QuadMesh(Collection):
+ """
+ Class for the efficient drawing of a quadrilateral mesh.
+
+ A quadrilateral mesh consists of a grid of vertices. The
+ dimensions of this array are (*meshWidth* + 1, *meshHeight* +
+ 1). Each vertex in the mesh has a different set of "mesh
+ coordinates" representing its position in the topology of the
+ mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
+ and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
+ (*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
+ form one of the quadrilaterals in the mesh. There are thus
+ (*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
+ need not be regular and the polygons need not be convex.
+
+ A quadrilateral mesh is represented by a (2 x ((*meshWidth* + 1) *
+ (*meshHeight* + 1))) numpy array *coordinates*, where each row is
+ the *x* and *y* coordinates of one of the vertices. To define the
+ function that maps from a data point to its corresponding color,
+ use the :meth:`set_cmap` method. Each of these arrays is indexed in
+ row-major order by the mesh coordinates of the vertex (or the mesh
+ coordinates of the lower left vertex, in the case of the
+ colors).
+
+ For example, the first entry in *coordinates* is the
+ coordinates of the vertex at mesh coordinates (0, 0), then the one
+ at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
+ so on.
+
+ *shading* may be 'flat' or 'gouraud'
+ """
+ def __init__(self, meshWidth, meshHeight, coordinates,
+ antialiased=True, shading='flat', **kwargs):
+ Collection.__init__(self, **kwargs)
+ self._meshWidth = meshWidth
+ self._meshHeight = meshHeight
+ # By converting to floats now, we can avoid that on every draw.
+ self._coordinates = np.asarray(coordinates, float).reshape(
+ (meshHeight + 1, meshWidth + 1, 2))
+ self._antialiased = antialiased
+ self._shading = shading
+
+ self._bbox = transforms.Bbox.unit()
+ self._bbox.update_from_data_xy(coordinates.reshape(
+ ((meshWidth + 1) * (meshHeight + 1), 2)))
+
+ def get_paths(self):
+ if self._paths is None:
+ self.set_paths()
+ return self._paths
+
+ def set_paths(self):
+ self._paths = self.convert_mesh_to_paths(
+ self._meshWidth, self._meshHeight, self._coordinates)
+ self.stale = True
+
+ def get_datalim(self, transData):
+ return (self.get_transform() - transData).transform_bbox(self._bbox)
+
+ @staticmethod
+ def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
+ """
+ Converts a given mesh into a sequence of
+ :class:`matplotlib.path.Path` objects for easier rendering by
+ backends that do not directly support quadmeshes.
+
+ This function is primarily of use to backend implementers.
+ """
+ Path = mpath.Path
+
+ if isinstance(coordinates, np.ma.MaskedArray):
+ c = coordinates.data
+ else:
+ c = coordinates
+
+ points = np.concatenate((
+ c[0:-1, 0:-1],
+ c[0:-1, 1:],
+ c[1:, 1:],
+ c[1:, 0:-1],
+ c[0:-1, 0:-1]
+ ), axis=2)
+ points = points.reshape((meshWidth * meshHeight, 5, 2))
+ return [Path(x) for x in points]
+
+ def convert_mesh_to_triangles(self, meshWidth, meshHeight, coordinates):
+ """
+ Converts a given mesh into a sequence of triangles, each point
+ with its own color. This is useful for experiments using
+ `draw_gouraud_triangle`.
+ """
+ if isinstance(coordinates, np.ma.MaskedArray):
+ p = coordinates.data
+ else:
+ p = coordinates
+
+ p_a = p[:-1, :-1]
+ p_b = p[:-1, 1:]
+ p_c = p[1:, 1:]
+ p_d = p[1:, :-1]
+ p_center = (p_a + p_b + p_c + p_d) / 4.0
+
+ triangles = np.concatenate((
+ p_a, p_b, p_center,
+ p_b, p_c, p_center,
+ p_c, p_d, p_center,
+ p_d, p_a, p_center,
+ ), axis=2)
+ triangles = triangles.reshape((meshWidth * meshHeight * 4, 3, 2))
+
+ c = self.get_facecolor().reshape((meshHeight + 1, meshWidth + 1, 4))
+ c_a = c[:-1, :-1]
+ c_b = c[:-1, 1:]
+ c_c = c[1:, 1:]
+ c_d = c[1:, :-1]
+ c_center = (c_a + c_b + c_c + c_d) / 4.0
+
+ colors = np.concatenate((
+ c_a, c_b, c_center,
+ c_b, c_c, c_center,
+ c_c, c_d, c_center,
+ c_d, c_a, c_center,
+ ), axis=2)
+ colors = colors.reshape((meshWidth * meshHeight * 4, 3, 4))
+
+ return triangles, colors
+
+ @artist.allow_rasterization
+ def draw(self, renderer):
+ if not self.get_visible():
+ return
+ renderer.open_group(self.__class__.__name__, self.get_gid())
+ transform = self.get_transform()
+ transOffset = self.get_offset_transform()
+ offsets = self._offsets
+
+ if self.have_units():
+ if len(self._offsets):
+ xs = self.convert_xunits(self._offsets[:, 0])
+ ys = self.convert_yunits(self._offsets[:, 1])
+ offsets = np.column_stack([xs, ys])
+
+ self.update_scalarmappable()
+
+ if not transform.is_affine:
+ coordinates = self._coordinates.reshape((-1, 2))
+ coordinates = transform.transform(coordinates)
+ coordinates = coordinates.reshape(self._coordinates.shape)
+ transform = transforms.IdentityTransform()
+ else:
+ coordinates = self._coordinates
+
+ if not transOffset.is_affine:
+ offsets = transOffset.transform_non_affine(offsets)
+ transOffset = transOffset.get_affine()
+
+ gc = renderer.new_gc()
+ self._set_gc_clip(gc)
+ gc.set_linewidth(self.get_linewidth()[0])
+
+ if self._shading == 'gouraud':
+ triangles, colors = self.convert_mesh_to_triangles(
+ self._meshWidth, self._meshHeight, coordinates)
+ renderer.draw_gouraud_triangles(
+ gc, triangles, colors, transform.frozen())
+ else:
+ renderer.draw_quad_mesh(
+ gc, transform.frozen(), self._meshWidth, self._meshHeight,
+ coordinates, offsets, transOffset, self.get_facecolor(),
+ self._antialiased, self.get_edgecolors())
+ gc.restore()
+ renderer.close_group(self.__class__.__name__)
+ self.stale = False
+
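+# A minimal QuadMesh sketch (``ax`` is assumed to be an existing Axes); a
+# w-by-h grid of cells needs (w + 1) * (h + 1) vertices, flattened row by row:
+#
+#     import numpy as np
+#     from matplotlib.collections import QuadMesh
+#     w, h = 3, 2
+#     xx, yy = np.meshgrid(np.arange(w + 1), np.arange(h + 1))
+#     coords = np.column_stack([xx.ravel(), yy.ravel()]).astype(float)
+#     mesh = QuadMesh(w, h, coords, cmap='viridis')
+#     mesh.set_array(np.arange(w * h))   # one value per quadrilateral
+#     ax.add_collection(mesh)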
+
+patchstr = artist.kwdoc(Collection)
+for k in ('QuadMesh', 'TriMesh', 'PolyCollection', 'BrokenBarHCollection',
+ 'RegularPolyCollection', 'PathCollection',
+ 'StarPolygonCollection', 'PatchCollection',
+ 'CircleCollection', 'Collection',):
+ docstring.interpd.update({k: patchstr})
+docstring.interpd.update(LineCollection=artist.kwdoc(LineCollection))
diff --git a/contrib/python/matplotlib/py2/matplotlib/colorbar.py b/contrib/python/matplotlib/py2/matplotlib/colorbar.py
new file mode 100644
index 00000000000..80664a99f93
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/colorbar.py
@@ -0,0 +1,1405 @@
+'''
+Colorbar toolkit with two classes and a function:
+
+ :class:`ColorbarBase`
+ the base class with full colorbar drawing functionality.
+ It can be used as-is to make a colorbar for a given colormap;
+ a mappable object (e.g., image) is not needed.
+
+ :class:`Colorbar`
+ the derived class for use with images or contour plots.
+
+ :func:`make_axes`
+ a function for resizing an axes and adding a second axes
+ suitable for a colorbar
+
+The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
+and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
+is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
+
+'''
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange, zip
+
+import warnings
+
+import numpy as np
+
+import matplotlib as mpl
+import matplotlib.artist as martist
+import matplotlib.cbook as cbook
+import matplotlib.collections as collections
+import matplotlib.colors as colors
+import matplotlib.contour as contour
+import matplotlib.cm as cm
+import matplotlib.gridspec as gridspec
+import matplotlib.patches as mpatches
+import matplotlib.path as mpath
+import matplotlib.ticker as ticker
+import matplotlib.transforms as mtransforms
+import matplotlib._layoutbox as layoutbox
+import matplotlib._constrained_layout as constrained_layout
+from matplotlib import docstring
+
+make_axes_kw_doc = '''
+
+ ============= ====================================================
+ Property Description
+ ============= ====================================================
+ *orientation* vertical or horizontal
+ *fraction* 0.15; fraction of original axes to use for colorbar
+ *pad* 0.05 if vertical, 0.15 if horizontal; fraction
+ of original axes between colorbar and new image axes
+ *shrink* 1.0; fraction by which to multiply the size of the colorbar
+ *aspect* 20; ratio of long to short dimensions
+ *anchor* (0.0, 0.5) if vertical; (0.5, 1.0) if horizontal;
+ the anchor point of the colorbar axes
+ *panchor* (1.0, 0.5) if vertical; (0.5, 0.0) if horizontal;
+ the anchor point of the colorbar parent axes. If
+ False, the parent axes' anchor will be unchanged
+ ============= ====================================================
+
+'''
+
+colormap_kw_doc = '''
+
+ ============ ====================================================
+ Property Description
+ ============ ====================================================
+ *extend* [ 'neither' | 'both' | 'min' | 'max' ]
+ If not 'neither', make pointed end(s) for out-of-
+ range values. These are set for a given colormap
+ using the colormap set_under and set_over methods.
+ *extendfrac* [ *None* | 'auto' | length | lengths ]
+ If set to *None*, both the minimum and maximum
+ triangular colorbar extensions will have a length of
+ 5% of the interior colorbar length (this is the
+ default setting). If set to 'auto', makes the
+ triangular colorbar extensions the same lengths as
+ the interior boxes (when *spacing* is set to
+ 'uniform') or the same lengths as the respective
+ adjacent interior boxes (when *spacing* is set to
+ 'proportional'). If a scalar, indicates the length
+ of both the minimum and maximum triangular colorbar
+ extensions as a fraction of the interior colorbar
+ length. A two-element sequence of fractions may also
+ be given, indicating the lengths of the minimum and
+ maximum colorbar extensions respectively as a
+ fraction of the interior colorbar length.
+ *extendrect* bool
+ If *False* the minimum and maximum colorbar extensions
+ will be triangular (the default). If *True* the
+ extensions will be rectangular.
+ *spacing* [ 'uniform' | 'proportional' ]
+ Uniform spacing gives each discrete color the same
+ space; proportional makes the space proportional to
+ the data interval.
+ *ticks* [ None | list of ticks | Locator object ]
+ If None, ticks are determined automatically from the
+ input.
+ *format* [ None | format string | Formatter object ]
+ If None, the
+ :class:`~matplotlib.ticker.ScalarFormatter` is used.
+ If a format string is given, e.g., '%.3f', that is
+ used. An alternative
+ :class:`~matplotlib.ticker.Formatter` object may be
+ given instead.
+ *drawedges* bool
+ Whether to draw lines at color boundaries.
+ ============ ====================================================
+
+ The following will probably be useful only in the context of
+ indexed colors (that is, when the mappable has norm=NoNorm()),
+ or other unusual circumstances.
+
+ ============ ===================================================
+ Property Description
+ ============ ===================================================
+ *boundaries* None or a sequence
+ *values* None or a sequence which must be of length 1 less
+ than the sequence of *boundaries*. For each region
+ delimited by adjacent entries in *boundaries*, the
+ color mapped to the corresponding value in values
+ will be used.
+ ============ ===================================================
+
+'''
+
+colorbar_doc = '''
+
+Add a colorbar to a plot.
+
+Function signatures for the :mod:`~matplotlib.pyplot` interface; all
+but the first are also method signatures for the
+:meth:`~matplotlib.figure.Figure.colorbar` method::
+
+ colorbar(**kwargs)
+ colorbar(mappable, **kwargs)
+ colorbar(mappable, cax=cax, **kwargs)
+ colorbar(mappable, ax=ax, **kwargs)
+
+Parameters
+----------
+mappable :
+ The :class:`~matplotlib.image.Image`,
+ :class:`~matplotlib.contour.ContourSet`, etc. to
+ which the colorbar applies; this argument is mandatory for the Figure
+ :meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
+ pyplot :func:`~matplotlib.pyplot.colorbar` function, which sets the
+ default to the current image.
+
+cax : :class:`~matplotlib.axes.Axes` object, optional
+ Axes into which the colorbar will be drawn.
+
+ax : :class:`~matplotlib.axes.Axes`, list of Axes, optional
+ Parent axes from which space for a new colorbar axes will be stolen.
+ If a list of axes is given they will all be resized to make room for the
+ colorbar axes.
+
+use_gridspec : bool, optional
+ If *cax* is ``None``, a new *cax* is created as an instance of
+ Axes. If *ax* is an instance of Subplot and *use_gridspec* is ``True``,
+ *cax* is created as an instance of Subplot using the
+ grid_spec module.
+
+
+Returns
+-------
+:class:`~matplotlib.colorbar.Colorbar` instance
+ See also its base class, :class:`~matplotlib.colorbar.ColorbarBase`.
+ Call the :meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
+ to label the colorbar.
+
+Notes
+-----
+Additional keyword arguments are of two kinds:
+
+ axes properties:
+%s
+ colorbar properties:
+%s
+
+If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend*
+kwarg is included automatically.
+
+The *shrink* kwarg provides a simple way to scale the colorbar with respect
+to the axes. Note that if *cax* is specified it determines the size of the
+colorbar and *shrink* and *aspect* kwargs are ignored.
+
+For more precise control, you can manually specify the positions of
+the axes objects in which the mappable and the colorbar are drawn. In
+this case, do not use any of the axes properties kwargs.
+
+It is known that some vector graphics viewers (svg and pdf) render white gaps
+between segments of the colorbar. This is due to bugs in the viewers, not
+matplotlib. As a workaround, the colorbar can be rendered with overlapping
+segments::
+
+ cbar = colorbar()
+ cbar.solids.set_edgecolor("face")
+ draw()
+
+However, this has negative consequences in other circumstances, particularly
+with semi-transparent images (alpha < 1) and colorbar extensions; it is
+therefore not enabled by default (see issue #1188).
+
+''' % (make_axes_kw_doc, colormap_kw_doc)
+
+docstring.interpd.update(colorbar_doc=colorbar_doc)
+
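+# A minimal standalone ColorbarBase sketch: it draws a colorbar for a colormap
+# without needing a mappable (the axes here is created via pyplot purely for
+# illustration):
+#
+#     import matplotlib.pyplot as plt
+#     from matplotlib import cm, colors
+#     from matplotlib.colorbar import ColorbarBase
+#     fig, ax = plt.subplots(figsize=(6, 1))
+#     cb = ColorbarBase(ax, cmap=cm.get_cmap('viridis'),
+#                       norm=colors.Normalize(vmin=0, vmax=10),
+#                       orientation='horizontal', label='value')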
+
+def _set_ticks_on_axis_warn(*args, **kw):
+ # a top level function which gets put in at the axes'
+ # set_xticks set_yticks by _patch_ax
+ warnings.warn("Use the colorbar set_ticks() method instead.")
+
+
+class ColorbarBase(cm.ScalarMappable):
+ '''
+ Draw a colorbar in an existing axes.
+
+ This is a base class for the :class:`Colorbar` class, which is the
+ basis for the :func:`~matplotlib.pyplot.colorbar` function and the
+ :meth:`~matplotlib.figure.Figure.colorbar` method, which are the
+ usual ways of creating a colorbar.
+
+ It is also useful by itself for showing a colormap. If the *cmap*
+ kwarg is given but *boundaries* and *values* are left as None,
+ then the colormap will be displayed on a 0-1 scale. To show the
+ under- and over-value colors, specify the *norm* as::
+
+ colors.Normalize(clip=False)
+
+ To show the colors versus index instead of on the 0-1 scale,
+ use::
+
+ norm=colors.NoNorm()
+
+ Useful public methods are :meth:`set_label` and :meth:`add_lines`.
+
+ Attributes
+ ----------
+ ax : Axes
+ The `Axes` instance in which the colorbar is drawn.
+
+ lines : list
+ A list of `LineCollection` if lines were drawn, otherwise
+ an empty list.
+
+ dividers : LineCollection
+ A LineCollection if *drawedges* is ``True``, otherwise ``None``.
+ '''
+ _slice_dict = {'neither': slice(0, None),
+ 'both': slice(1, -1),
+ 'min': slice(1, None),
+ 'max': slice(0, -1)}
+
+ n_rasterize = 50 # rasterize solids if number of colors >= n_rasterize
+
+ def __init__(self, ax, cmap=None,
+ norm=None,
+ alpha=None,
+ values=None,
+ boundaries=None,
+ orientation='vertical',
+ ticklocation='auto',
+ extend='neither',
+ spacing='uniform', # uniform or proportional
+ ticks=None,
+ format=None,
+ drawedges=False,
+ filled=True,
+ extendfrac=None,
+ extendrect=False,
+ label='',
+ ):
+ #: The axes that this colorbar lives in.
+ self.ax = ax
+ self._patch_ax()
+ if cmap is None:
+ cmap = cm.get_cmap()
+ if norm is None:
+ norm = colors.Normalize()
+ self.alpha = alpha
+ cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
+ self.values = values
+ self.boundaries = boundaries
+ self.extend = extend
+ self._inside = self._slice_dict[extend]
+ self.spacing = spacing
+ self.orientation = orientation
+ self.drawedges = drawedges
+ self.filled = filled
+ self.extendfrac = extendfrac
+ self.extendrect = extendrect
+ self.solids = None
+ self.lines = list()
+ self.outline = None
+ self.patch = None
+ self.dividers = None
+
+ if ticklocation == 'auto':
+ ticklocation = 'bottom' if orientation == 'horizontal' else 'right'
+ self.ticklocation = ticklocation
+
+ self.set_label(label)
+ if cbook.iterable(ticks):
+ self.locator = ticker.FixedLocator(ticks, nbins=len(ticks))
+ else:
+ self.locator = ticks # Handle default in _ticker()
+ if format is None:
+ if isinstance(self.norm, colors.LogNorm):
+ self.formatter = ticker.LogFormatterSciNotation()
+ elif isinstance(self.norm, colors.SymLogNorm):
+ self.formatter = ticker.LogFormatterSciNotation(
+ linthresh=self.norm.linthresh)
+ else:
+ self.formatter = ticker.ScalarFormatter()
+ elif isinstance(format, six.string_types):
+ self.formatter = ticker.FormatStrFormatter(format)
+ else:
+ self.formatter = format # Assume it is a Formatter
+ # The rest is in a method so we can recalculate when clim changes.
+ self.config_axis()
+ self.draw_all()
+
+ def _extend_lower(self):
+ """Returns whether the lower limit is open ended."""
+ return self.extend in ('both', 'min')
+
+ def _extend_upper(self):
+ """Returns whether the uper limit is open ended."""
+ return self.extend in ('both', 'max')
+
+ def _patch_ax(self):
+ # bind some methods to the axes to warn users
+ # against using those methods.
+ self.ax.set_xticks = _set_ticks_on_axis_warn
+ self.ax.set_yticks = _set_ticks_on_axis_warn
+
+ def draw_all(self):
+ '''
+ Calculate any free parameters based on the current cmap and norm,
+ and do all the drawing.
+ '''
+
+ self._process_values()
+ self._find_range()
+ X, Y = self._mesh()
+ C = self._values[:, np.newaxis]
+ self._config_axes(X, Y)
+ if self.filled:
+ self._add_solids(X, Y, C)
+
+ def config_axis(self):
+ ax = self.ax
+ if self.orientation == 'vertical':
+ ax.xaxis.set_ticks([])
+            # location is either one of 'left' or 'right'
+ ax.yaxis.set_label_position(self.ticklocation)
+ ax.yaxis.set_ticks_position(self.ticklocation)
+ else:
+ ax.yaxis.set_ticks([])
+            # location is either one of 'bottom' or 'top'
+ ax.xaxis.set_label_position(self.ticklocation)
+ ax.xaxis.set_ticks_position(self.ticklocation)
+
+ self._set_label()
+
+ def update_ticks(self):
+ """
+ Force the update of the ticks and ticklabels. This must be
+ called whenever the tick locator and/or tick formatter changes.
+ """
+ ax = self.ax
+ ticks, ticklabels, offset_string = self._ticker()
+ if self.orientation == 'vertical':
+ ax.yaxis.set_ticks(ticks)
+ ax.set_yticklabels(ticklabels)
+ ax.yaxis.get_major_formatter().set_offset_string(offset_string)
+
+ else:
+ ax.xaxis.set_ticks(ticks)
+ ax.set_xticklabels(ticklabels)
+ ax.xaxis.get_major_formatter().set_offset_string(offset_string)
+
+ def set_ticks(self, ticks, update_ticks=True):
+ """
+ Set tick locations.
+
+ Parameters
+ ----------
+ ticks : {None, sequence, :class:`~matplotlib.ticker.Locator` instance}
+ If None, a default Locator will be used.
+
+ update_ticks : {True, False}, optional
+ If True, tick locations are updated immediately. If False,
+ use :meth:`update_ticks` to manually update the ticks.
+
+ """
+ if cbook.iterable(ticks):
+ self.locator = ticker.FixedLocator(ticks, nbins=len(ticks))
+ else:
+ self.locator = ticks
+
+ if update_ticks:
+ self.update_ticks()
+ self.stale = True
+
+ def get_ticks(self, minor=False):
+ """Return the x ticks as a list of locations"""
+ return self._tick_data_values
+
+ def set_ticklabels(self, ticklabels, update_ticks=True):
+ """
+        Set tick labels. Tick labels are updated immediately unless
+        *update_ticks* is *False*, in which case call the
+        :meth:`update_ticks` method explicitly to apply them.
+ """
+ if isinstance(self.locator, ticker.FixedLocator):
+ self.formatter = ticker.FixedFormatter(ticklabels)
+ if update_ticks:
+ self.update_ticks()
+ else:
+ warnings.warn("set_ticks() must have been called.")
+ self.stale = True
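+        # Usage sketch (illustrative only; the colorbar variable name is
+        # hypothetical): fixed locations first, then matching labels.
+        #
+        #     cbar.set_ticks([0.0, 0.5, 1.0])
+        #     cbar.set_ticklabels(['low', 'mid', 'high'])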
+
+ def _config_axes(self, X, Y):
+ '''
+ Make an axes patch and outline.
+ '''
+ ax = self.ax
+ ax.set_frame_on(False)
+ ax.set_navigate(False)
+ xy = self._outline(X, Y)
+ ax.update_datalim(xy)
+ ax.set_xlim(*ax.dataLim.intervalx)
+ ax.set_ylim(*ax.dataLim.intervaly)
+ if self.outline is not None:
+ self.outline.remove()
+ self.outline = mpatches.Polygon(
+ xy, edgecolor=mpl.rcParams['axes.edgecolor'],
+ facecolor='none',
+ linewidth=mpl.rcParams['axes.linewidth'],
+ closed=True,
+ zorder=2)
+ ax.add_artist(self.outline)
+ self.outline.set_clip_box(None)
+ self.outline.set_clip_path(None)
+ c = mpl.rcParams['axes.facecolor']
+ if self.patch is not None:
+ self.patch.remove()
+ self.patch = mpatches.Polygon(xy, edgecolor=c,
+ facecolor=c,
+ linewidth=0.01,
+ zorder=-1)
+ ax.add_artist(self.patch)
+
+ self.update_ticks()
+
+ def _set_label(self):
+ if self.orientation == 'vertical':
+ self.ax.set_ylabel(self._label, **self._labelkw)
+ else:
+ self.ax.set_xlabel(self._label, **self._labelkw)
+ self.stale = True
+
+ def set_label(self, label, **kw):
+ '''
+ Label the long axis of the colorbar
+ '''
+ self._label = '%s' % (label, )
+ self._labelkw = kw
+ self._set_label()
+
+ def _outline(self, X, Y):
+ '''
+ Return *x*, *y* arrays of colorbar bounding polygon,
+ taking orientation into account.
+ '''
+ N = X.shape[0]
+ ii = [0, 1, N - 2, N - 1, 2 * N - 1, 2 * N - 2, N + 1, N, 0]
+ x = np.take(np.ravel(np.transpose(X)), ii)
+ y = np.take(np.ravel(np.transpose(Y)), ii)
+ x = x.reshape((len(x), 1))
+ y = y.reshape((len(y), 1))
+ if self.orientation == 'horizontal':
+ return np.hstack((y, x))
+ return np.hstack((x, y))
+
+ def _edges(self, X, Y):
+ '''
+ Return the separator line segments; helper for _add_solids.
+ '''
+ N = X.shape[0]
+ # Using the non-array form of these line segments is much
+ # simpler than making them into arrays.
+ if self.orientation == 'vertical':
+ return [list(zip(X[i], Y[i])) for i in xrange(1, N - 1)]
+ else:
+ return [list(zip(Y[i], X[i])) for i in xrange(1, N - 1)]
+
+ def _add_solids(self, X, Y, C):
+ '''
+ Draw the colors using :meth:`~matplotlib.axes.Axes.pcolormesh`;
+ optionally add separators.
+ '''
+ if self.orientation == 'vertical':
+ args = (X, Y, C)
+ else:
+ args = (np.transpose(Y), np.transpose(X), np.transpose(C))
+ kw = dict(cmap=self.cmap,
+ norm=self.norm,
+ alpha=self.alpha,
+ edgecolors='None')
+ # Save, set, and restore hold state to keep pcolor from
+ # clearing the axes. Ordinarily this will not be needed,
+ # since the axes object should already have hold set.
+ _hold = self.ax._hold
+ self.ax._hold = True
+ col = self.ax.pcolormesh(*args, **kw)
+ self.ax._hold = _hold
+ #self.add_observer(col) # We should observe, not be observed...
+
+ if self.solids is not None:
+ self.solids.remove()
+ self.solids = col
+ if self.dividers is not None:
+ self.dividers.remove()
+ self.dividers = None
+ if self.drawedges:
+ linewidths = (0.5 * mpl.rcParams['axes.linewidth'],)
+ self.dividers = collections.LineCollection(
+ self._edges(X, Y),
+ colors=(mpl.rcParams['axes.edgecolor'],),
+ linewidths=linewidths)
+ self.ax.add_collection(self.dividers)
+ elif len(self._y) >= self.n_rasterize:
+ self.solids.set_rasterized(True)
+
+ def add_lines(self, levels, colors, linewidths, erase=True):
+ '''
+ Draw lines on the colorbar.
+
+ *colors* and *linewidths* must be scalars or
+ sequences the same length as *levels*.
+
+ Set *erase* to False to add lines without first
+ removing any previously added lines.
+ '''
+ y = self._locate(levels)
+ igood = (y < 1.001) & (y > -0.001)
+ y = y[igood]
+ if cbook.iterable(colors):
+ colors = np.asarray(colors)[igood]
+ if cbook.iterable(linewidths):
+ linewidths = np.asarray(linewidths)[igood]
+ N = len(y)
+ x = np.array([0.0, 1.0])
+ X, Y = np.meshgrid(x, y)
+ if self.orientation == 'vertical':
+ xy = [list(zip(X[i], Y[i])) for i in xrange(N)]
+ else:
+ xy = [list(zip(Y[i], X[i])) for i in xrange(N)]
+ col = collections.LineCollection(xy, linewidths=linewidths)
+
+ if erase and self.lines:
+ for lc in self.lines:
+ lc.remove()
+ self.lines = []
+ self.lines.append(col)
+ col.set_color(colors)
+ self.ax.add_collection(col)
+ self.stale = True
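+        # Usage sketch (illustrative only; values are hypothetical): for a
+        # ColorbarBase instance cb, mark a few levels with black lines of
+        # equal width.
+        #
+        #     cb.add_lines([0.2, 0.5, 0.8], colors='k', linewidths=1.0)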
+
+ def _ticker(self):
+ '''
+ Return the sequence of ticks (colorbar data locations),
+ ticklabels (strings), and the corresponding offset string.
+ '''
+ locator = self.locator
+ formatter = self.formatter
+ if locator is None:
+ if self.boundaries is None:
+ if isinstance(self.norm, colors.NoNorm):
+ nv = len(self._values)
+ base = 1 + int(nv / 10)
+ locator = ticker.IndexLocator(base=base, offset=0)
+ elif isinstance(self.norm, colors.BoundaryNorm):
+ b = self.norm.boundaries
+ locator = ticker.FixedLocator(b, nbins=10)
+ elif isinstance(self.norm, colors.LogNorm):
+ locator = ticker.LogLocator(subs='all')
+ elif isinstance(self.norm, colors.SymLogNorm):
+ # The subs setting here should be replaced
+ # by logic in the locator.
+ locator = ticker.SymmetricalLogLocator(
+ subs=np.arange(1, 10),
+ linthresh=self.norm.linthresh,
+ base=10)
+ else:
+ if mpl.rcParams['_internal.classic_mode']:
+ locator = ticker.MaxNLocator()
+ else:
+ locator = ticker.AutoLocator()
+ else:
+ b = self._boundaries[self._inside]
+ locator = ticker.FixedLocator(b, nbins=10)
+ if isinstance(self.norm, colors.NoNorm) and self.boundaries is None:
+ intv = self._values[0], self._values[-1]
+ else:
+ intv = self.vmin, self.vmax
+ locator.create_dummy_axis(minpos=intv[0])
+ formatter.create_dummy_axis(minpos=intv[0])
+ locator.set_view_interval(*intv)
+ locator.set_data_interval(*intv)
+ formatter.set_view_interval(*intv)
+ formatter.set_data_interval(*intv)
+
+ b = np.array(locator())
+ if isinstance(locator, ticker.LogLocator):
+ eps = 1e-10
+ b = b[(b <= intv[1] * (1 + eps)) & (b >= intv[0] * (1 - eps))]
+ else:
+ eps = (intv[1] - intv[0]) * 1e-10
+ b = b[(b <= intv[1] + eps) & (b >= intv[0] - eps)]
+ self._tick_data_values = b
+ ticks = self._locate(b)
+ formatter.set_locs(b)
+ ticklabels = [formatter(t, i) for i, t in enumerate(b)]
+ offset_string = formatter.get_offset()
+ return ticks, ticklabels, offset_string
+
+ def _process_values(self, b=None):
+ '''
+ Set the :attr:`_boundaries` and :attr:`_values` attributes
+ based on the input boundaries and values. Input boundaries
+ can be *self.boundaries* or the argument *b*.
+ '''
+ if b is None:
+ b = self.boundaries
+ if b is not None:
+ self._boundaries = np.asarray(b, dtype=float)
+ if self.values is None:
+ self._values = 0.5 * (self._boundaries[:-1]
+ + self._boundaries[1:])
+ if isinstance(self.norm, colors.NoNorm):
+ self._values = (self._values + 0.00001).astype(np.int16)
+ return
+ self._values = np.array(self.values)
+ return
+ if self.values is not None:
+ self._values = np.array(self.values)
+ if self.boundaries is None:
+ b = np.zeros(len(self.values) + 1, 'd')
+                # Boundaries are the midpoints between adjacent values.
+                b[1:-1] = 0.5 * (self._values[:-1] + self._values[1:])
+ b[0] = 2.0 * b[1] - b[2]
+ b[-1] = 2.0 * b[-2] - b[-3]
+ self._boundaries = b
+ return
+ self._boundaries = np.array(self.boundaries)
+ return
+ # Neither boundaries nor values are specified;
+ # make reasonable ones based on cmap and norm.
+ if isinstance(self.norm, colors.NoNorm):
+ b = self._uniform_y(self.cmap.N + 1) * self.cmap.N - 0.5
+ v = np.zeros((len(b) - 1,), dtype=np.int16)
+ v[self._inside] = np.arange(self.cmap.N, dtype=np.int16)
+ if self._extend_lower():
+ v[0] = -1
+ if self._extend_upper():
+ v[-1] = self.cmap.N
+ self._boundaries = b
+ self._values = v
+ return
+ elif isinstance(self.norm, colors.BoundaryNorm):
+ b = list(self.norm.boundaries)
+ if self._extend_lower():
+ b = [b[0] - 1] + b
+ if self._extend_upper():
+ b = b + [b[-1] + 1]
+ b = np.array(b)
+ v = np.zeros((len(b) - 1,), dtype=float)
+ bi = self.norm.boundaries
+ v[self._inside] = 0.5 * (bi[:-1] + bi[1:])
+ if self._extend_lower():
+ v[0] = b[0] - 1
+ if self._extend_upper():
+ v[-1] = b[-1] + 1
+ self._boundaries = b
+ self._values = v
+ return
+ else:
+ if not self.norm.scaled():
+ self.norm.vmin = 0
+ self.norm.vmax = 1
+
+ self.norm.vmin, self.norm.vmax = mtransforms.nonsingular(
+ self.norm.vmin,
+ self.norm.vmax,
+ expander=0.1)
+
+ b = self.norm.inverse(self._uniform_y(self.cmap.N + 1))
+
+ if isinstance(self.norm, colors.LogNorm):
+ # If using a lognorm, ensure extensions don't go negative
+ if self._extend_lower():
+ b[0] = 0.9 * b[0]
+ if self._extend_upper():
+ b[-1] = 1.1 * b[-1]
+ else:
+ if self._extend_lower():
+ b[0] = b[0] - 1
+ if self._extend_upper():
+ b[-1] = b[-1] + 1
+ self._process_values(b)
+
+ def _find_range(self):
+ '''
+ Set :attr:`vmin` and :attr:`vmax` attributes to the first and
+ last boundary excluding extended end boundaries.
+ '''
+ b = self._boundaries[self._inside]
+ self.vmin = b[0]
+ self.vmax = b[-1]
+
+ def _central_N(self):
+ '''number of boundaries **before** extension of ends'''
+ nb = len(self._boundaries)
+ if self.extend == 'both':
+ nb -= 2
+ elif self.extend in ('min', 'max'):
+ nb -= 1
+ return nb
+
+ def _extended_N(self):
+ '''
+ Based on the colormap and extend variable, return the
+ number of boundaries.
+ '''
+ N = self.cmap.N + 1
+ if self.extend == 'both':
+ N += 2
+ elif self.extend in ('min', 'max'):
+ N += 1
+ return N
+
+ def _get_extension_lengths(self, frac, automin, automax, default=0.05):
+ '''
+ Get the lengths of colorbar extensions.
+
+ A helper method for _uniform_y and _proportional_y.
+ '''
+ # Set the default value.
+ extendlength = np.array([default, default])
+ if isinstance(frac, six.string_types):
+ if frac.lower() == 'auto':
+ # Use the provided values when 'auto' is required.
+ extendlength[0] = automin
+ extendlength[1] = automax
+ else:
+ # Any other string is invalid.
+ raise ValueError('invalid value for extendfrac')
+ elif frac is not None:
+ try:
+ # Try to set min and max extension fractions directly.
+ extendlength[:] = frac
+ # If frac is a sequence containing None then NaN may
+ # be encountered. This is an error.
+ if np.isnan(extendlength).any():
+ raise ValueError()
+ except (TypeError, ValueError):
+ # Raise an error on encountering an invalid value for frac.
+ raise ValueError('invalid value for extendfrac')
+ return extendlength
+
+ def _uniform_y(self, N):
+ '''
+ Return colorbar data coordinates for *N* uniformly
+ spaced boundaries, plus ends if required.
+ '''
+ if self.extend == 'neither':
+ y = np.linspace(0, 1, N)
+ else:
+ automin = automax = 1. / (N - 1.)
+ extendlength = self._get_extension_lengths(self.extendfrac,
+ automin, automax,
+ default=0.05)
+ if self.extend == 'both':
+ y = np.zeros(N + 2, 'd')
+ y[0] = 0. - extendlength[0]
+ y[-1] = 1. + extendlength[1]
+ elif self.extend == 'min':
+ y = np.zeros(N + 1, 'd')
+ y[0] = 0. - extendlength[0]
+ else:
+ y = np.zeros(N + 1, 'd')
+ y[-1] = 1. + extendlength[1]
+ y[self._inside] = np.linspace(0, 1, N)
+ return y
+
+ def _proportional_y(self):
+ '''
+ Return colorbar data coordinates for the boundaries of
+ a proportional colorbar.
+ '''
+ if isinstance(self.norm, colors.BoundaryNorm):
+ y = (self._boundaries - self._boundaries[0])
+ y = y / (self._boundaries[-1] - self._boundaries[0])
+ else:
+ y = self.norm(self._boundaries.copy())
+ y = np.ma.filled(y, np.nan)
+ if self.extend == 'min':
+ # Exclude leftmost interval of y.
+ clen = y[-1] - y[1]
+ automin = (y[2] - y[1]) / clen
+ automax = (y[-1] - y[-2]) / clen
+ elif self.extend == 'max':
+ # Exclude rightmost interval in y.
+ clen = y[-2] - y[0]
+ automin = (y[1] - y[0]) / clen
+ automax = (y[-2] - y[-3]) / clen
+ elif self.extend == 'both':
+ # Exclude leftmost and rightmost intervals in y.
+ clen = y[-2] - y[1]
+ automin = (y[2] - y[1]) / clen
+ automax = (y[-2] - y[-3]) / clen
+ if self.extend in ('both', 'min', 'max'):
+ extendlength = self._get_extension_lengths(self.extendfrac,
+ automin, automax,
+ default=0.05)
+ if self.extend in ('both', 'min'):
+ y[0] = 0. - extendlength[0]
+ if self.extend in ('both', 'max'):
+ y[-1] = 1. + extendlength[1]
+ yi = y[self._inside]
+ norm = colors.Normalize(yi[0], yi[-1])
+ y[self._inside] = np.ma.filled(norm(yi), np.nan)
+ return y
+
+ def _mesh(self):
+ '''
+ Return X,Y, the coordinate arrays for the colorbar pcolormesh.
+ These are suitable for a vertical colorbar; swapping and
+ transposition for a horizontal colorbar are done outside
+ this function.
+ '''
+ x = np.array([0.0, 1.0])
+ if self.spacing == 'uniform':
+ y = self._uniform_y(self._central_N())
+ else:
+ y = self._proportional_y()
+ self._y = y
+ X, Y = np.meshgrid(x, y)
+ if self._extend_lower() and not self.extendrect:
+ X[0, :] = 0.5
+ if self._extend_upper() and not self.extendrect:
+ X[-1, :] = 0.5
+ return X, Y
+
+ def _locate(self, x):
+ '''
+ Given a set of color data values, return their
+ corresponding colorbar data coordinates.
+ '''
+ if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)):
+ b = self._boundaries
+ xn = x
+ else:
+ # Do calculations using normalized coordinates so
+ # as to make the interpolation more accurate.
+ b = self.norm(self._boundaries, clip=False).filled()
+ xn = self.norm(x, clip=False).filled()
+
+ # The rest is linear interpolation with extrapolation at ends.
+ ii = np.searchsorted(b, xn)
+ i0 = ii - 1
+ itop = (ii == len(b))
+ ibot = (ii == 0)
+ i0[itop] -= 1
+ ii[itop] -= 1
+ i0[ibot] += 1
+ ii[ibot] += 1
+
+ db = np.take(b, ii) - np.take(b, i0)
+ y = self._y
+ dy = np.take(y, ii) - np.take(y, i0)
+ z = np.take(y, i0) + (xn - np.take(b, i0)) * dy / db
+ return z
+
+ def set_alpha(self, alpha):
+ self.alpha = alpha
+
+ def remove(self):
+ """
+ Remove this colorbar from the figure
+ """
+
+ fig = self.ax.figure
+ fig.delaxes(self.ax)
+
+
+class Colorbar(ColorbarBase):
+ """
+ This class connects a :class:`ColorbarBase` to a
+ :class:`~matplotlib.cm.ScalarMappable` such as a
+ :class:`~matplotlib.image.AxesImage` generated via
+ :meth:`~matplotlib.axes.Axes.imshow`.
+
+ It is not intended to be instantiated directly; instead,
+ use :meth:`~matplotlib.figure.Figure.colorbar` or
+ :func:`~matplotlib.pyplot.colorbar` to make your colorbar.
+
+ """
+ def __init__(self, ax, mappable, **kw):
+ # Ensure the given mappable's norm has appropriate vmin and vmax set
+ # even if mappable.draw has not yet been called.
+ mappable.autoscale_None()
+
+ self.mappable = mappable
+ kw['cmap'] = cmap = mappable.cmap
+ kw['norm'] = norm = mappable.norm
+
+ if isinstance(mappable, contour.ContourSet):
+ CS = mappable
+ kw['alpha'] = mappable.get_alpha()
+ kw['boundaries'] = CS._levels
+ kw['values'] = CS.cvalues
+ kw['extend'] = CS.extend
+ #kw['ticks'] = CS._levels
+ kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
+ kw['filled'] = CS.filled
+ ColorbarBase.__init__(self, ax, **kw)
+ if not CS.filled:
+ self.add_lines(CS)
+ else:
+ if getattr(cmap, 'colorbar_extend', False) is not False:
+ kw.setdefault('extend', cmap.colorbar_extend)
+
+ if isinstance(mappable, martist.Artist):
+ kw['alpha'] = mappable.get_alpha()
+
+ ColorbarBase.__init__(self, ax, **kw)
+
+ def on_mappable_changed(self, mappable):
+ """
+ Updates this colorbar to match the mappable's properties.
+
+ Typically this is automatically registered as an event handler
+ by :func:`colorbar_factory` and should not be called manually.
+
+ """
+ self.set_cmap(mappable.get_cmap())
+ self.set_clim(mappable.get_clim())
+ self.update_normal(mappable)
+
+ def add_lines(self, CS, erase=True):
+ '''
+ Add the lines from a non-filled
+ :class:`~matplotlib.contour.ContourSet` to the colorbar.
+
+ Set *erase* to False if these lines should be added to
+ any pre-existing lines.
+ '''
+ if not isinstance(CS, contour.ContourSet) or CS.filled:
+ raise ValueError('add_lines is only for a ContourSet of lines')
+ tcolors = [c[0] for c in CS.tcolors]
+ tlinewidths = [t[0] for t in CS.tlinewidths]
+ # The following was an attempt to get the colorbar lines
+ # to follow subsequent changes in the contour lines,
+ # but more work is needed: specifically, a careful
+ # look at event sequences, and at how
+ # to make one object track another automatically.
+ #tcolors = [col.get_colors()[0] for col in CS.collections]
+        #tlinewidths = [col.get_linewidth()[0] for col in CS.collections]
+ ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths,
+ erase=erase)
+
+ def update_normal(self, mappable):
+ '''
+ update solid, lines, etc. Unlike update_bruteforce, it does
+ not clear the axes. This is meant to be called when the image
+ or contour plot to which this colorbar belongs is changed.
+ '''
+ self.draw_all()
+ if isinstance(self.mappable, contour.ContourSet):
+ CS = self.mappable
+ if not CS.filled:
+ self.add_lines(CS)
+ self.stale = True
+
+ def update_bruteforce(self, mappable):
+ '''
+ Destroy and rebuild the colorbar. This is
+ intended to become obsolete, and will probably be
+ deprecated and then removed. It is not called when
+ the pyplot.colorbar function or the Figure.colorbar
+ method are used to create the colorbar.
+
+ '''
+ # We are using an ugly brute-force method: clearing and
+ # redrawing the whole thing. The problem is that if any
+ # properties have been changed by methods other than the
+ # colorbar methods, those changes will be lost.
+ self.ax.cla()
+ # clearing the axes will delete outline, patch, solids, and lines:
+ self.outline = None
+ self.patch = None
+ self.solids = None
+ self.lines = list()
+ self.dividers = None
+ self.set_alpha(mappable.get_alpha())
+ self.cmap = mappable.cmap
+ self.norm = mappable.norm
+ self.config_axis()
+ self.draw_all()
+ if isinstance(self.mappable, contour.ContourSet):
+ CS = self.mappable
+ if not CS.filled:
+ self.add_lines(CS)
+ #if self.lines is not None:
+ # tcolors = [c[0] for c in CS.tcolors]
+ # self.lines.set_color(tcolors)
+ #Fixme? Recalculate boundaries, ticks if vmin, vmax have changed.
+ #Fixme: Some refactoring may be needed; we should not
+ # be recalculating everything if there was a simple alpha
+ # change.
+
+ def remove(self):
+ """
+ Remove this colorbar from the figure. If the colorbar was created with
+ ``use_gridspec=True`` then restore the gridspec to its previous value.
+ """
+
+ ColorbarBase.remove(self)
+ self.mappable.callbacksSM.disconnect(self.mappable.colorbar_cid)
+ self.mappable.colorbar = None
+ self.mappable.colorbar_cid = None
+
+ try:
+ ax = self.mappable.axes
+ except AttributeError:
+ return
+
+ try:
+ gs = ax.get_subplotspec().get_gridspec()
+ subplotspec = gs.get_topmost_subplotspec()
+ except AttributeError:
+ # use_gridspec was False
+ pos = ax.get_position(original=True)
+ ax._set_position(pos)
+ else:
+ # use_gridspec was True
+ ax.set_subplotspec(subplotspec)
+
+
+@docstring.Substitution(make_axes_kw_doc)
+def make_axes(parents, location=None, orientation=None, fraction=0.15,
+ shrink=1.0, aspect=20, **kw):
+ '''
+ Resize and reposition parent axes, and return a child
+ axes suitable for a colorbar.
+
+ Keyword arguments may include the following (with defaults):
+
+ location : [None|'left'|'right'|'top'|'bottom']
+ The position, relative to **parents**, where the colorbar axes
+        should be created. If None, the location is inferred from the
+        given ``orientation`` when possible, and otherwise defaults to 'right'.
+
+ orientation : [None|'vertical'|'horizontal']
+ The orientation of the colorbar. Typically, this keyword shouldn't
+ be used, as it can be derived from the ``location`` keyword.
+
+ %s
+
+ Returns (cax, kw), the child axes and the reduced kw dictionary to be
+ passed when creating the colorbar instance.
+ '''
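+    # Call-pattern sketch (illustrative only; names are hypothetical):
+    #
+    #     cax, kw = make_axes(ax, location='right', fraction=0.15, shrink=0.9)
+    #     cbar = Colorbar(cax, im, **kw)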
+ locations = ["left", "right", "top", "bottom"]
+ if orientation is not None and location is not None:
+        raise TypeError('location and orientation are mutually exclusive. '
+                        'Consider setting the location to any of {}'
+ .format(', '.join(locations)))
+
+ # provide a default location
+ if location is None and orientation is None:
+ location = 'right'
+
+ # allow the user to not specify the location by specifying the
+ # orientation instead
+ if location is None:
+ location = 'right' if orientation == 'vertical' else 'bottom'
+
+ if location not in locations:
+ raise ValueError('Invalid colorbar location. Must be one '
+ 'of %s' % ', '.join(locations))
+
+ default_location_settings = {'left': {'anchor': (1.0, 0.5),
+ 'panchor': (0.0, 0.5),
+ 'pad': 0.10,
+ 'orientation': 'vertical'},
+ 'right': {'anchor': (0.0, 0.5),
+ 'panchor': (1.0, 0.5),
+ 'pad': 0.05,
+ 'orientation': 'vertical'},
+ 'top': {'anchor': (0.5, 0.0),
+ 'panchor': (0.5, 1.0),
+ 'pad': 0.05,
+ 'orientation': 'horizontal'},
+ 'bottom': {'anchor': (0.5, 1.0),
+ 'panchor': (0.5, 0.0),
+ 'pad': 0.15, # backwards compat
+ 'orientation': 'horizontal'},
+ }
+
+ loc_settings = default_location_settings[location]
+
+ # put appropriate values into the kw dict for passing back to
+ # the Colorbar class
+ kw['orientation'] = loc_settings['orientation']
+ kw['ticklocation'] = location
+
+ anchor = kw.pop('anchor', loc_settings['anchor'])
+ parent_anchor = kw.pop('panchor', loc_settings['panchor'])
+
+ parents_iterable = cbook.iterable(parents)
+ # turn parents into a list if it is not already. We do this w/ np
+ # because `plt.subplots` can return an ndarray and is natural to
+ # pass to `colorbar`.
+ parents = np.atleast_1d(parents).ravel()
+
+ # check if using constrained_layout:
+ try:
+ gs = parents[0].get_subplotspec().get_gridspec()
+ using_constrained_layout = (gs._layoutbox is not None)
+ except AttributeError:
+ using_constrained_layout = False
+
+ # defaults are not appropriate for constrained_layout:
+ pad0 = loc_settings['pad']
+ if using_constrained_layout:
+ pad0 = 0.02
+ pad = kw.pop('pad', pad0)
+
+ fig = parents[0].get_figure()
+ if not all(fig is ax.get_figure() for ax in parents):
+ raise ValueError('Unable to create a colorbar axes as not all '
+ 'parents share the same figure.')
+
+ # take a bounding box around all of the given axes
+ parents_bbox = mtransforms.Bbox.union(
+ [ax.get_position(original=True).frozen() for ax in parents])
+
+ pb = parents_bbox
+ if location in ('left', 'right'):
+ if location == 'left':
+ pbcb, _, pb1 = pb.splitx(fraction, fraction + pad)
+ else:
+ pb1, _, pbcb = pb.splitx(1 - fraction - pad, 1 - fraction)
+ pbcb = pbcb.shrunk(1.0, shrink).anchored(anchor, pbcb)
+ else:
+ if location == 'bottom':
+ pbcb, _, pb1 = pb.splity(fraction, fraction + pad)
+ else:
+ pb1, _, pbcb = pb.splity(1 - fraction - pad, 1 - fraction)
+ pbcb = pbcb.shrunk(shrink, 1.0).anchored(anchor, pbcb)
+
+ # define the aspect ratio in terms of y's per x rather than x's per y
+ aspect = 1.0 / aspect
+
+ # define a transform which takes us from old axes coordinates to
+ # new axes coordinates
+ shrinking_trans = mtransforms.BboxTransform(parents_bbox, pb1)
+
+ # transform each of the axes in parents using the new transform
+ for ax in parents:
+ new_posn = shrinking_trans.transform(ax.get_position())
+ new_posn = mtransforms.Bbox(new_posn)
+ ax._set_position(new_posn)
+ if parent_anchor is not False:
+ ax.set_anchor(parent_anchor)
+
+ cax = fig.add_axes(pbcb)
+
+ # OK, now make a layoutbox for the cb axis. Later, we will use this
+ # to make the colorbar fit nicely.
+ if not using_constrained_layout:
+ # no layout boxes:
+ lb = None
+ lbpos = None
+ # and we need to set the aspect ratio by hand...
+ cax.set_aspect(aspect, anchor=anchor, adjustable='box')
+ else:
+ if not parents_iterable:
+ # this is a single axis...
+ ax = parents[0]
+ lb, lbpos = constrained_layout.layoutcolorbarsingle(
+ ax, cax, shrink, aspect, location, pad=pad)
+ else: # there is more than one parent, so lets use gridspec
+ # the colorbar will be a sibling of this gridspec, so the
+ # parent is the same parent as the gridspec. Either the figure,
+ # or a subplotspec.
+
+ lb, lbpos = constrained_layout.layoutcolorbargridspec(
+ parents, cax, shrink, aspect, location, pad)
+
+ cax._layoutbox = lb
+ cax._poslayoutbox = lbpos
+
+ return cax, kw
+
+
+@docstring.Substitution(make_axes_kw_doc)
+def make_axes_gridspec(parent, **kw):
+ '''
+ Resize and reposition a parent axes, and return a child axes
+ suitable for a colorbar. This function is similar to
+    make_axes. The primary differences are:
+
+ * *make_axes_gridspec* only handles the *orientation* keyword
+ and cannot handle the "location" keyword.
+
+ * *make_axes_gridspec* should only be used with a subplot parent.
+
+ * *make_axes* creates an instance of Axes. *make_axes_gridspec*
+ creates an instance of Subplot.
+
+ * *make_axes* updates the position of the
+ parent. *make_axes_gridspec* replaces the grid_spec attribute
+ of the parent with a new one.
+
+ While this function is meant to be compatible with *make_axes*,
+ there could be some minor differences.
+
+ Keyword arguments may include the following (with defaults):
+
+ *orientation*
+ 'vertical' or 'horizontal'
+
+ %s
+
+ All but the first of these are stripped from the input kw set.
+
+ Returns (cax, kw), the child axes and the reduced kw dictionary to be
+ passed when creating the colorbar instance.
+ '''
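+    # Call-pattern sketch (illustrative only; names are hypothetical). The
+    # return convention matches make_axes, but the parent must be a subplot:
+    #
+    #     cax, kw = make_axes_gridspec(ax, orientation='vertical', pad=0.05)
+    #     cbar = Colorbar(cax, im, **kw)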
+
+ orientation = kw.setdefault('orientation', 'vertical')
+ kw['ticklocation'] = 'auto'
+
+ fraction = kw.pop('fraction', 0.15)
+ shrink = kw.pop('shrink', 1.0)
+ aspect = kw.pop('aspect', 20)
+
+ x1 = 1 - fraction
+
+ # for shrinking
+ pad_s = (1 - shrink) * 0.5
+ wh_ratios = [pad_s, shrink, pad_s]
+
+ # we need to none the tree of layoutboxes because
+ # constrained_layout can't remove and replace the tree
+ # hierarchy w/o a seg fault.
+ gs = parent.get_subplotspec().get_gridspec()
+ layoutbox.nonetree(gs._layoutbox)
+ gs_from_subplotspec = gridspec.GridSpecFromSubplotSpec
+ if orientation == 'vertical':
+ pad = kw.pop('pad', 0.05)
+ wh_space = 2 * pad / (1 - pad)
+ gs = gs_from_subplotspec(1, 2,
+ subplot_spec=parent.get_subplotspec(),
+ wspace=wh_space,
+ width_ratios=[x1 - pad, fraction])
+ gs2 = gs_from_subplotspec(3, 1,
+ subplot_spec=gs[1],
+ hspace=0.,
+ height_ratios=wh_ratios)
+ anchor = (0.0, 0.5)
+ panchor = (1.0, 0.5)
+ else:
+ pad = kw.pop('pad', 0.15)
+ wh_space = 2 * pad / (1 - pad)
+ gs = gs_from_subplotspec(2, 1,
+ subplot_spec=parent.get_subplotspec(),
+ hspace=wh_space,
+ height_ratios=[x1 - pad, fraction])
+ gs2 = gs_from_subplotspec(1, 3,
+ subplot_spec=gs[1],
+ wspace=0.,
+ width_ratios=wh_ratios)
+ aspect = 1 / aspect
+ anchor = (0.5, 1.0)
+ panchor = (0.5, 0.0)
+
+ parent.set_subplotspec(gs[0])
+ parent.update_params()
+ parent._set_position(parent.figbox)
+ parent.set_anchor(panchor)
+
+ fig = parent.get_figure()
+ cax = fig.add_subplot(gs2[1])
+ cax.set_aspect(aspect, anchor=anchor, adjustable='box')
+ return cax, kw
+
+
+class ColorbarPatch(Colorbar):
+ """
+ A Colorbar which is created using :class:`~matplotlib.patches.Patch`
+    rather than the default :meth:`~matplotlib.axes.Axes.pcolormesh`.
+
+ It uses a list of Patch instances instead of a
+ :class:`~matplotlib.collections.PatchCollection` because the
+ latter does not allow the hatch pattern to vary among the
+ members of the collection.
+ """
+ def __init__(self, ax, mappable, **kw):
+ # we do not want to override the behaviour of solids
+ # so add a new attribute which will be a list of the
+ # colored patches in the colorbar
+ self.solids_patches = []
+ Colorbar.__init__(self, ax, mappable, **kw)
+
+ def _add_solids(self, X, Y, C):
+ """
+ Draw the colors using :class:`~matplotlib.patches.Patch`;
+ optionally add separators.
+ """
+ # Save, set, and restore hold state to keep pcolor from
+ # clearing the axes. Ordinarily this will not be needed,
+ # since the axes object should already have hold set.
+ _hold = self.ax._hold
+ self.ax._hold = True
+
+ kw = {'alpha': self.alpha, }
+
+ n_segments = len(C)
+
+ # ensure there are sufficient hatches
+ hatches = self.mappable.hatches * n_segments
+
+ patches = []
+ for i in xrange(len(X) - 1):
+ val = C[i][0]
+ hatch = hatches[i]
+
+ xy = np.array([[X[i][0], Y[i][0]],
+ [X[i][1], Y[i][0]],
+ [X[i + 1][1], Y[i + 1][0]],
+ [X[i + 1][0], Y[i + 1][1]]])
+
+ if self.orientation == 'horizontal':
+ # if horizontal swap the xs and ys
+ xy = xy[..., ::-1]
+
+ patch = mpatches.PathPatch(mpath.Path(xy),
+ facecolor=self.cmap(self.norm(val)),
+ hatch=hatch, linewidth=0,
+ antialiased=False, **kw)
+ self.ax.add_patch(patch)
+ patches.append(patch)
+
+ if self.solids_patches:
+ for solid in self.solids_patches:
+ solid.remove()
+
+ self.solids_patches = patches
+
+ if self.dividers is not None:
+ self.dividers.remove()
+ self.dividers = None
+
+ if self.drawedges:
+ self.dividers = collections.LineCollection(
+ self._edges(X, Y),
+ colors=(mpl.rcParams['axes.edgecolor'],),
+ linewidths=(0.5 * mpl.rcParams['axes.linewidth'],))
+ self.ax.add_collection(self.dividers)
+
+ self.ax._hold = _hold
+
+
+def colorbar_factory(cax, mappable, **kwargs):
+ """
+ Creates a colorbar on the given axes for the given mappable.
+
+ Typically, for automatic colorbar placement given only a mappable use
+ :meth:`~matplotlib.figure.Figure.colorbar`.
+
+ """
+ # if the given mappable is a contourset with any hatching, use
+ # ColorbarPatch else use Colorbar
+ if (isinstance(mappable, contour.ContourSet)
+ and any([hatch is not None for hatch in mappable.hatches])):
+ cb = ColorbarPatch(cax, mappable, **kwargs)
+ else:
+ cb = Colorbar(cax, mappable, **kwargs)
+
+ cid = mappable.callbacksSM.connect('changed', cb.on_mappable_changed)
+ mappable.colorbar = cb
+ mappable.colorbar_cid = cid
+
+ return cb
diff --git a/contrib/python/matplotlib/py2/matplotlib/colors.py b/contrib/python/matplotlib/py2/matplotlib/colors.py
new file mode 100644
index 00000000000..32e282f6f56
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/colors.py
@@ -0,0 +1,2027 @@
+"""
+A module for converting numbers or color arguments to *RGB* or *RGBA*
+
+*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
+range 0-1.
+
+This module includes functions and classes for color specification
+conversions, and for mapping numbers to colors in a 1-D array of colors called
+a colormap. Colormapping typically involves two steps: a data array is first
+mapped onto the range 0-1 using an instance of :class:`Normalize` or of a
+subclass; then this number in the 0-1 range is mapped to a color using an
+instance of a subclass of :class:`Colormap`. Two are provided here:
+:class:`LinearSegmentedColormap`, which is used to generate all the built-in
+colormap instances, but is also useful for making custom colormaps, and
+:class:`ListedColormap`, which is used for generating a custom colormap from a
+list of color specifications.
+
+The module also provides functions for checking whether an object can be
+interpreted as a color (:func:`is_color_like`), for converting such an object
+to an RGBA tuple (:func:`to_rgba`) or to an HTML-like hex string in the
+`#rrggbb` format (:func:`to_hex`), and a sequence of colors to an `(n, 4)`
+RGBA array (:func:`to_rgba_array`). Caching is used for efficiency.
+
+Matplotlib recognizes the following formats to specify a color:
+
+* an RGB or RGBA tuple of float values in ``[0, 1]`` (e.g., ``(0.1, 0.2, 0.5)``
+ or ``(0.1, 0.2, 0.5, 0.3)``);
+* a hex RGB or RGBA string (e.g., ``'#0F0F0F'`` or ``'#0F0F0F0F'``);
+* a string representation of a float value in ``[0, 1]`` inclusive for gray
+ level (e.g., ``'0.5'``);
+* one of ``{'b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'}``;
+* an X11/CSS4 color name;
+* a name from the `xkcd color survey <https://xkcd.com/color/rgb/>`__,
+ prefixed with ``'xkcd:'`` (e.g., ``'xkcd:sky blue'``);
+* one of ``{'tab:blue', 'tab:orange', 'tab:green',
+ 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink',
+ 'tab:gray', 'tab:olive', 'tab:cyan'}`` which are the Tableau Colors from the
+ 'T10' categorical palette (which is the default color cycle);
+* a "CN" color spec, i.e. `'C'` followed by a single digit, which is an index
+ into the default property cycle (``matplotlib.rcParams['axes.prop_cycle']``);
+ the indexing occurs at artist creation time and defaults to black if the
+ cycle does not include color.
+
+All string specifications of color, other than "CN", are case-insensitive.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import zip
+
+from collections import Sized
+import itertools
+import re
+import warnings
+
+import numpy as np
+import matplotlib.cbook as cbook
+from ._color_data import BASE_COLORS, TABLEAU_COLORS, CSS4_COLORS, XKCD_COLORS
+
+
+class _ColorMapping(dict):
+ def __init__(self, mapping):
+ super(_ColorMapping, self).__init__(mapping)
+ self.cache = {}
+
+ def __setitem__(self, key, value):
+ super(_ColorMapping, self).__setitem__(key, value)
+ self.cache.clear()
+
+ def __delitem__(self, key):
+ super(_ColorMapping, self).__delitem__(key)
+ self.cache.clear()
+
+
+_colors_full_map = {}
+# Set by reverse priority order.
+_colors_full_map.update(XKCD_COLORS)
+_colors_full_map.update({k.replace('grey', 'gray'): v
+ for k, v in XKCD_COLORS.items()
+ if 'grey' in k})
+_colors_full_map.update(CSS4_COLORS)
+_colors_full_map.update(TABLEAU_COLORS)
+_colors_full_map.update({k.replace('gray', 'grey'): v
+ for k, v in TABLEAU_COLORS.items()
+ if 'gray' in k})
+_colors_full_map.update(BASE_COLORS)
+_colors_full_map = _ColorMapping(_colors_full_map)
+
+
+def get_named_colors_mapping():
+ """Return the global mapping of names to named colors."""
+ return _colors_full_map
+
+
+def _sanitize_extrema(ex):
+ if ex is None:
+ return ex
+ try:
+ ret = ex.item()
+ except AttributeError:
+ ret = float(ex)
+ return ret
+
+
+def _is_nth_color(c):
+ """Return whether *c* can be interpreted as an item in the color cycle."""
+ return isinstance(c, six.string_types) and re.match(r"\AC[0-9]\Z", c)
+
+
+def is_color_like(c):
+ """Return whether *c* can be interpreted as an RGB(A) color."""
+ # Special-case nth color syntax because it cannot be parsed during
+ # setup.
+ if _is_nth_color(c):
+ return True
+ try:
+ to_rgba(c)
+ except ValueError:
+ return False
+ else:
+ return True
+
+
+def same_color(c1, c2):
+ """
+ Compare two colors to see if they are the same.
+
+ Parameters
+ ----------
+ c1, c2 : Matplotlib colors
+
+ Returns
+ -------
+ bool
+ ``True`` if *c1* and *c2* are the same color, otherwise ``False``.
+ """
+ return (to_rgba_array(c1) == to_rgba_array(c2)).all()
+
+
+def to_rgba(c, alpha=None):
+ """
+ Convert *c* to an RGBA color.
+
+ Parameters
+ ----------
+ c : Matplotlib color
+
+ alpha : scalar, optional
+ If *alpha* is not ``None``, it forces the alpha value, except if *c* is
+ ``"none"`` (case-insensitive), which always maps to ``(0, 0, 0, 0)``.
+
+ Returns
+ -------
+ tuple
+ Tuple of ``(r, g, b, a)`` scalars.
+ """
+ # Special-case nth color syntax because it should not be cached.
+ if _is_nth_color(c):
+ from matplotlib import rcParams
+ prop_cycler = rcParams['axes.prop_cycle']
+ colors = prop_cycler.by_key().get('color', ['k'])
+ c = colors[int(c[1]) % len(colors)]
+ try:
+ rgba = _colors_full_map.cache[c, alpha]
+ except (KeyError, TypeError): # Not in cache, or unhashable.
+ rgba = _to_rgba_no_colorcycle(c, alpha)
+ try:
+ _colors_full_map.cache[c, alpha] = rgba
+ except TypeError:
+ pass
+ return rgba
+
+
+def _to_rgba_no_colorcycle(c, alpha=None):
+ """Convert *c* to an RGBA color, with no support for color-cycle syntax.
+
+ If *alpha* is not ``None``, it forces the alpha value, except if *c* is
+ ``"none"`` (case-insensitive), which always maps to ``(0, 0, 0, 0)``.
+ """
+ orig_c = c
+ if isinstance(c, six.string_types):
+ if c.lower() == "none":
+ return (0., 0., 0., 0.)
+ # Named color.
+ try:
+ # This may turn c into a non-string, so we check again below.
+ c = _colors_full_map[c.lower()]
+ except KeyError:
+ pass
+ if isinstance(c, six.string_types):
+ # hex color with no alpha.
+ match = re.match(r"\A#[a-fA-F0-9]{6}\Z", c)
+ if match:
+ return (tuple(int(n, 16) / 255
+ for n in [c[1:3], c[3:5], c[5:7]])
+ + (alpha if alpha is not None else 1.,))
+ # hex color with alpha.
+ match = re.match(r"\A#[a-fA-F0-9]{8}\Z", c)
+ if match:
+ color = [int(n, 16) / 255
+ for n in [c[1:3], c[3:5], c[5:7], c[7:9]]]
+ if alpha is not None:
+ color[-1] = alpha
+ return tuple(color)
+ # string gray.
+ try:
+ return (float(c),) * 3 + (alpha if alpha is not None else 1.,)
+ except ValueError:
+ pass
+ raise ValueError("Invalid RGBA argument: {!r}".format(orig_c))
+ # tuple color.
+ c = np.array(c)
+ if not np.can_cast(c.dtype, float, "same_kind") or c.ndim != 1:
+ # Test the dtype explicitly as `map(float, ...)`, `np.array(...,
+ # float)` and `np.array(...).astype(float)` all convert "0.5" to 0.5.
+ # Test dimensionality to reject single floats.
+ raise ValueError("Invalid RGBA argument: {!r}".format(orig_c))
+ # Return a tuple to prevent the cached value from being modified.
+ c = tuple(c.astype(float))
+ if len(c) not in [3, 4]:
+ raise ValueError("RGBA sequence should have length 3 or 4")
+ if len(c) == 3 and alpha is None:
+ alpha = 1
+ if alpha is not None:
+ c = c[:3] + (alpha,)
+ if any(elem < 0 or elem > 1 for elem in c):
+ raise ValueError("RGBA values should be within 0-1 range")
+ return c
+
+
+def to_rgba_array(c, alpha=None):
+ """Convert *c* to a (n, 4) array of RGBA colors.
+
+ If *alpha* is not ``None``, it forces the alpha value. If *c* is
+ ``"none"`` (case-insensitive) or an empty list, an empty array is returned.
+ """
+ # Special-case inputs that are already arrays, for performance. (If the
+ # array has the wrong kind or shape, raise the error during one-at-a-time
+ # conversion.)
+ if (isinstance(c, np.ndarray) and c.dtype.kind in "if"
+ and c.ndim == 2 and c.shape[1] in [3, 4]):
+ if c.shape[1] == 3:
+ result = np.column_stack([c, np.zeros(len(c))])
+ result[:, -1] = alpha if alpha is not None else 1.
+ elif c.shape[1] == 4:
+ result = c.copy()
+ if alpha is not None:
+ result[:, -1] = alpha
+ if np.any((result < 0) | (result > 1)):
+ raise ValueError("RGBA values should be within 0-1 range")
+ return result
+ # Handle single values.
+ # Note that this occurs *after* handling inputs that are already arrays, as
+ # `to_rgba(c, alpha)` (below) is expensive for such inputs, due to the need
+ # to format the array in the ValueError message(!).
+ if isinstance(c, six.string_types) and c.lower() == "none":
+ return np.zeros((0, 4), float)
+ try:
+ return np.array([to_rgba(c, alpha)], float)
+ except (ValueError, TypeError):
+ pass
+ # Convert one at a time.
+ result = np.empty((len(c), 4), float)
+ for i, cc in enumerate(c):
+ result[i] = to_rgba(cc, alpha)
+ return result
+
+
+def to_rgb(c):
+ """Convert *c* to an RGB color, silently dropping the alpha channel."""
+ return to_rgba(c)[:3]
+
+
+def to_hex(c, keep_alpha=False):
+ """Convert *c* to a hex color.
+
+ Uses the ``#rrggbb`` format if *keep_alpha* is False (the default),
+ ``#rrggbbaa`` otherwise.
+ """
+ c = to_rgba(c)
+ if not keep_alpha:
+ c = c[:3]
+ return "#" + "".join(format(int(np.round(val * 255)), "02x")
+ for val in c)
+
+
+### Backwards-compatible color-conversion API
+
+
+cnames = CSS4_COLORS
+hexColorPattern = re.compile(r"\A#[a-fA-F0-9]{6}\Z")
+rgb2hex = to_hex
+hex2color = to_rgb
+
+
+class ColorConverter(object):
+ """
+ Provides methods for converting color specifications to *RGB* or *RGBA*
+
+ Caching is used for more efficient conversion upon repeated calls
+ with the same argument.
+
+ Ordinarily only the single instance instantiated in this module,
+ *colorConverter*, is needed.
+ """
+
+ colors = _colors_full_map
+ cache = _colors_full_map.cache
+
+ @staticmethod
+ def to_rgb(arg):
+ """
+ Returns an *RGB* tuple of three floats from 0-1.
+
+ *arg* can be an *RGB* or *RGBA* sequence or a string in any of
+ several forms:
+
+ 1) a letter from the set 'rgbcmykw'
+ 2) a hex color string, like '#00FFFF'
+ 3) a standard name, like 'aqua'
+ 4) a string representation of a float, like '0.4',
+ indicating gray on a 0-1 scale
+
+        If *arg* is *RGBA*, the *A* will simply be discarded.
+ """
+ return to_rgb(arg)
+
+ @staticmethod
+ def to_rgba(arg, alpha=None):
+ """
+ Returns an *RGBA* tuple of four floats from 0-1.
+
+ For acceptable values of *arg*, see :meth:`to_rgb`.
+ In addition, if *arg* is "none" (case-insensitive),
+ then (0,0,0,0) will be returned.
+ If *arg* is an *RGBA* sequence and *alpha* is not *None*,
+ *alpha* will replace the original *A*.
+ """
+ return to_rgba(arg, alpha)
+
+ @staticmethod
+ def to_rgba_array(arg, alpha=None):
+ """
+ Returns a numpy array of *RGBA* tuples.
+
+ Accepts a single mpl color spec or a sequence of specs.
+
+ Special case to handle "no color": if *c* is "none" (case-insensitive),
+ then an empty array will be returned. Same for an empty list.
+ """
+ return to_rgba_array(arg, alpha)
+
+
+colorConverter = ColorConverter()
+
+
+### End of backwards-compatible color-conversion API
+
+
+def makeMappingArray(N, data, gamma=1.0):
+ """Create an *N* -element 1-d lookup table
+
+ *data* represented by a list of x,y0,y1 mapping correspondences.
+ Each element in this list represents how a value between 0 and 1
+ (inclusive) represented by x is mapped to a corresponding value
+ between 0 and 1 (inclusive). The two values of y are to allow
+ for discontinuous mapping functions (say as might be found in a
+ sawtooth) where y0 represents the value of y for values of x
+ <= to that given, and y1 is the value to be used for x > than
+ that given). The list must start with x=0, end with x=1, and
+ all values of x must be in increasing order. Values between
+ the given mapping points are determined by simple linear interpolation.
+
+ Alternatively, data can be a function mapping values between 0 - 1
+ to 0 - 1.
+
+ The function returns an array "result" where ``result[x*(N-1)]``
+ gives the closest value for values of x between 0 and 1.
+ """
+
+ if callable(data):
+ xind = np.linspace(0, 1, N) ** gamma
+ lut = np.clip(np.array(data(xind), dtype=float), 0, 1)
+ return lut
+
+ try:
+ adata = np.array(data)
+ except Exception:
+ raise TypeError("data must be convertible to an array")
+ shape = adata.shape
+ if len(shape) != 2 or shape[1] != 3:
+ raise ValueError("data must be nx3 format")
+
+ x = adata[:, 0]
+ y0 = adata[:, 1]
+ y1 = adata[:, 2]
+
+ if x[0] != 0. or x[-1] != 1.0:
+ raise ValueError(
+ "data mapping points must start with x=0 and end with x=1")
+ if (np.diff(x) < 0).any():
+ raise ValueError("data mapping points must have x in increasing order")
+ # begin generation of lookup table
+ x = x * (N - 1)
+ lut = np.zeros((N,), float)
+ xind = (N - 1) * np.linspace(0, 1, N) ** gamma
+ ind = np.searchsorted(x, xind)[1:-1]
+
+ distance = (xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1])
+ lut[1:-1] = distance * (y0[ind] - y1[ind - 1]) + y1[ind - 1]
+ lut[0] = y1[0]
+ lut[-1] = y0[-1]
+ # ensure that the lut is confined to values between 0 and 1 by clipping it
+ return np.clip(lut, 0.0, 1.0)
+
+
+class Colormap(object):
+ """
+ Baseclass for all scalar to RGBA mappings.
+
+ Typically Colormap instances are used to convert data values (floats) from
+ the interval ``[0, 1]`` to the RGBA color that the respective Colormap
+ represents. For scaling of data into the ``[0, 1]`` interval see
+ :class:`matplotlib.colors.Normalize`. It is worth noting that
+ :class:`matplotlib.cm.ScalarMappable` subclasses make heavy use of this
+ ``data->normalize->map-to-color`` processing chain.
+
+ """
+ def __init__(self, name, N=256):
+ """
+ Parameters
+ ----------
+ name : str
+ The name of the colormap.
+ N : int
+ The number of rgb quantization levels.
+
+ """
+ self.name = name
+ self.N = int(N) # ensure that N is always int
+ self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
+ self._rgba_under = None
+ self._rgba_over = None
+ self._i_under = self.N
+ self._i_over = self.N + 1
+ self._i_bad = self.N + 2
+ self._isinit = False
+
+ #: When this colormap exists on a scalar mappable and colorbar_extend
+ #: is not False, colorbar creation will pick up ``colorbar_extend`` as
+ #: the default value for the ``extend`` keyword in the
+ #: :class:`matplotlib.colorbar.Colorbar` constructor.
+ self.colorbar_extend = False
+
+ def __call__(self, X, alpha=None, bytes=False):
+ """
+ Parameters
+ ----------
+ X : scalar, ndarray
+ The data value(s) to convert to RGBA.
+ For floats, X should be in the interval ``[0.0, 1.0]`` to
+ return the RGBA values ``X*100`` percent along the Colormap line.
+ For integers, X should be in the interval ``[0, Colormap.N)`` to
+ return RGBA values *indexed* from the Colormap with index ``X``.
+ alpha : float, None
+ Alpha must be a scalar between 0 and 1, or None.
+ bytes : bool
+ If False (default), the returned RGBA values will be floats in the
+ interval ``[0, 1]`` otherwise they will be uint8s in the interval
+ ``[0, 255]``.
+
+ Returns
+ -------
+ Tuple of RGBA values if X is scalar, otherwise an array of
+ RGBA values with a shape of ``X.shape + (4, )``.
+
+ """
+ # See class docstring for arg/kwarg documentation.
+ if not self._isinit:
+ self._init()
+ mask_bad = None
+ if not cbook.iterable(X):
+ vtype = 'scalar'
+ xa = np.array([X])
+ else:
+ vtype = 'array'
+ xma = np.ma.array(X, copy=True) # Copy here to avoid side effects.
+ mask_bad = xma.mask # Mask will be used below.
+ xa = xma.filled() # Fill to avoid infs, etc.
+ del xma
+
+ # Calculations with native byteorder are faster, and avoid a
+ # bug that otherwise can occur with putmask when the last
+ # argument is a numpy scalar.
+ if not xa.dtype.isnative:
+ xa = xa.byteswap().newbyteorder()
+
+ if xa.dtype.kind == "f":
+ xa *= self.N
+ # Negative values are out of range, but astype(int) would truncate
+ # them towards zero.
+ xa[xa < 0] = -1
+ # xa == 1 (== N after multiplication) is not out of range.
+ xa[xa == self.N] = self.N - 1
+ # Avoid converting large positive values to negative integers.
+ np.clip(xa, -1, self.N, out=xa)
+ xa = xa.astype(int)
+ # Set the over-range indices before the under-range;
+ # otherwise the under-range values get converted to over-range.
+ xa[xa > self.N - 1] = self._i_over
+ xa[xa < 0] = self._i_under
+ if mask_bad is not None:
+ if mask_bad.shape == xa.shape:
+ np.copyto(xa, self._i_bad, where=mask_bad)
+ elif mask_bad:
+ xa.fill(self._i_bad)
+ if bytes:
+ lut = (self._lut * 255).astype(np.uint8)
+ else:
+ lut = self._lut.copy() # Don't let alpha modify original _lut.
+
+ if alpha is not None:
+ alpha = min(alpha, 1.0) # alpha must be between 0 and 1
+ alpha = max(alpha, 0.0)
+ if bytes:
+ alpha = int(alpha * 255)
+ if (lut[-1] == 0).all():
+ lut[:-1, -1] = alpha
+ # All zeros is taken as a flag for the default bad
+ # color, which is no color--fully transparent. We
+ # don't want to override this.
+ else:
+ lut[:, -1] = alpha
+ # If the bad value is set to have a color, then we
+ # override its alpha just as for any other value.
+
+ rgba = np.empty(shape=xa.shape + (4,), dtype=lut.dtype)
+ lut.take(xa, axis=0, mode='clip', out=rgba)
+ if vtype == 'scalar':
+ rgba = tuple(rgba[0, :])
+ return rgba
+
+ def __copy__(self):
+ """Create new object with the same class, update attributes
+ """
+ cls = self.__class__
+ cmapobject = cls.__new__(cls)
+ cmapobject.__dict__.update(self.__dict__)
+ if self._isinit:
+ cmapobject._lut = np.copy(self._lut)
+ return cmapobject
+
+ def set_bad(self, color='k', alpha=None):
+ """Set color to be used for masked values.
+ """
+ self._rgba_bad = colorConverter.to_rgba(color, alpha)
+ if self._isinit:
+ self._set_extremes()
+
+ def set_under(self, color='k', alpha=None):
+ """Set color to be used for low out-of-range values.
+ Requires norm.clip = False
+ """
+ self._rgba_under = colorConverter.to_rgba(color, alpha)
+ if self._isinit:
+ self._set_extremes()
+
+ def set_over(self, color='k', alpha=None):
+ """Set color to be used for high out-of-range values.
+ Requires norm.clip = False
+ """
+ self._rgba_over = colorConverter.to_rgba(color, alpha)
+ if self._isinit:
+ self._set_extremes()
+
+ def _set_extremes(self):
+ if self._rgba_under:
+ self._lut[self._i_under] = self._rgba_under
+ else:
+ self._lut[self._i_under] = self._lut[0]
+ if self._rgba_over:
+ self._lut[self._i_over] = self._rgba_over
+ else:
+ self._lut[self._i_over] = self._lut[self.N - 1]
+ self._lut[self._i_bad] = self._rgba_bad
+
+ def _init(self):
+ """Generate the lookup table, self._lut"""
+ raise NotImplementedError("Abstract class only")
+
+ def is_gray(self):
+ if not self._isinit:
+ self._init()
+ return (np.all(self._lut[:, 0] == self._lut[:, 1]) and
+ np.all(self._lut[:, 0] == self._lut[:, 2]))
+
+ def _resample(self, lutsize):
+ """
+ Return a new color map with *lutsize* entries.
+ """
+ raise NotImplementedError()
+
+ def reversed(self, name=None):
+ """
+ Make a reversed instance of the Colormap.
+
+ .. note :: Function not implemented for base class.
+
+ Parameters
+ ----------
+ name : str, optional
+ The name for the reversed colormap. If it's None the
+ name will be the name of the parent colormap + "_r".
+
+ Notes
+ -----
+ See :meth:`LinearSegmentedColormap.reversed` and
+ :meth:`ListedColormap.reversed`
+ """
+ raise NotImplementedError()
+
+
+class LinearSegmentedColormap(Colormap):
+ """Colormap objects based on lookup tables using linear segments.
+
+ The lookup table is generated using linear interpolation for each
+ primary color, with the 0-1 domain divided into any number of
+ segments.
+ """
+ def __init__(self, name, segmentdata, N=256, gamma=1.0):
+ """Create color map from linear mapping segments
+
+        The segmentdata argument is a dictionary with red, green and blue
+ entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
+ forming rows in a table. Entries for alpha are optional.
+
+ Example: suppose you want red to increase from 0 to 1 over
+ the bottom half, green to do the same over the middle half,
+ and blue over the top half. Then you would use::
+
+ cdict = {'red': [(0.0, 0.0, 0.0),
+ (0.5, 1.0, 1.0),
+ (1.0, 1.0, 1.0)],
+
+ 'green': [(0.0, 0.0, 0.0),
+ (0.25, 0.0, 0.0),
+ (0.75, 1.0, 1.0),
+ (1.0, 1.0, 1.0)],
+
+ 'blue': [(0.0, 0.0, 0.0),
+ (0.5, 0.0, 0.0),
+ (1.0, 1.0, 1.0)]}
+
+ Each row in the table for a given color is a sequence of
+ *x*, *y0*, *y1* tuples. In each sequence, *x* must increase
+ monotonically from 0 to 1. For any input value *z* falling
+ between *x[i]* and *x[i+1]*, the output value of a given color
+ will be linearly interpolated between *y1[i]* and *y0[i+1]*::
+
+ row i: x y0 y1
+ /
+ /
+ row i+1: x y0 y1
+
+ Hence y0 in the first row and y1 in the last row are never used.
+
+
+ .. seealso::
+
+ :meth:`LinearSegmentedColormap.from_list`
+ Static method; factory function for generating a
+ smoothly-varying LinearSegmentedColormap.
+
+ :func:`makeMappingArray`
+ For information about making a mapping array.
+ """
+ # True only if all colors in map are identical; needed for contouring.
+ self.monochrome = False
+ Colormap.__init__(self, name, N)
+ self._segmentdata = segmentdata
+ self._gamma = gamma
+
+ def _init(self):
+ self._lut = np.ones((self.N + 3, 4), float)
+ self._lut[:-3, 0] = makeMappingArray(
+ self.N, self._segmentdata['red'], self._gamma)
+ self._lut[:-3, 1] = makeMappingArray(
+ self.N, self._segmentdata['green'], self._gamma)
+ self._lut[:-3, 2] = makeMappingArray(
+ self.N, self._segmentdata['blue'], self._gamma)
+ if 'alpha' in self._segmentdata:
+ self._lut[:-3, 3] = makeMappingArray(
+ self.N, self._segmentdata['alpha'], 1)
+ self._isinit = True
+ self._set_extremes()
+
+ def set_gamma(self, gamma):
+ """
+ Set a new gamma value and regenerate color map.
+ """
+ self._gamma = gamma
+ self._init()
+
+ @staticmethod
+ def from_list(name, colors, N=256, gamma=1.0):
+ """
+ Make a linear segmented colormap with *name* from a sequence
+ of *colors* which evenly transitions from colors[0] at val=0
+ to colors[-1] at val=1. *N* is the number of rgb quantization
+ levels.
+ Alternatively, a list of (value, color) tuples can be given
+ to divide the range unevenly.
+ """
+
+ if not cbook.iterable(colors):
+ raise ValueError('colors must be iterable')
+
+ if (isinstance(colors[0], Sized) and len(colors[0]) == 2
+ and not isinstance(colors[0], six.string_types)):
+ # List of value, color pairs
+ vals, colors = zip(*colors)
+ else:
+ vals = np.linspace(0, 1, len(colors))
+
+ cdict = dict(red=[], green=[], blue=[], alpha=[])
+ for val, color in zip(vals, colors):
+ r, g, b, a = colorConverter.to_rgba(color)
+ cdict['red'].append((val, r, r))
+ cdict['green'].append((val, g, g))
+ cdict['blue'].append((val, b, b))
+ cdict['alpha'].append((val, a, a))
+
+ return LinearSegmentedColormap(name, cdict, N, gamma)
+
+ def _resample(self, lutsize):
+ """
+ Return a new color map with *lutsize* entries.
+ """
+ return LinearSegmentedColormap(self.name, self._segmentdata, lutsize)
+
+ def reversed(self, name=None):
+ """
+ Make a reversed instance of the Colormap.
+
+ Parameters
+ ----------
+ name : str, optional
+ The name for the reversed colormap. If it's None the
+ name will be the name of the parent colormap + "_r".
+
+ Returns
+ -------
+ LinearSegmentedColormap
+ The reversed colormap.
+ """
+ if name is None:
+ name = self.name + "_r"
+
+ # Function factory needed to deal with 'late binding' issue.
+ def factory(dat):
+ def func_r(x):
+ return dat(1.0 - x)
+ return func_r
+
+ data_r = dict()
+ for key, data in six.iteritems(self._segmentdata):
+ if callable(data):
+ data_r[key] = factory(data)
+ else:
+ new_data = [(1.0 - x, y1, y0) for x, y0, y1 in reversed(data)]
+ data_r[key] = new_data
+
+ return LinearSegmentedColormap(name, data_r, self.N, self._gamma)
+
+
+class ListedColormap(Colormap):
+ """Colormap object generated from a list of colors.
+
+ This may be most useful when indexing directly into a colormap,
+ but it can also be used to generate special colormaps for ordinary
+ mapping.
+ """
+ def __init__(self, colors, name='from_list', N=None):
+ """
+ Make a colormap from a list of colors.
+
+ *colors*
+ a list of matplotlib color specifications,
+ or an equivalent Nx3 or Nx4 floating point array
+ (*N* rgb or rgba values)
+ *name*
+ a string to identify the colormap
+ *N*
+ the number of entries in the map. The default is *None*,
+ in which case there is one colormap entry for each
+ element in the list of colors. If::
+
+ N < len(colors)
+
+ the list will be truncated at *N*. If::
+
+ N > len(colors)
+
+ the list will be extended by repetition.
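+
+ A minimal sketch of the *N* handling (the colors are illustrative)::
+
+     ListedColormap(['r', 'g', 'b'])        # N defaults to len(colors) == 3
+     ListedColormap(['r', 'g', 'b'], N=2)   # truncated to ['r', 'g']
+     ListedColormap(['r', 'g', 'b'], N=5)   # repeated: r, g, b, r, g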
+ """
+ self.monochrome = False # True only if all colors in map are
+ # identical; needed for contouring.
+ if N is None:
+ self.colors = colors
+ N = len(colors)
+ else:
+ if isinstance(colors, six.string_types):
+ self.colors = [colors] * N
+ self.monochrome = True
+ elif cbook.iterable(colors):
+ if len(colors) == 1:
+ self.monochrome = True
+ self.colors = list(
+ itertools.islice(itertools.cycle(colors), N))
+ else:
+ try:
+ gray = float(colors)
+ except TypeError:
+ pass
+ else:
+ self.colors = [gray] * N
+ self.monochrome = True
+ Colormap.__init__(self, name, N)
+
+ def _init(self):
+ rgba = colorConverter.to_rgba_array(self.colors)
+ self._lut = np.zeros((self.N + 3, 4), float)
+ self._lut[:-3] = rgba
+ self._isinit = True
+ self._set_extremes()
+
+ def _resample(self, lutsize):
+ """
+ Return a new color map with *lutsize* entries.
+ """
+ colors = self(np.linspace(0, 1, lutsize))
+ return ListedColormap(colors, name=self.name)
+
+ def reversed(self, name=None):
+ """
+ Make a reversed instance of the Colormap.
+
+ Parameters
+ ----------
+ name : str, optional
+ The name for the reversed colormap. If it's None the
+ name will be the name of the parent colormap + "_r".
+
+ Returns
+ -------
+ ListedColormap
+ A reversed instance of the colormap.
+ """
+ if name is None:
+ name = self.name + "_r"
+
+ colors_r = list(reversed(self.colors))
+ return ListedColormap(colors_r, name=name, N=self.N)
+
+
+class Normalize(object):
+ """
+ A class which, when called, can normalize data into
+ the ``[0.0, 1.0]`` interval.
+
+ """
+ def __init__(self, vmin=None, vmax=None, clip=False):
+ """
+ If *vmin* or *vmax* is not given, they are initialized from the
+ minimum and maximum value respectively of the first input
+ processed. That is, *__call__(A)* calls *autoscale_None(A)*.
+ If *clip* is *True* and the given value falls outside the range,
+ the returned value will be 0 or 1, whichever is closer.
+ Returns 0 if::
+
+ vmin==vmax
+
+ Works with scalars or arrays, including masked arrays. If
+ *clip* is *True*, masked values are set to 1; otherwise they
+ remain masked. Clipping silently defeats the purpose of setting
+ the over, under, and masked colors in the colormap, so it is
+ likely to lead to surprises; therefore the default is
+ *clip* = *False*.
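+
+ A minimal usage sketch (the limits and values are illustrative)::
+
+     norm = Normalize(vmin=0.0, vmax=10.0)
+     norm(5.0)                              # -> 0.5
+     norm(15.0)                             # -> 1.5 (not clipped by default)
+     Normalize(0.0, 10.0, clip=True)(15.0)  # -> 1.0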
+ """
+ self.vmin = _sanitize_extrema(vmin)
+ self.vmax = _sanitize_extrema(vmax)
+ self.clip = clip
+
+ @staticmethod
+ def process_value(value):
+ """
+ Homogenize the input *value* for easy and efficient normalization.
+
+ *value* can be a scalar or sequence.
+
+ Returns *result*, *is_scalar*, where *result* is a
+ masked array matching *value*. Float dtypes are preserved;
+ integer types with two bytes or smaller are converted to
+ np.float32, and larger types are converted to np.float64.
+ Preserving float32 when possible, and using in-place operations,
+ can greatly improve speed for large arrays.
+
+ Experimental; we may want to add an option to force the
+ use of float32.
+ """
+ is_scalar = not cbook.iterable(value)
+ if is_scalar:
+ value = [value]
+ dtype = np.min_scalar_type(value)
+ if np.issubdtype(dtype, np.integer) or dtype.type is np.bool_:
+ # bool_/int8/int16 -> float32; int32/int64 -> float64
+ dtype = np.promote_types(dtype, np.float32)
+ # ensure data passed in as an ndarray subclass are interpreted as
+ # an ndarray. See issue #6622.
+ mask = np.ma.getmask(value)
+ data = np.asarray(np.ma.getdata(value))
+ result = np.ma.array(data, mask=mask, dtype=dtype, copy=True)
+ return result, is_scalar
+
+ def __call__(self, value, clip=None):
+ """
+ Normalize *value* data in the ``[vmin, vmax]`` interval into
+ the ``[0.0, 1.0]`` interval and return it. *clip* defaults
+ to *self.clip* (which defaults to *False*). If not already
+ initialized, *vmin* and *vmax* are initialized using
+ *autoscale_None(value)*.
+ """
+ if clip is None:
+ clip = self.clip
+
+ result, is_scalar = self.process_value(value)
+
+ self.autoscale_None(result)
+ # Convert at least to float, without losing precision.
+ (vmin,), _ = self.process_value(self.vmin)
+ (vmax,), _ = self.process_value(self.vmax)
+ if vmin == vmax:
+ result.fill(0) # Or should it be all masked? Or 0.5?
+ elif vmin > vmax:
+ raise ValueError("minvalue must be less than or equal to maxvalue")
+ else:
+ if clip:
+ mask = np.ma.getmask(result)
+ result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax),
+ mask=mask)
+ # ma division is very slow; we can take a shortcut
+ resdat = result.data
+ resdat -= vmin
+ resdat /= (vmax - vmin)
+ result = np.ma.array(resdat, mask=result.mask, copy=False)
+ # Agg cannot handle float128. We actually only need 32 bits of
+ # precision, but on Windows, `np.dtype(np.longdouble) == np.float64`,
+ # so casting to float32 would lose precision on float64s as well.
+ if result.dtype == np.longdouble:
+ result = result.astype(np.float64)
+ if is_scalar:
+ result = result[0]
+ return result
+
+ def inverse(self, value):
+ if not self.scaled():
+ raise ValueError("Not invertible until scaled")
+ (vmin,), _ = self.process_value(self.vmin)
+ (vmax,), _ = self.process_value(self.vmax)
+
+ if cbook.iterable(value):
+ val = np.ma.asarray(value)
+ return vmin + val * (vmax - vmin)
+ else:
+ return vmin + value * (vmax - vmin)
+
+ def autoscale(self, A):
+ """
+ Set *vmin*, *vmax* to min, max of *A*.
+ """
+ A = np.asanyarray(A)
+ self.vmin = A.min()
+ self.vmax = A.max()
+
+ def autoscale_None(self, A):
+ """autoscale only None-valued vmin or vmax."""
+ A = np.asanyarray(A)
+ if self.vmin is None and A.size:
+ self.vmin = A.min()
+ if self.vmax is None and A.size:
+ self.vmax = A.max()
+
+ def scaled(self):
+ 'return true if vmin and vmax set'
+ return (self.vmin is not None and self.vmax is not None)
+
+
+class LogNorm(Normalize):
+ """
+ Normalize a given value to the 0-1 range on a log scale
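+
+ A minimal usage sketch (the limits here are illustrative)::
+
+     norm = LogNorm(vmin=1e-3, vmax=1e2)
+     norm(1.0)   # position of 1.0 on a log scale from 1e-3 to 1e2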
+ """
+ def __call__(self, value, clip=None):
+ if clip is None:
+ clip = self.clip
+
+ result, is_scalar = self.process_value(value)
+
+ result = np.ma.masked_less_equal(result, 0, copy=False)
+
+ self.autoscale_None(result)
+ vmin, vmax = self.vmin, self.vmax
+ if vmin > vmax:
+ raise ValueError("minvalue must be less than or equal to maxvalue")
+ elif vmin <= 0:
+ raise ValueError("values must all be positive")
+ elif vmin == vmax:
+ result.fill(0)
+ else:
+ if clip:
+ mask = np.ma.getmask(result)
+ result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax),
+ mask=mask)
+ # in-place equivalent of above can be much faster
+ resdat = result.data
+ mask = result.mask
+ if mask is np.ma.nomask:
+ mask = (resdat <= 0)
+ else:
+ mask |= resdat <= 0
+ np.copyto(resdat, 1, where=mask)
+ np.log(resdat, resdat)
+ resdat -= np.log(vmin)
+ resdat /= (np.log(vmax) - np.log(vmin))
+ result = np.ma.array(resdat, mask=mask, copy=False)
+ if is_scalar:
+ result = result[0]
+ return result
+
+ def inverse(self, value):
+ if not self.scaled():
+ raise ValueError("Not invertible until scaled")
+ vmin, vmax = self.vmin, self.vmax
+
+ if cbook.iterable(value):
+ val = np.ma.asarray(value)
+ return vmin * np.ma.power((vmax / vmin), val)
+ else:
+ return vmin * pow((vmax / vmin), value)
+
+ def autoscale(self, A):
+ """
+ Set *vmin*, *vmax* to min, max of *A*.
+ """
+ A = np.ma.masked_less_equal(A, 0, copy=False)
+ self.vmin = np.ma.min(A)
+ self.vmax = np.ma.max(A)
+
+ def autoscale_None(self, A):
+ """autoscale only None-valued vmin or vmax."""
+ if self.vmin is not None and self.vmax is not None:
+ return
+ A = np.ma.masked_less_equal(A, 0, copy=False)
+ if self.vmin is None and A.size:
+ self.vmin = A.min()
+ if self.vmax is None and A.size:
+ self.vmax = A.max()
+
+
+class SymLogNorm(Normalize):
+ """
+ The symmetrical logarithmic scale is logarithmic in both the
+ positive and negative directions from the origin.
+
+ Since the logarithm of values close to zero tends toward negative
+ infinity, there is a need to have a range around zero that is
+ linear. The parameter
+ *linthresh* allows the user to specify the size of this range
+ (-*linthresh*, *linthresh*).
+ """
+ def __init__(self, linthresh, linscale=1.0,
+ vmin=None, vmax=None, clip=False):
+ """
+ *linthresh*:
+ The range within which the plot is linear (to
+ avoid having the plot go to infinity around zero).
+
+ *linscale*:
+ This allows the linear range (-*linthresh* to *linthresh*)
+ to be stretched relative to the logarithmic range. Its
+ value is the number of decades to use for each half of the
+ linear range. For example, when *linscale* == 1.0 (the
+ default), the space used for the positive and negative
+ halves of the linear range will be equal to one decade in
+ the logarithmic range. Defaults to 1.
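+
+ A minimal usage sketch (the parameter values are illustrative)::
+
+     norm = SymLogNorm(linthresh=0.03, linscale=0.03,
+                       vmin=-1.0, vmax=1.0)
+     # values within +/- linthresh are mapped linearly,
+     # the rest logarithmically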
+ """
+ Normalize.__init__(self, vmin, vmax, clip)
+ self.linthresh = float(linthresh)
+ self._linscale_adj = (linscale / (1.0 - np.e ** -1))
+ if vmin is not None and vmax is not None:
+ self._transform_vmin_vmax()
+
+ def __call__(self, value, clip=None):
+ if clip is None:
+ clip = self.clip
+
+ result, is_scalar = self.process_value(value)
+ self.autoscale_None(result)
+ vmin, vmax = self.vmin, self.vmax
+
+ if vmin > vmax:
+ raise ValueError("minvalue must be less than or equal to maxvalue")
+ elif vmin == vmax:
+ result.fill(0)
+ else:
+ if clip:
+ mask = np.ma.getmask(result)
+ result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax),
+ mask=mask)
+ # in-place equivalent of above can be much faster
+ resdat = self._transform(result.data)
+ resdat -= self._lower
+ resdat /= (self._upper - self._lower)
+
+ if is_scalar:
+ result = result[0]
+ return result
+
+ def _transform(self, a):
+ """
+ Inplace transformation.
+ """
+ masked = np.abs(a) > self.linthresh
+ sign = np.sign(a[masked])
+ log = (self._linscale_adj + np.log(np.abs(a[masked]) / self.linthresh))
+ log *= sign * self.linthresh
+ a[masked] = log
+ a[~masked] *= self._linscale_adj
+ return a
+
+ def _inv_transform(self, a):
+ """
+ Inverse inplace transformation.
+ """
+ masked = np.abs(a) > (self.linthresh * self._linscale_adj)
+ sign = np.sign(a[masked])
+ exp = np.exp(sign * a[masked] / self.linthresh - self._linscale_adj)
+ exp *= sign * self.linthresh
+ a[masked] = exp
+ a[~masked] /= self._linscale_adj
+ return a
+
+ def _transform_vmin_vmax(self):
+ """
+ Calculates vmin and vmax in the transformed system.
+ """
+ vmin, vmax = self.vmin, self.vmax
+ arr = np.array([vmax, vmin]).astype(float)
+ self._upper, self._lower = self._transform(arr)
+
+ def inverse(self, value):
+ if not self.scaled():
+ raise ValueError("Not invertible until scaled")
+ val = np.ma.asarray(value)
+ val = val * (self._upper - self._lower) + self._lower
+ return self._inv_transform(val)
+
+ def autoscale(self, A):
+ """
+ Set *vmin*, *vmax* to min, max of *A*.
+ """
+ self.vmin = np.ma.min(A)
+ self.vmax = np.ma.max(A)
+ self._transform_vmin_vmax()
+
+ def autoscale_None(self, A):
+ """autoscale only None-valued vmin or vmax."""
+ A = np.asanyarray(A)
+ if self.vmin is None and A.size:
+ self.vmin = A.min()
+ if self.vmax is None and A.size:
+ self.vmax = A.max()
+ self._transform_vmin_vmax()
+
+
+class PowerNorm(Normalize):
+ """
+ Normalize a given value to the ``[0, 1]`` interval with a power-law
+ scaling. This will clip any negative data points to 0.
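+
+ A minimal usage sketch (the exponent and limits are illustrative)::
+
+     norm = PowerNorm(gamma=0.5, vmin=0.0, vmax=100.0)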
+ """
+ def __init__(self, gamma, vmin=None, vmax=None, clip=False):
+ Normalize.__init__(self, vmin, vmax, clip)
+ self.gamma = gamma
+
+ def __call__(self, value, clip=None):
+ if clip is None:
+ clip = self.clip
+
+ result, is_scalar = self.process_value(value)
+
+ self.autoscale_None(result)
+ gamma = self.gamma
+ vmin, vmax = self.vmin, self.vmax
+ if vmin > vmax:
+ raise ValueError("minvalue must be less than or equal to maxvalue")
+ elif vmin == vmax:
+ result.fill(0)
+ else:
+ res_mask = result.data < 0
+ if clip:
+ mask = np.ma.getmask(result)
+ result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax),
+ mask=mask)
+ resdat = result.data
+ resdat -= vmin
+ np.power(resdat, gamma, resdat)
+ resdat /= (vmax - vmin) ** gamma
+
+ result = np.ma.array(resdat, mask=result.mask, copy=False)
+ result[res_mask] = 0
+ if is_scalar:
+ result = result[0]
+ return result
+
+ def inverse(self, value):
+ if not self.scaled():
+ raise ValueError("Not invertible until scaled")
+ gamma = self.gamma
+ vmin, vmax = self.vmin, self.vmax
+
+ if cbook.iterable(value):
+ val = np.ma.asarray(value)
+ return np.ma.power(val, 1. / gamma) * (vmax - vmin) + vmin
+ else:
+ return pow(value, 1. / gamma) * (vmax - vmin) + vmin
+
+ def autoscale(self, A):
+ """
+ Set *vmin*, *vmax* to min, max of *A*.
+ """
+ self.vmin = np.ma.min(A)
+ if self.vmin < 0:
+ self.vmin = 0
+ warnings.warn("Power-law scaling on negative values is "
+ "ill-defined, clamping to 0.")
+ self.vmax = np.ma.max(A)
+
+ def autoscale_None(self, A):
+ """autoscale only None-valued vmin or vmax."""
+ A = np.asanyarray(A)
+ if self.vmin is None and A.size:
+ self.vmin = A.min()
+ if self.vmin < 0:
+ self.vmin = 0
+ warnings.warn("Power-law scaling on negative values is "
+ "ill-defined, clamping to 0.")
+ if self.vmax is None and A.size:
+ self.vmax = A.max()
+
+
+class BoundaryNorm(Normalize):
+ """
+ Generate a colormap index based on discrete intervals.
+
+ Unlike :class:`Normalize` or :class:`LogNorm`,
+ :class:`BoundaryNorm` maps values to integers instead of to the
+ interval 0-1.
+
+ Mapping to the 0-1 interval could have been done via
+ piece-wise linear interpolation, but using integers seems
+ simpler, and reduces the number of conversions back and forth
+ between integer and floating point.
+ """
+ def __init__(self, boundaries, ncolors, clip=False):
+ """
+ Parameters
+ ----------
+ boundaries : array-like
+ Monotonically increasing sequence of boundaries
+ ncolors : int
+ Number of colors in the colormap to be used
+ clip : bool, optional
+ If clip is ``True``, out of range values are mapped to 0 if they
+ are below ``boundaries[0]`` or mapped to ncolors - 1 if they are
+ above ``boundaries[-1]``.
+
+ If clip is ``False``, out of range values are mapped to -1 if
+ they are below ``boundaries[0]`` or mapped to ncolors if they are
+ above ``boundaries[-1]``. These are then converted to valid indices
+ by :meth:`Colormap.__call__`.
+
+ Notes
+ -----
+ *boundaries* defines the edges of bins, and data falling within a bin
+ is mapped to the color with the same index.
+
+ If the number of bins doesn't equal *ncolors*, the color is chosen
+ by linear interpolation of the bin number onto color numbers.
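+
+ A minimal usage sketch (the boundaries and *ncolors* are illustrative)::
+
+     norm = BoundaryNorm([0, 1, 2, 5, 10], ncolors=256)
+     norm(3)   # an integer colormap index for the bin containing 3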
+ """
+ self.clip = clip
+ self.vmin = boundaries[0]
+ self.vmax = boundaries[-1]
+ self.boundaries = np.asarray(boundaries)
+ self.N = len(self.boundaries)
+ self.Ncmap = ncolors
+ if self.N - 1 == self.Ncmap:
+ self._interp = False
+ else:
+ self._interp = True
+
+ def __call__(self, value, clip=None):
+ if clip is None:
+ clip = self.clip
+
+ xx, is_scalar = self.process_value(value)
+ mask = np.ma.getmaskarray(xx)
+ xx = np.atleast_1d(xx.filled(self.vmax + 1))
+ if clip:
+ np.clip(xx, self.vmin, self.vmax, out=xx)
+ max_col = self.Ncmap - 1
+ else:
+ max_col = self.Ncmap
+ iret = np.zeros(xx.shape, dtype=np.int16)
+ for i, b in enumerate(self.boundaries):
+ iret[xx >= b] = i
+ if self._interp:
+ scalefac = (self.Ncmap - 1) / (self.N - 2)
+ iret = (iret * scalefac).astype(np.int16)
+ iret[xx < self.vmin] = -1
+ iret[xx >= self.vmax] = max_col
+ ret = np.ma.array(iret, mask=mask)
+ if is_scalar:
+ ret = int(ret[0]) # assume python scalar
+ return ret
+
+ def inverse(self, value):
+ """
+ Raises
+ ------
+ ValueError
+ BoundaryNorm is not invertible, so calling this method will always
+ raise an error
+ """
+ raise ValueError("BoundaryNorm is not invertible")
+
+
+class NoNorm(Normalize):
+ """
+ Dummy replacement for Normalize, for the case where we
+ want to use indices directly in a
+ :class:`~matplotlib.cm.ScalarMappable`.
+ """
+ def __call__(self, value, clip=None):
+ return value
+
+ def inverse(self, value):
+ return value
+
+
+def rgb_to_hsv(arr):
+ """
+ Convert float rgb values (in the range [0, 1]) in a numpy array to hsv
+ values.
+
+ Parameters
+ ----------
+ arr : (..., 3) array-like
+ All values must be in the range [0, 1]
+
+ Returns
+ -------
+ hsv : (..., 3) ndarray
+ Colors converted to hsv values in range [0, 1]
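+
+ For example, a pure red pixel::
+
+     rgb_to_hsv([1.0, 0.0, 0.0])   # -> array([0., 1., 1.])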
+ """
+ # make sure it is an ndarray
+ arr = np.asarray(arr)
+
+ # check length of the last dimension, should be _some_ sort of rgb
+ if arr.shape[-1] != 3:
+ raise ValueError("Last dimension of input array must be 3; "
+ "shape {} was found.".format(arr.shape))
+
+ in_ndim = arr.ndim
+ if arr.ndim == 1:
+ arr = np.array(arr, ndmin=2)
+
+ # make sure we don't have an int image
+ arr = arr.astype(np.promote_types(arr.dtype, np.float32))
+
+ out = np.zeros_like(arr)
+ arr_max = arr.max(-1)
+ ipos = arr_max > 0
+ delta = arr.ptp(-1)
+ s = np.zeros_like(delta)
+ s[ipos] = delta[ipos] / arr_max[ipos]
+ ipos = delta > 0
+ # red is max
+ idx = (arr[..., 0] == arr_max) & ipos
+ out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
+ # green is max
+ idx = (arr[..., 1] == arr_max) & ipos
+ out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
+ # blue is max
+ idx = (arr[..., 2] == arr_max) & ipos
+ out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
+
+ out[..., 0] = (out[..., 0] / 6.0) % 1.0
+ out[..., 1] = s
+ out[..., 2] = arr_max
+
+ if in_ndim == 1:
+ out.shape = (3,)
+
+ return out
+
+
+def hsv_to_rgb(hsv):
+ """
+ Convert hsv values in a numpy array to rgb values; all values are
+ assumed to be in the range [0, 1].
+
+ Parameters
+ ----------
+ hsv : (..., 3) array-like
+ All values assumed to be in range [0, 1]
+
+ Returns
+ -------
+ rgb : (..., 3) ndarray
+ Colors converted to RGB values in range [0, 1]
+ """
+ hsv = np.asarray(hsv)
+
+ # check length of the last dimension; should be _some_ sort of hsv
+ if hsv.shape[-1] != 3:
+ raise ValueError("Last dimension of input array must be 3; "
+ "shape {shp} was found.".format(shp=hsv.shape))
+
+ # if we got passed a 1D array, try to treat as
+ # a single color and reshape as needed
+ in_ndim = hsv.ndim
+ if in_ndim == 1:
+ hsv = np.array(hsv, ndmin=2)
+
+ # make sure we don't have an int image
+ hsv = hsv.astype(np.promote_types(hsv.dtype, np.float32))
+
+ h = hsv[..., 0]
+ s = hsv[..., 1]
+ v = hsv[..., 2]
+
+ r = np.empty_like(h)
+ g = np.empty_like(h)
+ b = np.empty_like(h)
+
+ i = (h * 6.0).astype(int)
+ f = (h * 6.0) - i
+ p = v * (1.0 - s)
+ q = v * (1.0 - s * f)
+ t = v * (1.0 - s * (1.0 - f))
+
+ idx = i % 6 == 0
+ r[idx] = v[idx]
+ g[idx] = t[idx]
+ b[idx] = p[idx]
+
+ idx = i == 1
+ r[idx] = q[idx]
+ g[idx] = v[idx]
+ b[idx] = p[idx]
+
+ idx = i == 2
+ r[idx] = p[idx]
+ g[idx] = v[idx]
+ b[idx] = t[idx]
+
+ idx = i == 3
+ r[idx] = p[idx]
+ g[idx] = q[idx]
+ b[idx] = v[idx]
+
+ idx = i == 4
+ r[idx] = t[idx]
+ g[idx] = p[idx]
+ b[idx] = v[idx]
+
+ idx = i == 5
+ r[idx] = v[idx]
+ g[idx] = p[idx]
+ b[idx] = q[idx]
+
+ idx = s == 0
+ r[idx] = v[idx]
+ g[idx] = v[idx]
+ b[idx] = v[idx]
+
+ # Equivalent to `np.stack([r, g, b], axis=-1)`, which requires numpy >= 1.10.
+ rgb = np.concatenate([r[..., None], g[..., None], b[..., None]], -1)
+
+ if in_ndim == 1:
+ rgb.shape = (3,)
+
+ return rgb
+
+
+def _vector_magnitude(arr):
+ # things that don't work here:
+ # * np.linalg.norm
+ # - doesn't broadcast in numpy 1.7
+ # - drops the mask from ma.array
+ # * using keepdims - broken on ma.array until 1.11.2
+ # * using sum - discards mask on ma.array unless entire vector is masked
+
+ sum_sq = 0
+ for i in range(arr.shape[-1]):
+ sum_sq += np.square(arr[..., i, np.newaxis])
+ return np.sqrt(sum_sq)
+
+
+def _vector_dot(a, b):
+ # things that don't work here:
+ # * a.dot(b) - fails on masked arrays until 1.10
+ # * np.ma.dot(a, b) - doesn't mask enough things
+ # * np.ma.dot(a, b, strict=True) - returns a maskedarray with no mask
+ dot = 0
+ for i in range(a.shape[-1]):
+ dot += a[..., i] * b[..., i]
+ return dot
+
+
+class LightSource(object):
+ """
+ Create a light source coming from the specified azimuth and elevation.
+ Angles are in degrees, with the azimuth measured
+ clockwise from north and elevation up from the zero plane of the surface.
+
+ The :meth:`shade` method produces "shaded" rgb values for a data array.
+ :meth:`shade_rgb` can be used to combine an rgb image with an
+ illumination intensity map computed from an elevation array.
+ The :meth:`hillshade` method produces an illumination map of a surface.
+ """
+ def __init__(self, azdeg=315, altdeg=45, hsv_min_val=0, hsv_max_val=1,
+ hsv_min_sat=1, hsv_max_sat=0):
+ """
+ Specify the azimuth (measured clockwise from north) and altitude
+ (measured up from the plane of the surface) of the light source
+ in degrees.
+
+ Parameters
+ ----------
+ azdeg : number, optional
+ The azimuth (0-360, degrees clockwise from North) of the light
+ source. Defaults to 315 degrees (from the northwest).
+ altdeg : number, optional
+ The altitude (0-90, degrees up from horizontal) of the light
+ source. Defaults to 45 degrees from horizontal.
+
+ Notes
+ -----
+ For backwards compatibility, the parameters *hsv_min_val*,
+ *hsv_max_val*, *hsv_min_sat*, and *hsv_max_sat* may be supplied at
+ initialization as well. However, these parameters will only be used if
+ "blend_mode='hsv'" is passed into :meth:`shade` or :meth:`shade_rgb`.
+ See the documentation for :meth:`blend_hsv` for more details.
+ """
+ self.azdeg = azdeg
+ self.altdeg = altdeg
+ self.hsv_min_val = hsv_min_val
+ self.hsv_max_val = hsv_max_val
+ self.hsv_min_sat = hsv_min_sat
+ self.hsv_max_sat = hsv_max_sat
+
+ @property
+ def direction(self):
+ """ The unit vector direction towards the light source """
+
+ # Azimuth is in degrees clockwise from North. Convert to radians
+ # counterclockwise from East (mathematical notation).
+ az = np.radians(90 - self.azdeg)
+ alt = np.radians(self.altdeg)
+
+ return np.array([
+ np.cos(az) * np.cos(alt),
+ np.sin(az) * np.cos(alt),
+ np.sin(alt)
+ ])
+
+ def hillshade(self, elevation, vert_exag=1, dx=1, dy=1, fraction=1.):
+ """
+ Calculates the illumination intensity for a surface using the defined
+ azimuth and elevation for the light source.
+
+ This computes the normal vectors for the surface, and then passes them
+ on to `shade_normals`
+
+ Parameters
+ ----------
+ elevation : array-like
+ A 2d array (or equivalent) of the height values used to generate an
+ illumination map
+ vert_exag : number, optional
+ The amount to exaggerate the elevation values by when calculating
+ illumination. This can be used either to correct for differences in
+ units between the x-y coordinate system and the elevation
+ coordinate system (e.g. decimal degrees vs meters) or to exaggerate
+ or de-emphasize topographic effects.
+ dx : number, optional
+ The x-spacing (columns) of the input *elevation* grid.
+ dy : number, optional
+ The y-spacing (rows) of the input *elevation* grid.
+ fraction : number, optional
+ Increases or decreases the contrast of the hillshade. Values
+ greater than one will cause intermediate values to move closer to
+ full illumination or shadow (and clipping any values that move
+ beyond 0 or 1). Note that this is not visually or mathematically
+ the same as vertical exaggeration.
+
+ Returns
+ -------
+ intensity : ndarray
+ A 2d array of illumination values between 0-1, where 0 is
+ completely in shadow and 1 is completely illuminated.
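+
+ A minimal usage sketch (the elevation data is illustrative)::
+
+     import numpy as np
+     z = np.outer(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
+     ls = LightSource(azdeg=315, altdeg=45)
+     intensity = ls.hillshade(z, vert_exag=1, dx=1, dy=1)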
+ """
+
+ # Because most image and raster GIS data has the first row in the array
+ # as the "top" of the image, dy is implicitly negative. This is
+ # consistent with what `imshow` assumes, as well.
+ dy = -dy
+
+ # compute the normal vectors from the partial derivatives
+ e_dy, e_dx = np.gradient(vert_exag * elevation, dy, dx)
+
+ # .view is to keep subclasses
+ normal = np.empty(elevation.shape + (3,)).view(type(elevation))
+ normal[..., 0] = -e_dx
+ normal[..., 1] = -e_dy
+ normal[..., 2] = 1
+ normal /= _vector_magnitude(normal)
+
+ return self.shade_normals(normal, fraction)
+
+ def shade_normals(self, normals, fraction=1.):
+ """
+ Calculates the illumination intensity for the normal vectors of a
+ surface using the defined azimuth and elevation for the light source.
+
+ Imagine an artificial sun placed at infinity in some azimuth and
+ elevation position illuminating our surface. The parts of the surface
+ that slope toward the sun should brighten while those sides facing away
+ should become darker.
+
+ Parameters
+ ----------
+ normals : array-like
+     An (..., 3) array of unit normal vectors for the surface.
+ fraction : number, optional
+ Increases or decreases the contrast of the hillshade. Values
+ greater than one will cause intermediate values to move closer to
+ full illumination or shadow (and clipping any values that move
+ beyond 0 or 1). Note that this is not visually or mathematically
+ the same as vertical exaggeration.
+
+ Returns
+ -------
+ intensity : ndarray
+ A 2d array of illumination values between 0-1, where 0 is
+ completely in shadow and 1 is completely illuminated.
+ """
+
+ intensity = _vector_dot(normals, self.direction)
+
+ # Apply contrast stretch
+ imin, imax = intensity.min(), intensity.max()
+ intensity *= fraction
+
+ # Rescale to 0-1, keeping range before contrast stretch
+ # If constant slope, keep relative scaling (i.e. flat should be 0.5,
+ # fully occluded 0, etc.)
+ if (imax - imin) > 1e-6:
+ # Strictly speaking, this is incorrect. Negative values should be
+ # clipped to 0 because they're fully occluded. However, rescaling
+ # in this manner is consistent with the previous implementation and
+ # visually appears better than a "hard" clip.
+ intensity -= imin
+ intensity /= (imax - imin)
+ intensity = np.clip(intensity, 0, 1, intensity)
+
+ return intensity
+
+ def shade(self, data, cmap, norm=None, blend_mode='overlay', vmin=None,
+ vmax=None, vert_exag=1, dx=1, dy=1, fraction=1, **kwargs):
+ """
+ Combine colormapped data values with an illumination intensity map
+ (a.k.a. "hillshade") of the values.
+
+ Parameters
+ ----------
+ data : array-like
+ A 2d array (or equivalent) of the height values used to generate a
+ shaded map.
+ cmap : `~matplotlib.colors.Colormap` instance
+ The colormap used to color the *data* array. Note that this must be
+ a `~matplotlib.colors.Colormap` instance. For example, rather than
+ passing in `cmap='gist_earth'`, use
+ `cmap=plt.get_cmap('gist_earth')` instead.
+ norm : `~matplotlib.colors.Normalize` instance, optional
+ The normalization used to scale values before colormapping. If
+ None, the input will be linearly scaled between its min and max.
+ blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
+ The type of blending used to combine the colormapped data
+ values with the illumination intensity. Default is
+ "overlay". Note that for most topographic surfaces,
+ "overlay" or "soft" appear more visually realistic. If a
+ user-defined function is supplied, it is expected to
+ combine an MxNx3 RGB array of floats (ranging 0 to 1) with
+ an MxNx1 hillshade array (also 0 to 1). (Call signature
+ `func(rgb, illum, **kwargs)`) Additional kwargs supplied
+ to this function will be passed on to the *blend_mode*
+ function.
+ vmin : scalar or None, optional
+ The minimum value used in colormapping *data*. If *None* the
+ minimum value in *data* is used. If *norm* is specified, then this
+ argument will be ignored.
+ vmax : scalar or None, optional
+ The maximum value used in colormapping *data*. If *None* the
+ maximum value in *data* is used. If *norm* is specified, then this
+ argument will be ignored.
+ vert_exag : number, optional
+ The amount to exaggerate the elevation values by when calculating
+ illumination. This can be used either to correct for differences in
+ units between the x-y coordinate system and the elevation
+ coordinate system (e.g. decimal degrees vs meters) or to exaggerate
+ or de-emphasize topography.
+ dx : number, optional
+ The x-spacing (columns) of the input *elevation* grid.
+ dy : number, optional
+ The y-spacing (rows) of the input *elevation* grid.
+ fraction : number, optional
+ Increases or decreases the contrast of the hillshade. Values
+ greater than one will cause intermediate values to move closer to
+ full illumination or shadow (and clipping any values that move
+ beyond 0 or 1). Note that this is not visually or mathematically
+ the same as vertical exaggeration.
+ Additional kwargs are passed on to the *blend_mode* function.
+
+ Returns
+ -------
+ rgba : ndarray
+ An MxNx4 array of floats ranging between 0-1.
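+
+ A minimal usage sketch (the data and colormap choice are illustrative)::
+
+     import numpy as np
+     import matplotlib.pyplot as plt
+     z = np.outer(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
+     ls = LightSource(azdeg=315, altdeg=45)
+     rgba = ls.shade(z, cmap=plt.get_cmap('gist_earth'))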
+ """
+ if vmin is None:
+ vmin = data.min()
+ if vmax is None:
+ vmax = data.max()
+ if norm is None:
+ norm = Normalize(vmin=vmin, vmax=vmax)
+
+ rgb0 = cmap(norm(data))
+ rgb1 = self.shade_rgb(rgb0, elevation=data, blend_mode=blend_mode,
+ vert_exag=vert_exag, dx=dx, dy=dy,
+ fraction=fraction, **kwargs)
+ # Don't overwrite the alpha channel, if present.
+ rgb0[..., :3] = rgb1[..., :3]
+ return rgb0
+
+ def shade_rgb(self, rgb, elevation, fraction=1., blend_mode='hsv',
+ vert_exag=1, dx=1, dy=1, **kwargs):
+ """
+ Take the input RGB array (ny*nx*3) and adjust its color values to
+ give the impression of a shaded relief map with a specified
+ light source, using the elevation (ny*nx).
+ A new RGB array (ny*nx*3) is returned.
+
+ Parameters
+ ----------
+ rgb : array-like
+ An MxNx3 RGB array, assumed to be in the range of 0 to 1.
+ elevation : array-like
+ A 2d array (or equivalent) of the height values used to generate a
+ shaded map.
+ fraction : number
+ Increases or decreases the contrast of the hillshade. Values
+ greater than one will cause intermediate values to move closer to
+ full illumination or shadow (and clipping any values that move
+ beyond 0 or 1). Note that this is not visually or mathematically
+ the same as vertical exaggeration.
+ blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
+ The type of blending used to combine the colormapped data values
+ with the illumination intensity. For backwards compatibility, this
+ defaults to "hsv". Note that for most topographic surfaces,
+ "overlay" or "soft" appear more visually realistic. If a
+ user-defined function is supplied, it is expected to combine an
+ MxNx3 RGB array of floats (ranging 0 to 1) with an MxNx1 hillshade
+ array (also 0 to 1). (Call signature `func(rgb, illum, **kwargs)`)
+ Additional kwargs supplied to this function will be passed on to
+ the *blend_mode* function.
+ vert_exag : number, optional
+ The amount to exaggerate the elevation values by when calculating
+ illumination. This can be used either to correct for differences in
+ units between the x-y coordinate system and the elevation
+ coordinate system (e.g. decimal degrees vs meters) or to exaggerate
+ or de-emphasize topography.
+ dx : number, optional
+ The x-spacing (columns) of the input *elevation* grid.
+ dy : number, optional
+ The y-spacing (rows) of the input *elevation* grid.
+ Additional kwargs are passed on to the *blend_mode* function.
+
+ Returns
+ -------
+ shaded_rgb : ndarray
+ An MxNx3 array of floats ranging between 0-1.
+ """
+ # Calculate the "hillshade" intensity.
+ intensity = self.hillshade(elevation, vert_exag, dx, dy, fraction)
+ intensity = intensity[..., np.newaxis]
+
+ # Blend the hillshade and rgb data using the specified mode
+ lookup = {
+ 'hsv': self.blend_hsv,
+ 'soft': self.blend_soft_light,
+ 'overlay': self.blend_overlay,
+ }
+ if blend_mode in lookup:
+ blend = lookup[blend_mode](rgb, intensity, **kwargs)
+ else:
+ try:
+ blend = blend_mode(rgb, intensity, **kwargs)
+ except TypeError:
+ raise ValueError('"blend_mode" must be callable or one of {}'
+ .format(list(lookup.keys())))
+
+ # Only apply result where hillshade intensity isn't masked
+ if hasattr(intensity, 'mask'):
+ mask = intensity.mask[..., 0]
+ for i in range(3):
+ blend[..., i][mask] = rgb[..., i][mask]
+
+ return blend
+
+ def blend_hsv(self, rgb, intensity, hsv_max_sat=None, hsv_max_val=None,
+ hsv_min_val=None, hsv_min_sat=None):
+ """
+ Take the input rgb array, convert it to HSV values, then adjust those
+ color values to give the impression of a shaded relief map with a
+ specified light source. RGB values are returned,
+ which can then be used to plot the shaded image with imshow.
+
+ The color of the resulting image will be darkened by moving the (s,v)
+ values (in hsv colorspace) toward (hsv_min_sat, hsv_min_val) in the
+ shaded regions, or lightened by sliding (s,v) toward (hsv_max_sat,
+ hsv_max_val) in regions that are illuminated. The default extremes are
+ chosen so that completely shaded points are nearly black (s = 1, v = 0)
+ and completely illuminated points are nearly white (s = 0, v = 1).
+
+ Parameters
+ ----------
+ rgb : ndarray
+ An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
+ intensity : ndarray
+ An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
+ hsv_max_sat : number, optional
+ The maximum saturation value that the *intensity* map can shift the
+ output image to. Defaults to 1.
+ hsv_min_sat : number, optional
+ The minimum saturation value that the *intensity* map can shift the
+ output image to. Defaults to 0.
+ hsv_max_val : number, optional
+ The maximum value ("v" in "hsv") that the *intensity* map can shift
+ the output image to. Defaults to 1.
+ hsv_min_val: number, optional
+ The minimum value ("v" in "hsv") that the *intensity* map can shift
+ the output image to. Defaults to 0.
+
+ Returns
+ -------
+ rgb : ndarray
+ An MxNx3 RGB array representing the combined images.
+ """
+ # Backward compatibility...
+ if hsv_max_sat is None:
+ hsv_max_sat = self.hsv_max_sat
+ if hsv_max_val is None:
+ hsv_max_val = self.hsv_max_val
+ if hsv_min_sat is None:
+ hsv_min_sat = self.hsv_min_sat
+ if hsv_min_val is None:
+ hsv_min_val = self.hsv_min_val
+
+ # Expects a 2D intensity array scaled between -1 and 1...
+ intensity = intensity[..., 0]
+ intensity = 2 * intensity - 1
+
+ # convert to rgb, then rgb to hsv
+ hsv = rgb_to_hsv(rgb[:, :, 0:3])
+
+ # modify hsv values to simulate illumination.
+ hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
+ intensity > 0),
+ ((1. - intensity) * hsv[:, :, 1] +
+ intensity * hsv_max_sat),
+ hsv[:, :, 1])
+
+ hsv[:, :, 2] = np.where(intensity > 0,
+ ((1. - intensity) * hsv[:, :, 2] +
+ intensity * hsv_max_val),
+ hsv[:, :, 2])
+
+ hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
+ intensity < 0),
+ ((1. + intensity) * hsv[:, :, 1] -
+ intensity * hsv_min_sat),
+ hsv[:, :, 1])
+ hsv[:, :, 2] = np.where(intensity < 0,
+ ((1. + intensity) * hsv[:, :, 2] -
+ intensity * hsv_min_val),
+ hsv[:, :, 2])
+ hsv[:, :, 1:] = np.where(hsv[:, :, 1:] < 0., 0, hsv[:, :, 1:])
+ hsv[:, :, 1:] = np.where(hsv[:, :, 1:] > 1., 1, hsv[:, :, 1:])
+ # convert modified hsv back to rgb.
+ return hsv_to_rgb(hsv)
+
+ def blend_soft_light(self, rgb, intensity):
+ """
+ Combines an rgb image with an intensity map using "soft light"
+ blending. Uses the "pegtop" formula.
+
+ Parameters
+ ----------
+ rgb : ndarray
+ An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
+ intensity : ndarray
+ An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
+
+ Returns
+ -------
+ rgb : ndarray
+ An MxNx3 RGB array representing the combined images.
+ """
+ return 2 * intensity * rgb + (1 - 2 * intensity) * rgb**2
+
+ def blend_overlay(self, rgb, intensity):
+ """
+ Combines an rgb image with an intensity map using "overlay" blending.
+
+ Parameters
+ ----------
+ rgb : ndarray
+ An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
+ intensity : ndarray
+ An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
+
+ Returns
+ -------
+ rgb : ndarray
+ An MxNx3 RGB array representing the combined images.
+ """
+ low = 2 * intensity * rgb
+ high = 1 - 2 * (1 - intensity) * (1 - rgb)
+ return np.where(rgb <= 0.5, low, high)
+
+
+def from_levels_and_colors(levels, colors, extend='neither'):
+ """
+ A helper routine to generate a cmap and a norm instance which
+ behave similarly to contourf's levels and colors arguments.
+
+ Parameters
+ ----------
+ levels : sequence of numbers
+ The quantization levels used to construct the :class:`BoundaryNorm`.
+ Values ``v`` are quantized to level ``i`` if
+ ``lev[i] <= v < lev[i+1]``.
+ colors : sequence of colors
+ The fill color to use for each level. If `extend` is "neither" there
+ must be ``n_level - 1`` colors. For an `extend` of "min" or "max" add
+ one extra color, and for an `extend` of "both" add two colors.
+ extend : {'neither', 'min', 'max', 'both'}, optional
+ The behaviour when a value falls out of range of the given levels.
+ See :func:`~matplotlib.pyplot.contourf` for details.
+
+ Returns
+ -------
+ (cmap, norm) : tuple containing a :class:`Colormap` and a \
+ :class:`Normalize` instance
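+
+ A minimal usage sketch (the levels and colors are illustrative)::
+
+     levels = [0, 1, 2, 3]
+     colors = ['red', 'green', 'blue']
+     cmap, norm = from_levels_and_colors(levels, colors)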
+ """
+ colors_i0 = 0
+ colors_i1 = None
+
+ if extend == 'both':
+ colors_i0 = 1
+ colors_i1 = -1
+ extra_colors = 2
+ elif extend == 'min':
+ colors_i0 = 1
+ extra_colors = 1
+ elif extend == 'max':
+ colors_i1 = -1
+ extra_colors = 1
+ elif extend == 'neither':
+ extra_colors = 0
+ else:
+ raise ValueError('Unexpected value for extend: {0!r}'.format(extend))
+
+ n_data_colors = len(levels) - 1
+ n_expected_colors = n_data_colors + extra_colors
+ if len(colors) != n_expected_colors:
+ raise ValueError('With extend == {0!r} and n_levels == {1!r} expected'
+ ' n_colors == {2!r}. Got {3!r}.'
+ ''.format(extend, len(levels), n_expected_colors,
+ len(colors)))
+
+ cmap = ListedColormap(colors[colors_i0:colors_i1], N=n_data_colors)
+
+ if extend in ['min', 'both']:
+ cmap.set_under(colors[0])
+ else:
+ cmap.set_under('none')
+
+ if extend in ['max', 'both']:
+ cmap.set_over(colors[-1])
+ else:
+ cmap.set_over('none')
+
+ cmap.colorbar_extend = extend
+
+ norm = BoundaryNorm(levels, ncolors=n_data_colors)
+ return cmap, norm
diff --git a/contrib/python/matplotlib/py2/matplotlib/compat/__init__.py b/contrib/python/matplotlib/py2/matplotlib/compat/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/compat/__init__.py
diff --git a/contrib/python/matplotlib/py2/matplotlib/compat/subprocess.py b/contrib/python/matplotlib/py2/matplotlib/compat/subprocess.py
new file mode 100644
index 00000000000..6607a011836
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/compat/subprocess.py
@@ -0,0 +1,51 @@
+"""
+A replacement wrapper around the subprocess module, with a number of
+work-arounds:
+- Provides a stub implementation of subprocess members on Google App Engine
+ (which are missing in subprocess).
+- Uses subprocess32, a backport from Python 3.2, on Linux/Mac as a work-around for
+ https://github.com/matplotlib/matplotlib/issues/5314
+
+Instead of importing subprocess, other modules should use this as follows:
+
+from matplotlib.compat import subprocess
+
+This module is safe to import from anywhere within matplotlib.
+"""
+
+from __future__ import absolute_import # Required to import subprocess
+from __future__ import print_function
+import os
+import sys
+if os.name == 'posix' and sys.version_info[0] < 3:
+ # work around for https://github.com/matplotlib/matplotlib/issues/5314
+ try:
+ import subprocess32 as subprocess
+ except ImportError:
+ import subprocess
+else:
+ import subprocess
+
+__all__ = ['Popen', 'PIPE', 'STDOUT', 'check_output', 'CalledProcessError']
+
+
+if hasattr(subprocess, 'Popen'):
+ Popen = subprocess.Popen
+ # Assume that it also has the other constants.
+ PIPE = subprocess.PIPE
+ STDOUT = subprocess.STDOUT
+ CalledProcessError = subprocess.CalledProcessError
+ check_output = subprocess.check_output
+else:
+ # In restricted environments (such as Google App Engine), these are
+ # non-existent. Replace them with dummy versions that always raise OSError.
+ def Popen(*args, **kwargs):
+ raise OSError("subprocess.Popen is not supported")
+
+ def check_output(*args, **kwargs):
+ raise OSError("subprocess.check_output is not supported")
+ PIPE = -1
+ STDOUT = -2
+ # There is no need to catch CalledProcessError. These stubs cannot raise
+ # it. None in an except clause will simply not match any exceptions.
+ CalledProcessError = None
diff --git a/contrib/python/matplotlib/py2/matplotlib/container.py b/contrib/python/matplotlib/py2/matplotlib/container.py
new file mode 100644
index 00000000000..f96bf9f03f7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/container.py
@@ -0,0 +1,194 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import matplotlib.cbook as cbook
+import matplotlib.artist as martist
+
+
+class Container(tuple):
+ """
+ Base class for containers.
+
+ Containers are classes that collect semantically related Artists such as
+ the bars of a bar plot.
+ """
+
+ def __repr__(self):
+ return ("<{} object of {} artists>"
+ .format(type(self).__name__, len(self)))
+
+ def __new__(cls, *kl, **kwargs):
+ return tuple.__new__(cls, kl[0])
+
+ def __init__(self, kl, label=None):
+
+ self.eventson = False # fire events only if eventson
+ self._oid = 0 # an observer id
+ self._propobservers = {} # a dict from oids to funcs
+
+ self._remove_method = None
+
+ self.set_label(label)
+
+ def set_remove_method(self, f):
+ self._remove_method = f
+
+ def remove(self):
+ for c in cbook.flatten(
+ self, scalarp=lambda x: isinstance(x, martist.Artist)):
+ if c is not None:
+ c.remove()
+
+ if self._remove_method:
+ self._remove_method(self)
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ # remove the unpicklable remove method, this will get re-added on load
+ # (by the axes) if the artist lives on an axes.
+ d['_remove_method'] = None
+ return d
+
+ def get_label(self):
+ """
+ Get the label used for this artist in the legend.
+ """
+ return self._label
+
+ def set_label(self, s):
+ """
+ Set the label to *s* for auto legend.
+
+ ACCEPTS: string or anything printable with '%s' conversion.
+ """
+ if s is not None:
+ self._label = '%s' % (s, )
+ else:
+ self._label = None
+ self.pchanged()
+
+ def add_callback(self, func):
+ """
+ Adds a callback function that will be called whenever one of
+ the :class:`Artist`'s properties changes.
+
+ Returns an *id* that is useful for removing the callback with
+ :meth:`remove_callback` later.
+ """
+ oid = self._oid
+ self._propobservers[oid] = func
+ self._oid += 1
+ return oid
+
+ def remove_callback(self, oid):
+ """
+ Remove a callback based on its *id*.
+
+ .. seealso::
+
+ :meth:`add_callback`
+ For adding callbacks
+
+ """
+ try:
+ del self._propobservers[oid]
+ except KeyError:
+ pass
+
+ def pchanged(self):
+ """
+ Fire an event when property changed, calling all of the
+ registered callbacks.
+ """
+ for oid, func in list(six.iteritems(self._propobservers)):
+ func(self)
+
+ def get_children(self):
+ return [child for child in cbook.flatten(self) if child is not None]
+
+
+class BarContainer(Container):
+ """
+ Container for the artists of bar plots (e.g. created by `.Axes.bar`).
+
+ The container can be treated as a tuple of the *patches* themselves.
+ Additionally, you can access these and further parameters by the
+ attributes.
+
+ Attributes
+ ----------
+ patches : list of :class:`~matplotlib.patches.Rectangle`
+ The artists of the bars.
+
+ errorbar : None or :class:`~matplotlib.container.ErrorbarContainer`
+ A container for the error bar artists if error bars are present.
+ *None* otherwise.
+
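+ A typical instance is the return value of `.Axes.bar` (a sketch; *ax*
+ is assumed to be an existing Axes and the data is illustrative)::
+
+     bars = ax.bar(range(3), [1, 2, 3])   # returns a BarContainer
+     bars.patches                         # the three Rectangle artists
+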
+ """
+
+ def __init__(self, patches, errorbar=None, **kwargs):
+ self.patches = patches
+ self.errorbar = errorbar
+ Container.__init__(self, patches, **kwargs)
+
+
+class ErrorbarContainer(Container):
+ """
+ Container for the artists of error bars (e.g. created by `.Axes.errorbar`).
+
+ The container can be treated as the *lines* tuple itself.
+ Additionally, you can access these and further parameters by the
+ attributes.
+
+ Attributes
+ ----------
+ lines : tuple
+ Tuple of ``(data_line, caplines, barlinecols)``.
+
+ - data_line : :class:`~matplotlib.lines.Line2D` instance of
+ x, y plot markers and/or line.
+ - caplines : tuple of :class:`~matplotlib.lines.Line2D` instances of
+ the error bar caps.
+ - barlinecols : list of :class:`~matplotlib.collections.LineCollection`
+ with the horizontal and vertical error ranges.
+
+ has_xerr, has_yerr : bool
+ ``True`` if the errorbar has x/y errors.
+
+ """
+
+ def __init__(self, lines, has_xerr=False, has_yerr=False, **kwargs):
+ self.lines = lines
+ self.has_xerr = has_xerr
+ self.has_yerr = has_yerr
+ Container.__init__(self, lines, **kwargs)
+
+
+class StemContainer(Container):
+ """
+ Container for the artists created in a :meth:`.Axes.stem` plot.
+
+ The container can be treated like a namedtuple ``(markerline, stemlines,
+ baseline)``.
+
+ Attributes
+ ----------
+ markerline : :class:`~matplotlib.lines.Line2D`
+ The artist of the markers at the stem heads.
+
+ stemlines : list of :class:`~matplotlib.lines.Line2D`
+ The artists of the vertical lines for all stems.
+
+ baseline : :class:`~matplotlib.lines.Line2D`
+ The artist of the horizontal baseline.
+
+ """
+
+ def __init__(self, markerline_stemlines_baseline, **kwargs):
+ markerline, stemlines, baseline = markerline_stemlines_baseline
+ self.markerline = markerline
+ self.stemlines = stemlines
+ self.baseline = baseline
+ Container.__init__(self, markerline_stemlines_baseline, **kwargs)
diff --git a/contrib/python/matplotlib/py2/matplotlib/contour.py b/contrib/python/matplotlib/py2/matplotlib/contour.py
new file mode 100644
index 00000000000..f6fdfd61c26
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/contour.py
@@ -0,0 +1,1836 @@
+"""
+These are classes to support contour plotting and labelling for the Axes class.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange
+
+import warnings
+import matplotlib as mpl
+import numpy as np
+from numpy import ma
+import matplotlib._contour as _contour
+import matplotlib.path as mpath
+import matplotlib.ticker as ticker
+import matplotlib.cm as cm
+import matplotlib.colors as colors
+import matplotlib.collections as mcoll
+import matplotlib.font_manager as font_manager
+import matplotlib.text as text
+import matplotlib.cbook as cbook
+import matplotlib.mathtext as mathtext
+import matplotlib.patches as mpatches
+import matplotlib.texmanager as texmanager
+import matplotlib.transforms as mtransforms
+
+# Import needed for adding manual selection capability to clabel
+from matplotlib.blocking_input import BlockingContourLabeler
+
+# We can't use a single line collection for contour because a line
+# collection can have only a single line style, and we want to be able to have
+# dashed negative contours, for example, and solid positive contours.
+# We could use a single polygon collection for filled contours, but it
+# seems better to keep line and filled contours similar, with one collection
+# per level.
+
+
+class ClabelText(text.Text):
+ """
+ Unlike the ordinary text, the get_rotation returns an updated
+ angle in the pixel coordinate assuming that the input rotation is
+ an angle in data coordinate (or whatever transform set).
+ """
+ def get_rotation(self):
+ angle = text.Text.get_rotation(self)
+ trans = self.get_transform()
+ x, y = self.get_position()
+ new_angles = trans.transform_angles(np.array([angle]),
+ np.array([[x, y]]))
+ return new_angles[0]
+
+
+class ContourLabeler(object):
+ """Mixin to provide labelling capability to `.ContourSet`."""
+
+ def clabel(self, *args, **kwargs):
+ """
+ Label a contour plot.
+
+ Call signature::
+
+ clabel(cs, **kwargs)
+
+ Adds labels to line contours in *cs*, where *cs* is a
+ :class:`~matplotlib.contour.ContourSet` object returned by
+ contour.
+
+ ::
+
+ clabel(cs, v, **kwargs)
+
+ only labels contours listed in *v*.
+
+ Parameters
+ ----------
+ fontsize : string or float, optional
+ Size in points or relative size e.g., 'smaller', 'x-large'.
+ See `Text.set_size` for accepted string values.
+
+ colors :
+ Color of each label
+
+ - if *None*, the color of each label matches the color of
+ the corresponding contour
+
+ - if one string color, e.g., *colors* = 'r' or *colors* =
+ 'red', all labels will be plotted in this color
+
+ - if a tuple of matplotlib color args (string, float, rgb, etc),
+ different labels will be plotted in different colors in the order
+ specified
+
+ inline : bool, optional
+ If ``True`` the underlying contour is removed where the label is
+ placed. Default is ``True``.
+
+ inline_spacing : float, optional
+ Space in pixels to leave on each side of label when
+ placing inline. Defaults to 5.
+
+ This spacing will be exact for labels at locations where the
+ contour is straight, less so for labels on curved contours.
+
+ fmt : string or dict, optional
+ A format string for the label. Default is '%1.3f'
+
+ Alternatively, this can be a dictionary matching contour
+ levels with arbitrary strings to use for each contour level
+ (i.e., fmt[level]=string), or it can be any callable, such
+ as a :class:`~matplotlib.ticker.Formatter` instance, that
+ returns a string when called with a numeric contour level.
+
+ manual : bool or iterable, optional
+ If ``True``, contour labels will be placed manually using
+ mouse clicks. Click the first button near a contour to
+ add a label, click the second button (or potentially both
+ mouse buttons at once) to finish adding labels. The third
+ button can be used to remove the last label added, but
+ only if labels are not inline. Alternatively, the keyboard
+ can be used to select label locations (enter to end label
+ placement, delete or backspace act like the third mouse button,
+ and any other key will select a label location).
+
+ *manual* can also be an iterable object of x,y tuples.
+ Contour labels will be created as if the mouse is clicked at each
+ x,y position.
+
+ rightside_up : bool, optional
+ If ``True``, label rotations will always be plus
+ or minus 90 degrees from level. Default is ``True``.
+
+ use_clabeltext : bool, optional
+ If ``True``, `ClabelText` class (instead of `Text`) is used to
+ create labels. `ClabelText` recalculates rotation angles
+ of texts during the drawing time, therefore this can be used if
+ aspect of the axes changes. Default is ``False``.
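+
+ A minimal usage sketch (the data here is illustrative)::
+
+     import numpy as np
+     import matplotlib.pyplot as plt
+     x, y = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
+     cs = plt.contour(x, y, x ** 2 + y ** 2)
+     plt.clabel(cs, inline=True, fontsize=8)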
+ """
+
+ """
+ NOTES on how this all works:
+
+ clabel basically takes the input arguments and uses them to
+ add a list of "label specific" attributes to the ContourSet
+ object. These attributes are all of the form label* and names
+ should be fairly self explanatory.
+
+ Once these attributes are set, clabel passes control to the
+ labels method (case of automatic label placement) or
+ `BlockingContourLabeler` (case of manual label placement).
+ """
+
+ fontsize = kwargs.get('fontsize', None)
+ inline = kwargs.get('inline', 1)
+ inline_spacing = kwargs.get('inline_spacing', 5)
+ self.labelFmt = kwargs.get('fmt', '%1.3f')
+ _colors = kwargs.get('colors', None)
+
+ self._use_clabeltext = kwargs.get('use_clabeltext', False)
+
+ # Detect if manual selection is desired and remove from argument list
+ self.labelManual = kwargs.get('manual', False)
+
+ self.rightside_up = kwargs.get('rightside_up', True)
+ if len(args) == 0:
+ levels = self.levels
+ indices = list(xrange(len(self.cvalues)))
+ elif len(args) == 1:
+ levlabs = list(args[0])
+ indices, levels = [], []
+ for i, lev in enumerate(self.levels):
+ if lev in levlabs:
+ indices.append(i)
+ levels.append(lev)
+ if len(levels) < len(levlabs):
+ raise ValueError("Specified levels {} don't match available "
+ "levels {}".format(levlabs, self.levels))
+ else:
+ raise TypeError("Illegal arguments to clabel, see help(clabel)")
+ self.labelLevelList = levels
+ self.labelIndiceList = indices
+
+ self.labelFontProps = font_manager.FontProperties()
+ self.labelFontProps.set_size(fontsize)
+ font_size_pts = self.labelFontProps.get_size_in_points()
+ self.labelFontSizeList = [font_size_pts] * len(levels)
+
+ if _colors is None:
+ self.labelMappable = self
+ self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
+ else:
+ cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
+ self.labelCValueList = list(xrange(len(self.labelLevelList)))
+ self.labelMappable = cm.ScalarMappable(cmap=cmap,
+ norm=colors.NoNorm())
+
+ self.labelXYs = []
+
+ if cbook.iterable(self.labelManual):
+ for x, y in self.labelManual:
+ self.add_label_near(x, y, inline,
+ inline_spacing)
+
+ elif self.labelManual:
+ print('Select label locations manually using first mouse button.')
+ print('End manual selection with second mouse button.')
+ if not inline:
+ print('Remove last label by clicking third mouse button.')
+
+ blocking_contour_labeler = BlockingContourLabeler(self)
+ blocking_contour_labeler(inline, inline_spacing)
+ else:
+ self.labels(inline, inline_spacing)
+
+ # Hold on to some old attribute names. These are deprecated and will
+ # be removed in the near future (sometime after 2008-08-01), but
+ # keeping for now for backwards compatibility
+ self.cl = self.labelTexts
+ self.cl_xy = self.labelXYs
+ self.cl_cvalues = self.labelCValues
+
+ self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)
+ return self.labelTextsList
+
+ def print_label(self, linecontour, labelwidth):
+ "Return *False* if contours are too short for a label."
+ return (len(linecontour) > 10 * labelwidth
+ or (np.ptp(linecontour, axis=0) > 1.2 * labelwidth).any())
+
+ def too_close(self, x, y, lw):
+ "Return *True* if a label is already near this location."
+ for loc in self.labelXYs:
+ d = np.sqrt((x - loc[0]) ** 2 + (y - loc[1]) ** 2)
+ if d < 1.2 * lw:
+ return True
+ return False
+
+ def get_label_coords(self, distances, XX, YY, ysize, lw):
+ """
+ Return x, y, and the index of a label location.
+
+ Labels are plotted at a location with the smallest
+ deviation of the contour from a straight line
+ unless there is another label nearby, in which case
+ the next best place on the contour is picked up.
+ If all such candidates are rejected, the beginning
+ of the contour is chosen.
+ """
+ hysize = int(ysize / 2)
+ adist = np.argsort(distances)
+
+ for ind in adist:
+ x, y = XX[ind][hysize], YY[ind][hysize]
+ if self.too_close(x, y, lw):
+ continue
+ return x, y, ind
+
+ ind = adist[0]
+ x, y = XX[ind][hysize], YY[ind][hysize]
+ return x, y, ind
+
+ def get_label_width(self, lev, fmt, fsize):
+ """
+ Return the width of the label in points.
+ """
+ if not isinstance(lev, six.string_types):
+ lev = self.get_text(lev, fmt)
+
+ lev, ismath = text.Text.is_math_text(lev)
+ if ismath == 'TeX':
+ if not hasattr(self, '_TeX_manager'):
+ self._TeX_manager = texmanager.TexManager()
+ lw, _, _ = self._TeX_manager.get_text_width_height_descent(lev,
+ fsize)
+ elif ismath:
+ if not hasattr(self, '_mathtext_parser'):
+ self._mathtext_parser = mathtext.MathTextParser('bitmap')
+ img, _ = self._mathtext_parser.parse(lev, dpi=72,
+ prop=self.labelFontProps)
+ lw = img.get_width() # at dpi=72, the units are PostScript points
+ else:
+ # width is much less than "font size"
+ lw = (len(lev)) * fsize * 0.6
+
+ return lw
+
+ @cbook.deprecated("2.2")
+ def get_real_label_width(self, lev, fmt, fsize):
+ """
+ This computes actual onscreen label width.
+ This uses some black magic to determine onscreen extent of non-drawn
+ label. This magic may not be very robust.
+
+ This method is not being used, and may be modified or removed.
+ """
+ # Find middle of axes
+ xx = np.mean(np.asarray(self.ax.axis()).reshape(2, 2), axis=1)
+
+ # Temporarily create text object
+ t = text.Text(xx[0], xx[1])
+ self.set_label_props(t, self.get_text(lev, fmt), 'k')
+
+ # Some black magic to get onscreen extent
+ # NOTE: This will only work for already drawn figures, as the canvas
+ # does not have a renderer otherwise. This is the reason this function
+ # can't be integrated into the rest of the code.
+ bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)
+
+ # difference in pixel extent of image
+ lw = np.diff(bbox.corners()[0::2, 0])[0]
+
+ return lw
+
+ def set_label_props(self, label, text, color):
+ """Set the label properties - color, fontsize, text."""
+ label.set_text(text)
+ label.set_color(color)
+ label.set_fontproperties(self.labelFontProps)
+ label.set_clip_box(self.ax.bbox)
+
+ def get_text(self, lev, fmt):
+ """Get the text of the label."""
+ if isinstance(lev, six.string_types):
+ return lev
+ else:
+ if isinstance(fmt, dict):
+ return fmt.get(lev, '%1.3f')
+ elif callable(fmt):
+ return fmt(lev)
+ else:
+ return fmt % lev
+
+ def locate_label(self, linecontour, labelwidth):
+ """
+ Find a good place to draw a label (relatively flat part of the contour).
+ """
+
+ # Number of contour points
+ nsize = len(linecontour)
+ if labelwidth > 1:
+ xsize = int(np.ceil(nsize / labelwidth))
+ else:
+ xsize = 1
+ if xsize == 1:
+ ysize = nsize
+ else:
+ ysize = int(labelwidth)
+
+ XX = np.resize(linecontour[:, 0], (xsize, ysize))
+ YY = np.resize(linecontour[:, 1], (xsize, ysize))
+ # I might have fouled up the following:
+ yfirst = YY[:, :1]
+ ylast = YY[:, -1:]
+ xfirst = XX[:, :1]
+ xlast = XX[:, -1:]
+ s = (yfirst - YY) * (xlast - xfirst) - (xfirst - XX) * (ylast - yfirst)
+ L = np.hypot(xlast - xfirst, ylast - yfirst)
+ # Ignore warning that divide by zero throws, as this is a valid option
+ with np.errstate(divide='ignore', invalid='ignore'):
+ dist = np.sum(np.abs(s) / L, axis=-1)
+ x, y, ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
+
+ # There must be a more efficient way...
+ lc = [tuple(l) for l in linecontour]
+ dind = lc.index((x, y))
+
+ return x, y, dind
+
+ def calc_label_rot_and_inline(self, slc, ind, lw, lc=None, spacing=5):
+ """
+ This function calculates the appropriate label rotation given
+ the linecontour coordinates in screen units, the index of the
+ label location and the label width.
+
+ It will also break the contour and calculate inlining if *lc* is
+ not empty (*lc* defaults to the empty list if None). *spacing*
+ is the space in pixels to leave empty around the label.
+
+ Do both of these tasks at once to avoid calculating path lengths
+ multiple times, which is relatively costly.
+
+ The method used here involves calculating the path length
+ along the contour in pixel coordinates, looking approximately
+ (label width) / 2 away from the central point to determine the
+ rotation, and then breaking the contour if desired.
+ """
+
+ if lc is None:
+ lc = []
+ # Half the label width
+ hlw = lw / 2.0
+
+ # Check if closed and, if so, rotate contour so label is at edge
+ closed = _is_closed_polygon(slc)
+ if closed:
+ slc = np.r_[slc[ind:-1], slc[:ind + 1]]
+
+ if len(lc): # Rotate lc also if not empty
+ lc = np.r_[lc[ind:-1], lc[:ind + 1]]
+
+ ind = 0
+
+ # Calculate path lengths
+ pl = np.zeros(slc.shape[0], dtype=float)
+ dx = np.diff(slc, axis=0)
+ pl[1:] = np.cumsum(np.hypot(dx[:, 0], dx[:, 1]))
+ pl = pl - pl[ind]
+
+ # Use linear interpolation to get points around label
+ xi = np.array([-hlw, hlw])
+ if closed: # Look at end also for closed contours
+ dp = np.array([pl[-1], 0])
+ else:
+ dp = np.zeros_like(xi)
+
+ # Get angle of vector between the two ends of the label - must be
+ # calculated in pixel space for text rotation to work correctly.
+ (dx,), (dy,) = (np.diff(np.interp(dp + xi, pl, slc_col))
+ for slc_col in slc.T)
+ rotation = np.rad2deg(np.arctan2(dy, dx))
+
+ if self.rightside_up:
+ # Fix angle so text is never upside-down
+ rotation = (rotation + 90) % 180 - 90
+
+ # Break contour if desired
+ nlc = []
+ if len(lc):
+ # Expand range by spacing
+ xi = dp + xi + np.array([-spacing, spacing])
+
+ # Get (integer) indices near points of interest; use -1 as marker
+ # for out of bounds.
+ I = np.interp(xi, pl, np.arange(len(pl)), left=-1, right=-1)
+ I = [np.floor(I[0]).astype(int), np.ceil(I[1]).astype(int)]
+ if I[0] != -1:
+ xy1 = [np.interp(xi[0], pl, lc_col) for lc_col in lc.T]
+ if I[1] != -1:
+ xy2 = [np.interp(xi[1], pl, lc_col) for lc_col in lc.T]
+
+ # Actually break contours
+ if closed:
+ # This will remove contour if shorter than label
+ if all(i != -1 for i in I):
+ nlc.append(np.row_stack([xy2, lc[I[1]:I[0]+1], xy1]))
+ else:
+ # These will remove pieces of contour if they have length zero
+ if I[0] != -1:
+ nlc.append(np.row_stack([lc[:I[0]+1], xy1]))
+ if I[1] != -1:
+ nlc.append(np.row_stack([xy2, lc[I[1]:]]))
+
+ # The current implementation removes contours completely
+ # covered by labels. Uncomment line below to keep
+ # original contour if this is the preferred behavior.
+ # if not len(nlc): nlc = [ lc ]
+
+ return rotation, nlc
+
+ def _get_label_text(self, x, y, rotation):
+ dx, dy = self.ax.transData.inverted().transform_point((x, y))
+ t = text.Text(dx, dy, rotation=rotation,
+ horizontalalignment='center',
+ verticalalignment='center')
+ return t
+
+ def _get_label_clabeltext(self, x, y, rotation):
+ # x, y, rotation is given in pixel coordinate. Convert them to
+ # the data coordinate and create a label using ClabelText
+ # class. This way, the rotation of the clabel always follows the
+ # contour line.
+ transDataInv = self.ax.transData.inverted()
+ dx, dy = transDataInv.transform_point((x, y))
+ drotation = transDataInv.transform_angles(np.array([rotation]),
+ np.array([[x, y]]))
+ t = ClabelText(dx, dy, rotation=drotation[0],
+ horizontalalignment='center',
+ verticalalignment='center')
+
+ return t
+
+ def _add_label(self, t, x, y, lev, cvalue):
+ color = self.labelMappable.to_rgba(cvalue, alpha=self.alpha)
+
+ _text = self.get_text(lev, self.labelFmt)
+ self.set_label_props(t, _text, color)
+ self.labelTexts.append(t)
+ self.labelCValues.append(cvalue)
+ self.labelXYs.append((x, y))
+
+ # Add label to plot here - useful for manual mode label selection
+ self.ax.add_artist(t)
+
+ def add_label(self, x, y, rotation, lev, cvalue):
+ """
+ Add contour label using :class:`~matplotlib.text.Text` class.
+ """
+
+ t = self._get_label_text(x, y, rotation)
+ self._add_label(t, x, y, lev, cvalue)
+
+ def add_label_clabeltext(self, x, y, rotation, lev, cvalue):
+ """
+ Add contour label using :class:`ClabelText` class.
+ """
+ # x, y, rotation is given in pixel coordinate. Convert them to
+ # the data coordinate and create a label using ClabelText
+ # class. This way, the rotation of the clabel always follows the
+ # contour line.
+
+ t = self._get_label_clabeltext(x, y, rotation)
+ self._add_label(t, x, y, lev, cvalue)
+
+ def add_label_near(self, x, y, inline=True, inline_spacing=5,
+ transform=None):
+ """
+ Add a label near the point (x, y). If transform is None
+ (default), (x, y) is in data coordinates; if transform is
+ False, (x, y) is in display coordinates; otherwise, the
+ specified transform will be used to translate (x, y) into
+ display coordinates.
+
+ Parameters
+ ----------
+ x, y : float
+ The approximate location of the label.
+
+ inline : bool, optional, default: True
+ If *True*, remove the segment of the contour beneath the label.
+
+ inline_spacing : int, optional, default: 5
+ Space in pixels to leave on each side of label when placing
+ inline. This spacing will be exact for labels at locations where
+ the contour is straight, less so for labels on curved contours.
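+
+ A minimal usage sketch (``cs`` is assumed to be a ContourSet on which
+ ``clabel`` has already been called)::
+
+ cs.add_label_near(0.5, 0.5, inline=True)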
+ """
+
+ if transform is None:
+ transform = self.ax.transData
+
+ if transform:
+ x, y = transform.transform_point((x, y))
+
+ # find the nearest contour _in screen units_
+ conmin, segmin, imin, xmin, ymin = self.find_nearest_contour(
+ x, y, self.labelIndiceList)[:5]
+
+ # The calc_label_rot_and_inline routine requires that (xmin,ymin)
+ # be a vertex in the path. So, if it isn't, add a vertex here
+
+ # grab the paths from the collections
+ paths = self.collections[conmin].get_paths()
+ # grab the correct segment
+ active_path = paths[segmin]
+ # grab its vertices
+ lc = active_path.vertices
+ # sort out where the new vertex should be added, in data units
+ xcmin = self.ax.transData.inverted().transform_point([xmin, ymin])
+ # if there isn't a vertex close enough
+ if not np.allclose(xcmin, lc[imin]):
+ # insert new data into the vertex list
+ lc = np.r_[lc[:imin], np.array(xcmin)[None, :], lc[imin:]]
+ # replace the path with the new one
+ paths[segmin] = mpath.Path(lc)
+
+ # Get index of nearest level in subset of levels used for labeling
+ lmin = self.labelIndiceList.index(conmin)
+
+ # Coordinates of contour
+ paths = self.collections[conmin].get_paths()
+ lc = paths[segmin].vertices
+
+ # In pixel/screen space
+ slc = self.ax.transData.transform(lc)
+
+ # Get label width for rotating labels and breaking contours
+ lw = self.get_label_width(self.labelLevelList[lmin],
+ self.labelFmt, self.labelFontSizeList[lmin])
+ # lw is in points.
+ lw *= self.ax.figure.dpi / 72.0 # scale to screen coordinates
+ # now lw in pixels
+
+ # Figure out label rotation.
+ if inline:
+ lcarg = lc
+ else:
+ lcarg = None
+ rotation, nlc = self.calc_label_rot_and_inline(
+ slc, imin, lw, lcarg,
+ inline_spacing)
+
+ self.add_label(xmin, ymin, rotation, self.labelLevelList[lmin],
+ self.labelCValueList[lmin])
+
+ if inline:
+ # Remove old, not looping over paths so we can do this up front
+ paths.pop(segmin)
+
+ # Add paths if not empty or single point
+ for n in nlc:
+ if len(n) > 1:
+ paths.append(mpath.Path(n))
+
+ def pop_label(self, index=-1):
+ """Defaults to removing last label, but any index can be supplied"""
+ self.labelCValues.pop(index)
+ t = self.labelTexts.pop(index)
+ t.remove()
+
+ def labels(self, inline, inline_spacing):
+
+ if self._use_clabeltext:
+ add_label = self.add_label_clabeltext
+ else:
+ add_label = self.add_label
+
+ for icon, lev, fsize, cvalue in zip(
+ self.labelIndiceList, self.labelLevelList,
+ self.labelFontSizeList, self.labelCValueList):
+
+ con = self.collections[icon]
+ trans = con.get_transform()
+ lw = self.get_label_width(lev, self.labelFmt, fsize)
+ lw *= self.ax.figure.dpi / 72.0 # scale to screen coordinates
+ additions = []
+ paths = con.get_paths()
+ for segNum, linepath in enumerate(paths):
+ lc = linepath.vertices # Line contour
+ slc0 = trans.transform(lc) # Line contour in screen coords
+
+ # For closed polygons, add extra point to avoid division by
+ # zero in print_label and locate_label. Other than these
+ # functions, this is not necessary and should probably be
+ # eventually removed.
+ if _is_closed_polygon(lc):
+ slc = np.r_[slc0, slc0[1:2, :]]
+ else:
+ slc = slc0
+
+ # Check if long enough for a label
+ if self.print_label(slc, lw):
+ x, y, ind = self.locate_label(slc, lw)
+
+ if inline:
+ lcarg = lc
+ else:
+ lcarg = None
+ rotation, new = self.calc_label_rot_and_inline(
+ slc0, ind, lw, lcarg,
+ inline_spacing)
+
+ # Actually add the label
+ add_label(x, y, rotation, lev, cvalue)
+
+ # If inline, add new contours
+ if inline:
+ for n in new:
+ # Add path if not empty or single point
+ if len(n) > 1:
+ additions.append(mpath.Path(n))
+ else: # If not adding label, keep old path
+ additions.append(linepath)
+
+ # After looping over all segments on a contour, remove old
+ # paths and add new ones if inlining
+ if inline:
+ del paths[:]
+ paths.extend(additions)
+
+
+def _find_closest_point_on_leg(p1, p2, p0):
+ """Find the closest point to p0 on line segment connecting p1 and p2."""
+
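+ # Worked illustration (comment only; the values are hypothetical): with
+ # p1=(0, 0), p2=(2, 0) and p0=(1, 1), proj = 2/4 = 0.5, so the closest
+ # point is (1, 0) at squared distance 1.
+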
+ # handle degenerate case
+ if np.all(p2 == p1):
+ d = np.sum((p0 - p1)**2)
+ return d, p1
+
+ d21 = p2 - p1
+ d01 = p0 - p1
+
+ # project on to line segment to find closest point
+ proj = np.dot(d01, d21) / np.dot(d21, d21)
+ if proj < 0:
+ proj = 0
+ if proj > 1:
+ proj = 1
+ pc = p1 + proj * d21
+
+ # find squared distance
+ d = np.sum((pc-p0)**2)
+
+ return d, pc
+
+
+def _is_closed_polygon(X):
+ """
+ Return whether the first and last objects in a sequence are the same.
+ These are presumably coordinates on a polygonal curve, in which case
+ this function tests if that curve is closed.
+ """
+ return np.all(X[0] == X[-1])
+
+
+def _find_closest_point_on_path(lc, point):
+ """
+ lc: coordinates of vertices
+ point: coordinates of test point
+ """
+
+ # find index of closest vertex for this segment
+ ds = np.sum((lc - point[None, :])**2, 1)
+ imin = np.argmin(ds)
+
+ dmin = np.inf
+ xcmin = None
+ legmin = (None, None)
+
+ closed = _is_closed_polygon(lc)
+
+ # build list of legs before and after this vertex
+ legs = []
+ if imin > 0 or closed:
+ legs.append(((imin-1) % len(lc), imin))
+ if imin < len(lc) - 1 or closed:
+ legs.append((imin, (imin+1) % len(lc)))
+
+ for leg in legs:
+ d, xc = _find_closest_point_on_leg(lc[leg[0]], lc[leg[1]], point)
+ if d < dmin:
+ dmin = d
+ xcmin = xc
+ legmin = leg
+
+ return (dmin, xcmin, legmin)
+
+
+class ContourSet(cm.ScalarMappable, ContourLabeler):
+ """
+ Store a set of contour lines or filled regions.
+
+ User-callable method: `~.axes.Axes.clabel`
+
+ Parameters
+ ----------
+ ax : `~.axes.Axes`
+
+ levels : [level0, level1, ..., leveln]
+ A list of floating point numbers indicating the contour
+ levels.
+
+ allsegs : [level0segs, level1segs, ...]
+ List of all the polygon segments for all the *levels*.
+ For contour lines ``len(allsegs) == len(levels)``, and for
+ filled contour regions ``len(allsegs) == len(levels)-1``. The lists
+ should look like::
+
+ level0segs = [polygon0, polygon1, ...]
+ polygon0 = array_like [[x0,y0], [x1,y1], ...]
+
+ allkinds : ``None`` or [level0kinds, level1kinds, ...]
+ Optional list of all the polygon vertex kinds (code types), as
+ described and used in Path. This is used to allow multiply-
+ connected paths such as holes within filled polygons.
+ If not ``None``, ``len(allkinds) == len(allsegs)``. The lists
+ should look like::
+
+ level0kinds = [polygon0kinds, ...]
+ polygon0kinds = [vertexcode0, vertexcode1, ...]
+
+ If *allkinds* is not ``None``, usually all polygons for a
+ particular contour level are grouped together so that
+ ``level0segs = [polygon0]`` and ``level0kinds = [polygon0kinds]``.
+
+ kwargs :
+ Keyword arguments are as described in the docstring of
+ `~.axes.Axes.contour`.
+
+ Attributes
+ ----------
+ ax:
+ The axes object in which the contours are drawn.
+
+ collections:
+ A silent_list of LineCollections or PolyCollections.
+
+ levels:
+ Contour levels.
+
+ layers:
+ Same as levels for line contours; half-way between
+ levels for filled contours. See :meth:`_process_colors`.
+ """
+
+ def __init__(self, ax, *args, **kwargs):
+ """
+ Draw contour lines or filled regions, depending on
+ whether keyword arg *filled* is ``False`` (default) or ``True``.
+
+ Call signature::
+
+ ContourSet(ax, levels, allsegs, [allkinds], **kwargs)
+
+ Parameters
+ ----------
+ ax :
+ The `~.axes.Axes` object to draw on.
+
+ levels : [level0, level1, ..., leveln]
+ A list of floating point numbers indicating the contour
+ levels.
+
+ allsegs : [level0segs, level1segs, ...]
+ List of all the polygon segments for all the *levels*.
+ For contour lines ``len(allsegs) == len(levels)``, and for
+ filled contour regions ``len(allsegs) == len(levels)-1``. The lists
+ should look like::
+
+ level0segs = [polygon0, polygon1, ...]
+ polygon0 = array_like [[x0,y0], [x1,y1], ...]
+
+ allkinds : [level0kinds, level1kinds, ...], optional
+ Optional list of all the polygon vertex kinds (code types), as
+ described and used in Path. This is used to allow multiply-
+ connected paths such as holes within filled polygons.
+ If not ``None``, ``len(allkinds) == len(allsegs)``. The lists
+ should look like::
+
+ level0kinds = [polygon0kinds, ...]
+ polygon0kinds = [vertexcode0, vertexcode1, ...]
+
+ If *allkinds* is not ``None``, usually all polygons for a
+ particular contour level are grouped together so that
+ ``level0segs = [polygon0]`` and ``level0kinds = [polygon0kinds]``.
+
+ **kwargs
+ Keyword arguments are as described in the docstring of
+ `~.axes.Axes.contour`.
+ """
+ self.ax = ax
+ self.levels = kwargs.pop('levels', None)
+ self.filled = kwargs.pop('filled', False)
+ self.linewidths = kwargs.pop('linewidths', None)
+ self.linestyles = kwargs.pop('linestyles', None)
+
+ self.hatches = kwargs.pop('hatches', [None])
+
+ self.alpha = kwargs.pop('alpha', None)
+ self.origin = kwargs.pop('origin', None)
+ self.extent = kwargs.pop('extent', None)
+ cmap = kwargs.pop('cmap', None)
+ self.colors = kwargs.pop('colors', None)
+ norm = kwargs.pop('norm', None)
+ vmin = kwargs.pop('vmin', None)
+ vmax = kwargs.pop('vmax', None)
+ self.extend = kwargs.pop('extend', 'neither')
+ self.antialiased = kwargs.pop('antialiased', None)
+ if self.antialiased is None and self.filled:
+ self.antialiased = False # eliminate artifacts; we are not
+ # stroking the boundaries.
+ # The default for line contours will be taken from
+ # the LineCollection default, which uses the
+ # rcParams['lines.antialiased']
+
+ self.nchunk = kwargs.pop('nchunk', 0)
+ self.locator = kwargs.pop('locator', None)
+ if (isinstance(norm, colors.LogNorm)
+ or isinstance(self.locator, ticker.LogLocator)):
+ self.logscale = True
+ if norm is None:
+ norm = colors.LogNorm()
+ if self.extend != 'neither':
+ raise ValueError('extend kwarg does not work yet with log '
+ 'scale')
+ else:
+ self.logscale = False
+
+ if self.origin not in [None, 'lower', 'upper', 'image']:
+ raise ValueError("If given, *origin* must be one of [ 'lower' |"
+ " 'upper' | 'image']")
+ if self.extent is not None and len(self.extent) != 4:
+ raise ValueError("If given, *extent* must be '[ *None* |"
+ " (x0,x1,y0,y1) ]'")
+ if self.colors is not None and cmap is not None:
+ raise ValueError('Either colors or cmap must be None')
+ if self.origin == 'image':
+ self.origin = mpl.rcParams['image.origin']
+
+ self._transform = kwargs.pop('transform', None)
+
+ kwargs = self._process_args(*args, **kwargs)
+ self._process_levels()
+
+ if self.colors is not None:
+ ncolors = len(self.levels)
+ if self.filled:
+ ncolors -= 1
+ i0 = 0
+
+ # Handle the case where colors are given for the extended
+ # parts of the contour.
+ extend_min = self.extend in ['min', 'both']
+ extend_max = self.extend in ['max', 'both']
+ use_set_under_over = False
+ # if we are extending the lower end, and we've been given enough
+ # colors then skip the first color in the resulting cmap. For the
+ # extend_max case we don't need to worry about passing more colors
+ # than ncolors as ListedColormap will clip.
+ total_levels = ncolors + int(extend_min) + int(extend_max)
+ if (len(self.colors) == total_levels and
+ any([extend_min, extend_max])):
+ use_set_under_over = True
+ if extend_min:
+ i0 = 1
+
+ cmap = colors.ListedColormap(self.colors[i0:None], N=ncolors)
+
+ if use_set_under_over:
+ if extend_min:
+ cmap.set_under(self.colors[0])
+ if extend_max:
+ cmap.set_over(self.colors[-1])
+
+ if self.filled:
+ self.collections = cbook.silent_list('mcoll.PathCollection')
+ else:
+ self.collections = cbook.silent_list('mcoll.LineCollection')
+ # label lists must be initialized here
+ self.labelTexts = []
+ self.labelCValues = []
+
+ kw = {'cmap': cmap}
+ if norm is not None:
+ kw['norm'] = norm
+ # sets self.cmap, norm if needed;
+ cm.ScalarMappable.__init__(self, **kw)
+ if vmin is not None:
+ self.norm.vmin = vmin
+ if vmax is not None:
+ self.norm.vmax = vmax
+ self._process_colors()
+
+ self.allsegs, self.allkinds = self._get_allsegs_and_allkinds()
+
+ if self.filled:
+ if self.linewidths is not None:
+ warnings.warn('linewidths is ignored by contourf')
+
+ # Lower and upper contour levels.
+ lowers, uppers = self._get_lowers_and_uppers()
+
+ # Ensure allkinds can be zipped below.
+ if self.allkinds is None:
+ self.allkinds = [None] * len(self.allsegs)
+
+ # Default zorder taken from Collection
+ zorder = kwargs.pop('zorder', 1)
+ for level, level_upper, segs, kinds in \
+ zip(lowers, uppers, self.allsegs, self.allkinds):
+ paths = self._make_paths(segs, kinds)
+
+ col = mcoll.PathCollection(
+ paths,
+ antialiaseds=(self.antialiased,),
+ edgecolors='none',
+ alpha=self.alpha,
+ transform=self.get_transform(),
+ zorder=zorder)
+ self.ax.add_collection(col, autolim=False)
+ self.collections.append(col)
+ else:
+ tlinewidths = self._process_linewidths()
+ self.tlinewidths = tlinewidths
+ tlinestyles = self._process_linestyles()
+ aa = self.antialiased
+ if aa is not None:
+ aa = (self.antialiased,)
+ # Default zorder taken from LineCollection
+ zorder = kwargs.pop('zorder', 2)
+ for level, width, lstyle, segs in \
+ zip(self.levels, tlinewidths, tlinestyles, self.allsegs):
+ col = mcoll.LineCollection(
+ segs,
+ antialiaseds=aa,
+ linewidths=width,
+ linestyles=[lstyle],
+ alpha=self.alpha,
+ transform=self.get_transform(),
+ zorder=zorder)
+ col.set_label('_nolegend_')
+ self.ax.add_collection(col, autolim=False)
+ self.collections.append(col)
+
+ for col in self.collections:
+ col.sticky_edges.x[:] = [self._mins[0], self._maxs[0]]
+ col.sticky_edges.y[:] = [self._mins[1], self._maxs[1]]
+ self.ax.update_datalim([self._mins, self._maxs])
+ self.ax.autoscale_view(tight=True)
+
+ self.changed() # set the colors
+
+ if kwargs:
+ s = ", ".join(map(repr, kwargs))
+ warnings.warn('The following kwargs were not used by contour: ' +
+ s)
+
+ def get_transform(self):
+ """
+ Return the :class:`~matplotlib.transforms.Transform`
+ instance used by this ContourSet.
+ """
+ if self._transform is None:
+ self._transform = self.ax.transData
+ elif (not isinstance(self._transform, mtransforms.Transform)
+ and hasattr(self._transform, '_as_mpl_transform')):
+ self._transform = self._transform._as_mpl_transform(self.ax)
+ return self._transform
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ # the C object _contour_generator cannot currently be pickled. This
+ # isn't a big issue as it is not actually used once the contour has
+ # been calculated.
+ state['_contour_generator'] = None
+ return state
+
+ def legend_elements(self, variable_name='x', str_format=str):
+ """
+ Return a list of artists and labels suitable for passing through
+ to :func:`plt.legend` which represent this ContourSet.
+
+ The labels have the form "0 < x <= 1" stating the data ranges which
+ the artists represent.
+
+ Parameters
+ ----------
+ variable_name : str
+ The string used inside the inequality used on the labels.
+
+ str_format : function: float -> str
+ Function used to format the numbers in the labels.
+
+ Returns
+ -------
+ artists : List[`.Artist`]
+ A list of the artists.
+
+ labels : List[str]
+ A list of the labels.
+
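+ Examples
+ --------
+ A minimal sketch of the intended use; ``cs`` is assumed to be a filled
+ ContourSet and ``ax`` an existing Axes::
+
+ artists, labels = cs.legend_elements()
+ ax.legend(artists, labels)
+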
+ """
+ artists = []
+ labels = []
+
+ if self.filled:
+ lowers, uppers = self._get_lowers_and_uppers()
+ n_levels = len(self.collections)
+
+ for i, (collection, lower, upper) in enumerate(
+ zip(self.collections, lowers, uppers)):
+ patch = mpatches.Rectangle(
+ (0, 0), 1, 1,
+ facecolor=collection.get_facecolor()[0],
+ hatch=collection.get_hatch(),
+ alpha=collection.get_alpha())
+ artists.append(patch)
+
+ lower = str_format(lower)
+ upper = str_format(upper)
+
+ if i == 0 and self.extend in ('min', 'both'):
+ labels.append(r'$%s \leq %s$' % (variable_name,
+ lower))
+ elif i == n_levels - 1 and self.extend in ('max', 'both'):
+ labels.append(r'$%s > %s$' % (variable_name,
+ upper))
+ else:
+ labels.append(r'$%s < %s \leq %s$' % (lower,
+ variable_name,
+ upper))
+ else:
+ for collection, level in zip(self.collections, self.levels):
+
+ patch = mcoll.LineCollection(None)
+ patch.update_from(collection)
+
+ artists.append(patch)
+ # format the level for insertion into the labels
+ level = str_format(level)
+ labels.append(r'$%s = %s$' % (variable_name, level))
+
+ return artists, labels
+
+ def _process_args(self, *args, **kwargs):
+ """
+ Process *args* and *kwargs*; override in derived classes.
+
+ Must set self.levels, self.zmin and self.zmax, and update axes
+ limits.
+ """
+ self.levels = args[0]
+ self.allsegs = args[1]
+ self.allkinds = args[2] if len(args) > 2 else None
+ self.zmax = np.max(self.levels)
+ self.zmin = np.min(self.levels)
+ self._auto = False
+
+ # Check lengths of levels and allsegs.
+ if self.filled:
+ if len(self.allsegs) != len(self.levels) - 1:
+ raise ValueError('must be one fewer element in allsegs '
+ 'than in levels')
+ else:
+ if len(self.allsegs) != len(self.levels):
+ raise ValueError('must be same number of segments as levels')
+
+ # Check length of allkinds.
+ if (self.allkinds is not None and
+ len(self.allkinds) != len(self.allsegs)):
+ raise ValueError('allkinds has different length to allsegs')
+
+ # Determine x,y bounds and update axes data limits.
+ flatseglist = [s for seg in self.allsegs for s in seg]
+ points = np.concatenate(flatseglist, axis=0)
+ self._mins = points.min(axis=0)
+ self._maxs = points.max(axis=0)
+
+ return kwargs
+
+ def _get_allsegs_and_allkinds(self):
+ """
+ Override in derived classes to create and return allsegs and allkinds.
+ allkinds can be None.
+ """
+ return self.allsegs, self.allkinds
+
+ def _get_lowers_and_uppers(self):
+ """
+ Return (lowers,uppers) for filled contours.
+ """
+ lowers = self._levels[:-1]
+ if self.zmin == lowers[0]:
+ # Include minimum values in lowest interval
+ lowers = lowers.copy() # so we don't change self._levels
+ if self.logscale:
+ lowers[0] = 0.99 * self.zmin
+ else:
+ lowers[0] -= 1
+ uppers = self._levels[1:]
+ return (lowers, uppers)
+
+ def _make_paths(self, segs, kinds):
+ if kinds is not None:
+ return [mpath.Path(seg, codes=kind)
+ for seg, kind in zip(segs, kinds)]
+ else:
+ return [mpath.Path(seg) for seg in segs]
+
+ def changed(self):
+ tcolors = [(tuple(rgba),)
+ for rgba in self.to_rgba(self.cvalues, alpha=self.alpha)]
+ self.tcolors = tcolors
+ hatches = self.hatches * len(tcolors)
+ for color, hatch, collection in zip(tcolors, hatches,
+ self.collections):
+ if self.filled:
+ collection.set_facecolor(color)
+ # update the collection's hatch (may be None)
+ collection.set_hatch(hatch)
+ else:
+ collection.set_color(color)
+ for label, cv in zip(self.labelTexts, self.labelCValues):
+ label.set_alpha(self.alpha)
+ label.set_color(self.labelMappable.to_rgba(cv))
+ # add label colors
+ cm.ScalarMappable.changed(self)
+
+ def _autolev(self, N):
+ """
+ Select contour levels to span the data.
+
+ We need two more levels for filled contours than for
+ line contours, because for filled contours we need to specify
+ the lower and upper boundary of each range. For example,
+ a single contour boundary, say at z = 0, requires only
+ one contour line, but two filled regions, and therefore
+ three levels to provide boundaries for both regions.
+ """
+ if self.locator is None:
+ if self.logscale:
+ self.locator = ticker.LogLocator()
+ else:
+ self.locator = ticker.MaxNLocator(N + 1, min_n_ticks=1)
+
+ lev = self.locator.tick_values(self.zmin, self.zmax)
+ self._auto = True
+ return lev
+
+ def _contour_level_args(self, z, args):
+ """
+ Determine the contour levels and store in self.levels.
+ """
+ if self.filled:
+ fn = 'contourf'
+ else:
+ fn = 'contour'
+ self._auto = False
+ if self.levels is None:
+ if len(args) == 0:
+ lev = self._autolev(7)
+ else:
+ level_arg = args[0]
+ try:
+ if type(level_arg) == int:
+ lev = self._autolev(level_arg)
+ else:
+ lev = np.asarray(level_arg).astype(np.float64)
+ except Exception:
+ raise TypeError(
+ "Last {0} arg must give levels; see help({0})"
+ .format(fn))
+ self.levels = lev
+ else:
+ self.levels = np.asarray(self.levels).astype(np.float64)
+
+ if not self.filled:
+ inside = (self.levels > self.zmin) & (self.levels < self.zmax)
+ self.levels = self.levels[inside]
+ if len(self.levels) == 0:
+ self.levels = [self.zmin]
+ warnings.warn("No contour levels were found"
+ " within the data range.")
+
+ if self.filled and len(self.levels) < 2:
+ raise ValueError("Filled contours require at least 2 levels.")
+
+ if len(self.levels) > 1 and np.min(np.diff(self.levels)) <= 0.0:
+ raise ValueError("Contour levels must be increasing")
+
+ def _process_levels(self):
+ """
+ Assign values to :attr:`layers` based on :attr:`levels`,
+ adding extended layers as needed if contours are filled.
+
+ For line contours, layers simply coincide with levels;
+ a line is a thin layer. No extended levels are needed
+ with line contours.
+ """
+ # Make a private _levels to include extended regions; we
+ # want to leave the original levels attribute unchanged.
+ # (Colorbar needs this even for line contours.)
+ self._levels = list(self.levels)
+
+ if self.extend in ('both', 'min'):
+ self._levels.insert(0, min(self.levels[0], self.zmin) - 1)
+ if self.extend in ('both', 'max'):
+ self._levels.append(max(self.levels[-1], self.zmax) + 1)
+ self._levels = np.asarray(self._levels)
+
+ if not self.filled:
+ self.layers = self.levels
+ return
+
+ # layer values are mid-way between levels
+ self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
+ # ...except that extended layers must be outside the
+ # normed range:
+ if self.extend in ('both', 'min'):
+ self.layers[0] = -1e150
+ if self.extend in ('both', 'max'):
+ self.layers[-1] = 1e150
+
+ def _process_colors(self):
+ """
+ Color argument processing for contouring.
+
+ Note that we base the color mapping on the contour levels
+ and layers, not on the actual range of the Z values. This
+ means we don't have to worry about bad values in Z, and we
+ always have the full dynamic range available for the selected
+ levels.
+
+ The color is based on the midpoint of the layer, except for
+ extended end layers. By default, the norm vmin and vmax
+ are the extreme values of the non-extended levels. Hence,
+ the layer color extremes are not the extreme values of
+ the colormap itself, but approach those values as the number
+ of levels increases. An advantage of this scheme is that
+ line contours, when added to filled contours, take on
+ colors that are consistent with those of the filled regions;
+ for example, a contour line on the boundary between two
+ regions will have a color intermediate between those
+ of the regions.
+
+ """
+ self.monochrome = self.cmap.monochrome
+ if self.colors is not None:
+ # Generate integers for direct indexing.
+ i0, i1 = 0, len(self.levels)
+ if self.filled:
+ i1 -= 1
+ # Out of range indices for over and under:
+ if self.extend in ('both', 'min'):
+ i0 -= 1
+ if self.extend in ('both', 'max'):
+ i1 += 1
+ self.cvalues = list(range(i0, i1))
+ self.set_norm(colors.NoNorm())
+ else:
+ self.cvalues = self.layers
+ self.set_array(self.levels)
+ self.autoscale_None()
+ if self.extend in ('both', 'max', 'min'):
+ self.norm.clip = False
+
+ # self.tcolors are set by the "changed" method
+
+ def _process_linewidths(self):
+ linewidths = self.linewidths
+ Nlev = len(self.levels)
+ if linewidths is None:
+ tlinewidths = [(mpl.rcParams['lines.linewidth'],)] * Nlev
+ else:
+ if not cbook.iterable(linewidths):
+ linewidths = [linewidths] * Nlev
+ else:
+ linewidths = list(linewidths)
+ if len(linewidths) < Nlev:
+ nreps = int(np.ceil(Nlev / len(linewidths)))
+ linewidths = linewidths * nreps
+ if len(linewidths) > Nlev:
+ linewidths = linewidths[:Nlev]
+ tlinewidths = [(w,) for w in linewidths]
+ return tlinewidths
+
+ def _process_linestyles(self):
+ linestyles = self.linestyles
+ Nlev = len(self.levels)
+ if linestyles is None:
+ tlinestyles = ['solid'] * Nlev
+ if self.monochrome:
+ neg_ls = mpl.rcParams['contour.negative_linestyle']
+ eps = - (self.zmax - self.zmin) * 1e-15
+ for i, lev in enumerate(self.levels):
+ if lev < eps:
+ tlinestyles[i] = neg_ls
+ else:
+ if isinstance(linestyles, six.string_types):
+ tlinestyles = [linestyles] * Nlev
+ elif cbook.iterable(linestyles):
+ tlinestyles = list(linestyles)
+ if len(tlinestyles) < Nlev:
+ nreps = int(np.ceil(Nlev / len(linestyles)))
+ tlinestyles = tlinestyles * nreps
+ if len(tlinestyles) > Nlev:
+ tlinestyles = tlinestyles[:Nlev]
+ else:
+ raise ValueError("Unrecognized type for linestyles kwarg")
+ return tlinestyles
+
+ def get_alpha(self):
+ """returns alpha to be applied to all ContourSet artists"""
+ return self.alpha
+
+ def set_alpha(self, alpha):
+ """
+ Set the alpha blending value for all ContourSet artists.
+ *alpha* must be between 0 (transparent) and 1 (opaque).
+ """
+ self.alpha = alpha
+ self.changed()
+
+ def find_nearest_contour(self, x, y, indices=None, pixel=True):
+ """
+ Find the contour that is closest to a point. Defaults to
+ measuring distance in pixels (screen space - useful for manual
+ contour labeling), but this can be controlled via a keyword
+ argument.
+
+ Returns a tuple containing the contour, segment, index of
+ segment, x & y of segment point and distance to minimum point.
+
+ Optional keyword arguments:
+
+ *indices*:
+ Indexes of contour levels to consider when looking for
+ nearest point. Defaults to using all levels.
+
+ *pixel*:
+ If *True*, measure distance in pixel space, if not, measure
+ distance in axes space. Defaults to *True*.
+
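+ A small usage sketch (``cs`` is assumed to be an existing ContourSet;
+ with the default *pixel* setting the coordinates are in pixels)::
+
+ con, seg, ind, xm, ym, d = cs.find_nearest_contour(200, 150)
+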
+ """
+
+ # This function uses a method that is probably quite
+ # inefficient based on converting each contour segment to
+ # pixel coordinates and then comparing the given point to
+ # those coordinates for each contour. This will probably be
+ # quite slow for complex contours, but for normal use it works
+ # sufficiently well that the time is not noticeable.
+ # Nonetheless, improvements could probably be made.
+
+ if indices is None:
+ indices = list(xrange(len(self.levels)))
+
+ dmin = np.inf
+ conmin = None
+ segmin = None
+ xmin = None
+ ymin = None
+
+ point = np.array([x, y])
+
+ for icon in indices:
+ con = self.collections[icon]
+ trans = con.get_transform()
+ paths = con.get_paths()
+
+ for segNum, linepath in enumerate(paths):
+ lc = linepath.vertices
+ # transfer all data points to screen coordinates if desired
+ if pixel:
+ lc = trans.transform(lc)
+
+ d, xc, leg = _find_closest_point_on_path(lc, point)
+ if d < dmin:
+ dmin = d
+ conmin = icon
+ segmin = segNum
+ imin = leg[1]
+ xmin = xc[0]
+ ymin = xc[1]
+
+ return (conmin, segmin, imin, xmin, ymin, dmin)
+
+
+class QuadContourSet(ContourSet):
+ """
+ Create and store a set of contour lines or filled regions.
+
+ User-callable method: `~.axes.Axes.clabel`
+
+ Attributes
+ ----------
+ ax:
+ The axes object in which the contours are drawn.
+
+ collections:
+ A silent_list of LineCollections or PolyCollections.
+
+ levels:
+ Contour levels.
+
+ layers:
+ Same as levels for line contours; half-way between
+ levels for filled contours. See :meth:`_process_colors` method.
+ """
+
+ def _process_args(self, *args, **kwargs):
+ """
+ Process args and kwargs.
+ """
+ if isinstance(args[0], QuadContourSet):
+ if self.levels is None:
+ self.levels = args[0].levels
+ self.zmin = args[0].zmin
+ self.zmax = args[0].zmax
+ self._corner_mask = args[0]._corner_mask
+ contour_generator = args[0]._contour_generator
+ self._mins = args[0]._mins
+ self._maxs = args[0]._maxs
+ else:
+ self._corner_mask = kwargs.pop('corner_mask', None)
+ if self._corner_mask is None:
+ self._corner_mask = mpl.rcParams['contour.corner_mask']
+
+ x, y, z = self._contour_args(args, kwargs)
+
+ _mask = ma.getmask(z)
+ if _mask is ma.nomask or not _mask.any():
+ _mask = None
+
+ contour_generator = _contour.QuadContourGenerator(
+ x, y, z.filled(), _mask, self._corner_mask, self.nchunk)
+
+ t = self.get_transform()
+
+ # if the transform is not trans data, and some part of it
+ # contains transData, transform the xs and ys to data coordinates
+ if (t != self.ax.transData and
+ any(t.contains_branch_seperately(self.ax.transData))):
+ trans_to_data = t - self.ax.transData
+ pts = (np.vstack([x.flat, y.flat]).T)
+ transformed_pts = trans_to_data.transform(pts)
+ x = transformed_pts[..., 0]
+ y = transformed_pts[..., 1]
+
+ self._mins = [ma.min(x), ma.min(y)]
+ self._maxs = [ma.max(x), ma.max(y)]
+
+ self._contour_generator = contour_generator
+
+ return kwargs
+
+ def _get_allsegs_and_allkinds(self):
+ """Compute ``allsegs`` and ``allkinds`` using C extension."""
+ allsegs = []
+ if self.filled:
+ lowers, uppers = self._get_lowers_and_uppers()
+ allkinds = []
+ for level, level_upper in zip(lowers, uppers):
+ vertices, kinds = \
+ self._contour_generator.create_filled_contour(
+ level, level_upper)
+ allsegs.append(vertices)
+ allkinds.append(kinds)
+ else:
+ allkinds = None
+ for level in self.levels:
+ vertices = self._contour_generator.create_contour(level)
+ allsegs.append(vertices)
+ return allsegs, allkinds
+
+ def _contour_args(self, args, kwargs):
+ if self.filled:
+ fn = 'contourf'
+ else:
+ fn = 'contour'
+ Nargs = len(args)
+ if Nargs <= 2:
+ z = ma.asarray(args[0], dtype=np.float64)
+ x, y = self._initialize_x_y(z)
+ args = args[1:]
+ elif Nargs <= 4:
+ x, y, z = self._check_xyz(args[:3], kwargs)
+ args = args[3:]
+ else:
+ raise TypeError("Too many arguments to %s; see help(%s)" %
+ (fn, fn))
+ z = ma.masked_invalid(z, copy=False)
+ self.zmax = float(z.max())
+ self.zmin = float(z.min())
+ if self.logscale and self.zmin <= 0:
+ z = ma.masked_where(z <= 0, z)
+ warnings.warn('Log scale: values of z <= 0 have been masked')
+ self.zmin = float(z.min())
+ self._contour_level_args(z, args)
+ return (x, y, z)
+
+ def _check_xyz(self, args, kwargs):
+ """
+ For functions like contour, check that the dimensions
+ of the input arrays match; if x and y are 1D, convert
+ them to 2D using meshgrid.
+
+ Possible change: I think we should make and use an ArgumentError
+ Exception class (here and elsewhere).
+ """
+ x, y = args[:2]
+ kwargs = self.ax._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
+ x = self.ax.convert_xunits(x)
+ y = self.ax.convert_yunits(y)
+
+ x = np.asarray(x, dtype=np.float64)
+ y = np.asarray(y, dtype=np.float64)
+ z = ma.asarray(args[2], dtype=np.float64)
+
+ if z.ndim != 2:
+ raise TypeError("Input z must be a 2D array.")
+ elif z.shape[0] < 2 or z.shape[1] < 2:
+ raise TypeError("Input z must be at least a 2x2 array.")
+ else:
+ Ny, Nx = z.shape
+
+ if x.ndim != y.ndim:
+ raise TypeError("Number of dimensions of x and y should match.")
+
+ if x.ndim == 1:
+
+ nx, = x.shape
+ ny, = y.shape
+
+ if nx != Nx:
+ raise TypeError("Length of x must be number of columns in z.")
+
+ if ny != Ny:
+ raise TypeError("Length of y must be number of rows in z.")
+
+ x, y = np.meshgrid(x, y)
+
+ elif x.ndim == 2:
+
+ if x.shape != z.shape:
+ raise TypeError("Shape of x does not match that of z: found "
+ "{0} instead of {1}.".format(x.shape, z.shape))
+
+ if y.shape != z.shape:
+ raise TypeError("Shape of y does not match that of z: found "
+ "{0} instead of {1}.".format(y.shape, z.shape))
+ else:
+ raise TypeError("Inputs x and y must be 1D or 2D.")
+
+ return x, y, z
+
+ def _initialize_x_y(self, z):
+ """
+ Return X, Y arrays such that contour(Z) will match imshow(Z)
+ if origin is not None.
+ The center of pixel Z[i,j] depends on origin:
+ if origin is None, x = j, y = i;
+ if origin is 'lower', x = j + 0.5, y = i + 0.5;
+ if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5
+ If extent is not None, x and y will be scaled to match,
+ as in imshow.
+ If origin is None and extent is not None, then extent
+ will give the minimum and maximum values of x and y.
+ """
+ if z.ndim != 2:
+ raise TypeError("Input must be a 2D array.")
+ elif z.shape[0] < 2 or z.shape[1] < 2:
+ raise TypeError("Input z must be at least a 2x2 array.")
+ else:
+ Ny, Nx = z.shape
+ if self.origin is None: # Not for image-matching.
+ if self.extent is None:
+ return np.meshgrid(np.arange(Nx), np.arange(Ny))
+ else:
+ x0, x1, y0, y1 = self.extent
+ x = np.linspace(x0, x1, Nx)
+ y = np.linspace(y0, y1, Ny)
+ return np.meshgrid(x, y)
+ # Match image behavior:
+ if self.extent is None:
+ x0, x1, y0, y1 = (0, Nx, 0, Ny)
+ else:
+ x0, x1, y0, y1 = self.extent
+ dx = (x1 - x0) / Nx
+ dy = (y1 - y0) / Ny
+ x = x0 + (np.arange(Nx) + 0.5) * dx
+ y = y0 + (np.arange(Ny) + 0.5) * dy
+ if self.origin == 'upper':
+ y = y[::-1]
+ return np.meshgrid(x, y)
+
+ _contour_doc = """
+ Plot contours.
+
+ Call signature::
+
+ contour([X, Y,] Z, [levels], **kwargs)
+
+ :func:`~matplotlib.pyplot.contour` and
+ :func:`~matplotlib.pyplot.contourf` draw contour lines and
+ filled contours, respectively. Except as noted, function
+ signatures and return values are the same for both versions.
+
+
+ Parameters
+ ----------
+ X, Y : array-like, optional
+ The coordinates of the values in *Z*.
+
+ *X* and *Y* must both be 2-D with the same shape as *Z* (e.g.
+ created via :func:`numpy.meshgrid`), or they must both be 1-D such
+ that ``len(X) == M`` is the number of columns in *Z* and
+ ``len(Y) == N`` is the number of rows in *Z*.
+
+ If not given, they are assumed to be integer indices, i.e.
+ ``X = range(M)``, ``Y = range(N)``.
+
+ Z : array-like(N, M)
+ The height values over which the contour is drawn.
+
+ levels : int or array-like, optional
+ Determines the number and positions of the contour lines / regions.
+
+ If an int *n*, use *n* data intervals; i.e. draw *n+1* contour
+ lines. The level heights are automatically chosen.
+
+ If array-like, draw contour lines at the specified levels.
+ The values must be in increasing order.
+
+ Returns
+ -------
+ :class:`~matplotlib.contour.QuadContourSet`
+
+ Other Parameters
+ ----------------
+ corner_mask : bool, optional
+ Enable/disable corner masking, which only has an effect if *Z* is
+ a masked array. If ``False``, any quad touching a masked point is
+ masked out. If ``True``, only the triangular corners of quads
+ nearest those points are always masked out, other triangular
+ corners comprising three unmasked points are contoured as usual.
+
+ Defaults to ``rcParams['contour.corner_mask']``, which defaults to
+ ``True``.
+
+ colors : color string or sequence of colors, optional
+ The colors of the levels, i.e. the lines for `.contour` and the
+ areas for `.contourf`.
+
+ The sequence is cycled for the levels in ascending order. If the
+ sequence is shorter than the number of levels, it's repeated.
+
+ As a shortcut, single color strings may be used in place of
+ one-element lists, i.e. ``'red'`` instead of ``['red']`` to color
+ all levels with the same color. This shortcut only works for
+ color strings, not for other ways of specifying colors.
+
+ By default (value *None*), the colormap specified by *cmap*
+ will be used.
+
+ alpha : float, optional
+ The alpha blending value, between 0 (transparent) and 1 (opaque).
+
+ cmap : str or `.Colormap`, optional
+ A `.Colormap` instance or registered colormap name. The colormap
+ maps the level values to colors.
+ Defaults to :rc:`image.cmap`.
+
+ If given, *colors* takes precedence over *cmap*.
+
+ norm : `~matplotlib.colors.Normalize`, optional
+ If a colormap is used, the `.Normalize` instance scales the level
+ values to the canonical colormap range [0, 1] for mapping to
+ colors. If not given, the default linear scaling is used.
+
+ vmin, vmax : float, optional
+ If not *None*, either or both of these values will be supplied to
+ the `.Normalize` instance, overriding the default color scaling
+ based on *levels*.
+
+ origin : {*None*, 'upper', 'lower', 'image'}, optional
+ Determines the orientation and exact position of *Z* by specifying
+ the position of ``Z[0, 0]``. This is only relevant if *X* and *Y*
+ are not given.
+
+ - *None*: ``Z[0, 0]`` is at X=0, Y=0 in the lower left corner.
+ - 'lower': ``Z[0, 0]`` is at X=0.5, Y=0.5 in the lower left corner.
+ - 'upper': ``Z[0, 0]`` is at X=0.5, Y=N-0.5 in the upper left
+ corner.
+ - 'image': Use the value from :rc:`image.origin`. Note: The value
+ *None* in the rcParam is currently handled as 'lower'.
+
+ extent : (x0, x1, y0, y1), optional
+ If *origin* is not *None*, then *extent* is interpreted as
+ in :func:`matplotlib.pyplot.imshow`: it gives the outer
+ pixel boundaries. In this case, the position of Z[0,0]
+ is the center of the pixel, not a corner. If *origin* is
+ *None*, then (*x0*, *y0*) is the position of Z[0,0], and
+ (*x1*, *y1*) is the position of Z[-1,-1].
+
+ This keyword is not active if *X* and *Y* are specified in
+ the call to contour.
+
+ locator : ticker.Locator subclass, optional
+ The locator is used to determine the contour levels if they
+ are not given explicitly via *levels*.
+ Defaults to `~.ticker.MaxNLocator`.
+
+ extend : {'neither', 'both', 'min', 'max'}, optional
+ Unless this is 'neither', contour levels are automatically
+ added to one or both ends of the range so that all data
+ are included. These added ranges are then mapped to the
+ special colormap values which default to the ends of the
+ colormap range, but can be set via
+ :meth:`matplotlib.colors.Colormap.set_under` and
+ :meth:`matplotlib.colors.Colormap.set_over` methods.
+
+ xunits, yunits : registered units, optional
+ Override axis units by specifying an instance of a
+ :class:`matplotlib.units.ConversionInterface`.
+
+ antialiased : bool, optional
+ Enable antialiasing, overriding the defaults. For
+ filled contours, the default is *True*. For line contours,
+ it is taken from :rc:`lines.antialiased`.
+
+ nchunk : int >= 0, optional
+ If 0, no subdivision of the domain. Specify a positive integer to
+ divide the domain into subdomains of *nchunk* by *nchunk* quads.
+ Chunking reduces the maximum length of polygons generated by the
+ contouring algorithm which reduces the rendering workload passed
+ on to the backend and also requires slightly less RAM. It can
+ however introduce rendering artifacts at chunk boundaries depending
+ on the backend, the *antialiased* flag and value of *alpha*.
+
+ linewidths : float or sequence of float, optional
+ *Only applies to* `.contour`.
+
+ The line width of the contour lines.
+
+ If a number, all levels will be plotted with this linewidth.
+
+ If a sequence, the levels in ascending order will be plotted with
+ the linewidths in the order specified.
+
+ Defaults to :rc:`lines.linewidth`.
+
+ linestyles : {*None*, 'solid', 'dashed', 'dashdot', 'dotted'}, optional
+ *Only applies to* `.contour`.
+
+ If *linestyles* is *None*, the default is 'solid' unless the lines
+ are monochrome. In that case, negative contours will take their
+ linestyle from the :rc:`contour.negative_linestyle` setting.
+
+ *linestyles* can also be an iterable of the above strings
+ specifying a set of linestyles to be used. If this
+ iterable is shorter than the number of contour levels
+ it will be repeated as necessary.
+
+ hatches : List[str], optional
+ *Only applies to* `.contourf`.
+
+ A list of cross hatch patterns to use on the filled areas.
+ If None, no hatching will be added to the contour.
+ Hatching is supported in the PostScript, PDF, SVG and Agg
+ backends only.
+
+
+ Notes
+ -----
+ 1. :func:`~matplotlib.pyplot.contourf` differs from the MATLAB
+ version in that it does not draw the polygon edges.
+ To draw edges, add line contours with
+ calls to :func:`~matplotlib.pyplot.contour`.
+
+ 2. contourf fills intervals that are closed at the top; that
+ is, for boundaries *z1* and *z2*, the filled region is::
+
+ z1 < Z <= z2
+
+ There is one exception: if the lowest boundary coincides with
+ the minimum value of the *Z* array, then that minimum value
+ will be included in the lowest interval.
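+
+ Examples
+ --------
+ A minimal sketch of typical usage (the data below are purely
+ illustrative)::
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ x, y = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
+ z = np.sin(x) * np.cos(y)
+
+ fig, ax = plt.subplots()
+ cs = ax.contour(x, y, z, 8)
+ ax.clabel(cs, inline=True, fontsize=8)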
+ """
diff --git a/contrib/python/matplotlib/py2/matplotlib/dates.py b/contrib/python/matplotlib/py2/matplotlib/dates.py
new file mode 100644
index 00000000000..290341146d8
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/dates.py
@@ -0,0 +1,1839 @@
+"""
+Matplotlib provides sophisticated date plotting capabilities, standing on the
+shoulders of python :mod:`datetime` and the add-on modules :mod:`pytz` and
+:mod:`dateutil`.
+
+
+.. _date-format:
+
+Matplotlib date format
+----------------------
+Matplotlib represents dates using floating point numbers specifying the number
+of days since 0001-01-01 UTC, plus 1. For example, 0001-01-01, 06:00 is 1.25,
+not 0.25. Values < 1, i.e. dates before 0001-01-01 UTC are not supported.
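+
+A minimal sketch of the convention (the values follow directly from the
+definition above; UTC is the default timezone)::
+
+ import datetime
+ import matplotlib.dates as mdates
+
+ mdates.date2num(datetime.datetime(1, 1, 1, 6))  # 1.25
+ mdates.num2date(1.25)  # datetime for 0001-01-01 06:00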
+
+There are a number of helper functions to convert between :mod:`datetime`
+objects and Matplotlib dates:
+
+.. currentmodule:: matplotlib.dates
+
+.. autosummary::
+ :nosignatures:
+
+ date2num
+ num2date
+ num2timedelta
+ epoch2num
+ num2epoch
+ mx2num
+ drange
+
+.. note::
+
+ Like Python's datetime, mpl uses the Gregorian calendar for all
+ conversions between dates and floating point numbers. This practice
+ is not universal, and calendar differences can cause confusing
+ differences between what Python and mpl give as the number of days
+ since 0001-01-01 and what other software and databases yield. For
+ example, the US Naval Observatory uses a calendar that switches
+ from Julian to Gregorian in October, 1582. Hence, using their
+ calculator, the number of days between 0001-01-01 and 2006-04-01 is
+ 732403, whereas using the Gregorian calendar via the datetime
+ module we find::
+
+ In [1]: date(2006, 4, 1).toordinal() - date(1, 1, 1).toordinal()
+ Out[1]: 732401
+
+All the Matplotlib date converters, tickers and formatters are timezone aware.
+If no explicit timezone is provided, the rcParam ``timezone`` is assumed. If
+you want to use a custom time zone, pass a :class:`pytz.timezone` instance
+with the tz keyword argument to :func:`num2date`, :func:`.plot_date`, and any
+custom date tickers or locators you create.
+See `pytz <http://pythonhosted.org/pytz/>`_ for information on :mod:`pytz` and
+timezone handling.
+
+A wide range of specific and general purpose date tick locators and
+formatters are provided in this module. See
+:mod:`matplotlib.ticker` for general information on tick locators
+and formatters. These are described below.
+
+
+The `dateutil module <https://dateutil.readthedocs.io/en/stable/>`_ provides
+additional code to handle date ticking, making it easy to place ticks
+on any kind of date. See examples below.
+
+Date tickers
+------------
+
+Most of the date tickers can locate single or multiple values. For
+example::
+
+ # import constants for the days of the week
+ from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
+
+ # tick on mondays every week
+ loc = WeekdayLocator(byweekday=MO, tz=tz)
+
+ # tick on mondays and saturdays
+ loc = WeekdayLocator(byweekday=(MO, SA))
+
+In addition, most of the constructors take an interval argument::
+
+ # tick on mondays every second week
+ loc = WeekdayLocator(byweekday=MO, interval=2)
+
+The rrule locator allows completely general date ticking::
+
+ # tick every 5th easter
+ rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
+ loc = RRuleLocator(rule)
+
+Here are all the date tickers:
+
+ * :class:`MicrosecondLocator`: locate microseconds
+
+ * :class:`SecondLocator`: locate seconds
+
+ * :class:`MinuteLocator`: locate minutes
+
+ * :class:`HourLocator`: locate hours
+
+ * :class:`DayLocator`: locate specified days of the month
+
+ * :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
+
+ * :class:`MonthLocator`: locate months, e.g., 7 for July
+
+ * :class:`YearLocator`: locate years that are multiples of base
+
+ * :class:`RRuleLocator`: locate using a
+ :class:`matplotlib.dates.rrulewrapper`. The
+ :class:`rrulewrapper` is a simple wrapper around a
+ :class:`dateutil.rrule` (`dateutil
+ <https://dateutil.readthedocs.io/en/stable/>`_) which allows almost
+ arbitrary date tick specifications. See `rrule example
+ <../gallery/ticks_and_spines/date_demo_rrule.html>`_.
+
+ * :class:`AutoDateLocator`: On autoscale, this class picks the best
+ :class:`DateLocator` (e.g., :class:`RRuleLocator`)
+ to set the view limits and the tick
+ locations. If called with ``interval_multiples=True`` it will
+ make ticks line up with sensible multiples of the tick intervals. E.g.
+ if the interval is 4 hours, it will pick hours 0, 4, 8, etc as ticks.
+ This behaviour is not guaranteed by default.
+
+Date formatters
+---------------
+
+Here are all the date formatters:
+
+ * :class:`AutoDateFormatter`: attempts to figure out the best format
+ to use. This is most useful when used with the :class:`AutoDateLocator`.
+
+ * :class:`DateFormatter`: use :func:`strftime` format strings
+
+ * :class:`IndexDateFormatter`: date plots with implicit *x*
+ indexing.
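+
+For example, a fixed date format on an existing axis (a minimal sketch;
+``ax`` is assumed to be an existing Axes)::
+
+ from matplotlib.dates import DateFormatter
+ ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))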
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import zip
+import re
+import time
+import math
+import datetime
+import functools
+
+import warnings
+import logging
+
+from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
+ MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
+ SECONDLY)
+from dateutil.relativedelta import relativedelta
+import dateutil.parser
+import numpy as np
+
+
+import matplotlib
+from matplotlib import rcParams
+import matplotlib.units as units
+import matplotlib.cbook as cbook
+import matplotlib.ticker as ticker
+
+_log = logging.getLogger(__name__)
+
+__all__ = ('date2num', 'num2date', 'num2timedelta', 'drange', 'epoch2num',
+ 'num2epoch', 'mx2num', 'DateFormatter',
+ 'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
+ 'RRuleLocator', 'AutoDateLocator', 'YearLocator',
+ 'MonthLocator', 'WeekdayLocator',
+ 'DayLocator', 'HourLocator', 'MinuteLocator',
+ 'SecondLocator', 'MicrosecondLocator',
+ 'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
+ 'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
+ 'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
+ 'seconds', 'minutes', 'hours', 'weeks')
+
+
+# Make a simple UTC instance so we don't always have to import
+# pytz. From the python datetime library docs:
+
+class _UTC(datetime.tzinfo):
+ """UTC"""
+
+ def utcoffset(self, dt):
+ return datetime.timedelta(0)
+
+ def tzname(self, dt):
+ return str("UTC")
+
+ def dst(self, dt):
+ return datetime.timedelta(0)
+
+
+UTC = _UTC()
+
+
+def _get_rc_timezone():
+ """
+ Retrieve the preferred timezone from the rcParams dictionary.
+ """
+ s = matplotlib.rcParams['timezone']
+ if s == 'UTC':
+ return UTC
+ import pytz
+ return pytz.timezone(s)
+
+
+"""
+Time-related constants.
+"""
+EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
+JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
+MICROSECONDLY = SECONDLY + 1
+HOURS_PER_DAY = 24.
+MIN_PER_HOUR = 60.
+SEC_PER_MIN = 60.
+MONTHS_PER_YEAR = 12.
+
+DAYS_PER_WEEK = 7.
+DAYS_PER_MONTH = 30.
+DAYS_PER_YEAR = 365.0
+
+MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
+
+SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
+SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
+SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
+
+MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
+
+MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
+ MO, TU, WE, TH, FR, SA, SU)
+WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
+
+
+def _to_ordinalf(dt):
+ """
+ Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
+ days, preserving hours, minutes, seconds and microseconds. Return value
+ is a :func:`float`.
+ """
+ # Convert to UTC
+ tzi = getattr(dt, 'tzinfo', None)
+ if tzi is not None:
+ dt = dt.astimezone(UTC)
+ tzi = UTC
+
+ base = float(dt.toordinal())
+
+ # If it's sufficiently datetime-like, it will have a `date()` method
+ cdate = getattr(dt, 'date', lambda: None)()
+ if cdate is not None:
+ # Get a datetime object at midnight UTC
+ midnight_time = datetime.time(0, tzinfo=tzi)
+
+ rdt = datetime.datetime.combine(cdate, midnight_time)
+
+ # Append the seconds as a fraction of a day
+ base += (dt - rdt).total_seconds() / SEC_PER_DAY
+
+ return base
+
+
+# a version of _to_ordinalf that can operate on numpy arrays
+_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
+
+
+def _dt64_to_ordinalf(d):
+ """
+ Convert `numpy.datetime64` or an ndarray of those types to Gregorian
+ date as UTC float. Roundoff is via float64 precision. Practically:
+ microseconds for dates between 290301 BC, 294241 AD, milliseconds for
+ larger dates (see `numpy.datetime64`). Nanoseconds aren't possible
+ because we do times compared to ``0001-01-01T00:00:00`` (plus one day).
+ """
+
+ # the "extra" ensures that we at least allow the dynamic range out to
+ # seconds. That should get out to +/-2e11 years.
+ extra = d - d.astype('datetime64[s]')
+ extra = extra.astype('timedelta64[ns]')
+ t0 = np.datetime64('0001-01-01T00:00:00').astype('datetime64[s]')
+ dt = (d.astype('datetime64[s]') - t0).astype(np.float64)
+ dt += extra.astype(np.float64) / 1.0e9
+ dt = dt / SEC_PER_DAY + 1.0
+
+ NaT_int = np.datetime64('NaT').astype(np.int64)
+ d_int = d.astype(np.int64)
+ try:
+ dt[d_int == NaT_int] = np.nan
+ except TypeError:
+ if d_int == NaT_int:
+ dt = np.nan
+ return dt
+
+
+def _from_ordinalf(x, tz=None):
+ """
+ Convert a Gregorian float date, preserving hours, minutes,
+ seconds and microseconds. Return value is a `.datetime`.
+
+ The input date *x* is a float in ordinal days at UTC, and the output will
+ be the specified `.datetime` object corresponding to that time in
+ timezone *tz*, or if *tz* is ``None``, in the timezone specified in
+ :rc:`timezone`.
+ """
+ if tz is None:
+ tz = _get_rc_timezone()
+
+ ix, remainder = divmod(x, 1)
+ ix = int(ix)
+ if ix < 1:
+ raise ValueError('Cannot convert {} to a date. This often happens if '
+ 'non-datetime values are passed to an axis that '
+ 'expects datetime objects.'.format(ix))
+ dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
+
+ # Since the input date `x` float is unable to preserve microsecond
+ # precision of time representation in non-antique years, the
+ # resulting datetime is rounded to the nearest multiple of
+ # `musec_prec`. A value of 20 is appropriate for current dates.
+ musec_prec = 20
+ remainder_musec = int(round(remainder * MUSECONDS_PER_DAY / musec_prec)
+ * musec_prec)
+
+ # For people trying to plot with full microsecond precision, enable
+ # an early-year workaround
+ if x < 30 * 365:
+ remainder_musec = int(round(remainder * MUSECONDS_PER_DAY))
+
+ # add hours, minutes, seconds, microseconds
+ dt += datetime.timedelta(microseconds=remainder_musec)
+
+ return dt.astimezone(tz)
+
+
+# a version of _from_ordinalf that can operate on numpy arrays
+_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
+
+
+class strpdate2num(object):
+ """
+ Use this class to parse date strings to matplotlib datenums when
+ you know the date format string of the date you are parsing.
+ """
+ def __init__(self, fmt):
+ """ fmt: any valid strptime format is supported """
+ self.fmt = fmt
+
+ def __call__(self, s):
+ """s : string to be converted
+ return value: a date2num float
+ """
+ return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
+
+
+class bytespdate2num(strpdate2num):
+ """
+ Use this class to parse date strings to matplotlib datenums when
+ you know the date format string of the date you are parsing. See
+ :file:`examples/misc/load_converter.py`.
+ """
+ def __init__(self, fmt, encoding='utf-8'):
+ """
+ Args:
+ fmt: any valid strptime format is supported
+ encoding: encoding to use on byte input (default: 'utf-8')
+ """
+ super(bytespdate2num, self).__init__(fmt)
+ self.encoding = encoding
+
+ def __call__(self, b):
+ """
+ Args:
+ b: byte input to be converted
+ Returns:
+ A date2num float
+ """
+ s = b.decode(self.encoding)
+ return super(bytespdate2num, self).__call__(s)
+
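+
+# A short usage sketch for the two parser classes above; the helper below is
+# illustrative only and the format string is just an example.
+def _example_strpdate2num():
+    parse = strpdate2num('%Y-%m-%d %H:%M')
+    expected = date2num(datetime.datetime(2018, 3, 1, 12, 30))
+    assert parse('2018-03-01 12:30') == expected
+    # bytespdate2num additionally decodes byte strings before parsing.
+    bparse = bytespdate2num('%Y-%m-%d %H:%M')
+    assert bparse(b'2018-03-01 12:30') == expected
+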
+
+# a version of dateutil.parser.parse that can operate on numpy arrays
+_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
+
+
+def datestr2num(d, default=None):
+ """
+ Convert a date string to a datenum using
+ :func:`dateutil.parser.parse`.
+
+ Parameters
+ ----------
+ d : string or sequence of strings
+ The dates to convert.
+
+ default : datetime instance, optional
+ The default date to use when fields are missing in *d*.
+ """
+ if isinstance(d, six.string_types):
+ dt = dateutil.parser.parse(d, default=default)
+ return date2num(dt)
+ else:
+ if default is not None:
+ d = [dateutil.parser.parse(s, default=default) for s in d]
+ d = np.asarray(d)
+ if not d.size:
+ return d
+ return date2num(_dateutil_parser_parse_np_vectorized(d))
+
+
+def date2num(d):
+ """
+ Convert datetime objects to Matplotlib dates.
+
+ Parameters
+ ----------
+ d : `datetime.datetime` or `numpy.datetime64` or sequences of these
+
+ Returns
+ -------
+ float or sequence of floats
+ Number of days (fraction part represents hours, minutes, seconds, ms)
+ since 0001-01-01 00:00:00 UTC, plus one.
+
+ Notes
+ -----
+ The addition of one here is a historical artifact. Also, note that the
+ Gregorian calendar is assumed; this is not universal practice.
+ For details see the module docstring.
+ """
+
+ if hasattr(d, "values"):
+ # this unpacks pandas series or dataframes...
+ d = d.values
+
+ if ((isinstance(d, np.ndarray) and np.issubdtype(d.dtype, np.datetime64))
+ or isinstance(d, np.datetime64)):
+ return _dt64_to_ordinalf(d)
+ if not cbook.iterable(d):
+ return _to_ordinalf(d)
+ else:
+ d = np.asarray(d)
+ if not d.size:
+ return d
+ return _to_ordinalf_np_vectorized(d)
+
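+
+# A worked sketch of the "days since 0001-01-01 00:00:00 UTC, plus one"
+# convention described above; the helper below is illustrative only.
+def _example_date2num():
+    # The origin itself is 1.0, and noon of the following day is 2.5.
+    assert date2num(datetime.datetime(1, 1, 1)) == 1.0
+    assert date2num(datetime.datetime(1, 1, 2, 12, 0)) == 2.5
+    # Sequences are converted elementwise.
+    vals = date2num([datetime.datetime(1, 1, 1),
+                     datetime.datetime(1, 1, 2, 12, 0)])
+    assert list(vals) == [1.0, 2.5]
+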
+
+def julian2num(j):
+ """
+ Convert a Julian date (or sequence) to a Matplotlib date (or sequence).
+
+ Parameters
+ ----------
+ j : float or sequence of floats
+ Julian date(s)
+
+ Returns
+ -------
+ float or sequence of floats
+ Matplotlib date(s)
+ """
+ if cbook.iterable(j):
+ j = np.asarray(j)
+ return j - JULIAN_OFFSET
+
+
+def num2julian(n):
+ """
+ Convert a Matplotlib date (or sequence) to a Julian date (or sequence).
+
+ Parameters
+ ----------
+ n : float or sequence of floats
+ Matplotlib date(s)
+
+ Returns
+ -------
+ float or sequence of floats
+ Julian date(s)
+ """
+ if cbook.iterable(n):
+ n = np.asarray(n)
+ return n + JULIAN_OFFSET
+
+
+def num2date(x, tz=None):
+ """
+ Convert Matplotlib dates to `~datetime.datetime` objects.
+
+ Parameters
+ ----------
+ x : float or sequence of floats
+ Number of days (fraction part represents hours, minutes, seconds)
+ since 0001-01-01 00:00:00 UTC, plus one.
+ tz : string, optional
+ Timezone of *x* (defaults to rcparams ``timezone``).
+
+ Returns
+ -------
+ `~datetime.datetime` or sequence of `~datetime.datetime`
+ Dates are returned in timezone *tz*.
+
+ If *x* is a sequence, a sequence of :class:`datetime` objects will
+ be returned.
+
+ Notes
+ -----
+ The addition of one here is a historical artifact. Also, note that the
+ Gregorian calendar is assumed; this is not universal practice.
+ For details, see the module docstring.
+ """
+ if tz is None:
+ tz = _get_rc_timezone()
+ if not cbook.iterable(x):
+ return _from_ordinalf(x, tz)
+ else:
+ x = np.asarray(x)
+ if not x.size:
+ return x
+ return _from_ordinalf_np_vectorized(x, tz).tolist()
+
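+
+# A round-trip sketch for date2num/num2date; UTC is passed explicitly so the
+# result does not depend on :rc:`timezone`. The helper below is illustrative
+# only.
+def _example_num2date_roundtrip():
+    d = datetime.datetime(2018, 3, 1, 12, 30, tzinfo=UTC)
+    d2 = num2date(date2num(d), tz=UTC)
+    # num2date rounds to a 20 microsecond grid for modern dates, so the
+    # round trip is exact only to a few tens of microseconds.
+    assert abs(d2 - d) < datetime.timedelta(microseconds=50)
+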
+
+def _ordinalf_to_timedelta(x):
+ return datetime.timedelta(days=x)
+
+
+_ordinalf_to_timedelta_np_vectorized = np.vectorize(_ordinalf_to_timedelta)
+
+
+def num2timedelta(x):
+ """
+ Convert number of days to a `~datetime.timedelta` object.
+
+ If *x* is a sequence, a sequence of `~datetime.timedelta` objects will
+ be returned.
+
+ Parameters
+ ----------
+ x : float, sequence of floats
+ Number of days. The fraction part represents hours, minutes, seconds.
+
+ Returns
+ -------
+ `datetime.timedelta` or list[`datetime.timedelta`]
+
+ """
+ if not cbook.iterable(x):
+ return _ordinalf_to_timedelta(x)
+ else:
+ x = np.asarray(x)
+ if not x.size:
+ return x
+ return _ordinalf_to_timedelta_np_vectorized(x).tolist()
+
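+
+# A tiny sketch of num2timedelta; the helper below is illustrative only.
+def _example_num2timedelta():
+    assert num2timedelta(1.5) == datetime.timedelta(days=1, hours=12)
+    assert num2timedelta([0.5, 1.0]) == [datetime.timedelta(hours=12),
+                                         datetime.timedelta(days=1)]
+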
+
+def drange(dstart, dend, delta):
+ """
+ Return a sequence of equally spaced Matplotlib dates.
+
+ The dates start at *dstart* and reach up to, but not including *dend*.
+ They are spaced by *delta*.
+
+ Parameters
+ ----------
+ dstart, dend : `~datetime.datetime`
+ The date limits.
+ delta : `datetime.timedelta`
+ Spacing of the dates.
+
+ Returns
+ -------
+ drange : `numpy.array`
+        An array of floats representing Matplotlib dates.
+
+ """
+ f1 = date2num(dstart)
+ f2 = date2num(dend)
+ step = delta.total_seconds() / SEC_PER_DAY
+
+    # calculate the difference between dend and dstart in multiples of delta
+ num = int(np.ceil((f2 - f1) / step))
+
+ # calculate end of the interval which will be generated
+ dinterval_end = dstart + num * delta
+
+    # ensure that a half-open interval [dstart, dend) will be generated
+    if dinterval_end >= dend:
+        # if the endpoint is greater than or equal to dend, subtract one delta
+ dinterval_end -= delta
+ num -= 1
+
+ f2 = date2num(dinterval_end) # new float-endpoint
+ return np.linspace(f1, f2, num + 1)
+
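+
+# A short sketch of drange's half-open behaviour; the helper below is
+# illustrative only.
+def _example_drange():
+    start = datetime.datetime(2018, 3, 1)
+    end = datetime.datetime(2018, 3, 2)
+    step = datetime.timedelta(hours=6)
+    vals = drange(start, end, step)
+    # [start, end) with a 6 hour step gives 4 points spaced 0.25 days apart.
+    assert len(vals) == 4
+    assert vals[0] == date2num(start)
+    assert abs(vals[1] - vals[0] - 0.25) < 1e-9
+
+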
+### date tickers and formatters ###
+
+
+class DateFormatter(ticker.Formatter):
+ """
+    Format a tick value (a Matplotlib date float, i.e. days since
+    0001-01-01 UTC plus one) with a :func:`strftime` format string.
+
+    Python only supports :mod:`datetime` :func:`strftime` formatting for
+    years 1900 and later. Thanks to Andrew Dalke of Dalke Scientific
+    Software, who contributed the :func:`strftime` code below to support
+    dates earlier than this year.
+ """
+
+ illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
+
+ def __init__(self, fmt, tz=None):
+ """
+ *fmt* is a :func:`strftime` format string; *tz* is the
+ :class:`tzinfo` instance.
+ """
+ if tz is None:
+ tz = _get_rc_timezone()
+ self.fmt = fmt
+ self.tz = tz
+
+ def __call__(self, x, pos=0):
+ if x == 0:
+ raise ValueError('DateFormatter found a value of x=0, which is '
+ 'an illegal date. This usually occurs because '
+ 'you have not informed the axis that it is '
+ 'plotting dates, e.g., with ax.xaxis_date()')
+ dt = num2date(x, self.tz)
+ return self.strftime(dt, self.fmt)
+
+ def set_tzinfo(self, tz):
+ self.tz = tz
+
+ def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
+ """Helper function for replacing substrings sub1 and sub2
+ located at the same indexes in strings s1 and s2 respectively,
+ with the string replacement. It is expected that sub1 and sub2
+ have the same length. Returns the pair s1, s2 after the
+ substitutions.
+ """
+ # Find common indexes of substrings sub1 in s1 and sub2 in s2
+ # and make substitutions inplace. Because this is inplace,
+ # it is okay if len(replacement) != len(sub1), len(sub2).
+ i = 0
+ while True:
+ j = s1.find(sub1, i)
+ if j == -1:
+ break
+
+ i = j + 1
+ if s2[j:j + len(sub2)] != sub2:
+ continue
+
+ s1 = s1[:j] + replacement + s1[j + len(sub1):]
+ s2 = s2[:j] + replacement + s2[j + len(sub2):]
+
+ return s1, s2
+
+ def strftime_pre_1900(self, dt, fmt=None):
+ """Call time.strftime for years before 1900 by rolling
+ forward a multiple of 28 years.
+
+ *fmt* is a :func:`strftime` format string.
+
+ Dalke: I hope I did this math right. Every 28 years the
+ calendar repeats, except through century leap years excepting
+ the 400 year leap years. But only if you're using the Gregorian
+ calendar.
+ """
+ if fmt is None:
+ fmt = self.fmt
+
+ # Since python's time module's strftime implementation does not
+ # support %f microsecond (but the datetime module does), use a
+ # regular expression substitution to replace instances of %f.
+        # Note that this can be useful since the floating-point date
+        # representation is more precise closer to year 0001 (around the
+        # year 2000 the resolution is on the order of tens of
+        # microseconds).
+ fmt = re.sub(r'((^|[^%])(%%)*)%f',
+ r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
+
+ year = dt.year
+ # For every non-leap year century, advance by
+ # 6 years to get into the 28-year repeat cycle
+ delta = 2000 - year
+ off = 6 * (delta // 100 + delta // 400)
+ year = year + off
+
+ # Move to between the years 1973 and 2000
+ year1 = year + ((2000 - year) // 28) * 28
+ year2 = year1 + 28
+ timetuple = dt.timetuple()
+ # Generate timestamp string for year and year+28
+ s1 = time.strftime(fmt, (year1,) + timetuple[1:])
+ s2 = time.strftime(fmt, (year2,) + timetuple[1:])
+
+ # Replace instances of respective years (both 2-digit and 4-digit)
+ # that are located at the same indexes of s1, s2 with dt's year.
+ # Note that C++'s strftime implementation does not use padded
+ # zeros or padded whitespace for %y or %Y for years before 100, but
+ # uses padded zeros for %x. (For example, try the runnable examples
+ # with .tm_year in the interval [-1900, -1800] on
+ # http://en.cppreference.com/w/c/chrono/strftime.) For ease of
+ # implementation, we always use padded zeros for %y, %Y, and %x.
+ s1, s2 = self._replace_common_substr(s1, s2,
+ "{0:04d}".format(year1),
+ "{0:04d}".format(year2),
+ "{0:04d}".format(dt.year))
+ s1, s2 = self._replace_common_substr(s1, s2,
+ "{0:02d}".format(year1 % 100),
+ "{0:02d}".format(year2 % 100),
+ "{0:02d}".format(dt.year % 100))
+ return cbook.unicode_safe(s1)
+
+ def strftime(self, dt, fmt=None):
+ """
+ Refer to documentation for :meth:`datetime.datetime.strftime`
+
+ *fmt* is a :meth:`datetime.datetime.strftime` format string.
+
+ Warning: For years before 1900, depending upon the current
+ locale it is possible that the year displayed with %x might
+ be incorrect. For years before 100, %y and %Y will yield
+ zero-padded strings.
+ """
+ if fmt is None:
+ fmt = self.fmt
+ fmt = self.illegal_s.sub(r"\1", fmt)
+ fmt = fmt.replace("%s", "s")
+ if dt.year >= 1900:
+ # Note: in python 3.3 this is okay for years >= 1000,
+ # refer to http://bugs.python.org/issue1777412
+ return cbook.unicode_safe(dt.strftime(fmt))
+
+ return self.strftime_pre_1900(dt, fmt)
+
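+
+# A short sketch of DateFormatter; UTC is passed explicitly so the output
+# does not depend on :rc:`timezone`, and the helper below is illustrative
+# only.
+def _example_dateformatter():
+    fmt = DateFormatter('%Y-%m-%d %H:%M', tz=UTC)
+    x = date2num(datetime.datetime(2018, 3, 1, 12, 30, tzinfo=UTC))
+    assert fmt(x) == '2018-03-01 12:30'
+    # Dates before 1900 go through the 28-year-cycle workaround above.
+    pre = fmt.strftime(datetime.datetime(1850, 7, 4, 6, 0), '%Y-%m-%d %H:%M')
+    assert pre == '1850-07-04 06:00'
+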
+
+class IndexDateFormatter(ticker.Formatter):
+ """
+ Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
+ strings by index.
+ """
+ def __init__(self, t, fmt, tz=None):
+ """
+ *t* is a sequence of dates (floating point days). *fmt* is a
+ :func:`strftime` format string.
+ """
+ if tz is None:
+ tz = _get_rc_timezone()
+ self.t = t
+ self.fmt = fmt
+ self.tz = tz
+
+ def __call__(self, x, pos=0):
+ 'Return the label for time *x* at position *pos*'
+ ind = int(np.round(x))
+ if ind >= len(self.t) or ind <= 0:
+ return ''
+
+ dt = num2date(self.t[ind], self.tz)
+
+ return cbook.unicode_safe(dt.strftime(self.fmt))
+
+
+class AutoDateFormatter(ticker.Formatter):
+ """
+ This class attempts to figure out the best format to use. This is
+ most useful when used with the :class:`AutoDateLocator`.
+
+
+    The AutoDateFormatter has a scale dictionary that maps the scale
+    of the tick (the distance in days between major ticks) to a
+    format string.  The default looks like this::
+
+        self.scaled = {
+            DAYS_PER_YEAR: rcParams['date.autoformatter.year'],
+            DAYS_PER_MONTH: rcParams['date.autoformatter.month'],
+            1.0: rcParams['date.autoformatter.day'],
+            1. / HOURS_PER_DAY: rcParams['date.autoformatter.hour'],
+            1. / MINUTES_PER_DAY: rcParams['date.autoformatter.minute'],
+            1. / SEC_PER_DAY: rcParams['date.autoformatter.second'],
+            1. / MUSECONDS_PER_DAY: rcParams['date.autoformatter.microsecond'],
+        }
+
+
+    The algorithm picks the smallest key in the dictionary that is greater
+    than or equal to the current scale and uses the corresponding format
+    string.  You can customize this dictionary by doing::
+
+
+ >>> locator = AutoDateLocator()
+ >>> formatter = AutoDateFormatter(locator)
+ >>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
+
+ A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
+ The following example shows how to use a custom format function to strip
+    trailing zeros from decimal seconds and add the date to the first
+ ticklabel::
+
+ >>> def my_format_function(x, pos=None):
+ ... x = matplotlib.dates.num2date(x)
+ ... if pos == 0:
+ ... fmt = '%D %H:%M:%S.%f'
+ ... else:
+ ... fmt = '%H:%M:%S.%f'
+ ... label = x.strftime(fmt)
+ ... label = label.rstrip("0")
+ ... label = label.rstrip(".")
+ ... return label
+ >>> from matplotlib.ticker import FuncFormatter
+ >>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
+ """
+
+ # This can be improved by providing some user-level direction on
+ # how to choose the best format (precedence, etc...)
+
+ # Perhaps a 'struct' that has a field for each time-type where a
+ # zero would indicate "don't show" and a number would indicate
+ # "show" with some sort of priority. Same priorities could mean
+ # show all with the same priority.
+
+ # Or more simply, perhaps just a format string for each
+ # possibility...
+
+ def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
+ """
+ Autoformat the date labels. The default format is the one to use
+ if none of the values in ``self.scaled`` are greater than the unit
+ returned by ``locator._get_unit()``.
+ """
+ self._locator = locator
+ self._tz = tz
+ self.defaultfmt = defaultfmt
+ self._formatter = DateFormatter(self.defaultfmt, tz)
+ self.scaled = {DAYS_PER_YEAR: rcParams['date.autoformatter.year'],
+ DAYS_PER_MONTH: rcParams['date.autoformatter.month'],
+ 1.0: rcParams['date.autoformatter.day'],
+ 1. / HOURS_PER_DAY: rcParams['date.autoformatter.hour'],
+ 1. / (MINUTES_PER_DAY):
+ rcParams['date.autoformatter.minute'],
+ 1. / (SEC_PER_DAY):
+ rcParams['date.autoformatter.second'],
+ 1. / (MUSECONDS_PER_DAY):
+ rcParams['date.autoformatter.microsecond']}
+
+ def __call__(self, x, pos=None):
+ locator_unit_scale = float(self._locator._get_unit())
+ # Pick the first scale which is greater than the locator unit.
+ fmt = next((fmt for scale, fmt in sorted(self.scaled.items())
+ if scale >= locator_unit_scale),
+ self.defaultfmt)
+
+ if isinstance(fmt, six.string_types):
+ self._formatter = DateFormatter(fmt, self._tz)
+ result = self._formatter(x, pos)
+ elif callable(fmt):
+ result = fmt(x, pos)
+ else:
+ raise TypeError('Unexpected type passed to {0!r}.'.format(self))
+
+ return result
+
+
+class rrulewrapper(object):
+ def __init__(self, freq, tzinfo=None, **kwargs):
+ kwargs['freq'] = freq
+ self._base_tzinfo = tzinfo
+
+ self._update_rrule(**kwargs)
+
+ def set(self, **kwargs):
+ self._construct.update(kwargs)
+
+ self._update_rrule(**self._construct)
+
+ def _update_rrule(self, **kwargs):
+ tzinfo = self._base_tzinfo
+
+        # rrule does not play nicely with time zones - especially pytz time
+        # zones - so it's best to work with naive datetimes and attach the
+        # time zone only after the datetimes are returned
+ if 'dtstart' in kwargs:
+ dtstart = kwargs['dtstart']
+ if dtstart.tzinfo is not None:
+ if tzinfo is None:
+ tzinfo = dtstart.tzinfo
+ else:
+ dtstart = dtstart.astimezone(tzinfo)
+
+ kwargs['dtstart'] = dtstart.replace(tzinfo=None)
+
+ if 'until' in kwargs:
+ until = kwargs['until']
+ if until.tzinfo is not None:
+ if tzinfo is not None:
+ until = until.astimezone(tzinfo)
+ else:
+ raise ValueError('until cannot be aware if dtstart '
+ 'is naive and tzinfo is None')
+
+ kwargs['until'] = until.replace(tzinfo=None)
+
+ self._construct = kwargs.copy()
+ self._tzinfo = tzinfo
+ self._rrule = rrule(**self._construct)
+
+ def _attach_tzinfo(self, dt, tzinfo):
+ # pytz zones are attached by "localizing" the datetime
+ if hasattr(tzinfo, 'localize'):
+ return tzinfo.localize(dt, is_dst=True)
+
+ return dt.replace(tzinfo=tzinfo)
+
+ def _aware_return_wrapper(self, f, returns_list=False):
+ """Decorator function that allows rrule methods to handle tzinfo."""
+ # This is only necessary if we're actually attaching a tzinfo
+ if self._tzinfo is None:
+ return f
+
+ # All datetime arguments must be naive. If they are not naive, they are
+ # converted to the _tzinfo zone before dropping the zone.
+ def normalize_arg(arg):
+ if isinstance(arg, datetime.datetime) and arg.tzinfo is not None:
+ if arg.tzinfo is not self._tzinfo:
+ arg = arg.astimezone(self._tzinfo)
+
+ return arg.replace(tzinfo=None)
+
+ return arg
+
+ def normalize_args(args, kwargs):
+ args = tuple(normalize_arg(arg) for arg in args)
+ kwargs = {kw: normalize_arg(arg) for kw, arg in kwargs.items()}
+
+ return args, kwargs
+
+ # There are two kinds of functions we care about - ones that return
+ # dates and ones that return lists of dates.
+ if not returns_list:
+ def inner_func(*args, **kwargs):
+ args, kwargs = normalize_args(args, kwargs)
+ dt = f(*args, **kwargs)
+ return self._attach_tzinfo(dt, self._tzinfo)
+ else:
+ def inner_func(*args, **kwargs):
+ args, kwargs = normalize_args(args, kwargs)
+ dts = f(*args, **kwargs)
+ return [self._attach_tzinfo(dt, self._tzinfo) for dt in dts]
+
+ return functools.wraps(f)(inner_func)
+
+ def __getattr__(self, name):
+ if name in self.__dict__:
+ return self.__dict__[name]
+
+ f = getattr(self._rrule, name)
+
+ if name in {'after', 'before'}:
+ return self._aware_return_wrapper(f)
+ elif name in {'xafter', 'xbefore', 'between'}:
+ return self._aware_return_wrapper(f, returns_list=True)
+ else:
+ return f
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+
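+
+# A minimal sketch of rrulewrapper with naive datetimes (so no tzinfo
+# handling is involved); the helper below is illustrative only.
+def _example_rrulewrapper():
+    rule = rrulewrapper(YEARLY, interval=2,
+                        dtstart=datetime.datetime(2000, 1, 1),
+                        until=datetime.datetime(2010, 1, 1))
+    dates = rule.between(datetime.datetime(2000, 1, 1),
+                         datetime.datetime(2010, 1, 1), True)
+    assert [d.year for d in dates] == [2000, 2002, 2004, 2006, 2008, 2010]
+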
+
+class DateLocator(ticker.Locator):
+ """
+ Determines the tick locations when plotting dates.
+
+ This class is subclassed by other Locators and
+ is not meant to be used on its own.
+ """
+ hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
+
+ def __init__(self, tz=None):
+ """
+ *tz* is a :class:`tzinfo` instance.
+ """
+ if tz is None:
+ tz = _get_rc_timezone()
+ self.tz = tz
+
+ def set_tzinfo(self, tz):
+ """
+ Set time zone info.
+ """
+ self.tz = tz
+
+ def datalim_to_dt(self):
+ """
+ Convert axis data interval to datetime objects.
+ """
+ dmin, dmax = self.axis.get_data_interval()
+ if dmin > dmax:
+ dmin, dmax = dmax, dmin
+ if dmin < 1:
+ raise ValueError('datalim minimum {} is less than 1 and '
+ 'is an invalid Matplotlib date value. This often '
+ 'happens if you pass a non-datetime '
+ 'value to an axis that has datetime units'
+ .format(dmin))
+ return num2date(dmin, self.tz), num2date(dmax, self.tz)
+
+ def viewlim_to_dt(self):
+ """
+ Converts the view interval to datetime objects.
+ """
+ vmin, vmax = self.axis.get_view_interval()
+ if vmin > vmax:
+ vmin, vmax = vmax, vmin
+ if vmin < 1:
+ raise ValueError('view limit minimum {} is less than 1 and '
+ 'is an invalid Matplotlib date value. This '
+ 'often happens if you pass a non-datetime '
+ 'value to an axis that has datetime units'
+ .format(vmin))
+ return num2date(vmin, self.tz), num2date(vmax, self.tz)
+
+ def _get_unit(self):
+ """
+ Return how many days a unit of the locator is; used for
+ intelligent autoscaling.
+ """
+ return 1
+
+ def _get_interval(self):
+ """
+ Return the number of units for each tick.
+ """
+ return 1
+
+ def nonsingular(self, vmin, vmax):
+ """
+ Given the proposed upper and lower extent, adjust the range
+ if it is too close to being singular (i.e. a range of ~0).
+
+ """
+ unit = self._get_unit()
+ interval = self._get_interval()
+ if abs(vmax - vmin) < 1e-6:
+ vmin -= 2 * unit * interval
+ vmax += 2 * unit * interval
+ return vmin, vmax
+
+
+class RRuleLocator(DateLocator):
+ # use the dateutil rrule instance
+
+ def __init__(self, o, tz=None):
+ DateLocator.__init__(self, tz)
+ self.rule = o
+
+ def __call__(self):
+ # if no data have been set, this will tank with a ValueError
+ try:
+ dmin, dmax = self.viewlim_to_dt()
+ except ValueError:
+ return []
+
+ return self.tick_values(dmin, dmax)
+
+ def tick_values(self, vmin, vmax):
+ delta = relativedelta(vmax, vmin)
+
+ # We need to cap at the endpoints of valid datetime
+ try:
+ start = vmin - delta
+ except (ValueError, OverflowError):
+ start = _from_ordinalf(1.0)
+
+ try:
+ stop = vmax + delta
+ except (ValueError, OverflowError):
+            # The magic number is just below the ordinal of the latest
+            # representable date, 9999-12-31.
+ stop = _from_ordinalf(3652059.9999999)
+
+ self.rule.set(dtstart=start, until=stop)
+
+ dates = self.rule.between(vmin, vmax, True)
+ if len(dates) == 0:
+ return date2num([vmin, vmax])
+ return self.raise_if_exceeds(date2num(dates))
+
+ def _get_unit(self):
+ """
+ Return how many days a unit of the locator is; used for
+ intelligent autoscaling.
+ """
+ freq = self.rule._rrule._freq
+ return self.get_unit_generic(freq)
+
+ @staticmethod
+ def get_unit_generic(freq):
+ if freq == YEARLY:
+ return DAYS_PER_YEAR
+ elif freq == MONTHLY:
+ return DAYS_PER_MONTH
+ elif freq == WEEKLY:
+ return DAYS_PER_WEEK
+ elif freq == DAILY:
+ return 1.0
+ elif freq == HOURLY:
+ return 1.0 / HOURS_PER_DAY
+ elif freq == MINUTELY:
+ return 1.0 / MINUTES_PER_DAY
+ elif freq == SECONDLY:
+ return 1.0 / SEC_PER_DAY
+ else:
+ # error
+ return -1 # or should this just return '1'?
+
+ def _get_interval(self):
+ return self.rule._rrule._interval
+
+ def autoscale(self):
+ """
+ Set the view limits to include the data range.
+ """
+ dmin, dmax = self.datalim_to_dt()
+ delta = relativedelta(dmax, dmin)
+
+ # We need to cap at the endpoints of valid datetime
+ try:
+ start = dmin - delta
+ except ValueError:
+ start = _from_ordinalf(1.0)
+
+ try:
+ stop = dmax + delta
+ except ValueError:
+            # The magic number is just below the ordinal of the latest
+            # representable date, 9999-12-31.
+ stop = _from_ordinalf(3652059.9999999)
+
+ self.rule.set(dtstart=start, until=stop)
+ dmin, dmax = self.datalim_to_dt()
+
+ vmin = self.rule.before(dmin, True)
+ if not vmin:
+ vmin = dmin
+
+ vmax = self.rule.after(dmax, True)
+ if not vmax:
+ vmax = dmax
+
+ vmin = date2num(vmin)
+ vmax = date2num(vmax)
+
+ return self.nonsingular(vmin, vmax)
+
+
+class AutoDateLocator(DateLocator):
+ """
+ On autoscale, this class picks the best
+ :class:`DateLocator` to set the view limits and the tick
+ locations.
+ """
+ def __init__(self, tz=None, minticks=5, maxticks=None,
+ interval_multiples=False):
+ """
+ *minticks* is the minimum number of ticks desired, which is used to
+ select the type of ticking (yearly, monthly, etc.).
+
+ *maxticks* is the maximum number of ticks desired, which controls
+ any interval between ticks (ticking every other, every 3, etc.).
+ For really fine-grained control, this can be a dictionary mapping
+ individual rrule frequency constants (YEARLY, MONTHLY, etc.)
+ to their own maximum number of ticks. This can be used to keep
+ the number of ticks appropriate to the format chosen in
+ :class:`AutoDateFormatter`. Any frequency not specified in this
+ dictionary is given a default value.
+
+ *tz* is a :class:`tzinfo` instance.
+
+ *interval_multiples* is a boolean that indicates whether ticks
+        should be chosen to be multiples of the interval. This will lock
+ ticks to 'nicer' locations. For example, this will force the
+ ticks to be at hours 0,6,12,18 when hourly ticking is done at
+ 6 hour intervals.
+
+        The AutoDateLocator has an interval dictionary that maps the
+        frequency of the tick (a constant from dateutil.rrule) to a list of
+        multiples allowed for that frequency.  The default looks like this::
+
+ self.intervald = {
+ YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
+ 1000, 2000, 4000, 5000, 10000],
+ MONTHLY : [1, 2, 3, 4, 6],
+            DAILY   : [1, 2, 3, 7, 14, 21],
+ HOURLY : [1, 2, 3, 4, 6, 12],
+ MINUTELY: [1, 5, 10, 15, 30],
+ SECONDLY: [1, 5, 10, 15, 30],
+ MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
+ 5000, 10000, 20000, 50000, 100000, 200000, 500000,
+ 1000000],
+ }
+
+ The interval is used to specify multiples that are appropriate for
+ the frequency of ticking. For instance, every 7 days is sensible
+ for daily ticks, but for minutes/seconds, 15 or 30 make sense.
+ You can customize this dictionary by doing::
+
+ locator = AutoDateLocator()
+ locator.intervald[HOURLY] = [3] # only show every 3 hours
+ """
+ DateLocator.__init__(self, tz)
+ self._locator = YearLocator()
+ self._freq = YEARLY
+ self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
+ SECONDLY, MICROSECONDLY]
+ self.minticks = minticks
+
+ self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
+ MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
+ if maxticks is not None:
+ try:
+ self.maxticks.update(maxticks)
+ except TypeError:
+ # Assume we were given an integer. Use this as the maximum
+ # number of ticks for every frequency and create a
+ # dictionary for this
+ self.maxticks = dict.fromkeys(self._freqs, maxticks)
+ self.interval_multiples = interval_multiples
+ self.intervald = {
+ YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
+ 1000, 2000, 4000, 5000, 10000],
+ MONTHLY: [1, 2, 3, 4, 6],
+ DAILY: [1, 2, 3, 7, 14, 21],
+ HOURLY: [1, 2, 3, 4, 6, 12],
+ MINUTELY: [1, 5, 10, 15, 30],
+ SECONDLY: [1, 5, 10, 15, 30],
+ MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
+ 5000, 10000, 20000, 50000, 100000, 200000, 500000,
+ 1000000]}
+ self._byranges = [None, range(1, 13), range(1, 32),
+ range(0, 24), range(0, 60), range(0, 60), None]
+
+ def __call__(self):
+ 'Return the locations of the ticks'
+ self.refresh()
+ return self._locator()
+
+ def tick_values(self, vmin, vmax):
+ return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
+
+ def nonsingular(self, vmin, vmax):
+        # Whatever is thrown at us, we can scale the unit.
+        # But by default, a singular range is expanded to a ~4 year period
+        # (2 years on either side).
+ if vmin == vmax:
+ vmin = vmin - DAYS_PER_YEAR * 2
+ vmax = vmax + DAYS_PER_YEAR * 2
+ return vmin, vmax
+
+ def set_axis(self, axis):
+ DateLocator.set_axis(self, axis)
+ self._locator.set_axis(axis)
+
+ def refresh(self):
+ 'Refresh internal information based on current limits.'
+ dmin, dmax = self.viewlim_to_dt()
+ self._locator = self.get_locator(dmin, dmax)
+
+ def _get_unit(self):
+ if self._freq in [MICROSECONDLY]:
+ return 1. / MUSECONDS_PER_DAY
+ else:
+ return RRuleLocator.get_unit_generic(self._freq)
+
+ def autoscale(self):
+ 'Try to choose the view limits intelligently.'
+ dmin, dmax = self.datalim_to_dt()
+ self._locator = self.get_locator(dmin, dmax)
+ return self._locator.autoscale()
+
+ def get_locator(self, dmin, dmax):
+ 'Pick the best locator based on a distance.'
+ delta = relativedelta(dmax, dmin)
+ tdelta = dmax - dmin
+
+ # take absolute difference
+ if dmin > dmax:
+ delta = -delta
+ tdelta = -tdelta
+
+ # The following uses a mix of calls to relativedelta and timedelta
+ # methods because there is incomplete overlap in the functionality of
+ # these similar functions, and it's best to avoid doing our own math
+ # whenever possible.
+ numYears = float(delta.years)
+ numMonths = numYears * MONTHS_PER_YEAR + delta.months
+ numDays = tdelta.days # Avoids estimates of days/month, days/year
+ numHours = numDays * HOURS_PER_DAY + delta.hours
+ numMinutes = numHours * MIN_PER_HOUR + delta.minutes
+ numSeconds = np.floor(tdelta.total_seconds())
+ numMicroseconds = np.floor(tdelta.total_seconds() * 1e6)
+
+ nums = [numYears, numMonths, numDays, numHours, numMinutes,
+ numSeconds, numMicroseconds]
+
+ use_rrule_locator = [True] * 6 + [False]
+
+ # Default setting of bymonth, etc. to pass to rrule
+ # [unused (for year), bymonth, bymonthday, byhour, byminute,
+ # bysecond, unused (for microseconds)]
+ byranges = [None, 1, 1, 0, 0, 0, None]
+
+        # Loop over all the frequencies and try to find one that gives at
+        # least minticks tick positions.  Once this is found, look for
+        # an interval from a list specific to that frequency that gives no
+        # more than maxticks tick positions. Also, set up some ranges
+        # (bymonth, etc.) as appropriate to be passed to rrulewrapper.
+ for i, (freq, num) in enumerate(zip(self._freqs, nums)):
+ # If this particular frequency doesn't give enough ticks, continue
+ if num < self.minticks:
+ # Since we're not using this particular frequency, set
+ # the corresponding by_ to None so the rrule can act as
+ # appropriate
+ byranges[i] = None
+ continue
+
+ # Find the first available interval that doesn't give too many
+ # ticks
+ for interval in self.intervald[freq]:
+ if num <= interval * (self.maxticks[freq] - 1):
+ break
+ else:
+                # We went through the whole loop without breaking, so default
+                # to the last interval in the list and emit a warning
+ warnings.warn('AutoDateLocator was unable to pick an '
+ 'appropriate interval for this date range. '
+ 'It may be necessary to add an interval value '
+ "to the AutoDateLocator's intervald dictionary."
+ ' Defaulting to {0}.'.format(interval))
+
+ # Set some parameters as appropriate
+ self._freq = freq
+
+ if self._byranges[i] and self.interval_multiples:
+ byranges[i] = self._byranges[i][::interval]
+ interval = 1
+ else:
+ byranges[i] = self._byranges[i]
+
+ break
+ else:
+ raise ValueError('No sensible date limit could be found in the '
+ 'AutoDateLocator.')
+
+ if (freq == YEARLY) and self.interval_multiples:
+ locator = YearLocator(interval)
+ elif use_rrule_locator[i]:
+ _, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
+ rrule = rrulewrapper(self._freq, interval=interval,
+ dtstart=dmin, until=dmax,
+ bymonth=bymonth, bymonthday=bymonthday,
+ byhour=byhour, byminute=byminute,
+ bysecond=bysecond)
+
+ locator = RRuleLocator(rrule, self.tz)
+ else:
+ locator = MicrosecondLocator(interval, tz=self.tz)
+ if dmin.year > 20 and interval < 1000:
+ _log.warning('Plotting microsecond time intervals is not well '
+ 'supported. Please see the MicrosecondLocator '
+ 'documentation for details.')
+
+ locator.set_axis(self.axis)
+
+ if self.axis is not None:
+ locator.set_view_interval(*self.axis.get_view_interval())
+ locator.set_data_interval(*self.axis.get_data_interval())
+ return locator
+
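+
+# A rough sketch of how get_locator() picks a frequency; it is run without
+# an attached axis, relies only on the defaults shown above, and the helper
+# below is illustrative only.
+def _example_autodatelocator():
+    dmin = datetime.datetime(2000, 1, 1)
+    dmax = datetime.datetime(2010, 1, 1)
+    locator = AutoDateLocator()
+    # A ten-year span with the default minticks=5 selects yearly ticking at
+    # interval 1, i.e. one tick per year including both endpoints.
+    ticks = locator.get_locator(dmin, dmax).tick_values(dmin, dmax)
+    assert len(ticks) == 11
+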
+
+class YearLocator(DateLocator):
+ """
+ Make ticks on a given day of each year that is a multiple of base.
+
+ Examples::
+
+ # Tick every year on Jan 1st
+ locator = YearLocator()
+
+ # Tick every 5 years on July 4th
+ locator = YearLocator(5, month=7, day=4)
+ """
+ def __init__(self, base=1, month=1, day=1, tz=None):
+ """
+        Mark years that are multiples of *base* on a given month and day
+        (default Jan 1).
+ """
+ DateLocator.__init__(self, tz)
+ self.base = ticker.Base(base)
+ self.replaced = {'month': month,
+ 'day': day,
+ 'hour': 0,
+ 'minute': 0,
+ 'second': 0,
+ 'tzinfo': tz
+ }
+
+ def __call__(self):
+ # if no data have been set, this will tank with a ValueError
+ try:
+ dmin, dmax = self.viewlim_to_dt()
+ except ValueError:
+ return []
+
+ return self.tick_values(dmin, dmax)
+
+ def tick_values(self, vmin, vmax):
+ ymin = self.base.le(vmin.year)
+ ymax = self.base.ge(vmax.year)
+
+ ticks = [vmin.replace(year=ymin, **self.replaced)]
+ while True:
+ dt = ticks[-1]
+ if dt.year >= ymax:
+ return date2num(ticks)
+ year = dt.year + self.base.get_base()
+ ticks.append(dt.replace(year=year, **self.replaced))
+
+ def autoscale(self):
+ """
+ Set the view limits to include the data range.
+ """
+ dmin, dmax = self.datalim_to_dt()
+
+ ymin = self.base.le(dmin.year)
+ ymax = self.base.ge(dmax.year)
+ vmin = dmin.replace(year=ymin, **self.replaced)
+ vmax = dmax.replace(year=ymax, **self.replaced)
+
+ vmin = date2num(vmin)
+ vmax = date2num(vmax)
+ return self.nonsingular(vmin, vmax)
+
+
+class MonthLocator(RRuleLocator):
+ """
+ Make ticks on occurrences of each month, e.g., 1, 3, 12.
+ """
+ def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
+ """
+ Mark every month in *bymonth*; *bymonth* can be an int or
+ sequence. Default is ``range(1,13)``, i.e. every month.
+
+ *interval* is the interval between each iteration. For
+ example, if ``interval=2``, mark every second occurrence.
+ """
+ if bymonth is None:
+ bymonth = range(1, 13)
+ elif isinstance(bymonth, np.ndarray):
+ # This fixes a bug in dateutil <= 2.3 which prevents the use of
+ # numpy arrays in (among other things) the bymonthday, byweekday
+ # and bymonth parameters.
+ bymonth = [x.item() for x in bymonth.astype(int)]
+
+ rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
+ interval=interval, **self.hms0d)
+ RRuleLocator.__init__(self, rule, tz)
+
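+
+# A small sketch of MonthLocator.tick_values, which inherits the rrule-based
+# machinery from RRuleLocator; the helper below is illustrative only.
+def _example_monthlocator():
+    loc = MonthLocator()  # first day of every month by default
+    ticks = loc.tick_values(datetime.datetime(2000, 1, 15),
+                            datetime.datetime(2000, 4, 15))
+    expected = [datetime.datetime(2000, 2, 1),
+                datetime.datetime(2000, 3, 1),
+                datetime.datetime(2000, 4, 1)]
+    assert list(ticks) == list(date2num(expected))
+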
+
+class WeekdayLocator(RRuleLocator):
+ """
+ Make ticks on occurrences of each weekday.
+ """
+
+ def __init__(self, byweekday=1, interval=1, tz=None):
+ """
+ Mark every weekday in *byweekday*; *byweekday* can be a number or
+ sequence.
+
+ Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
+ SU, the constants from :mod:`dateutil.rrule`, which have been
+ imported into the :mod:`matplotlib.dates` namespace.
+
+ *interval* specifies the number of weeks to skip. For example,
+ ``interval=2`` plots every second week.
+ """
+ if isinstance(byweekday, np.ndarray):
+ # This fixes a bug in dateutil <= 2.3 which prevents the use of
+ # numpy arrays in (among other things) the bymonthday, byweekday
+ # and bymonth parameters.
+            byweekday = [x.item() for x in byweekday.astype(int)]
+
+ rule = rrulewrapper(DAILY, byweekday=byweekday,
+ interval=interval, **self.hms0d)
+ RRuleLocator.__init__(self, rule, tz)
+
+
+class DayLocator(RRuleLocator):
+ """
+ Make ticks on occurrences of each day of the month. For example,
+ 1, 15, 30.
+ """
+ def __init__(self, bymonthday=None, interval=1, tz=None):
+ """
+ Mark every day in *bymonthday*; *bymonthday* can be an int or
+ sequence.
+
+ Default is to tick every day of the month: ``bymonthday=range(1,32)``
+ """
+ if not interval == int(interval) or interval < 1:
+ raise ValueError("interval must be an integer greater than 0")
+ if bymonthday is None:
+ bymonthday = range(1, 32)
+ elif isinstance(bymonthday, np.ndarray):
+ # This fixes a bug in dateutil <= 2.3 which prevents the use of
+ # numpy arrays in (among other things) the bymonthday, byweekday
+ # and bymonth parameters.
+ bymonthday = [x.item() for x in bymonthday.astype(int)]
+
+ rule = rrulewrapper(DAILY, bymonthday=bymonthday,
+ interval=interval, **self.hms0d)
+ RRuleLocator.__init__(self, rule, tz)
+
+
+class HourLocator(RRuleLocator):
+ """
+ Make ticks on occurrences of each hour.
+ """
+ def __init__(self, byhour=None, interval=1, tz=None):
+ """
+ Mark every hour in *byhour*; *byhour* can be an int or sequence.
+ Default is to tick every hour: ``byhour=range(24)``
+
+ *interval* is the interval between each iteration. For
+ example, if ``interval=2``, mark every second occurrence.
+ """
+ if byhour is None:
+ byhour = range(24)
+
+ rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
+ byminute=0, bysecond=0)
+ RRuleLocator.__init__(self, rule, tz)
+
+
+class MinuteLocator(RRuleLocator):
+ """
+ Make ticks on occurrences of each minute.
+ """
+ def __init__(self, byminute=None, interval=1, tz=None):
+ """
+ Mark every minute in *byminute*; *byminute* can be an int or
+ sequence. Default is to tick every minute: ``byminute=range(60)``
+
+ *interval* is the interval between each iteration. For
+ example, if ``interval=2``, mark every second occurrence.
+ """
+ if byminute is None:
+ byminute = range(60)
+
+ rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
+ bysecond=0)
+ RRuleLocator.__init__(self, rule, tz)
+
+
+class SecondLocator(RRuleLocator):
+ """
+ Make ticks on occurrences of each second.
+ """
+ def __init__(self, bysecond=None, interval=1, tz=None):
+ """
+ Mark every second in *bysecond*; *bysecond* can be an int or
+ sequence. Default is to tick every second: ``bysecond = range(60)``
+
+ *interval* is the interval between each iteration. For
+ example, if ``interval=2``, mark every second occurrence.
+
+ """
+ if bysecond is None:
+ bysecond = range(60)
+
+ rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
+ RRuleLocator.__init__(self, rule, tz)
+
+
+class MicrosecondLocator(DateLocator):
+ """
+ Make ticks on regular intervals of one or more microsecond(s).
+
+ .. note::
+
+ Due to the floating point representation of time in days since
+ 0001-01-01 UTC (plus 1), plotting data with microsecond time
+ resolution does not work well with current dates.
+
+ If you want microsecond resolution time plots, it is strongly
+ recommended to use floating point seconds, not datetime-like
+ time representation.
+
+ If you really must use datetime.datetime() or similar and still
+ need microsecond precision, your only chance is to use very
+ early years; using year 0001 is recommended.
+
+ """
+ def __init__(self, interval=1, tz=None):
+ """
+ *interval* is the interval between each iteration. For
+ example, if ``interval=2``, mark every second microsecond.
+
+ """
+ self._interval = interval
+ self._wrapped_locator = ticker.MultipleLocator(interval)
+ self.tz = tz
+
+ def set_axis(self, axis):
+ self._wrapped_locator.set_axis(axis)
+ return DateLocator.set_axis(self, axis)
+
+ def set_view_interval(self, vmin, vmax):
+ self._wrapped_locator.set_view_interval(vmin, vmax)
+ return DateLocator.set_view_interval(self, vmin, vmax)
+
+ def set_data_interval(self, vmin, vmax):
+ self._wrapped_locator.set_data_interval(vmin, vmax)
+ return DateLocator.set_data_interval(self, vmin, vmax)
+
+ def __call__(self):
+ # if no data have been set, this will tank with a ValueError
+ try:
+ dmin, dmax = self.viewlim_to_dt()
+ except ValueError:
+ return []
+
+ return self.tick_values(dmin, dmax)
+
+ def tick_values(self, vmin, vmax):
+ nmin, nmax = date2num((vmin, vmax))
+ nmin *= MUSECONDS_PER_DAY
+ nmax *= MUSECONDS_PER_DAY
+ ticks = self._wrapped_locator.tick_values(nmin, nmax)
+ ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
+ return ticks
+
+ def _get_unit(self):
+ """
+ Return how many days a unit of the locator is; used for
+ intelligent autoscaling.
+ """
+ return 1. / MUSECONDS_PER_DAY
+
+ def _get_interval(self):
+ """
+ Return the number of units for each tick.
+ """
+ return self._interval
+
+
+def _close_to_dt(d1, d2, epsilon=5):
+ """
+ Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
+ """
+ delta = d2 - d1
+ mus = abs(delta.total_seconds() * 1e6)
+ assert mus < epsilon
+
+
+def _close_to_num(o1, o2, epsilon=5):
+ """
+ Assert that float ordinals *o1* and *o2* are within *epsilon*
+ microseconds.
+ """
+ delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
+ assert delta < epsilon
+
+
+def epoch2num(e):
+ """
+    Convert an epoch (seconds since 1970-01-01), or a sequence of epochs,
+    to the new date format, that is, days since 0001.
+ """
+ return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
+
+
+def num2epoch(d):
+ """
+    Convert days since 0001 to an epoch (seconds since 1970-01-01).
+    *d* can be a number or a sequence.
+ """
+ return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
+
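+
+# A small sketch of epoch2num/num2epoch, assuming EPOCH_OFFSET (defined near
+# the top of this module) is the Matplotlib date of the Unix epoch,
+# 1970-01-01; the helper below is illustrative only.
+def _example_epoch2num():
+    assert epoch2num(0) == date2num(datetime.datetime(1970, 1, 1))
+    # One day's worth of seconds moves the result forward by exactly one day.
+    assert epoch2num(SEC_PER_DAY) == epoch2num(0) + 1
+    # Round-tripping is exact only to within the float resolution of the
+    # date format.
+    assert abs(num2epoch(epoch2num(12345.0)) - 12345.0) < 1e-3
+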
+
+def mx2num(mxdates):
+ """
+    Convert an mx :class:`datetime` instance (or a sequence of mx
+    instances) to the new date format.
+ """
+ scalar = False
+ if not cbook.iterable(mxdates):
+ scalar = True
+ mxdates = [mxdates]
+ ret = epoch2num([m.ticks() for m in mxdates])
+ if scalar:
+ return ret[0]
+ else:
+ return ret
+
+
+def date_ticker_factory(span, tz=None, numticks=5):
+ """
+ Create a date locator with *numticks* (approx) and a date formatter
+ for *span* in days. Return value is (locator, formatter).
+ """
+
+ if span == 0:
+ span = 1 / HOURS_PER_DAY
+
+ mins = span * MINUTES_PER_DAY
+ hrs = span * HOURS_PER_DAY
+ days = span
+ wks = span / DAYS_PER_WEEK
+ months = span / DAYS_PER_MONTH # Approx
+ years = span / DAYS_PER_YEAR # Approx
+
+ if years > numticks:
+ locator = YearLocator(int(years / numticks), tz=tz) # define
+ fmt = '%Y'
+ elif months > numticks:
+ locator = MonthLocator(tz=tz)
+ fmt = '%b %Y'
+ elif wks > numticks:
+ locator = WeekdayLocator(tz=tz)
+ fmt = '%a, %b %d'
+ elif days > numticks:
+ locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
+ fmt = '%b %d'
+ elif hrs > numticks:
+ locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
+ fmt = '%H:%M\n%b %d'
+ elif mins > numticks:
+ locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
+ tz=tz)
+ fmt = '%H:%M:%S'
+ else:
+ locator = MinuteLocator(tz=tz)
+ fmt = '%H:%M:%S'
+
+ formatter = DateFormatter(fmt, tz=tz)
+ return locator, formatter
+
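+
+# A quick sketch of date_ticker_factory's selection logic; the helper below
+# is illustrative only.
+def _example_date_ticker_factory():
+    # A 30 day span with the default numticks=5 falls through to daily
+    # ticking with interval ceil(30 / 5) = 6.
+    locator, formatter = date_ticker_factory(30)
+    assert isinstance(locator, DayLocator)
+    assert formatter.fmt == '%b %d'
+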
+
+def seconds(s):
+ """
+ Return seconds as days.
+ """
+ return s / SEC_PER_DAY
+
+
+def minutes(m):
+ """
+ Return minutes as days.
+ """
+ return m / MINUTES_PER_DAY
+
+
+def hours(h):
+ """
+ Return hours as days.
+ """
+ return h / HOURS_PER_DAY
+
+
+def weeks(w):
+ """
+ Return weeks as days.
+ """
+ return w * DAYS_PER_WEEK
+
+
+class DateConverter(units.ConversionInterface):
+ """
+ Converter for datetime.date and datetime.datetime data,
+ or for date/time data represented as it would be converted
+ by :func:`date2num`.
+
+ The 'unit' tag for such data is None or a tzinfo instance.
+ """
+
+ @staticmethod
+ def axisinfo(unit, axis):
+ """
+ Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
+
+ *unit* is a tzinfo instance or None.
+ The *axis* argument is required but not used.
+ """
+ tz = unit
+
+ majloc = AutoDateLocator(tz=tz)
+ majfmt = AutoDateFormatter(majloc, tz=tz)
+ datemin = datetime.date(2000, 1, 1)
+ datemax = datetime.date(2010, 1, 1)
+
+ return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
+ default_limits=(datemin, datemax))
+
+ @staticmethod
+ def convert(value, unit, axis):
+ """
+ If *value* is not already a number or sequence of numbers,
+ convert it with :func:`date2num`.
+
+ The *unit* and *axis* arguments are not used.
+ """
+ return date2num(value)
+
+ @staticmethod
+ def default_units(x, axis):
+ """
+ Return the tzinfo instance of *x* or of its first element, or None
+ """
+ if isinstance(x, np.ndarray):
+ x = x.ravel()
+
+ try:
+ x = cbook.safe_first_element(x)
+ except (TypeError, StopIteration):
+ pass
+
+ try:
+ return x.tzinfo
+ except AttributeError:
+ pass
+ return None
+
+
+units.registry[np.datetime64] = DateConverter()
+units.registry[datetime.date] = DateConverter()
+units.registry[datetime.datetime] = DateConverter()
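+
+
+# A small sketch of the converter interface registered above: convert()
+# defers to date2num and default_units() reports the tzinfo of the data.
+# The helper below is illustrative only.
+def _example_dateconverter():
+    conv = units.registry[datetime.datetime]
+    d = datetime.datetime(2018, 3, 1, tzinfo=UTC)
+    assert conv.convert(d, None, None) == date2num(d)
+    assert conv.default_units(d, None) is UTC
+    assert conv.default_units([d, d], None) is UTC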
diff --git a/contrib/python/matplotlib/py2/matplotlib/docstring.py b/contrib/python/matplotlib/py2/matplotlib/docstring.py
new file mode 100644
index 00000000000..cf9537f0c6f
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/docstring.py
@@ -0,0 +1,128 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from matplotlib import cbook
+import sys
+import types
+
+
+class Substitution(object):
+ """
+ A decorator to take a function's docstring and perform string
+ substitution on it.
+
+ This decorator should be robust even if func.__doc__ is None
+ (for example, if -OO was passed to the interpreter)
+
+ Usage: construct a docstring.Substitution with a sequence or
+ dictionary suitable for performing substitution; then
+ decorate a suitable function with the constructed object. e.g.
+
+ sub_author_name = Substitution(author='Jason')
+
+ @sub_author_name
+ def some_function(x):
+ "%(author)s wrote this function"
+
+ # note that some_function.__doc__ is now "Jason wrote this function"
+
+ One can also use positional arguments.
+
+ sub_first_last_names = Substitution('Edgar Allen', 'Poe')
+
+ @sub_first_last_names
+ def some_function(x):
+ "%s %s wrote the Raven"
+ """
+ def __init__(self, *args, **kwargs):
+ assert not (len(args) and len(kwargs)), \
+ "Only positional or keyword args are allowed"
+ self.params = args or kwargs
+
+ def __call__(self, func):
+ func.__doc__ = func.__doc__ and func.__doc__ % self.params
+ return func
+
+ def update(self, *args, **kwargs):
+ "Assume self.params is a dict and update it with supplied args"
+ self.params.update(*args, **kwargs)
+
+ @classmethod
+ def from_params(cls, params):
+ """
+        In the case where *params* is a mutable container (e.g. a list or
+        dictionary) that may change before this class is called, one may
+        explicitly pass a reference to the params rather than using *args or
+        **kwargs, which would copy the values rather than reference them.
+ """
+ result = cls()
+ result.params = params
+ return result
+
+
+class Appender(object):
+ """
+ A function decorator that will append an addendum to the docstring
+ of the target function.
+
+ This decorator should be robust even if func.__doc__ is None
+ (for example, if -OO was passed to the interpreter).
+
+ Usage: construct a docstring.Appender with a string to be joined to
+ the original docstring. An optional 'join' parameter may be supplied
+ which will be used to join the docstring and addendum. e.g.
+
+ add_copyright = Appender("Copyright (c) 2009", join='\n')
+
+ @add_copyright
+ def my_dog(has='fleas'):
+ "This docstring will have a copyright below"
+ pass
+ """
+ def __init__(self, addendum, join=''):
+ self.addendum = addendum
+ self.join = join
+
+ def __call__(self, func):
+ docitems = [func.__doc__, self.addendum]
+ func.__doc__ = func.__doc__ and self.join.join(docitems)
+ return func
+
+
+def dedent(func):
+ "Dedent a docstring (if present)"
+ func.__doc__ = func.__doc__ and cbook.dedent(func.__doc__)
+ return func
+
+
+def copy(source):
+ "Copy a docstring from another source function (if present)"
+ def do_copy(target):
+ if source.__doc__:
+ target.__doc__ = source.__doc__
+ return target
+ return do_copy
+
+# create a decorator that will house the various documentation that
+# is reused throughout matplotlib
+interpd = Substitution()
+
+
+def dedent_interpd(func):
+ """A special case of the interpd that first performs a dedent on
+ the incoming docstring"""
+ if isinstance(func, types.MethodType) and not six.PY3:
+ func = func.im_func
+ return interpd(dedent(func))
+
+
+def copy_dedent(source):
+ """A decorator that will copy the docstring from the source and
+ then dedent it"""
+ # note the following is ugly because "Python is not a functional
+ # language" - GVR. Perhaps one day, functools.compose will exist.
+ # or perhaps not.
+ # http://mail.python.org/pipermail/patches/2007-February/021687.html
+ return lambda target: dedent(copy(source)(target))
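+
+
+# A small sketch combining Substitution and Appender, following the usage
+# described in their docstrings above; the helper below is illustrative only.
+def _example_docstring_helpers():
+    @Appender("Returns a greeting.", join='\n\n')
+    @Substitution(author='Jane')
+    def greet():
+        "%(author)s wrote this function"
+    assert greet.__doc__ == "Jane wrote this function\n\nReturns a greeting."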
diff --git a/contrib/python/matplotlib/py2/matplotlib/dviread.py b/contrib/python/matplotlib/py2/matplotlib/dviread.py
new file mode 100644
index 00000000000..b38af56e67e
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/dviread.py
@@ -0,0 +1,1083 @@
+"""
+A module for reading dvi files output by TeX. Several limitations make
+this not (currently) useful as a general-purpose dvi preprocessor, but
+it is currently used by the pdf backend for processing usetex text.
+
+Interface::
+
+ with Dvi(filename, 72) as dvi:
+ # iterate over pages:
+ for page in dvi:
+ w, h, d = page.width, page.height, page.descent
+ for x,y,font,glyph,width in page.text:
+ fontname = font.texname
+ pointsize = font.size
+ ...
+ for x,y,height,width in page.boxes:
+ ...
+
+"""
+from __future__ import absolute_import, division, print_function
+
+import six
+from six.moves import xrange
+
+from collections import namedtuple
+from functools import partial, wraps
+import logging
+import numpy as np
+import os
+import re
+import struct
+import sys
+import textwrap
+
+from matplotlib import cbook, rcParams
+from matplotlib.compat import subprocess
+
+try:
+ from functools import lru_cache
+except ImportError: # Py2
+ from backports.functools_lru_cache import lru_cache
+
+if six.PY3:
+ def ord(x):
+ return x
+
+_log = logging.getLogger(__name__)
+
+# Dvi is a bytecode format documented in
+# http://mirrors.ctan.org/systems/knuth/dist/texware/dvitype.web
+# http://texdoc.net/texmf-dist/doc/generic/knuth/texware/dvitype.pdf
+#
+# The file consists of a preamble, some number of pages, a postamble,
+# and a finale. Different opcodes are allowed in different contexts,
+# so the Dvi object has a parser state:
+#
+# pre: expecting the preamble
+# outer: between pages (followed by a page or the postamble,
+# also e.g. font definitions are allowed)
+# page: processing a page
+# post_post: state after the postamble (our current implementation
+# just stops reading)
+# finale: the finale (unimplemented in our current implementation)
+
+_dvistate = cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
+
+# The marks on a page consist of text and boxes. A page also has dimensions.
+Page = namedtuple('Page', 'text boxes height width descent')
+Text = namedtuple('Text', 'x y font glyph width')
+Box = namedtuple('Box', 'x y height width')
+
+
+# Opcode argument parsing
+#
+# Each of the following functions takes a Dvi object and delta,
+# which is the difference between the opcode and the minimum opcode
+# with the same meaning. Dvi opcodes often encode the number of
+# argument bytes in this delta.
+
+def _arg_raw(dvi, delta):
+ """Return *delta* without reading anything more from the dvi file"""
+ return delta
+
+
+def _arg(bytes, signed, dvi, _):
+ """Read *bytes* bytes, returning the bytes interpreted as a
+ signed integer if *signed* is true, unsigned otherwise."""
+ return dvi._arg(bytes, signed)
+
+
+def _arg_slen(dvi, delta):
+ """Signed, length *delta*
+
+ Read *delta* bytes, returning None if *delta* is zero, and
+ the bytes interpreted as a signed integer otherwise."""
+ if delta == 0:
+ return None
+ return dvi._arg(delta, True)
+
+
+def _arg_slen1(dvi, delta):
+ """Signed, length *delta*+1
+
+ Read *delta*+1 bytes, returning the bytes interpreted as signed."""
+ return dvi._arg(delta+1, True)
+
+
+def _arg_ulen1(dvi, delta):
+ """Unsigned length *delta*+1
+
+ Read *delta*+1 bytes, returning the bytes interpreted as unsigned."""
+ return dvi._arg(delta+1, False)
+
+
+def _arg_olen1(dvi, delta):
+ """Optionally signed, length *delta*+1
+
+ Read *delta*+1 bytes, returning the bytes interpreted as
+ unsigned integer for 0<=*delta*<3 and signed if *delta*==3."""
+ return dvi._arg(delta + 1, delta == 3)
+
+
+_arg_mapping = dict(raw=_arg_raw,
+ u1=partial(_arg, 1, False),
+ u4=partial(_arg, 4, False),
+ s4=partial(_arg, 4, True),
+ slen=_arg_slen,
+ olen1=_arg_olen1,
+ slen1=_arg_slen1,
+ ulen1=_arg_ulen1)
+
+
+def _dispatch(table, min, max=None, state=None, args=('raw',)):
+ """Decorator for dispatch by opcode. Sets the values in *table*
+ from *min* to *max* to this method, adds a check that the Dvi state
+ matches *state* if not None, reads arguments from the file according
+ to *args*.
+
+ *table*
+ the dispatch table to be filled in
+
+ *min*
+ minimum opcode for calling this function
+
+ *max*
+ maximum opcode for calling this function, None if only *min* is allowed
+
+ *state*
+ state of the Dvi object in which these opcodes are allowed
+
+ *args*
+ sequence of argument specifications:
+
+ ``'raw'``: opcode minus minimum
+ ``'u1'``: read one unsigned byte
+ ``'u4'``: read four bytes, treat as an unsigned number
+ ``'s4'``: read four bytes, treat as a signed number
+ ``'slen'``: read (opcode - minimum) bytes, treat as signed
+ ``'slen1'``: read (opcode - minimum + 1) bytes, treat as signed
+ ``'ulen1'``: read (opcode - minimum + 1) bytes, treat as unsigned
+ ``'olen1'``: read (opcode - minimum + 1) bytes, treat as unsigned
+ if under four bytes, signed if four bytes
+ """
+ def decorate(method):
+ get_args = [_arg_mapping[x] for x in args]
+
+ @wraps(method)
+ def wrapper(self, byte):
+ if state is not None and self.state != state:
+ raise ValueError("state precondition failed")
+ return method(self, *[f(self, byte-min) for f in get_args])
+ if max is None:
+ table[min] = wrapper
+ else:
+ for i in xrange(min, max+1):
+ assert table[i] is None
+ table[i] = wrapper
+ return wrapper
+ return decorate
+
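+
+# A small sketch of how the _dispatch decorator fills an opcode table and
+# checks the parser state; the stand-in reader and names below are
+# illustrative only and are not used by the real Dvi class.
+def _example_dispatch():
+    table = [None] * 4
+
+    class _FakeReader(object):
+        state = 'inpage'
+
+    @_dispatch(table, min=0, max=3, state='inpage', args=('raw',))
+    def _handler(self, delta):
+        return delta
+
+    # Opcodes 0..3 all dispatch to the same handler; the 'raw' argument is
+    # the opcode minus the table minimum.
+    assert table[3](_FakeReader(), 3) == 3
+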
+
+class Dvi(object):
+ """
+ A reader for a dvi ("device-independent") file, as produced by TeX.
+ The current implementation can only iterate through pages in order,
+ and does not even attempt to verify the postamble.
+
+ This class can be used as a context manager to close the underlying
+ file upon exit. Pages can be read via iteration. Here is an overly
+ simple way to extract text without trying to detect whitespace::
+
+        >>> with matplotlib.dviread.Dvi('input.dvi', 72) as dvi:
+        ...     for page in dvi:
+        ...         print(''.join(unichr(t.glyph) for t in page.text))
+ """
+ # dispatch table
+ _dtable = [None for _ in xrange(256)]
+ _dispatch = partial(_dispatch, _dtable)
+
+ def __init__(self, filename, dpi):
+ """
+ Read the data from the file named *filename* and convert
+ TeX's internal units to units of *dpi* per inch.
+ *dpi* only sets the units and does not limit the resolution.
+ Use None to return TeX's internal units.
+ """
+ _log.debug('Dvi: %s', filename)
+ self.file = open(filename, 'rb')
+ self.dpi = dpi
+ self.fonts = {}
+ self.state = _dvistate.pre
+ self.baseline = self._get_baseline(filename)
+
+ def _get_baseline(self, filename):
+ if rcParams['text.latex.preview']:
+ base, ext = os.path.splitext(filename)
+ baseline_filename = base + ".baseline"
+ if os.path.exists(baseline_filename):
+ with open(baseline_filename, 'rb') as fd:
+ l = fd.read().split()
+ height, depth, width = l
+ return float(depth)
+ return None
+
+ def __enter__(self):
+ """
+ Context manager enter method, does nothing.
+ """
+ return self
+
+ def __exit__(self, etype, evalue, etrace):
+ """
+ Context manager exit method, closes the underlying file if it is open.
+ """
+ self.close()
+
+ def __iter__(self):
+ """
+ Iterate through the pages of the file.
+
+ Yields
+ ------
+ Page
+ Details of all the text and box objects on the page.
+ The Page tuple contains lists of Text and Box tuples and
+ the page dimensions, and the Text and Box tuples contain
+ coordinates transformed into a standard Cartesian
+ coordinate system at the dpi value given when initializing.
+ The coordinates are floating point numbers, but otherwise
+ precision is not lost and coordinate values are not clipped to
+ integers.
+ """
+ while True:
+ have_page = self._read()
+ if have_page:
+ yield self._output()
+ else:
+ break
+
+ def close(self):
+ """
+ Close the underlying file if it is open.
+ """
+ if not self.file.closed:
+ self.file.close()
+
+ def _output(self):
+ """
+ Output the text and boxes belonging to the most recent page.
+ page = dvi._output()
+ """
+ minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
+ maxy_pure = -np.inf
+ for elt in self.text + self.boxes:
+ if isinstance(elt, Box):
+ x, y, h, w = elt
+ e = 0 # zero depth
+ else: # glyph
+ x, y, font, g, w = elt
+ h, e = font._height_depth_of(g)
+ minx = min(minx, x)
+ miny = min(miny, y - h)
+ maxx = max(maxx, x + w)
+ maxy = max(maxy, y + e)
+ maxy_pure = max(maxy_pure, y)
+
+ if self.dpi is None:
+ # special case for ease of debugging: output raw dvi coordinates
+ return Page(text=self.text, boxes=self.boxes,
+ width=maxx-minx, height=maxy_pure-miny,
+ descent=maxy-maxy_pure)
+
+ # convert from TeX's "scaled points" to dpi units
+ d = self.dpi / (72.27 * 2**16)
+ if self.baseline is None:
+ descent = (maxy - maxy_pure) * d
+ else:
+ descent = self.baseline
+
+ text = [Text((x-minx)*d, (maxy-y)*d - descent, f, g, w*d)
+ for (x, y, f, g, w) in self.text]
+ boxes = [Box((x-minx)*d, (maxy-y)*d - descent, h*d, w*d)
+ for (x, y, h, w) in self.boxes]
+
+ return Page(text=text, boxes=boxes, width=(maxx-minx)*d,
+ height=(maxy_pure-miny)*d, descent=descent)
+
+ def _read(self):
+ """
+ Read one page from the file. Return True if successful,
+ False if there were no more pages.
+ """
+ while True:
+ byte = ord(self.file.read(1)[0])
+ self._dtable[byte](self, byte)
+ if byte == 140: # end of page
+ return True
+ if self.state == _dvistate.post_post: # end of file
+ self.close()
+ return False
+
+ def _arg(self, nbytes, signed=False):
+ """
+ Read and return an integer argument *nbytes* long.
+ Signedness is determined by the *signed* keyword.
+ """
+ str = self.file.read(nbytes)
+ value = ord(str[0])
+ if signed and value >= 0x80:
+ value = value - 0x100
+ for i in range(1, nbytes):
+ value = 0x100*value + ord(str[i])
+ return value
+
+ @_dispatch(min=0, max=127, state=_dvistate.inpage)
+ def _set_char_immediate(self, char):
+ self._put_char_real(char)
+ self.h += self.fonts[self.f]._width_of(char)
+
+ @_dispatch(min=128, max=131, state=_dvistate.inpage, args=('olen1',))
+ def _set_char(self, char):
+ self._put_char_real(char)
+ self.h += self.fonts[self.f]._width_of(char)
+
+ @_dispatch(132, state=_dvistate.inpage, args=('s4', 's4'))
+ def _set_rule(self, a, b):
+ self._put_rule_real(a, b)
+ self.h += b
+
+ @_dispatch(min=133, max=136, state=_dvistate.inpage, args=('olen1',))
+ def _put_char(self, char):
+ self._put_char_real(char)
+
+ def _put_char_real(self, char):
+ font = self.fonts[self.f]
+ if font._vf is None:
+ self.text.append(Text(self.h, self.v, font, char,
+ font._width_of(char)))
+ else:
+ scale = font._scale
+ for x, y, f, g, w in font._vf[char].text:
+ newf = DviFont(scale=_mul2012(scale, f._scale),
+ tfm=f._tfm, texname=f.texname, vf=f._vf)
+ self.text.append(Text(self.h + _mul2012(x, scale),
+ self.v + _mul2012(y, scale),
+ newf, g, newf._width_of(g)))
+ self.boxes.extend([Box(self.h + _mul2012(x, scale),
+ self.v + _mul2012(y, scale),
+ _mul2012(a, scale), _mul2012(b, scale))
+ for x, y, a, b in font._vf[char].boxes])
+
+ @_dispatch(137, state=_dvistate.inpage, args=('s4', 's4'))
+ def _put_rule(self, a, b):
+ self._put_rule_real(a, b)
+
+ def _put_rule_real(self, a, b):
+ if a > 0 and b > 0:
+ self.boxes.append(Box(self.h, self.v, a, b))
+
+ @_dispatch(138)
+ def _nop(self, _):
+ pass
+
+ @_dispatch(139, state=_dvistate.outer, args=('s4',)*11)
+ def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
+ self.state = _dvistate.inpage
+ self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
+ self.stack = []
+ self.text = [] # list of Text objects
+ self.boxes = [] # list of Box objects
+
+ @_dispatch(140, state=_dvistate.inpage)
+ def _eop(self, _):
+ self.state = _dvistate.outer
+ del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
+
+ @_dispatch(141, state=_dvistate.inpage)
+ def _push(self, _):
+ self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
+
+ @_dispatch(142, state=_dvistate.inpage)
+ def _pop(self, _):
+ self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
+
+ @_dispatch(min=143, max=146, state=_dvistate.inpage, args=('slen1',))
+ def _right(self, b):
+ self.h += b
+
+ @_dispatch(min=147, max=151, state=_dvistate.inpage, args=('slen',))
+ def _right_w(self, new_w):
+ if new_w is not None:
+ self.w = new_w
+ self.h += self.w
+
+ @_dispatch(min=152, max=156, state=_dvistate.inpage, args=('slen',))
+ def _right_x(self, new_x):
+ if new_x is not None:
+ self.x = new_x
+ self.h += self.x
+
+ @_dispatch(min=157, max=160, state=_dvistate.inpage, args=('slen1',))
+ def _down(self, a):
+ self.v += a
+
+ @_dispatch(min=161, max=165, state=_dvistate.inpage, args=('slen',))
+ def _down_y(self, new_y):
+ if new_y is not None:
+ self.y = new_y
+ self.v += self.y
+
+ @_dispatch(min=166, max=170, state=_dvistate.inpage, args=('slen',))
+ def _down_z(self, new_z):
+ if new_z is not None:
+ self.z = new_z
+ self.v += self.z
+
+ @_dispatch(min=171, max=234, state=_dvistate.inpage)
+ def _fnt_num_immediate(self, k):
+ self.f = k
+
+ @_dispatch(min=235, max=238, state=_dvistate.inpage, args=('olen1',))
+ def _fnt_num(self, new_f):
+ self.f = new_f
+
+ @_dispatch(min=239, max=242, args=('ulen1',))
+ def _xxx(self, datalen):
+ special = self.file.read(datalen)
+ if six.PY3:
+ chr_ = chr
+ else:
+ def chr_(x):
+ return x
+ _log.debug(
+ 'Dvi._xxx: encountered special: %s',
+ ''.join([chr_(ch) if 32 <= ord(ch) < 127 else '<%02x>' % ord(ch)
+ for ch in special]))
+
+ @_dispatch(min=243, max=246, args=('olen1', 'u4', 'u4', 'u4', 'u1', 'u1'))
+ def _fnt_def(self, k, c, s, d, a, l):
+ self._fnt_def_real(k, c, s, d, a, l)
+
+ def _fnt_def_real(self, k, c, s, d, a, l):
+ n = self.file.read(a + l)
+ fontname = n[-l:].decode('ascii')
+ tfm = _tfmfile(fontname)
+ if tfm is None:
+ if six.PY2:
+ error_class = OSError
+ else:
+ error_class = FileNotFoundError
+ raise error_class("missing font metrics file: %s" % fontname)
+ if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
+ raise ValueError('tfm checksum mismatch: %s' % n)
+
+ vf = _vffile(fontname)
+
+ self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
+
+ @_dispatch(247, state=_dvistate.pre, args=('u1', 'u4', 'u4', 'u4', 'u1'))
+ def _pre(self, i, num, den, mag, k):
+ comment = self.file.read(k)
+ if i != 2:
+ raise ValueError("Unknown dvi format %d" % i)
+ if num != 25400000 or den != 7227 * 2**16:
+ raise ValueError("nonstandard units in dvi file")
+ # meaning: TeX always uses those exact values, so it
+ # should be enough for us to support those
+ # (There are 72.27 pt to an inch so 7227 pt =
+ # 7227 * 2**16 sp to 100 in. The numerator is multiplied
+ # by 10^5 to get units of 10**-7 meters.)
+ if mag != 1000:
+ raise ValueError("nonstandard magnification in dvi file")
+ # meaning: LaTeX seems to frown on setting \mag, so
+ # I think we can assume this is constant
+ self.state = _dvistate.outer
+
+ @_dispatch(248, state=_dvistate.outer)
+ def _post(self, _):
+ self.state = _dvistate.post_post
+ # TODO: actually read the postamble and finale?
+ # currently post_post just triggers closing the file
+
+ @_dispatch(249)
+ def _post_post(self, _):
+ raise NotImplementedError
+
+ @_dispatch(min=250, max=255)
+ def _malformed(self, offset):
+        raise ValueError("unknown command: byte %d" % (250 + offset))
+
+
+class DviFont(object):
+ """
+ Encapsulation of a font that a DVI file can refer to.
+
+ This class holds a font's texname and size, supports comparison,
+ and knows the widths of glyphs in the same units as the AFM file.
+ There are also internal attributes (for use by dviread.py) that
+ are *not* used for comparison.
+
+ The size is in Adobe points (converted from TeX points).
+
+ Parameters
+ ----------
+
+ scale : float
+ Factor by which the font is scaled from its natural size.
+ tfm : Tfm
+ TeX font metrics for this font
+ texname : bytes
+ Name of the font as used internally by TeX and friends, as an
+ ASCII bytestring. This is usually very different from any external
+ font names, and :class:`dviread.PsfontsMap` can be used to find
+ the external name of the font.
+ vf : Vf
+ A TeX "virtual font" file, or None if this font is not virtual.
+
+ Attributes
+ ----------
+
+ texname : bytes
+ size : float
+ Size of the font in Adobe points, converted from the slightly
+ smaller TeX points.
+ widths : list
+ Widths of glyphs in glyph-space units, typically 1/1000ths of
+ the point size.
+
+ """
+ __slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
+
+ def __init__(self, scale, tfm, texname, vf):
+ if not isinstance(texname, bytes):
+ raise ValueError("texname must be a bytestring, got %s"
+ % type(texname))
+ self._scale, self._tfm, self.texname, self._vf = \
+ scale, tfm, texname, vf
+ self.size = scale * (72.0 / (72.27 * 2**16))
+ try:
+ nchars = max(tfm.width) + 1
+ except ValueError:
+ nchars = 0
+ self.widths = [(1000*tfm.width.get(char, 0)) >> 20
+ for char in xrange(nchars)]
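+        # e.g. a fix_word width of half an em (1 << 19) becomes
+        # (1000 * (1 << 19)) >> 20 == 500 glyph-space units (illustrative).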
+
+ def __eq__(self, other):
+ return self.__class__ == other.__class__ and \
+ self.texname == other.texname and self.size == other.size
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _width_of(self, char):
+ """
+ Width of char in dvi units. For internal use by dviread.py.
+ """
+
+ width = self._tfm.width.get(char, None)
+ if width is not None:
+ return _mul2012(width, self._scale)
+ _log.debug('No width for char %d in font %s.', char, self.texname)
+ return 0
+
+ def _height_depth_of(self, char):
+ """
+ Height and depth of char in dvi units. For internal use by dviread.py.
+ """
+
+ result = []
+ for metric, name in ((self._tfm.height, "height"),
+ (self._tfm.depth, "depth")):
+ value = metric.get(char, None)
+ if value is None:
+ _log.debug('No %s for char %d in font %s',
+ name, char, self.texname)
+ result.append(0)
+ else:
+ result.append(_mul2012(value, self._scale))
+ return result
+
+
+class Vf(Dvi):
+ """
+ A virtual font (\\*.vf file) containing subroutines for dvi files.
+
+ Usage::
+
+ vf = Vf(filename)
+ glyph = vf[code]
+ glyph.text, glyph.boxes, glyph.width
+
+ Parameters
+ ----------
+
+ filename : string or bytestring
+
+ Notes
+ -----
+
+ The virtual font format is a derivative of dvi:
+ http://mirrors.ctan.org/info/knuth/virtual-fonts
+ This class reuses some of the machinery of `Dvi`
+ but replaces the `_read` loop and dispatch mechanism.
+ """
+
+ def __init__(self, filename):
+ Dvi.__init__(self, filename, 0)
+ try:
+ self._first_font = None
+ self._chars = {}
+ self._read()
+ finally:
+ self.close()
+
+ def __getitem__(self, code):
+ return self._chars[code]
+
+ def _read(self):
+ """
+ Read one page from the file. Return True if successful,
+ False if there were no more pages.
+ """
+ packet_char, packet_ends = None, None
+ packet_len, packet_width = None, None
+ while True:
+ byte = ord(self.file.read(1)[0])
+ # If we are in a packet, execute the dvi instructions
+ if self.state == _dvistate.inpage:
+ byte_at = self.file.tell()-1
+ if byte_at == packet_ends:
+ self._finalize_packet(packet_char, packet_width)
+ packet_len, packet_char, packet_width = None, None, None
+ # fall through to out-of-packet code
+ elif byte_at > packet_ends:
+ raise ValueError("Packet length mismatch in vf file")
+ else:
+ if byte in (139, 140) or byte >= 243:
+ raise ValueError(
+ "Inappropriate opcode %d in vf file" % byte)
+ Dvi._dtable[byte](self, byte)
+ continue
+
+ # We are outside a packet
+ if byte < 242: # a short packet (length given by byte)
+ packet_len = byte
+ packet_char, packet_width = self._arg(1), self._arg(3)
+ packet_ends = self._init_packet(byte)
+ self.state = _dvistate.inpage
+ elif byte == 242: # a long packet
+ packet_len, packet_char, packet_width = \
+ [self._arg(x) for x in (4, 4, 4)]
+ self._init_packet(packet_len)
+ elif 243 <= byte <= 246:
+ k = self._arg(byte - 242, byte == 246)
+ c, s, d, a, l = [self._arg(x) for x in (4, 4, 4, 1, 1)]
+ self._fnt_def_real(k, c, s, d, a, l)
+ if self._first_font is None:
+ self._first_font = k
+ elif byte == 247: # preamble
+ i, k = self._arg(1), self._arg(1)
+ x = self.file.read(k)
+ cs, ds = self._arg(4), self._arg(4)
+ self._pre(i, x, cs, ds)
+ elif byte == 248: # postamble (just some number of 248s)
+ break
+ else:
+ raise ValueError("unknown vf opcode %d" % byte)
+
+ def _init_packet(self, pl):
+ if self.state != _dvistate.outer:
+ raise ValueError("Misplaced packet in vf file")
+ self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
+ self.stack, self.text, self.boxes = [], [], []
+ self.f = self._first_font
+ return self.file.tell() + pl
+
+ def _finalize_packet(self, packet_char, packet_width):
+ self._chars[packet_char] = Page(
+ text=self.text, boxes=self.boxes, width=packet_width,
+ height=None, descent=None)
+ self.state = _dvistate.outer
+
+ def _pre(self, i, x, cs, ds):
+ if self.state != _dvistate.pre:
+ raise ValueError("pre command in middle of vf file")
+ if i != 202:
+ raise ValueError("Unknown vf format %d" % i)
+ if len(x):
+ _log.debug('vf file comment: %s', x)
+ self.state = _dvistate.outer
+ # cs = checksum, ds = design size
+
+
+def _fix2comp(num):
+ """
+    Convert an unsigned 32-bit two's-complement value to a signed integer.
+ """
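+    # e.g. _fix2comp(0xffffffff) == -1 and _fix2comp(1) == 1 (illustrative).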
+ assert 0 <= num < 2**32
+ if num & 2**31:
+ return num - 2**32
+ else:
+ return num
+
+
+def _mul2012(num1, num2):
+ """
+ Multiply two numbers in 20.12 fixed point format.
+ """
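+    # Sketch of the arithmetic: with 20 fractional bits, 2.0 is 2 << 20 and
+    # 3.0 is 3 << 20, so _mul2012(2 << 20, 3 << 20) == 6 << 20, i.e. 6.0
+    # (illustrative values).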
+ # Separated into a function because >> has surprising precedence
+ return (num1*num2) >> 20
+
+
+class Tfm(object):
+ """
+ A TeX Font Metric file.
+
+ This implementation covers only the bare minimum needed by the Dvi class.
+
+ Parameters
+ ----------
+ filename : string or bytestring
+
+ Attributes
+ ----------
+ checksum : int
+ Used for verifying against the dvi file.
+ design_size : int
+ Design size of the font (unknown units)
+ width, height, depth : dict
+ Dimensions of each character, need to be scaled by the factor
+ specified in the dvi file. These are dicts because indexing may
+ not start from 0.
+ """
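+    # Note: width/height/depth values are TeX fix_word integers; DviFont and
+    # Dvi scale them with _mul2012(value, scale) to obtain dvi units.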
+ __slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
+
+ def __init__(self, filename):
+ _log.debug('opening tfm file %s', filename)
+ with open(filename, 'rb') as file:
+ header1 = file.read(24)
+ lh, bc, ec, nw, nh, nd = \
+ struct.unpack('!6H', header1[2:14])
+ _log.debug('lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d',
+ lh, bc, ec, nw, nh, nd)
+ header2 = file.read(4*lh)
+ self.checksum, self.design_size = \
+ struct.unpack('!2I', header2[:8])
+ # there is also encoding information etc.
+ char_info = file.read(4*(ec-bc+1))
+ widths = file.read(4*nw)
+ heights = file.read(4*nh)
+ depths = file.read(4*nd)
+
+ self.width, self.height, self.depth = {}, {}, {}
+ widths, heights, depths = \
+            [struct.unpack('!%dI' % (len(x) // 4), x)
+ for x in (widths, heights, depths)]
+ for idx, char in enumerate(xrange(bc, ec+1)):
+ byte0 = ord(char_info[4*idx])
+ byte1 = ord(char_info[4*idx+1])
+ self.width[char] = _fix2comp(widths[byte0])
+ self.height[char] = _fix2comp(heights[byte1 >> 4])
+ self.depth[char] = _fix2comp(depths[byte1 & 0xf])
+
+
+PsFont = namedtuple('Font', 'texname psname effects encoding filename')
+
+
+class PsfontsMap(object):
+ """
+ A psfonts.map formatted file, mapping TeX fonts to PS fonts.
+
+ Usage::
+
+ >>> map = PsfontsMap(find_tex_file('pdftex.map'))
+ >>> entry = map[b'ptmbo8r']
+ >>> entry.texname
+ b'ptmbo8r'
+ >>> entry.psname
+ b'Times-Bold'
+ >>> entry.encoding
+ '/usr/local/texlive/2008/texmf-dist/fonts/enc/dvips/base/8r.enc'
+ >>> entry.effects
+ {'slant': 0.16700000000000001}
+ >>> entry.filename
+
+ Parameters
+ ----------
+
+ filename : string or bytestring
+
+ Notes
+ -----
+
+ For historical reasons, TeX knows many Type-1 fonts by different
+ names than the outside world. (For one thing, the names have to
+ fit in eight characters.) Also, TeX's native fonts are not Type-1
+ but Metafont, which is nontrivial to convert to PostScript except
+ as a bitmap. While high-quality conversions to Type-1 format exist
+ and are shipped with modern TeX distributions, we need to know
+ which Type-1 fonts are the counterparts of which native fonts. For
+ these reasons a mapping is needed from internal font names to font
+ file names.
+
+ A texmf tree typically includes mapping files called e.g.
+ :file:`psfonts.map`, :file:`pdftex.map`, or :file:`dvipdfm.map`.
+ The file :file:`psfonts.map` is used by :program:`dvips`,
+ :file:`pdftex.map` by :program:`pdfTeX`, and :file:`dvipdfm.map`
+ by :program:`dvipdfm`. :file:`psfonts.map` might avoid embedding
+ the 35 PostScript fonts (i.e., have no filename for them, as in
+ the Times-Bold example above), while the pdf-related files perhaps
+ only avoid the "Base 14" pdf fonts. But the user may have
+ configured these files differently.
+ """
+ __slots__ = ('_font', '_filename')
+
+ def __init__(self, filename):
+ self._font = {}
+ self._filename = filename
+ if six.PY3 and isinstance(filename, bytes):
+ encoding = sys.getfilesystemencoding() or 'utf-8'
+ self._filename = filename.decode(encoding, errors='replace')
+ with open(filename, 'rb') as file:
+ self._parse(file)
+
+ def __getitem__(self, texname):
+ assert isinstance(texname, bytes)
+ try:
+ result = self._font[texname]
+ except KeyError:
+ fmt = ('A PostScript file for the font whose TeX name is "{0}" '
+ 'could not be found in the file "{1}". The dviread module '
+ 'can only handle fonts that have an associated PostScript '
+ 'font file. '
+ 'This problem can often be solved by installing '
+ 'a suitable PostScript font package in your (TeX) '
+ 'package manager.')
+ msg = fmt.format(texname.decode('ascii'), self._filename)
+ msg = textwrap.fill(msg, break_on_hyphens=False,
+ break_long_words=False)
+ _log.info(msg)
+ raise
+ fn, enc = result.filename, result.encoding
+ if fn is not None and not fn.startswith(b'/'):
+ fn = find_tex_file(fn)
+ if enc is not None and not enc.startswith(b'/'):
+ enc = find_tex_file(result.encoding)
+ return result._replace(filename=fn, encoding=enc)
+
+ def _parse(self, file):
+ """
+ Parse the font mapping file.
+
+ The format is, AFAIK: texname fontname [effects and filenames]
+ Effects are PostScript snippets like ".177 SlantFont",
+ filenames begin with one or two less-than signs. A filename
+ ending in enc is an encoding file, other filenames are font
+ files. This can be overridden with a left bracket: <[foobar
+ indicates an encoding file named foobar.
+
+ There is some difference between <foo.pfb and <<bar.pfb in
+ subsetting, but I have no example of << in my TeX installation.
+ """
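+        # An illustrative line consistent with the class docstring example:
+        #   ptmbo8r Times-Bold ".167 SlantFont" <8r.enc
+        # parses to psname=b'Times-Bold', effects={'slant': 0.167},
+        # encoding=b'8r.enc' (resolved later via find_tex_file), filename=None.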
+ # If the map file specifies multiple encodings for a font, we
+ # follow pdfTeX in choosing the last one specified. Such
+ # entries are probably mistakes but they have occurred.
+ # http://tex.stackexchange.com/questions/10826/
+ # http://article.gmane.org/gmane.comp.tex.pdftex/4914
+
+ empty_re = re.compile(br'%|\s*$')
+ word_re = re.compile(
+ br'''(?x) (?:
+ "<\[ (?P<enc1> [^"]+ )" | # quoted encoding marked by [
+ "< (?P<enc2> [^"]+.enc)" | # quoted encoding, ends in .enc
+ "<<? (?P<file1> [^"]+ )" | # quoted font file name
+ " (?P<eff1> [^"]+ )" | # quoted effects or font name
+ <\[ (?P<enc3> \S+ ) | # encoding marked by [
+ < (?P<enc4> \S+ .enc) | # encoding, ends in .enc
+ <<? (?P<file2> \S+ ) | # font file name
+ (?P<eff2> \S+ ) # effects or font name
+ )''')
+ effects_re = re.compile(
+ br'''(?x) (?P<slant> -?[0-9]*(?:\.[0-9]+)) \s* SlantFont
+ | (?P<extend>-?[0-9]*(?:\.[0-9]+)) \s* ExtendFont''')
+
+ lines = (line.strip()
+ for line in file
+ if not empty_re.match(line))
+ for line in lines:
+ effects, encoding, filename = b'', None, None
+ words = word_re.finditer(line)
+
+ # The named groups are mutually exclusive and are
+ # referenced below at an estimated order of probability of
+ # occurrence based on looking at my copy of pdftex.map.
+ # The font names are probably unquoted:
+ w = next(words)
+ texname = w.group('eff2') or w.group('eff1')
+ w = next(words)
+ psname = w.group('eff2') or w.group('eff1')
+
+ for w in words:
+ # Any effects are almost always quoted:
+ eff = w.group('eff1') or w.group('eff2')
+ if eff:
+ effects = eff
+ continue
+ # Encoding files usually have the .enc suffix
+ # and almost never need quoting:
+ enc = (w.group('enc4') or w.group('enc3') or
+ w.group('enc2') or w.group('enc1'))
+ if enc:
+ if encoding is not None:
+ _log.debug('Multiple encodings for %s = %s',
+ texname, psname)
+ encoding = enc
+ continue
+ # File names are probably unquoted:
+ filename = w.group('file2') or w.group('file1')
+
+ effects_dict = {}
+ for match in effects_re.finditer(effects):
+ slant = match.group('slant')
+ if slant:
+ effects_dict['slant'] = float(slant)
+ else:
+ effects_dict['extend'] = float(match.group('extend'))
+
+ self._font[texname] = PsFont(
+ texname=texname, psname=psname, effects=effects_dict,
+ encoding=encoding, filename=filename)
+
+
+class Encoding(object):
+ """
+ Parses a \\*.enc file referenced from a psfonts.map style file.
+ The format this class understands is a very limited subset of
+ PostScript.
+
+ Usage (subject to change)::
+
+ for name in Encoding(filename):
+ whatever(name)
+
+ Parameters
+ ----------
+ filename : string or bytestring
+
+ Attributes
+ ----------
+ encoding : list
+ List of character names
+ """
+ __slots__ = ('encoding',)
+
+ def __init__(self, filename):
+ with open(filename, 'rb') as file:
+ _log.debug('Parsing TeX encoding %s', filename)
+ self.encoding = self._parse(file)
+ _log.debug('Result: %s', self.encoding)
+
+ def __iter__(self):
+ for name in self.encoding:
+ yield name
+
+ def _parse(self, file):
+ result = []
+
+ lines = (line.split(b'%', 1)[0].strip() for line in file)
+ data = b''.join(lines)
+ beginning = data.find(b'[')
+ if beginning < 0:
+ raise ValueError("Cannot locate beginning of encoding in {}"
+ .format(file))
+ data = data[beginning:]
+ end = data.find(b']')
+ if end < 0:
+ raise ValueError("Cannot locate end of encoding in {}"
+ .format(file))
+ data = data[:end]
+
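+        # e.g. an encoding file containing
+        #   /MyEncoding [ /.notdef /space /exclam ] def
+        # yields [b'.notdef', b'space', b'exclam'] (illustrative input).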
+ return re.findall(br'/([^][{}<>\s]+)', data)
+
+
+def find_tex_file(filename, format=None):
+ """
+ Find a file in the texmf tree.
+
+ Calls :program:`kpsewhich` which is an interface to the kpathsea
+ library [1]_. Most existing TeX distributions on Unix-like systems use
+ kpathsea. It is also available as part of MikTeX, a popular
+ distribution on Windows.
+
+ Parameters
+ ----------
+ filename : string or bytestring
+ format : string or bytestring
+ Used as the value of the `--format` option to :program:`kpsewhich`.
+ Could be e.g. 'tfm' or 'vf' to limit the search to that type of files.
+
+ References
+ ----------
+
+ .. [1] `Kpathsea documentation <http://www.tug.org/kpathsea/>`_
+ The library that :program:`kpsewhich` is part of.
+ """
+
+ if six.PY3:
+ # we expect these to always be ascii encoded, but use utf-8
+ # out of caution
+ if isinstance(filename, bytes):
+ filename = filename.decode('utf-8', errors='replace')
+ if isinstance(format, bytes):
+ format = format.decode('utf-8', errors='replace')
+
+ cmd = ['kpsewhich']
+ if format is not None:
+ cmd += ['--format=' + format]
+ cmd += [filename]
+ _log.debug('find_tex_file(%s): %s', filename, cmd)
+ # stderr is unused, but reading it avoids a subprocess optimization
+ # that breaks EINTR handling in some Python versions:
+ # http://bugs.python.org/issue12493
+ # https://github.com/matplotlib/matplotlib/issues/633
+ pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ result = pipe.communicate()[0].rstrip()
+ _log.debug('find_tex_file result: %s', result)
+ return result.decode('ascii')
+
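+# A minimal usage sketch (assumes a TeX installation providing kpsewhich;
+# 'cmr10.tfm' is only an illustrative name, and an empty string comes back
+# when the file cannot be found):
+#
+#     path = find_tex_file('cmr10.tfm', format='tfm')
+#     metrics = Tfm(path) if path else None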
+
+# With multiple text objects per figure (e.g., tick labels) we may end
+# up reading the same tfm and vf files many times, so we implement a
+# simple cache. TODO: is this worth making persistent?
+
+@lru_cache()
+def _fontfile(cls, suffix, texname):
+ filename = find_tex_file(texname + suffix)
+ return cls(filename) if filename else None
+
+
+_tfmfile = partial(_fontfile, Tfm, ".tfm")
+_vffile = partial(_fontfile, Vf, ".vf")
+
+
+if __name__ == '__main__':
+ import sys
+ fname = sys.argv[1]
+ try:
+ dpi = float(sys.argv[2])
+ except IndexError:
+ dpi = None
+ with Dvi(fname, dpi) as dvi:
+ fontmap = PsfontsMap(find_tex_file('pdftex.map'))
+ for page in dvi:
+ print('=== new page ===')
+ fPrev = None
+ for x, y, f, c, w in page.text:
+ if f != fPrev:
+ print('font', f.texname, 'scaled', f._scale/pow(2.0, 20))
+ fPrev = f
+ print(x, y, c, 32 <= c < 128 and chr(c) or '.', w)
+ for x, y, w, h in page.boxes:
+ print(x, y, 'BOX', w, h)
diff --git a/contrib/python/matplotlib/py2/matplotlib/figure.py b/contrib/python/matplotlib/py2/matplotlib/figure.py
new file mode 100644
index 00000000000..d80b16d55ab
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/figure.py
@@ -0,0 +1,2532 @@
+"""
+The figure module provides the top-level
+:class:`~matplotlib.artist.Artist`, the :class:`Figure`, which
+contains all the plot elements. The following classes are defined
+
+:class:`SubplotParams`
+ control the default spacing of the subplots
+
+:class:`Figure`
+ Top level container for all plot elements.
+
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import logging
+import warnings
+
+import numpy as np
+
+from matplotlib import rcParams
+from matplotlib import docstring
+from matplotlib import __version__ as _mpl_version
+
+import matplotlib.artist as martist
+from matplotlib.artist import Artist, allow_rasterization
+
+import matplotlib.cbook as cbook
+
+from matplotlib.cbook import Stack, iterable
+
+from matplotlib import image as mimage
+from matplotlib.image import FigureImage
+
+import matplotlib.colorbar as cbar
+
+from matplotlib.axes import Axes, SubplotBase, subplot_class_factory
+from matplotlib.blocking_input import BlockingMouseInput, BlockingKeyMouseInput
+from matplotlib.gridspec import GridSpec
+import matplotlib.legend as mlegend
+from matplotlib.patches import Rectangle
+from matplotlib.projections import (get_projection_names,
+ process_projection_requirements)
+from matplotlib.text import Text, _process_text_args
+from matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo,
+ TransformedBbox)
+import matplotlib._layoutbox as layoutbox
+from matplotlib.backend_bases import NonGuiException
+
+_log = logging.getLogger(__name__)
+
+docstring.interpd.update(projection_names=get_projection_names())
+
+
+def _stale_figure_callback(self, val):
+ if self.figure:
+ self.figure.stale = val
+
+
+class AxesStack(Stack):
+ """
+ Specialization of the `.Stack` to handle all tracking of
+ `~matplotlib.axes.Axes` in a `.Figure`.
+ This stack stores ``key, (ind, axes)`` pairs, where:
+
+ * **key** should be a hash of the args and kwargs
+ used in generating the Axes.
+ * **ind** is a serial number for tracking the order
+ in which axes were added.
+
+ The AxesStack is a callable, where ``ax_stack()`` returns
+ the current axes. Alternatively the :meth:`current_key_axes` will
+ return the current key and associated axes.
+
+ """
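+    # Sketch of the stored layout (illustrative; assumes `Stack.push` leaves
+    # the pushed element current): after add(k1, ax1) and add(k2, ax2) the
+    # elements are [(k1, (1, ax1)), (k2, (2, ax2))] and calling the stack
+    # returns ax2.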
+ def __init__(self):
+ Stack.__init__(self)
+ self._ind = 0
+
+ def as_list(self):
+ """
+ Return a list of the Axes instances that have been added to the figure
+ """
+ ia_list = [a for k, a in self._elements]
+ ia_list.sort()
+ return [a for i, a in ia_list]
+
+ def get(self, key):
+ """
+ Return the Axes instance that was added with *key*.
+ If it is not present, return None.
+ """
+ item = dict(self._elements).get(key)
+ if item is None:
+ return None
+ cbook.warn_deprecated(
+ "2.1",
+ "Adding an axes using the same arguments as a previous axes "
+ "currently reuses the earlier instance. In a future version, "
+ "a new instance will always be created and returned. Meanwhile, "
+ "this warning can be suppressed, and the future behavior ensured, "
+ "by passing a unique label to each axes instance.")
+ return item[1]
+
+ def _entry_from_axes(self, e):
+ ind, k = {a: (ind, k) for k, (ind, a) in self._elements}[e]
+ return (k, (ind, e))
+
+ def remove(self, a):
+ """Remove the axes from the stack."""
+ Stack.remove(self, self._entry_from_axes(a))
+
+ def bubble(self, a):
+ """
+ Move the given axes, which must already exist in the
+ stack, to the top.
+ """
+ return Stack.bubble(self, self._entry_from_axes(a))
+
+ def add(self, key, a):
+ """
+ Add Axes *a*, with key *key*, to the stack, and return the stack.
+
+ If *key* is unhashable, replace it by a unique, arbitrary object.
+
+ If *a* is already on the stack, don't add it again, but
+ return *None*.
+ """
+ # All the error checking may be unnecessary; but this method
+ # is called so seldom that the overhead is negligible.
+ if not isinstance(a, Axes):
+ raise ValueError("second argument, {!r}, is not an Axes".format(a))
+ try:
+ hash(key)
+ except TypeError:
+ key = object()
+
+ a_existing = self.get(key)
+ if a_existing is not None:
+ Stack.remove(self, (key, a_existing))
+ warnings.warn(
+ "key {!r} already existed; Axes is being replaced".format(key))
+ # I don't think the above should ever happen.
+
+ if a in self:
+ return None
+ self._ind += 1
+ return Stack.push(self, (key, (self._ind, a)))
+
+ def current_key_axes(self):
+ """
+ Return a tuple of ``(key, axes)`` for the active axes.
+
+ If no axes exists on the stack, then returns ``(None, None)``.
+ """
+ if not len(self._elements):
+ return self._default, self._default
+ else:
+ key, (index, axes) = self._elements[self._pos]
+ return key, axes
+
+ def __call__(self):
+ return self.current_key_axes()[1]
+
+ def __contains__(self, a):
+ return a in self.as_list()
+
+
+class SubplotParams(object):
+ """
+ A class to hold the parameters for a subplot.
+ """
+ def __init__(self, left=None, bottom=None, right=None, top=None,
+ wspace=None, hspace=None):
+ """
+ All dimensions are fractions of the figure width or height.
+ Defaults are given by :rc:`figure.subplot.[name]`.
+
+ Parameters
+ ----------
+ left : float
+ The left side of the subplots of the figure.
+
+ right : float
+ The right side of the subplots of the figure.
+
+ bottom : float
+ The bottom of the subplots of the figure.
+
+ top : float
+ The top of the subplots of the figure.
+
+ wspace : float
+ The amount of width reserved for space between subplots,
+ expressed as a fraction of the average axis width.
+
+ hspace : float
+ The amount of height reserved for space between subplots,
+ expressed as a fraction of the average axis height.
+ """
+ self.validate = True
+ self.update(left, bottom, right, top, wspace, hspace)
+
+ def update(self, left=None, bottom=None, right=None, top=None,
+ wspace=None, hspace=None):
+ """
+ Update the dimensions of the passed parameters. *None* means unchanged.
+ """
+ thisleft = getattr(self, 'left', None)
+ thisright = getattr(self, 'right', None)
+ thistop = getattr(self, 'top', None)
+ thisbottom = getattr(self, 'bottom', None)
+ thiswspace = getattr(self, 'wspace', None)
+ thishspace = getattr(self, 'hspace', None)
+
+ self._update_this('left', left)
+ self._update_this('right', right)
+ self._update_this('bottom', bottom)
+ self._update_this('top', top)
+ self._update_this('wspace', wspace)
+ self._update_this('hspace', hspace)
+
+ def reset():
+ self.left = thisleft
+ self.right = thisright
+ self.top = thistop
+ self.bottom = thisbottom
+ self.wspace = thiswspace
+ self.hspace = thishspace
+
+ if self.validate:
+ if self.left >= self.right:
+ reset()
+ raise ValueError('left cannot be >= right')
+
+ if self.bottom >= self.top:
+ reset()
+ raise ValueError('bottom cannot be >= top')
+
+ def _update_this(self, s, val):
+ if val is None:
+ val = getattr(self, s, None)
+ if val is None:
+ key = 'figure.subplot.' + s
+ val = rcParams[key]
+
+ setattr(self, s, val)
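+    # Illustrative note: SubplotParams(left=0.1).update(top=0.95) keeps the
+    # explicit left value and fills every other field from the
+    # :rc:`figure.subplot.*` defaults.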
+
+
+class Figure(Artist):
+ """
+ The top level container for all the plot elements.
+
+ The Figure instance supports callbacks through a *callbacks* attribute
+    which is a `.CallbackRegistry` instance.  The only event you can connect
+    to is 'dpi_changed'; the callback will be called as ``func(fig)``, where
+    *fig* is the `Figure` instance.
+
+ Attributes
+ ----------
+ patch
+ The `.Rectangle` instance representing the figure patch.
+
+ suppressComposite
+ For multiple figure images, the figure will make composite images
+ depending on the renderer option_image_nocomposite function. If
+ *suppressComposite* is a boolean, this will override the renderer.
+ """
+
+ def __str__(self):
+ return "Figure(%gx%g)" % tuple(self.bbox.size)
+
+ def __repr__(self):
+        return "<{clsname} size {w:g}x{h:g} with {naxes} Axes>".format(
+            clsname=self.__class__.__name__,
+            w=self.bbox.size[0], h=self.bbox.size[1],
+ naxes=len(self.axes),
+ )
+
+ def __init__(self,
+ figsize=None,
+ dpi=None,
+ facecolor=None,
+ edgecolor=None,
+ linewidth=0.0,
+ frameon=None,
+ subplotpars=None, # default to rc
+ tight_layout=None, # default to rc figure.autolayout
+ constrained_layout=None, # default to rc
+ #figure.constrained_layout.use
+ ):
+ """
+ Parameters
+ ----------
+ figsize : 2-tuple of floats, default: :rc:`figure.figsize`
+ Figure dimension ``(width, height)`` in inches.
+
+ dpi : float, default: :rc:`figure.dpi`
+ Dots per inch.
+
+ facecolor : default: :rc:`figure.facecolor`
+ The figure patch facecolor.
+
+ edgecolor : default: :rc:`figure.edgecolor`
+ The figure patch edge color.
+
+ linewidth : float
+ The linewidth of the frame (i.e. the edge linewidth of the figure
+ patch).
+
+ frameon : bool, default: :rc:`figure.frameon`
+ If ``False``, suppress drawing the figure frame.
+
+ subplotpars : :class:`SubplotParams`
+ Subplot parameters. If not given, the default subplot
+ parameters :rc:`figure.subplot.*` are used.
+
+ tight_layout : bool or dict, default: :rc:`figure.autolayout`
+ If ``False`` use *subplotpars*. If ``True`` adjust subplot
+ parameters using `.tight_layout` with default padding.
+ When providing a dict containing the keys ``pad``, ``w_pad``,
+ ``h_pad``, and ``rect``, the default `.tight_layout` paddings
+ will be overridden.
+
+ constrained_layout : bool
+ If ``True`` use constrained layout to adjust positioning of plot
+ elements. Like ``tight_layout``, but designed to be more
+ flexible. See
+ :doc:`/tutorials/intermediate/constrainedlayout_guide`
+ for examples. (Note: does not work with :meth:`.subplot` or
+ :meth:`.subplot2grid`.)
+ Defaults to :rc:`figure.constrained_layout.use`.
+ """
+ Artist.__init__(self)
+ # remove the non-figure artist _axes property
+ # as it makes no sense for a figure to be _in_ an axes
+ # this is used by the property methods in the artist base class
+ # which are over-ridden in this class
+ del self._axes
+ self.callbacks = cbook.CallbackRegistry()
+
+ if figsize is None:
+ figsize = rcParams['figure.figsize']
+ if dpi is None:
+ dpi = rcParams['figure.dpi']
+ if facecolor is None:
+ facecolor = rcParams['figure.facecolor']
+ if edgecolor is None:
+ edgecolor = rcParams['figure.edgecolor']
+ if frameon is None:
+ frameon = rcParams['figure.frameon']
+
+ if not np.isfinite(figsize).all():
+ raise ValueError('figure size must be finite not '
+ '{}'.format(figsize))
+ self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)
+
+ self.dpi_scale_trans = Affine2D().scale(dpi, dpi)
+ # do not use property as it will trigger
+ self._dpi = dpi
+ self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)
+
+ self.frameon = frameon
+
+ self.transFigure = BboxTransformTo(self.bbox)
+
+ self.patch = Rectangle(
+ xy=(0, 0), width=1, height=1,
+ facecolor=facecolor, edgecolor=edgecolor, linewidth=linewidth)
+ self._set_artist_props(self.patch)
+ self.patch.set_aa(False)
+
+ self._hold = rcParams['axes.hold']
+ if self._hold is None:
+ self._hold = True
+
+ self.canvas = None
+ self._suptitle = None
+
+ if subplotpars is None:
+ subplotpars = SubplotParams()
+
+ self.subplotpars = subplotpars
+ # constrained_layout:
+ self._layoutbox = None
+ # set in set_constrained_layout_pads()
+ self.set_constrained_layout(constrained_layout)
+
+ self.set_tight_layout(tight_layout)
+
+ self._axstack = AxesStack() # track all figure axes and current axes
+ self.clf()
+ self._cachedRenderer = None
+
+ # groupers to keep track of x and y labels we want to align.
+ # see self.align_xlabels and self.align_ylabels and
+ # axis._get_tick_boxes_siblings
+ self._align_xlabel_grp = cbook.Grouper()
+ self._align_ylabel_grp = cbook.Grouper()
+
+ @property
+ @cbook.deprecated("2.1", alternative="`.Figure.patch`")
+ def figurePatch(self):
+ return self.patch
+
+ # TODO: I'd like to dynamically add the _repr_html_ method
+ # to the figure in the right context, but then IPython doesn't
+ # use it, for some reason.
+
+ def _repr_html_(self):
+        # We can't use "isinstance" here, because then we'd end up importing
+        # webagg unconditionally.
+ if (self.canvas is not None and
+ 'WebAgg' in self.canvas.__class__.__name__):
+ from matplotlib.backends import backend_webagg
+ return backend_webagg.ipython_inline_display(self)
+
+ def show(self, warn=True):
+ """
+ If using a GUI backend with pyplot, display the figure window.
+
+ If the figure was not created using
+ :func:`~matplotlib.pyplot.figure`, it will lack a
+ :class:`~matplotlib.backend_bases.FigureManagerBase`, and
+ will raise an AttributeError.
+
+ Parameters
+ ----------
+        warn : bool
+            If ``True``, issue a warning when called on a non-GUI backend.
+
+ Notes
+ -----
+ For non-GUI backends, this does nothing, in which case a warning will
+ be issued if *warn* is ``True`` (default).
+ """
+ try:
+ manager = getattr(self.canvas, 'manager')
+ except AttributeError as err:
+ raise AttributeError("%s\n"
+ "Figure.show works only "
+ "for figures managed by pyplot, normally "
+ "created by pyplot.figure()." % err)
+
+ if manager is not None:
+ try:
+ manager.show()
+ return
+ except NonGuiException:
+ pass
+ if warn:
+ import warnings
+ warnings.warn(
+ "matplotlib is currently using a non-GUI backend, "
+ "so cannot show the figure")
+
+ def _get_axes(self):
+ return self._axstack.as_list()
+
+ axes = property(fget=_get_axes,
+ doc="List of axes in the Figure. You can access the "
+ "axes in the Figure through this list. "
+ "Do not modify the list itself. Instead, use "
+                        "`~Figure.add_axes`, `~.Figure.add_subplot` or "
+ "`~.Figure.delaxes` to add or remove an axes.")
+
+ def _get_dpi(self):
+ return self._dpi
+
+ def _set_dpi(self, dpi, forward=True):
+ """
+ Parameters
+ ----------
+ dpi : float
+
+ forward : bool
+ Passed on to `~.Figure.set_size_inches`
+ """
+ self._dpi = dpi
+ self.dpi_scale_trans.clear().scale(dpi, dpi)
+ w, h = self.get_size_inches()
+ self.set_size_inches(w, h, forward=forward)
+ self.callbacks.process('dpi_changed', self)
+
+ dpi = property(_get_dpi, _set_dpi, doc="The resolution in dots per inch.")
+
+ def get_tight_layout(self):
+ """Return whether `.tight_layout` is called when drawing."""
+ return self._tight
+
+ def set_tight_layout(self, tight):
+ """
+ Set whether and how `.tight_layout` is called when drawing.
+
+ Parameters
+ ----------
+ tight : bool or dict with keys "pad", "w_pad", "h_pad", "rect" or None
+ If a bool, sets whether to call `.tight_layout` upon drawing.
+ If ``None``, use the ``figure.autolayout`` rcparam instead.
+ If a dict, pass it as kwargs to `.tight_layout`, overriding the
+ default paddings.
+
+ ..
+ ACCEPTS: [ bool
+ | dict with keys "pad", "w_pad", "h_pad", "rect"
+ | None ]
+ """
+ if tight is None:
+ tight = rcParams['figure.autolayout']
+ self._tight = bool(tight)
+ self._tight_parameters = tight if isinstance(tight, dict) else {}
+ self.stale = True
+
+ def get_constrained_layout(self):
+ """
+ Return a boolean: True means constrained layout is being used.
+
+ See :doc:`/tutorials/intermediate/constrainedlayout_guide`.
+ """
+ return self._constrained
+
+ def set_constrained_layout(self, constrained):
+ """
+ Set whether ``constrained_layout`` is used upon drawing. If None,
+ the rcParams['figure.constrained_layout.use'] value will be used.
+
+ When providing a dict containing the keys `w_pad`, `h_pad`
+ the default ``constrained_layout`` paddings will be
+ overridden. These pads are in inches and default to 3.0/72.0.
+ ``w_pad`` is the width padding and ``h_pad`` is the height padding.
+
+ ACCEPTS: [True | False | dict | None ]
+
+ See :doc:`/tutorials/intermediate/constrainedlayout_guide`.
+ """
+ self._constrained_layout_pads = dict()
+ self._constrained_layout_pads['w_pad'] = None
+ self._constrained_layout_pads['h_pad'] = None
+ self._constrained_layout_pads['wspace'] = None
+ self._constrained_layout_pads['hspace'] = None
+ if constrained is None:
+ constrained = rcParams['figure.constrained_layout.use']
+ self._constrained = bool(constrained)
+ if isinstance(constrained, dict):
+ self.set_constrained_layout_pads(**constrained)
+ else:
+ self.set_constrained_layout_pads()
+
+ self.stale = True
+
+ def set_constrained_layout_pads(self, **kwargs):
+ """
+ Set padding for ``constrained_layout``. Note the kwargs can be passed
+        as a dictionary ``fig.set_constrained_layout_pads(**paddict)``.
+
+ See :doc:`/tutorials/intermediate/constrainedlayout_guide`.
+
+ Parameters
+ ----------
+
+ w_pad : scalar
+ Width padding in inches. This is the pad around axes
+ and is meant to make sure there is enough room for fonts to
+ look good. Defaults to 3 pts = 0.04167 inches
+
+ h_pad : scalar
+ Height padding in inches. Defaults to 3 pts.
+
+ wspace: scalar
+ Width padding between subplots, expressed as a fraction of the
+ subplot width. The total padding ends up being w_pad + wspace.
+
+ hspace: scalar
+ Height padding between subplots, expressed as a fraction of the
+ subplot width. The total padding ends up being h_pad + hspace.
+
+ """
+
+ todo = ['w_pad', 'h_pad', 'wspace', 'hspace']
+ for td in todo:
+ if td in kwargs and kwargs[td] is not None:
+ self._constrained_layout_pads[td] = kwargs[td]
+ else:
+ self._constrained_layout_pads[td] = (
+ rcParams['figure.constrained_layout.' + td])
+
+ def get_constrained_layout_pads(self, relative=False):
+ """
+ Get padding for ``constrained_layout``.
+
+        Returns the tuple ``(w_pad, h_pad, wspace, hspace)``: paddings in
+        inches and spacings as fractions of the subplot width and height.
+
+ See :doc:`/tutorials/intermediate/constrainedlayout_guide`.
+
+ Parameters
+ ----------
+
+ relative : boolean
+ If `True`, then convert from inches to figure relative.
+ """
+ w_pad = self._constrained_layout_pads['w_pad']
+ h_pad = self._constrained_layout_pads['h_pad']
+ wspace = self._constrained_layout_pads['wspace']
+ hspace = self._constrained_layout_pads['hspace']
+
+ if relative and ((w_pad is not None) or (h_pad is not None)):
+ renderer0 = layoutbox.get_renderer(self)
+ dpi = renderer0.dpi
+ w_pad = w_pad * dpi / renderer0.width
+ h_pad = h_pad * dpi / renderer0.height
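+            # e.g. a w_pad of 3/72 inch on a 640-pixel-wide canvas at 100 dpi
+            # becomes (3/72) * 100 / 640 ~= 0.0065 of the figure width
+            # (illustrative numbers).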
+
+ return w_pad, h_pad, wspace, hspace
+
+ def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right', which=None):
+ """
+ Date ticklabels often overlap, so it is useful to rotate them
+ and right align them. Also, a common use case is a number of
+        subplots with shared x-axes where the x-axis is date data. The
+ ticklabels are often long, and it helps to rotate them on the
+ bottom subplot and turn them off on other subplots, as well as
+ turn off xlabels.
+
+ Parameters
+ ----------
+ bottom : scalar
+ The bottom of the subplots for :meth:`subplots_adjust`.
+
+ rotation : angle in degrees
+ The rotation of the xtick labels.
+
+ ha : string
+ The horizontal alignment of the xticklabels.
+
+ which : {None, 'major', 'minor', 'both'}
+ Selects which ticklabels to rotate. Default is None which works
+ the same as major.
+ """
+ allsubplots = all(hasattr(ax, 'is_last_row') for ax in self.axes)
+ if len(self.axes) == 1:
+ for label in self.axes[0].get_xticklabels(which=which):
+ label.set_ha(ha)
+ label.set_rotation(rotation)
+ else:
+ if allsubplots:
+ for ax in self.get_axes():
+ if ax.is_last_row():
+ for label in ax.get_xticklabels(which=which):
+ label.set_ha(ha)
+ label.set_rotation(rotation)
+ else:
+ for label in ax.get_xticklabels(which=which):
+ label.set_visible(False)
+ ax.set_xlabel('')
+
+ if allsubplots:
+ self.subplots_adjust(bottom=bottom)
+ self.stale = True
+
+ def get_children(self):
+ """Get a list of artists contained in the figure."""
+ children = [self.patch]
+ children.extend(self.artists)
+ children.extend(self.axes)
+ children.extend(self.lines)
+ children.extend(self.patches)
+ children.extend(self.texts)
+ children.extend(self.images)
+ children.extend(self.legends)
+ return children
+
+ def contains(self, mouseevent):
+ """
+ Test whether the mouse event occurred on the figure.
+
+ Returns
+ -------
+ bool, {}
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+ inside = self.bbox.contains(mouseevent.x, mouseevent.y)
+ return inside, {}
+
+ def get_window_extent(self, *args, **kwargs):
+ """
+ Return the figure bounding box in display space. Arguments are ignored.
+ """
+ return self.bbox
+
+ def suptitle(self, t, **kwargs):
+ """
+ Add a centered title to the figure.
+
+ kwargs are :class:`matplotlib.text.Text` properties. Using figure
+ coordinates, the defaults are:
+
+ x : 0.5
+ The x location of the text in figure coords
+
+ y : 0.98
+ The y location of the text in figure coords
+
+ horizontalalignment : 'center'
+ The horizontal alignment of the text
+
+ verticalalignment : 'top'
+ The vertical alignment of the text
+
+ If the `fontproperties` keyword argument is given then the
+ rcParams defaults for `fontsize` (`figure.titlesize`) and
+ `fontweight` (`figure.titleweight`) will be ignored in favour
+ of the `FontProperties` defaults.
+
+ A :class:`matplotlib.text.Text` instance is returned.
+
+ Example::
+
+ fig.suptitle('this is the figure title', fontsize=12)
+ """
+ manual_position = ('x' in kwargs or 'y' in kwargs)
+
+ x = kwargs.pop('x', 0.5)
+ y = kwargs.pop('y', 0.98)
+
+ if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs):
+ kwargs['horizontalalignment'] = 'center'
+ if ('verticalalignment' not in kwargs) and ('va' not in kwargs):
+ kwargs['verticalalignment'] = 'top'
+
+ if 'fontproperties' not in kwargs:
+ if 'fontsize' not in kwargs and 'size' not in kwargs:
+ kwargs['size'] = rcParams['figure.titlesize']
+ if 'fontweight' not in kwargs and 'weight' not in kwargs:
+ kwargs['weight'] = rcParams['figure.titleweight']
+
+ sup = self.text(x, y, t, **kwargs)
+ if self._suptitle is not None:
+ self._suptitle.set_text(t)
+ self._suptitle.set_position((x, y))
+ self._suptitle.update_from(sup)
+ sup.remove()
+ else:
+ self._suptitle = sup
+ self._suptitle._layoutbox = None
+ if self._layoutbox is not None and not manual_position:
+ w_pad, h_pad, wspace, hspace = \
+ self.get_constrained_layout_pads(relative=True)
+ figlb = self._layoutbox
+ self._suptitle._layoutbox = layoutbox.LayoutBox(
+ parent=figlb, artist=self._suptitle,
+ name=figlb.name+'.suptitle')
+ # stack the suptitle on top of all the children.
+ # Some day this should be on top of all the children in the
+ # gridspec only.
+ for child in figlb.children:
+ if child is not self._suptitle._layoutbox:
+ layoutbox.vstack([self._suptitle._layoutbox,
+ child],
+ padding=h_pad*2., strength='required')
+ self.stale = True
+ return self._suptitle
+
+ def set_canvas(self, canvas):
+ """
+ Set the canvas that contains the figure
+
+ ACCEPTS: a FigureCanvas instance
+ """
+ self.canvas = canvas
+
+ @cbook.deprecated("2.0")
+ def hold(self, b=None):
+ """
+ Set the hold state. If hold is None (default), toggle the
+ hold state. Else set the hold state to boolean value b.
+
+ e.g.::
+
+ hold() # toggle hold
+ hold(True) # hold is on
+ hold(False) # hold is off
+
+ All "hold" machinery is deprecated.
+ """
+ if b is None:
+ self._hold = not self._hold
+ else:
+ self._hold = b
+
+ def figimage(self, X, xo=0, yo=0, alpha=None, norm=None, cmap=None,
+ vmin=None, vmax=None, origin=None, resize=False, **kwargs):
+ """
+ Add a non-resampled image to the figure.
+
+ The image is attached to the lower or upper left corner depending on
+ *origin*.
+
+ Parameters
+ ----------
+ X
+ The image data. This is an array of one of the following shapes:
+
+ - MxN: luminance (grayscale) values
+ - MxNx3: RGB values
+ - MxNx4: RGBA values
+
+ xo, yo : int
+ The *x*/*y* image offset in pixels.
+
+ alpha : None or float
+ The alpha blending value.
+
+ norm : :class:`matplotlib.colors.Normalize`
+ A :class:`.Normalize` instance to map the luminance to the
+ interval [0, 1].
+
+ cmap : str or :class:`matplotlib.colors.Colormap`
+ The colormap to use. Default: :rc:`image.cmap`.
+
+ vmin, vmax : scalar
+ If *norm* is not given, these values set the data limits for the
+ colormap.
+
+ origin : {'upper', 'lower'}
+ Indicates where the [0, 0] index of the array is in the upper left
+ or lower left corner of the axes. Defaults to :rc:`image.origin`.
+
+ resize : bool
+ If *True*, resize the figure to match the given image size.
+
+ Returns
+ -------
+ :class:`matplotlib.image.FigureImage`
+
+ Other Parameters
+ ----------------
+ **kwargs
+ Additional kwargs are `.Artist` kwargs passed on to `.FigureImage`.
+
+ Notes
+ -----
+ figimage complements the axes image
+ (:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled
+ to fit the current axes. If you want a resampled image to
+ fill the entire figure, you can define an
+ :class:`~matplotlib.axes.Axes` with extent [0,0,1,1].
+
+
+ Examples::
+
+ f = plt.figure()
+ nx = int(f.get_figwidth() * f.dpi)
+ ny = int(f.get_figheight() * f.dpi)
+ data = np.random.random((ny, nx))
+ f.figimage(data)
+ plt.show()
+
+ """
+
+ if not self._hold:
+ self.clf()
+
+ if resize:
+ dpi = self.get_dpi()
+ figsize = [x / dpi for x in (X.shape[1], X.shape[0])]
+ self.set_size_inches(figsize, forward=True)
+
+ im = FigureImage(self, cmap, norm, xo, yo, origin, **kwargs)
+ im.stale_callback = _stale_figure_callback
+
+ im.set_array(X)
+ im.set_alpha(alpha)
+ if norm is None:
+ im.set_clim(vmin, vmax)
+ self.images.append(im)
+ im._remove_method = lambda h: self.images.remove(h)
+ self.stale = True
+ return im
+
+ def set_size_inches(self, w, h=None, forward=True):
+ """Set the figure size in inches (1in == 2.54cm)
+
+ Usage ::
+
+ fig.set_size_inches(w, h) # OR
+ fig.set_size_inches((w, h))
+
+ optional kwarg *forward=True* will cause the canvas size to be
+ automatically updated; e.g., you can resize the figure window
+ from the shell
+
+ ACCEPTS: a w, h tuple with w, h in inches
+
+ See Also
+ --------
+        matplotlib.figure.Figure.get_size_inches
+ """
+
+ # the width and height have been passed in as a tuple to the first
+ # argument, so unpack them
+ if h is None:
+ w, h = w
+ if not all(np.isfinite(_) for _ in (w, h)):
+ raise ValueError('figure size must be finite not '
+ '({}, {})'.format(w, h))
+ self.bbox_inches.p1 = w, h
+
+ if forward:
+ canvas = getattr(self, 'canvas')
+ if canvas is not None:
+ ratio = getattr(self.canvas, '_dpi_ratio', 1)
+ dpival = self.dpi / ratio
+ canvasw = w * dpival
+ canvash = h * dpival
+ manager = getattr(self.canvas, 'manager', None)
+ if manager is not None:
+ manager.resize(int(canvasw), int(canvash))
+ self.stale = True
+
+ def get_size_inches(self):
+ """
+ Returns the current size of the figure in inches.
+
+ Returns
+ -------
+ size : ndarray
+ The size (width, height) of the figure in inches.
+
+ See Also
+ --------
+        matplotlib.figure.Figure.set_size_inches
+ """
+ return np.array(self.bbox_inches.p1)
+
+ def get_edgecolor(self):
+ """Get the edge color of the Figure rectangle."""
+ return self.patch.get_edgecolor()
+
+ def get_facecolor(self):
+ """Get the face color of the Figure rectangle."""
+ return self.patch.get_facecolor()
+
+ def get_figwidth(self):
+ """Return the figure width as a float."""
+ return self.bbox_inches.width
+
+ def get_figheight(self):
+ """Return the figure height as a float."""
+ return self.bbox_inches.height
+
+ def get_dpi(self):
+ """Return the resolution in dots per inch as a float."""
+ return self.dpi
+
+ def get_frameon(self):
+ """Return whether the figure frame will be drawn."""
+ return self.frameon
+
+ def set_edgecolor(self, color):
+ """
+ Set the edge color of the Figure rectangle.
+
+ ACCEPTS: any matplotlib color - see help(colors)
+ """
+ self.patch.set_edgecolor(color)
+
+ def set_facecolor(self, color):
+ """
+ Set the face color of the Figure rectangle.
+
+ ACCEPTS: any matplotlib color - see help(colors)
+ """
+ self.patch.set_facecolor(color)
+
+ def set_dpi(self, val):
+ """
+ Set the dots-per-inch of the figure.
+
+ ACCEPTS: float
+ """
+ self.dpi = val
+ self.stale = True
+
+ def set_figwidth(self, val, forward=True):
+ """
+ Set the width of the figure in inches.
+
+ ACCEPTS: float
+ """
+ self.set_size_inches(val, self.get_figheight(), forward=forward)
+
+ def set_figheight(self, val, forward=True):
+ """
+ Set the height of the figure in inches.
+
+ ACCEPTS: float
+ """
+ self.set_size_inches(self.get_figwidth(), val, forward=forward)
+
+ def set_frameon(self, b):
+ """
+ Set whether the figure frame (background) is displayed or invisible
+
+ ACCEPTS: boolean
+ """
+ self.frameon = b
+ self.stale = True
+
+ def delaxes(self, ax):
+ """
+ Remove the `~matplotlib.axes.Axes` *ax* from the figure and update the
+ current axes.
+ """
+ self._axstack.remove(ax)
+ for func in self._axobservers:
+ func(self)
+ self.stale = True
+
+ def _make_key(self, *args, **kwargs):
+ """Make a hashable key out of args and kwargs."""
+
+ def fixitems(items):
+ # items may have arrays and lists in them, so convert them
+ # to tuples for the key
+ ret = []
+ for k, v in items:
+ # some objects can define __getitem__ without being
+ # iterable and in those cases the conversion to tuples
+ # will fail. So instead of using the iterable(v) function
+ # we simply try and convert to a tuple, and proceed if not.
+ try:
+ v = tuple(v)
+ except Exception:
+ pass
+ ret.append((k, v))
+ return tuple(ret)
+
+ def fixlist(args):
+ ret = []
+ for a in args:
+ if iterable(a):
+ a = tuple(a)
+ ret.append(a)
+ return tuple(ret)
+
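+        # e.g. _make_key([0.1, 0.1, 0.8, 0.8], polar=True) returns
+        # (((0.1, 0.1, 0.8, 0.8),), (('polar', True),)): lists become tuples
+        # so the key is hashable (illustrative values).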
+ key = fixlist(args), fixitems(six.iteritems(kwargs))
+ return key
+
+ def add_axes(self, *args, **kwargs):
+ """
+ Add an axes to the figure.
+
+ Call signature::
+
+ add_axes(rect, projection=None, polar=False, **kwargs)
+
+ Parameters
+ ----------
+ rect : sequence of float
+ The dimensions [left, bottom, width, height] of the new axes. All
+ quantities are in fractions of figure width and height.
+
+ projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
+'polar', 'rectilinear'}, optional
+ The projection type of the axes.
+
+ polar : boolean, optional
+ If True, equivalent to projection='polar'.
+
+ **kwargs
+ This method also takes the keyword arguments for
+ :class:`~matplotlib.axes.Axes`.
+
+ Returns
+ -------
+ axes : Axes
+ The added axes.
+
+ Examples
+ --------
+ Some simple examples::
+
+ rect = l, b, w, h
+ fig.add_axes(rect)
+ fig.add_axes(rect, frameon=False, facecolor='g')
+ fig.add_axes(rect, polar=True)
+ fig.add_axes(rect, projection='polar')
+ fig.add_axes(ax)
+
+ If the figure already has an axes with the same parameters, then it
+ will simply make that axes current and return it. This behavior
+ has been deprecated as of Matplotlib 2.1. Meanwhile, if you do
+ not want this behavior (i.e., you want to force the creation of a
+ new Axes), you must use a unique set of args and kwargs. The axes
+ :attr:`~matplotlib.axes.Axes.label` attribute has been exposed for this
+ purpose: if you want two axes that are otherwise identical to be added
+ to the figure, make sure you give them unique labels::
+
+ fig.add_axes(rect, label='axes1')
+ fig.add_axes(rect, label='axes2')
+
+ In rare circumstances, add_axes may be called with a single
+ argument, an Axes instance already created in the present
+ figure but not in the figure's list of axes. For example,
+ if an axes has been removed with :meth:`delaxes`, it can
+ be restored with::
+
+ fig.add_axes(ax)
+
+ In all cases, the :class:`~matplotlib.axes.Axes` instance
+ will be returned.
+ """
+ if not len(args):
+ return
+
+ # shortcut the projection "key" modifications later on, if an axes
+ # with the exact args/kwargs exists, return it immediately.
+ key = self._make_key(*args, **kwargs)
+ ax = self._axstack.get(key)
+ if ax is not None:
+ self.sca(ax)
+ return ax
+
+ if isinstance(args[0], Axes):
+ a = args[0]
+ if a.get_figure() is not self:
+ raise ValueError(
+ "The Axes must have been created in the present figure")
+ else:
+ rect = args[0]
+ if not np.isfinite(rect).all():
+ raise ValueError('all entries in rect must be finite '
+ 'not {}'.format(rect))
+ projection_class, kwargs, key = process_projection_requirements(
+ self, *args, **kwargs)
+
+ # check that an axes of this type doesn't already exist, if it
+ # does, set it as active and return it
+ ax = self._axstack.get(key)
+ if isinstance(ax, projection_class):
+ self.sca(ax)
+ return ax
+
+ # create the new axes using the axes class given
+ a = projection_class(self, rect, **kwargs)
+
+ self._axstack.add(key, a)
+ self.sca(a)
+ a._remove_method = self.__remove_ax
+ self.stale = True
+ a.stale_callback = _stale_figure_callback
+ return a
+
+ def add_subplot(self, *args, **kwargs):
+ """
+ Add a subplot.
+
+ Call signatures::
+
+ add_subplot(nrows, ncols, index, **kwargs)
+ add_subplot(pos, **kwargs)
+
+ Parameters
+ ----------
+ *args
+ Either a 3-digit integer or three separate integers
+ describing the position of the subplot. If the three
+ integers are R, C, and P in order, the subplot will take
+ the Pth position on a grid with R rows and C columns.
+
+ projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
+'polar', 'rectilinear'}, optional
+ The projection type of the axes.
+
+ polar : boolean, optional
+ If True, equivalent to projection='polar'.
+
+ **kwargs
+ This method also takes the keyword arguments for
+ :class:`~matplotlib.axes.Axes`.
+
+ Returns
+ -------
+ axes : Axes
+ The axes of the subplot.
+
+ Notes
+ -----
+ If the figure already has a subplot with key (*args*,
+ *kwargs*) then it will simply make that subplot current and
+ return it. This behavior is deprecated.
+
+ Examples
+ --------
+ ::
+
+ fig.add_subplot(111)
+
+ # equivalent but more general
+ fig.add_subplot(1, 1, 1)
+
+ # add subplot with red background
+ fig.add_subplot(212, facecolor='r')
+
+ # add a polar subplot
+ fig.add_subplot(111, projection='polar')
+
+ # add Subplot instance sub
+ fig.add_subplot(sub)
+
+ See Also
+ --------
+ matplotlib.pyplot.subplot : for an explanation of the args.
+ """
+ if not len(args):
+ return
+
+ if len(args) == 1 and isinstance(args[0], int):
+ if not 100 <= args[0] <= 999:
+ raise ValueError("Integer subplot specification must be a "
+ "three-digit number, not {}".format(args[0]))
+ args = tuple(map(int, str(args[0])))
+
+ if isinstance(args[0], SubplotBase):
+
+ a = args[0]
+ if a.get_figure() is not self:
+ raise ValueError(
+ "The Subplot must have been created in the present figure")
+ # make a key for the subplot (which includes the axes object id
+ # in the hash)
+ key = self._make_key(*args, **kwargs)
+ else:
+ projection_class, kwargs, key = process_projection_requirements(
+ self, *args, **kwargs)
+
+ # try to find the axes with this key in the stack
+ ax = self._axstack.get(key)
+
+ if ax is not None:
+ if isinstance(ax, projection_class):
+ # the axes already existed, so set it as active & return
+ self.sca(ax)
+ return ax
+ else:
+ # Undocumented convenience behavior:
+ # subplot(111); subplot(111, projection='polar')
+ # will replace the first with the second.
+ # Without this, add_subplot would be simpler and
+ # more similar to add_axes.
+ self._axstack.remove(ax)
+
+ a = subplot_class_factory(projection_class)(self, *args, **kwargs)
+ self._axstack.add(key, a)
+ self.sca(a)
+ a._remove_method = self.__remove_ax
+ self.stale = True
+ a.stale_callback = _stale_figure_callback
+ return a
+
+ def subplots(self, nrows=1, ncols=1, sharex=False, sharey=False,
+ squeeze=True, subplot_kw=None, gridspec_kw=None):
+ """
+ Add a set of subplots to this figure.
+
+ Parameters
+ ----------
+ nrows, ncols : int, default: 1
+ Number of rows/cols of the subplot grid.
+
+ sharex, sharey : bool or {'none', 'all', 'row', 'col'}, default: False
+ Controls sharing of properties among x (`sharex`) or y (`sharey`)
+ axes:
+
+ - True or 'all': x- or y-axis will be shared among all
+ subplots.
+ - False or 'none': each subplot x- or y-axis will be
+ independent.
+ - 'row': each subplot row will share an x- or y-axis.
+ - 'col': each subplot column will share an x- or y-axis.
+
+ When subplots have a shared x-axis along a column, only the x tick
+ labels of the bottom subplot are created. Similarly, when subplots
+ have a shared y-axis along a row, only the y tick labels of the
+ first column subplot are created. To later turn other subplots'
+ ticklabels on, use :meth:`~matplotlib.axes.Axes.tick_params`.
+
+ squeeze : bool, optional, default: True
+ - If True, extra dimensions are squeezed out from the returned
+ array of Axes:
+
+ - if only one subplot is constructed (nrows=ncols=1), the
+ resulting single Axes object is returned as a scalar.
+ - for Nx1 or 1xM subplots, the returned object is a 1D numpy
+ object array of Axes objects.
+ - for NxM, subplots with N>1 and M>1 are returned
+ as a 2D array.
+
+ - If False, no squeezing at all is done: the returned Axes object
+ is always a 2D array containing Axes instances, even if it ends
+ up being 1x1.
+
+ subplot_kw : dict, default: {}
+ Dict with keywords passed to the
+ :meth:`~matplotlib.figure.Figure.add_subplot` call used to create
+ each subplot.
+
+ gridspec_kw : dict, default: {}
+ Dict with keywords passed to the
+ :class:`~matplotlib.gridspec.GridSpec` constructor used to create
+ the grid the subplots are placed on.
+
+ Returns
+ -------
+ ax : single Axes object or array of Axes objects
+ The added axes. The dimensions of the resulting array can be
+ controlled with the squeeze keyword, see above.
+
+ See Also
+ --------
+ pyplot.subplots : pyplot API; docstring includes examples.
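+
+ Examples
+ --------
+ A minimal usage sketch (``fig`` is an existing `.Figure`)::
+
+ # 2x2 grid sharing both axes; inner tick labels are hidden
+ axs = fig.subplots(2, 2, sharex=True, sharey=True)
+ axs[0, 0].plot([1, 2, 3])
+
+ # keep the 2D array shape even for a single subplot
+ axs = fig.subplots(squeeze=False)
+ axs[0, 0].set_title('only axes')
+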
+ """
+
+ # for backwards compatibility
+ if isinstance(sharex, bool):
+ sharex = "all" if sharex else "none"
+ if isinstance(sharey, bool):
+ sharey = "all" if sharey else "none"
+ share_values = ["all", "row", "col", "none"]
+ if sharex not in share_values:
+ # This check was added because it is very easy to type
+ # `subplots(1, 2, 1)` when `subplot(1, 2, 1)` was intended.
+ # In most cases, no error will ever occur, but mysterious behavior
+ # will result because what was intended to be the subplot index is
+ # instead treated as a bool for sharex.
+ if isinstance(sharex, int):
+ warnings.warn(
+ "sharex argument to subplots() was an integer. "
+ "Did you intend to use subplot() (without 's')?")
+
+ raise ValueError("sharex [%s] must be one of %s" %
+ (sharex, share_values))
+ if sharey not in share_values:
+ raise ValueError("sharey [%s] must be one of %s" %
+ (sharey, share_values))
+ if subplot_kw is None:
+ subplot_kw = {}
+ if gridspec_kw is None:
+ gridspec_kw = {}
+
+ if self.get_constrained_layout():
+ gs = GridSpec(nrows, ncols, figure=self, **gridspec_kw)
+ else:
+ # this should turn constrained_layout off if we don't want it
+ gs = GridSpec(nrows, ncols, figure=None, **gridspec_kw)
+
+ # Create array to hold all axes.
+ axarr = np.empty((nrows, ncols), dtype=object)
+ for row in range(nrows):
+ for col in range(ncols):
+ shared_with = {"none": None, "all": axarr[0, 0],
+ "row": axarr[row, 0], "col": axarr[0, col]}
+ subplot_kw["sharex"] = shared_with[sharex]
+ subplot_kw["sharey"] = shared_with[sharey]
+ axarr[row, col] = self.add_subplot(gs[row, col], **subplot_kw)
+
+ # turn off redundant tick labeling
+ if sharex in ["col", "all"]:
+ # turn off all but the bottom row
+ for ax in axarr[:-1, :].flat:
+ ax.xaxis.set_tick_params(which='both',
+ labelbottom=False, labeltop=False)
+ ax.xaxis.offsetText.set_visible(False)
+ if sharey in ["row", "all"]:
+ # turn off all but the first column
+ for ax in axarr[:, 1:].flat:
+ ax.yaxis.set_tick_params(which='both',
+ labelleft=False, labelright=False)
+ ax.yaxis.offsetText.set_visible(False)
+
+ if squeeze:
+ # Discarding unneeded dimensions that equal 1. If we only have one
+ # subplot, just return it instead of a 1-element array.
+ return axarr.item() if axarr.size == 1 else axarr.squeeze()
+ else:
+ # Returned axis array will be always 2-d, even if nrows=ncols=1.
+ return axarr
+
+ def __remove_ax(self, ax):
+ def _reset_loc_form(axis):
+ axis.set_major_formatter(axis.get_major_formatter())
+ axis.set_major_locator(axis.get_major_locator())
+ axis.set_minor_formatter(axis.get_minor_formatter())
+ axis.set_minor_locator(axis.get_minor_locator())
+
+ def _break_share_link(ax, grouper):
+ siblings = grouper.get_siblings(ax)
+ if len(siblings) > 1:
+ grouper.remove(ax)
+ for last_ax in siblings:
+ if ax is last_ax:
+ continue
+ return last_ax
+ return None
+
+ self.delaxes(ax)
+ last_ax = _break_share_link(ax, ax._shared_y_axes)
+ if last_ax is not None:
+ _reset_loc_form(last_ax.yaxis)
+
+ last_ax = _break_share_link(ax, ax._shared_x_axes)
+ if last_ax is not None:
+ _reset_loc_form(last_ax.xaxis)
+
+ def clf(self, keep_observers=False):
+ """
+ Clear the figure.
+
+ Set *keep_observers* to True if, for example,
+ a gui widget is tracking the axes in the figure.
+ """
+ self.suppressComposite = None
+ self.callbacks = cbook.CallbackRegistry()
+
+ for ax in tuple(self.axes): # Iterate over the copy.
+ ax.cla()
+ self.delaxes(ax) # removes ax from self._axstack
+
+ toolbar = getattr(self.canvas, 'toolbar', None)
+ if toolbar is not None:
+ toolbar.update()
+ self._axstack.clear()
+ self.artists = []
+ self.lines = []
+ self.patches = []
+ self.texts = []
+ self.images = []
+ self.legends = []
+ if not keep_observers:
+ self._axobservers = []
+ self._suptitle = None
+ if self.get_constrained_layout():
+ layoutbox.nonetree(self._layoutbox)
+ self.stale = True
+
+ def clear(self, keep_observers=False):
+ """
+ Clear the figure -- synonym for :meth:`clf`.
+ """
+ self.clf(keep_observers=keep_observers)
+
+ @allow_rasterization
+ def draw(self, renderer):
+ """
+ Render the figure using :class:`matplotlib.backend_bases.RendererBase`
+ instance *renderer*.
+ """
+
+ # draw the figure bounding box, perhaps none for white figure
+ if not self.get_visible():
+ return
+
+ artists = sorted(
+ (artist for artist in (self.patches + self.lines + self.artists
+ + self.images + self.axes + self.texts
+ + self.legends)
+ if not artist.get_animated()),
+ key=lambda artist: artist.get_zorder())
+
+ try:
+ renderer.open_group('figure')
+ if self.get_constrained_layout() and self.axes:
+ self.execute_constrained_layout(renderer)
+ if self.get_tight_layout() and self.axes:
+ try:
+ self.tight_layout(renderer,
+ **self._tight_parameters)
+ except ValueError:
+ pass
+ # ValueError can occur when resizing a window.
+
+ if self.frameon:
+ self.patch.draw(renderer)
+
+ mimage._draw_list_compositing_images(
+ renderer, self, artists, self.suppressComposite)
+
+ renderer.close_group('figure')
+ finally:
+ self.stale = False
+
+ self._cachedRenderer = renderer
+ self.canvas.draw_event(renderer)
+
+ def draw_artist(self, a):
+ """
+ Draw :class:`matplotlib.artist.Artist` instance *a* only.
+ This is available only after the figure is drawn.
+ """
+ if self._cachedRenderer is None:
+ raise AttributeError("draw_artist can only be used after an "
+ "initial draw which caches the renderer")
+ a.draw(self._cachedRenderer)
+
+ def get_axes(self):
+ """
+ Return a list of axes in the Figure. You can access and modify the
+ axes in the Figure through this list.
+
+ Do not modify the list itself. Instead, use `~.Figure.add_axes`,
+ `~.Figure.add_subplot` or `~.Figure.delaxes` to add or remove an axes.
+
+ Note: This is equivalent to the property `~.Figure.axes`.
+ """
+ return self.axes
+
+ @docstring.dedent_interpd
+ def legend(self, *args, **kwargs):
+ """
+ Place a legend on the figure.
+
+ To make a legend from existing artists on every axes::
+
+ legend()
+
+ To make a legend for a list of lines and labels::
+
+ legend( (line1, line2, line3),
+ ('label1', 'label2', 'label3'),
+ loc='upper right')
+
+ These can also be specified by keyword::
+
+ legend(handles=(line1, line2, line3),
+ labels=('label1', 'label2', 'label3'),
+ loc='upper right')
+
+ Parameters
+ ----------
+
+ handles : sequence of `.Artist`, optional
+ A list of Artists (lines, patches) to be added to the legend.
+ Use this together with *labels*, if you need full control on what
+ is shown in the legend and the automatic mechanism described above
+ is not sufficient.
+
+ The length of handles and labels should be the same in this
+ case. If they are not, they are truncated to the smaller length.
+
+ labels : sequence of strings, optional
+ A list of labels to show next to the artists.
+ Use this together with *handles*, if you need full control on what
+ is shown in the legend and the automatic mechanism described above
+ is not sufficient.
+
+ Other Parameters
+ ----------------
+
+ loc : int or string or pair of floats, default: 'upper right'
+ The location of the legend. Possible codes are:
+
+ =============== =============
+ Location String Location Code
+ =============== =============
+ 'best' 0
+ 'upper right' 1
+ 'upper left' 2
+ 'lower left' 3
+ 'lower right' 4
+ 'right' 5
+ 'center left' 6
+ 'center right' 7
+ 'lower center' 8
+ 'upper center' 9
+ 'center' 10
+ =============== =============
+
+
+ Alternatively, it can be a 2-tuple giving ``x, y`` of the lower-left
+ corner of the legend in axes coordinates (in which case
+ ``bbox_to_anchor`` will be ignored).
+
+ bbox_to_anchor : `.BboxBase` or pair of floats
+ Specify any arbitrary location for the legend in `bbox_transform`
+ coordinates (default Axes coordinates).
+
+ For example, to put the legend's upper right hand corner in the
+ center of the axes the following keywords can be used::
+
+ loc='upper right', bbox_to_anchor=(0.5, 0.5)
+
+ ncol : integer
+ The number of columns that the legend has. Default is 1.
+
+ prop : None or :class:`matplotlib.font_manager.FontProperties` or dict
+ The font properties of the legend. If None (default), the current
+ :data:`matplotlib.rcParams` will be used.
+
+ fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium', \
+'large', 'x-large', 'xx-large'}
+ Controls the font size of the legend. If the value is numeric the
+ size will be the absolute font size in points. String values are
+ relative to the current default font size. This argument is only
+ used if `prop` is not specified.
+
+ numpoints : None or int
+ The number of marker points in the legend when creating a legend
+ entry for a `.Line2D` (line).
+ Default is ``None``, which will take the value from
+ :rc:`legend.numpoints`.
+
+ scatterpoints : None or int
+ The number of marker points in the legend when creating
+ a legend entry for a `.PathCollection` (scatter plot).
+ Default is ``None``, which will take the value from
+ :rc:`legend.scatterpoints`.
+
+ scatteryoffsets : iterable of floats
+ The vertical offset (relative to the font size) for the markers
+ created for a scatter plot legend entry. 0.0 is at the base of
+ the legend text, and 1.0 is at the top. To draw all markers at the
+ same height, set to ``[0.5]``. Default is ``[0.375, 0.5, 0.3125]``.
+
+ markerscale : None or int or float
+ The relative size of legend markers compared with the originally
+ drawn ones.
+ Default is ``None``, which will take the value from
+ :rc:`legend.markerscale`.
+
+ markerfirst : bool
+ If *True*, legend marker is placed to the left of the legend label.
+ If *False*, legend marker is placed to the right of the legend
+ label.
+ Default is *True*.
+
+ frameon : None or bool
+ Control whether the legend should be drawn on a patch
+ (frame).
+ Default is ``None``, which will take the value from
+ :rc:`legend.frameon`.
+
+ fancybox : None or bool
+ Control whether round edges should be enabled around the
+ :class:`~matplotlib.patches.FancyBboxPatch` which makes up the
+ legend's background.
+ Default is ``None``, which will take the value from
+ :rc:`legend.fancybox`.
+
+ shadow : None or bool
+ Control whether to draw a shadow behind the legend.
+ Default is ``None``, which will take the value from
+ :rc:`legend.shadow`.
+
+ framealpha : None or float
+ Control the alpha transparency of the legend's background.
+ Default is ``None``, which will take the value from
+ :rc:`legend.framealpha`. If shadow is activated and
+ *framealpha* is ``None``, the default value is ignored.
+
+ facecolor : None or "inherit" or a color spec
+ Control the legend's background color.
+ Default is ``None``, which will take the value from
+ :rc:`legend.facecolor`. If ``"inherit"``, it will take
+ :rc:`axes.facecolor`.
+
+ edgecolor : None or "inherit" or a color spec
+ Control the legend's background patch edge color.
+ Default is ``None``, which will take the value from
+ :rc:`legend.edgecolor`. If ``"inherit"``, it will take
+ :rc:`axes.edgecolor`.
+
+ mode : {"expand", None}
+ If `mode` is set to ``"expand"`` the legend will be horizontally
+ expanded to fill the axes area (or `bbox_to_anchor` if it defines
+ the legend's size).
+
+ bbox_transform : None or :class:`matplotlib.transforms.Transform`
+ The transform for the bounding box (`bbox_to_anchor`). For a value
+ of ``None`` (default) the Axes'
+ :data:`~matplotlib.axes.Axes.transAxes` transform will be used.
+
+ title : str or None
+ The legend's title. Default is no title (``None``).
+
+ borderpad : float or None
+ The fractional whitespace inside the legend border.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.borderpad`.
+
+ labelspacing : float or None
+ The vertical space between the legend entries.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.labelspacing`.
+
+ handlelength : float or None
+ The length of the legend handles.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.handlelength`.
+
+ handletextpad : float or None
+ The pad between the legend handle and text.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.handletextpad`.
+
+ borderaxespad : float or None
+ The pad between the axes and legend border.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.borderaxespad`.
+
+ columnspacing : float or None
+ The spacing between columns.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.columnspacing`.
+
+ handler_map : dict or None
+ The custom dictionary mapping instances or types to a legend
+ handler. This `handler_map` updates the default handler map
+ found at :func:`matplotlib.legend.Legend.get_legend_handler_map`.
+
+ Returns
+ -------
+ :class:`matplotlib.legend.Legend` instance
+
+ Notes
+ -----
+ Not all kinds of artist are supported by the legend command. See
+ :doc:`/tutorials/intermediate/legend_guide` for details.
+ """
+
+ handles, labels, extra_args, kwargs = mlegend._parse_legend_args(
+ self.axes,
+ *args,
+ **kwargs)
+ # check for third arg
+ if len(extra_args):
+ # cbook.warn_deprecated(
+ # "2.1",
+ # "Figure.legend will accept no more than two "
+ # "positional arguments in the future. Use "
+ # "'fig.legend(handles, labels, loc=location)' "
+ # "instead.")
+ # kwargs['loc'] = extra_args[0]
+ # extra_args = extra_args[1:]
+ pass
+ l = mlegend.Legend(self, handles, labels, *extra_args, **kwargs)
+ self.legends.append(l)
+ l._remove_method = lambda h: self.legends.remove(h)
+ self.stale = True
+ return l
+
+ @docstring.dedent_interpd
+ def text(self, x, y, s, *args, **kwargs):
+ """
+ Add text to figure.
+
+ Call signature::
+
+ text(x, y, s, fontdict=None, **kwargs)
+
+ Add text to figure at location *x*, *y* (relative 0-1
+ coords). See :func:`~matplotlib.pyplot.text` for the meaning
+ of the other arguments.
+
+ kwargs control the :class:`~matplotlib.text.Text` properties:
+
+ %(Text)s
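+
+ A minimal usage sketch (the coordinates and wording are only
+ illustrative)::
+
+ # place a caption centered near the bottom of the figure
+ t = fig.text(0.5, 0.02, 'Figure-level caption',
+ ha='center', fontsize='small')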
+ """
+
+ override = _process_text_args({}, *args, **kwargs)
+ t = Text(x=x, y=y, text=s)
+
+ t.update(override)
+ self._set_artist_props(t)
+ self.texts.append(t)
+ t._remove_method = lambda h: self.texts.remove(h)
+ self.stale = True
+ return t
+
+ def _set_artist_props(self, a):
+ if a != self:
+ a.set_figure(self)
+ a.stale_callback = _stale_figure_callback
+ a.set_transform(self.transFigure)
+
+ @docstring.dedent_interpd
+ def gca(self, **kwargs):
+ """
+ Get the current axes, creating one if necessary.
+
+ The following kwargs are supported for ensuring the returned axes
+ adheres to the given projection etc., and for axes creation if
+ the active axes does not exist:
+
+ %(Axes)s
+
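+ For example, a minimal sketch of the reuse-or-create behavior::
+
+ ax = fig.gca()                     # current axes, created if needed
+ pax = fig.gca(projection='polar')  # reused only if already polar
+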
+ """
+ ckey, cax = self._axstack.current_key_axes()
+ # if there exists an axes on the stack see if it matches
+ # the desired axes configuration
+ if cax is not None:
+
+ # if no kwargs are given just return the current axes
+ # this is a convenience for gca() on axes such as polar etc.
+ if not kwargs:
+ return cax
+
+ # if the user has specified particular projection detail
+ # then build up a key which can represent this
+ else:
+ # we don't want to modify the original kwargs
+ # so take a copy so that we can do what we like to it
+ kwargs_copy = kwargs.copy()
+ projection_class, _, key = process_projection_requirements(
+ self, **kwargs_copy)
+
+ # let the returned axes have any gridspec by removing it from
+ # the key
+ ckey = ckey[1:]
+ key = key[1:]
+
+ # if the cax matches this key then return the axes, otherwise
+ # continue and a new axes will be created
+ if key == ckey and isinstance(cax, projection_class):
+ return cax
+ else:
+ warnings.warn('Requested projection is different from '
+ 'current axis projection, creating new axis '
+ 'with requested projection.', stacklevel=2)
+
+ # no axes found, so create one which spans the figure
+ return self.add_subplot(1, 1, 1, **kwargs)
+
+ def sca(self, a):
+ """Set the current axes to be a and return a."""
+ self._axstack.bubble(a)
+ for func in self._axobservers:
+ func(self)
+ return a
+
+ def _gci(self):
+ """
+ Helper for :func:`~matplotlib.pyplot.gci`. Do not use elsewhere.
+ """
+ # Look first for an image in the current Axes:
+ cax = self._axstack.current_key_axes()[1]
+ if cax is None:
+ return None
+ im = cax._gci()
+ if im is not None:
+ return im
+
+ # If there is no image in the current Axes, search for
+ # one in a previously created Axes. Whether this makes
+ # sense is debatable, but it is the documented behavior.
+ for ax in reversed(self.axes):
+ im = ax._gci()
+ if im is not None:
+ return im
+ return None
+
+ def __getstate__(self):
+ state = super(Figure, self).__getstate__()
+
+ # print('\n\n\nStarting pickle')
+ # the axobservers cannot currently be pickled.
+ # Additionally, the canvas cannot currently be pickled, but this has
+ # the benefit of meaning that a figure can be detached from one canvas,
+ # and re-attached to another.
+ for attr_to_pop in ('_axobservers', 'show',
+ 'canvas', '_cachedRenderer'):
+ state.pop(attr_to_pop, None)
+
+ # add version information to the state
+ state['__mpl_version__'] = _mpl_version
+
+ # check to see if the figure has a manager and whether it is registered
+ # with pyplot
+ if getattr(self.canvas, 'manager', None) is not None:
+ manager = self.canvas.manager
+ import matplotlib._pylab_helpers
+ if manager in list(six.itervalues(
+ matplotlib._pylab_helpers.Gcf.figs)):
+ state['_restore_to_pylab'] = True
+
+ # set all the layoutbox information to None. kiwisolver
+ # objects can't be pickled, so we lose the layout options
+ # at this point.
+ state.pop('_layoutbox', None)
+ # suptitle:
+ if self._suptitle is not None:
+ self._suptitle._layoutbox = None
+
+ return state
+
+ def __setstate__(self, state):
+ version = state.pop('__mpl_version__')
+ restore_to_pylab = state.pop('_restore_to_pylab', False)
+
+ if version != _mpl_version:
+ import warnings
+ warnings.warn("This figure was saved with matplotlib version %s "
+ "and is unlikely to function correctly." %
+ (version, ))
+
+ self.__dict__ = state
+
+ # re-initialise some of the unstored state information
+ self._axobservers = []
+ self.canvas = None
+ self._layoutbox = None
+
+ if restore_to_pylab:
+ # lazy import to avoid circularity
+ import matplotlib.pyplot as plt
+ import matplotlib._pylab_helpers as pylab_helpers
+ allnums = plt.get_fignums()
+ num = max(allnums) + 1 if allnums else 1
+ mgr = plt._backend_mod.new_figure_manager_given_figure(num, self)
+
+ # XXX The following is a copy and paste from pyplot. Consider
+ # factoring to pylab_helpers
+
+ if self.get_label():
+ mgr.set_window_title(self.get_label())
+
+ # make this figure current on button press event
+ def make_active(event):
+ pylab_helpers.Gcf.set_active(mgr)
+
+ mgr._cidgcf = mgr.canvas.mpl_connect('button_press_event',
+ make_active)
+
+ pylab_helpers.Gcf.set_active(mgr)
+ self.number = num
+
+ plt.draw_if_interactive()
+ self.stale = True
+
+ def add_axobserver(self, func):
+ """Whenever the axes state change, ``func(self)`` will be called."""
+ self._axobservers.append(func)
+
+ def savefig(self, fname, **kwargs):
+ """
+ Save the current figure.
+
+ Call signature::
+
+ savefig(fname, dpi=None, facecolor='w', edgecolor='w',
+ orientation='portrait', papertype=None, format=None,
+ transparent=False, bbox_inches=None, pad_inches=0.1,
+ frameon=None)
+
+ The output formats available depend on the backend being used.
+
+ Parameters
+ ----------
+
+ fname : str or file-like object
+ A string containing a path to a filename, or a Python
+ file-like object, or possibly some backend-dependent object
+ such as :class:`~matplotlib.backends.backend_pdf.PdfPages`.
+
+ If *format* is *None* and *fname* is a string, the output
+ format is deduced from the extension of the filename. If
+ the filename has no extension, the value of the rc parameter
+ ``savefig.format`` is used.
+
+ If *fname* is not a string, remember to specify *format* to
+ ensure that the correct backend is used.
+
+ Other Parameters
+ ----------------
+
+ dpi : [ *None* | scalar > 0 | 'figure']
+ The resolution in dots per inch. If *None* it will default to
+ the value ``savefig.dpi`` in the matplotlibrc file. If 'figure'
+ it will set the dpi to be the value of the figure.
+
+ facecolor : color spec or None, optional
+ the facecolor of the figure; if None, defaults to savefig.facecolor
+
+ edgecolor : color spec or None, optional
+ the edgecolor of the figure; if None, defaults to savefig.edgecolor
+
+ orientation : {'landscape', 'portrait'}
+ not supported on all backends; currently only on postscript output
+
+ papertype : str
+ One of 'letter', 'legal', 'executive', 'ledger', 'a0' through
+ 'a10', 'b0' through 'b10'. Only supported for postscript
+ output.
+
+ format : str
+ One of the file extensions supported by the active
+ backend. Most backends support png, pdf, ps, eps and svg.
+
+ transparent : bool
+ If *True*, the axes patches will all be transparent; the
+ figure patch will also be transparent unless facecolor
+ and/or edgecolor are specified via kwargs.
+ This is useful, for example, for displaying
+ a plot on top of a colored background on a web page. The
+ transparency of these patches will be restored to their
+ original values upon exit of this function.
+
+ frameon : bool
+ If *True*, the figure patch will be colored, if *False*, the
+ figure background will be transparent. If not provided, the
+ rcParam 'savefig.frameon' will be used.
+
+ bbox_inches : str or `~matplotlib.transforms.Bbox`, optional
+ Bbox in inches. Only the given portion of the figure is
+ saved. If 'tight', try to figure out the tight bbox of
+ the figure. If None, use savefig.bbox
+
+ pad_inches : scalar, optional
+ Amount of padding around the figure when bbox_inches is
+ 'tight'. If None, use savefig.pad_inches
+
+ bbox_extra_artists : list of `~matplotlib.artist.Artist`, optional
+ A list of extra artists that will be considered when the
+ tight bbox is calculated.
+
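+ A minimal usage sketch (the file names are only illustrative)::
+
+ fig.savefig('figure.png', dpi=200, bbox_inches='tight')
+ fig.savefig('figure.pdf', transparent=True)
+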
+ """
+ kwargs.setdefault('dpi', rcParams['savefig.dpi'])
+ frameon = kwargs.pop('frameon', rcParams['savefig.frameon'])
+ transparent = kwargs.pop('transparent',
+ rcParams['savefig.transparent'])
+
+ if transparent:
+ kwargs.setdefault('facecolor', 'none')
+ kwargs.setdefault('edgecolor', 'none')
+ original_axes_colors = []
+ for ax in self.axes:
+ patch = ax.patch
+ original_axes_colors.append((patch.get_facecolor(),
+ patch.get_edgecolor()))
+ patch.set_facecolor('none')
+ patch.set_edgecolor('none')
+ else:
+ kwargs.setdefault('facecolor', rcParams['savefig.facecolor'])
+ kwargs.setdefault('edgecolor', rcParams['savefig.edgecolor'])
+
+ if frameon:
+ original_frameon = self.get_frameon()
+ self.set_frameon(frameon)
+
+ self.canvas.print_figure(fname, **kwargs)
+
+ if frameon:
+ self.set_frameon(original_frameon)
+
+ if transparent:
+ for ax, cc in zip(self.axes, original_axes_colors):
+ ax.patch.set_facecolor(cc[0])
+ ax.patch.set_edgecolor(cc[1])
+
+ @docstring.dedent_interpd
+ def colorbar(self, mappable, cax=None, ax=None, use_gridspec=True, **kw):
+ """
+ Create a colorbar for a ScalarMappable instance, *mappable*.
+
+ Documentation for the pylab thin wrapper:
+ %(colorbar_doc)s
+ """
+ if ax is None:
+ ax = self.gca()
+
+ # Store the value of gca so that we can set it back later on.
+ current_ax = self.gca()
+
+ if cax is None:
+ if use_gridspec and isinstance(ax, SubplotBase) \
+ and (not self.get_constrained_layout()):
+ cax, kw = cbar.make_axes_gridspec(ax, **kw)
+ else:
+ cax, kw = cbar.make_axes(ax, **kw)
+ cax._hold = True
+
+ # need to remove kws that cannot be passed to Colorbar
+ NON_COLORBAR_KEYS = ['fraction', 'pad', 'shrink', 'aspect', 'anchor',
+ 'panchor']
+ cb_kw = {k: v for k, v in kw.items() if k not in NON_COLORBAR_KEYS}
+ cb = cbar.colorbar_factory(cax, mappable, **cb_kw)
+
+ self.sca(current_ax)
+ self.stale = True
+ return cb
+
+ def subplots_adjust(self, *args, **kwargs):
+ """
+ Call signature::
+
+ subplots_adjust(left=None, bottom=None, right=None, top=None,
+ wspace=None, hspace=None)
+
+ Update the :class:`SubplotParams` with *kwargs* (defaulting to rc when
+ *None*) and update the subplot locations.
+
+ """
+ if self.get_constrained_layout():
+ self.set_constrained_layout(False)
+ warnings.warn("This figure was using constrained_layout==True, "
+ "but that is incompatible with subplots_adjust and "
+ "or tight_layout: setting "
+ "constrained_layout==False. ")
+ self.subplotpars.update(*args, **kwargs)
+
+ for ax in self.axes:
+ if not isinstance(ax, SubplotBase):
+ # Check if sharing a subplots axis
+ if isinstance(ax._sharex, SubplotBase):
+ ax._sharex.update_params()
+ ax.set_position(ax._sharex.figbox)
+ elif isinstance(ax._sharey, SubplotBase):
+ ax._sharey.update_params()
+ ax.set_position(ax._sharey.figbox)
+ else:
+ ax.update_params()
+ ax.set_position(ax.figbox)
+ self.stale = True
+
+ def ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=1,
+ mouse_pop=3, mouse_stop=2):
+ """
+ Blocking call to interact with a figure.
+
+ Wait until the user clicks *n* times on the figure, and return the
+ coordinates of each click in a list.
+
+ The buttons used for the various actions (adding points, removing
+ points, terminating the inputs) can be overridden via the
+ arguments *mouse_add*, *mouse_pop* and *mouse_stop*, that give
+ the associated mouse button: 1 for left, 2 for middle, 3 for
+ right.
+
+ Parameters
+ ----------
+ n : int, optional, default: 1
+ Number of mouse clicks to accumulate. If negative, accumulate
+ clicks until the input is terminated manually.
+ timeout : scalar, optional, default: 30
+ Number of seconds to wait before timing out. If zero or negative,
+ will never time out.
+ show_clicks : bool, optional, default: True
+ If True, show a red cross at the location of each click.
+ mouse_add : int, one of (1, 2, 3), optional, default: 1 (left click)
+ Mouse button used to add points.
+ mouse_pop : int, one of (1, 2, 3), optional, default: 3 (right click)
+ Mouse button used to remove the most recently added point.
+ mouse_stop : int, one of (1, 2, 3), optional, default: 2 (middle click)
+ Mouse button used to stop input.
+
+ Returns
+ -------
+ points : list of tuples
+ A list of the clicked (x, y) coordinates.
+
+ Notes
+ -----
+ The keyboard can also be used to select points in case your mouse
+ does not have one or more of the buttons. The delete and backspace
+ keys act like right clicking (i.e., remove last point), the enter key
+ terminates input and any other key (not already used by the window
+ manager) selects a point.
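+
+ A minimal usage sketch (blocks until three clicks or the timeout)::
+
+ pts = fig.ginput(3, timeout=60)
+ xs = [x for x, y in pts]
+ ys = [y for x, y in pts]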
+ """
+
+ blocking_mouse_input = BlockingMouseInput(self,
+ mouse_add=mouse_add,
+ mouse_pop=mouse_pop,
+ mouse_stop=mouse_stop)
+ return blocking_mouse_input(n=n, timeout=timeout,
+ show_clicks=show_clicks)
+
+ def waitforbuttonpress(self, timeout=-1):
+ """
+ Blocking call to interact with the figure.
+
+ This will return True if a key was pressed, False if a mouse
+ button was pressed and None if *timeout* was reached without
+ either being pressed.
+
+ If *timeout* is negative, does not time out.
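+
+ A minimal usage sketch::
+
+ pressed_key = fig.waitforbuttonpress(timeout=10)
+ if pressed_key is None:
+ print('timed out waiting for input')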
+ """
+
+ blocking_input = BlockingKeyMouseInput(self)
+ return blocking_input(timeout=timeout)
+
+ def get_default_bbox_extra_artists(self):
+ bbox_artists = [artist for artist in self.get_children()
+ if artist.get_visible()]
+ for ax in self.axes:
+ if ax.get_visible():
+ bbox_artists.extend(ax.get_default_bbox_extra_artists())
+ # we don't want the figure's patch to influence the bbox calculation
+ bbox_artists.remove(self.patch)
+ return bbox_artists
+
+ def get_tightbbox(self, renderer):
+ """
+ Return a (tight) bounding box of the figure in inches.
+
+ It only accounts for the axes title, axis labels, and axis
+ ticklabels. Needs improvement.
+ """
+
+ bb = []
+ for ax in self.axes:
+ if ax.get_visible():
+ bb.append(ax.get_tightbbox(renderer))
+
+ if len(bb) == 0:
+ return self.bbox_inches
+
+ _bbox = Bbox.union([b for b in bb if b.width != 0 or b.height != 0])
+
+ bbox_inches = TransformedBbox(_bbox,
+ Affine2D().scale(1. / self.dpi))
+
+ return bbox_inches
+
+ def init_layoutbox(self):
+ """Initialize the layoutbox for use in constrained_layout."""
+ if self._layoutbox is None:
+ self._layoutbox = layoutbox.LayoutBox(parent=None,
+ name='figlb',
+ artist=self)
+ self._layoutbox.constrain_geometry(0., 0., 1., 1.)
+
+ def execute_constrained_layout(self, renderer=None):
+ """
+ Use ``layoutbox`` to determine the position of each axes.
+
+ See also `.set_constrained_layout_pads`.
+ """
+
+ from matplotlib._constrained_layout import (do_constrained_layout)
+
+ _log.debug('Executing constrainedlayout')
+ if self._layoutbox is None:
+ warnings.warn("Calling figure.constrained_layout, but figure "
+ "not setup to do constrained layout. "
+ " You either called GridSpec without the "
+ "fig keyword, you are using plt.subplot, "
+ "or you need to call figure or subplots"
+ "with the constrained_layout=True kwarg.")
+ return
+ w_pad, h_pad, wspace, hspace = self.get_constrained_layout_pads()
+ # convert to unit-relative lengths
+
+ fig = self
+ width, height = fig.get_size_inches()
+ w_pad = w_pad / width
+ h_pad = h_pad / height
+ if renderer is None:
+ renderer = layoutbox.get_renderer(fig)
+ do_constrained_layout(fig, renderer, h_pad, w_pad, hspace, wspace)
+
+ def tight_layout(self, renderer=None, pad=1.08, h_pad=None, w_pad=None,
+ rect=None):
+ """
+ Adjust subplot parameters to give specified padding.
+
+ Parameters
+ ----------
+ pad : float
+ padding between the figure edge and the edges of subplots,
+ as a fraction of the font-size.
+
+ h_pad, w_pad : float, optional
+ padding (height/width) between edges of adjacent subplots.
+ Defaults to `pad_inches`.
+
+ rect : tuple (left, bottom, right, top), optional
+ a rectangle (left, bottom, right, top) in the normalized
+ figure coordinate that the whole subplots area (including
+ labels) will fit into. Default is (0, 0, 1, 1).
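+
+ A minimal usage sketch (the *rect* values are only illustrative)::
+
+ # tighten the layout but leave headroom for a suptitle
+ fig.tight_layout(pad=1.2, rect=(0, 0, 1, 0.95))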
+ """
+
+ from .tight_layout import (
+ get_renderer, get_subplotspec_list, get_tight_layout_figure)
+
+ subplotspec_list = get_subplotspec_list(self.axes)
+ if None in subplotspec_list:
+ warnings.warn("This figure includes Axes that are not compatible "
+ "with tight_layout, so results might be incorrect.")
+
+ if renderer is None:
+ renderer = get_renderer(self)
+
+ kwargs = get_tight_layout_figure(
+ self, self.axes, subplotspec_list, renderer,
+ pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
+ self.subplots_adjust(**kwargs)
+
+ def align_xlabels(self, axs=None):
+ """
+ Align the xlabels of subplots in the same subplot row if label
+ alignment is being done automatically (i.e. the label position is
+ not manually set).
+
+ Alignment persists for draw events after this is called.
+
+ If a label is on the bottom, it is aligned with labels on axes that
+ also have their label on the bottom and that have the same
+ bottom-most subplot row. If the label is on the top,
+ it is aligned with labels on axes with the same top-most row.
+
+ Parameters
+ ----------
+ axs : list of `~matplotlib.axes.Axes`
+ Optional list of (or ndarray) `~matplotlib.axes.Axes`
+ to align the xlabels.
+ Default is to align all axes on the figure.
+
+ See Also
+ --------
+ matplotlib.figure.Figure.align_ylabels
+
+ matplotlib.figure.Figure.align_labels
+
+ Notes
+ -----
+ This assumes that ``axs`` are from the same `.GridSpec`, so that
+ their `.SubplotSpec` positions correspond to figure positions.
+
+ Examples
+ --------
+ Example with rotated xtick labels::
+
+ fig, axs = plt.subplots(1, 2)
+ for tick in axs[0].get_xticklabels():
+ tick.set_rotation(55)
+ axs[0].set_xlabel('XLabel 0')
+ axs[1].set_xlabel('XLabel 1')
+ fig.align_xlabels()
+
+ """
+
+ if axs is None:
+ axs = self.axes
+ axs = np.asarray(axs).ravel()
+ for ax in axs:
+ _log.debug(' Working on: %s', ax.get_xlabel())
+ ss = ax.get_subplotspec()
+ nrows, ncols, row0, row1, col0, col1 = ss.get_rows_columns()
+ labpo = ax.xaxis.get_label_position() # top or bottom
+
+ # loop through other axes, and search for label positions
+ # that are same as this one, and that share the appropriate
+ # row number.
+ # Add to a grouper associated with each axes of siblings.
+ # This list is inspected in `axis.draw` by
+ # `axis._update_label_position`.
+ for axc in axs:
+ if axc.xaxis.get_label_position() == labpo:
+ ss = axc.get_subplotspec()
+ nrows, ncols, rowc0, rowc1, colc, col1 = \
+ ss.get_rows_columns()
+ if (labpo == 'bottom' and rowc1 == row1 or
+ labpo == 'top' and rowc0 == row0):
+ # grouper for groups of xlabels to align
+ self._align_xlabel_grp.join(ax, axc)
+
+ def align_ylabels(self, axs=None):
+ """
+ Align the ylabels of subplots in the same subplot column if label
+ alignment is being done automatically (i.e. the label position is
+ not manually set).
+
+ Alignment persists for draw events after this is called.
+
+ If a label is on the left, it is aligned with labels on axes that
+ also have their label on the left and that have the same
+ left-most subplot column. If the label is on the right,
+ it is aligned with labels on axes with the same right-most column.
+
+ Parameters
+ ----------
+ axs : list of `~matplotlib.axes.Axes`
+ Optional list (or ndarray) of `~matplotlib.axes.Axes`
+ to align the ylabels.
+ Default is to align all axes on the figure.
+
+ See Also
+ --------
+ matplotlib.figure.Figure.align_xlabels
+
+ matplotlib.figure.Figure.align_labels
+
+ Notes
+ -----
+ This assumes that ``axs`` are from the same `.GridSpec`, so that
+ their `.SubplotSpec` positions correspond to figure positions.
+
+ Examples
+ --------
+ Example with large ytick labels::
+
+ fig, axs = plt.subplots(2, 1)
+ axs[0].plot(np.arange(0, 1000, 50))
+ axs[0].set_ylabel('YLabel 0')
+ axs[1].set_ylabel('YLabel 1')
+ fig.align_ylabels()
+
+ """
+
+ if axs is None:
+ axs = self.axes
+ axs = np.asarray(axs).ravel()
+ for ax in axs:
+ _log.debug(' Working on: %s', ax.get_ylabel())
+ ss = ax.get_subplotspec()
+ nrows, ncols, row0, row1, col0, col1 = ss.get_rows_columns()
+ same = [ax]
+ labpo = ax.yaxis.get_label_position() # left or right
+ # loop through other axes, and search for label positions
+ # that are same as this one, and that share the appropriate
+ # column number.
+ # Add to a list associated with each axes of siblings.
+ # This list is inspected in `axis.draw` by
+ # `axis._update_label_position`.
+ for axc in axs:
+ if axc != ax:
+ if axc.yaxis.get_label_position() == labpo:
+ ss = axc.get_subplotspec()
+ nrows, ncols, row0, row1, colc0, colc1 = \
+ ss.get_rows_columns()
+ if (labpo == 'left' and colc0 == col0 or
+ labpo == 'right' and colc1 == col1):
+ # grouper for groups of ylabels to align
+ self._align_ylabel_grp.join(ax, axc)
+
+ def align_labels(self, axs=None):
+ """
+ Align the xlabels and ylabels of subplots with the same subplots
+ row or column (respectively) if label alignment is being
+ done automatically (i.e. the label position is not manually set).
+
+ Alignment persists for draw events after this is called.
+
+ Parameters
+ ----------
+ axs : list of `~matplotlib.axes.Axes`
+ Optional list (or ndarray) of `~matplotlib.axes.Axes`
+ to align the labels.
+ Default is to align all axes on the figure.
+
+ See Also
+ --------
+ matplotlib.figure.Figure.align_xlabels
+
+ matplotlib.figure.Figure.align_ylabels
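+
+ Examples
+ --------
+ A minimal sketch (``plt`` is assumed to be ``matplotlib.pyplot``)::
+
+ fig, axs = plt.subplots(2, 2)
+ axs[0, 0].set_ylabel('YLabel 0')
+ axs[1, 1].set_xlabel('XLabel 1')
+ fig.align_labels()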
+ """
+ self.align_xlabels(axs=axs)
+ self.align_ylabels(axs=axs)
+
+
+def figaspect(arg):
+ """
+ Create a figure with specified aspect ratio. If *arg* is a number,
+ use that aspect ratio. If *arg* is an array, figaspect will
+ determine the width and height for a figure that would fit the
+ array while preserving its aspect ratio. The figure width and
+ height in inches are returned. Be sure to create an axes with
+ equal width and height, e.g.,
+
+ Example usage::
+
+ # make a figure twice as tall as it is wide
+ w, h = figaspect(2.)
+ fig = Figure(figsize=(w,h))
+ ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
+ ax.imshow(A, **kwargs)
+
+
+ # make a figure with the proper aspect for an array
+ A = rand(5,3)
+ w, h = figaspect(A)
+ fig = Figure(figsize=(w,h))
+ ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
+ ax.imshow(A, **kwargs)
+
+ Thanks to Fernando Perez for this function
+ """
+
+ isarray = hasattr(arg, 'shape') and not np.isscalar(arg)
+
+ # min/max sizes to respect when autoscaling. If John likes the idea, they
+ # could become rc parameters, for now they're hardwired.
+ figsize_min = np.array((4.0, 2.0)) # min length for width/height
+ figsize_max = np.array((16.0, 16.0)) # max length for width/height
+
+ # Extract the aspect ratio of the array
+ if isarray:
+ nr, nc = arg.shape[:2]
+ arr_ratio = nr / nc
+ else:
+ arr_ratio = arg
+
+ # Height of user figure defaults
+ fig_height = rcParams['figure.figsize'][1]
+
+ # New size for the figure, keeping the aspect ratio of the caller
+ newsize = np.array((fig_height / arr_ratio, fig_height))
+
+ # Sanity checks, don't drop either dimension below figsize_min
+ newsize /= min(1.0, *(newsize / figsize_min))
+
+ # Avoid humongous windows as well
+ newsize /= max(1.0, *(newsize / figsize_max))
+
+ # Finally, if we have a really funky aspect ratio, break it but respect
+ # the min/max dimensions (we don't want figures 10 feet tall!)
+ newsize = np.clip(newsize, figsize_min, figsize_max)
+ return newsize
+
+docstring.interpd.update(Figure=martist.kwdoc(Figure))
diff --git a/contrib/python/matplotlib/py2/matplotlib/font_manager.py b/contrib/python/matplotlib/py2/matplotlib/font_manager.py
new file mode 100644
index 00000000000..5900fc9b184
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/font_manager.py
@@ -0,0 +1,1479 @@
+"""
+A module for finding, managing, and using fonts across platforms.
+
+This module provides a single :class:`FontManager` instance that can
+be shared across backends and platforms. The :func:`findfont`
+function returns the best TrueType (TTF) font file in the local or
+system font path that matches the specified :class:`FontProperties`
+instance. The :class:`FontManager` also handles Adobe Font Metrics
+(AFM) font files for use by the PostScript backend.
+
+The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
+font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
+Future versions may implement the Level 2 or 2.1 specifications.
+
+Experimental support is included for using `fontconfig` on Unix
+variant platforms (Linux, OS X, Solaris). To enable it, set the
+constant ``USE_FONTCONFIG`` in this file to ``True``. Fontconfig has
+the advantage that it is the standard way to look up fonts on X11
+platforms, so if a font is installed, it is much more likely to be
+found.
+"""
+from __future__ import absolute_import, division, print_function
+
+import six
+
+"""
+KNOWN ISSUES
+
+ - documentation
+ - font variant is untested
+ - font stretch is incomplete
+ - font size is incomplete
+ - default font algorithm needs improvement and testing
+ - setWeights function needs improvement
+ - 'light' is an invalid weight value, remove it.
+ - update_fonts not implemented
+
+Authors : John Hunter <jdhunter@ace.bsd.uchicago.edu>
+ Paul Barrett <Barrett@STScI.Edu>
+ Michael Droettboom <mdroe@STScI.edu>
+Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
+License : matplotlib license (PSF compatible)
+ The font directory code is from ttfquery,
+ see license/LICENSE_TTFQUERY.
+"""
+
+from collections import Iterable
+import json
+import os
+import sys
+try:
+ from threading import Timer
+except ImportError:
+ from dummy_threading import Timer
+import warnings
+import logging
+
+from matplotlib import afm, cbook, ft2font, rcParams, get_cachedir
+from matplotlib.compat import subprocess
+from matplotlib.fontconfig_pattern import (
+ parse_fontconfig_pattern, generate_fontconfig_pattern)
+
+try:
+ from functools import lru_cache
+except ImportError:
+ from backports.functools_lru_cache import lru_cache
+
+_log = logging.getLogger(__name__)
+
+USE_FONTCONFIG = False
+
+font_scalings = {
+ 'xx-small' : 0.579,
+ 'x-small' : 0.694,
+ 'small' : 0.833,
+ 'medium' : 1.0,
+ 'large' : 1.200,
+ 'x-large' : 1.440,
+ 'xx-large' : 1.728,
+ 'larger' : 1.2,
+ 'smaller' : 0.833,
+ None : 1.0}
+
+stretch_dict = {
+ 'ultra-condensed' : 100,
+ 'extra-condensed' : 200,
+ 'condensed' : 300,
+ 'semi-condensed' : 400,
+ 'normal' : 500,
+ 'semi-expanded' : 600,
+ 'expanded' : 700,
+ 'extra-expanded' : 800,
+ 'ultra-expanded' : 900}
+
+weight_dict = {
+ 'ultralight' : 100,
+ 'light' : 200,
+ 'normal' : 400,
+ 'regular' : 400,
+ 'book' : 400,
+ 'medium' : 500,
+ 'roman' : 500,
+ 'semibold' : 600,
+ 'demibold' : 600,
+ 'demi' : 600,
+ 'bold' : 700,
+ 'heavy' : 800,
+ 'extra bold' : 800,
+ 'black' : 900}
+
+font_family_aliases = {
+ 'serif',
+ 'sans-serif',
+ 'sans serif',
+ 'cursive',
+ 'fantasy',
+ 'monospace',
+ 'sans'}
+
+# OS Font paths
+MSFolders = \
+ r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
+
+
+MSFontDirectories = [
+ r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
+ r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
+
+
+X11FontDirectories = [
+ # an old standard installation point
+ "/usr/X11R6/lib/X11/fonts/TTF/",
+ "/usr/X11/lib/X11/fonts",
+ # here is the new standard location for fonts
+ "/usr/share/fonts/",
+ # documented as a good place to install new fonts
+ "/usr/local/share/fonts/",
+ # common application, not really useful
+ "/usr/lib/openoffice/share/fonts/truetype/",
+ ]
+
+OSXFontDirectories = [
+ "/Library/Fonts/",
+ "/Network/Library/Fonts/",
+ "/System/Library/Fonts/",
+ # fonts installed via MacPorts
+ "/opt/local/share/fonts"
+ ""
+]
+
+if not USE_FONTCONFIG and sys.platform != 'win32':
+ home = os.environ.get('HOME')
+ if home is not None:
+ # user fonts on OSX
+ path = os.path.join(home, 'Library', 'Fonts')
+ OSXFontDirectories.append(path)
+ path = os.path.join(home, '.fonts')
+ X11FontDirectories.append(path)
+
+
+def get_fontext_synonyms(fontext):
+ """
+ Return a list of file extensions that are synonyms for
+ the given file extension *fontext*.
+ """
+ return {'ttf': ('ttf', 'otf'),
+ 'otf': ('ttf', 'otf'),
+ 'afm': ('afm',)}[fontext]
+
+
+def list_fonts(directory, extensions):
+ """
+ Return a list of all fonts matching any of the extensions,
+ possibly upper-cased, found recursively under the directory.
+ """
+ pattern = ';'.join(['*.%s;*.%s' % (ext, ext.upper())
+ for ext in extensions])
+ return cbook.listFiles(directory, pattern)
+
+
+def win32FontDirectory():
+ """
+ Return the user-specified font directory for Win32. This is
+ looked up from the registry key::
+
+ \\\\HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\Fonts
+
+ If the key is not found, $WINDIR/Fonts will be returned.
+ """
+ try:
+ from six.moves import winreg
+ except ImportError:
+ pass # Fall through to default
+ else:
+ try:
+ user = winreg.OpenKey(winreg.HKEY_CURRENT_USER, MSFolders)
+ try:
+ try:
+ return winreg.QueryValueEx(user, 'Fonts')[0]
+ except OSError:
+ pass # Fall through to default
+ finally:
+ winreg.CloseKey(user)
+ except OSError:
+ pass # Fall through to default
+ return os.path.join(os.environ['WINDIR'], 'Fonts')
+
+
+def win32InstalledFonts(directory=None, fontext='ttf'):
+ """
+ Search for fonts in the specified font directory, or use the
+ system directories if none given. A list of TrueType font
+ filenames is returned by default, or AFM fonts if *fontext* ==
+ 'afm'.
+ """
+
+ from six.moves import winreg
+ if directory is None:
+ directory = win32FontDirectory()
+
+ fontext = get_fontext_synonyms(fontext)
+
+ key, items = None, set()
+ for fontdir in MSFontDirectories:
+ try:
+ local = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, fontdir)
+ except OSError:
+ continue
+ if not local:
+ return list_fonts(directory, fontext)
+ try:
+ for j in range(winreg.QueryInfoKey(local)[1]):
+ try:
+ key, direc, tp = winreg.EnumValue(local, j)
+ if not isinstance(direc, six.string_types):
+ continue
+ # Work around for https://bugs.python.org/issue25778, which
+ # is fixed in Py>=3.6.1.
+ direc = direc.split("\0", 1)[0]
+ if not os.path.dirname(direc):
+ direc = os.path.join(directory, direc)
+ direc = os.path.abspath(direc).lower()
+ if os.path.splitext(direc)[1][1:] in fontext:
+ items.add(direc)
+ except EnvironmentError:
+ continue
+ except WindowsError:
+ continue
+ except MemoryError:
+ continue
+ return list(items)
+ finally:
+ winreg.CloseKey(local)
+ return None
+
+
+def OSXInstalledFonts(directories=None, fontext='ttf'):
+ """
+ Get list of font files on OS X - ignores font suffix by default.
+ """
+ if directories is None:
+ directories = OSXFontDirectories
+
+ fontext = get_fontext_synonyms(fontext)
+
+ files = []
+ for path in directories:
+ if fontext is None:
+ files.extend(cbook.listFiles(path, '*'))
+ else:
+ files.extend(list_fonts(path, fontext))
+ return files
+
+
+@lru_cache()
+def _call_fc_list():
+ """Cache and list the font filenames known to `fc-list`.
+ """
+ # Delay the warning by 5s.
+ timer = Timer(5, lambda: warnings.warn(
+ 'Matplotlib is building the font cache using fc-list. '
+ 'This may take a moment.'))
+ timer.start()
+ try:
+ out = subprocess.check_output([str('fc-list'), '--format=%{file}\\n'])
+ except (OSError, subprocess.CalledProcessError):
+ return []
+ finally:
+ timer.cancel()
+ fnames = []
+ for fname in out.split(b'\n'):
+ try:
+ fname = six.text_type(fname, sys.getfilesystemencoding())
+ except UnicodeDecodeError:
+ continue
+ fnames.append(fname)
+ return fnames
+
+
+def get_fontconfig_fonts(fontext='ttf'):
+ """List the font filenames known to `fc-list` having the given extension.
+ """
+ fontext = get_fontext_synonyms(fontext)
+ return [fname for fname in _call_fc_list()
+ if os.path.splitext(fname)[1][1:] in fontext]
+
+
+def findSystemFonts(fontpaths=None, fontext='ttf'):
+ """
+ Search for fonts in the specified font paths. If no paths are
+ given, will use a standard set of system paths, as well as the
+ list of fonts tracked by fontconfig if fontconfig is installed and
+ available. A list of TrueType fonts is returned by default, with
+ AFM fonts as an option.
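+
+ A minimal usage sketch (the extra search path is only illustrative)::
+
+ ttf_files = findSystemFonts(fontext='ttf')
+ afm_files = findSystemFonts(fontpaths=['/usr/share/fonts'],
+ fontext='afm')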
+ """
+ fontfiles = set()
+ fontexts = get_fontext_synonyms(fontext)
+
+ if fontpaths is None:
+ if sys.platform == 'win32':
+ fontdir = win32FontDirectory()
+
+ fontpaths = [fontdir]
+ # now get all installed fonts directly...
+ for f in win32InstalledFonts(fontdir):
+ base, ext = os.path.splitext(f)
+ if len(ext)>1 and ext[1:].lower() in fontexts:
+ fontfiles.add(f)
+ else:
+ fontpaths = X11FontDirectories
+ # check for OS X & load its fonts if present
+ if sys.platform == 'darwin':
+ for f in OSXInstalledFonts(fontext=fontext):
+ fontfiles.add(f)
+
+ for f in get_fontconfig_fonts(fontext):
+ fontfiles.add(f)
+
+ elif isinstance(fontpaths, six.string_types):
+ fontpaths = [fontpaths]
+
+ for path in fontpaths:
+ files = list_fonts(path, fontexts)
+ for fname in files:
+ fontfiles.add(os.path.abspath(fname))
+
+ return [fname for fname in fontfiles if os.path.exists(fname)]
+
+
+@cbook.deprecated("2.1")
+def weight_as_number(weight):
+ """
+ Return the weight property as a numeric value. String values
+ are converted to their corresponding numeric value.
+ """
+ if isinstance(weight, six.string_types):
+ try:
+ weight = weight_dict[weight.lower()]
+ except KeyError:
+ weight = 400
+ elif weight in range(100, 1000, 100):
+ pass
+ else:
+ raise ValueError('weight not a valid integer')
+ return weight
+
+
+class FontEntry(object):
+ """
+ A class for storing Font properties. It is used when populating
+ the font lookup dictionary.
+ """
+ def __init__(self,
+ fname ='',
+ name ='',
+ style ='normal',
+ variant='normal',
+ weight ='normal',
+ stretch='normal',
+ size ='medium',
+ ):
+ self.fname = fname
+ self.name = name
+ self.style = style
+ self.variant = variant
+ self.weight = weight
+ self.stretch = stretch
+ try:
+ self.size = str(float(size))
+ except ValueError:
+ self.size = size
+
+ def __repr__(self):
+ return "<Font '%s' (%s) %s %s %s %s>" % (
+ self.name, os.path.basename(self.fname), self.style, self.variant,
+ self.weight, self.stretch)
+
+
+def ttfFontProperty(font):
+ """
+ Extract information from a TrueType font file.
+
+ Parameters
+ ----------
+ font : `.FT2Font`
+ The TrueType font file from which information will be extracted.
+
+ Returns
+ -------
+ `FontEntry`
+ The extracted font properties.
+
+ """
+ name = font.family_name
+
+ # Styles are: italic, oblique, and normal (default)
+
+ sfnt = font.get_sfnt()
+ sfnt2 = sfnt.get((1,0,0,2))
+ sfnt4 = sfnt.get((1,0,0,4))
+ if sfnt2:
+ sfnt2 = sfnt2.decode('mac_roman').lower()
+ else:
+ sfnt2 = ''
+ if sfnt4:
+ sfnt4 = sfnt4.decode('mac_roman').lower()
+ else:
+ sfnt4 = ''
+ if sfnt4.find('oblique') >= 0:
+ style = 'oblique'
+ elif sfnt4.find('italic') >= 0:
+ style = 'italic'
+ elif sfnt2.find('regular') >= 0:
+ style = 'normal'
+ elif font.style_flags & ft2font.ITALIC:
+ style = 'italic'
+ else:
+ style = 'normal'
+
+ # Variants are: small-caps and normal (default)
+
+ # !!!! Untested
+ if name.lower() in ['capitals', 'small-caps']:
+ variant = 'small-caps'
+ else:
+ variant = 'normal'
+
+ weight = next((w for w in weight_dict if sfnt4.find(w) >= 0), None)
+ if not weight:
+ if font.style_flags & ft2font.BOLD:
+ weight = 700
+ else:
+ weight = 400
+
+ # Stretch can be absolute and relative
+ # Absolute stretches are: ultra-condensed, extra-condensed, condensed,
+ # semi-condensed, normal, semi-expanded, expanded, extra-expanded,
+ # and ultra-expanded.
+ # Relative stretches are: wider, narrower
+ # Child value is: inherit
+
+ if (sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or
+ sfnt4.find('cond') >= 0):
+ stretch = 'condensed'
+ elif sfnt4.find('demi cond') >= 0:
+ stretch = 'semi-condensed'
+ elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
+ stretch = 'expanded'
+ else:
+ stretch = 'normal'
+
+ # Sizes can be absolute and relative.
+ # Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
+ # and xx-large.
+ # Relative sizes are: larger, smaller
+ # Length value is an absolute font size, e.g., 12pt
+ # Percentage values are in 'em's. Most robust specification.
+
+ if not font.scalable:
+ raise NotImplementedError("Non-scalable fonts are not supported")
+ size = 'scalable'
+
+ return FontEntry(font.fname, name, style, variant, weight, stretch, size)
+
+
+def afmFontProperty(fontpath, font):
+ """
+ Extract information from an AFM font file.
+
+ Parameters
+ ----------
+ font : `.AFM`
+ The AFM font file from which information will be extracted.
+
+ Returns
+ -------
+ `FontEntry`
+ The extracted font properties.
+
+ """
+
+ name = font.get_familyname()
+ fontname = font.get_fontname().lower()
+
+ # Styles are: italic, oblique, and normal (default)
+
+ if font.get_angle() != 0 or name.lower().find('italic') >= 0:
+ style = 'italic'
+ elif name.lower().find('oblique') >= 0:
+ style = 'oblique'
+ else:
+ style = 'normal'
+
+ # Variants are: small-caps and normal (default)
+
+ # !!!! Untested
+ if name.lower() in ['capitals', 'small-caps']:
+ variant = 'small-caps'
+ else:
+ variant = 'normal'
+
+ weight = font.get_weight().lower()
+
+ # Stretch can be absolute and relative
+ # Absolute stretches are: ultra-condensed, extra-condensed, condensed,
+ # semi-condensed, normal, semi-expanded, expanded, extra-expanded,
+ # and ultra-expanded.
+ # Relative stretches are: wider, narrower
+ # Child value is: inherit
+ if fontname.find('narrow') >= 0 or fontname.find('condensed') >= 0 or \
+ fontname.find('cond') >= 0:
+ stretch = 'condensed'
+ elif fontname.find('demi cond') >= 0:
+ stretch = 'semi-condensed'
+ elif fontname.find('wide') >= 0 or fontname.find('expanded') >= 0:
+ stretch = 'expanded'
+ else:
+ stretch = 'normal'
+
+ # Sizes can be absolute and relative.
+ # Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
+ # and xx-large.
+ # Relative sizes are: larger, smaller
+ # Length value is an absolute font size, e.g., 12pt
+ # Percentage values are in 'em's. Most robust specification.
+
+ # All AFM fonts are apparently scalable.
+
+ size = 'scalable'
+
+ return FontEntry(fontpath, name, style, variant, weight, stretch, size)
+
+
+def createFontList(fontfiles, fontext='ttf'):
+ """
+ A function to create a font lookup list. The default is to create
+ a list of TrueType fonts. An AFM font list can optionally be
+ created.
+ """
+
+ fontlist = []
+ # Add fonts from list of known font files.
+ seen = set()
+ for fpath in fontfiles:
+ _log.debug('createFontDict: %s', fpath)
+ fname = os.path.split(fpath)[1]
+ if fname in seen:
+ continue
+ else:
+ seen.add(fname)
+ if fontext == 'afm':
+ try:
+ fh = open(fpath, 'rb')
+ except EnvironmentError:
+ _log.info("Could not open font file %s", fpath)
+ continue
+ try:
+ font = afm.AFM(fh)
+ except RuntimeError:
+ _log.info("Could not parse font file %s", fpath)
+ continue
+ finally:
+ fh.close()
+ try:
+ prop = afmFontProperty(fpath, font)
+ except KeyError:
+ continue
+ else:
+ try:
+ font = ft2font.FT2Font(fpath)
+ except RuntimeError:
+ _log.info("Could not open font file %s", fpath)
+ continue
+ except UnicodeError:
+ _log.info("Cannot handle unicode filenames")
+ continue
+ except IOError:
+ _log.info("IO error - cannot open font file %s", fpath)
+ continue
+ try:
+ prop = ttfFontProperty(font)
+ except (KeyError, RuntimeError, ValueError, NotImplementedError):
+ continue
+
+ fontlist.append(prop)
+ return fontlist
+
+
+class FontProperties(object):
+ """
+ A class for storing and manipulating font properties.
+
+ The font properties are those described in the `W3C Cascading
+ Style Sheet, Level 1
+ <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
+ specification. The six properties are:
+
+ - family: A list of font names in decreasing order of priority.
+ The items may include a generic font family name, either
+ 'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
+ In that case, the actual font to be used will be looked up
+ from the associated rcParam in :file:`matplotlibrc`.
+
+ - style: Either 'normal', 'italic' or 'oblique'.
+
+ - variant: Either 'normal' or 'small-caps'.
+
+ - stretch: A numeric value in the range 0-1000 or one of
+ 'ultra-condensed', 'extra-condensed', 'condensed',
+ 'semi-condensed', 'normal', 'semi-expanded', 'expanded',
+ 'extra-expanded' or 'ultra-expanded'
+
+ - weight: A numeric value in the range 0-1000 or one of
+ 'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
+ 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
+ 'extra bold', 'black'
+
+      - size: Either a relative value of 'xx-small',
+ 'small', 'medium', 'large', 'x-large', 'xx-large' or an
+ absolute font size, e.g., 12
+
+ The default font property for TrueType fonts (as specified in the
+ default :file:`matplotlibrc` file) is::
+
+ sans-serif, normal, normal, normal, normal, scalable.
+
+ Alternatively, a font may be specified using an absolute path to a
+ .ttf file, by using the *fname* kwarg.
+
+ The preferred usage of font sizes is to use the relative values,
+ e.g., 'large', instead of absolute font sizes, e.g., 12. This
+ approach allows all text sizes to be made larger or smaller based
+ on the font manager's default font size.
+
+ This class will also accept a `fontconfig
+ <https://www.freedesktop.org/wiki/Software/fontconfig/>`_ pattern, if it is
+ the only argument provided. See the documentation on `fontconfig patterns
+ <https://www.freedesktop.org/software/fontconfig/fontconfig-user.html>`_.
+ This support does not require fontconfig to be installed. We are merely
+ borrowing its pattern syntax for use here.
+
+ Note that matplotlib's internal font manager and fontconfig use a
+ different algorithm to lookup fonts, so the results of the same pattern
+ may be different in matplotlib than in other applications that use
+ fontconfig.
+ """
+
+ def __init__(self,
+ family = None,
+ style = None,
+ variant= None,
+ weight = None,
+ stretch= None,
+ size = None,
+ fname = None, # if this is set, it's a hardcoded filename to use
+ _init = None # used only by copy()
+ ):
+ self._family = _normalize_font_family(rcParams['font.family'])
+ self._slant = rcParams['font.style']
+ self._variant = rcParams['font.variant']
+ self._weight = rcParams['font.weight']
+ self._stretch = rcParams['font.stretch']
+ self._size = rcParams['font.size']
+ self._file = None
+
+ # This is used only by copy()
+ if _init is not None:
+ self.__dict__.update(_init.__dict__)
+ return
+
+ if isinstance(family, six.string_types):
+ # Treat family as a fontconfig pattern if it is the only
+ # parameter provided.
+ if (style is None and
+ variant is None and
+ weight is None and
+ stretch is None and
+ size is None and
+ fname is None):
+ self.set_fontconfig_pattern(family)
+ return
+
+ self.set_family(family)
+ self.set_style(style)
+ self.set_variant(variant)
+ self.set_weight(weight)
+ self.set_stretch(stretch)
+ self.set_file(fname)
+ self.set_size(size)
+
+ def _parse_fontconfig_pattern(self, pattern):
+ return parse_fontconfig_pattern(pattern)
+
+ def __hash__(self):
+ l = (tuple(self.get_family()),
+ self.get_slant(),
+ self.get_variant(),
+ self.get_weight(),
+ self.get_stretch(),
+ self.get_size_in_points(),
+ self.get_file())
+ return hash(l)
+
+ def __eq__(self, other):
+ return hash(self) == hash(other)
+
+ def __ne__(self, other):
+ return hash(self) != hash(other)
+
+ def __str__(self):
+ return self.get_fontconfig_pattern()
+
+ def get_family(self):
+ """
+ Return a list of font names that comprise the font family.
+ """
+ return self._family
+
+ def get_name(self):
+ """
+ Return the name of the font that best matches the font
+ properties.
+ """
+ return get_font(findfont(self)).family_name
+
+ def get_style(self):
+ """
+ Return the font style. Values are: 'normal', 'italic' or
+ 'oblique'.
+ """
+ return self._slant
+ get_slant = get_style
+
+ def get_variant(self):
+ """
+ Return the font variant. Values are: 'normal' or
+ 'small-caps'.
+ """
+ return self._variant
+
+ def get_weight(self):
+ """
+        Return the font weight.  Options are: A numeric value in the
+ range 0-1000 or one of 'light', 'normal', 'regular', 'book',
+ 'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
+ 'heavy', 'extra bold', 'black'
+ """
+ return self._weight
+
+ def get_stretch(self):
+ """
+ Return the font stretch or width. Options are: 'ultra-condensed',
+ 'extra-condensed', 'condensed', 'semi-condensed', 'normal',
+ 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
+ """
+ return self._stretch
+
+ def get_size(self):
+ """
+ Return the font size.
+ """
+ return self._size
+
+ def get_size_in_points(self):
+ return self._size
+
+ def get_file(self):
+ """
+ Return the filename of the associated font.
+ """
+ return self._file
+
+ def get_fontconfig_pattern(self):
+ """
+ Get a fontconfig pattern suitable for looking up the font as
+ specified with fontconfig's ``fc-match`` utility.
+
+ See the documentation on `fontconfig patterns
+ <https://www.freedesktop.org/software/fontconfig/fontconfig-user.html>`_.
+
+ This support does not require fontconfig to be installed or
+ support for it to be enabled. We are merely borrowing its
+ pattern syntax for use here.
+ """
+ return generate_fontconfig_pattern(self)
+
+ def set_family(self, family):
+ """
+ Change the font family. May be either an alias (generic name
+        in CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
+ 'fantasy', or 'monospace', a real font name or a list of real
+ font names. Real font names are not supported when
+ `text.usetex` is `True`.
+ """
+ if family is None:
+ family = rcParams['font.family']
+ self._family = _normalize_font_family(family)
+ set_name = set_family
+
+ def set_style(self, style):
+ """
+ Set the font style. Values are: 'normal', 'italic' or
+ 'oblique'.
+ """
+ if style is None:
+ style = rcParams['font.style']
+ if style not in ('normal', 'italic', 'oblique'):
+ raise ValueError("style must be normal, italic or oblique")
+ self._slant = style
+ set_slant = set_style
+
+ def set_variant(self, variant):
+ """
+ Set the font variant. Values are: 'normal' or 'small-caps'.
+ """
+ if variant is None:
+ variant = rcParams['font.variant']
+ if variant not in ('normal', 'small-caps'):
+ raise ValueError("variant must be normal or small-caps")
+ self._variant = variant
+
+ def set_weight(self, weight):
+ """
+ Set the font weight. May be either a numeric value in the
+ range 0-1000 or one of 'ultralight', 'light', 'normal',
+ 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
+ 'demi', 'bold', 'heavy', 'extra bold', 'black'
+ """
+ if weight is None:
+ weight = rcParams['font.weight']
+ try:
+ weight = int(weight)
+ if weight < 0 or weight > 1000:
+ raise ValueError()
+ except ValueError:
+ if weight not in weight_dict:
+ raise ValueError("weight is invalid")
+ self._weight = weight
+
+ def set_stretch(self, stretch):
+ """
+ Set the font stretch or width. Options are: 'ultra-condensed',
+ 'extra-condensed', 'condensed', 'semi-condensed', 'normal',
+ 'semi-expanded', 'expanded', 'extra-expanded' or
+ 'ultra-expanded', or a numeric value in the range 0-1000.
+ """
+ if stretch is None:
+ stretch = rcParams['font.stretch']
+ try:
+ stretch = int(stretch)
+ if stretch < 0 or stretch > 1000:
+ raise ValueError()
+ except ValueError:
+ if stretch not in stretch_dict:
+ raise ValueError("stretch is invalid")
+ self._stretch = stretch
+
+ def set_size(self, size):
+ """
+        Set the font size.  Either a relative value of 'xx-small',
+ 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
+ or an absolute font size, e.g., 12.
+ """
+ if size is None:
+ size = rcParams['font.size']
+ try:
+ size = float(size)
+ except ValueError:
+ try:
+ scale = font_scalings[size]
+ except KeyError:
+ raise ValueError(
+ "Size is invalid. Valid font size are "
+ + ", ".join(map(str, font_scalings)))
+ else:
+ size = scale * FontManager.get_default_size()
+ if size < 1.0:
+ _log.info('Fontsize %1.2f < 1.0 pt not allowed by FreeType. '
+ 'Setting fontsize = 1 pt', size)
+ size = 1.0
+ self._size = size
+
+ def set_file(self, file):
+ """
+ Set the filename of the fontfile to use. In this case, all
+ other properties will be ignored.
+ """
+ self._file = file
+
+ def set_fontconfig_pattern(self, pattern):
+ """
+ Set the properties by parsing a fontconfig *pattern*.
+
+ See the documentation on `fontconfig patterns
+ <https://www.freedesktop.org/software/fontconfig/fontconfig-user.html>`_.
+
+ This support does not require fontconfig to be installed or
+ support for it to be enabled. We are merely borrowing its
+ pattern syntax for use here.
+ """
+ for key, val in six.iteritems(self._parse_fontconfig_pattern(pattern)):
+ if type(val) == list:
+ getattr(self, "set_" + key)(val[0])
+ else:
+ getattr(self, "set_" + key)(val)
+
+ def copy(self):
+ """Return a deep copy of self"""
+ return FontProperties(_init=self)
+
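+
+# Illustrative sketch: a small, never-called helper showing typical
+# FontProperties usage, including the single-string fontconfig-pattern form
+# handled by ``__init__`` above.  The pattern string is purely hypothetical.
+def _fontproperties_example():
+    prop = FontProperties(family='serif', style='italic', size='large')
+    # A single string argument is treated as a fontconfig pattern.
+    pattern_prop = FontProperties('serif-12:weight=bold')
+    return prop.get_fontconfig_pattern(), pattern_prop.get_weight()
+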
+
+@cbook.deprecated("2.1")
+def ttfdict_to_fnames(d):
+ """
+ flatten a ttfdict to all the filenames it contains
+ """
+ fnames = []
+ for named in six.itervalues(d):
+ for styled in six.itervalues(named):
+ for variantd in six.itervalues(styled):
+ for weightd in six.itervalues(variantd):
+ for stretchd in six.itervalues(weightd):
+ for fname in six.itervalues(stretchd):
+ fnames.append(fname)
+ return fnames
+
+
+class JSONEncoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, FontManager):
+ return dict(o.__dict__, _class='FontManager')
+ elif isinstance(o, FontEntry):
+ return dict(o.__dict__, _class='FontEntry')
+ else:
+ return super(JSONEncoder, self).default(o)
+
+
+def _json_decode(o):
+ cls = o.pop('_class', None)
+ if cls is None:
+ return o
+ elif cls == 'FontManager':
+ r = FontManager.__new__(FontManager)
+ r.__dict__.update(o)
+ return r
+ elif cls == 'FontEntry':
+ r = FontEntry.__new__(FontEntry)
+ r.__dict__.update(o)
+ return r
+ else:
+ raise ValueError("don't know how to deserialize _class=%s" % cls)
+
+
+def json_dump(data, filename):
+ """Dumps a data structure as JSON in the named file.
+ Handles FontManager and its fields."""
+
+ with open(filename, 'w') as fh:
+ try:
+ json.dump(data, fh, cls=JSONEncoder, indent=2)
+ except IOError as e:
+ warnings.warn('Could not save font_manager cache {}'.format(e))
+
+def json_load(filename):
+ """Loads a data structure as JSON from the named file.
+ Handles FontManager and its fields."""
+
+ with open(filename, 'r') as fh:
+ return json.load(fh, object_hook=_json_decode)
+
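+# Illustrative note: these two helpers are what ``_rebuild()`` below uses to
+# persist and restore the font cache; the path here is hypothetical:
+#
+#     json_dump(fontManager, '/tmp/fontList.json')
+#     restored = json_load('/tmp/fontList.json')   # a FontManager again
+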
+
+def _normalize_font_family(family):
+ if isinstance(family, six.string_types):
+ family = [six.text_type(family)]
+ elif isinstance(family, Iterable):
+ family = [six.text_type(f) for f in family]
+ return family
+
+
+class TempCache(object):
+ """
+ A class to store temporary caches that are (a) not saved to disk
+ and (b) invalidated whenever certain font-related
+ rcParams---namely the family lookup lists---are changed or the
+ font cache is reloaded. This avoids the expensive linear search
+ through all fonts every time a font is looked up.
+ """
+    # A list of rcParams names that, when changed, invalidate this
+    # cache.
+ invalidating_rcparams = (
+ 'font.serif', 'font.sans-serif', 'font.cursive', 'font.fantasy',
+ 'font.monospace')
+
+ def __init__(self):
+ self._lookup_cache = {}
+ self._last_rcParams = self.make_rcparams_key()
+
+ def make_rcparams_key(self):
+ return [id(fontManager)] + [
+ rcParams[param] for param in self.invalidating_rcparams]
+
+ def get(self, prop):
+ key = self.make_rcparams_key()
+ if key != self._last_rcParams:
+ self._lookup_cache = {}
+ self._last_rcParams = key
+ return self._lookup_cache.get(prop)
+
+ def set(self, prop, value):
+ key = self.make_rcparams_key()
+ if key != self._last_rcParams:
+ self._lookup_cache = {}
+ self._last_rcParams = key
+ self._lookup_cache[prop] = value
+
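+# Illustrative note: the module keeps one TempCache per font extension (see
+# ``_lookup_cache`` further down); a cached result stays valid only while
+# id(fontManager) and the family-alias rcParams are unchanged:
+#
+#     cache = TempCache()
+#     cache.set(prop, '/path/to/match.ttf')   # hypothetical prop and result
+#     cache.get(prop)   # '/path/to/match.ttf', or None once an rcParams
+#                       # change has invalidated the cache
+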
+
+class FontManager(object):
+ """
+ On import, the :class:`FontManager` singleton instance creates a
+ list of TrueType fonts based on the font properties: name, style,
+ variant, weight, stretch, and size. The :meth:`findfont` method
+ does a nearest neighbor search to find the font that most closely
+ matches the specification. If no good enough match is found, a
+ default font is returned.
+ """
+ # Increment this version number whenever the font cache data
+    # format or behavior has changed and requires existing font
+ # cache files to be rebuilt.
+ __version__ = 201
+
+ def __init__(self, size=None, weight='normal'):
+ self._version = self.__version__
+
+ self.__default_weight = weight
+ self.default_size = size
+
+ paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
+ os.path.join(rcParams['datapath'], 'fonts', 'afm'),
+ os.path.join(rcParams['datapath'], 'fonts', 'pdfcorefonts')]
+
+ # Create list of font paths
+ for pathname in ['TTFPATH', 'AFMPATH']:
+ if pathname in os.environ:
+ ttfpath = os.environ[pathname]
+ if ttfpath.find(';') >= 0: #win32 style
+ paths.extend(ttfpath.split(';'))
+ elif ttfpath.find(':') >= 0: # unix style
+ paths.extend(ttfpath.split(':'))
+ else:
+ paths.append(ttfpath)
+ _log.info('font search path %s', str(paths))
+ # Load TrueType fonts and create font dictionary.
+
+ self.ttffiles = findSystemFonts(paths) + findSystemFonts()
+ self.defaultFamily = {
+ 'ttf': 'DejaVu Sans',
+ 'afm': 'Helvetica'}
+ self.defaultFont = {}
+
+ for fname in self.ttffiles:
+ _log.debug('trying fontname %s', fname)
+            if 'dejavusans.ttf' in fname.lower():
+ self.defaultFont['ttf'] = fname
+ break
+ else:
+ # use anything
+ self.defaultFont['ttf'] = self.ttffiles[0]
+
+ self.ttflist = createFontList(self.ttffiles)
+
+ self.afmfiles = (findSystemFonts(paths, fontext='afm')
+ + findSystemFonts(fontext='afm'))
+ self.afmlist = createFontList(self.afmfiles, fontext='afm')
+ if len(self.afmfiles):
+ self.defaultFont['afm'] = self.afmfiles[0]
+ else:
+ self.defaultFont['afm'] = None
+
+ def get_default_weight(self):
+ """
+ Return the default font weight.
+ """
+ return self.__default_weight
+
+ @staticmethod
+ def get_default_size():
+ """
+ Return the default font size.
+ """
+ return rcParams['font.size']
+
+ def set_default_weight(self, weight):
+ """
+ Set the default font weight. The initial value is 'normal'.
+ """
+ self.__default_weight = weight
+
+ def update_fonts(self, filenames):
+ """
+ Update the font dictionary with new font files.
+ Currently not implemented.
+ """
+ # !!!! Needs implementing
+ raise NotImplementedError
+
+ # Each of the scoring functions below should return a value between
+ # 0.0 (perfect match) and 1.0 (terrible match)
+ def score_family(self, families, family2):
+ """
+ Returns a match score between the list of font families in
+ *families* and the font family name *family2*.
+
+ An exact match at the head of the list returns 0.0.
+
+ A match further down the list will return between 0 and 1.
+
+ No match will return 1.0.
+ """
+ if not isinstance(families, (list, tuple)):
+ families = [families]
+ elif len(families) == 0:
+ return 1.0
+ family2 = family2.lower()
+ step = 1 / len(families)
+ for i, family1 in enumerate(families):
+ family1 = family1.lower()
+ if family1 in font_family_aliases:
+ if family1 in ('sans', 'sans serif'):
+ family1 = 'sans-serif'
+ options = rcParams['font.' + family1]
+ options = [x.lower() for x in options]
+ if family2 in options:
+ idx = options.index(family2)
+ return (i + (idx / len(options))) * step
+ elif family1 == family2:
+ # The score should be weighted by where in the
+ # list the font was found.
+ return i * step
+ return 1.0
+
+ def score_style(self, style1, style2):
+ """
+ Returns a match score between *style1* and *style2*.
+
+ An exact match returns 0.0.
+
+ A match between 'italic' and 'oblique' returns 0.1.
+
+ No match returns 1.0.
+ """
+ if style1 == style2:
+ return 0.0
+ elif style1 in ('italic', 'oblique') and \
+ style2 in ('italic', 'oblique'):
+ return 0.1
+ return 1.0
+
+ def score_variant(self, variant1, variant2):
+ """
+ Returns a match score between *variant1* and *variant2*.
+
+ An exact match returns 0.0, otherwise 1.0.
+ """
+ if variant1 == variant2:
+ return 0.0
+ else:
+ return 1.0
+
+ def score_stretch(self, stretch1, stretch2):
+ """
+ Returns a match score between *stretch1* and *stretch2*.
+
+ The result is the absolute value of the difference between the
+ CSS numeric values of *stretch1* and *stretch2*, normalized
+ between 0.0 and 1.0.
+ """
+ try:
+ stretchval1 = int(stretch1)
+ except ValueError:
+ stretchval1 = stretch_dict.get(stretch1, 500)
+ try:
+ stretchval2 = int(stretch2)
+ except ValueError:
+ stretchval2 = stretch_dict.get(stretch2, 500)
+ return abs(stretchval1 - stretchval2) / 1000.0
+
+ def score_weight(self, weight1, weight2):
+ """
+ Returns a match score between *weight1* and *weight2*.
+
+        The result is 0.0 if both *weight1* and *weight2* are given as
+        strings and have the same value.
+
+ Otherwise, the result is the absolute value of the difference between the
+ CSS numeric values of *weight1* and *weight2*, normalized
+ between 0.05 and 1.0.
+ """
+
+ # exact match of the weight names (e.g. weight1 == weight2 == "regular")
+ if (isinstance(weight1, six.string_types) and
+ isinstance(weight2, six.string_types) and
+ weight1 == weight2):
+ return 0.0
+ try:
+ weightval1 = int(weight1)
+ except ValueError:
+ weightval1 = weight_dict.get(weight1, 500)
+ try:
+ weightval2 = int(weight2)
+ except ValueError:
+ weightval2 = weight_dict.get(weight2, 500)
+ return 0.95*(abs(weightval1 - weightval2) / 1000.0) + 0.05
+
+ def score_size(self, size1, size2):
+ """
+ Returns a match score between *size1* and *size2*.
+
+ If *size2* (the size specified in the font file) is 'scalable', this
+ function always returns 0.0, since any font size can be generated.
+
+ Otherwise, the result is the absolute distance between *size1* and
+ *size2*, normalized so that the usual range of font sizes (6pt -
+ 72pt) will lie between 0.0 and 1.0.
+ """
+ if size2 == 'scalable':
+ return 0.0
+        # Size values should already have been converted to floats by
+        # FontProperties.set_size; fall back to the scaling names otherwise.
+ try:
+ sizeval1 = float(size1)
+ except ValueError:
+ sizeval1 = self.default_size * font_scalings[size1]
+ try:
+ sizeval2 = float(size2)
+ except ValueError:
+ return 1.0
+ return abs(sizeval1 - sizeval2) / 72.0
+
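+    # Illustrative note: for a requested family list ['Arial', 'sans-serif']
+    # scored against a font named 'DejaVu Sans', and assuming that name
+    # appears in rcParams['font.sans-serif'], score_family() returns a value
+    # in [0.5, 1.0) because the matching alias is the second of two entries;
+    # findfont() below then adds the other scores, weighting family by 10.
+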
+ def findfont(self, prop, fontext='ttf', directory=None,
+ fallback_to_default=True, rebuild_if_missing=True):
+ """
+ Search the font list for the font that most closely matches
+ the :class:`FontProperties` *prop*.
+
+ :meth:`findfont` performs a nearest neighbor search. Each
+ font is given a similarity score to the target font
+        properties.  The first font with the best (lowest) score is
+ returned. If no matches below a certain threshold are found,
+ the default font (usually DejaVu Sans) is returned.
+
+        If `directory` is specified, only fonts from the given
+        directory (or a subdirectory of it) are returned.
+
+ The result is cached, so subsequent lookups don't have to
+ perform the O(n) nearest neighbor search.
+
+        If `fallback_to_default` is True, will fall back to the default
+ font family (usually "DejaVu Sans" or "Helvetica") if
+ the first lookup hard-fails.
+
+ See the `W3C Cascading Style Sheet, Level 1
+ <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
+ for a description of the font finding algorithm.
+ """
+ if not isinstance(prop, FontProperties):
+ prop = FontProperties(prop)
+ fname = prop.get_file()
+
+ if fname is not None:
+ _log.debug('findfont returning %s', fname)
+ return fname
+
+ if fontext == 'afm':
+ fontlist = self.afmlist
+ else:
+ fontlist = self.ttflist
+
+ if directory is None:
+ cached = _lookup_cache[fontext].get(prop)
+ if cached is not None:
+ return cached
+ else:
+ directory = os.path.normcase(directory)
+
+ best_score = 1e64
+ best_font = None
+
+ for font in fontlist:
+ if (directory is not None and
+ os.path.commonprefix([os.path.normcase(font.fname),
+ directory]) != directory):
+ continue
+ # Matching family should have highest priority, so it is multiplied
+ # by 10.0
+ score = \
+ self.score_family(prop.get_family(), font.name) * 10.0 + \
+ self.score_style(prop.get_style(), font.style) + \
+ self.score_variant(prop.get_variant(), font.variant) + \
+ self.score_weight(prop.get_weight(), font.weight) + \
+ self.score_stretch(prop.get_stretch(), font.stretch) + \
+ self.score_size(prop.get_size(), font.size)
+ if score < best_score:
+ best_score = score
+ best_font = font
+ if score == 0:
+ break
+
+ if best_font is None or best_score >= 10.0:
+ if fallback_to_default:
+ warnings.warn(
+ 'findfont: Font family %s not found. Falling back to %s' %
+ (prop.get_family(), self.defaultFamily[fontext]))
+ default_prop = prop.copy()
+ default_prop.set_family(self.defaultFamily[fontext])
+ return self.findfont(default_prop, fontext, directory, False)
+ else:
+ # This is a hard fail -- we can't find anything reasonable,
+                # so just return the default font for this fontext.
+ warnings.warn(
+ 'findfont: Could not match %s. Returning %s' %
+ (prop, self.defaultFont[fontext]),
+ UserWarning)
+ result = self.defaultFont[fontext]
+ else:
+ _log.debug(
+ 'findfont: Matching %s to %s (%s) with score of %f' %
+ (prop, best_font.name, repr(best_font.fname), best_score))
+ result = best_font.fname
+
+ if not os.path.isfile(result):
+ if rebuild_if_missing:
+ _log.info(
+ 'findfont: Found a missing font file. Rebuilding cache.')
+ _rebuild()
+ return fontManager.findfont(
+ prop, fontext, directory, True, False)
+ else:
+ raise ValueError("No valid font could be found")
+
+ if directory is None:
+ _lookup_cache[fontext].set(prop, result)
+ return result
+
+_is_opentype_cff_font_cache = {}
+def is_opentype_cff_font(filename):
+ """
+ Returns True if the given font is a Postscript Compact Font Format
+ Font embedded in an OpenType wrapper. Used by the PostScript and
+ PDF backends that can not subset these fonts.
+ """
+ if os.path.splitext(filename)[1].lower() == '.otf':
+ result = _is_opentype_cff_font_cache.get(filename)
+ if result is None:
+ with open(filename, 'rb') as fd:
+ tag = fd.read(4)
+ result = (tag == b'OTTO')
+ _is_opentype_cff_font_cache[filename] = result
+ return result
+ return False
+
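+# Illustrative note: is_opentype_cff_font() only inspects the first four
+# bytes of ``.otf`` files, so a hypothetical check looks like:
+#
+#     if is_opentype_cff_font('/path/to/SomeFont.otf'):
+#         pass  # the PS/PDF backends cannot subset such fonts (see above)
+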
+fontManager = None
+_fmcache = None
+
+
+_get_font = lru_cache(64)(ft2font.FT2Font)
+
+def get_font(filename, hinting_factor=None):
+ if hinting_factor is None:
+ hinting_factor = rcParams['text.hinting_factor']
+ return _get_font(filename, hinting_factor)
+
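+# Illustrative note: because ``_get_font`` is wrapped in an LRU cache keyed
+# on (filename, hinting_factor), repeated lookups of the same file return
+# the same FT2Font object:
+#
+#     face1 = get_font(findfont(FontProperties(family='sans-serif')))
+#     face2 = get_font(findfont(FontProperties(family='sans-serif')))
+#     assert face1 is face2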
+
+# The experimental fontconfig-based backend.
+if USE_FONTCONFIG and sys.platform != 'win32':
+
+ def fc_match(pattern, fontext):
+ fontexts = get_fontext_synonyms(fontext)
+ ext = "." + fontext
+ try:
+ pipe = subprocess.Popen(
+ ['fc-match', '-s', '--format=%{file}\\n', pattern],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ output = pipe.communicate()[0]
+ except (OSError, IOError):
+ return None
+
+        # The bulk of the output from fc-match is ascii, so we keep the
+ # result in bytes and parse it as bytes, until we extract the
+ # filename, which is in sys.filesystemencoding().
+ if pipe.returncode == 0:
+ for fname in output.split(b'\n'):
+ try:
+ fname = six.text_type(fname, sys.getfilesystemencoding())
+ except UnicodeDecodeError:
+ continue
+ if os.path.splitext(fname)[1][1:] in fontexts:
+ return fname
+ return None
+
+ _fc_match_cache = {}
+
+ def findfont(prop, fontext='ttf'):
+ if not isinstance(prop, six.string_types):
+ prop = prop.get_fontconfig_pattern()
+ cached = _fc_match_cache.get(prop)
+ if cached is not None:
+ return cached
+
+ result = fc_match(prop, fontext)
+ if result is None:
+ result = fc_match(':', fontext)
+
+ _fc_match_cache[prop] = result
+ return result
+
+else:
+ _fmcache = None
+
+ cachedir = get_cachedir()
+ if cachedir is not None:
+ _fmcache = os.path.join(cachedir, 'fontList.json')
+
+ fontManager = None
+
+ _lookup_cache = {
+ 'ttf': TempCache(),
+ 'afm': TempCache()
+ }
+
+ def _rebuild():
+ global fontManager
+
+ fontManager = FontManager()
+
+ if _fmcache:
+ with cbook.Locked(cachedir):
+ json_dump(fontManager, _fmcache)
+ _log.info("generated new fontManager")
+
+ if _fmcache:
+ try:
+ fontManager = json_load(_fmcache)
+ if (not hasattr(fontManager, '_version') or
+ fontManager._version != FontManager.__version__):
+ _rebuild()
+ else:
+ fontManager.default_size = None
+ _log.debug("Using fontManager instance from %s", _fmcache)
+ except cbook.Locked.TimeoutError:
+ raise
+ except:
+ _rebuild()
+ else:
+ _rebuild()
+
+ def findfont(prop, **kw):
+ global fontManager
+ font = fontManager.findfont(prop, **kw)
+ return font
diff --git a/contrib/python/matplotlib/py2/matplotlib/fontconfig_pattern.py b/contrib/python/matplotlib/py2/matplotlib/fontconfig_pattern.py
new file mode 100644
index 00000000000..5104c25d362
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/fontconfig_pattern.py
@@ -0,0 +1,196 @@
+"""
+A module for parsing and generating fontconfig patterns.
+
+See the `fontconfig pattern specification
+<https://www.freedesktop.org/software/fontconfig/fontconfig-user.html>`_ for
+more information.
+"""
+
+# This class is defined here because it must be available in:
+# - The old-style config framework (:file:`rcsetup.py`)
+# - The font manager (:file:`font_manager.py`)
+
+# It probably logically belongs in :file:`font_manager.py`, but placing it
+# there would have created cyclical dependency problems.
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import re
+from pyparsing import (Literal, ZeroOrMore, Optional, Regex, StringEnd,
+ ParseException, Suppress)
+
+try:
+ from functools import lru_cache
+except ImportError:
+ from backports.functools_lru_cache import lru_cache
+
+family_punc = r'\\\-:,'
+family_unescape = re.compile(r'\\([%s])' % family_punc).sub
+family_escape = re.compile(r'([%s])' % family_punc).sub
+
+value_punc = r'\\=_:,'
+value_unescape = re.compile(r'\\([%s])' % value_punc).sub
+value_escape = re.compile(r'([%s])' % value_punc).sub
+
+class FontconfigPatternParser(object):
+ """A simple pyparsing-based parser for fontconfig-style patterns.
+
+ See the `fontconfig pattern specification
+ <https://www.freedesktop.org/software/fontconfig/fontconfig-user.html>`_
+ for more information.
+ """
+
+ _constants = {
+ 'thin' : ('weight', 'light'),
+ 'extralight' : ('weight', 'light'),
+ 'ultralight' : ('weight', 'light'),
+ 'light' : ('weight', 'light'),
+ 'book' : ('weight', 'book'),
+ 'regular' : ('weight', 'regular'),
+ 'normal' : ('weight', 'normal'),
+ 'medium' : ('weight', 'medium'),
+ 'demibold' : ('weight', 'demibold'),
+ 'semibold' : ('weight', 'semibold'),
+ 'bold' : ('weight', 'bold'),
+ 'extrabold' : ('weight', 'extra bold'),
+ 'black' : ('weight', 'black'),
+ 'heavy' : ('weight', 'heavy'),
+ 'roman' : ('slant', 'normal'),
+ 'italic' : ('slant', 'italic'),
+ 'oblique' : ('slant', 'oblique'),
+ 'ultracondensed' : ('width', 'ultra-condensed'),
+ 'extracondensed' : ('width', 'extra-condensed'),
+ 'condensed' : ('width', 'condensed'),
+ 'semicondensed' : ('width', 'semi-condensed'),
+ 'expanded' : ('width', 'expanded'),
+ 'extraexpanded' : ('width', 'extra-expanded'),
+ 'ultraexpanded' : ('width', 'ultra-expanded')
+ }
+
+ def __init__(self):
+ family = Regex(r'([^%s]|(\\[%s]))*' %
+ (family_punc, family_punc)) \
+ .setParseAction(self._family)
+ size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
+ .setParseAction(self._size)
+ name = Regex(r'[a-z]+') \
+ .setParseAction(self._name)
+ value = Regex(r'([^%s]|(\\[%s]))*' %
+ (value_punc, value_punc)) \
+ .setParseAction(self._value)
+
+ families =(family
+ + ZeroOrMore(
+ Literal(',')
+ + family)
+ ).setParseAction(self._families)
+
+ point_sizes =(size
+ + ZeroOrMore(
+ Literal(',')
+ + size)
+ ).setParseAction(self._point_sizes)
+
+ property =( (name
+ + Suppress(Literal('='))
+ + value
+ + ZeroOrMore(
+ Suppress(Literal(','))
+ + value)
+ )
+ | name
+ ).setParseAction(self._property)
+
+ pattern =(Optional(
+ families)
+ + Optional(
+ Literal('-')
+ + point_sizes)
+ + ZeroOrMore(
+ Literal(':')
+ + property)
+ + StringEnd()
+ )
+
+ self._parser = pattern
+ self.ParseException = ParseException
+
+ def parse(self, pattern):
+ """
+ Parse the given fontconfig *pattern* and return a dictionary
+ of key/value pairs useful for initializing a
+ :class:`font_manager.FontProperties` object.
+ """
+ props = self._properties = {}
+ try:
+ self._parser.parseString(pattern)
+ except self.ParseException as e:
+ raise ValueError(
+ "Could not parse font string: '%s'\n%s" % (pattern, e))
+
+ self._properties = None
+
+ self._parser.resetCache()
+
+ return props
+
+ def _family(self, s, loc, tokens):
+ return [family_unescape(r'\1', str(tokens[0]))]
+
+ def _size(self, s, loc, tokens):
+ return [float(tokens[0])]
+
+ def _name(self, s, loc, tokens):
+ return [str(tokens[0])]
+
+ def _value(self, s, loc, tokens):
+ return [value_unescape(r'\1', str(tokens[0]))]
+
+ def _families(self, s, loc, tokens):
+ self._properties['family'] = [str(x) for x in tokens]
+ return []
+
+ def _point_sizes(self, s, loc, tokens):
+ self._properties['size'] = [str(x) for x in tokens]
+ return []
+
+ def _property(self, s, loc, tokens):
+ if len(tokens) == 1:
+ if tokens[0] in self._constants:
+ key, val = self._constants[tokens[0]]
+ self._properties.setdefault(key, []).append(val)
+ else:
+ key = tokens[0]
+ val = tokens[1:]
+ self._properties.setdefault(key, []).extend(val)
+ return []
+
+
+# `parse_fontconfig_pattern` is a bottleneck during the tests because it is
+# repeatedly called when the rcParams are reset (to validate the default
+# fonts). In practice, the cache size doesn't grow beyond a few dozen entries
+# during the test suite.
+parse_fontconfig_pattern = lru_cache()(FontconfigPatternParser().parse)
+
+
+def generate_fontconfig_pattern(d):
+ """
+ Given a dictionary of key/value pairs, generates a fontconfig
+ pattern string.
+ """
+ props = []
+ families = ''
+ size = ''
+ for key in 'family style variant weight stretch file size'.split():
+ val = getattr(d, 'get_' + key)()
+ if val is not None and val != []:
+ if type(val) == list:
+ val = [value_escape(r'\\\1', str(x)) for x in val
+ if x is not None]
+ if val != []:
+ val = ','.join(val)
+ props.append(":%s=%s" % (key, val))
+ return ''.join(props)
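+
+
+# Illustrative sketch: a never-called helper showing what
+# ``parse_fontconfig_pattern`` produces for a pattern string (the pattern
+# itself is purely hypothetical).
+def _pattern_example():
+    props = parse_fontconfig_pattern('DejaVu Sans-14:weight=bold:slant=italic')
+    # props == {'family': ['DejaVu Sans'], 'size': ['14.0'],
+    #           'weight': ['bold'], 'slant': ['italic']}
+    return props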
diff --git a/contrib/python/matplotlib/py2/matplotlib/gridspec.py b/contrib/python/matplotlib/py2/matplotlib/gridspec.py
new file mode 100644
index 00000000000..281d605dda7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/gridspec.py
@@ -0,0 +1,498 @@
+"""
+:mod:`~matplotlib.gridspec` is a module which specifies the location
+of the subplot in the figure.
+
+ `GridSpec`
+        specifies the geometry of the grid in which a subplot will be
+ placed. The number of rows and number of columns of the grid
+ need to be set. Optionally, the subplot layout parameters
+ (e.g., left, right, etc.) can be tuned.
+
+ `SubplotSpec`
+ specifies the location of the subplot in the given `GridSpec`.
+
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import six
+
+import copy
+import logging
+import warnings
+
+import numpy as np
+
+import matplotlib as mpl
+from matplotlib import _pylab_helpers, tight_layout, rcParams
+from matplotlib.transforms import Bbox
+import matplotlib._layoutbox as layoutbox
+from matplotlib.cbook import mplDeprecation
+
+_log = logging.getLogger(__name__)
+
+
+class GridSpecBase(object):
+ """
+ A base class of GridSpec that specifies the geometry of the grid
+    in which a subplot will be placed.
+ """
+
+ def __init__(self, nrows, ncols, height_ratios=None, width_ratios=None):
+ """
+ The number of rows and number of columns of the grid need to
+ be set. Optionally, the ratio of heights and widths of rows and
+ columns can be specified.
+ """
+ self._nrows, self._ncols = nrows, ncols
+ self.set_height_ratios(height_ratios)
+ self.set_width_ratios(width_ratios)
+
+ def get_geometry(self):
+ 'get the geometry of the grid, e.g., 2,3'
+ return self._nrows, self._ncols
+
+ def get_subplot_params(self, figure=None, fig=None):
+ pass
+
+ def new_subplotspec(self, loc, rowspan=1, colspan=1):
+ """
+        Create and return a SubplotSpec instance.
+ """
+ loc1, loc2 = loc
+ subplotspec = self[loc1:loc1+rowspan, loc2:loc2+colspan]
+ return subplotspec
+
+ def set_width_ratios(self, width_ratios):
+ if width_ratios is not None and len(width_ratios) != self._ncols:
+ raise ValueError('Expected the given number of width ratios to '
+ 'match the number of columns of the grid')
+ self._col_width_ratios = width_ratios
+
+ def get_width_ratios(self):
+ return self._col_width_ratios
+
+ def set_height_ratios(self, height_ratios):
+ if height_ratios is not None and len(height_ratios) != self._nrows:
+ raise ValueError('Expected the given number of height ratios to '
+ 'match the number of rows of the grid')
+ self._row_height_ratios = height_ratios
+
+ def get_height_ratios(self):
+ return self._row_height_ratios
+
+ def get_grid_positions(self, fig, raw=False):
+ """
+        Return lists of bottom and top positions of rows, and left and
+        right positions of columns.
+
+ If raw=True, then these are all in units relative to the container
+ with no margins. (used for constrained_layout).
+ """
+ nrows, ncols = self.get_geometry()
+
+ if raw:
+ left = 0.
+ right = 1.
+ bottom = 0.
+ top = 1.
+ wspace = 0.
+ hspace = 0.
+ else:
+ subplot_params = self.get_subplot_params(fig)
+ left = subplot_params.left
+ right = subplot_params.right
+ bottom = subplot_params.bottom
+ top = subplot_params.top
+ wspace = subplot_params.wspace
+ hspace = subplot_params.hspace
+ tot_width = right - left
+ tot_height = top - bottom
+
+        # calculate accumulated heights of rows
+ cell_h = tot_height / (nrows + hspace*(nrows-1))
+ sep_h = hspace * cell_h
+ if self._row_height_ratios is not None:
+ norm = cell_h * nrows / sum(self._row_height_ratios)
+ cell_heights = [r * norm for r in self._row_height_ratios]
+ else:
+ cell_heights = [cell_h] * nrows
+ sep_heights = [0] + ([sep_h] * (nrows-1))
+ cell_hs = np.cumsum(np.column_stack([sep_heights, cell_heights]).flat)
+
+        # calculate accumulated widths of columns
+ cell_w = tot_width / (ncols + wspace*(ncols-1))
+ sep_w = wspace * cell_w
+ if self._col_width_ratios is not None:
+ norm = cell_w * ncols / sum(self._col_width_ratios)
+ cell_widths = [r * norm for r in self._col_width_ratios]
+ else:
+ cell_widths = [cell_w] * ncols
+ sep_widths = [0] + ([sep_w] * (ncols-1))
+ cell_ws = np.cumsum(np.column_stack([sep_widths, cell_widths]).flat)
+
+ fig_tops, fig_bottoms = (top - cell_hs).reshape((-1, 2)).T
+ fig_lefts, fig_rights = (left + cell_ws).reshape((-1, 2)).T
+ return fig_bottoms, fig_tops, fig_lefts, fig_rights
+
+ def __getitem__(self, key):
+ """Create and return a SuplotSpec instance.
+ """
+ nrows, ncols = self.get_geometry()
+
+ def _normalize(key, size): # Includes last index.
+ if isinstance(key, slice):
+ start, stop, _ = key.indices(size)
+ if stop > start:
+ return start, stop - 1
+ else:
+ if key < 0:
+ key += size
+ if 0 <= key < size:
+ return key, key
+ raise IndexError("invalid index")
+
+ if isinstance(key, tuple):
+ try:
+ k1, k2 = key
+ except ValueError:
+ raise ValueError("unrecognized subplot spec")
+ num1, num2 = np.ravel_multi_index(
+ [_normalize(k1, nrows), _normalize(k2, ncols)], (nrows, ncols))
+ else: # Single key
+ num1, num2 = _normalize(key, nrows * ncols)
+
+ return SubplotSpec(self, num1, num2)
+
+
+class GridSpec(GridSpecBase):
+ """
+    A class that specifies the geometry of the grid in which a subplot
+    will be placed.  The location of the grid is determined in a way
+    similar to that of SubplotParams.
+ """
+
+ def __init__(self, nrows, ncols, figure=None,
+ left=None, bottom=None, right=None, top=None,
+ wspace=None, hspace=None,
+ width_ratios=None, height_ratios=None):
+ """
+ The number of rows and number of columns of the grid need to be set.
+ Optionally, the subplot layout parameters (e.g., left, right, etc.)
+ can be tuned.
+
+ Parameters
+ ----------
+ nrows : int
+ Number of rows in grid.
+
+ ncols : int
+ Number or columns in grid.
+
+ Notes
+ -----
+ See `~.figure.SubplotParams` for descriptions of the layout parameters.
+ """
+ self.left = left
+ self.bottom = bottom
+ self.right = right
+ self.top = top
+ self.wspace = wspace
+ self.hspace = hspace
+ self.figure = figure
+
+ GridSpecBase.__init__(self, nrows, ncols,
+ width_ratios=width_ratios,
+ height_ratios=height_ratios)
+
+ if (self.figure is None) or not self.figure.get_constrained_layout():
+ self._layoutbox = None
+ else:
+ self.figure.init_layoutbox()
+ self._layoutbox = layoutbox.LayoutBox(
+ parent=self.figure._layoutbox,
+ name='gridspec' + layoutbox.seq_id(),
+ artist=self)
+            # by default the layoutbox for a gridspec will fill a figure.
+ # but this can change below if the gridspec is created from a
+ # subplotspec. (GridSpecFromSubplotSpec)
+
+ _AllowedKeys = ["left", "bottom", "right", "top", "wspace", "hspace"]
+
+ def __getstate__(self):
+ state = self.__dict__
+ try:
+ state.pop('_layoutbox')
+ except KeyError:
+ pass
+ return state
+
+ def __setstate__(self, state):
+ self.__dict__ = state
+ # layoutboxes don't survive pickling...
+ self._layoutbox = None
+
+ def update(self, **kwargs):
+ """
+ Update the current values. If any kwarg is None, default to
+ the current value, if set, otherwise to rc.
+ """
+
+ for k, v in six.iteritems(kwargs):
+ if k in self._AllowedKeys:
+ setattr(self, k, v)
+ else:
+                raise AttributeError("%s is an unknown keyword" % (k,))
+
+ for figmanager in six.itervalues(_pylab_helpers.Gcf.figs):
+ for ax in figmanager.canvas.figure.axes:
+ # copied from Figure.subplots_adjust
+ if not isinstance(ax, mpl.axes.SubplotBase):
+ # Check if sharing a subplots axis
+ if isinstance(ax._sharex, mpl.axes.SubplotBase):
+ if ax._sharex.get_subplotspec().get_gridspec() == self:
+ ax._sharex.update_params()
+ ax._set_position(ax._sharex.figbox)
+ elif isinstance(ax._sharey, mpl.axes.SubplotBase):
+ if ax._sharey.get_subplotspec().get_gridspec() == self:
+ ax._sharey.update_params()
+ ax._set_position(ax._sharey.figbox)
+ else:
+ ss = ax.get_subplotspec().get_topmost_subplotspec()
+ if ss.get_gridspec() == self:
+ ax.update_params()
+ ax._set_position(ax.figbox)
+
+ def get_subplot_params(self, figure=None, fig=None):
+ """
+ Return a dictionary of subplot layout parameters. The default
+ parameters are from rcParams unless a figure attribute is set.
+ """
+ if fig is not None:
+ warnings.warn("the 'fig' kwarg is deprecated "
+ "use 'figure' instead", mplDeprecation)
+ if figure is None:
+ figure = fig
+
+ if figure is None:
+ kw = {k: rcParams["figure.subplot."+k] for k in self._AllowedKeys}
+ subplotpars = mpl.figure.SubplotParams(**kw)
+ else:
+ subplotpars = copy.copy(figure.subplotpars)
+
+ update_kw = {k: getattr(self, k) for k in self._AllowedKeys}
+ subplotpars.update(**update_kw)
+
+ return subplotpars
+
+ def locally_modified_subplot_params(self):
+ return [k for k in self._AllowedKeys if getattr(self, k)]
+
+ def tight_layout(self, figure, renderer=None,
+ pad=1.08, h_pad=None, w_pad=None, rect=None):
+ """
+ Adjust subplot parameters to give specified padding.
+
+ Parameters
+ ----------
+
+ pad : float
+ Padding between the figure edge and the edges of subplots, as a
+ fraction of the font-size.
+ h_pad, w_pad : float, optional
+ Padding (height/width) between edges of adjacent subplots.
+ Defaults to ``pad_inches``.
+ rect : tuple of 4 floats, optional
+ (left, bottom, right, top) rectangle in normalized figure
+ coordinates that the whole subplots area (including labels) will
+ fit into. Default is (0, 0, 1, 1).
+ """
+
+ subplotspec_list = tight_layout.get_subplotspec_list(
+ figure.axes, grid_spec=self)
+ if None in subplotspec_list:
+ warnings.warn("This figure includes Axes that are not compatible "
+ "with tight_layout, so results might be incorrect.")
+
+ if renderer is None:
+ renderer = tight_layout.get_renderer(figure)
+
+ kwargs = tight_layout.get_tight_layout_figure(
+ figure, figure.axes, subplotspec_list, renderer,
+ pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
+ self.update(**kwargs)
+
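+# Illustrative sketch: a never-called helper showing GridSpec indexing and
+# the SubplotSpec objects it yields.
+def _gridspec_example():
+    gs = GridSpec(2, 3, width_ratios=[1, 2, 1], height_ratios=[3, 1])
+    top_row = gs[0, :]    # a SubplotSpec spanning the whole first row
+    corner = gs[-1, -1]   # negative indices are normalized in __getitem__
+    return top_row.get_rows_columns(), corner.get_geometry()
+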
+
+class GridSpecFromSubplotSpec(GridSpecBase):
+ """
+ GridSpec whose subplot layout parameters are inherited from the
+ location specified by a given SubplotSpec.
+ """
+ def __init__(self, nrows, ncols,
+ subplot_spec,
+ wspace=None, hspace=None,
+ height_ratios=None, width_ratios=None):
+ """
+ The number of rows and number of columns of the grid need to
+        be set.  An instance of SubplotSpec, from which the layout
+        parameters will be inherited, is also required.  The wspace
+        and hspace of the layout can optionally be specified, otherwise
+        the default values (from the figure or rcParams) will be used.
+ """
+ self._wspace = wspace
+ self._hspace = hspace
+ self._subplot_spec = subplot_spec
+ GridSpecBase.__init__(self, nrows, ncols,
+ width_ratios=width_ratios,
+ height_ratios=height_ratios)
+ # do the layoutboxes
+ subspeclb = subplot_spec._layoutbox
+ if subspeclb is None:
+ self._layoutbox = None
+ else:
+ # OK, this is needed to divide the figure.
+ self._layoutbox = subspeclb.layout_from_subplotspec(
+ subplot_spec,
+ name=subspeclb.name + '.gridspec' + layoutbox.seq_id(),
+ artist=self)
+
+ def get_subplot_params(self, figure=None, fig=None):
+ """Return a dictionary of subplot layout parameters.
+ """
+ if fig is not None:
+ warnings.warn("the 'fig' kwarg is deprecated "
+ "use 'figure' instead", mplDeprecation)
+ if figure is None:
+ figure = fig
+
+ hspace = (self._hspace if self._hspace is not None
+ else figure.subplotpars.hspace if figure is not None
+ else rcParams["figure.subplot.hspace"])
+ wspace = (self._wspace if self._wspace is not None
+ else figure.subplotpars.wspace if figure is not None
+ else rcParams["figure.subplot.wspace"])
+
+ figbox = self._subplot_spec.get_position(figure)
+ left, bottom, right, top = figbox.extents
+
+ return mpl.figure.SubplotParams(left=left, right=right,
+ bottom=bottom, top=top,
+ wspace=wspace, hspace=hspace)
+
+ def get_topmost_subplotspec(self):
+ """Get the topmost SubplotSpec instance associated with the subplot."""
+ return self._subplot_spec.get_topmost_subplotspec()
+
+
+class SubplotSpec(object):
+ """Specifies the location of the subplot in the given `GridSpec`.
+ """
+
+ def __init__(self, gridspec, num1, num2=None):
+ """
+ The subplot will occupy the num1-th cell of the given
+ gridspec. If num2 is provided, the subplot will span between
+        the num1-th cell and the num2-th cell, inclusive.
+
+ The index starts from 0.
+ """
+ self._gridspec = gridspec
+ self.num1 = num1
+ self.num2 = num2
+ if gridspec._layoutbox is not None:
+ glb = gridspec._layoutbox
+            # Note that we don't assign any layout yet; we just make the
+            # layoutbox that will contain all items associated with this
+            # axis.  This can include other axes like
+ # a colorbar or a legend.
+ self._layoutbox = layoutbox.LayoutBox(
+ parent=glb,
+ name=glb.name + '.ss' + layoutbox.seq_id(),
+ artist=self)
+ else:
+ self._layoutbox = None
+
+ def __getstate__(self):
+ state = self.__dict__
+ try:
+ state.pop('_layoutbox')
+ except KeyError:
+ pass
+ return state
+
+ def __setstate__(self, state):
+ self.__dict__ = state
+ # layoutboxes don't survive pickling...
+ self._layoutbox = None
+
+ def get_gridspec(self):
+ return self._gridspec
+
+ def get_geometry(self):
+ """
+ Get the subplot geometry (``n_rows, n_cols, start, stop``).
+
+        *start* and *stop* are the indices of the first and last cells
+        occupied by the subplot.
+ """
+ rows, cols = self.get_gridspec().get_geometry()
+ return rows, cols, self.num1, self.num2
+
+ def get_rows_columns(self):
+ """
+ Get the subplot row and column numbers:
+ (``n_rows, n_cols, row_start, row_stop, col_start, col_stop``)
+ """
+ gridspec = self.get_gridspec()
+ nrows, ncols = gridspec.get_geometry()
+ row_start, col_start = divmod(self.num1, ncols)
+ if self.num2 is not None:
+ row_stop, col_stop = divmod(self.num2, ncols)
+ else:
+ row_stop = row_start
+ col_stop = col_start
+ return nrows, ncols, row_start, row_stop, col_start, col_stop
+
+ def get_position(self, figure, return_all=False):
+ """Update the subplot position from ``figure.subplotpars``.
+ """
+ gridspec = self.get_gridspec()
+ nrows, ncols = gridspec.get_geometry()
+ rows, cols = np.unravel_index(
+ [self.num1] if self.num2 is None else [self.num1, self.num2],
+ (nrows, ncols))
+ fig_bottoms, fig_tops, fig_lefts, fig_rights = \
+ gridspec.get_grid_positions(figure)
+
+ fig_bottom = fig_bottoms[rows].min()
+ fig_top = fig_tops[rows].max()
+ fig_left = fig_lefts[cols].min()
+ fig_right = fig_rights[cols].max()
+ figbox = Bbox.from_extents(fig_left, fig_bottom, fig_right, fig_top)
+
+ if return_all:
+ return figbox, rows[0], cols[0], nrows, ncols
+ else:
+ return figbox
+
+ def get_topmost_subplotspec(self):
+ 'get the topmost SubplotSpec instance associated with the subplot'
+ gridspec = self.get_gridspec()
+ if hasattr(gridspec, "get_topmost_subplotspec"):
+ return gridspec.get_topmost_subplotspec()
+ else:
+ return self
+
+ def __eq__(self, other):
+ # other may not even have the attributes we are checking.
+ return ((self._gridspec, self.num1, self.num2)
+ == (getattr(other, "_gridspec", object()),
+ getattr(other, "num1", object()),
+ getattr(other, "num2", object())))
+
+ if six.PY2:
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash((self._gridspec, self.num1, self.num2))
diff --git a/contrib/python/matplotlib/py2/matplotlib/hatch.py b/contrib/python/matplotlib/py2/matplotlib/hatch.py
new file mode 100644
index 00000000000..94294afdf8a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/hatch.py
@@ -0,0 +1,220 @@
+"""
+Contains classes for generating hatch patterns.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange
+
+import numpy as np
+from matplotlib.path import Path
+
+
+class HatchPatternBase(object):
+ """
+ The base class for a hatch pattern.
+ """
+ pass
+
+
+class HorizontalHatch(HatchPatternBase):
+ def __init__(self, hatch, density):
+ self.num_lines = int((hatch.count('-') + hatch.count('+')) * density)
+ self.num_vertices = self.num_lines * 2
+
+ def set_vertices_and_codes(self, vertices, codes):
+ steps, stepsize = np.linspace(0.0, 1.0, self.num_lines, False,
+ retstep=True)
+ steps += stepsize / 2.
+ vertices[0::2, 0] = 0.0
+ vertices[0::2, 1] = steps
+ vertices[1::2, 0] = 1.0
+ vertices[1::2, 1] = steps
+ codes[0::2] = Path.MOVETO
+ codes[1::2] = Path.LINETO
+
+
+class VerticalHatch(HatchPatternBase):
+ def __init__(self, hatch, density):
+ self.num_lines = int((hatch.count('|') + hatch.count('+')) * density)
+ self.num_vertices = self.num_lines * 2
+
+ def set_vertices_and_codes(self, vertices, codes):
+ steps, stepsize = np.linspace(0.0, 1.0, self.num_lines, False,
+ retstep=True)
+ steps += stepsize / 2.
+ vertices[0::2, 0] = steps
+ vertices[0::2, 1] = 0.0
+ vertices[1::2, 0] = steps
+ vertices[1::2, 1] = 1.0
+ codes[0::2] = Path.MOVETO
+ codes[1::2] = Path.LINETO
+
+
+class NorthEastHatch(HatchPatternBase):
+ def __init__(self, hatch, density):
+ self.num_lines = int((hatch.count('/') + hatch.count('x') +
+ hatch.count('X')) * density)
+ if self.num_lines:
+ self.num_vertices = (self.num_lines + 1) * 2
+ else:
+ self.num_vertices = 0
+
+ def set_vertices_and_codes(self, vertices, codes):
+ steps = np.linspace(-0.5, 0.5, self.num_lines + 1, True)
+ vertices[0::2, 0] = 0.0 + steps
+ vertices[0::2, 1] = 0.0 - steps
+ vertices[1::2, 0] = 1.0 + steps
+ vertices[1::2, 1] = 1.0 - steps
+ codes[0::2] = Path.MOVETO
+ codes[1::2] = Path.LINETO
+
+
+class SouthEastHatch(HatchPatternBase):
+ def __init__(self, hatch, density):
+ self.num_lines = int((hatch.count('\\') + hatch.count('x') +
+ hatch.count('X')) * density)
+        if self.num_lines:
+            self.num_vertices = (self.num_lines + 1) * 2
+        else:
+            self.num_vertices = 0
+
+ def set_vertices_and_codes(self, vertices, codes):
+ steps = np.linspace(-0.5, 0.5, self.num_lines + 1, True)
+ vertices[0::2, 0] = 0.0 + steps
+ vertices[0::2, 1] = 1.0 + steps
+ vertices[1::2, 0] = 1.0 + steps
+ vertices[1::2, 1] = 0.0 + steps
+ codes[0::2] = Path.MOVETO
+ codes[1::2] = Path.LINETO
+
+
+class Shapes(HatchPatternBase):
+ filled = False
+
+ def __init__(self, hatch, density):
+ if self.num_rows == 0:
+ self.num_shapes = 0
+ self.num_vertices = 0
+ else:
+ self.num_shapes = ((self.num_rows // 2 + 1) * (self.num_rows + 1) +
+ (self.num_rows // 2) * (self.num_rows))
+ self.num_vertices = (self.num_shapes *
+ len(self.shape_vertices) *
+ (self.filled and 1 or 2))
+
+ def set_vertices_and_codes(self, vertices, codes):
+ offset = 1.0 / self.num_rows
+ shape_vertices = self.shape_vertices * offset * self.size
+ if not self.filled:
+ inner_vertices = shape_vertices[::-1] * 0.9
+ shape_codes = self.shape_codes
+ shape_size = len(shape_vertices)
+
+ cursor = 0
+ for row in xrange(self.num_rows + 1):
+ if row % 2 == 0:
+ cols = np.linspace(0.0, 1.0, self.num_rows + 1, True)
+ else:
+ cols = np.linspace(offset / 2.0, 1.0 - offset / 2.0,
+ self.num_rows, True)
+ row_pos = row * offset
+ for col_pos in cols:
+ vertices[cursor:cursor + shape_size] = (shape_vertices +
+ (col_pos, row_pos))
+ codes[cursor:cursor + shape_size] = shape_codes
+ cursor += shape_size
+ if not self.filled:
+ vertices[cursor:cursor + shape_size] = (inner_vertices +
+ (col_pos, row_pos))
+ codes[cursor:cursor + shape_size] = shape_codes
+ cursor += shape_size
+
+
+class Circles(Shapes):
+ def __init__(self, hatch, density):
+ path = Path.unit_circle()
+ self.shape_vertices = path.vertices
+ self.shape_codes = path.codes
+ Shapes.__init__(self, hatch, density)
+
+
+class SmallCircles(Circles):
+ size = 0.2
+
+ def __init__(self, hatch, density):
+ self.num_rows = (hatch.count('o')) * density
+ Circles.__init__(self, hatch, density)
+
+
+class LargeCircles(Circles):
+ size = 0.35
+
+ def __init__(self, hatch, density):
+ self.num_rows = (hatch.count('O')) * density
+ Circles.__init__(self, hatch, density)
+
+
+class SmallFilledCircles(SmallCircles):
+ size = 0.1
+ filled = True
+
+ def __init__(self, hatch, density):
+ self.num_rows = (hatch.count('.')) * density
+ Circles.__init__(self, hatch, density)
+
+
+class Stars(Shapes):
+ size = 1.0 / 3.0
+ filled = True
+
+ def __init__(self, hatch, density):
+ self.num_rows = (hatch.count('*')) * density
+ path = Path.unit_regular_star(5)
+ self.shape_vertices = path.vertices
+ self.shape_codes = np.ones(len(self.shape_vertices)) * Path.LINETO
+ self.shape_codes[0] = Path.MOVETO
+ Shapes.__init__(self, hatch, density)
+
+_hatch_types = [
+ HorizontalHatch,
+ VerticalHatch,
+ NorthEastHatch,
+ SouthEastHatch,
+ SmallCircles,
+ LargeCircles,
+ SmallFilledCircles,
+ Stars
+ ]
+
+
+def get_path(hatchpattern, density=6):
+ """
+    Given a hatch specifier, *hatchpattern*, generates a Path to render
+ the hatch in a unit square. *density* is the number of lines per
+ unit square.
+ """
+ density = int(density)
+
+ patterns = [hatch_type(hatchpattern, density)
+ for hatch_type in _hatch_types]
+ num_vertices = sum([pattern.num_vertices for pattern in patterns])
+
+ if num_vertices == 0:
+ return Path(np.empty((0, 2)))
+
+ vertices = np.empty((num_vertices, 2))
+ codes = np.empty((num_vertices,), np.uint8)
+
+ cursor = 0
+ for pattern in patterns:
+ if pattern.num_vertices != 0:
+ vertices_chunk = vertices[cursor:cursor + pattern.num_vertices]
+ codes_chunk = codes[cursor:cursor + pattern.num_vertices]
+ pattern.set_vertices_and_codes(vertices_chunk, codes_chunk)
+ cursor += pattern.num_vertices
+
+ return Path(vertices, codes)
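+
+
+# Illustrative sketch: a never-called helper showing how a combined hatch
+# specifier maps onto the pattern classes above.
+def _hatch_example():
+    # '/' contributes north-east lines and 'o' small circles; *density*
+    # scales the number of lines (or rows of shapes) per unit square.
+    path = get_path('/o', density=6)
+    return len(path.vertices)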
diff --git a/contrib/python/matplotlib/py2/matplotlib/image.py b/contrib/python/matplotlib/py2/matplotlib/image.py
new file mode 100644
index 00000000000..e03778ca9f8
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/image.py
@@ -0,0 +1,1534 @@
+"""
+The image module supports basic image loading, rescaling and display
+operations.
+
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves.urllib.parse import urlparse
+from six.moves.urllib.request import urlopen
+from io import BytesIO
+
+from math import ceil
+import os
+import logging
+
+import numpy as np
+
+from matplotlib import rcParams
+import matplotlib.artist as martist
+from matplotlib.artist import allow_rasterization
+import matplotlib.colors as mcolors
+import matplotlib.cm as cm
+import matplotlib.cbook as cbook
+
+# For clarity, names from _image are given explicitly in this module:
+import matplotlib._image as _image
+import matplotlib._png as _png
+
+# For user convenience, the names from _image are also imported into
+# the image namespace:
+from matplotlib._image import *
+
+from matplotlib.transforms import (Affine2D, BboxBase, Bbox, BboxTransform,
+ IdentityTransform, TransformedBbox)
+
+_log = logging.getLogger(__name__)
+
+# map interpolation strings to module constants
+_interpd_ = {
+ 'none': _image.NEAREST, # fall back to nearest when not supported
+ 'nearest': _image.NEAREST,
+ 'bilinear': _image.BILINEAR,
+ 'bicubic': _image.BICUBIC,
+ 'spline16': _image.SPLINE16,
+ 'spline36': _image.SPLINE36,
+ 'hanning': _image.HANNING,
+ 'hamming': _image.HAMMING,
+ 'hermite': _image.HERMITE,
+ 'kaiser': _image.KAISER,
+ 'quadric': _image.QUADRIC,
+ 'catrom': _image.CATROM,
+ 'gaussian': _image.GAUSSIAN,
+ 'bessel': _image.BESSEL,
+ 'mitchell': _image.MITCHELL,
+ 'sinc': _image.SINC,
+ 'lanczos': _image.LANCZOS,
+ 'blackman': _image.BLACKMAN,
+}
+
+interpolations_names = set(_interpd_)
+
+
+def composite_images(images, renderer, magnification=1.0):
+ """
+ Composite a number of RGBA images into one. The images are
+ composited in the order in which they appear in the `images` list.
+
+ Parameters
+ ----------
+ images : list of Images
+ Each must have a `make_image` method. For each image,
+ `can_composite` should return `True`, though this is not
+ enforced by this function. Each image must have a purely
+ affine transformation with no shear.
+
+ renderer : RendererBase instance
+
+ magnification : float
+ The additional magnification to apply for the renderer in use.
+
+ Returns
+ -------
+ tuple : image, offset_x, offset_y
+ Returns the tuple:
+
+ - image: A numpy array of the same type as the input images.
+
+ - offset_x, offset_y: The offset of the image (left, bottom)
+ in the output figure.
+ """
+ if len(images) == 0:
+ return np.empty((0, 0, 4), dtype=np.uint8), 0, 0
+
+ parts = []
+ bboxes = []
+ for image in images:
+ data, x, y, trans = image.make_image(renderer, magnification)
+ if data is not None:
+ x *= magnification
+ y *= magnification
+ parts.append((data, x, y, image.get_alpha() or 1.0))
+ bboxes.append(
+ Bbox([[x, y], [x + data.shape[1], y + data.shape[0]]]))
+
+ if len(parts) == 0:
+ return np.empty((0, 0, 4), dtype=np.uint8), 0, 0
+
+ bbox = Bbox.union(bboxes)
+
+ output = np.zeros(
+ (int(bbox.height), int(bbox.width), 4), dtype=np.uint8)
+
+ for data, x, y, alpha in parts:
+ trans = Affine2D().translate(x - bbox.x0, y - bbox.y0)
+ _image.resample(data, output, trans, _image.NEAREST,
+ resample=False, alpha=alpha)
+
+ return output, bbox.x0 / magnification, bbox.y0 / magnification
+
+
+def _draw_list_compositing_images(
+ renderer, parent, artists, suppress_composite=None):
+ """
+ Draw a sorted list of artists, compositing images into a single
+ image where possible.
+
+ For internal matplotlib use only: It is here to reduce duplication
+ between `Figure.draw` and `Axes.draw`, but otherwise should not be
+ generally useful.
+ """
+ has_images = any(isinstance(x, _ImageBase) for x in artists)
+
+ # override the renderer default if suppressComposite is not None
+ not_composite = (suppress_composite if suppress_composite is not None
+ else renderer.option_image_nocomposite())
+
+ if not_composite or not has_images:
+ for a in artists:
+ a.draw(renderer)
+ else:
+ # Composite any adjacent images together
+ image_group = []
+ mag = renderer.get_image_magnification()
+
+ def flush_images():
+ if len(image_group) == 1:
+ image_group[0].draw(renderer)
+ elif len(image_group) > 1:
+ data, l, b = composite_images(image_group, renderer, mag)
+ if data.size != 0:
+ gc = renderer.new_gc()
+ gc.set_clip_rectangle(parent.bbox)
+ gc.set_clip_path(parent.get_clip_path())
+ renderer.draw_image(gc, np.round(l), np.round(b), data)
+ gc.restore()
+ del image_group[:]
+
+ for a in artists:
+ if isinstance(a, _ImageBase) and a.can_composite():
+ image_group.append(a)
+ else:
+ flush_images()
+ a.draw(renderer)
+ flush_images()
+
+
+def _rgb_to_rgba(A):
+ """
+ Convert an RGB image to RGBA, as required by the image resample C++
+ extension.
+ """
+ rgba = np.zeros((A.shape[0], A.shape[1], 4), dtype=A.dtype)
+ rgba[:, :, :3] = A
+ if rgba.dtype == np.uint8:
+ rgba[:, :, 3] = 255
+ else:
+ rgba[:, :, 3] = 1.0
+ return rgba
+
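+# Illustrative note: the alpha channel added above is fully opaque, i.e. 255
+# for uint8 input and 1.0 for float input:
+#
+#     rgb = np.zeros((2, 2, 3), dtype=np.uint8)
+#     rgba = _rgb_to_rgba(rgb)
+#     assert rgba.shape == (2, 2, 4) and rgba[..., 3].max() == 255
+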
+
+class _ImageBase(martist.Artist, cm.ScalarMappable):
+ zorder = 0
+
+ @property
+ @cbook.deprecated("2.1")
+ def _interpd(self):
+ return _interpd_
+
+ @property
+ @cbook.deprecated("2.1")
+ def _interpdr(self):
+ return {v: k for k, v in six.iteritems(_interpd_)}
+
+ @property
+ @cbook.deprecated("2.1", alternative="mpl.image.interpolation_names")
+ def iterpnames(self):
+ return interpolations_names
+
+ def __str__(self):
+ return "AxesImage(%g,%g;%gx%g)" % tuple(self.axes.bbox.bounds)
+
+ def __init__(self, ax,
+ cmap=None,
+ norm=None,
+ interpolation=None,
+ origin=None,
+ filternorm=1,
+ filterrad=4.0,
+ resample=False,
+ **kwargs
+ ):
+ """
+ interpolation and cmap default to their rc settings
+
+ cmap is a colors.Colormap instance
+ norm is a colors.Normalize instance to map luminance to 0-1
+
+ Additional kwargs are matplotlib.artist properties
+
+ """
+ martist.Artist.__init__(self)
+ cm.ScalarMappable.__init__(self, norm, cmap)
+ self._mouseover = True
+ if origin is None:
+ origin = rcParams['image.origin']
+ self.origin = origin
+ self.set_filternorm(filternorm)
+ self.set_filterrad(filterrad)
+ self.set_interpolation(interpolation)
+ self.set_resample(resample)
+ self.axes = ax
+
+ self._imcache = None
+
+ self.update(kwargs)
+
+ def __getstate__(self):
+ state = super(_ImageBase, self).__getstate__()
+ # We can't pickle the C Image cached object.
+ state['_imcache'] = None
+ return state
+
+ def get_size(self):
+ """Get the numrows, numcols of the input image"""
+ if self._A is None:
+ raise RuntimeError('You must first set the image array')
+
+ return self._A.shape[:2]
+
+ def set_alpha(self, alpha):
+ """
+ Set the alpha value used for blending - not supported on
+ all backends
+
+ ACCEPTS: float
+ """
+ martist.Artist.set_alpha(self, alpha)
+ self._imcache = None
+
+ def changed(self):
+ """
+ Call this whenever the mappable is changed so observers can
+ update state
+ """
+ self._imcache = None
+ self._rgbacache = None
+ cm.ScalarMappable.changed(self)
+
+ def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
+ unsampled=False, round_to_pixel_border=True):
+ """
+        Normalize, rescale and color the image `A` from the given
+        in_bbox (in data space) to the given out_bbox (in pixel
+        space), clipped to the given clip_bbox (also in pixel space),
+        and magnified by the magnification factor.
+
+ `A` may be a greyscale image (MxN) with a dtype of `float32`,
+ `float64`, `uint16` or `uint8`, or an RGBA image (MxNx4) with
+ a dtype of `float32`, `float64`, or `uint8`.
+
+ If `unsampled` is True, the image will not be scaled, but an
+ appropriate affine transformation will be returned instead.
+
+ If `round_to_pixel_border` is True, the output image size will
+ be rounded to the nearest pixel boundary. This makes the
+ images align correctly with the axes. It should not be used
+ in cases where you want exact scaling, however, such as
+ FigureImage.
+
+ Returns the resulting (image, x, y, trans), where (x, y) is
+ the upper left corner of the result in pixel space, and
+ `trans` is the affine transformation from the image to pixel
+ space.
+ """
+ if A is None:
+ raise RuntimeError('You must first set the image '
+ 'array or the image attribute')
+ if A.size == 0:
+ raise RuntimeError("_make_image must get a non-empty image. "
+ "Your Artist's draw method must filter before "
+ "this method is called.")
+
+ clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)
+
+ if clipped_bbox is None:
+ return None, 0, 0, None
+
+ out_width_base = clipped_bbox.width * magnification
+ out_height_base = clipped_bbox.height * magnification
+
+ if out_width_base == 0 or out_height_base == 0:
+ return None, 0, 0, None
+
+ if self.origin == 'upper':
+ # Flip the input image using a transform. This avoids the
+ # problem with flipping the array, which results in a copy
+ # when it is converted to contiguous in the C wrapper
+ t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
+ else:
+ t0 = IdentityTransform()
+
+ t0 += (
+ Affine2D()
+ .scale(
+ in_bbox.width / A.shape[1],
+ in_bbox.height / A.shape[0])
+ .translate(in_bbox.x0, in_bbox.y0)
+ + self.get_transform())
+
+ t = (t0
+ + Affine2D().translate(
+ -clipped_bbox.x0,
+ -clipped_bbox.y0)
+ .scale(magnification, magnification))
+
+ # So that the image is aligned with the edge of the axes, we want
+ # to round up the output width to the next integer. This also
+ # means scaling the transform just slightly to account for the
+ # extra subpixel.
+ if (t.is_affine and round_to_pixel_border and
+ (out_width_base % 1.0 != 0.0 or out_height_base % 1.0 != 0.0)):
+ out_width = int(ceil(out_width_base))
+ out_height = int(ceil(out_height_base))
+ extra_width = (out_width - out_width_base) / out_width_base
+ extra_height = (out_height - out_height_base) / out_height_base
+ t += Affine2D().scale(1.0 + extra_width, 1.0 + extra_height)
+ else:
+ out_width = int(out_width_base)
+ out_height = int(out_height_base)
+
+ if not unsampled:
+ if A.ndim not in (2, 3):
+ raise ValueError("Invalid dimensions, got {}".format(A.shape))
+
+ if A.ndim == 2:
+                # if we are a 2D array, then we are running through the
+                # norm + colormap transformation. However, in general the
+                # input data is not going to match the size on the screen,
+                # so we have to resample to the correct number of pixels.
+
+ # TODO slice input array first
+ inp_dtype = A.dtype
+ a_min = A.min()
+ a_max = A.max()
+ # figure out the type we should scale to. For floats,
+ # leave as is. For integers cast to an appropriate-sized
+ # float. Small integers get smaller floats in an attempt
+ # to keep the memory footprint reasonable.
+ if a_min is np.ma.masked:
+ # all masked, so values don't matter
+ a_min, a_max = np.int32(0), np.int32(1)
+ if inp_dtype.kind == 'f':
+ scaled_dtype = A.dtype
+ else:
+ # probably an integer of some type.
+ da = a_max.astype(np.float64) - a_min.astype(np.float64)
+ if da > 1e8:
+ # give more breathing room if a big dynamic range
+ scaled_dtype = np.float64
+ else:
+ scaled_dtype = np.float32
+
+                # scale the input data to [.1, .9]. The Agg
+                # interpolators clip to [0, 1] internally, so we use a
+                # smaller input scale to identify which of the
+                # interpolated points need to be flagged as
+                # over / under.
+                # This may introduce numeric instabilities in very broadly
+                # scaled data.
+ A_scaled = np.empty(A.shape, dtype=scaled_dtype)
+ A_scaled[:] = A
+                # clip scaled data around the norm if necessary.
+                # This is necessary for big numbers at the edge of
+                # float64's ability to represent changes. Applying
+                # a norm first would be good, but ruins the interpolation
+                # of over-range values.
+ self.norm.autoscale_None(A)
+ dv = (np.float64(self.norm.vmax) -
+ np.float64(self.norm.vmin))
+ vmid = self.norm.vmin + dv / 2
+ fact = 1e7 if scaled_dtype == np.float64 else 1e4
+ newmin = vmid - dv * fact
+ if newmin < a_min:
+ newmin = None
+ else:
+ a_min = np.float64(newmin)
+ newmax = vmid + dv * fact
+ if newmax > a_max:
+ newmax = None
+ else:
+ a_max = np.float64(newmax)
+ if newmax is not None or newmin is not None:
+ A_scaled = np.clip(A_scaled, newmin, newmax)
+
+ A_scaled -= a_min
+ # a_min and a_max might be ndarray subclasses so use
+ # item to avoid errors
+ a_min = a_min.astype(scaled_dtype).item()
+ a_max = a_max.astype(scaled_dtype).item()
+
+ if a_min != a_max:
+ A_scaled /= ((a_max - a_min) / 0.8)
+ A_scaled += 0.1
+ A_resampled = np.zeros((out_height, out_width),
+ dtype=A_scaled.dtype)
+ # resample the input data to the correct resolution and shape
+ _image.resample(A_scaled, A_resampled,
+ t,
+ _interpd_[self.get_interpolation()],
+ self.get_resample(), 1.0,
+ self.get_filternorm() or 0.0,
+ self.get_filterrad() or 0.0)
+
+ # we are done with A_scaled now, remove from namespace
+ # to be sure!
+ del A_scaled
+                # un-scale the resampled data to approximately the
+                # original range. Things that interpolated to above /
+                # below the original min/max will still be above /
+                # below, but possibly clipped in the case of higher-order
+                # interpolation + drastically changing data.
+ A_resampled -= 0.1
+ if a_min != a_max:
+ A_resampled *= ((a_max - a_min) / 0.8)
+ A_resampled += a_min
+ # if using NoNorm, cast back to the original datatype
+ if isinstance(self.norm, mcolors.NoNorm):
+ A_resampled = A_resampled.astype(A.dtype)
+
+ mask = np.empty(A.shape, dtype=np.float32)
+ if A.mask.shape == A.shape:
+ # this is the case of a nontrivial mask
+ mask[:] = np.where(A.mask, np.float32(np.nan),
+ np.float32(1))
+ else:
+ mask[:] = 1
+
+ # we always have to interpolate the mask to account for
+ # non-affine transformations
+ out_mask = np.zeros((out_height, out_width),
+ dtype=mask.dtype)
+ _image.resample(mask, out_mask,
+ t,
+ _interpd_[self.get_interpolation()],
+ True, 1,
+ self.get_filternorm() or 0.0,
+ self.get_filterrad() or 0.0)
+ # we are done with the mask, delete from namespace to be sure!
+ del mask
+                # Agg updates out_mask in place. If the pixel has
+                # no image data it will not be updated (and still be 0
+                # as we initialized it); if the input data that would go
+                # into that output pixel is masked then it will be `nan`;
+                # if all the input data for a pixel is good it will be 1;
+                # and if there is _some_ good data in that output pixel it
+                # will be between 0 and 1 (such as a rotated image).
+
+ out_alpha = np.array(out_mask)
+ out_mask = np.isnan(out_mask)
+ out_alpha[out_mask] = 1
+
+ # mask and run through the norm
+ output = self.norm(np.ma.masked_array(A_resampled, out_mask))
+ else:
+ # Always convert to RGBA, even if only RGB input
+ if A.shape[2] == 3:
+ A = _rgb_to_rgba(A)
+ elif A.shape[2] != 4:
+ raise ValueError("Invalid dimensions, got %s" % (A.shape,))
+
+ output = np.zeros((out_height, out_width, 4), dtype=A.dtype)
+
+ alpha = self.get_alpha()
+ if alpha is None:
+ alpha = 1.0
+
+ _image.resample(
+ A, output, t, _interpd_[self.get_interpolation()],
+ self.get_resample(), alpha,
+ self.get_filternorm() or 0.0, self.get_filterrad() or 0.0)
+
+ # at this point output is either a 2D array of normed data
+ # (of int or float)
+ # or an RGBA array of re-sampled input
+ output = self.to_rgba(output, bytes=True, norm=False)
+ # output is now a correctly sized RGBA array of uint8
+
+            # Apply the alpha *after* the norm/colormap step if the input
+            # was greyscale, combining it with the mask-derived alpha.
+ if A.ndim == 2:
+ alpha = self.get_alpha()
+ if alpha is None:
+ alpha = 1
+ alpha_channel = output[:, :, 3]
+ alpha_channel[:] = np.asarray(
+ np.asarray(alpha_channel, np.float32) * out_alpha * alpha,
+ np.uint8)
+
+ else:
+ if self._imcache is None:
+ self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
+ output = self._imcache
+
+ # Subset the input image to only the part that will be
+ # displayed
+ subset = TransformedBbox(
+ clip_bbox, t0.frozen().inverted()).frozen()
+ output = output[
+ int(max(subset.ymin, 0)):
+ int(min(subset.ymax + 1, output.shape[0])),
+ int(max(subset.xmin, 0)):
+ int(min(subset.xmax + 1, output.shape[1]))]
+
+ t = Affine2D().translate(
+ int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t
+
+ return output, clipped_bbox.x0, clipped_bbox.y0, t
+
+ def make_image(self, renderer, magnification=1.0, unsampled=False):
+ raise RuntimeError('The make_image method must be overridden.')
+
+ def _draw_unsampled_image(self, renderer, gc):
+ """
+        Draw the image without resampling. The renderer must support a
+        draw_image method that accepts a transform argument.
+ """
+
+ im, l, b, trans = self.make_image(renderer, unsampled=True)
+
+ if im is None:
+ return
+
+ trans = Affine2D().scale(im.shape[1], im.shape[0]) + trans
+
+ renderer.draw_image(gc, l, b, im, trans)
+
+ def _check_unsampled_image(self, renderer):
+ """
+        Return True if the image would be better drawn unsampled.
+        Derived classes should override this method as needed.
+ """
+ return False
+
+ @allow_rasterization
+ def draw(self, renderer, *args, **kwargs):
+ # if not visible, declare victory and return
+ if not self.get_visible():
+ self.stale = False
+ return
+
+ # for empty images, there is nothing to draw!
+ if self.get_array().size == 0:
+ self.stale = False
+ return
+
+ # actually render the image.
+ gc = renderer.new_gc()
+ self._set_gc_clip(gc)
+ gc.set_alpha(self.get_alpha())
+ gc.set_url(self.get_url())
+ gc.set_gid(self.get_gid())
+
+ if (self._check_unsampled_image(renderer) and
+ self.get_transform().is_affine):
+ self._draw_unsampled_image(renderer, gc)
+ else:
+ im, l, b, trans = self.make_image(
+ renderer, renderer.get_image_magnification())
+ if im is not None:
+ renderer.draw_image(gc, l, b, im)
+ gc.restore()
+ self.stale = False
+
+ def contains(self, mouseevent):
+ """
+ Test whether the mouse event occurred within the image.
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+ # TODO: make sure this is consistent with patch and patch
+ # collection on nonlinear transformed coordinates.
+        # TODO: consider returning image coordinates (shouldn't
+        # be too difficult given that the image is rectilinear)
+ x, y = mouseevent.xdata, mouseevent.ydata
+ xmin, xmax, ymin, ymax = self.get_extent()
+ if xmin > xmax:
+ xmin, xmax = xmax, xmin
+ if ymin > ymax:
+ ymin, ymax = ymax, ymin
+
+ if x is not None and y is not None:
+ inside = (xmin <= x <= xmax) and (ymin <= y <= ymax)
+ else:
+ inside = False
+
+ return inside, {}
+
+ def write_png(self, fname):
+        """Write the image to the PNG file *fname*."""
+ im = self.to_rgba(self._A[::-1] if self.origin == 'lower' else self._A,
+ bytes=True, norm=True)
+ _png.write_png(im, fname)
+
+ def set_data(self, A):
+ """
+ Set the image array.
+
+ ACCEPTS: numpy/PIL Image A
+
+ Note that this function does *not* update the normalization used.
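+
+        A minimal sketch (``im`` is an assumed existing `AxesImage`,
+        e.g. as returned by ``ax.imshow``)::
+
+            import numpy as np
+            im.set_data(np.random.rand(10, 10))      # MxN grayscale, colormapped
+            im.set_data(np.random.rand(10, 10, 3))   # MxNx3 RGB, values in [0, 1]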
+ """
+ self._A = cbook.safe_masked_invalid(A, copy=True)
+
+ if (self._A.dtype != np.uint8 and
+ not np.can_cast(self._A.dtype, float, "same_kind")):
+ raise TypeError("Image data cannot be converted to float")
+
+ if not (self._A.ndim == 2
+ or self._A.ndim == 3 and self._A.shape[-1] in [3, 4]):
+ raise TypeError("Invalid dimensions for image data")
+
+ if self._A.ndim == 3:
+            # If the input data has values outside the valid range (after
+            # normalization), we issue a warning and then clip the data to
+            # the bounds - otherwise casting wraps extreme values, hiding
+            # outliers and making reliable interpretation impossible.
+ high = 255 if np.issubdtype(self._A.dtype, np.integer) else 1
+ if self._A.min() < 0 or high < self._A.max():
+ _log.warning(
+ 'Clipping input data to the valid range for imshow with '
+ 'RGB data ([0..1] for floats or [0..255] for integers).'
+ )
+ self._A = np.clip(self._A, 0, high)
+ # Cast unsupported integer types to uint8
+ if self._A.dtype != np.uint8 and np.issubdtype(self._A.dtype,
+ np.integer):
+ self._A = self._A.astype(np.uint8)
+
+ self._imcache = None
+ self._rgbacache = None
+ self.stale = True
+
+ def set_array(self, A):
+ """
+ Retained for backwards compatibility - use set_data instead
+
+ ACCEPTS: numpy array A or PIL Image
+ """
+ # This also needs to be here to override the inherited
+ # cm.ScalarMappable.set_array method so it is not invoked
+ # by mistake.
+
+ self.set_data(A)
+
+ def get_interpolation(self):
+ """
+ Return the interpolation method the image uses when resizing.
+
+ One of 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36',
+ 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom',
+ 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', or 'none'.
+
+ """
+ return self._interpolation
+
+ def set_interpolation(self, s):
+ """
+ Set the interpolation method the image uses when resizing.
+
+        If None, use the value of :rc:`image.interpolation`. If 'none', the
+        image is shown as is, without interpolating. 'none' is only supported
+        by the agg, ps and pdf backends, and falls back to 'nearest' mode
+        for other backends.
+
+ .. ACCEPTS: ['nearest' | 'bilinear' | 'bicubic' | 'spline16' |
+ 'spline36' | 'hanning' | 'hamming' | 'hermite' | 'kaiser' |
+ 'quadric' | 'catrom' | 'gaussian' | 'bessel' | 'mitchell' |
+ 'sinc' | 'lanczos' | 'none' ]
+
+ """
+ if s is None:
+ s = rcParams['image.interpolation']
+ s = s.lower()
+ if s not in _interpd_:
+ raise ValueError('Illegal interpolation string')
+ self._interpolation = s
+ self.stale = True
+
+ def can_composite(self):
+ """
+ Returns `True` if the image can be composited with its neighbors.
+ """
+ trans = self.get_transform()
+ return (
+ self._interpolation != 'none' and
+ trans.is_affine and
+ trans.is_separable)
+
+ def set_resample(self, v):
+ """
+ Set whether or not image resampling is used.
+
+ ACCEPTS: True|False
+ """
+ if v is None:
+ v = rcParams['image.resample']
+ self._resample = v
+ self.stale = True
+
+ def get_resample(self):
+ """Return the image resample boolean."""
+ return self._resample
+
+ def set_filternorm(self, filternorm):
+ """
+        Set whether the resize filter normalizes the weights -- see
+        help for imshow.
+
+ ACCEPTS: 0 or 1
+ """
+ if filternorm:
+ self._filternorm = 1
+ else:
+ self._filternorm = 0
+
+ self.stale = True
+
+ def get_filternorm(self):
+ """Return the filternorm setting."""
+ return self._filternorm
+
+ def set_filterrad(self, filterrad):
+ """
+        Set the resize filter radius (only applicable to some
+        interpolation schemes) -- see help for imshow.
+
+ ACCEPTS: positive float
+ """
+ r = float(filterrad)
+ if r <= 0:
+ raise ValueError("The filter radius must be a positive number")
+ self._filterrad = r
+ self.stale = True
+
+ def get_filterrad(self):
+ """Return the filterrad setting."""
+ return self._filterrad
+
+
+class AxesImage(_ImageBase):
+ def __str__(self):
+ return "AxesImage(%g,%g;%gx%g)" % tuple(self.axes.bbox.bounds)
+
+ def __init__(self, ax,
+ cmap=None,
+ norm=None,
+ interpolation=None,
+ origin=None,
+ extent=None,
+ filternorm=1,
+ filterrad=4.0,
+ resample=False,
+ **kwargs
+ ):
+
+ """
+ interpolation and cmap default to their rc settings
+
+ cmap is a colors.Colormap instance
+ norm is a colors.Normalize instance to map luminance to 0-1
+
+ extent is data axes (left, right, bottom, top) for making image plots
+ registered with data plots. Default is to label the pixel
+ centers with the zero-based row and column indices.
+
+ Additional kwargs are matplotlib.artist properties
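+
+        An AxesImage is normally created indirectly via ``ax.imshow``; a
+        minimal sketch (``ax`` is an assumed existing Axes)::
+
+            import numpy as np
+            im = ax.imshow(np.random.rand(10, 10), extent=(0, 1, 0, 1))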
+
+ """
+
+ self._extent = extent
+
+ super(AxesImage, self).__init__(
+ ax,
+ cmap=cmap,
+ norm=norm,
+ interpolation=interpolation,
+ origin=origin,
+ filternorm=filternorm,
+ filterrad=filterrad,
+ resample=resample,
+ **kwargs
+ )
+
+ def get_window_extent(self, renderer=None):
+ x0, x1, y0, y1 = self._extent
+ bbox = Bbox.from_extents([x0, y0, x1, y1])
+ return bbox.transformed(self.axes.transData)
+
+ def make_image(self, renderer, magnification=1.0, unsampled=False):
+ trans = self.get_transform()
+ # image is created in the canvas coordinate.
+ x1, x2, y1, y2 = self.get_extent()
+ bbox = Bbox(np.array([[x1, y1], [x2, y2]]))
+ transformed_bbox = TransformedBbox(bbox, trans)
+
+ return self._make_image(
+ self._A, bbox, transformed_bbox, self.axes.bbox, magnification,
+ unsampled=unsampled)
+
+ def _check_unsampled_image(self, renderer):
+ """
+ Return whether the image would be better drawn unsampled.
+ """
+ return (self.get_interpolation() == "none"
+ and renderer.option_scale_image())
+
+ def set_extent(self, extent):
+ """
+ extent is data axes (left, right, bottom, top) for making image plots
+
+ This updates ax.dataLim, and, if autoscaling, sets viewLim
+ to tightly fit the image, regardless of dataLim. Autoscaling
+ state is not changed, so following this with ax.autoscale_view
+ will redo the autoscaling in accord with dataLim.
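+
+        A minimal sketch (``im`` is an assumed existing `AxesImage`)::
+
+            im.set_extent((0.0, 10.0, 0.0, 5.0))   # left, right, bottom, top in data units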
+ """
+ self._extent = xmin, xmax, ymin, ymax = extent
+ corners = (xmin, ymin), (xmax, ymax)
+ self.axes.update_datalim(corners)
+ self.sticky_edges.x[:] = [xmin, xmax]
+ self.sticky_edges.y[:] = [ymin, ymax]
+ if self.axes._autoscaleXon:
+ self.axes.set_xlim((xmin, xmax), auto=None)
+ if self.axes._autoscaleYon:
+ self.axes.set_ylim((ymin, ymax), auto=None)
+ self.stale = True
+
+ def get_extent(self):
+ """Get the image extent: left, right, bottom, top"""
+ if self._extent is not None:
+ return self._extent
+ else:
+ sz = self.get_size()
+ numrows, numcols = sz
+ if self.origin == 'upper':
+ return (-0.5, numcols-0.5, numrows-0.5, -0.5)
+ else:
+ return (-0.5, numcols-0.5, -0.5, numrows-0.5)
+
+ def get_cursor_data(self, event):
+ """Get the cursor data for a given event"""
+ xmin, xmax, ymin, ymax = self.get_extent()
+ if self.origin == 'upper':
+ ymin, ymax = ymax, ymin
+ arr = self.get_array()
+ data_extent = Bbox([[ymin, xmin], [ymax, xmax]])
+ array_extent = Bbox([[0, 0], arr.shape[:2]])
+ trans = BboxTransform(boxin=data_extent, boxout=array_extent)
+ y, x = event.ydata, event.xdata
+ point = trans.transform_point([y, x])
+ if any(np.isnan(point)):
+ return None
+ i, j = point.astype(int)
+        # Return None if the point falls outside the array bounds
+ if not (0 <= i < arr.shape[0]) or not (0 <= j < arr.shape[1]):
+ return None
+ else:
+ return arr[i, j]
+
+
+class NonUniformImage(AxesImage):
+ def __init__(self, ax, **kwargs):
+ """
+ kwargs are identical to those for AxesImage, except
+ that 'nearest' and 'bilinear' are the only supported 'interpolation'
+ options.
+ """
+ interp = kwargs.pop('interpolation', 'nearest')
+ super(NonUniformImage, self).__init__(ax, **kwargs)
+ self.set_interpolation(interp)
+
+ def _check_unsampled_image(self, renderer):
+ """
+        Return False: a NonUniformImage is never drawn unsampled.
+ """
+ return False
+
+ def make_image(self, renderer, magnification=1.0, unsampled=False):
+ if self._A is None:
+ raise RuntimeError('You must first set the image array')
+
+ if unsampled:
+ raise ValueError('unsampled not supported on NonUniformImage')
+
+ A = self._A
+ if A.ndim == 2:
+ if A.dtype != np.uint8:
+ A = self.to_rgba(A, bytes=True)
+ self.is_grayscale = self.cmap.is_gray()
+ else:
+ A = np.repeat(A[:, :, np.newaxis], 4, 2)
+ A[:, :, 3] = 255
+ self.is_grayscale = True
+ else:
+ if A.dtype != np.uint8:
+ A = (255*A).astype(np.uint8)
+ if A.shape[2] == 3:
+ B = np.zeros(tuple(list(A.shape[0:2]) + [4]), np.uint8)
+ B[:, :, 0:3] = A
+ B[:, :, 3] = 255
+ A = B
+ self.is_grayscale = False
+
+ x0, y0, v_width, v_height = self.axes.viewLim.bounds
+ l, b, r, t = self.axes.bbox.extents
+ width = (np.round(r) + 0.5) - (np.round(l) - 0.5)
+ height = (np.round(t) + 0.5) - (np.round(b) - 0.5)
+ width *= magnification
+ height *= magnification
+ im = _image.pcolor(self._Ax, self._Ay, A,
+ int(height), int(width),
+ (x0, x0+v_width, y0, y0+v_height),
+ _interpd_[self._interpolation])
+
+ return im, l, b, IdentityTransform()
+
+ def set_data(self, x, y, A):
+ """
+ Set the grid for the pixel centers, and the pixel values.
+
+ *x* and *y* are monotonic 1-D ndarrays of lengths N and M,
+ respectively, specifying pixel centers
+
+ *A* is an (M,N) ndarray or masked array of values to be
+ colormapped, or a (M,N,3) RGB array, or a (M,N,4) RGBA
+ array.
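+
+        A minimal sketch (illustrative sizes only; ``im`` is an assumed
+        existing `NonUniformImage`)::
+
+            import numpy as np
+            x = np.array([0.0, 1.0, 3.0, 6.0])       # N = 4 pixel centers
+            y = np.array([0.0, 2.0, 5.0])            # M = 3 pixel centers
+            im.set_data(x, y, np.random.rand(3, 4))  # A has shape (M, N)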
+ """
+ x = np.array(x, np.float32)
+ y = np.array(y, np.float32)
+ A = cbook.safe_masked_invalid(A, copy=True)
+ if not (x.ndim == y.ndim == 1 and A.shape[0:2] == y.shape + x.shape):
+ raise TypeError("Axes don't match array shape")
+ if A.ndim not in [2, 3]:
+ raise TypeError("Can only plot 2D or 3D data")
+ if A.ndim == 3 and A.shape[2] not in [1, 3, 4]:
+ raise TypeError("3D arrays must have three (RGB) "
+ "or four (RGBA) color components")
+ if A.ndim == 3 and A.shape[2] == 1:
+ A.shape = A.shape[0:2]
+ self._A = A
+ self._Ax = x
+ self._Ay = y
+ self._imcache = None
+
+ self.stale = True
+
+ def set_array(self, *args):
+ raise NotImplementedError('Method not supported')
+
+ def set_interpolation(self, s):
+ """
+ Parameters
+ ----------
+ s : str, None
+ Either 'nearest', 'bilinear', or ``None``.
+ """
+ if s is not None and s not in ('nearest', 'bilinear'):
+ raise NotImplementedError('Only nearest neighbor and '
+ 'bilinear interpolations are supported')
+ AxesImage.set_interpolation(self, s)
+
+ def get_extent(self):
+ if self._A is None:
+ raise RuntimeError('Must set data first')
+ return self._Ax[0], self._Ax[-1], self._Ay[0], self._Ay[-1]
+
+ def set_filternorm(self, s):
+ pass
+
+ def set_filterrad(self, s):
+ pass
+
+ def set_norm(self, norm):
+ if self._A is not None:
+ raise RuntimeError('Cannot change colors after loading data')
+ super(NonUniformImage, self).set_norm(norm)
+
+ def set_cmap(self, cmap):
+ if self._A is not None:
+ raise RuntimeError('Cannot change colors after loading data')
+ super(NonUniformImage, self).set_cmap(cmap)
+
+
+class PcolorImage(AxesImage):
+ """
+ Make a pcolor-style plot with an irregular rectangular grid.
+
+ This uses a variation of the original irregular image code,
+ and it is used by pcolorfast for the corresponding grid type.
+ """
+ def __init__(self, ax,
+ x=None,
+ y=None,
+ A=None,
+ cmap=None,
+ norm=None,
+ **kwargs
+ ):
+ """
+ cmap defaults to its rc setting
+
+ cmap is a colors.Colormap instance
+ norm is a colors.Normalize instance to map luminance to 0-1
+
+ Additional kwargs are matplotlib.artist properties
+ """
+ super(PcolorImage, self).__init__(ax, norm=norm, cmap=cmap)
+ self.update(kwargs)
+ if A is not None:
+ self.set_data(x, y, A)
+
+ def make_image(self, renderer, magnification=1.0, unsampled=False):
+ if self._A is None:
+ raise RuntimeError('You must first set the image array')
+ if unsampled:
+ raise ValueError('unsampled not supported on PColorImage')
+ fc = self.axes.patch.get_facecolor()
+ bg = mcolors.to_rgba(fc, 0)
+ bg = (np.array(bg)*255).astype(np.uint8)
+ l, b, r, t = self.axes.bbox.extents
+ width = (np.round(r) + 0.5) - (np.round(l) - 0.5)
+ height = (np.round(t) + 0.5) - (np.round(b) - 0.5)
+ # The extra cast-to-int is only needed for python2
+ width = int(np.round(width * magnification))
+ height = int(np.round(height * magnification))
+ if self._rgbacache is None:
+ A = self.to_rgba(self._A, bytes=True)
+ self._rgbacache = A
+ if self._A.ndim == 2:
+ self.is_grayscale = self.cmap.is_gray()
+ else:
+ A = self._rgbacache
+ vl = self.axes.viewLim
+ im = _image.pcolor2(self._Ax, self._Ay, A,
+ height,
+ width,
+ (vl.x0, vl.x1, vl.y0, vl.y1),
+ bg)
+ return im, l, b, IdentityTransform()
+
+ def _check_unsampled_image(self, renderer):
+ return False
+
+ def set_data(self, x, y, A):
+ """
+ Set the grid for the rectangle boundaries, and the data values.
+
+ *x* and *y* are monotonic 1-D ndarrays of lengths N+1 and M+1,
+ respectively, specifying rectangle boundaries. If None,
+ they will be created as uniform arrays from 0 through N
+ and 0 through M, respectively.
+
+ *A* is an (M,N) ndarray or masked array of values to be
+ colormapped, or a (M,N,3) RGB array, or a (M,N,4) RGBA
+ array.
+
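+        A minimal sketch (illustrative sizes only; ``im`` is an assumed
+        existing `PcolorImage`)::
+
+            import numpy as np
+            x = np.linspace(0.0, 4.0, 5)             # N+1 = 5 column boundaries
+            y = np.array([0.0, 1.0, 3.0, 6.0])       # M+1 = 4 row boundaries
+            im.set_data(x, y, np.random.rand(3, 4))  # A has shape (M, N)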
+ """
+ A = cbook.safe_masked_invalid(A, copy=True)
+ if x is None:
+ x = np.arange(0, A.shape[1]+1, dtype=np.float64)
+ else:
+ x = np.array(x, np.float64).ravel()
+ if y is None:
+ y = np.arange(0, A.shape[0]+1, dtype=np.float64)
+ else:
+ y = np.array(y, np.float64).ravel()
+
+ if A.shape[:2] != (y.size-1, x.size-1):
+ raise ValueError(
+ "Axes don't match array shape. Got %s, expected %s." %
+ (A.shape[:2], (y.size - 1, x.size - 1)))
+ if A.ndim not in [2, 3]:
+ raise ValueError("A must be 2D or 3D")
+ if A.ndim == 3 and A.shape[2] == 1:
+ A.shape = A.shape[:2]
+ self.is_grayscale = False
+ if A.ndim == 3:
+ if A.shape[2] in [3, 4]:
+ if ((A[:, :, 0] == A[:, :, 1]).all() and
+ (A[:, :, 0] == A[:, :, 2]).all()):
+ self.is_grayscale = True
+ else:
+ raise ValueError("3D arrays must have RGB or RGBA as last dim")
+
+ # For efficient cursor readout, ensure x and y are increasing.
+ if x[-1] < x[0]:
+ x = x[::-1]
+ A = A[:, ::-1]
+ if y[-1] < y[0]:
+ y = y[::-1]
+ A = A[::-1]
+
+ self._A = A
+ self._Ax = x
+ self._Ay = y
+ self._rgbacache = None
+ self.stale = True
+
+ def set_array(self, *args):
+ raise NotImplementedError('Method not supported')
+
+ def get_cursor_data(self, event):
+ """Get the cursor data for a given event"""
+ x, y = event.xdata, event.ydata
+ if (x < self._Ax[0] or x > self._Ax[-1] or
+ y < self._Ay[0] or y > self._Ay[-1]):
+ return None
+ j = np.searchsorted(self._Ax, x) - 1
+ i = np.searchsorted(self._Ay, y) - 1
+ try:
+ return self._A[i, j]
+ except IndexError:
+ return None
+
+
+class FigureImage(_ImageBase):
+ zorder = 0
+
+ _interpolation = 'nearest'
+
+ def __init__(self, fig,
+ cmap=None,
+ norm=None,
+ offsetx=0,
+ offsety=0,
+ origin=None,
+ **kwargs
+ ):
+ """
+ cmap is a colors.Colormap instance
+ norm is a colors.Normalize instance to map luminance to 0-1
+
+ kwargs are an optional list of Artist keyword args
+ """
+ super(FigureImage, self).__init__(
+ None,
+ norm=norm,
+ cmap=cmap,
+ origin=origin
+ )
+ self.figure = fig
+ self.ox = offsetx
+ self.oy = offsety
+ self.update(kwargs)
+ self.magnification = 1.0
+
+ def get_extent(self):
+ """Get the image extent: left, right, bottom, top"""
+ numrows, numcols = self.get_size()
+ return (-0.5 + self.ox, numcols-0.5 + self.ox,
+ -0.5 + self.oy, numrows-0.5 + self.oy)
+
+ def make_image(self, renderer, magnification=1.0, unsampled=False):
+ fac = renderer.dpi/self.figure.dpi
+ # fac here is to account for pdf, eps, svg backends where
+ # figure.dpi is set to 72. This means we need to scale the
+        # image (using magnification) and offset it appropriately.
+ bbox = Bbox([[self.ox/fac, self.oy/fac],
+ [(self.ox/fac + self._A.shape[1]),
+ (self.oy/fac + self._A.shape[0])]])
+ width, height = self.figure.get_size_inches()
+ width *= renderer.dpi
+ height *= renderer.dpi
+ clip = Bbox([[0, 0], [width, height]])
+
+ return self._make_image(
+ self._A, bbox, bbox, clip, magnification=magnification / fac,
+ unsampled=unsampled, round_to_pixel_border=False)
+
+ def set_data(self, A):
+ """Set the image array."""
+ cm.ScalarMappable.set_array(self,
+ cbook.safe_masked_invalid(A, copy=True))
+ self.stale = True
+
+
+class BboxImage(_ImageBase):
+ """The Image class whose size is determined by the given bbox."""
+ def __init__(self, bbox,
+ cmap=None,
+ norm=None,
+ interpolation=None,
+ origin=None,
+ filternorm=1,
+ filterrad=4.0,
+ resample=False,
+ interp_at_native=True,
+ **kwargs
+ ):
+ """
+ cmap is a colors.Colormap instance
+ norm is a colors.Normalize instance to map luminance to 0-1
+
+ interp_at_native is a flag that determines whether or not
+ interpolation should still be applied when the image is
+ displayed at its native resolution. A common use case for this
+ is when displaying an image for annotational purposes; it is
+ treated similarly to Photoshop (interpolation is only used when
+ displaying the image at non-native resolutions).
+
+
+ kwargs are an optional list of Artist keyword args
+
+ """
+ super(BboxImage, self).__init__(
+ None,
+ cmap=cmap,
+ norm=norm,
+ interpolation=interpolation,
+ origin=origin,
+ filternorm=filternorm,
+ filterrad=filterrad,
+ resample=resample,
+ **kwargs
+ )
+
+ self.bbox = bbox
+ self.interp_at_native = interp_at_native
+ self._transform = IdentityTransform()
+
+ def get_transform(self):
+ return self._transform
+
+ def get_window_extent(self, renderer=None):
+ if renderer is None:
+ renderer = self.get_figure()._cachedRenderer
+
+ if isinstance(self.bbox, BboxBase):
+ return self.bbox
+ elif callable(self.bbox):
+ return self.bbox(renderer)
+ else:
+ raise ValueError("unknown type of bbox")
+
+ def contains(self, mouseevent):
+ """Test whether the mouse event occurred within the image."""
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+
+ if not self.get_visible(): # or self.get_figure()._renderer is None:
+ return False, {}
+
+ x, y = mouseevent.x, mouseevent.y
+ inside = self.get_window_extent().contains(x, y)
+
+ return inside, {}
+
+ def make_image(self, renderer, magnification=1.0, unsampled=False):
+ width, height = renderer.get_canvas_width_height()
+
+ bbox_in = self.get_window_extent(renderer).frozen()
+ bbox_in._points /= [width, height]
+ bbox_out = self.get_window_extent(renderer)
+ clip = Bbox([[0, 0], [width, height]])
+ self._transform = BboxTransform(Bbox([[0, 0], [1, 1]]), clip)
+
+ return self._make_image(
+ self._A,
+ bbox_in, bbox_out, clip, magnification, unsampled=unsampled)
+
+
+def imread(fname, format=None):
+ """
+ Read an image from a file into an array.
+
+ *fname* may be a string path, a valid URL, or a Python
+ file-like object. If using a file object, it must be opened in binary
+ mode.
+
+    If *format* is provided, it will try to read a file of that type;
+    otherwise the format is deduced from the filename. If nothing can
+    be deduced, PNG is tried.
+
+    Return value is a :class:`numpy.ndarray`. For grayscale images, the
+ return array is MxN. For RGB images, the return value is MxNx3.
+ For RGBA images the return value is MxNx4.
+
+ matplotlib can only read PNGs natively, but if `PIL
+ <http://www.pythonware.com/products/pil/>`_ is installed, it will
+ use it to load the image and return an array (if possible) which
+ can be used with :func:`~matplotlib.pyplot.imshow`. Note, URL strings
+ may not be compatible with PIL. Check the PIL documentation for more
+ information.
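+
+    A minimal sketch (``'image.png'`` is a hypothetical existing file)::
+
+        import matplotlib.image as mimage
+        img = mimage.imread('image.png')   # numpy array, e.g. MxNx4 for an RGBA PNG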
+ """
+
+ def pilread(fname):
+ """try to load the image with PIL or return None"""
+ try:
+ from PIL import Image
+ except ImportError:
+ return None
+ with Image.open(fname) as image:
+ return pil_to_array(image)
+
+ handlers = {'png': _png.read_png, }
+ if format is None:
+ if isinstance(fname, six.string_types):
+ parsed = urlparse(fname)
+ # If the string is a URL, assume png
+ if len(parsed.scheme) > 1:
+ ext = 'png'
+ else:
+ basename, ext = os.path.splitext(fname)
+ ext = ext.lower()[1:]
+ elif hasattr(fname, 'name'):
+ basename, ext = os.path.splitext(fname.name)
+ ext = ext.lower()[1:]
+ else:
+ ext = 'png'
+ else:
+ ext = format
+
+ if ext not in handlers:
+ im = pilread(fname)
+ if im is None:
+ raise ValueError('Only know how to handle extensions: %s; '
+ 'with Pillow installed matplotlib can handle '
+ 'more images' % list(handlers))
+ return im
+
+ handler = handlers[ext]
+
+ # To handle Unicode filenames, we pass a file object to the PNG
+ # reader extension, since Python handles them quite well, but it's
+ # tricky in C.
+ if isinstance(fname, six.string_types):
+ parsed = urlparse(fname)
+ # If fname is a URL, download the data
+ if len(parsed.scheme) > 1:
+ fd = BytesIO(urlopen(fname).read())
+ return handler(fd)
+ else:
+ with open(fname, 'rb') as fd:
+ return handler(fd)
+ else:
+ return handler(fname)
+
+
+def imsave(fname, arr, vmin=None, vmax=None, cmap=None, format=None,
+ origin=None, dpi=100):
+ """
+    Save an array as an image file.
+
+ The output formats available depend on the backend being used.
+
+ Parameters
+ ----------
+ fname : str or file-like
+ Path string to a filename, or a Python file-like object.
+ If *format* is *None* and *fname* is a string, the output
+ format is deduced from the extension of the filename.
+ arr : array-like
+ An MxN (luminance), MxNx3 (RGB) or MxNx4 (RGBA) array.
+    vmin, vmax : scalar, optional
+ *vmin* and *vmax* set the color scaling for the image by fixing the
+ values that map to the colormap color limits. If either *vmin*
+ or *vmax* is None, that limit is determined from the *arr*
+ min/max value.
+ cmap : matplotlib.colors.Colormap, optional
+ For example, ``cm.viridis``. If ``None``, defaults to the
+ ``image.cmap`` rcParam.
+ format : str
+ One of the file extensions supported by the active backend. Most
+ backends support png, pdf, ps, eps and svg.
+ origin : [ 'upper' | 'lower' ]
+ Indicates whether the ``(0, 0)`` index of the array is in the
+ upper left or lower left corner of the axes. Defaults to the
+ ``image.origin`` rcParam.
+ dpi : int
+ The DPI to store in the metadata of the file. This does not affect the
+ resolution of the output image.
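+
+    A minimal sketch (``'out.png'`` is a hypothetical output path)::
+
+        import numpy as np
+        import matplotlib.image as mimage
+        mimage.imsave('out.png', np.random.rand(16, 16), cmap='viridis')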
+ """
+ from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
+ from matplotlib.figure import Figure
+ if isinstance(fname, getattr(os, "PathLike", ())):
+ fname = os.fspath(fname)
+ if (format == 'png'
+ or (format is None
+ and isinstance(fname, six.string_types)
+ and fname.lower().endswith('.png'))):
+ image = AxesImage(None, cmap=cmap, origin=origin)
+ image.set_data(arr)
+ image.set_clim(vmin, vmax)
+ image.write_png(fname)
+ else:
+ fig = Figure(dpi=dpi, frameon=False)
+ FigureCanvas(fig)
+ fig.figimage(arr, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin,
+ resize=True)
+ fig.savefig(fname, dpi=dpi, format=format, transparent=True)
+
+
+def pil_to_array(pilImage):
+ """Load a PIL image and return it as a numpy array.
+
+ Grayscale images are returned as ``(M, N)`` arrays. RGB images are
+ returned as ``(M, N, 3)`` arrays. RGBA images are returned as ``(M, N,
+ 4)`` arrays.
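+
+    A minimal sketch (assumes Pillow is installed and ``'image.png'`` is a
+    hypothetical existing file)::
+
+        from PIL import Image
+        with Image.open('image.png') as pil_img:
+            arr = pil_to_array(pil_img)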
+ """
+ if pilImage.mode in ['RGBA', 'RGBX', 'RGB', 'L']:
+        # return MxNx4 RGBA, MxNx3 RGB, or MxN luminance array
+ return np.asarray(pilImage)
+ elif pilImage.mode.startswith('I;16'):
+ # return MxN luminance array of uint16
+ raw = pilImage.tobytes('raw', pilImage.mode)
+ if pilImage.mode.endswith('B'):
+ x = np.frombuffer(raw, '>u2')
+ else:
+ x = np.frombuffer(raw, '<u2')
+ return x.reshape(pilImage.size[::-1]).astype('=u2')
+ else: # try to convert to an rgba image
+ try:
+ pilImage = pilImage.convert('RGBA')
+ except ValueError:
+ raise RuntimeError('Unknown image mode')
+ return np.asarray(pilImage) # return MxNx4 RGBA array
+
+
+def thumbnail(infile, thumbfile, scale=0.1, interpolation='bilinear',
+ preview=False):
+ """
+    Make a thumbnail of the image in *infile* with output filename
+    *thumbfile*.
+
+ *infile* the image file -- must be PNG or Pillow-readable if you
+ have `Pillow <http://python-pillow.org/>`_ installed
+
+ *thumbfile*
+ the thumbnail filename
+
+ *scale*
+ the scale factor for the thumbnail
+
+ *interpolation*
+ the interpolation scheme used in the resampling
+
+
+ *preview*
+ if True, the default backend (presumably a user interface
+ backend) will be used which will cause a figure to be raised
+ if :func:`~matplotlib.pyplot.show` is called. If it is False,
+ a pure image backend will be used depending on the extension,
+ 'png'->FigureCanvasAgg, 'pdf'->FigureCanvasPdf,
+ 'svg'->FigureCanvasSVG
+
+
+ See examples/misc/image_thumbnail.py.
+
+ .. htmlonly::
+
+ :ref:`sphx_glr_gallery_misc_image_thumbnail_sgskip.py`
+
+ Return value is the figure instance containing the thumbnail
+
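+    A minimal sketch (file names are hypothetical; *infile* should be a
+    color PNG readable by `imread`)::
+
+        from matplotlib.image import thumbnail
+        fig = thumbnail('large.png', 'large_thumb.png', scale=0.15)
+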
+ """
+ basedir, basename = os.path.split(infile)
+ baseout, extout = os.path.splitext(thumbfile)
+
+ im = imread(infile)
+ rows, cols, depth = im.shape
+
+    # the dpi doesn't really matter, as it will cancel out in the end,
+    # but we need it for the mpl API
+ dpi = 100
+
+ height = rows / dpi * scale
+ width = cols / dpi * scale
+
+ extension = extout.lower()
+
+ if preview:
+ # let the UI backend do everything
+ import matplotlib.pyplot as plt
+ fig = plt.figure(figsize=(width, height), dpi=dpi)
+ else:
+ if extension == '.png':
+ from matplotlib.backends.backend_agg \
+ import FigureCanvasAgg as FigureCanvas
+ elif extension == '.pdf':
+ from matplotlib.backends.backend_pdf \
+ import FigureCanvasPdf as FigureCanvas
+ elif extension == '.svg':
+ from matplotlib.backends.backend_svg \
+ import FigureCanvasSVG as FigureCanvas
+ else:
+ raise ValueError("Can only handle "
+ "extensions 'png', 'svg' or 'pdf'")
+
+ from matplotlib.figure import Figure
+ fig = Figure(figsize=(width, height), dpi=dpi)
+ FigureCanvas(fig)
+
+ ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
+ frameon=False, xticks=[], yticks=[])
+
+ basename, ext = os.path.splitext(basename)
+ ax.imshow(im, aspect='auto', resample=True, interpolation=interpolation)
+ fig.savefig(thumbfile, dpi=dpi)
+ return fig
diff --git a/contrib/python/matplotlib/py2/matplotlib/legend.py b/contrib/python/matplotlib/py2/matplotlib/legend.py
new file mode 100644
index 00000000000..4e48a950915
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/legend.py
@@ -0,0 +1,1401 @@
+"""
+The legend module defines the Legend class, which is responsible for
+drawing legends associated with axes and/or figures.
+
+.. important::
+
+ It is unlikely that you would ever create a Legend instance
+ manually. Most users would normally create a legend via the
+ :meth:`~matplotlib.axes.Axes.legend` function. For more details on legends
+ there is also a :doc:`legend guide </tutorials/intermediate/legend_guide>`.
+
+The Legend class can be considered as a container of legend handles and
+legend texts. Creation of corresponding legend handles from the plot elements
+in the axes or figures (e.g., lines, patches, etc.) is specified by the
+handler map, which defines the mapping between the plot elements and the
+legend handlers to be used (the default legend handlers are defined in the
+:mod:`~matplotlib.legend_handler` module). Note that not all kinds of
+artists are supported by the legend by default, but it is possible to
+extend the legend handlers' capabilities to support arbitrary objects. See
+the :doc:`legend guide </tutorials/intermediate/legend_guide>` for more
+information.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import logging
+import warnings
+
+import numpy as np
+
+from matplotlib import rcParams
+from matplotlib import docstring
+from matplotlib.artist import Artist, allow_rasterization
+from matplotlib.cbook import silent_list, is_hashable
+import matplotlib.colors as colors
+from matplotlib.font_manager import FontProperties
+from matplotlib.lines import Line2D
+from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
+from matplotlib.collections import (LineCollection, RegularPolyCollection,
+ CircleCollection, PathCollection,
+ PolyCollection)
+from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
+from matplotlib.transforms import BboxTransformTo, BboxTransformFrom
+
+from matplotlib.offsetbox import HPacker, VPacker, TextArea, DrawingArea
+from matplotlib.offsetbox import DraggableOffsetBox
+
+from matplotlib.container import ErrorbarContainer, BarContainer, StemContainer
+from . import legend_handler
+
+
+class DraggableLegend(DraggableOffsetBox):
+ def __init__(self, legend, use_blit=False, update="loc"):
+ """
+ Parameters
+ ----------
+ update : string
+ If "loc", update *loc* parameter of legend upon finalizing.
+ If "bbox", update *bbox_to_anchor* parameter.
+ """
+ self.legend = legend
+
+ if update in ["loc", "bbox"]:
+ self._update = update
+ else:
+ raise ValueError("update parameter '%s' is not supported." %
+ update)
+
+ DraggableOffsetBox.__init__(self, legend, legend._legend_box,
+ use_blit=use_blit)
+
+ def artist_picker(self, legend, evt):
+ return self.legend.contains(evt)
+
+ def finalize_offset(self):
+ loc_in_canvas = self.get_loc_in_canvas()
+
+ if self._update == "loc":
+ self._update_loc(loc_in_canvas)
+ elif self._update == "bbox":
+ self._update_bbox_to_anchor(loc_in_canvas)
+ else:
+ raise RuntimeError("update parameter '%s' is not supported." %
+                               self._update)
+
+ def _update_loc(self, loc_in_canvas):
+ bbox = self.legend.get_bbox_to_anchor()
+
+ # if bbox has zero width or height, the transformation is
+        # ill-defined. Fall back to the default bbox_to_anchor.
+ if bbox.width == 0 or bbox.height == 0:
+ self.legend.set_bbox_to_anchor(None)
+ bbox = self.legend.get_bbox_to_anchor()
+
+ _bbox_transform = BboxTransformFrom(bbox)
+ self.legend._loc = tuple(
+ _bbox_transform.transform_point(loc_in_canvas)
+ )
+
+ def _update_bbox_to_anchor(self, loc_in_canvas):
+
+ tr = self.legend.axes.transAxes
+ loc_in_bbox = tr.transform_point(loc_in_canvas)
+
+ self.legend.set_bbox_to_anchor(loc_in_bbox)
+
+
+_legend_kw_doc = '''
+loc : int or string or pair of floats, default: :rc:`legend.loc` ('best' for \
+axes, 'upper right' for figures)
+ The location of the legend. Possible codes are:
+
+ =============== =============
+ Location String Location Code
+ =============== =============
+ 'best' 0
+ 'upper right' 1
+ 'upper left' 2
+ 'lower left' 3
+ 'lower right' 4
+ 'right' 5
+ 'center left' 6
+ 'center right' 7
+ 'lower center' 8
+ 'upper center' 9
+ 'center' 10
+ =============== =============
+
+
+    Alternatively, it can be a 2-tuple giving ``x, y`` of the lower-left
+ corner of the legend in axes coordinates (in which case
+ ``bbox_to_anchor`` will be ignored).
+
+bbox_to_anchor : `.BboxBase` or pair of floats
+ Specify any arbitrary location for the legend in `bbox_transform`
+ coordinates (default Axes coordinates).
+
+ For example, to put the legend's upper right hand corner in the
+ center of the axes the following keywords can be used::
+
+ loc='upper right', bbox_to_anchor=(0.5, 0.5)
+
+ncol : integer
+ The number of columns that the legend has. Default is 1.
+
+prop : None or :class:`matplotlib.font_manager.FontProperties` or dict
+ The font properties of the legend. If None (default), the current
+ :data:`matplotlib.rcParams` will be used.
+
+fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium', \
+'large', 'x-large', 'xx-large'}
+ Controls the font size of the legend. If the value is numeric the
+ size will be the absolute font size in points. String values are
+ relative to the current default font size. This argument is only
+ used if `prop` is not specified.
+
+numpoints : None or int
+ The number of marker points in the legend when creating a legend
+ entry for a `.Line2D` (line).
+ Default is ``None``, which will take the value from
+ :rc:`legend.numpoints`.
+
+scatterpoints : None or int
+ The number of marker points in the legend when creating
+ a legend entry for a `.PathCollection` (scatter plot).
+ Default is ``None``, which will take the value from
+ :rc:`legend.scatterpoints`.
+
+scatteryoffsets : iterable of floats
+ The vertical offset (relative to the font size) for the markers
+    created for a scatter plot legend entry. 0.0 is at the base of the
+ legend text, and 1.0 is at the top. To draw all markers at the
+ same height, set to ``[0.5]``. Default is ``[0.375, 0.5, 0.3125]``.
+
+markerscale : None or int or float
+ The relative size of legend markers compared with the originally
+ drawn ones.
+ Default is ``None``, which will take the value from
+ :rc:`legend.markerscale`.
+
+markerfirst : bool
+ If *True*, legend marker is placed to the left of the legend label.
+ If *False*, legend marker is placed to the right of the legend
+ label.
+ Default is *True*.
+
+frameon : None or bool
+ Control whether the legend should be drawn on a patch
+ (frame).
+ Default is ``None``, which will take the value from
+ :rc:`legend.frameon`.
+
+fancybox : None or bool
+ Control whether round edges should be enabled around the
+ :class:`~matplotlib.patches.FancyBboxPatch` which makes up the
+ legend's background.
+ Default is ``None``, which will take the value from
+ :rc:`legend.fancybox`.
+
+shadow : None or bool
+ Control whether to draw a shadow behind the legend.
+ Default is ``None``, which will take the value from
+ :rc:`legend.shadow`.
+
+framealpha : None or float
+ Control the alpha transparency of the legend's background.
+ Default is ``None``, which will take the value from
+ :rc:`legend.framealpha`. If shadow is activated and
+ *framealpha* is ``None``, the default value is ignored.
+
+facecolor : None or "inherit" or a color spec
+ Control the legend's background color.
+ Default is ``None``, which will take the value from
+ :rc:`legend.facecolor`. If ``"inherit"``, it will take
+ :rc:`axes.facecolor`.
+
+edgecolor : None or "inherit" or a color spec
+ Control the legend's background patch edge color.
+ Default is ``None``, which will take the value from
+    :rc:`legend.edgecolor`. If ``"inherit"``, it will take
+ :rc:`axes.edgecolor`.
+
+mode : {"expand", None}
+ If `mode` is set to ``"expand"`` the legend will be horizontally
+    expanded to fill the axes area (or `bbox_to_anchor` if it defines
+ the legend's size).
+
+bbox_transform : None or :class:`matplotlib.transforms.Transform`
+ The transform for the bounding box (`bbox_to_anchor`). For a value
+ of ``None`` (default) the Axes'
+ :data:`~matplotlib.axes.Axes.transAxes` transform will be used.
+
+title : str or None
+ The legend's title. Default is no title (``None``).
+
+borderpad : float or None
+ The fractional whitespace inside the legend border.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.borderpad`.
+
+labelspacing : float or None
+ The vertical space between the legend entries.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.labelspacing`.
+
+handlelength : float or None
+ The length of the legend handles.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.handlelength`.
+
+handletextpad : float or None
+ The pad between the legend handle and text.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.handletextpad`.
+
+borderaxespad : float or None
+ The pad between the axes and legend border.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.borderaxespad`.
+
+columnspacing : float or None
+ The spacing between columns.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.columnspacing`.
+
+handler_map : dict or None
+ The custom dictionary mapping instances or types to a legend
+ handler. This `handler_map` updates the default handler map
+ found at :func:`matplotlib.legend.Legend.get_legend_handler_map`.
+
+'''
+docstring.interpd.update(_legend_kw_doc=_legend_kw_doc)
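+
+# A minimal usage sketch of the keyword arguments documented above (``ax`` is
+# an assumed existing Axes whose artists carry labels; illustration only):
+#
+#     ax.legend(loc='upper left', ncol=2, frameon=False,
+#               bbox_to_anchor=(1.02, 1.0), borderaxespad=0.0)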
+
+
+class Legend(Artist):
+ """
+ Place a legend on the axes at location loc.
+
+ """
+ codes = {'best': 0, # only implemented for axes legends
+ 'upper right': 1,
+ 'upper left': 2,
+ 'lower left': 3,
+ 'lower right': 4,
+ 'right': 5,
+ 'center left': 6,
+ 'center right': 7,
+ 'lower center': 8,
+ 'upper center': 9,
+ 'center': 10,
+ }
+
+ zorder = 5
+
+ def __str__(self):
+ return "Legend"
+
+ @docstring.dedent_interpd
+ def __init__(self, parent, handles, labels,
+ loc=None,
+ numpoints=None, # the number of points in the legend line
+ markerscale=None, # the relative size of legend markers
+ # vs. original
+ markerfirst=True, # controls ordering (left-to-right) of
+ # legend marker and label
+ scatterpoints=None, # number of scatter points
+ scatteryoffsets=None,
+ prop=None, # properties for the legend texts
+ fontsize=None, # keyword to set font size directly
+
+ # spacing & pad defined as a fraction of the font-size
+ borderpad=None, # the whitespace inside the legend border
+ labelspacing=None, # the vertical space between the legend
+ # entries
+ handlelength=None, # the length of the legend handles
+ handleheight=None, # the height of the legend handles
+ handletextpad=None, # the pad between the legend handle
+ # and text
+ borderaxespad=None, # the pad between the axes and legend
+ # border
+ columnspacing=None, # spacing between columns
+
+ ncol=1, # number of columns
+ mode=None, # mode for horizontal distribution of columns.
+ # None, "expand"
+
+                 fancybox=None,  # True: use a rounded (fancy) box; False:
+                                 # use a square box; None: take from rc
+ shadow=None,
+ title=None, # set a title for the legend
+
+ framealpha=None, # set frame alpha
+ edgecolor=None, # frame patch edgecolor
+ facecolor=None, # frame patch facecolor
+
+ bbox_to_anchor=None, # bbox that the legend will be anchored.
+ bbox_transform=None, # transform for the bbox
+ frameon=None, # draw frame
+ handler_map=None,
+ ):
+ """
+
+ Parameters
+ ----------
+ parent : `~matplotlib.axes.Axes` or `.Figure`
+ The artist that contains the legend.
+
+ handles : sequence of `.Artist`
+ A list of Artists (lines, patches) to be added to the legend.
+
+ labels : sequence of strings
+ A list of labels to show next to the artists. The length of handles
+ and labels should be the same. If they are not, they are truncated
+ to the smaller of both lengths.
+
+ Other Parameters
+ ----------------
+
+ loc : int or string or pair of floats, default: 'upper right'
+ The location of the legend. Possible codes are:
+
+ =============== =============
+ Location String Location Code
+ =============== =============
+ 'best' 0
+ 'upper right' 1
+ 'upper left' 2
+ 'lower left' 3
+ 'lower right' 4
+ 'right' 5
+ 'center left' 6
+ 'center right' 7
+ 'lower center' 8
+ 'upper center' 9
+ 'center' 10
+ =============== =============
+
+
+            Alternatively, it can be a 2-tuple giving ``x, y`` of the lower-left
+ corner of the legend in axes coordinates (in which case
+ ``bbox_to_anchor`` will be ignored).
+
+ bbox_to_anchor : `.BboxBase` or pair of floats
+ Specify any arbitrary location for the legend in `bbox_transform`
+ coordinates (default Axes coordinates).
+
+ For example, to put the legend's upper right hand corner in the
+ center of the axes the following keywords can be used::
+
+ loc='upper right', bbox_to_anchor=(0.5, 0.5)
+
+ ncol : integer
+ The number of columns that the legend has. Default is 1.
+
+ prop : None or :class:`matplotlib.font_manager.FontProperties` or dict
+ The font properties of the legend. If None (default), the current
+ :data:`matplotlib.rcParams` will be used.
+
+ fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium', \
+'large', 'x-large', 'xx-large'}
+ Controls the font size of the legend. If the value is numeric the
+ size will be the absolute font size in points. String values are
+ relative to the current default font size. This argument is only
+ used if `prop` is not specified.
+
+ numpoints : None or int
+ The number of marker points in the legend when creating a legend
+ entry for a `.Line2D` (line).
+ Default is ``None``, which will take the value from
+ :rc:`legend.numpoints`.
+
+ scatterpoints : None or int
+ The number of marker points in the legend when creating
+ a legend entry for a `.PathCollection` (scatter plot).
+ Default is ``None``, which will take the value from
+ :rc:`legend.scatterpoints`.
+
+ scatteryoffsets : iterable of floats
+ The vertical offset (relative to the font size) for the markers
+            created for a scatter plot legend entry. 0.0 is at the base of the
+ legend text, and 1.0 is at the top. To draw all markers at the
+ same height, set to ``[0.5]``. Default is ``[0.375, 0.5, 0.3125]``.
+
+ markerscale : None or int or float
+ The relative size of legend markers compared with the originally
+ drawn ones.
+ Default is ``None``, which will take the value from
+ :rc:`legend.markerscale`.
+
+ markerfirst : bool
+ If *True*, legend marker is placed to the left of the legend label.
+ If *False*, legend marker is placed to the right of the legend
+ label.
+ Default is *True*.
+
+ frameon : None or bool
+ Control whether the legend should be drawn on a patch
+ (frame).
+ Default is ``None``, which will take the value from
+ :rc:`legend.frameon`.
+
+ fancybox : None or bool
+ Control whether round edges should be enabled around the
+ :class:`~matplotlib.patches.FancyBboxPatch` which makes up the
+ legend's background.
+ Default is ``None``, which will take the value from
+ :rc:`legend.fancybox`.
+
+ shadow : None or bool
+ Control whether to draw a shadow behind the legend.
+ Default is ``None``, which will take the value from
+ :rc:`legend.shadow`.
+
+ framealpha : None or float
+ Control the alpha transparency of the legend's background.
+ Default is ``None``, which will take the value from
+ :rc:`legend.framealpha`. If shadow is activated and
+ *framealpha* is ``None``, the default value is ignored.
+
+ facecolor : None or "inherit" or a color spec
+ Control the legend's background color.
+ Default is ``None``, which will take the value from
+ :rc:`legend.facecolor`. If ``"inherit"``, it will take
+ :rc:`axes.facecolor`.
+
+ edgecolor : None or "inherit" or a color spec
+ Control the legend's background patch edge color.
+ Default is ``None``, which will take the value from
+            :rc:`legend.edgecolor`. If ``"inherit"``, it will take
+ :rc:`axes.edgecolor`.
+
+ mode : {"expand", None}
+ If `mode` is set to ``"expand"`` the legend will be horizontally
+            expanded to fill the axes area (or `bbox_to_anchor` if it defines
+ the legend's size).
+
+ bbox_transform : None or :class:`matplotlib.transforms.Transform`
+ The transform for the bounding box (`bbox_to_anchor`). For a value
+ of ``None`` (default) the Axes'
+ :data:`~matplotlib.axes.Axes.transAxes` transform will be used.
+
+ title : str or None
+ The legend's title. Default is no title (``None``).
+
+ borderpad : float or None
+ The fractional whitespace inside the legend border.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.borderpad`.
+
+ labelspacing : float or None
+ The vertical space between the legend entries.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.labelspacing`.
+
+ handlelength : float or None
+ The length of the legend handles.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.handlelength`.
+
+ handletextpad : float or None
+ The pad between the legend handle and text.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.handletextpad`.
+
+ borderaxespad : float or None
+ The pad between the axes and legend border.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.borderaxespad`.
+
+ columnspacing : float or None
+ The spacing between columns.
+ Measured in font-size units.
+ Default is ``None``, which will take the value from
+ :rc:`legend.columnspacing`.
+
+ handler_map : dict or None
+ The custom dictionary mapping instances or types to a legend
+ handler. This `handler_map` updates the default handler map
+ found at :func:`matplotlib.legend.Legend.get_legend_handler_map`.
+
+ Notes
+ -----
+
+ Users can specify any arbitrary location for the legend using the
+ *bbox_to_anchor* keyword argument. *bbox_to_anchor* can be an instance
+ of `.BboxBase` (or one of its derivatives) or a tuple of 2 or 4 floats.
+ See :meth:`set_bbox_to_anchor` for more detail.
+
+ The legend location can be specified by setting *loc* with a tuple of
+ 2 floats, which is interpreted as the lower-left corner of the legend
+ in normalized axes coordinates.
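+
+ A minimal usage sketch (assuming a typical pyplot session)::
+
+     import matplotlib.pyplot as plt
+
+     fig, ax = plt.subplots()
+     ax.plot([1, 2, 3], label='series')
+     # place the lower-left corner of the legend at axes coords (0.6, 0.7)
+     ax.legend(loc=(0.6, 0.7))
+     # or anchor it just outside the axes on the right
+     ax.legend(loc='upper left', bbox_to_anchor=(1.02, 1.0))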
+ """
+ # local import only to avoid circularity
+ from matplotlib.axes import Axes
+ from matplotlib.figure import Figure
+
+ Artist.__init__(self)
+
+ if prop is None:
+ if fontsize is not None:
+ self.prop = FontProperties(size=fontsize)
+ else:
+ self.prop = FontProperties(size=rcParams["legend.fontsize"])
+ elif isinstance(prop, dict):
+ self.prop = FontProperties(**prop)
+ if "size" not in prop:
+ self.prop.set_size(rcParams["legend.fontsize"])
+ else:
+ self.prop = prop
+
+ self._fontsize = self.prop.get_size_in_points()
+
+ self.texts = []
+ self.legendHandles = []
+ self._legend_title_box = None
+
+ #: A dictionary with the extra handler mappings for this Legend
+ #: instance.
+ self._custom_handler_map = handler_map
+
+ locals_view = locals()
+ for name in ["numpoints", "markerscale", "shadow", "columnspacing",
+ "scatterpoints", "handleheight", 'borderpad',
+ 'labelspacing', 'handlelength', 'handletextpad',
+ 'borderaxespad']:
+ if locals_view[name] is None:
+ value = rcParams["legend." + name]
+ else:
+ value = locals_view[name]
+ setattr(self, name, value)
+ del locals_view
+ # trim handles and labels if illegal label...
+ _lab, _hand = [], []
+ for label, handle in zip(labels, handles):
+ if (isinstance(label, six.string_types) and
+ label.startswith('_')):
+ warnings.warn('The handle {!r} has a label of {!r} which '
+ 'cannot be automatically added to the '
+ 'legend.'.format(handle, label))
+ else:
+ _lab.append(label)
+ _hand.append(handle)
+ labels, handles = _lab, _hand
+
+ handles = list(handles)
+ if len(handles) < 2:
+ ncol = 1
+ self._ncol = ncol
+
+ if self.numpoints <= 0:
+ raise ValueError("numpoints must be > 0; it was %d" % numpoints)
+
+ # introduce y-offset for handles of the scatter plot
+ if scatteryoffsets is None:
+ self._scatteryoffsets = np.array([3. / 8., 4. / 8., 2.5 / 8.])
+ else:
+ self._scatteryoffsets = np.asarray(scatteryoffsets)
+ reps = self.scatterpoints // len(self._scatteryoffsets) + 1
+ self._scatteryoffsets = np.tile(self._scatteryoffsets,
+ reps)[:self.scatterpoints]
+
+ # _legend_box is an OffsetBox instance that contains all
+ # legend items and will be initialized from _init_legend_box()
+ # method.
+ self._legend_box = None
+
+ if isinstance(parent, Axes):
+ self.isaxes = True
+ self.axes = parent
+ self.set_figure(parent.figure)
+ elif isinstance(parent, Figure):
+ self.isaxes = False
+ self.set_figure(parent)
+ else:
+ raise TypeError("Legend needs either Axes or Figure as parent")
+ self.parent = parent
+
+ if loc is None:
+ loc = rcParams["legend.loc"]
+ if not self.isaxes and loc in [0, 'best']:
+ loc = 'upper right'
+ if isinstance(loc, six.string_types):
+ if loc not in self.codes:
+ if self.isaxes:
+ warnings.warn('Unrecognized location "%s". Falling back '
+ 'on "best"; valid locations are\n\t%s\n'
+ % (loc, '\n\t'.join(self.codes)))
+ loc = 0
+ else:
+ warnings.warn('Unrecognized location "%s". Falling back '
+ 'on "upper right"; '
+ 'valid locations are\n\t%s\n'
+ % (loc, '\n\t'.join(self.codes)))
+ loc = 1
+ else:
+ loc = self.codes[loc]
+ if not self.isaxes and loc == 0:
+ warnings.warn('Automatic legend placement (loc="best") not '
+ 'implemented for figure legend. '
+ 'Falling back on "upper right".')
+ loc = 1
+
+ self._mode = mode
+ self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
+
+ # We use FancyBboxPatch to draw a legend frame. The location
+ # and size of the box will be updated during the drawing time.
+
+ if facecolor is None:
+ facecolor = rcParams["legend.facecolor"]
+ if facecolor == 'inherit':
+ facecolor = rcParams["axes.facecolor"]
+
+ if edgecolor is None:
+ edgecolor = rcParams["legend.edgecolor"]
+ if edgecolor == 'inherit':
+ edgecolor = rcParams["axes.edgecolor"]
+
+ self.legendPatch = FancyBboxPatch(
+ xy=(0.0, 0.0), width=1., height=1.,
+ facecolor=facecolor,
+ edgecolor=edgecolor,
+ mutation_scale=self._fontsize,
+ snap=True
+ )
+
+ # The width and height of the legendPatch will be set (in the
+ # draw()) to the length that includes the padding. Thus we set
+ # pad=0 here.
+ if fancybox is None:
+ fancybox = rcParams["legend.fancybox"]
+
+ if fancybox:
+ self.legendPatch.set_boxstyle("round", pad=0,
+ rounding_size=0.2)
+ else:
+ self.legendPatch.set_boxstyle("square", pad=0)
+
+ self._set_artist_props(self.legendPatch)
+
+ self._drawFrame = frameon
+ if frameon is None:
+ self._drawFrame = rcParams["legend.frameon"]
+
+ # init with null renderer
+ self._init_legend_box(handles, labels, markerfirst)
+
+ # If shadow is activated use framealpha if not
+ # explicitly passed. See Issue 8943
+ if framealpha is None:
+ if shadow:
+ self.get_frame().set_alpha(1)
+ else:
+ self.get_frame().set_alpha(rcParams["legend.framealpha"])
+ else:
+ self.get_frame().set_alpha(framealpha)
+
+ self._loc = loc
+ self.set_title(title)
+ self._last_fontsize_points = self._fontsize
+ self._draggable = None
+
+ def _set_artist_props(self, a):
+ """
+ Set the boilerplate props for artists added to axes.
+ """
+ a.set_figure(self.figure)
+ if self.isaxes:
+ # a.set_axes(self.axes)
+ a.axes = self.axes
+
+ a.set_transform(self.get_transform())
+
+ def _set_loc(self, loc):
+ # find_offset function will be provided to _legend_box and
+ # _legend_box will draw itself at the location of the return
+ # value of the find_offset.
+ self._loc_real = loc
+ self.stale = True
+ self._legend_box.set_offset(self._findoffset)
+
+ def _get_loc(self):
+ return self._loc_real
+
+ _loc = property(_get_loc, _set_loc)
+
+ def _findoffset(self, width, height, xdescent, ydescent, renderer):
+ "Helper function to locate the legend."
+
+ if self._loc == 0: # "best".
+ x, y = self._find_best_position(width, height, renderer)
+ elif self._loc in Legend.codes.values(): # Fixed location.
+ bbox = Bbox.from_bounds(0, 0, width, height)
+ x, y = self._get_anchored_bbox(self._loc, bbox,
+ self.get_bbox_to_anchor(),
+ renderer)
+ else: # Axes or figure coordinates.
+ fx, fy = self._loc
+ bbox = self.get_bbox_to_anchor()
+ x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
+
+ return x + xdescent, y + ydescent
+
+ @allow_rasterization
+ def draw(self, renderer):
+ "Draw everything that belongs to the legend."
+ if not self.get_visible():
+ return
+
+ renderer.open_group('legend')
+
+ fontsize = renderer.points_to_pixels(self._fontsize)
+
+ # if mode == expand, set the width of the legend_box to the
+ # width of the parent (minus pads)
+ if self._mode in ["expand"]:
+ pad = 2 * (self.borderaxespad + self.borderpad) * fontsize
+ self._legend_box.set_width(self.get_bbox_to_anchor().width - pad)
+
+ # update the location and size of the legend. This needs to
+ # be done in any case to clip the figure right.
+ bbox = self._legend_box.get_window_extent(renderer)
+ self.legendPatch.set_bounds(bbox.x0, bbox.y0,
+ bbox.width, bbox.height)
+ self.legendPatch.set_mutation_scale(fontsize)
+
+ if self._drawFrame:
+ if self.shadow:
+ shadow = Shadow(self.legendPatch, 2, -2)
+ shadow.draw(renderer)
+
+ self.legendPatch.draw(renderer)
+
+ self._legend_box.draw(renderer)
+
+ renderer.close_group('legend')
+ self.stale = False
+
+ def _approx_text_height(self, renderer=None):
+ """
+ Return the approximate height of the text. This is used to place
+ the legend handle.
+ """
+ if renderer is None:
+ return self._fontsize
+ else:
+ return renderer.points_to_pixels(self._fontsize)
+
+ # _default_handler_map defines the default mapping between plot
+ # elements and the legend handlers.
+
+ _default_handler_map = {
+ StemContainer: legend_handler.HandlerStem(),
+ ErrorbarContainer: legend_handler.HandlerErrorbar(),
+ Line2D: legend_handler.HandlerLine2D(),
+ Patch: legend_handler.HandlerPatch(),
+ LineCollection: legend_handler.HandlerLineCollection(),
+ RegularPolyCollection: legend_handler.HandlerRegularPolyCollection(),
+ CircleCollection: legend_handler.HandlerCircleCollection(),
+ BarContainer: legend_handler.HandlerPatch(
+ update_func=legend_handler.update_from_first_child),
+ tuple: legend_handler.HandlerTuple(),
+ PathCollection: legend_handler.HandlerPathCollection(),
+ PolyCollection: legend_handler.HandlerPolyCollection()
+ }
+
+ # (get|set|update)_default_handler_map are the public interfaces to
+ # modify the default handler map.
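+ #
+ # For instance (a hedged sketch; ``MyArtist`` and ``MyHandler`` are
+ # purely illustrative names):
+ #
+ #     Legend.update_default_handler_map({MyArtist: MyHandler()})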
+
+ @classmethod
+ def get_default_handler_map(cls):
+ """
+ A class method that returns the default handler map.
+ """
+ return cls._default_handler_map
+
+ @classmethod
+ def set_default_handler_map(cls, handler_map):
+ """
+ A class method to set the default handler map.
+ """
+ cls._default_handler_map = handler_map
+
+ @classmethod
+ def update_default_handler_map(cls, handler_map):
+ """
+ A class method to update the default handler map.
+ """
+ cls._default_handler_map.update(handler_map)
+
+ def get_legend_handler_map(self):
+ """
+ Return the handler map.
+ """
+
+ default_handler_map = self.get_default_handler_map()
+
+ if self._custom_handler_map:
+ hm = default_handler_map.copy()
+ hm.update(self._custom_handler_map)
+ return hm
+ else:
+ return default_handler_map
+
+ @staticmethod
+ def get_legend_handler(legend_handler_map, orig_handle):
+ """
+ Return a legend handler from *legend_handler_map* that
+ corresponds to *orig_handle*.
+
+ *legend_handler_map* should be a dictionary object (that is
+ returned by the get_legend_handler_map method).
+
+ It first checks whether *orig_handle* itself is a key in
+ *legend_handler_map* and returns the associated value.
+ Otherwise, it checks each of the classes in the handle's
+ method-resolution order. If no matching key is found, it
+ returns ``None``.
+ """
+ if is_hashable(orig_handle):
+ try:
+ return legend_handler_map[orig_handle]
+ except KeyError:
+ pass
+
+ for handle_type in type(orig_handle).mro():
+ try:
+ return legend_handler_map[handle_type]
+ except KeyError:
+ pass
+
+ return None
+
+ def _init_legend_box(self, handles, labels, markerfirst=True):
+ """
+ Initialize the legend_box. The legend_box is an instance of
+ the OffsetBox, which is packed with legend handles and
+ texts. Once packed, their location is calculated during the
+ drawing time.
+ """
+
+ fontsize = self._fontsize
+
+ # legend_box is a HPacker, horizontally packed with
+ # columns. Each column is a VPacker, vertically packed with
+ # legend items. Each legend item is HPacker packed with
+ # legend handleBox and labelBox. handleBox is an instance of
+ # offsetbox.DrawingArea which contains legend handle. labelBox
+ # is an instance of offsetbox.TextArea which contains legend
+ # text.
+
+ text_list = [] # the list of text instances
+ handle_list = [] # the list of handle instances
+ handles_and_labels = []
+
+ label_prop = dict(verticalalignment='baseline',
+ horizontalalignment='left',
+ fontproperties=self.prop,
+ )
+
+ # The approximate height and descent of text. These values are
+ # only used for plotting the legend handle.
+ descent = 0.35 * self._approx_text_height() * (self.handleheight - 0.7)
+ # 0.35 and 0.7 are just heuristic numbers and may need to be improved.
+ height = self._approx_text_height() * self.handleheight - descent
+ # each handle needs to be drawn inside a box of (x, y, w, h) =
+ # (0, -descent, width, height). And their coordinates should
+ # be given in the display coordinates.
+
+ # The transformation of each handle will be automatically set
+ # to self.get_transform(). If the artist does not use its
+ # default transform (e.g., Collections), you need to
+ # manually set its transform to self.get_transform().
+ legend_handler_map = self.get_legend_handler_map()
+
+ for orig_handle, lab in zip(handles, labels):
+ handler = self.get_legend_handler(legend_handler_map, orig_handle)
+ if handler is None:
+ warnings.warn(
+ "Legend does not support {!r} instances.\nA proxy artist "
+ "may be used instead.\nSee: "
+ "http://matplotlib.org/users/legend_guide.html"
+ "#creating-artists-specifically-for-adding-to-the-legend-"
+ "aka-proxy-artists".format(orig_handle)
+ )
+ # We don't have a handle for this artist, so we just defer
+ # to None.
+ handle_list.append(None)
+ else:
+ textbox = TextArea(lab, textprops=label_prop,
+ multilinebaseline=True,
+ minimumdescent=True)
+ handlebox = DrawingArea(width=self.handlelength * fontsize,
+ height=height,
+ xdescent=0., ydescent=descent)
+
+ text_list.append(textbox._text)
+ # Create the artist for the legend which represents the
+ # original artist/handle.
+ handle_list.append(handler.legend_artist(self, orig_handle,
+ fontsize, handlebox))
+ handles_and_labels.append((handlebox, textbox))
+
+ if handles_and_labels:
+ # We calculate number of rows in each column. The first
+ # (num_largecol) columns will have (nrows+1) rows, and remaining
+ # (num_smallcol) columns will have (nrows) rows.
+ ncol = min(self._ncol, len(handles_and_labels))
+ nrows, num_largecol = divmod(len(handles_and_labels), ncol)
+ num_smallcol = ncol - num_largecol
+ # starting index of each column and number of rows in it.
+ rows_per_col = [nrows + 1] * num_largecol + [nrows] * num_smallcol
+ start_idxs = np.concatenate([[0], np.cumsum(rows_per_col)[:-1]])
+ cols = zip(start_idxs, rows_per_col)
+ else:
+ cols = []
+
+ columnbox = []
+ for i0, di in cols:
+ # pack handleBox and labelBox into itemBox
+ itemBoxes = [HPacker(pad=0,
+ sep=self.handletextpad * fontsize,
+ children=[h, t] if markerfirst else [t, h],
+ align="baseline")
+ for h, t in handles_and_labels[i0:i0 + di]]
+ # minimumdescent=False for the text of the last row of the column
+ if markerfirst:
+ itemBoxes[-1].get_children()[1].set_minimumdescent(False)
+ else:
+ itemBoxes[-1].get_children()[0].set_minimumdescent(False)
+
+ # pack columnBox
+ alignment = "baseline" if markerfirst else "right"
+ columnbox.append(VPacker(pad=0,
+ sep=self.labelspacing * fontsize,
+ align=alignment,
+ children=itemBoxes))
+
+ mode = "expand" if self._mode == "expand" else "fixed"
+ sep = self.columnspacing * fontsize
+ self._legend_handle_box = HPacker(pad=0,
+ sep=sep, align="baseline",
+ mode=mode,
+ children=columnbox)
+ self._legend_title_box = TextArea("")
+ self._legend_box = VPacker(pad=self.borderpad * fontsize,
+ sep=self.labelspacing * fontsize,
+ align="center",
+ children=[self._legend_title_box,
+ self._legend_handle_box])
+ self._legend_box.set_figure(self.figure)
+ self.texts = text_list
+ self.legendHandles = handle_list
+
+ def _auto_legend_data(self):
+ """
+ Return the data used for automatic legend placement.
+
+ Returns a four-element list ``[vertices, bboxes, lines, offsets]``,
+ all in display coordinates:
+
+ *vertices* is an array of (x, y) vertices covered by the lines drawn
+ in the parent axes, and *lines* is the list of their transformed
+ paths.
+
+ *bboxes* is a list of bounding boxes for the patches in the parent
+ axes, and *offsets* is a list of offsets for the axes' collections.
+ """
+ # should always hold because function is only called internally
+ assert self.isaxes
+
+ ax = self.parent
+ bboxes = []
+ lines = []
+ offsets = []
+
+ for handle in ax.lines:
+ assert isinstance(handle, Line2D)
+ path = handle.get_path()
+ trans = handle.get_transform()
+ tpath = trans.transform_path(path)
+ lines.append(tpath)
+
+ for handle in ax.patches:
+ assert isinstance(handle, Patch)
+
+ if isinstance(handle, Rectangle):
+ transform = handle.get_data_transform()
+ bboxes.append(handle.get_bbox().transformed(transform))
+ else:
+ transform = handle.get_transform()
+ bboxes.append(handle.get_path().get_extents(transform))
+
+ for handle in ax.collections:
+ transform, transOffset, hoffsets, paths = handle._prepare_points()
+
+ if len(hoffsets):
+ for offset in transOffset.transform(hoffsets):
+ offsets.append(offset)
+
+ try:
+ vertices = np.concatenate([l.vertices for l in lines])
+ except ValueError:
+ vertices = np.array([])
+
+ return [vertices, bboxes, lines, offsets]
+
+ def draw_frame(self, b):
+ '''
+ Set draw frame to b.
+
+ Parameters
+ ----------
+ b : bool
+ '''
+ self.set_frame_on(b)
+
+ def get_children(self):
+ 'Return a list of child artists.'
+ children = []
+ if self._legend_box:
+ children.append(self._legend_box)
+ children.append(self.get_frame())
+
+ return children
+
+ def get_frame(self):
+ '''
+ Return the `~.patches.Rectangle` instances used to frame the legend.
+ '''
+ return self.legendPatch
+
+ def get_lines(self):
+ 'Return a list of `~.lines.Line2D` instances in the legend.'
+ return [h for h in self.legendHandles if isinstance(h, Line2D)]
+
+ def get_patches(self):
+ 'Return a list of `~.patches.Patch` instances in the legend.'
+ return silent_list('Patch',
+ [h for h in self.legendHandles
+ if isinstance(h, Patch)])
+
+ def get_texts(self):
+ 'Return a list of `~.text.Text` instances in the legend.'
+ return silent_list('Text', self.texts)
+
+ def set_title(self, title, prop=None):
+ """
+ Set the legend title. Fontproperties can be optionally set
+ with *prop* parameter.
+ """
+ self._legend_title_box._text.set_text(title)
+
+ if prop is not None:
+ if isinstance(prop, dict):
+ prop = FontProperties(**prop)
+ self._legend_title_box._text.set_fontproperties(prop)
+
+ if title:
+ self._legend_title_box.set_visible(True)
+ else:
+ self._legend_title_box.set_visible(False)
+ self.stale = True
+
+ def get_title(self):
+ 'Return the `.Text` instance for the legend title.'
+ return self._legend_title_box._text
+
+ def get_window_extent(self, *args, **kwargs):
+ 'Return extent of the legend.'
+ return self.legendPatch.get_window_extent(*args, **kwargs)
+
+ def get_frame_on(self):
+ """Get whether the legend box patch is drawn."""
+ return self._drawFrame
+
+ def set_frame_on(self, b):
+ """
+ Set whether the legend box patch is drawn.
+
+ Parameters
+ ----------
+ b : bool
+ .. ACCEPTS: bool
+ """
+ self._drawFrame = b
+ self.stale = True
+
+ def get_bbox_to_anchor(self):
+ """Return the bbox that the legend will be anchored to."""
+ if self._bbox_to_anchor is None:
+ return self.parent.bbox
+ else:
+ return self._bbox_to_anchor
+
+ def set_bbox_to_anchor(self, bbox, transform=None):
+ """
+ Set the bbox that the legend will be anchored to.
+
+ *bbox* can be
+
+ - A `.BboxBase` instance
+ - A tuple of ``(left, bottom, width, height)`` in the given transform
+ (normalized axes coordinate if None)
+ - A tuple of ``(left, bottom)`` where the width and height will be
+ assumed to be zero.
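+
+ For example, any of the forms above can be passed directly (a hedged
+ sketch, assuming an existing legend ``leg``)::
+
+     leg.set_bbox_to_anchor((1.02, 0., 0.2, 1.0))  # in axes coordinates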
+ """
+ if bbox is None:
+ self._bbox_to_anchor = None
+ return
+ elif isinstance(bbox, BboxBase):
+ self._bbox_to_anchor = bbox
+ else:
+ try:
+ l = len(bbox)
+ except TypeError:
+ raise ValueError("Invalid argument for bbox : %s" % str(bbox))
+
+ if l == 2:
+ bbox = [bbox[0], bbox[1], 0, 0]
+
+ self._bbox_to_anchor = Bbox.from_bounds(*bbox)
+
+ if transform is None:
+ transform = BboxTransformTo(self.parent.bbox)
+
+ self._bbox_to_anchor = TransformedBbox(self._bbox_to_anchor,
+ transform)
+ self.stale = True
+
+ def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
+ """
+ Place the *bbox* inside the *parentbbox* according to a given
+ location code. Return the (x,y) coordinate of the bbox.
+
+ - loc: a location code in range(1, 11).
+ This corresponds to the possible values for self._loc, excluding
+ "best".
+
+ - bbox: bbox to be placed, display coordinate units.
+ - parentbbox: a parent box which will contain the bbox. In
+ display coordinates.
+ """
+ assert loc in range(1, 11) # called only internally
+
+ BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
+
+ anchor_coefs = {UR: "NE",
+ UL: "NW",
+ LL: "SW",
+ LR: "SE",
+ R: "E",
+ CL: "W",
+ CR: "E",
+ LC: "S",
+ UC: "N",
+ C: "C"}
+
+ c = anchor_coefs[loc]
+
+ fontsize = renderer.points_to_pixels(self._fontsize)
+ container = parentbbox.padded(-(self.borderaxespad) * fontsize)
+ anchored_box = bbox.anchored(c, container=container)
+ return anchored_box.x0, anchored_box.y0
+
+ def _find_best_position(self, width, height, renderer, consider=None):
+ """
+ Determine the best location to place the legend.
+
+ *consider* is a list of ``(x, y)`` pairs to consider as a potential
+ lower-left corner of the legend. All are display coords.
+ """
+ # should always hold because function is only called internally
+ assert self.isaxes
+
+ verts, bboxes, lines, offsets = self._auto_legend_data()
+
+ bbox = Bbox.from_bounds(0, 0, width, height)
+ if consider is None:
+ consider = [self._get_anchored_bbox(x, bbox,
+ self.get_bbox_to_anchor(),
+ renderer)
+ for x in range(1, len(self.codes))]
+
+ candidates = []
+ for idx, (l, b) in enumerate(consider):
+ legendBox = Bbox.from_bounds(l, b, width, height)
+ badness = 0
+ # XXX TODO: If markers are present, it would be good to
+ # take them into account when checking vertex overlaps in
+ # the next line.
+ badness = (legendBox.count_contains(verts)
+ + legendBox.count_contains(offsets)
+ + legendBox.count_overlaps(bboxes)
+ + sum(line.intersects_bbox(legendBox, filled=False)
+ for line in lines))
+ if badness == 0:
+ return l, b
+ # Include the index to favor lower codes in case of a tie.
+ candidates.append((badness, idx, (l, b)))
+
+ _, _, (l, b) = min(candidates)
+ return l, b
+
+ def contains(self, event):
+ return self.legendPatch.contains(event)
+
+ def draggable(self, state=None, use_blit=False, update="loc"):
+ """
+ Set the draggable state -- if state is
+
+ * None : toggle the current state
+
+ * True : turn draggable on
+
+ * False : turn draggable off
+
+ If draggable is on, you can drag the legend on the canvas with
+ the mouse. The `.DraggableLegend` helper instance is returned if
+ draggable is on.
+
+ The *update* parameter controls which parameter of the legend changes
+ when dragged. If update is "loc", the *loc* parameter of the legend
+ is changed. If "bbox", the *bbox_to_anchor* parameter is changed.
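+
+ A hedged usage sketch (assuming an existing axes ``ax`` with labeled
+ artists)::
+
+     leg = ax.legend()
+     leg.draggable(True)                 # enable dragging with the mouse
+     leg.draggable(False)                # turn it off again
+     leg.draggable(True, update="bbox")  # drag updates *bbox_to_anchor*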
+ """
+ is_draggable = self._draggable is not None
+
+ # if state is None we'll toggle
+ if state is None:
+ state = not is_draggable
+
+ if state:
+ if self._draggable is None:
+ self._draggable = DraggableLegend(self,
+ use_blit,
+ update=update)
+ else:
+ if self._draggable is not None:
+ self._draggable.disconnect()
+ self._draggable = None
+
+ return self._draggable
+
+
+# Helper functions to parse legend arguments for both `figure.legend` and
+# `axes.legend`:
+def _get_legend_handles(axs, legend_handler_map=None):
+ """
+ Return a generator of artists that can be used as handles in
+ a legend.
+
+ """
+ handles_original = []
+ for ax in axs:
+ handles_original += (ax.lines + ax.patches +
+ ax.collections + ax.containers)
+ # support parasite axes:
+ if hasattr(ax, 'parasites'):
+ for axx in ax.parasites:
+ handles_original += (axx.lines + axx.patches +
+ axx.collections + axx.containers)
+
+ handler_map = Legend.get_default_handler_map()
+
+ if legend_handler_map is not None:
+ handler_map = handler_map.copy()
+ handler_map.update(legend_handler_map)
+
+ has_handler = Legend.get_legend_handler
+
+ for handle in handles_original:
+ label = handle.get_label()
+ if label != '_nolegend_' and has_handler(handler_map, handle):
+ yield handle
+
+
+def _get_legend_handles_labels(axs, legend_handler_map=None):
+ """
+ Return handles and labels for legend, internal method.
+
+ """
+ handles = []
+ labels = []
+
+ for handle in _get_legend_handles(axs, legend_handler_map):
+ label = handle.get_label()
+ if (label and not label.startswith('_')):
+ handles.append(handle)
+ labels.append(label)
+ return handles, labels
+
+
+def _parse_legend_args(axs, *args, **kwargs):
+ """
+ Get the handles and labels from the calls to either ``figure.legend``
+ or ``axes.legend``.
+
+ ``axs`` is a list of axes (to get legend artists from)
+ """
+ log = logging.getLogger(__name__)
+
+ handlers = kwargs.get('handler_map', {}) or {}
+
+ # Support handles and labels being passed as keywords.
+ handles = kwargs.pop('handles', None)
+ labels = kwargs.pop('labels', None)
+
+ extra_args = ()
+
+ if (handles is not None or labels is not None) and len(args):
+ warnings.warn("You have mixed positional and keyword "
+ "arguments, some input may be "
+ "discarded.")
+
+ # if got both handles and labels as kwargs, make same length
+ if handles and labels:
+ handles, labels = zip(*zip(handles, labels))
+
+ elif handles is not None and labels is None:
+ labels = [handle.get_label() for handle in handles]
+
+ elif labels is not None and handles is None:
+ # Get as many handles as there are labels.
+ handles = [handle for handle, label
+ in zip(_get_legend_handles(axs, handlers), labels)]
+
+ # No arguments - automatically detect labels and handles.
+ elif len(args) == 0:
+ handles, labels = _get_legend_handles_labels(axs, handlers)
+ if not handles:
+ log.warning('No handles with labels found to put in legend.')
+
+ # One argument. User defined labels - automatic handle detection.
+ elif len(args) == 1:
+ labels, = args
+ # Get as many handles as there are labels.
+ handles = [handle for handle, label
+ in zip(_get_legend_handles(axs, handlers), labels)]
+
+ # Two arguments:
+ # * user defined handles and labels
+ elif len(args) >= 2:
+ handles, labels = args[:2]
+ extra_args = args[2:]
+
+ else:
+ raise TypeError('Invalid arguments to legend.')
+
+ return handles, labels, extra_args, kwargs
diff --git a/contrib/python/matplotlib/py2/matplotlib/legend_handler.py b/contrib/python/matplotlib/py2/matplotlib/legend_handler.py
new file mode 100644
index 00000000000..e1a7e2d03d1
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/legend_handler.py
@@ -0,0 +1,730 @@
+"""
+This module defines default legend handlers.
+
+It is strongly encouraged to have read the :doc:`legend guide
+</tutorials/intermediate/legend_guide>` before this documentation.
+
+Legend handlers are expected to be callable objects with the following
+signature::
+
+ legend_handler(legend, orig_handle, fontsize, handlebox)
+
+Where *legend* is the legend itself, *orig_handle* is the original
+plot, *fontsize* is the fontsize in pixels, and *handlebox* is an
+OffsetBox instance. Within the call, you should create relevant
+artists (using relevant properties from the *legend* and/or
+*orig_handle*) and add them to the handlebox. The artists need to
+be scaled according to the fontsize (note that the size is in pixels,
+i.e., it is a dpi-scaled value).
+
+This module includes definitions of several legend handler classes
+derived from the base class (HandlerBase) with the following method::
+
+ def legend_artist(self, legend, orig_handle, fontsize, handlebox):
+
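+A minimal sketch of a handler following this protocol (``AnyObjectHandler``
+is an illustrative name, not part of this module)::
+
+    from matplotlib.patches import Rectangle
+
+    class AnyObjectHandler(object):
+        def legend_artist(self, legend, orig_handle, fontsize, handlebox):
+            # draw a plain rectangle filling the handle box
+            patch = Rectangle((handlebox.xdescent, handlebox.ydescent),
+                              handlebox.width, handlebox.height,
+                              transform=handlebox.get_transform())
+            handlebox.add_artist(patch)
+            return patch
+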
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import zip
+from itertools import cycle
+
+import numpy as np
+
+from matplotlib.lines import Line2D
+from matplotlib.patches import Rectangle
+import matplotlib.collections as mcoll
+import matplotlib.colors as mcolors
+
+
+def update_from_first_child(tgt, src):
+ tgt.update_from(src.get_children()[0])
+
+
+class HandlerBase(object):
+ """
+ A Base class for default legend handlers.
+
+ The derived classes are meant to override the *create_artists* method,
+ which has the following signature::
+
+ def create_artists(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize,
+ trans):
+
+ The overridden method needs to create artists in the given
+ transform that fit within the given dimensions (xdescent, ydescent,
+ width, height) and are scaled by fontsize if necessary.
+
+ """
+ def __init__(self, xpad=0., ypad=0., update_func=None):
+ self._xpad, self._ypad = xpad, ypad
+ self._update_prop_func = update_func
+
+ def _update_prop(self, legend_handle, orig_handle):
+ if self._update_prop_func is None:
+ self._default_update_prop(legend_handle, orig_handle)
+ else:
+ self._update_prop_func(legend_handle, orig_handle)
+
+ def _default_update_prop(self, legend_handle, orig_handle):
+ legend_handle.update_from(orig_handle)
+
+ def update_prop(self, legend_handle, orig_handle, legend):
+
+ self._update_prop(legend_handle, orig_handle)
+
+ legend._set_artist_props(legend_handle)
+ legend_handle.set_clip_box(None)
+ legend_handle.set_clip_path(None)
+
+ def adjust_drawing_area(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize,
+ ):
+ xdescent = xdescent - self._xpad * fontsize
+ ydescent = ydescent - self._ypad * fontsize
+ width = width - self._xpad * fontsize
+ height = height - self._ypad * fontsize
+ return xdescent, ydescent, width, height
+
+ def legend_artist(self, legend, orig_handle,
+ fontsize, handlebox):
+ """
+ Return the artist that this HandlerBase generates for the given
+ original artist/handle.
+
+ Parameters
+ ----------
+ legend : :class:`matplotlib.legend.Legend` instance
+ The legend for which these legend artists are being created.
+ orig_handle : :class:`matplotlib.artist.Artist` or similar
+ The object for which these legend artists are being created.
+ fontsize : float or int
+ The fontsize in pixels. The artists being created should
+ be scaled according to the given fontsize.
+ handlebox : :class:`matplotlib.offsetbox.OffsetBox` instance
+ The box which has been created to hold this legend entry's
+ artists. Artists created in the `legend_artist` method must
+ be added to this handlebox inside this method.
+
+ """
+ xdescent, ydescent, width, height = self.adjust_drawing_area(
+ legend, orig_handle,
+ handlebox.xdescent, handlebox.ydescent,
+ handlebox.width, handlebox.height,
+ fontsize)
+ artists = self.create_artists(legend, orig_handle,
+ xdescent, ydescent, width, height,
+ fontsize, handlebox.get_transform())
+
+ # create_artists will return a list of artists.
+ for a in artists:
+ handlebox.add_artist(a)
+
+ # we only return the first artist
+ return artists[0]
+
+ def create_artists(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize,
+ trans):
+ raise NotImplementedError('Derived must override')
+
+
+class HandlerNpoints(HandlerBase):
+ """
+ A legend handler that shows *numpoints* points in the legend entry.
+ """
+ def __init__(self, marker_pad=0.3, numpoints=None, **kw):
+ """
+ Parameters
+ ----------
+ marker_pad : float
+ Padding between points in legend entry.
+
+ numpoints : int
+ Number of points to show in legend entry.
+
+ Notes
+ -----
+ Any other keyword arguments are given to `HandlerBase`.
+ """
+ HandlerBase.__init__(self, **kw)
+
+ self._numpoints = numpoints
+ self._marker_pad = marker_pad
+
+ def get_numpoints(self, legend):
+ if self._numpoints is None:
+ return legend.numpoints
+ else:
+ return self._numpoints
+
+ def get_xdata(self, legend, xdescent, ydescent, width, height, fontsize):
+ numpoints = self.get_numpoints(legend)
+ if numpoints > 1:
+ # we put some pad here to compensate the size of the marker
+ pad = self._marker_pad * fontsize
+ xdata = np.linspace(-xdescent + pad,
+ -xdescent + width - pad,
+ numpoints)
+ xdata_marker = xdata
+ else:
+ xdata = np.linspace(-xdescent, -xdescent + width, 2)
+ xdata_marker = [-xdescent + 0.5 * width]
+ return xdata, xdata_marker
+
+
+class HandlerNpointsYoffsets(HandlerNpoints):
+ """
+ A legend handler that shows *numpoints* in the legend, and allows them to
+ be individually offset in the y-direction.
+ """
+ def __init__(self, numpoints=None, yoffsets=None, **kw):
+ """
+ Parameters
+ ----------
+ numpoints : int
+ Number of points to show in legend entry.
+
+ yoffsets : array of floats
+ Length *numpoints* list of y offsets for each point in
+ legend entry.
+
+ Notes
+ -----
+ Any other keyword arguments are given to `HandlerNpoints`.
+ """
+ HandlerNpoints.__init__(self, numpoints=numpoints, **kw)
+ self._yoffsets = yoffsets
+
+ def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
+ if self._yoffsets is None:
+ ydata = height * legend._scatteryoffsets
+ else:
+ ydata = height * np.asarray(self._yoffsets)
+
+ return ydata
+
+
+class HandlerLine2D(HandlerNpoints):
+ """
+ Handler for `.Line2D` instances.
+ """
+ def __init__(self, marker_pad=0.3, numpoints=None, **kw):
+ """
+ Parameters
+ ----------
+ marker_pad : float
+ Padding between points in legend entry.
+
+ numpoints : int
+ Number of points to show in legend entry.
+
+ Notes
+ -----
+ Any other keyword arguments are given to `HandlerNpoints`.
+ """
+ HandlerNpoints.__init__(self, marker_pad=marker_pad,
+ numpoints=numpoints, **kw)
+
+ def create_artists(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize,
+ trans):
+
+ xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
+ width, height, fontsize)
+
+ ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)
+ legline = Line2D(xdata, ydata)
+
+ self.update_prop(legline, orig_handle, legend)
+ legline.set_drawstyle('default')
+ legline.set_marker("")
+
+ legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
+ self.update_prop(legline_marker, orig_handle, legend)
+ legline_marker.set_linestyle('None')
+ if legend.markerscale != 1:
+ newsz = legline_marker.get_markersize() * legend.markerscale
+ legline_marker.set_markersize(newsz)
+ # we don't want to add this to the return list because
+ # the texts and handles are assumed to be in one-to-one
+ # correspondence.
+ legline._legmarker = legline_marker
+
+ legline.set_transform(trans)
+ legline_marker.set_transform(trans)
+
+ return [legline, legline_marker]
+
+
+class HandlerPatch(HandlerBase):
+ """
+ Handler for `.Patch` instances.
+ """
+ def __init__(self, patch_func=None, **kw):
+ """
+ Parameters
+ ----------
+ patch_func : callable, optional
+ The function that creates the legend key artist.
+ *patch_func* should have the signature::
+
+ def patch_func(legend=legend, orig_handle=orig_handle,
+ xdescent=xdescent, ydescent=ydescent,
+ width=width, height=height, fontsize=fontsize)
+
+ Subsequently, the created artist will be passed to the handler's
+ ``update_prop`` method and the appropriate transform will be applied.
+
+ Notes
+ -----
+ Any other keyword arguments are given to `HandlerBase`.
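+
+ A hedged sketch of a *patch_func* that draws an ellipse instead of the
+ default rectangle (the name ``make_ellipse`` is illustrative)::
+
+     from matplotlib.patches import Ellipse
+
+     def make_ellipse(legend, orig_handle,
+                      xdescent, ydescent, width, height, fontsize):
+         return Ellipse(xy=(-xdescent + width / 2, -ydescent + height / 2),
+                        width=width, height=height)
+
+     HandlerPatch(patch_func=make_ellipse)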
+ """
+ HandlerBase.__init__(self, **kw)
+ self._patch_func = patch_func
+
+ def _create_patch(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize):
+ if self._patch_func is None:
+ p = Rectangle(xy=(-xdescent, -ydescent),
+ width=width, height=height)
+ else:
+ p = self._patch_func(legend=legend, orig_handle=orig_handle,
+ xdescent=xdescent, ydescent=ydescent,
+ width=width, height=height, fontsize=fontsize)
+ return p
+
+ def create_artists(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize, trans):
+ p = self._create_patch(legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize)
+ self.update_prop(p, orig_handle, legend)
+ p.set_transform(trans)
+ return [p]
+
+
+class HandlerLineCollection(HandlerLine2D):
+ """
+ Handler for `.LineCollection` instances.
+ """
+ def get_numpoints(self, legend):
+ if self._numpoints is None:
+ return legend.scatterpoints
+ else:
+ return self._numpoints
+
+ def _default_update_prop(self, legend_handle, orig_handle):
+ lw = orig_handle.get_linewidths()[0]
+ dashes = orig_handle._us_linestyles[0]
+ color = orig_handle.get_colors()[0]
+ legend_handle.set_color(color)
+ legend_handle.set_linestyle(dashes)
+ legend_handle.set_linewidth(lw)
+
+ def create_artists(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize, trans):
+
+ xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
+ width, height, fontsize)
+ ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)
+ legline = Line2D(xdata, ydata)
+
+ self.update_prop(legline, orig_handle, legend)
+ legline.set_transform(trans)
+
+ return [legline]
+
+
+class HandlerRegularPolyCollection(HandlerNpointsYoffsets):
+ """
+ Handler for `.RegularPolyCollections`.
+ """
+ def __init__(self, yoffsets=None, sizes=None, **kw):
+ HandlerNpointsYoffsets.__init__(self, yoffsets=yoffsets, **kw)
+
+ self._sizes = sizes
+
+ def get_numpoints(self, legend):
+ if self._numpoints is None:
+ return legend.scatterpoints
+ else:
+ return self._numpoints
+
+ def get_sizes(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize):
+ if self._sizes is None:
+ handle_sizes = orig_handle.get_sizes()
+ if not len(handle_sizes):
+ handle_sizes = [1]
+ size_max = max(handle_sizes) * legend.markerscale ** 2
+ size_min = min(handle_sizes) * legend.markerscale ** 2
+
+ numpoints = self.get_numpoints(legend)
+ if numpoints < 4:
+ sizes = [.5 * (size_max + size_min), size_max,
+ size_min][:numpoints]
+ else:
+ rng = (size_max - size_min)
+ sizes = rng * np.linspace(0, 1, numpoints) + size_min
+ else:
+ sizes = self._sizes
+
+ return sizes
+
+ def update_prop(self, legend_handle, orig_handle, legend):
+
+ self._update_prop(legend_handle, orig_handle)
+
+ legend_handle.set_figure(legend.figure)
+ #legend._set_artist_props(legend_handle)
+ legend_handle.set_clip_box(None)
+ legend_handle.set_clip_path(None)
+
+ def create_collection(self, orig_handle, sizes, offsets, transOffset):
+ p = type(orig_handle)(orig_handle.get_numsides(),
+ rotation=orig_handle.get_rotation(),
+ sizes=sizes,
+ offsets=offsets,
+ transOffset=transOffset,
+ )
+ return p
+
+ def create_artists(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize,
+ trans):
+ xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
+ width, height, fontsize)
+
+ ydata = self.get_ydata(legend, xdescent, ydescent,
+ width, height, fontsize)
+
+ sizes = self.get_sizes(legend, orig_handle, xdescent, ydescent,
+ width, height, fontsize)
+
+ p = self.create_collection(orig_handle, sizes,
+ offsets=list(zip(xdata_marker, ydata)),
+ transOffset=trans)
+
+ self.update_prop(p, orig_handle, legend)
+ p._transOffset = trans
+ return [p]
+
+
+class HandlerPathCollection(HandlerRegularPolyCollection):
+ """
+ Handler for `.PathCollections`, which are used by `~.Axes.scatter`.
+ """
+ def create_collection(self, orig_handle, sizes, offsets, transOffset):
+ p = type(orig_handle)([orig_handle.get_paths()[0]],
+ sizes=sizes,
+ offsets=offsets,
+ transOffset=transOffset,
+ )
+ return p
+
+
+class HandlerCircleCollection(HandlerRegularPolyCollection):
+ """
+ Handler for `.CircleCollections`.
+ """
+ def create_collection(self, orig_handle, sizes, offsets, transOffset):
+ p = type(orig_handle)(sizes,
+ offsets=offsets,
+ transOffset=transOffset,
+ )
+ return p
+
+
+class HandlerErrorbar(HandlerLine2D):
+ """
+ Handler for Errorbars.
+ """
+ def __init__(self, xerr_size=0.5, yerr_size=None,
+ marker_pad=0.3, numpoints=None, **kw):
+
+ self._xerr_size = xerr_size
+ self._yerr_size = yerr_size
+
+ HandlerLine2D.__init__(self, marker_pad=marker_pad,
+ numpoints=numpoints, **kw)
+
+ def get_err_size(self, legend, xdescent, ydescent,
+ width, height, fontsize):
+ xerr_size = self._xerr_size * fontsize
+
+ if self._yerr_size is None:
+ yerr_size = xerr_size
+ else:
+ yerr_size = self._yerr_size * fontsize
+
+ return xerr_size, yerr_size
+
+ def create_artists(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize,
+ trans):
+
+ plotlines, caplines, barlinecols = orig_handle
+
+ xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
+ width, height, fontsize)
+
+ ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)
+ legline = Line2D(xdata, ydata)
+
+ xdata_marker = np.asarray(xdata_marker)
+ ydata_marker = np.asarray(ydata[:len(xdata_marker)])
+
+ xerr_size, yerr_size = self.get_err_size(legend, xdescent, ydescent,
+ width, height, fontsize)
+
+ legline_marker = Line2D(xdata_marker, ydata_marker)
+
+ # when plotlines are None (only errorbars are drawn), we just
+ # make legline invisible.
+ if plotlines is None:
+ legline.set_visible(False)
+ legline_marker.set_visible(False)
+ else:
+ self.update_prop(legline, plotlines, legend)
+
+ legline.set_drawstyle('default')
+ legline.set_marker('None')
+
+ self.update_prop(legline_marker, plotlines, legend)
+ legline_marker.set_linestyle('None')
+
+ if legend.markerscale != 1:
+ newsz = legline_marker.get_markersize() * legend.markerscale
+ legline_marker.set_markersize(newsz)
+
+ handle_barlinecols = []
+ handle_caplines = []
+
+ if orig_handle.has_xerr:
+ verts = [ ((x - xerr_size, y), (x + xerr_size, y))
+ for x, y in zip(xdata_marker, ydata_marker)]
+ coll = mcoll.LineCollection(verts)
+ self.update_prop(coll, barlinecols[0], legend)
+ handle_barlinecols.append(coll)
+
+ if caplines:
+ capline_left = Line2D(xdata_marker - xerr_size, ydata_marker)
+ capline_right = Line2D(xdata_marker + xerr_size, ydata_marker)
+ self.update_prop(capline_left, caplines[0], legend)
+ self.update_prop(capline_right, caplines[0], legend)
+ capline_left.set_marker("|")
+ capline_right.set_marker("|")
+
+ handle_caplines.append(capline_left)
+ handle_caplines.append(capline_right)
+
+ if orig_handle.has_yerr:
+ verts = [ ((x, y - yerr_size), (x, y + yerr_size))
+ for x, y in zip(xdata_marker, ydata_marker)]
+ coll = mcoll.LineCollection(verts)
+ self.update_prop(coll, barlinecols[0], legend)
+ handle_barlinecols.append(coll)
+
+ if caplines:
+ capline_left = Line2D(xdata_marker, ydata_marker - yerr_size)
+ capline_right = Line2D(xdata_marker, ydata_marker + yerr_size)
+ self.update_prop(capline_left, caplines[0], legend)
+ self.update_prop(capline_right, caplines[0], legend)
+ capline_left.set_marker("_")
+ capline_right.set_marker("_")
+
+ handle_caplines.append(capline_left)
+ handle_caplines.append(capline_right)
+
+ artists = []
+ artists.extend(handle_barlinecols)
+ artists.extend(handle_caplines)
+ artists.append(legline)
+ artists.append(legline_marker)
+
+ for artist in artists:
+ artist.set_transform(trans)
+
+ return artists
+
+
+class HandlerStem(HandlerNpointsYoffsets):
+ """
+ Handler for plots produced by `~.Axes.stem`.
+ """
+ def __init__(self, marker_pad=0.3, numpoints=None,
+ bottom=None, yoffsets=None, **kw):
+ """
+ Parameters
+ ----------
+ marker_pad : float
+ Padding between points in legend entry. Default is 0.3.
+
+ numpoints : int, optional
+ Number of points to show in legend entry.
+
+ bottom : float, optional
+ The y-position of the stem baseline in the legend entry.
+ Default is 0.
+
+ yoffsets : array of floats, optional
+ Length *numpoints* list of y offsets for each point in
+ legend entry.
+
+ Notes
+ -----
+ Any other keyword arguments are given to `HandlerNpointsYoffsets`.
+ """
+
+ HandlerNpointsYoffsets.__init__(self, marker_pad=marker_pad,
+ numpoints=numpoints,
+ yoffsets=yoffsets,
+ **kw)
+ self._bottom = bottom
+
+ def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
+ if self._yoffsets is None:
+ ydata = height * (0.5 * legend._scatteryoffsets + 0.5)
+ else:
+ ydata = height * np.asarray(self._yoffsets)
+
+ return ydata
+
+ def create_artists(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize,
+ trans):
+
+ markerline, stemlines, baseline = orig_handle
+
+ xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
+ width, height, fontsize)
+
+ ydata = self.get_ydata(legend, xdescent, ydescent,
+ width, height, fontsize)
+
+ if self._bottom is None:
+ bottom = 0.
+ else:
+ bottom = self._bottom
+
+ leg_markerline = Line2D(xdata_marker, ydata[:len(xdata_marker)])
+ self.update_prop(leg_markerline, markerline, legend)
+
+ leg_stemlines = []
+ for thisx, thisy in zip(xdata_marker, ydata):
+ l = Line2D([thisx, thisx], [bottom, thisy])
+ leg_stemlines.append(l)
+
+ for lm, m in zip(leg_stemlines, stemlines):
+ self.update_prop(lm, m, legend)
+
+ leg_baseline = Line2D([np.min(xdata), np.max(xdata)],
+ [bottom, bottom])
+
+ self.update_prop(leg_baseline, baseline, legend)
+
+ artists = [leg_markerline]
+ artists.extend(leg_stemlines)
+ artists.append(leg_baseline)
+
+ for artist in artists:
+ artist.set_transform(trans)
+
+ return artists
+
+
+class HandlerTuple(HandlerBase):
+ """
+ Handler for tuples of artists.
+
+ Additional kwargs are passed through to `HandlerBase`.
+
+ Parameters
+ ----------
+ ndivide : int, optional
+ The number of sections to divide the legend area into. If None,
+ use the length of the input tuple. Default is 1.
+
+ pad : float, optional
+ If None, fall back to ``legend.borderpad`` as the default.
+ In units of fraction of font size. Default is None.
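+
+ A hedged usage sketch (assuming two already-created artists ``p1``
+ and ``p2``)::
+
+     ax.legend([(p1, p2)], ["combined entry"],
+               handler_map={tuple: HandlerTuple(ndivide=None)})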
+ """
+ def __init__(self, ndivide=1, pad=None, **kwargs):
+
+ self._ndivide = ndivide
+ self._pad = pad
+ HandlerBase.__init__(self, **kwargs)
+
+ def create_artists(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize,
+ trans):
+
+ handler_map = legend.get_legend_handler_map()
+
+ if self._ndivide is None:
+ ndivide = len(orig_handle)
+ else:
+ ndivide = self._ndivide
+
+ if self._pad is None:
+ pad = legend.borderpad * fontsize
+ else:
+ pad = self._pad * fontsize
+
+ if ndivide > 1:
+ width = (width - pad * (ndivide - 1)) / ndivide
+
+ xds_cycle = cycle(xdescent - (width + pad) * np.arange(ndivide))
+
+ a_list = []
+ for handle1 in orig_handle:
+ handler = legend.get_legend_handler(handler_map, handle1)
+ _a_list = handler.create_artists(
+ legend, handle1,
+ next(xds_cycle), ydescent, width, height, fontsize, trans)
+ a_list.extend(_a_list)
+
+ return a_list
+
+
+class HandlerPolyCollection(HandlerBase):
+ """
+ Handler for `.PolyCollection` used in `~.Axes.fill_between` and
+ `~.Axes.stackplot`.
+ """
+ def _update_prop(self, legend_handle, orig_handle):
+ def first_color(colors):
+ if colors is None:
+ return None
+ colors = mcolors.to_rgba_array(colors)
+ if len(colors):
+ return colors[0]
+ else:
+ return "none"
+
+ def get_first(prop_array):
+ if len(prop_array):
+ return prop_array[0]
+ else:
+ return None
+ edgecolor = getattr(orig_handle, '_original_edgecolor',
+ orig_handle.get_edgecolor())
+ legend_handle.set_edgecolor(first_color(edgecolor))
+ facecolor = getattr(orig_handle, '_original_facecolor',
+ orig_handle.get_facecolor())
+ legend_handle.set_facecolor(first_color(facecolor))
+ legend_handle.set_fill(orig_handle.get_fill())
+ legend_handle.set_hatch(orig_handle.get_hatch())
+ legend_handle.set_linewidth(get_first(orig_handle.get_linewidths()))
+ legend_handle.set_linestyle(get_first(orig_handle.get_linestyles()))
+ legend_handle.set_transform(get_first(orig_handle.get_transforms()))
+ legend_handle.set_figure(orig_handle.get_figure())
+ legend_handle.set_alpha(orig_handle.get_alpha())
+
+ def create_artists(self, legend, orig_handle,
+ xdescent, ydescent, width, height, fontsize, trans):
+ p = Rectangle(xy=(-xdescent, -ydescent),
+ width=width, height=height)
+ self.update_prop(p, orig_handle, legend)
+ p.set_transform(trans)
+ return [p]
diff --git a/contrib/python/matplotlib/py2/matplotlib/lines.py b/contrib/python/matplotlib/py2/matplotlib/lines.py
new file mode 100644
index 00000000000..dac18d49855
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/lines.py
@@ -0,0 +1,1507 @@
+"""
+This module contains the 2D line class, which can be drawn with a
+variety of line styles, markers and colors.
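+
+A hedged usage sketch (normally one goes through `~.Axes.plot` rather than
+constructing a `Line2D` directly)::
+
+    from matplotlib.lines import Line2D
+
+    line = Line2D([0, 1, 2], [0, 1, 0], linestyle='--', marker='o')
+    ax.add_line(line)  # ``ax`` is an assumed, existing Axes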
+"""
+
+# TODO: expose cap and join style attrs
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import warnings
+
+import numpy as np
+
+from . import artist, cbook, colors as mcolors, docstring, rcParams
+from .artist import Artist, allow_rasterization
+from .cbook import (
+ _to_unmasked_float_array, iterable, is_numlike, ls_mapper, ls_mapper_r,
+ STEP_LOOKUP_MAP)
+from .markers import MarkerStyle
+from .path import Path
+from .transforms import Bbox, TransformedPath, IdentityTransform
+
+# Imported here for backward compatibility, even though they don't
+# really belong.
+from . import _path
+from .markers import (
+ CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
+ CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE,
+ TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN)
+
+
+def _get_dash_pattern(style):
+ """Convert linestyle -> dash pattern
+ """
+ # go from short hand -> full strings
+ if isinstance(style, six.string_types):
+ style = ls_mapper.get(style, style)
+ # un-dashed styles
+ if style in ['solid', 'None']:
+ offset, dashes = None, None
+ # dashed styles
+ elif style in ['dashed', 'dashdot', 'dotted']:
+ offset = 0
+ dashes = tuple(rcParams['lines.{}_pattern'.format(style)])
+ #
+ elif isinstance(style, tuple):
+ offset, dashes = style
+ else:
+ raise ValueError('Unrecognized linestyle: %s' % str(style))
+
+ # normalize offset to be positive and shorter than the dash cycle
+ if dashes is not None and offset is not None:
+ dsum = sum(dashes)
+ if dsum:
+ offset %= dsum
+
+ return offset, dashes
+
+
+def _scale_dashes(offset, dashes, lw):
+ if not rcParams['lines.scale_dashes']:
+ return offset, dashes
+
+ scaled_offset = scaled_dashes = None
+ if offset is not None:
+ scaled_offset = offset * lw
+ if dashes is not None:
+ scaled_dashes = [x * lw if x is not None else None
+ for x in dashes]
+
+ return scaled_offset, scaled_dashes
+
+
+def segment_hits(cx, cy, x, y, radius):
+ """
+ Determine if any line segments are within *radius* of a
+ point. Returns the indices of the line segments that are within
+ that radius.
+ """
+ # Process single points specially
+ if len(x) < 2:
+ res, = np.nonzero((cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2)
+ return res
+
+ # We need to lop the last element off a lot.
+ xr, yr = x[:-1], y[:-1]
+
+ # Only look at line segments whose nearest point to C on the line
+ # lies within the segment.
+ dx, dy = x[1:] - xr, y[1:] - yr
+ Lnorm_sq = dx ** 2 + dy ** 2 # Possibly want to eliminate Lnorm==0
+ u = ((cx - xr) * dx + (cy - yr) * dy) / Lnorm_sq
+ candidates = (u >= 0) & (u <= 1)
+
+ # Note that there is a little area near one side of each point
+ # which will be near neither segment, and another which will
+ # be near both, depending on the angle of the lines. The
+ # following radius test eliminates these ambiguities.
+ point_hits = (cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2
+ candidates = candidates & ~(point_hits[:-1] | point_hits[1:])
+
+ # For those candidates which remain, determine how far they lie away
+ # from the line.
+ px, py = xr + u * dx, yr + u * dy
+ line_hits = (cx - px) ** 2 + (cy - py) ** 2 <= radius ** 2
+ line_hits = line_hits & candidates
+ points, = point_hits.ravel().nonzero()
+ lines, = line_hits.ravel().nonzero()
+ return np.concatenate((points, lines))
+
+
+def _mark_every_path(markevery, tpath, affine, ax_transform):
+ """
+ Helper function that sorts out how to deal with the input
+ `markevery` and returns the points where markers should be drawn.
+
+ Takes in the `markevery` value and the line path and returns the
+ sub-sampled path.
+ """
+ # pull out the two bits of data we want from the path
+ codes, verts = tpath.codes, tpath.vertices
+
+ def _slice_or_none(in_v, slc):
+ '''
+ Helper function to cope with `codes` being an
+ ndarray or `None`
+ '''
+ if in_v is None:
+ return None
+ return in_v[slc]
+
+ # if just a float, assume starting at 0.0 and make a tuple
+ if isinstance(markevery, float):
+ markevery = (0.0, markevery)
+ # if just an int, assume starting at 0 and make a tuple
+ elif isinstance(markevery, int):
+ markevery = (0, markevery)
+ # if just a numpy int, assume starting at 0 and make a tuple
+ elif isinstance(markevery, np.integer):
+ markevery = (0, markevery.item())
+
+ if isinstance(markevery, tuple):
+ if len(markevery) != 2:
+ raise ValueError('`markevery` is a tuple but its '
+ 'len is not 2; '
+ 'markevery=%s' % (markevery,))
+ start, step = markevery
+ # if step is an int, old behavior
+ if isinstance(step, int):
+ #tuple of 2 int is for backwards compatibility,
+ if not(isinstance(start, int)):
+ raise ValueError('`markevery` is a tuple with '
+ 'len 2 and second element is an int, but '
+ 'the first element is not an int; '
+ 'markevery=%s' % (markevery,))
+ # just return, we are done here
+
+ return Path(verts[slice(start, None, step)],
+ _slice_or_none(codes, slice(start, None, step)))
+
+ elif isinstance(step, float):
+ if not (isinstance(start, int) or
+ isinstance(start, float)):
+ raise ValueError('`markevery` is a tuple with '
+ 'len 2 and second element is a float, but '
+ 'the first element is not a float or an '
+ 'int; '
+ 'markevery=%s' % (markevery,))
+ #calc cumulative distance along path (in display
+ # coords):
+ disp_coords = affine.transform(tpath.vertices)
+ delta = np.empty((len(disp_coords), 2),
+ dtype=float)
+ delta[0, :] = 0.0
+ delta[1:, :] = (disp_coords[1:, :] -
+ disp_coords[:-1, :])
+ delta = np.sum(delta**2, axis=1)
+ delta = np.sqrt(delta)
+ delta = np.cumsum(delta)
+ #calc distance between markers along path based on
+ # the axes bounding box diagonal being a distance
+ # of unity:
+ scale = ax_transform.transform(
+ np.array([[0, 0], [1, 1]]))
+ scale = np.diff(scale, axis=0)
+ scale = np.sum(scale**2)
+ scale = np.sqrt(scale)
+ marker_delta = np.arange(start * scale,
+ delta[-1],
+ step * scale)
+ #find closest actual data point that is closest to
+ # the theoretical distance along the path:
+ inds = np.abs(delta[np.newaxis, :] -
+ marker_delta[:, np.newaxis])
+ inds = inds.argmin(axis=1)
+ inds = np.unique(inds)
+ # return, we are done here
+ return Path(verts[inds],
+ _slice_or_none(codes, inds))
+ else:
+ raise ValueError('`markevery` is a tuple with '
+ 'len 2, but its second element is not an int '
+ 'or a float; '
+ 'markevery=%s' % (markevery,))
+
+ elif isinstance(markevery, slice):
+ # mazol tov, it's already a slice, just return
+ return Path(verts[markevery],
+ _slice_or_none(codes, markevery))
+
+ elif iterable(markevery):
+ #fancy indexing
+ try:
+ return Path(verts[markevery],
+ _slice_or_none(codes, markevery))
+
+ except (ValueError, IndexError):
+ raise ValueError('`markevery` is iterable but '
+ 'not a valid form of numpy fancy indexing; '
+ 'markevery=%s' % (markevery,))
+ else:
+ raise ValueError('Value of `markevery` is not '
+ 'recognized; '
+ 'markevery=%s' % (markevery,))
+
+
+class Line2D(Artist):
+ """
+ A line - the line can have both a solid linestyle connecting all
+ the vertices, and a marker at each vertex. Additionally, the
+ drawing of the solid line is influenced by the drawstyle, e.g., one
+ can create "stepped" lines in various styles.
+
+
+ """
+ lineStyles = _lineStyles = { # hidden names deprecated
+ '-': '_draw_solid',
+ '--': '_draw_dashed',
+ '-.': '_draw_dash_dot',
+ ':': '_draw_dotted',
+ 'None': '_draw_nothing',
+ ' ': '_draw_nothing',
+ '': '_draw_nothing',
+ }
+
+ _drawStyles_l = {
+ 'default': '_draw_lines',
+ 'steps-mid': '_draw_steps_mid',
+ 'steps-pre': '_draw_steps_pre',
+ 'steps-post': '_draw_steps_post',
+ }
+
+ _drawStyles_s = {
+ 'steps': '_draw_steps_pre',
+ }
+
+ # drawStyles should now be deprecated.
+ drawStyles = {}
+ drawStyles.update(_drawStyles_l)
+ drawStyles.update(_drawStyles_s)
+ # Need a list ordered with long names first:
+ drawStyleKeys = list(_drawStyles_l) + list(_drawStyles_s)
+
+ # Referenced here to maintain API. These are defined in
+ # MarkerStyle
+ markers = MarkerStyle.markers
+ filled_markers = MarkerStyle.filled_markers
+ fillStyles = MarkerStyle.fillstyles
+
+ zorder = 2
+ validCap = ('butt', 'round', 'projecting')
+ validJoin = ('miter', 'round', 'bevel')
+
+ def __str__(self):
+ if self._label != "":
+ return "Line2D(%s)" % (self._label)
+ elif self._x is None:
+ return "Line2D()"
+ elif len(self._x) > 3:
+ return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\
+ % (self._x[0], self._y[0], self._x[0],
+ self._y[0], self._x[-1], self._y[-1])
+ else:
+ return "Line2D(%s)"\
+ % (",".join(["(%g,%g)" % (x, y) for x, y
+ in zip(self._x, self._y)]))
+
+ def __init__(self, xdata, ydata,
+ linewidth=None, # all Nones default to rc
+ linestyle=None,
+ color=None,
+ marker=None,
+ markersize=None,
+ markeredgewidth=None,
+ markeredgecolor=None,
+ markerfacecolor=None,
+ markerfacecoloralt='none',
+ fillstyle=None,
+ antialiased=None,
+ dash_capstyle=None,
+ solid_capstyle=None,
+ dash_joinstyle=None,
+ solid_joinstyle=None,
+ pickradius=5,
+ drawstyle=None,
+ markevery=None,
+ **kwargs
+ ):
+ """
+ Create a :class:`~matplotlib.lines.Line2D` instance with *x*
+ and *y* data in sequences *xdata*, *ydata*.
+
+ The kwargs are :class:`~matplotlib.lines.Line2D` properties:
+
+ %(Line2D)s
+
+ See :meth:`set_linestyle` for a description of the line styles,
+ :meth:`set_marker` for a description of the markers, and
+ :meth:`set_drawstyle` for a description of the draw styles.
+
+ """
+ Artist.__init__(self)
+
+ #convert sequences to numpy arrays
+ if not iterable(xdata):
+ raise RuntimeError('xdata must be a sequence')
+ if not iterable(ydata):
+ raise RuntimeError('ydata must be a sequence')
+
+ if linewidth is None:
+ linewidth = rcParams['lines.linewidth']
+
+ if linestyle is None:
+ linestyle = rcParams['lines.linestyle']
+ if marker is None:
+ marker = rcParams['lines.marker']
+ if color is None:
+ color = rcParams['lines.color']
+
+ if markersize is None:
+ markersize = rcParams['lines.markersize']
+ if antialiased is None:
+ antialiased = rcParams['lines.antialiased']
+ if dash_capstyle is None:
+ dash_capstyle = rcParams['lines.dash_capstyle']
+ if dash_joinstyle is None:
+ dash_joinstyle = rcParams['lines.dash_joinstyle']
+ if solid_capstyle is None:
+ solid_capstyle = rcParams['lines.solid_capstyle']
+ if solid_joinstyle is None:
+ solid_joinstyle = rcParams['lines.solid_joinstyle']
+
+ if isinstance(linestyle, six.string_types):
+ ds, ls = self._split_drawstyle_linestyle(linestyle)
+ if ds is not None and drawstyle is not None and ds != drawstyle:
+ raise ValueError("Inconsistent drawstyle ({0!r}) and "
+ "linestyle ({1!r})".format(drawstyle,
+ linestyle)
+ )
+ linestyle = ls
+
+ if ds is not None:
+ drawstyle = ds
+
+ if drawstyle is None:
+ drawstyle = 'default'
+
+ self._dashcapstyle = None
+ self._dashjoinstyle = None
+ self._solidjoinstyle = None
+ self._solidcapstyle = None
+ self.set_dash_capstyle(dash_capstyle)
+ self.set_dash_joinstyle(dash_joinstyle)
+ self.set_solid_capstyle(solid_capstyle)
+ self.set_solid_joinstyle(solid_joinstyle)
+
+ self._linestyles = None
+ self._drawstyle = None
+ self._linewidth = linewidth
+
+ # scaled dash + offset
+ self._dashSeq = None
+ self._dashOffset = 0
+ # unscaled dash + offset
+        # this is needed for scaling the dash pattern by linewidth
+ self._us_dashSeq = None
+ self._us_dashOffset = 0
+
+ self.set_linestyle(linestyle)
+ self.set_drawstyle(drawstyle)
+ self.set_linewidth(linewidth)
+
+ self._color = None
+ self.set_color(color)
+ self._marker = MarkerStyle(marker, fillstyle)
+
+ self._markevery = None
+ self._markersize = None
+ self._antialiased = None
+
+ self.set_markevery(markevery)
+ self.set_antialiased(antialiased)
+ self.set_markersize(markersize)
+
+ self._markeredgecolor = None
+ self._markeredgewidth = None
+ self._markerfacecolor = None
+ self._markerfacecoloralt = None
+
+ self.set_markerfacecolor(markerfacecolor)
+ self.set_markerfacecoloralt(markerfacecoloralt)
+ self.set_markeredgecolor(markeredgecolor)
+ self.set_markeredgewidth(markeredgewidth)
+
+ self.verticalOffset = None
+
+ # update kwargs before updating data to give the caller a
+ # chance to init axes (and hence unit support)
+ self.update(kwargs)
+ self.pickradius = pickradius
+ self.ind_offset = 0
+ if is_numlike(self._picker):
+ self.pickradius = self._picker
+
+ self._xorig = np.asarray([])
+ self._yorig = np.asarray([])
+ self._invalidx = True
+ self._invalidy = True
+ self._x = None
+ self._y = None
+ self._xy = None
+ self._path = None
+ self._transformed_path = None
+ self._subslice = False
+ self._x_filled = None # used in subslicing; only x is needed
+
+ self.set_data(xdata, ydata)
+
+ def contains(self, mouseevent):
+ """
+ Test whether the mouse event occurred on the line. The pick
+ radius determines the precision of the location test (usually
+ within five points of the value). Use
+ :meth:`~matplotlib.lines.Line2D.get_pickradius` or
+ :meth:`~matplotlib.lines.Line2D.set_pickradius` to view or
+ modify it.
+
+ Returns *True* if any values are within the radius along with
+ ``{'ind': pointlist}``, where *pointlist* is the set of points
+ within the radius.
+
+ TODO: sort returned indices by distance
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+
+ if not is_numlike(self.pickradius):
+ raise ValueError("pick radius should be a distance")
+
+ # Make sure we have data to plot
+ if self._invalidy or self._invalidx:
+ self.recache()
+ if len(self._xy) == 0:
+ return False, {}
+
+ # Convert points to pixels
+ transformed_path = self._get_transformed_path()
+ path, affine = transformed_path.get_transformed_path_and_affine()
+ path = affine.transform_path(path)
+ xy = path.vertices
+ xt = xy[:, 0]
+ yt = xy[:, 1]
+
+ # Convert pick radius from points to pixels
+ if self.figure is None:
+            warnings.warn('no figure set when checking if mouse is on line')
+ pixels = self.pickradius
+ else:
+ pixels = self.figure.dpi / 72. * self.pickradius
+
+ # the math involved in checking for containment (here and inside of
+ # segment_hits) assumes that it is OK to overflow. In case the
+ # application has set the error flags such that an exception is raised
+ # on overflow, we temporarily set the appropriate error flags here and
+ # set them back when we are finished.
+ with np.errstate(all='ignore'):
+ # Check for collision
+ if self._linestyle in ['None', None]:
+ # If no line, return the nearby point(s)
+ d = (xt - mouseevent.x) ** 2 + (yt - mouseevent.y) ** 2
+ ind, = np.nonzero(np.less_equal(d, pixels ** 2))
+ else:
+ # If line, return the nearby segment(s)
+ ind = segment_hits(mouseevent.x, mouseevent.y, xt, yt, pixels)
+ if self._drawstyle.startswith("steps"):
+ ind //= 2
+
+ ind += self.ind_offset
+
+ # Return the point(s) within radius
+ return len(ind) > 0, dict(ind=ind)
+
+ def get_pickradius(self):
+ """return the pick radius used for containment tests"""
+ return self.pickradius
+
+ def set_pickradius(self, d):
+ """Set the pick radius used for containment tests.
+
+ .. ACCEPTS: float distance in points
+
+ Parameters
+ ----------
+ d : float
+ Pick radius, in points.
+ """
+ self.pickradius = d
+
+ def get_fillstyle(self):
+ """
+ return the marker fillstyle
+ """
+ return self._marker.get_fillstyle()
+
+ def set_fillstyle(self, fs):
+ """
+ Set the marker fill style; 'full' means fill the whole marker.
+ 'none' means no filling; other options are for half-filled markers.
+
+ ACCEPTS: ['full' | 'left' | 'right' | 'bottom' | 'top' | 'none']
+ """
+ self._marker.set_fillstyle(fs)
+ self.stale = True
+
+ def set_markevery(self, every):
+ """Set the markevery property to subsample the plot when using markers.
+
+ e.g., if `every=5`, every 5-th marker will be plotted.
+
+ ACCEPTS: [None | int | length-2 tuple of int | slice |
+ list/array of int | float | length-2 tuple of float]
+
+ Parameters
+ ----------
+ every: None | int | length-2 tuple of int | slice | list/array of int \
+| float | length-2 tuple of float
+ Which markers to plot.
+
+ - every=None, every point will be plotted.
+ - every=N, every N-th marker will be plotted starting with
+ marker 0.
+ - every=(start, N), every N-th marker, starting at point
+ start, will be plotted.
+ - every=slice(start, end, N), every N-th marker, starting at
+              point start, up to but not including point end, will be plotted.
+ - every=[i, j, m, n], only markers at points i, j, m, and n
+ will be plotted.
+ - every=0.1, (i.e. a float) then markers will be spaced at
+ approximately equal distances along the line; the distance
+ along the line between markers is determined by multiplying the
+ display-coordinate distance of the axes bounding-box diagonal
+ by the value of every.
+            - every=(0.5, 0.1) (i.e. a length-2 tuple of float), the
+              same functionality as every=0.1 is exhibited, but the first
+              marker will be offset along the line by 0.5 multiplied by the
+              display-coordinate-diagonal-distance.
+
+ Notes
+ -----
+ Setting the markevery property will only show markers at actual data
+ points. When using float arguments to set the markevery property
+ on irregularly spaced data, the markers will likely not appear evenly
+ spaced because the actual data points do not coincide with the
+ theoretical spacing between markers.
+
+        When using a start offset to specify the first marker, the offset will
+        be from the first data point, which may be different from the first
+        visible data point if the plot is zoomed in.
+
+ If zooming in on a plot when using float arguments then the actual
+ data points that have markers will change because the distance between
+ markers is always determined from the display-coordinates
+ axes-bounding-box-diagonal regardless of the actual axes data limits.
+
+ """
+ if self._markevery != every:
+ self.stale = True
+ self._markevery = every
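+
+    # A few illustrative calls (comment only), matching the forms documented
+    # in set_markevery above:
+    #
+    #     line.set_markevery(5)                 # every 5th data point
+    #     line.set_markevery((2, 10))           # every 10th point, from index 2
+    #     line.set_markevery(slice(10, 80, 5))  # points 10, 15, ..., 75
+    #     line.set_markevery(0.1)               # markers ~0.1 of the axes
+    #                                           # diagonal apart along the line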
+
+ def get_markevery(self):
+ """return the markevery setting"""
+ return self._markevery
+
+ def set_picker(self, p):
+ """Sets the event picker details for the line.
+
+ ACCEPTS: float distance in points or callable pick function
+ ``fn(artist, event)``
+ """
+ if callable(p):
+ self._contains = p
+ else:
+ self.pickradius = p
+ self._picker = p
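+
+    # Illustrative (comment only): ``line.set_picker(5)`` makes the line
+    # pickable within 5 points of the cursor, while
+    # ``line.set_picker(lambda artist, event: (True, {}))`` installs a custom
+    # hit-test in place of the default containment logic.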
+
+ def get_window_extent(self, renderer):
+ bbox = Bbox([[0, 0], [0, 0]])
+ trans_data_to_xy = self.get_transform().transform
+ bbox.update_from_data_xy(trans_data_to_xy(self.get_xydata()),
+ ignore=True)
+ # correct for marker size, if any
+ if self._marker:
+ ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5
+ bbox = bbox.padded(ms)
+ return bbox
+
+ @Artist.axes.setter
+ def axes(self, ax):
+ # call the set method from the base-class property
+ Artist.axes.fset(self, ax)
+ if ax is not None:
+ # connect unit-related callbacks
+ if ax.xaxis is not None:
+ self._xcid = ax.xaxis.callbacks.connect('units',
+ self.recache_always)
+ if ax.yaxis is not None:
+ self._ycid = ax.yaxis.callbacks.connect('units',
+ self.recache_always)
+
+ def set_data(self, *args):
+ """
+ Set the x and y data
+
+ ACCEPTS: 2D array (rows are x, y) or two 1D arrays
+ """
+ if len(args) == 1:
+ x, y = args[0]
+ else:
+ x, y = args
+
+ self.set_xdata(x)
+ self.set_ydata(y)
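+
+    # Both call forms are equivalent (comment only):
+    #
+    #     line.set_data([0, 1, 2], [3, 4, 5])   # two 1D sequences
+    #     line.set_data(np.array([[0, 1, 2],
+    #                             [3, 4, 5]]))  # one 2xN array (rows are x, y)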
+
+ def recache_always(self):
+ self.recache(always=True)
+
+ def recache(self, always=False):
+ if always or self._invalidx:
+ xconv = self.convert_xunits(self._xorig)
+ x = _to_unmasked_float_array(xconv).ravel()
+ else:
+ x = self._x
+ if always or self._invalidy:
+ yconv = self.convert_yunits(self._yorig)
+ y = _to_unmasked_float_array(yconv).ravel()
+ else:
+ y = self._y
+
+ self._xy = np.column_stack(np.broadcast_arrays(x, y)).astype(float)
+ self._x, self._y = self._xy.T # views
+
+ self._subslice = False
+ if (self.axes and len(x) > 1000 and self._is_sorted(x) and
+ self.axes.name == 'rectilinear' and
+ self.axes.get_xscale() == 'linear' and
+ self._markevery is None and
+ self.get_clip_on() is True):
+ self._subslice = True
+ nanmask = np.isnan(x)
+ if nanmask.any():
+ self._x_filled = self._x.copy()
+ indices = np.arange(len(x))
+ self._x_filled[nanmask] = np.interp(indices[nanmask],
+ indices[~nanmask], self._x[~nanmask])
+ else:
+ self._x_filled = self._x
+
+ if self._path is not None:
+ interpolation_steps = self._path._interpolation_steps
+ else:
+ interpolation_steps = 1
+ xy = STEP_LOOKUP_MAP[self._drawstyle](*self._xy.T)
+ self._path = Path(np.asarray(xy).T,
+ _interpolation_steps=interpolation_steps)
+ self._transformed_path = None
+ self._invalidx = False
+ self._invalidy = False
+
+ def _transform_path(self, subslice=None):
+ """
+ Puts a TransformedPath instance at self._transformed_path;
+ all invalidation of the transform is then handled by the
+ TransformedPath instance.
+ """
+ # Masked arrays are now handled by the Path class itself
+ if subslice is not None:
+ xy = STEP_LOOKUP_MAP[self._drawstyle](*self._xy[subslice, :].T)
+ _path = Path(np.asarray(xy).T,
+ _interpolation_steps=self._path._interpolation_steps)
+ else:
+ _path = self._path
+ self._transformed_path = TransformedPath(_path, self.get_transform())
+
+ def _get_transformed_path(self):
+ """
+ Return the :class:`~matplotlib.transforms.TransformedPath` instance
+ of this line.
+ """
+ if self._transformed_path is None:
+ self._transform_path()
+ return self._transformed_path
+
+ def set_transform(self, t):
+ """
+ set the Transformation instance used by this artist
+
+ ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
+ """
+ Artist.set_transform(self, t)
+ self._invalidx = True
+ self._invalidy = True
+ self.stale = True
+
+ def _is_sorted(self, x):
+ """return True if x is sorted in ascending order"""
+ # We don't handle the monotonically decreasing case.
+ return _path.is_sorted(x)
+
+ @allow_rasterization
+ def draw(self, renderer):
+ """draw the Line with `renderer` unless visibility is False"""
+ if not self.get_visible():
+ return
+
+ if self._invalidy or self._invalidx:
+ self.recache()
+ self.ind_offset = 0 # Needed for contains() method.
+ if self._subslice and self.axes:
+ x0, x1 = self.axes.get_xbound()
+ i0, = self._x_filled.searchsorted([x0], 'left')
+ i1, = self._x_filled.searchsorted([x1], 'right')
+ subslice = slice(max(i0 - 1, 0), i1 + 1)
+ self.ind_offset = subslice.start
+ self._transform_path(subslice)
+ else:
+ subslice = None
+
+ if self.get_path_effects():
+ from matplotlib.patheffects import PathEffectRenderer
+ renderer = PathEffectRenderer(self.get_path_effects(), renderer)
+
+ renderer.open_group('line2d', self.get_gid())
+ if self._lineStyles[self._linestyle] != '_draw_nothing':
+ tpath, affine = (self._get_transformed_path()
+ .get_transformed_path_and_affine())
+ if len(tpath.vertices):
+ gc = renderer.new_gc()
+ self._set_gc_clip(gc)
+
+ lc_rgba = mcolors.to_rgba(self._color, self._alpha)
+ gc.set_foreground(lc_rgba, isRGBA=True)
+
+ gc.set_antialiased(self._antialiased)
+ gc.set_linewidth(self._linewidth)
+
+ if self.is_dashed():
+ cap = self._dashcapstyle
+ join = self._dashjoinstyle
+ else:
+ cap = self._solidcapstyle
+ join = self._solidjoinstyle
+ gc.set_joinstyle(join)
+ gc.set_capstyle(cap)
+ gc.set_snap(self.get_snap())
+ if self.get_sketch_params() is not None:
+ gc.set_sketch_params(*self.get_sketch_params())
+
+ gc.set_dashes(self._dashOffset, self._dashSeq)
+ renderer.draw_path(gc, tpath, affine.frozen())
+ gc.restore()
+
+ if self._marker and self._markersize > 0:
+ gc = renderer.new_gc()
+ self._set_gc_clip(gc)
+ gc.set_linewidth(self._markeredgewidth)
+ gc.set_antialiased(self._antialiased)
+
+ ec_rgba = mcolors.to_rgba(
+ self.get_markeredgecolor(), self._alpha)
+ fc_rgba = mcolors.to_rgba(
+ self._get_markerfacecolor(), self._alpha)
+ fcalt_rgba = mcolors.to_rgba(
+ self._get_markerfacecolor(alt=True), self._alpha)
+ # If the edgecolor is "auto", it is set according to the *line*
+ # color but inherits the alpha value of the *face* color, if any.
+ if (cbook._str_equal(self._markeredgecolor, "auto")
+ and not cbook._str_lower_equal(
+ self.get_markerfacecolor(), "none")):
+ ec_rgba = ec_rgba[:3] + (fc_rgba[3],)
+ gc.set_foreground(ec_rgba, isRGBA=True)
+
+ marker = self._marker
+
+            # Markers *must* be drawn ignoring the drawstyle (but don't pay
+            # the cost of recaching if drawstyle is already "default").
+ if self.get_drawstyle() != "default":
+ with cbook._setattr_cm(
+ self, _drawstyle="default", _transformed_path=None):
+ self.recache()
+ self._transform_path(subslice)
+ tpath, affine = (self._get_transformed_path()
+ .get_transformed_path_and_affine())
+ else:
+ tpath, affine = (self._get_transformed_path()
+ .get_transformed_path_and_affine())
+
+ if len(tpath.vertices):
+ # subsample the markers if markevery is not None
+ markevery = self.get_markevery()
+ if markevery is not None:
+ subsampled = _mark_every_path(markevery, tpath,
+ affine, self.axes.transAxes)
+ else:
+ subsampled = tpath
+
+ snap = marker.get_snap_threshold()
+ if type(snap) == float:
+ snap = renderer.points_to_pixels(self._markersize) >= snap
+ gc.set_snap(snap)
+ gc.set_joinstyle(marker.get_joinstyle())
+ gc.set_capstyle(marker.get_capstyle())
+ marker_path = marker.get_path()
+ marker_trans = marker.get_transform()
+ w = renderer.points_to_pixels(self._markersize)
+
+ if cbook._str_equal(marker.get_marker(), ","):
+ gc.set_linewidth(0)
+ else:
+ # Don't scale for pixels, and don't stroke them
+ marker_trans = marker_trans.scale(w)
+
+ renderer.draw_markers(gc, marker_path, marker_trans,
+ subsampled, affine.frozen(),
+ fc_rgba)
+
+ alt_marker_path = marker.get_alt_path()
+ if alt_marker_path:
+ alt_marker_trans = marker.get_alt_transform()
+ alt_marker_trans = alt_marker_trans.scale(w)
+ renderer.draw_markers(
+ gc, alt_marker_path, alt_marker_trans, subsampled,
+ affine.frozen(), fcalt_rgba)
+
+ gc.restore()
+
+ renderer.close_group('line2d')
+ self.stale = False
+
+ def get_antialiased(self):
+ return self._antialiased
+
+ def get_color(self):
+ return self._color
+
+ def get_drawstyle(self):
+ return self._drawstyle
+
+ def get_linestyle(self):
+ return self._linestyle
+
+ def get_linewidth(self):
+ return self._linewidth
+
+ def get_marker(self):
+ return self._marker.get_marker()
+
+ def get_markeredgecolor(self):
+ mec = self._markeredgecolor
+ if isinstance(mec, six.string_types) and mec == 'auto':
+ if rcParams['_internal.classic_mode']:
+ if self._marker.get_marker() in ('.', ','):
+ return self._color
+ if self._marker.is_filled() and self.get_fillstyle() != 'none':
+ return 'k' # Bad hard-wired default...
+ return self._color
+ else:
+ return mec
+
+ def get_markeredgewidth(self):
+ return self._markeredgewidth
+
+ def _get_markerfacecolor(self, alt=False):
+ if alt:
+ fc = self._markerfacecoloralt
+ else:
+ fc = self._markerfacecolor
+ if cbook._str_lower_equal(fc, 'auto'):
+ if self.get_fillstyle() == 'none':
+ return 'none'
+ else:
+ return self._color
+ else:
+ return fc
+
+ def get_markerfacecolor(self):
+ return self._get_markerfacecolor(alt=False)
+
+ def get_markerfacecoloralt(self):
+ return self._get_markerfacecolor(alt=True)
+
+ def get_markersize(self):
+ return self._markersize
+
+ def get_data(self, orig=True):
+ """
+ Return the xdata, ydata.
+
+ If *orig* is *True*, return the original data.
+ """
+ return self.get_xdata(orig=orig), self.get_ydata(orig=orig)
+
+ def get_xdata(self, orig=True):
+ """
+ Return the xdata.
+
+ If *orig* is *True*, return the original data, else the
+ processed data.
+ """
+ if orig:
+ return self._xorig
+ if self._invalidx:
+ self.recache()
+ return self._x
+
+ def get_ydata(self, orig=True):
+ """
+ Return the ydata.
+
+ If *orig* is *True*, return the original data, else the
+ processed data.
+ """
+ if orig:
+ return self._yorig
+ if self._invalidy:
+ self.recache()
+ return self._y
+
+ def get_path(self):
+ """
+ Return the :class:`~matplotlib.path.Path` object associated
+ with this line.
+ """
+ if self._invalidy or self._invalidx:
+ self.recache()
+ return self._path
+
+ def get_xydata(self):
+ """
+ Return the *xy* data as a Nx2 numpy array.
+ """
+ if self._invalidy or self._invalidx:
+ self.recache()
+ return self._xy
+
+ def set_antialiased(self, b):
+ """
+ Set whether to use antialiased rendering.
+
+ Parameters
+ ----------
+ b : bool
+ .. ACCEPTS: bool
+ """
+ if self._antialiased != b:
+ self.stale = True
+ self._antialiased = b
+
+ def set_color(self, color):
+ """
+ Set the color of the line
+
+ ACCEPTS: any matplotlib color
+ """
+ self._color = color
+ self.stale = True
+
+ def set_drawstyle(self, drawstyle):
+ """
+ Set the drawstyle of the plot
+
+ 'default' connects the points with lines. The steps variants
+ produce step-plots. 'steps' is equivalent to 'steps-pre' and
+ is maintained for backward-compatibility.
+
+ ACCEPTS: ['default' | 'steps' | 'steps-pre' | 'steps-mid' |
+ 'steps-post']
+ """
+ if drawstyle is None:
+ drawstyle = 'default'
+ if drawstyle not in self.drawStyles:
+ raise ValueError('Unrecognized drawstyle {!r}'.format(drawstyle))
+ if self._drawstyle != drawstyle:
+ self.stale = True
+ # invalidate to trigger a recache of the path
+ self._invalidx = True
+ self._drawstyle = drawstyle
+
+ def set_linewidth(self, w):
+ """
+ Set the line width in points
+
+ ACCEPTS: float value in points
+ """
+ w = float(w)
+
+ if self._linewidth != w:
+ self.stale = True
+ self._linewidth = w
+ # rescale the dashes + offset
+ self._dashOffset, self._dashSeq = _scale_dashes(
+ self._us_dashOffset, self._us_dashSeq, self._linewidth)
+
+ def _split_drawstyle_linestyle(self, ls):
+ '''Split drawstyle from linestyle string
+
+        If `ls` is only a drawstyle, default to returning a linestyle
+        of '-'.
+
+ Parameters
+ ----------
+ ls : str
+ The linestyle to be processed
+
+ Returns
+ -------
+ ret_ds : str or None
+ If the linestyle string does not contain a drawstyle prefix
+ return None, otherwise return it.
+
+ ls : str
+ The linestyle with the drawstyle (if any) stripped.
+ '''
+ ret_ds = None
+ for ds in self.drawStyleKeys: # long names are first in the list
+ if ls.startswith(ds):
+ ret_ds = ds
+ if len(ls) > len(ds):
+ ls = ls[len(ds):]
+ else:
+ ls = '-'
+ break
+
+ return ret_ds, ls
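+
+    # Illustrative results (comment only):
+    #
+    #     _split_drawstyle_linestyle('steps--')    -> ('steps', '--')
+    #     _split_drawstyle_linestyle('steps-mid')  -> ('steps-mid', '-')
+    #     _split_drawstyle_linestyle('--')         -> (None, '--')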
+
+ def set_linestyle(self, ls):
+ """
+ Set the linestyle of the line (also accepts drawstyles,
+ e.g., ``'steps--'``)
+
+
+ =========================== =================
+ linestyle description
+ =========================== =================
+ ``'-'`` or ``'solid'`` solid line
+ ``'--'`` or ``'dashed'`` dashed line
+ ``'-.'`` or ``'dashdot'`` dash-dotted line
+ ``':'`` or ``'dotted'`` dotted line
+ ``'None'`` draw nothing
+ ``' '`` draw nothing
+ ``''`` draw nothing
+ =========================== =================
+
+ 'steps' is equivalent to 'steps-pre' and is maintained for
+ backward-compatibility.
+
+ Alternatively a dash tuple of the following form can be provided::
+
+ (offset, onoffseq),
+
+ where ``onoffseq`` is an even length tuple of on and off ink
+ in points.
+
+
+        ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted' |
+ (offset, on-off-dash-seq) |
+ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` |
+ ``' '`` | ``''``]
+
+ .. seealso::
+
+ :meth:`set_drawstyle`
+ To set the drawing style (stepping) of the plot.
+
+ Parameters
+ ----------
+        ls : {``'-'``, ``'--'``, ``'-.'``, ``':'``} and more; see description
+ The line style.
+ """
+ if isinstance(ls, six.string_types):
+ ds, ls = self._split_drawstyle_linestyle(ls)
+ if ds is not None:
+ self.set_drawstyle(ds)
+
+ if ls in [' ', '', 'none']:
+ ls = 'None'
+
+ if ls not in self._lineStyles:
+ try:
+ ls = ls_mapper_r[ls]
+ except KeyError:
+ raise ValueError(("You passed in an invalid linestyle, "
+ "`{0}`. See "
+ "docs of Line2D.set_linestyle for "
+ "valid values.").format(ls))
+ self._linestyle = ls
+ else:
+ self._linestyle = '--'
+
+ # get the unscaled dashes
+ self._us_dashOffset, self._us_dashSeq = _get_dash_pattern(ls)
+ # compute the linewidth scaled dashes
+ self._dashOffset, self._dashSeq = _scale_dashes(
+ self._us_dashOffset, self._us_dashSeq, self._linewidth)
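+
+    # Illustrative (comment only): a custom dash pattern can be passed
+    # directly as an (offset, on-off-seq) tuple, e.g.
+    #
+    #     line.set_linestyle((0, (5, 2)))   # nominally 5pt on, 2pt off
+    #
+    # which is equivalent to ``line.set_dashes((5, 2))``.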
+
+ @docstring.dedent_interpd
+ def set_marker(self, marker):
+ """
+ Set the line marker
+
+ ACCEPTS: :mod:`A valid marker style <matplotlib.markers>`
+
+ Parameters
+ ----------
+
+        marker : marker style
+            See `~matplotlib.markers` for a full description of the
+            possible arguments.
+
+ """
+ self._marker.set_marker(marker)
+ self.stale = True
+
+ def set_markeredgecolor(self, ec):
+ """
+ Set the marker edge color
+
+ ACCEPTS: any matplotlib color
+ """
+ if ec is None:
+ ec = 'auto'
+ if self._markeredgecolor is None or \
+ np.any(self._markeredgecolor != ec):
+ self.stale = True
+ self._markeredgecolor = ec
+
+ def set_markeredgewidth(self, ew):
+ """
+ Set the marker edge width in points
+
+ ACCEPTS: float value in points
+ """
+ if ew is None:
+ ew = rcParams['lines.markeredgewidth']
+ if self._markeredgewidth != ew:
+ self.stale = True
+ self._markeredgewidth = ew
+
+ def set_markerfacecolor(self, fc):
+ """
+ Set the marker face color.
+
+ ACCEPTS: any matplotlib color
+ """
+ if fc is None:
+ fc = 'auto'
+ if np.any(self._markerfacecolor != fc):
+ self.stale = True
+ self._markerfacecolor = fc
+
+ def set_markerfacecoloralt(self, fc):
+ """
+ Set the alternate marker face color.
+
+ ACCEPTS: any matplotlib color
+ """
+ if fc is None:
+ fc = 'auto'
+ if np.any(self._markerfacecoloralt != fc):
+ self.stale = True
+ self._markerfacecoloralt = fc
+
+ def set_markersize(self, sz):
+ """
+ Set the marker size in points
+
+ ACCEPTS: float
+ """
+ sz = float(sz)
+ if self._markersize != sz:
+ self.stale = True
+ self._markersize = sz
+
+ def set_xdata(self, x):
+ """
+ Set the data np.array for x
+
+ ACCEPTS: 1D array
+ """
+ self._xorig = x
+ self._invalidx = True
+ self.stale = True
+
+ def set_ydata(self, y):
+ """
+ Set the data np.array for y
+
+ ACCEPTS: 1D array
+ """
+ self._yorig = y
+ self._invalidy = True
+ self.stale = True
+
+ def set_dashes(self, seq):
+ """
+        Set the dash sequence, a sequence of on/off ink lengths in
+        points. If seq is empty or if seq = (None, None), the
+        linestyle will be set to solid.
+
+ ACCEPTS: sequence of on/off ink in points
+ """
+ if seq == (None, None) or len(seq) == 0:
+ self.set_linestyle('-')
+ else:
+ self.set_linestyle((0, seq))
+
+ def update_from(self, other):
+ """copy properties from other to self"""
+ Artist.update_from(self, other)
+ self._linestyle = other._linestyle
+ self._linewidth = other._linewidth
+ self._color = other._color
+ self._markersize = other._markersize
+ self._markerfacecolor = other._markerfacecolor
+ self._markerfacecoloralt = other._markerfacecoloralt
+ self._markeredgecolor = other._markeredgecolor
+ self._markeredgewidth = other._markeredgewidth
+ self._dashSeq = other._dashSeq
+ self._us_dashSeq = other._us_dashSeq
+ self._dashOffset = other._dashOffset
+ self._us_dashOffset = other._us_dashOffset
+ self._dashcapstyle = other._dashcapstyle
+ self._dashjoinstyle = other._dashjoinstyle
+ self._solidcapstyle = other._solidcapstyle
+ self._solidjoinstyle = other._solidjoinstyle
+
+ self._linestyle = other._linestyle
+ self._marker = MarkerStyle(other._marker.get_marker(),
+ other._marker.get_fillstyle())
+ self._drawstyle = other._drawstyle
+
+ def _get_rgba_face(self, alt=False):
+ return mcolors.to_rgba(self._get_markerfacecolor(alt=alt), self._alpha)
+
+ def _get_rgba_ln_color(self, alt=False):
+ return mcolors.to_rgba(self._color, self._alpha)
+
+ # some aliases....
+ def set_aa(self, val):
+ 'alias for set_antialiased'
+ self.set_antialiased(val)
+
+ def set_c(self, val):
+ 'alias for set_color'
+ self.set_color(val)
+
+ def set_ls(self, val):
+ """alias for set_linestyle"""
+ self.set_linestyle(val)
+
+ def set_lw(self, val):
+ """alias for set_linewidth"""
+ self.set_linewidth(val)
+
+ def set_mec(self, val):
+ """alias for set_markeredgecolor"""
+ self.set_markeredgecolor(val)
+
+ def set_mew(self, val):
+ """alias for set_markeredgewidth"""
+ self.set_markeredgewidth(val)
+
+ def set_mfc(self, val):
+ """alias for set_markerfacecolor"""
+ self.set_markerfacecolor(val)
+
+ def set_mfcalt(self, val):
+ """alias for set_markerfacecoloralt"""
+ self.set_markerfacecoloralt(val)
+
+ def set_ms(self, val):
+ """alias for set_markersize"""
+ self.set_markersize(val)
+
+ def get_aa(self):
+ """alias for get_antialiased"""
+ return self.get_antialiased()
+
+ def get_c(self):
+ """alias for get_color"""
+ return self.get_color()
+
+ def get_ls(self):
+ """alias for get_linestyle"""
+ return self.get_linestyle()
+
+ def get_lw(self):
+ """alias for get_linewidth"""
+ return self.get_linewidth()
+
+ def get_mec(self):
+ """alias for get_markeredgecolor"""
+ return self.get_markeredgecolor()
+
+ def get_mew(self):
+ """alias for get_markeredgewidth"""
+ return self.get_markeredgewidth()
+
+ def get_mfc(self):
+ """alias for get_markerfacecolor"""
+ return self.get_markerfacecolor()
+
+ def get_mfcalt(self, alt=False):
+ """alias for get_markerfacecoloralt"""
+ return self.get_markerfacecoloralt()
+
+ def get_ms(self):
+ """alias for get_markersize"""
+ return self.get_markersize()
+
+ def set_dash_joinstyle(self, s):
+ """
+ Set the join style for dashed linestyles
+ ACCEPTS: ['miter' | 'round' | 'bevel']
+ """
+ s = s.lower()
+ if s not in self.validJoin:
+ raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,)
+ + 'valid joinstyles are %s' % (self.validJoin,))
+ if self._dashjoinstyle != s:
+ self.stale = True
+ self._dashjoinstyle = s
+
+ def set_solid_joinstyle(self, s):
+ """
+ Set the join style for solid linestyles
+ ACCEPTS: ['miter' | 'round' | 'bevel']
+ """
+ s = s.lower()
+ if s not in self.validJoin:
+ raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,)
+ + 'valid joinstyles are %s' % (self.validJoin,))
+
+ if self._solidjoinstyle != s:
+ self.stale = True
+ self._solidjoinstyle = s
+
+ def get_dash_joinstyle(self):
+ """
+ Get the join style for dashed linestyles
+ """
+ return self._dashjoinstyle
+
+ def get_solid_joinstyle(self):
+ """
+ Get the join style for solid linestyles
+ """
+ return self._solidjoinstyle
+
+ def set_dash_capstyle(self, s):
+ """
+ Set the cap style for dashed linestyles
+
+ ACCEPTS: ['butt' | 'round' | 'projecting']
+ """
+ s = s.lower()
+ if s not in self.validCap:
+ raise ValueError('set_dash_capstyle passed "%s";\n' % (s,)
+ + 'valid capstyles are %s' % (self.validCap,))
+ if self._dashcapstyle != s:
+ self.stale = True
+ self._dashcapstyle = s
+
+ def set_solid_capstyle(self, s):
+ """
+ Set the cap style for solid linestyles
+
+ ACCEPTS: ['butt' | 'round' | 'projecting']
+ """
+ s = s.lower()
+ if s not in self.validCap:
+ raise ValueError('set_solid_capstyle passed "%s";\n' % (s,)
+ + 'valid capstyles are %s' % (self.validCap,))
+ if self._solidcapstyle != s:
+ self.stale = True
+ self._solidcapstyle = s
+
+ def get_dash_capstyle(self):
+ """
+ Get the cap style for dashed linestyles
+ """
+ return self._dashcapstyle
+
+ def get_solid_capstyle(self):
+ """
+ Get the cap style for solid linestyles
+ """
+ return self._solidcapstyle
+
+ def is_dashed(self):
+        'return True if the linestyle is dashed'
+ return self._linestyle in ('--', '-.', ':')
+
+
+class VertexSelector(object):
+ """
+ Manage the callbacks to maintain a list of selected vertices for
+ :class:`matplotlib.lines.Line2D`. Derived classes should override
+ :meth:`~matplotlib.lines.VertexSelector.process_selected` to do
+ something with the picks.
+
+ Here is an example which highlights the selected verts with red
+ circles::
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import matplotlib.lines as lines
+
+ class HighlightSelected(lines.VertexSelector):
+ def __init__(self, line, fmt='ro', **kwargs):
+ lines.VertexSelector.__init__(self, line)
+ self.markers, = self.axes.plot([], [], fmt, **kwargs)
+
+ def process_selected(self, ind, xs, ys):
+ self.markers.set_data(xs, ys)
+ self.canvas.draw()
+
+ fig, ax = plt.subplots()
+ x, y = np.random.rand(2, 30)
+ line, = ax.plot(x, y, 'bs-', picker=5)
+
+ selector = HighlightSelected(line)
+ plt.show()
+
+ """
+ def __init__(self, line):
+ """
+ Initialize the class with a :class:`matplotlib.lines.Line2D`
+ instance. The line should already be added to some
+ :class:`matplotlib.axes.Axes` instance and should have the
+ picker property set.
+ """
+ if line.axes is None:
+ raise RuntimeError('You must first add the line to the Axes')
+
+ if line.get_picker() is None:
+ raise RuntimeError('You must first set the picker property '
+ 'of the line')
+
+ self.axes = line.axes
+ self.line = line
+ self.canvas = self.axes.figure.canvas
+ self.cid = self.canvas.mpl_connect('pick_event', self.onpick)
+
+ self.ind = set()
+
+ def process_selected(self, ind, xs, ys):
+ """
+ Default "do nothing" implementation of the
+ :meth:`process_selected` method.
+
+ *ind* are the indices of the selected vertices. *xs* and *ys*
+ are the coordinates of the selected vertices.
+ """
+ pass
+
+ def onpick(self, event):
+ """When the line is picked, update the set of selected indices."""
+ if event.artist is not self.line:
+ return
+ self.ind ^= set(event.ind)
+ ind = sorted(self.ind)
+ xdata, ydata = self.line.get_data()
+ self.process_selected(ind, xdata[ind], ydata[ind])
+
+
+lineStyles = Line2D._lineStyles
+lineMarkers = MarkerStyle.markers
+drawStyles = Line2D.drawStyles
+fillStyles = MarkerStyle.fillstyles
+
+docstring.interpd.update(Line2D=artist.kwdoc(Line2D))
+
+# You can not set the docstring of an instancemethod,
+# but you can on the underlying function. Go figure.
+docstring.dedent_interpd(Line2D.__init__)
diff --git a/contrib/python/matplotlib/py2/matplotlib/markers.py b/contrib/python/matplotlib/py2/matplotlib/markers.py
new file mode 100644
index 00000000000..ff27c4b253b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/markers.py
@@ -0,0 +1,896 @@
+"""
+This module contains functions to handle markers. Used by both the
+marker functionality of `~matplotlib.axes.Axes.plot` and
+`~matplotlib.axes.Axes.scatter`.
+
+All possible markers are defined here:
+
+============================== ===============================================
+marker description
+============================== ===============================================
+`"."` point
+`","` pixel
+`"o"` circle
+`"v"` triangle_down
+`"^"` triangle_up
+`"<"` triangle_left
+`">"` triangle_right
+`"1"` tri_down
+`"2"` tri_up
+`"3"` tri_left
+`"4"` tri_right
+`"8"` octagon
+`"s"` square
+`"p"` pentagon
+`"P"` plus (filled)
+`"*"` star
+`"h"` hexagon1
+`"H"` hexagon2
+`"+"` plus
+`"x"` x
+`"X"` x (filled)
+`"D"` diamond
+`"d"` thin_diamond
+`"|"` vline
+`"_"` hline
+TICKLEFT tickleft
+TICKRIGHT tickright
+TICKUP tickup
+TICKDOWN tickdown
+CARETLEFT caretleft (centered at tip)
+CARETRIGHT caretright (centered at tip)
+CARETUP caretup (centered at tip)
+CARETDOWN caretdown (centered at tip)
+CARETLEFTBASE caretleft (centered at base)
+CARETRIGHTBASE caretright (centered at base)
+CARETUPBASE caretup (centered at base)
+`"None"`, `" "` or `""` nothing
+``'$...$'`` render the string using mathtext.
+`verts` a list of (x, y) pairs used for Path vertices.
+ The center of the marker is located at (0,0) and
+ the size is normalized.
+path a `~matplotlib.path.Path` instance.
+(`numsides`, `style`, `angle`) The marker can also be a tuple (`numsides`,
+ `style`, `angle`), which will create a custom,
+ regular symbol.
+
+ `numsides`:
+ the number of sides
+
+ `style`:
+ the style of the regular symbol:
+
+ 0
+ a regular polygon
+ 1
+ a star-like symbol
+ 2
+ an asterisk
+ 3
+                                       a circle (`numsides` and `angle` are
+ ignored)
+
+ `angle`:
+ the angle of rotation of the symbol
+============================== ===============================================
+
+For backward compatibility, the form (`verts`, 0) is also accepted,
+but it is equivalent to just `verts` for giving a raw set of vertices
+that define the shape.
+
+`None` is the default, which means 'nothing'. However, this table is
+referred to from other docs for the valid marker inputs, and in
+those cases `None` still means 'default'.
+"""
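+
+# Illustrative uses of the marker forms listed above (comment only; assumes
+# an interactive pyplot session):
+#
+#     import matplotlib.pyplot as plt
+#     plt.plot([1, 2, 3], marker='o')          # circle
+#     plt.plot([1, 2, 3], marker=(5, 1, 0))    # 5-pointed star-like symbol
+#     plt.plot([1, 2, 3], marker=r'$\mu$')     # mathtext marker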
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange
+
+from collections import Sized
+
+import numpy as np
+
+from . import rcParams
+from .cbook import is_math_text, is_numlike
+from .path import Path
+from .transforms import IdentityTransform, Affine2D
+
+# special-purpose marker identifiers:
+(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
+ CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
+ CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE) = xrange(12)
+
+_empty_path = Path(np.empty((0, 2)))
+
+
+class MarkerStyle(object):
+
+ markers = {
+ '.': 'point',
+ ',': 'pixel',
+ 'o': 'circle',
+ 'v': 'triangle_down',
+ '^': 'triangle_up',
+ '<': 'triangle_left',
+ '>': 'triangle_right',
+ '1': 'tri_down',
+ '2': 'tri_up',
+ '3': 'tri_left',
+ '4': 'tri_right',
+ '8': 'octagon',
+ 's': 'square',
+ 'p': 'pentagon',
+ '*': 'star',
+ 'h': 'hexagon1',
+ 'H': 'hexagon2',
+ '+': 'plus',
+ 'x': 'x',
+ 'D': 'diamond',
+ 'd': 'thin_diamond',
+ '|': 'vline',
+ '_': 'hline',
+ 'P': 'plus_filled',
+ 'X': 'x_filled',
+ TICKLEFT: 'tickleft',
+ TICKRIGHT: 'tickright',
+ TICKUP: 'tickup',
+ TICKDOWN: 'tickdown',
+ CARETLEFT: 'caretleft',
+ CARETRIGHT: 'caretright',
+ CARETUP: 'caretup',
+ CARETDOWN: 'caretdown',
+ CARETLEFTBASE: 'caretleftbase',
+ CARETRIGHTBASE: 'caretrightbase',
+ CARETUPBASE: 'caretupbase',
+ CARETDOWNBASE: 'caretdownbase',
+ "None": 'nothing',
+ None: 'nothing',
+ ' ': 'nothing',
+ '': 'nothing'
+ }
+
+ # Just used for informational purposes. is_filled()
+ # is calculated in the _set_* functions.
+ filled_markers = (
+ 'o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd',
+ 'P', 'X')
+
+ fillstyles = ('full', 'left', 'right', 'bottom', 'top', 'none')
+ _half_fillstyles = ('left', 'right', 'bottom', 'top')
+
+ # TODO: Is this ever used as a non-constant?
+ _point_size_reduction = 0.5
+
+ def __init__(self, marker=None, fillstyle=None):
+ """
+ MarkerStyle
+
+ Attributes
+ ----------
+        markers : list of known marker symbols
+
+ fillstyles : list of known fillstyles
+
+ filled_markers : list of known filled markers.
+
+ Parameters
+ ----------
+ marker : string or array_like, optional, default: None
+ See the descriptions of possible markers in the module docstring.
+
+ fillstyle : string, optional, default: 'full'
+            'full', 'left', 'right', 'bottom', 'top', 'none'
+ """
+ self._marker_function = None
+ self.set_fillstyle(fillstyle)
+ self.set_marker(marker)
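+
+    # Illustrative (comment only): MarkerStyle('o', fillstyle='left')
+    # describes a circle whose left half is drawn with the primary face color
+    # and whose right half uses the alternate face color (see
+    # Line2D.set_markerfacecoloralt).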
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ d.pop('_marker_function')
+ return d
+
+ def __setstate__(self, statedict):
+ self.__dict__ = statedict
+ self.set_marker(self._marker)
+
+ def _recache(self):
+ if self._marker_function is None:
+ return
+ self._path = _empty_path
+ self._transform = IdentityTransform()
+ self._alt_path = None
+ self._alt_transform = None
+ self._snap_threshold = None
+ self._joinstyle = 'round'
+ self._capstyle = 'butt'
+ self._filled = True
+ self._marker_function()
+
+ if six.PY3:
+ def __bool__(self):
+ return bool(len(self._path.vertices))
+ else:
+ def __nonzero__(self):
+ return bool(len(self._path.vertices))
+
+ def is_filled(self):
+ return self._filled
+
+ def get_fillstyle(self):
+ return self._fillstyle
+
+ def set_fillstyle(self, fillstyle):
+ """
+        Set the fillstyle.
+
+        Parameters
+        ----------
+        fillstyle : string
+            One of the known fillstyles: 'full', 'left', 'right', 'bottom',
+            'top', 'none'.
+ """
+ if fillstyle is None:
+ fillstyle = rcParams['markers.fillstyle']
+ if fillstyle not in self.fillstyles:
+            raise ValueError("Unrecognized fillstyle %r; valid fillstyles "
+                             "are %s" % (fillstyle, ', '.join(self.fillstyles)))
+ self._fillstyle = fillstyle
+ self._recache()
+
+ def get_joinstyle(self):
+ return self._joinstyle
+
+ def get_capstyle(self):
+ return self._capstyle
+
+ def get_marker(self):
+ return self._marker
+
+ def set_marker(self, marker):
+ if (isinstance(marker, np.ndarray) and marker.ndim == 2 and
+ marker.shape[1] == 2):
+ self._marker_function = self._set_vertices
+ elif (isinstance(marker, Sized) and len(marker) in (2, 3) and
+ marker[1] in (0, 1, 2, 3)):
+ self._marker_function = self._set_tuple_marker
+ elif (not isinstance(marker, (np.ndarray, list)) and
+ marker in self.markers):
+ self._marker_function = getattr(
+ self, '_set_' + self.markers[marker])
+ elif isinstance(marker, six.string_types) and is_math_text(marker):
+ self._marker_function = self._set_mathtext_path
+ elif isinstance(marker, Path):
+ self._marker_function = self._set_path_marker
+ else:
+ try:
+ Path(marker)
+ self._marker_function = self._set_vertices
+ except ValueError:
+ raise ValueError('Unrecognized marker style'
+ ' {0}'.format(marker))
+
+ self._marker = marker
+ self._recache()
+
+ def get_path(self):
+ return self._path
+
+ def get_transform(self):
+ return self._transform.frozen()
+
+ def get_alt_path(self):
+ return self._alt_path
+
+ def get_alt_transform(self):
+ return self._alt_transform.frozen()
+
+ def get_snap_threshold(self):
+ return self._snap_threshold
+
+ def _set_nothing(self):
+ self._filled = False
+
+ def _set_custom_marker(self, path):
+ verts = path.vertices
+ rescale = max(np.max(np.abs(verts[:, 0])),
+ np.max(np.abs(verts[:, 1])))
+ self._transform = Affine2D().scale(0.5 / rescale)
+ self._path = path
+
+ def _set_path_marker(self):
+ self._set_custom_marker(self._marker)
+
+ def _set_vertices(self):
+ verts = self._marker
+ marker = Path(verts)
+ self._set_custom_marker(marker)
+
+ def _set_tuple_marker(self):
+ marker = self._marker
+ if is_numlike(marker[0]):
+ if len(marker) == 2:
+ numsides, rotation = marker[0], 0.0
+ elif len(marker) == 3:
+ numsides, rotation = marker[0], marker[2]
+ symstyle = marker[1]
+ if symstyle == 0:
+ self._path = Path.unit_regular_polygon(numsides)
+ self._joinstyle = 'miter'
+ elif symstyle == 1:
+ self._path = Path.unit_regular_star(numsides)
+ self._joinstyle = 'bevel'
+ elif symstyle == 2:
+ self._path = Path.unit_regular_asterisk(numsides)
+ self._filled = False
+ self._joinstyle = 'bevel'
+ elif symstyle == 3:
+ self._path = Path.unit_circle()
+ self._transform = Affine2D().scale(0.5).rotate_deg(rotation)
+ else:
+ verts = np.asarray(marker[0])
+ path = Path(verts)
+ self._set_custom_marker(path)
+
+ def _set_mathtext_path(self):
+ """
+        Draws mathtext markers '$...$' using a TextPath object.
+
+ Submitted by tcb
+ """
+ from matplotlib.text import TextPath
+ from matplotlib.font_manager import FontProperties
+
+ # again, the properties could be initialised just once outside
+ # this function
+ # Font size is irrelevant here, it will be rescaled based on
+ # the drawn size later
+ props = FontProperties(size=1.0)
+ text = TextPath(xy=(0, 0), s=self.get_marker(), fontproperties=props,
+ usetex=rcParams['text.usetex'])
+ if len(text.vertices) == 0:
+ return
+
+ xmin, ymin = text.vertices.min(axis=0)
+ xmax, ymax = text.vertices.max(axis=0)
+ width = xmax - xmin
+ height = ymax - ymin
+ max_dim = max(width, height)
+ self._transform = Affine2D() \
+ .translate(-xmin + 0.5 * -width, -ymin + 0.5 * -height) \
+ .scale(1.0 / max_dim)
+ self._path = text
+ self._snap = False
+
+ def _half_fill(self):
+ fs = self.get_fillstyle()
+ result = fs in self._half_fillstyles
+ return result
+
+ def _set_circle(self, reduction=1.0):
+ self._transform = Affine2D().scale(0.5 * reduction)
+ self._snap_threshold = np.inf
+ fs = self.get_fillstyle()
+ if not self._half_fill():
+ self._path = Path.unit_circle()
+ else:
+ # build a right-half circle
+ if fs == 'bottom':
+ rotate = 270.
+ elif fs == 'top':
+ rotate = 90.
+ elif fs == 'left':
+ rotate = 180.
+ else:
+ rotate = 0.
+
+ self._path = self._alt_path = Path.unit_circle_righthalf()
+ self._transform.rotate_deg(rotate)
+ self._alt_transform = self._transform.frozen().rotate_deg(180.)
+
+ def _set_pixel(self):
+ self._path = Path.unit_rectangle()
+ # Ideally, you'd want -0.5, -0.5 here, but then the snapping
+ # algorithm in the Agg backend will round this to a 2x2
+ # rectangle from (-1, -1) to (1, 1). By offsetting it
+ # slightly, we can force it to be (0, 0) to (1, 1), which both
+ # makes it only be a single pixel and places it correctly
+ # aligned to 1-width stroking (i.e. the ticks). This hack is
+ # the best of a number of bad alternatives, mainly because the
+ # backends are not aware of what marker is actually being used
+ # beyond just its path data.
+ self._transform = Affine2D().translate(-0.49999, -0.49999)
+ self._snap_threshold = None
+
+ def _set_point(self):
+ self._set_circle(reduction=self._point_size_reduction)
+
+ _triangle_path = Path(
+ [[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]],
+ [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
+    # Going down halfway looks too small. Golden ratio is too far.
+ _triangle_path_u = Path(
+ [[0.0, 1.0], [-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [0.0, 1.0]],
+ [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
+ _triangle_path_d = Path(
+ [[-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [1.0, -1.0], [-1.0, -1.0],
+ [-3 / 5., -1 / 5.]],
+ [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
+ _triangle_path_l = Path(
+ [[0.0, 1.0], [0.0, -1.0], [-1.0, -1.0], [0.0, 1.0]],
+ [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
+ _triangle_path_r = Path(
+ [[0.0, 1.0], [0.0, -1.0], [1.0, -1.0], [0.0, 1.0]],
+ [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
+
+ def _set_triangle(self, rot, skip):
+ self._transform = Affine2D().scale(0.5, 0.5).rotate_deg(rot)
+ self._snap_threshold = 5.0
+ fs = self.get_fillstyle()
+
+ if not self._half_fill():
+ self._path = self._triangle_path
+ else:
+ mpaths = [self._triangle_path_u,
+ self._triangle_path_l,
+ self._triangle_path_d,
+ self._triangle_path_r]
+
+ if fs == 'top':
+ self._path = mpaths[(0 + skip) % 4]
+ self._alt_path = mpaths[(2 + skip) % 4]
+ elif fs == 'bottom':
+ self._path = mpaths[(2 + skip) % 4]
+ self._alt_path = mpaths[(0 + skip) % 4]
+ elif fs == 'left':
+ self._path = mpaths[(1 + skip) % 4]
+ self._alt_path = mpaths[(3 + skip) % 4]
+ else:
+ self._path = mpaths[(3 + skip) % 4]
+ self._alt_path = mpaths[(1 + skip) % 4]
+
+ self._alt_transform = self._transform
+
+ self._joinstyle = 'miter'
+
+ def _set_triangle_up(self):
+ return self._set_triangle(0.0, 0)
+
+ def _set_triangle_down(self):
+ return self._set_triangle(180.0, 2)
+
+ def _set_triangle_left(self):
+ return self._set_triangle(90.0, 3)
+
+ def _set_triangle_right(self):
+ return self._set_triangle(270.0, 1)
+
+ def _set_square(self):
+ self._transform = Affine2D().translate(-0.5, -0.5)
+ self._snap_threshold = 2.0
+ fs = self.get_fillstyle()
+ if not self._half_fill():
+ self._path = Path.unit_rectangle()
+ else:
+ # build a bottom filled square out of two rectangles, one
+ # filled. Use the rotation to support left, right, bottom
+ # or top
+ if fs == 'bottom':
+ rotate = 0.
+ elif fs == 'top':
+ rotate = 180.
+ elif fs == 'left':
+ rotate = 270.
+ else:
+ rotate = 90.
+
+ self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 0.5],
+ [0.0, 0.5], [0.0, 0.0]])
+ self._alt_path = Path([[0.0, 0.5], [1.0, 0.5], [1.0, 1.0],
+ [0.0, 1.0], [0.0, 0.5]])
+ self._transform.rotate_deg(rotate)
+ self._alt_transform = self._transform
+
+ self._joinstyle = 'miter'
+
+ def _set_diamond(self):
+ self._transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45)
+ self._snap_threshold = 5.0
+ fs = self.get_fillstyle()
+ if not self._half_fill():
+ self._path = Path.unit_rectangle()
+ else:
+ self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
+ self._alt_path = Path([[0.0, 0.0], [0.0, 1.0],
+ [1.0, 1.0], [0.0, 0.0]])
+
+ if fs == 'bottom':
+ rotate = 270.
+ elif fs == 'top':
+ rotate = 90.
+ elif fs == 'left':
+ rotate = 180.
+ else:
+ rotate = 0.
+
+ self._transform.rotate_deg(rotate)
+ self._alt_transform = self._transform
+
+ self._joinstyle = 'miter'
+
+ def _set_thin_diamond(self):
+ self._set_diamond()
+ self._transform.scale(0.6, 1.0)
+
+ def _set_pentagon(self):
+ self._transform = Affine2D().scale(0.5)
+ self._snap_threshold = 5.0
+
+ polypath = Path.unit_regular_polygon(5)
+ fs = self.get_fillstyle()
+
+ if not self._half_fill():
+ self._path = polypath
+ else:
+ verts = polypath.vertices
+
+ y = (1 + np.sqrt(5)) / 4.
+ top = Path([verts[0], verts[1], verts[4], verts[0]])
+ bottom = Path([verts[1], verts[2], verts[3], verts[4], verts[1]])
+ left = Path([verts[0], verts[1], verts[2], [0, -y], verts[0]])
+ right = Path([verts[0], verts[4], verts[3], [0, -y], verts[0]])
+
+ if fs == 'top':
+ mpath, mpath_alt = top, bottom
+ elif fs == 'bottom':
+ mpath, mpath_alt = bottom, top
+ elif fs == 'left':
+ mpath, mpath_alt = left, right
+ else:
+ mpath, mpath_alt = right, left
+ self._path = mpath
+ self._alt_path = mpath_alt
+ self._alt_transform = self._transform
+
+ self._joinstyle = 'miter'
+
+ def _set_star(self):
+ self._transform = Affine2D().scale(0.5)
+ self._snap_threshold = 5.0
+
+ fs = self.get_fillstyle()
+ polypath = Path.unit_regular_star(5, innerCircle=0.381966)
+
+ if not self._half_fill():
+ self._path = polypath
+ else:
+ verts = polypath.vertices
+
+ top = Path(np.vstack((verts[0:4, :], verts[7:10, :], verts[0])))
+ bottom = Path(np.vstack((verts[3:8, :], verts[3])))
+ left = Path(np.vstack((verts[0:6, :], verts[0])))
+ right = Path(np.vstack((verts[0], verts[5:10, :], verts[0])))
+
+ if fs == 'top':
+ mpath, mpath_alt = top, bottom
+ elif fs == 'bottom':
+ mpath, mpath_alt = bottom, top
+ elif fs == 'left':
+ mpath, mpath_alt = left, right
+ else:
+ mpath, mpath_alt = right, left
+ self._path = mpath
+ self._alt_path = mpath_alt
+ self._alt_transform = self._transform
+
+ self._joinstyle = 'bevel'
+
+ def _set_hexagon1(self):
+ self._transform = Affine2D().scale(0.5)
+ self._snap_threshold = None
+
+ fs = self.get_fillstyle()
+ polypath = Path.unit_regular_polygon(6)
+
+ if not self._half_fill():
+ self._path = polypath
+ else:
+ verts = polypath.vertices
+
+ # not drawing inside lines
+ x = np.abs(np.cos(5 * np.pi / 6.))
+ top = Path(np.vstack(([-x, 0], verts[(1, 0, 5), :], [x, 0])))
+ bottom = Path(np.vstack(([-x, 0], verts[2:5, :], [x, 0])))
+ left = Path(verts[(0, 1, 2, 3), :])
+ right = Path(verts[(0, 5, 4, 3), :])
+
+ if fs == 'top':
+ mpath, mpath_alt = top, bottom
+ elif fs == 'bottom':
+ mpath, mpath_alt = bottom, top
+ elif fs == 'left':
+ mpath, mpath_alt = left, right
+ else:
+ mpath, mpath_alt = right, left
+
+ self._path = mpath
+ self._alt_path = mpath_alt
+ self._alt_transform = self._transform
+
+ self._joinstyle = 'miter'
+
+ def _set_hexagon2(self):
+ self._transform = Affine2D().scale(0.5).rotate_deg(30)
+ self._snap_threshold = None
+
+ fs = self.get_fillstyle()
+ polypath = Path.unit_regular_polygon(6)
+
+ if not self._half_fill():
+ self._path = polypath
+ else:
+ verts = polypath.vertices
+
+ # not drawing inside lines
+ x, y = np.sqrt(3) / 4, 3 / 4.
+ top = Path(verts[(1, 0, 5, 4, 1), :])
+ bottom = Path(verts[(1, 2, 3, 4), :])
+ left = Path(np.vstack(([x, y], verts[(0, 1, 2), :],
+ [-x, -y], [x, y])))
+ right = Path(np.vstack(([x, y], verts[(5, 4, 3), :], [-x, -y])))
+
+ if fs == 'top':
+ mpath, mpath_alt = top, bottom
+ elif fs == 'bottom':
+ mpath, mpath_alt = bottom, top
+ elif fs == 'left':
+ mpath, mpath_alt = left, right
+ else:
+ mpath, mpath_alt = right, left
+
+ self._path = mpath
+ self._alt_path = mpath_alt
+ self._alt_transform = self._transform
+
+ self._joinstyle = 'miter'
+
+ def _set_octagon(self):
+ self._transform = Affine2D().scale(0.5)
+ self._snap_threshold = 5.0
+
+ fs = self.get_fillstyle()
+ polypath = Path.unit_regular_polygon(8)
+
+ if not self._half_fill():
+ self._transform.rotate_deg(22.5)
+ self._path = polypath
+ else:
+ x = np.sqrt(2.) / 4.
+ half = Path([[0, -1], [0, 1], [-x, 1], [-1, x],
+ [-1, -x], [-x, -1], [0, -1]])
+
+ if fs == 'bottom':
+ rotate = 90.
+ elif fs == 'top':
+ rotate = 270.
+ elif fs == 'right':
+ rotate = 180.
+ else:
+ rotate = 0.
+
+ self._transform.rotate_deg(rotate)
+ self._path = self._alt_path = half
+ self._alt_transform = self._transform.frozen().rotate_deg(180.0)
+
+ self._joinstyle = 'miter'
+
+ _line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
+
+ def _set_vline(self):
+ self._transform = Affine2D().scale(0.5)
+ self._snap_threshold = 1.0
+ self._filled = False
+ self._path = self._line_marker_path
+
+ def _set_hline(self):
+ self._set_vline()
+ self._transform = self._transform.rotate_deg(90)
+
+ _tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
+
+ def _set_tickleft(self):
+ self._transform = Affine2D().scale(-1.0, 1.0)
+ self._snap_threshold = 1.0
+ self._filled = False
+ self._path = self._tickhoriz_path
+
+ def _set_tickright(self):
+ self._transform = Affine2D().scale(1.0, 1.0)
+ self._snap_threshold = 1.0
+ self._filled = False
+ self._path = self._tickhoriz_path
+
+ _tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
+
+ def _set_tickup(self):
+ self._transform = Affine2D().scale(1.0, 1.0)
+ self._snap_threshold = 1.0
+ self._filled = False
+ self._path = self._tickvert_path
+
+ def _set_tickdown(self):
+ self._transform = Affine2D().scale(1.0, -1.0)
+ self._snap_threshold = 1.0
+ self._filled = False
+ self._path = self._tickvert_path
+
+ _tri_path = Path([[0.0, 0.0], [0.0, -1.0],
+ [0.0, 0.0], [0.8, 0.5],
+ [0.0, 0.0], [-0.8, 0.5]],
+ [Path.MOVETO, Path.LINETO,
+ Path.MOVETO, Path.LINETO,
+ Path.MOVETO, Path.LINETO])
+
+ def _set_tri_down(self):
+ self._transform = Affine2D().scale(0.5)
+ self._snap_threshold = 5.0
+ self._filled = False
+ self._path = self._tri_path
+
+ def _set_tri_up(self):
+ self._set_tri_down()
+ self._transform = self._transform.rotate_deg(180)
+
+ def _set_tri_left(self):
+ self._set_tri_down()
+ self._transform = self._transform.rotate_deg(270)
+
+ def _set_tri_right(self):
+ self._set_tri_down()
+ self._transform = self._transform.rotate_deg(90)
+
+ _caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
+
+ def _set_caretdown(self):
+ self._transform = Affine2D().scale(0.5)
+ self._snap_threshold = 3.0
+ self._filled = False
+ self._path = self._caret_path
+ self._joinstyle = 'miter'
+
+ def _set_caretup(self):
+ self._set_caretdown()
+ self._transform = self._transform.rotate_deg(180)
+
+ def _set_caretleft(self):
+ self._set_caretdown()
+ self._transform = self._transform.rotate_deg(270)
+
+ def _set_caretright(self):
+ self._set_caretdown()
+ self._transform = self._transform.rotate_deg(90)
+
+ _caret_path_base = Path([[-1.0, 0.0], [0.0, -1.5], [1.0, 0]])
+
+ def _set_caretdownbase(self):
+ self._set_caretdown()
+ self._path = self._caret_path_base
+
+ def _set_caretupbase(self):
+ self._set_caretdownbase()
+ self._transform = self._transform.rotate_deg(180)
+
+ def _set_caretleftbase(self):
+ self._set_caretdownbase()
+ self._transform = self._transform.rotate_deg(270)
+
+ def _set_caretrightbase(self):
+ self._set_caretdownbase()
+ self._transform = self._transform.rotate_deg(90)
+
+ _plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
+ [0.0, -1.0], [0.0, 1.0]],
+ [Path.MOVETO, Path.LINETO,
+ Path.MOVETO, Path.LINETO])
+
+ def _set_plus(self):
+ self._transform = Affine2D().scale(0.5)
+ self._snap_threshold = 1.0
+ self._filled = False
+ self._path = self._plus_path
+
+ _x_path = Path([[-1.0, -1.0], [1.0, 1.0],
+ [-1.0, 1.0], [1.0, -1.0]],
+ [Path.MOVETO, Path.LINETO,
+ Path.MOVETO, Path.LINETO])
+
+ def _set_x(self):
+ self._transform = Affine2D().scale(0.5)
+ self._snap_threshold = 3.0
+ self._filled = False
+ self._path = self._x_path
+
+ _plus_filled_path = Path([(1/3, 0), (2/3, 0), (2/3, 1/3),
+ (1, 1/3), (1, 2/3), (2/3, 2/3),
+ (2/3, 1), (1/3, 1), (1/3, 2/3),
+ (0, 2/3), (0, 1/3), (1/3, 1/3),
+ (1/3, 0)],
+ [Path.MOVETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.CLOSEPOLY])
+
+ _plus_filled_path_t = Path([(1, 1/2), (1, 2/3), (2/3, 2/3),
+ (2/3, 1), (1/3, 1), (1/3, 2/3),
+ (0, 2/3), (0, 1/2), (1, 1/2)],
+ [Path.MOVETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.LINETO,
+ Path.CLOSEPOLY])
+
+ def _set_plus_filled(self):
+ self._transform = Affine2D().translate(-0.5, -0.5)
+ self._snap_threshold = 5.0
+ self._joinstyle = 'miter'
+ fs = self.get_fillstyle()
+ if not self._half_fill():
+ self._path = self._plus_filled_path
+ else:
+ # Rotate top half path to support all partitions
+ if fs == 'top':
+ rotate, rotate_alt = 0, 180
+ elif fs == 'bottom':
+ rotate, rotate_alt = 180, 0
+ elif fs == 'left':
+ rotate, rotate_alt = 90, 270
+ else:
+ rotate, rotate_alt = 270, 90
+
+ self._path = self._plus_filled_path_t
+ self._alt_path = self._plus_filled_path_t
+ self._alt_transform = Affine2D().translate(-0.5, -0.5)
+ self._transform.rotate_deg(rotate)
+ self._alt_transform.rotate_deg(rotate_alt)
+
+ _x_filled_path = Path([(0.25, 0), (0.5, 0.25), (0.75, 0), (1, 0.25),
+ (0.75, 0.5), (1, 0.75), (0.75, 1), (0.5, 0.75),
+ (0.25, 1), (0, 0.75), (0.25, 0.5), (0, 0.25),
+ (0.25, 0)],
+ [Path.MOVETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.CLOSEPOLY])
+
+ _x_filled_path_t = Path([(0.75, 0.5), (1, 0.75), (0.75, 1),
+ (0.5, 0.75), (0.25, 1), (0, 0.75),
+ (0.25, 0.5), (0.75, 0.5)],
+ [Path.MOVETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.CLOSEPOLY])
+
+ def _set_x_filled(self):
+ self._transform = Affine2D().translate(-0.5, -0.5)
+ self._snap_threshold = 5.0
+ self._joinstyle = 'miter'
+ fs = self.get_fillstyle()
+ if not self._half_fill():
+ self._path = self._x_filled_path
+ else:
+ # Rotate top half path to support all partitions
+ if fs == 'top':
+ rotate, rotate_alt = 0, 180
+ elif fs == 'bottom':
+ rotate, rotate_alt = 180, 0
+ elif fs == 'left':
+ rotate, rotate_alt = 90, 270
+ else:
+ rotate, rotate_alt = 270, 90
+
+ self._path = self._x_filled_path_t
+ self._alt_path = self._x_filled_path_t
+ self._alt_transform = Affine2D().translate(-0.5, -0.5)
+ self._transform.rotate_deg(rotate)
+ self._alt_transform.rotate_deg(rotate_alt)
diff --git a/contrib/python/matplotlib/py2/matplotlib/mathtext.py b/contrib/python/matplotlib/py2/matplotlib/mathtext.py
new file mode 100644
index 00000000000..dee778b0d0a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/mathtext.py
@@ -0,0 +1,3445 @@
+r"""
+:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
+TeX math syntax and drawing it to a matplotlib backend.
+
+For a tutorial of its usage see :doc:`/tutorials/text/mathtext`. This
+document is primarily concerned with implementation details.
+
+The module uses pyparsing_ to parse the TeX expression.
+
+.. _pyparsing: http://pyparsing.wikispaces.com/
+
+The Bakoma distribution of the TeX Computer Modern fonts and the STIX
+fonts are supported. There is experimental support for using
+arbitrary fonts, but results may vary without proper tweaking and
+metrics for those fonts.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six import unichr
+
+import os
+from math import ceil
+import unicodedata
+from warnings import warn
+
+from numpy import inf, isinf
+import numpy as np
+
+from pyparsing import (
+ Combine, Empty, FollowedBy, Forward, Group, Literal, oneOf, OneOrMore,
+ Optional, ParseBaseException, ParseFatalException, ParserElement,
+ QuotedString, Regex, StringEnd, Suppress, ZeroOrMore)
+
+ParserElement.enablePackrat()
+
+from matplotlib import _png, colors as mcolors, get_data_path, rcParams
+from matplotlib.afm import AFM
+from matplotlib.cbook import Bunch, get_realpath_and_stat, maxdict
+from matplotlib.ft2font import FT2Image, KERNING_DEFAULT, LOAD_NO_HINTING
+from matplotlib.font_manager import findfont, FontProperties, get_font
+from matplotlib._mathtext_data import (latex_to_bakoma, latex_to_standard,
+ tex2uni, latex_to_cmex,
+ stix_virtual_fonts)
+
+####################
+
+
+
+##############################################################################
+# FONTS
+
+def get_unicode_index(symbol, math=True):
+ """get_unicode_index(symbol, [bool]) -> integer
+
+Return the integer index (from the Unicode table) of symbol. *symbol*
+can be a single unicode character, a TeX command (i.e. r'\\pi'), or a
+Type1 symbol name (i.e. 'phi').
+If math is False, the current symbol should be treated as a non-math symbol.
+"""
+ # for a non-math symbol, simply return its unicode index
+ if not math:
+ return ord(symbol)
+ # From UTF #25: U+2212 minus sign is the preferred
+ # representation of the unary and binary minus sign rather than
+ # the ASCII-derived U+002D hyphen-minus, because minus sign is
+ # unambiguous and because it is rendered with a more desirable
+ # length, usually longer than a hyphen.
+ if symbol == '-':
+ return 0x2212
+ try:# This will succeed if symbol is a single unicode char
+ return ord(symbol)
+ except TypeError:
+ pass
+ try:# Is symbol a TeX symbol (i.e. \alpha)
+ return tex2uni[symbol.strip("\\")]
+ except KeyError:
+ message = """'%(symbol)s' is not a valid Unicode character or
+TeX/Type1 symbol"""%locals()
+ raise ValueError(message)
+
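+# For example (values assume the standard tex2uni table shipped with
+# matplotlib):
+#
+#     >>> hex(get_unicode_index('-'))
+#     '0x2212'
+#     >>> unichr(get_unicode_index(r'\pi'))
+#     u'\u03c0'
+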
+def unichr_safe(index):
+ """Return the Unicode character corresponding to the index,
+or the replacement character if this is a narrow build of Python
+and the requested character is outside the BMP."""
+ try:
+ return unichr(index)
+ except ValueError:
+ return unichr(0xFFFD)
+
+class MathtextBackend(object):
+ """
+ The base class for the mathtext backend-specific code. The
+ purpose of :class:`MathtextBackend` subclasses is to interface
+ between mathtext and a specific matplotlib graphics backend.
+
+ Subclasses need to override the following:
+
+ - :meth:`render_glyph`
+ - :meth:`render_rect_filled`
+ - :meth:`get_results`
+
+ And optionally, if you need to use a FreeType hinting style:
+
+ - :meth:`get_hinting_type`
+ """
+ def __init__(self):
+ self.width = 0
+ self.height = 0
+ self.depth = 0
+
+ def set_canvas_size(self, w, h, d):
+ 'Dimension the drawing canvas'
+ self.width = w
+ self.height = h
+ self.depth = d
+
+ def render_glyph(self, ox, oy, info):
+ """
+ Draw a glyph described by *info* to the reference point (*ox*,
+ *oy*).
+ """
+ raise NotImplementedError()
+
+ def render_rect_filled(self, x1, y1, x2, y2):
+ """
+ Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
+ """
+ raise NotImplementedError()
+
+ def get_results(self, box):
+ """
+ Return a backend-specific tuple to return to the backend after
+ all processing is done.
+ """
+ raise NotImplementedError()
+
+ def get_hinting_type(self):
+ """
+ Get the FreeType hinting type to use with this particular
+ backend.
+ """
+ return LOAD_NO_HINTING
+
+class MathtextBackendAgg(MathtextBackend):
+ """
+ Render glyphs and rectangles to an FTImage buffer, which is later
+ transferred to the Agg image by the Agg backend.
+ """
+ def __init__(self):
+ self.ox = 0
+ self.oy = 0
+ self.image = None
+ self.mode = 'bbox'
+ self.bbox = [0, 0, 0, 0]
+ MathtextBackend.__init__(self)
+
+ def _update_bbox(self, x1, y1, x2, y2):
+ self.bbox = [min(self.bbox[0], x1),
+ min(self.bbox[1], y1),
+ max(self.bbox[2], x2),
+ max(self.bbox[3], y2)]
+
+ def set_canvas_size(self, w, h, d):
+ MathtextBackend.set_canvas_size(self, w, h, d)
+ if self.mode != 'bbox':
+ self.image = FT2Image(ceil(w), ceil(h + max(d, 0)))
+
+ def render_glyph(self, ox, oy, info):
+ if self.mode == 'bbox':
+ self._update_bbox(ox + info.metrics.xmin,
+ oy - info.metrics.ymax,
+ ox + info.metrics.xmax,
+ oy - info.metrics.ymin)
+ else:
+ info.font.draw_glyph_to_bitmap(
+ self.image, ox, oy - info.metrics.iceberg, info.glyph,
+ antialiased=rcParams['text.antialiased'])
+
+ def render_rect_filled(self, x1, y1, x2, y2):
+ if self.mode == 'bbox':
+ self._update_bbox(x1, y1, x2, y2)
+ else:
+ height = max(int(y2 - y1) - 1, 0)
+ if height == 0:
+ center = (y2 + y1) / 2.0
+ y = int(center - (height + 1) / 2.0)
+ else:
+ y = int(y1)
+ self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
+
+ def get_results(self, box, used_characters):
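+ # Two passes over the box tree: first ship it in 'bbox' mode to
+ # measure the ink extents, then switch to 'render' mode, size the
+ # FT2Image canvas to that (slightly padded) bbox, and ship again to
+ # actually draw the glyphs and rules.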
+ self.mode = 'bbox'
+ orig_height = box.height
+ orig_depth = box.depth
+ ship(0, 0, box)
+ bbox = self.bbox
+ bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
+ self.mode = 'render'
+ self.set_canvas_size(
+ bbox[2] - bbox[0],
+ (bbox[3] - bbox[1]) - orig_depth,
+ (bbox[3] - bbox[1]) - orig_height)
+ ship(-bbox[0], -bbox[1], box)
+ result = (self.ox,
+ self.oy,
+ self.width,
+ self.height + self.depth,
+ self.depth,
+ self.image,
+ used_characters)
+ self.image = None
+ return result
+
+ def get_hinting_type(self):
+ from matplotlib.backends import backend_agg
+ return backend_agg.get_hinting_flag()
+
+class MathtextBackendBitmap(MathtextBackendAgg):
+ def get_results(self, box, used_characters):
+ ox, oy, width, height, depth, image, characters = \
+ MathtextBackendAgg.get_results(self, box, used_characters)
+ return image, depth
+
+class MathtextBackendPs(MathtextBackend):
+ """
+ Store information to write a mathtext rendering to the PostScript
+ backend.
+ """
+ def __init__(self):
+ self.pswriter = six.moves.cStringIO()
+ self.lastfont = None
+
+ def render_glyph(self, ox, oy, info):
+ oy = self.height - oy + info.offset
+ postscript_name = info.postscript_name
+ fontsize = info.fontsize
+ symbol_name = info.symbol_name
+
+ if (postscript_name, fontsize) != self.lastfont:
+ ps = """/%(postscript_name)s findfont
+%(fontsize)s scalefont
+setfont
+""" % locals()
+ self.lastfont = postscript_name, fontsize
+ self.pswriter.write(ps)
+
+ ps = """%(ox)f %(oy)f moveto
+/%(symbol_name)s glyphshow\n
+""" % locals()
+ self.pswriter.write(ps)
+
+ def render_rect_filled(self, x1, y1, x2, y2):
+ ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
+ self.pswriter.write(ps)
+
+ def get_results(self, box, used_characters):
+ ship(0, 0, box)
+ return (self.width,
+ self.height + self.depth,
+ self.depth,
+ self.pswriter,
+ used_characters)
+
+class MathtextBackendPdf(MathtextBackend):
+ """
+ Store information to write a mathtext rendering to the PDF
+ backend.
+ """
+ def __init__(self):
+ self.glyphs = []
+ self.rects = []
+
+ def render_glyph(self, ox, oy, info):
+ filename = info.font.fname
+ oy = self.height - oy + info.offset
+ self.glyphs.append(
+ (ox, oy, filename, info.fontsize,
+ info.num, info.symbol_name))
+
+ def render_rect_filled(self, x1, y1, x2, y2):
+ self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
+
+ def get_results(self, box, used_characters):
+ ship(0, 0, box)
+ return (self.width,
+ self.height + self.depth,
+ self.depth,
+ self.glyphs,
+ self.rects,
+ used_characters)
+
+class MathtextBackendSvg(MathtextBackend):
+ """
+ Store information to write a mathtext rendering to the SVG
+ backend.
+ """
+ def __init__(self):
+ self.svg_glyphs = []
+ self.svg_rects = []
+
+ def render_glyph(self, ox, oy, info):
+ oy = self.height - oy + info.offset
+
+ self.svg_glyphs.append(
+ (info.font, info.fontsize, info.num, ox, oy, info.metrics))
+
+ def render_rect_filled(self, x1, y1, x2, y2):
+ self.svg_rects.append(
+ (x1, self.height - y1 + 1, x2 - x1, y2 - y1))
+
+ def get_results(self, box, used_characters):
+ ship(0, 0, box)
+ svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
+ svg_rects = self.svg_rects)
+ return (self.width,
+ self.height + self.depth,
+ self.depth,
+ svg_elements,
+ used_characters)
+
+class MathtextBackendPath(MathtextBackend):
+ """
+ Store information to write a mathtext rendering to the text path
+ machinery.
+ """
+
+ def __init__(self):
+ self.glyphs = []
+ self.rects = []
+
+ def render_glyph(self, ox, oy, info):
+ oy = self.height - oy + info.offset
+ thetext = info.num
+ self.glyphs.append(
+ (info.font, info.fontsize, thetext, ox, oy))
+
+ def render_rect_filled(self, x1, y1, x2, y2):
+ self.rects.append(
+ (x1, self.height-y2 , x2 - x1, y2 - y1))
+
+ def get_results(self, box, used_characters):
+ ship(0, 0, box)
+ return (self.width,
+ self.height + self.depth,
+ self.depth,
+ self.glyphs,
+ self.rects)
+
+class MathtextBackendCairo(MathtextBackend):
+ """
+ Store information to write a mathtext rendering to the Cairo
+ backend.
+ """
+
+ def __init__(self):
+ self.glyphs = []
+ self.rects = []
+
+ def render_glyph(self, ox, oy, info):
+ oy = oy - info.offset - self.height
+ thetext = unichr_safe(info.num)
+ self.glyphs.append(
+ (info.font, info.fontsize, thetext, ox, oy))
+
+ def render_rect_filled(self, x1, y1, x2, y2):
+ self.rects.append(
+ (x1, y1 - self.height, x2 - x1, y2 - y1))
+
+ def get_results(self, box, used_characters):
+ ship(0, 0, box)
+ return (self.width,
+ self.height + self.depth,
+ self.depth,
+ self.glyphs,
+ self.rects)
+
+class Fonts(object):
+ """
+ An abstract base class for a system of fonts to use for mathtext.
+
+ The class must be able to take symbol keys and font file names and
+ return the character metrics. It also delegates to a backend class
+ to do the actual drawing.
+ """
+
+ def __init__(self, default_font_prop, mathtext_backend):
+ """
+ *default_font_prop*: A
+ :class:`~matplotlib.font_manager.FontProperties` object to use
+ for the default non-math font, or the base font for Unicode
+ (generic) font rendering.
+
+ *mathtext_backend*: A subclass of :class:`MathtextBackend`
+ used to delegate the actual rendering.
+ """
+ self.default_font_prop = default_font_prop
+ self.mathtext_backend = mathtext_backend
+ self.used_characters = {}
+
+ def destroy(self):
+ """
+ Fix any cyclical references before the object is about
+ to be destroyed.
+ """
+ self.used_characters = None
+
+ def get_kern(self, font1, fontclass1, sym1, fontsize1,
+ font2, fontclass2, sym2, fontsize2, dpi):
+ """
+ Get the kerning distance between *sym1* and *sym2* for the given fonts.
+
+ *fontX*: one of the TeX font names::
+
+ tt, it, rm, cal, sf, bf or default/regular (non-math)
+
+ *fontclassX*: TODO
+
+ *symX*: a symbol in raw TeX form. e.g., '1', 'x' or '\\sigma'
+
+ *fontsizeX*: the fontsize in points
+
+ *dpi*: the current dots-per-inch
+ """
+ return 0.
+
+ def get_metrics(self, font, font_class, sym, fontsize, dpi, math=True):
+ """
+ *font*: one of the TeX font names::
+
+ tt, it, rm, cal, sf, bf or default/regular (non-math)
+
+ *font_class*: TODO
+
+ *sym*: a symbol in raw TeX form. e.g., '1', 'x' or '\\sigma'
+
+ *fontsize*: font size in points
+
+ *dpi*: current dots-per-inch
+
+ *math*: whether sym is a math character
+
+ Returns an object with the following attributes:
+
+ - *advance*: The advance distance (in points) of the glyph.
+
+ - *height*: The height of the glyph in points.
+
+ - *width*: The width of the glyph in points.
+
+ - *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
+
+ - *iceberg* - the distance from the baseline to the top of
+ the glyph. This corresponds to TeX's definition of
+ "height".
+ """
+ info = self._get_info(font, font_class, sym, fontsize, dpi, math)
+ return info.metrics
+
+ def set_canvas_size(self, w, h, d):
+ """
+ Set the size of the buffer used to render the math expression.
+ Only really necessary for the bitmap backends.
+ """
+ self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
+ self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
+
+ def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
+ """
+ Draw a glyph at
+
+ - *ox*, *oy*: position
+
+ - *facename*: One of the TeX face names
+
+ - *font_class*:
+
+ - *sym*: TeX symbol name or single character
+
+ - *fontsize*: fontsize in points
+
+ - *dpi*: The dpi to draw at.
+ """
+ info = self._get_info(facename, font_class, sym, fontsize, dpi)
+ realpath, stat_key = get_realpath_and_stat(info.font.fname)
+ used_characters = self.used_characters.setdefault(
+ stat_key, (realpath, set()))
+ used_characters[1].add(info.num)
+ self.mathtext_backend.render_glyph(ox, oy, info)
+
+ def render_rect_filled(self, x1, y1, x2, y2):
+ """
+ Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
+ """
+ self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
+
+ def get_xheight(self, font, fontsize, dpi):
+ """
+ Get the xheight for the given *font* and *fontsize*.
+ """
+ raise NotImplementedError()
+
+ def get_underline_thickness(self, font, fontsize, dpi):
+ """
+ Get the line thickness that matches the given font. Used as a
+ base unit for drawing lines such as in a fraction or radical.
+ """
+ raise NotImplementedError()
+
+ def get_used_characters(self):
+ """
+ Get the set of characters that were used in the math
+ expression. Used by backends that need to subset fonts so
+ they know which glyphs to include.
+ """
+ return self.used_characters
+
+ def get_results(self, box):
+ """
+ Get the data needed by the backend to render the math
+ expression. The return value is backend-specific.
+ """
+ result = self.mathtext_backend.get_results(box, self.get_used_characters())
+ self.destroy()
+ return result
+
+ def get_sized_alternatives_for_symbol(self, fontname, sym):
+ """
+ Override if your font provides multiple sizes of the same
+ symbol. Should return a list of symbols matching *sym* in
+ various sizes. The expression renderer will select the most
+ appropriate size for a given situation from this list.
+ """
+ return [(fontname, sym)]
+
+class TruetypeFonts(Fonts):
+ """
+ A generic base class for all font setups that use Truetype fonts
+ (through FT2Font).
+ """
+ def __init__(self, default_font_prop, mathtext_backend):
+ Fonts.__init__(self, default_font_prop, mathtext_backend)
+ self.glyphd = {}
+ self._fonts = {}
+
+ filename = findfont(default_font_prop)
+ default_font = get_font(filename)
+ self._fonts['default'] = default_font
+ self._fonts['regular'] = default_font
+
+ def destroy(self):
+ self.glyphd = None
+ Fonts.destroy(self)
+
+ def _get_font(self, font):
+ if font in self.fontmap:
+ basename = self.fontmap[font]
+ else:
+ basename = font
+ cached_font = self._fonts.get(basename)
+ if cached_font is None and os.path.exists(basename):
+ cached_font = get_font(basename)
+ self._fonts[basename] = cached_font
+ self._fonts[cached_font.postscript_name] = cached_font
+ self._fonts[cached_font.postscript_name.lower()] = cached_font
+ return cached_font
+
+ def _get_offset(self, font, glyph, fontsize, dpi):
+ if font.postscript_name == 'Cmex10':
+ return ((glyph.height/64.0/2.0) + (fontsize/3.0 * dpi/72.0))
+ return 0.
+
+ def _get_info(self, fontname, font_class, sym, fontsize, dpi, math=True):
+ key = fontname, font_class, sym, fontsize, dpi
+ bunch = self.glyphd.get(key)
+ if bunch is not None:
+ return bunch
+
+ font, num, symbol_name, fontsize, slanted = \
+ self._get_glyph(fontname, font_class, sym, fontsize, math)
+
+ font.set_size(fontsize, dpi)
+ glyph = font.load_char(
+ num,
+ flags=self.mathtext_backend.get_hinting_type())
+
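+ # FreeType reports glyph metrics in 26.6 fixed point (1/64 pixel)
+ # and linearHoriAdvance in 16.16 fixed point, hence the /64.0 and
+ # /65536.0 conversions below.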
+ xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
+ offset = self._get_offset(font, glyph, fontsize, dpi)
+ metrics = Bunch(
+ advance = glyph.linearHoriAdvance/65536.0,
+ height = glyph.height/64.0,
+ width = glyph.width/64.0,
+ xmin = xmin,
+ xmax = xmax,
+ ymin = ymin+offset,
+ ymax = ymax+offset,
+ # iceberg is the equivalent of TeX's "height"
+ iceberg = glyph.horiBearingY/64.0 + offset,
+ slanted = slanted
+ )
+
+ result = self.glyphd[key] = Bunch(
+ font = font,
+ fontsize = fontsize,
+ postscript_name = font.postscript_name,
+ metrics = metrics,
+ symbol_name = symbol_name,
+ num = num,
+ glyph = glyph,
+ offset = offset
+ )
+ return result
+
+ def get_xheight(self, fontname, fontsize, dpi):
+ font = self._get_font(fontname)
+ font.set_size(fontsize, dpi)
+ pclt = font.get_sfnt_table('pclt')
+ if pclt is None:
+ # Some fonts don't store the xHeight, so we do a poor man's xHeight
+ metrics = self.get_metrics(fontname, rcParams['mathtext.default'], 'x', fontsize, dpi)
+ return metrics.iceberg
+ xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
+ return xHeight
+
+ def get_underline_thickness(self, font, fontsize, dpi):
+ # This function used to grab underline thickness from the font
+ # metrics, but that information is just too unreliable, so it
+ # is now hardcoded.
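+ # (At 12 pt and 72 dpi the formula below works out to 0.75 pixels.)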
+ return ((0.75 / 12.0) * fontsize * dpi) / 72.0
+
+ def get_kern(self, font1, fontclass1, sym1, fontsize1,
+ font2, fontclass2, sym2, fontsize2, dpi):
+ if font1 == font2 and fontsize1 == fontsize2:
+ info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
+ info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
+ font = info1.font
+ return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
+ return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
+ font2, fontclass2, sym2, fontsize2, dpi)
+
+class BakomaFonts(TruetypeFonts):
+ """
+ Use the Bakoma TrueType fonts for rendering.
+
+ Symbols are strewn about a number of font files, each of which has
+ its own proprietary 8-bit encoding.
+ """
+ _fontmap = { 'cal' : 'cmsy10',
+ 'rm' : 'cmr10',
+ 'tt' : 'cmtt10',
+ 'it' : 'cmmi10',
+ 'bf' : 'cmb10',
+ 'sf' : 'cmss10',
+ 'ex' : 'cmex10'
+ }
+
+ def __init__(self, *args, **kwargs):
+ self._stix_fallback = StixFonts(*args, **kwargs)
+
+ TruetypeFonts.__init__(self, *args, **kwargs)
+ self.fontmap = {}
+ for key, val in six.iteritems(self._fontmap):
+ fullpath = findfont(val)
+ self.fontmap[key] = fullpath
+ self.fontmap[val] = fullpath
+
+
+ _slanted_symbols = set(r"\int \oint".split())
+
+ def _get_glyph(self, fontname, font_class, sym, fontsize, math=True):
+ symbol_name = None
+ font = None
+ if fontname in self.fontmap and sym in latex_to_bakoma:
+ basename, num = latex_to_bakoma[sym]
+ slanted = (basename == "cmmi10") or sym in self._slanted_symbols
+ font = self._get_font(basename)
+ elif len(sym) == 1:
+ slanted = (fontname == "it")
+ font = self._get_font(fontname)
+ if font is not None:
+ num = ord(sym)
+
+ if font is not None:
+ gid = font.get_char_index(num)
+ if gid != 0:
+ symbol_name = font.get_glyph_name(gid)
+
+ if symbol_name is None:
+ return self._stix_fallback._get_glyph(
+ fontname, font_class, sym, fontsize, math)
+
+ return font, num, symbol_name, fontsize, slanted
+
+ # The Bakoma fonts contain many pre-sized alternatives for the
+ # delimiters. The AutoSizedChar class will use these alternatives
+ # and select the best (closest sized) glyph.
+ _size_alternatives = {
+ '(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
+ ('ex', '\xb5'), ('ex', '\xc3')],
+ ')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
+ ('ex', '\xb6'), ('ex', '\x21')],
+ '{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
+ ('ex', '\xbd'), ('ex', '\x28')],
+ '}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
+ ('ex', '\xbe'), ('ex', '\x29')],
+ # The fourth size of '[' is mysteriously missing from the BaKoMa
+ # font, so I've omitted it for both '[' and ']'
+ '[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
+ ('ex', '\x22')],
+ ']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
+ ('ex', '\x23')],
+ r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
+ ('ex', '\xb9'), ('ex', '\x24')],
+ r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
+ ('ex', '\xba'), ('ex', '\x25')],
+ r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
+ ('ex', '\xbb'), ('ex', '\x26')],
+ r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
+ ('ex', '\xbc'), ('ex', '\x27')],
+ r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
+ ('ex', '\xbf'), ('ex', '\x2a')],
+ r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
+ ('ex', '\xc0'), ('ex', '\x2b')],
+ r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
+ ('ex', '\x72'), ('ex', '\x73')],
+ r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
+ ('ex', '\xc2'), ('ex', '\x2d')],
+ r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
+ ('ex', '\xcb'), ('ex', '\x2c')],
+ r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
+ ('ex', '\x64')],
+ r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
+ ('ex', '\x67')],
+ r'<' : [('cal', 'h'), ('ex', 'D')],
+ r'>' : [('cal', 'i'), ('ex', 'E')]
+ }
+
+ for alias, target in [(r'\leftparen', '('),
+ (r'\rightparen', ')'),
+ (r'\leftbrace', '{'),
+ (r'\rightbrace', '}'),
+ (r'\leftbracket', '['),
+ (r'\rightbracket', ']'),
+ (r'\{', '{'),
+ (r'\}', '}'),
+ (r'\[', '['),
+ (r'\]', ']')]:
+ _size_alternatives[alias] = _size_alternatives[target]
+
+ def get_sized_alternatives_for_symbol(self, fontname, sym):
+ return self._size_alternatives.get(sym, [(fontname, sym)])
+
+class UnicodeFonts(TruetypeFonts):
+ """
+ An abstract base class for handling Unicode fonts.
+
+ While some reasonably complete Unicode fonts (such as DejaVu) may
+ work in some situations, the only Unicode font I'm aware of with a
+ complete set of math symbols is STIX.
+
+ This class will "fall back" on the Bakoma fonts when a required
+ symbol cannot be found in the font.
+ """
+ use_cmex = True
+
+ def __init__(self, *args, **kwargs):
+ # This must come first so the backend's owner is set correctly
+ if rcParams['mathtext.fallback_to_cm']:
+ self.cm_fallback = BakomaFonts(*args, **kwargs)
+ else:
+ self.cm_fallback = None
+ TruetypeFonts.__init__(self, *args, **kwargs)
+ self.fontmap = {}
+ for texfont in "cal rm tt it bf sf".split():
+ prop = rcParams['mathtext.' + texfont]
+ font = findfont(prop)
+ self.fontmap[texfont] = font
+ prop = FontProperties('cmex10')
+ font = findfont(prop)
+ self.fontmap['ex'] = font
+
+ _slanted_symbols = set(r"\int \oint".split())
+
+ def _map_virtual_font(self, fontname, font_class, uniindex):
+ return fontname, uniindex
+
+ def _get_glyph(self, fontname, font_class, sym, fontsize, math=True):
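+ # Lookup order: the cmex table (when use_cmex), then the Unicode
+ # index from get_unicode_index, then _map_virtual_font; in 'it' mode
+ # only "Letter"-class characters (excluding Greek capitals) stay
+ # italic. Missing glyphs are delegated to cm_fallback if available,
+ # otherwise a dummy currency glyph is substituted.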
+ found_symbol = False
+
+ if self.use_cmex:
+ uniindex = latex_to_cmex.get(sym)
+ if uniindex is not None:
+ fontname = 'ex'
+ found_symbol = True
+
+ if not found_symbol:
+ try:
+ uniindex = get_unicode_index(sym, math)
+ found_symbol = True
+ except ValueError:
+ uniindex = ord('?')
+ warn("No TeX to unicode mapping for '%s'" %
+ sym.encode('ascii', 'backslashreplace'),
+ MathTextWarning)
+
+ fontname, uniindex = self._map_virtual_font(
+ fontname, font_class, uniindex)
+
+ new_fontname = fontname
+
+ # Only characters in the "Letter" class should be italicized in 'it'
+ # mode. Greek capital letters should be Roman.
+ if found_symbol:
+ if fontname == 'it':
+ if uniindex < 0x10000:
+ unistring = unichr(uniindex)
+ if (not unicodedata.category(unistring)[0] == "L"
+ or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
+ new_fontname = 'rm'
+
+ slanted = (new_fontname == 'it') or sym in self._slanted_symbols
+ found_symbol = False
+ font = self._get_font(new_fontname)
+ if font is not None:
+ glyphindex = font.get_char_index(uniindex)
+ if glyphindex != 0:
+ found_symbol = True
+
+ if not found_symbol:
+ if self.cm_fallback:
+ if isinstance(self.cm_fallback, BakomaFonts):
+ warn("Substituting with a symbol from Computer Modern.",
+ MathTextWarning)
+ if (fontname in ('it', 'regular') and
+ isinstance(self.cm_fallback, StixFonts)):
+ return self.cm_fallback._get_glyph(
+ 'rm', font_class, sym, fontsize)
+ else:
+ return self.cm_fallback._get_glyph(
+ fontname, font_class, sym, fontsize)
+ else:
+ if fontname in ('it', 'regular') and isinstance(self, StixFonts):
+ return self._get_glyph('rm', font_class, sym, fontsize)
+ warn("Font '%s' does not have a glyph for '%s' [U+%x]" %
+ (new_fontname,
+ sym.encode('ascii', 'backslashreplace').decode('ascii'),
+ uniindex),
+ MathTextWarning)
+ warn("Substituting with a dummy symbol.", MathTextWarning)
+ fontname = 'rm'
+ new_fontname = fontname
+ font = self._get_font(fontname)
+ uniindex = 0xA4 # currency character, for lack of anything better
+ glyphindex = font.get_char_index(uniindex)
+ slanted = False
+
+ symbol_name = font.get_glyph_name(glyphindex)
+ return font, uniindex, symbol_name, fontsize, slanted
+
+ def get_sized_alternatives_for_symbol(self, fontname, sym):
+ if self.cm_fallback:
+ return self.cm_fallback.get_sized_alternatives_for_symbol(
+ fontname, sym)
+ return [(fontname, sym)]
+
+
+class DejaVuFonts(UnicodeFonts):
+ use_cmex = False
+
+ def __init__(self, *args, **kwargs):
+ # This must come first so the backend's owner is set correctly
+ if isinstance(self, DejaVuSerifFonts):
+ self.cm_fallback = StixFonts(*args, **kwargs)
+ else:
+ self.cm_fallback = StixSansFonts(*args, **kwargs)
+ self.bakoma = BakomaFonts(*args, **kwargs)
+ TruetypeFonts.__init__(self, *args, **kwargs)
+ self.fontmap = {}
+ # Include Stix sized alternatives for glyphs
+ self._fontmap.update({
+ 1 : 'STIXSizeOneSym',
+ 2 : 'STIXSizeTwoSym',
+ 3 : 'STIXSizeThreeSym',
+ 4 : 'STIXSizeFourSym',
+ 5 : 'STIXSizeFiveSym'})
+ for key, name in six.iteritems(self._fontmap):
+ fullpath = findfont(name)
+ self.fontmap[key] = fullpath
+ self.fontmap[name] = fullpath
+
+ def _get_glyph(self, fontname, font_class, sym, fontsize, math=True):
+ """ Override prime symbol to use Bakoma """
+ if sym == r'\prime':
+ return self.bakoma._get_glyph(fontname,
+ font_class, sym, fontsize, math)
+ else:
+ # check whether the glyph is available in the display font
+ uniindex = get_unicode_index(sym)
+ font = self._get_font('ex')
+ if font is not None:
+ glyphindex = font.get_char_index(uniindex)
+ if glyphindex != 0:
+ return super(DejaVuFonts, self)._get_glyph('ex',
+ font_class, sym, fontsize, math)
+ # otherwise return regular glyph
+ return super(DejaVuFonts, self)._get_glyph(fontname,
+ font_class, sym, fontsize, math)
+
+
+class DejaVuSerifFonts(DejaVuFonts):
+ """
+ A font handling class for the DejaVu Serif fonts
+
+ If a glyph is not found, it will fall back to Stix Serif
+ """
+ _fontmap = { 'rm' : 'DejaVu Serif',
+ 'it' : 'DejaVu Serif:italic',
+ 'bf' : 'DejaVu Serif:weight=bold',
+ 'sf' : 'DejaVu Sans',
+ 'tt' : 'DejaVu Sans Mono',
+ 'ex' : 'DejaVu Serif Display',
+ 0 : 'DejaVu Serif',
+ }
+
+class DejaVuSansFonts(DejaVuFonts):
+ """
+ A font handling class for the DejaVu Sans fonts
+
+ If a glyph is not found, it will fall back to Stix Sans
+ """
+ _fontmap = { 'rm' : 'DejaVu Sans',
+ 'it' : 'DejaVu Sans:italic',
+ 'bf' : 'DejaVu Sans:weight=bold',
+ 'sf' : 'DejaVu Sans',
+ 'tt' : 'DejaVu Sans Mono',
+ 'ex' : 'DejaVu Sans Display',
+ 0 : 'DejaVu Sans',
+ }
+
+class StixFonts(UnicodeFonts):
+ """
+ A font handling class for the STIX fonts.
+
+ In addition to what UnicodeFonts provides, this class:
+
+ - supports "virtual fonts" which are complete alphanumeric
+ character sets with different font styles at special Unicode
+ code points, such as "Blackboard".
+
+ - handles sized alternative characters for the STIXSizeX fonts.
+ """
+ _fontmap = { 'rm' : 'STIXGeneral',
+ 'it' : 'STIXGeneral:italic',
+ 'bf' : 'STIXGeneral:weight=bold',
+ 'nonunirm' : 'STIXNonUnicode',
+ 'nonuniit' : 'STIXNonUnicode:italic',
+ 'nonunibf' : 'STIXNonUnicode:weight=bold',
+
+ 0 : 'STIXGeneral',
+ 1 : 'STIXSizeOneSym',
+ 2 : 'STIXSizeTwoSym',
+ 3 : 'STIXSizeThreeSym',
+ 4 : 'STIXSizeFourSym',
+ 5 : 'STIXSizeFiveSym'
+ }
+ use_cmex = False
+ cm_fallback = False
+ _sans = False
+
+ def __init__(self, *args, **kwargs):
+ TruetypeFonts.__init__(self, *args, **kwargs)
+ self.fontmap = {}
+ for key, name in six.iteritems(self._fontmap):
+ fullpath = findfont(name)
+ self.fontmap[key] = fullpath
+ self.fontmap[name] = fullpath
+
+ def _map_virtual_font(self, fontname, font_class, uniindex):
+ # Handle these "fonts" that are actually embedded in
+ # other fonts.
+ mapping = stix_virtual_fonts.get(fontname)
+ if (self._sans and mapping is None and
+ fontname not in ('regular', 'default')):
+ mapping = stix_virtual_fonts['sf']
+ doing_sans_conversion = True
+ else:
+ doing_sans_conversion = False
+
+ if mapping is not None:
+ if isinstance(mapping, dict):
+ try:
+ mapping = mapping[font_class]
+ except KeyError:
+ mapping = mapping['rm']
+
+ # Binary search for the source glyph
+ lo = 0
+ hi = len(mapping)
+ while lo < hi:
+ mid = (lo+hi)//2
+ range = mapping[mid]
+ if uniindex < range[0]:
+ hi = mid
+ elif uniindex <= range[1]:
+ break
+ else:
+ lo = mid + 1
+
+ if uniindex >= range[0] and uniindex <= range[1]:
+ uniindex = uniindex - range[0] + range[3]
+ fontname = range[2]
+ elif not doing_sans_conversion:
+ # This will generate a dummy character
+ uniindex = 0x1
+ fontname = rcParams['mathtext.default']
+
+ # Handle private use area glyphs
+ if (fontname in ('it', 'rm', 'bf') and
+ uniindex >= 0xe000 and uniindex <= 0xf8ff):
+ fontname = 'nonuni' + fontname
+
+ return fontname, uniindex
+
+ _size_alternatives = {}
+ def get_sized_alternatives_for_symbol(self, fontname, sym):
+ fixes = {'\\{': '{', '\\}': '}', '\\[': '[', '\\]': ']'}
+ sym = fixes.get(sym, sym)
+
+ alternatives = self._size_alternatives.get(sym)
+ if alternatives:
+ return alternatives
+
+ alternatives = []
+ try:
+ uniindex = get_unicode_index(sym)
+ except ValueError:
+ return [(fontname, sym)]
+
+ fix_ups = {
+ ord('<'): 0x27e8,
+ ord('>'): 0x27e9 }
+
+ uniindex = fix_ups.get(uniindex, uniindex)
+
+ for i in range(6):
+ font = self._get_font(i)
+ glyphindex = font.get_char_index(uniindex)
+ if glyphindex != 0:
+ alternatives.append((i, unichr_safe(uniindex)))
+
+ # The largest size of the radical symbol in STIX has incorrect
+ # metrics that cause it to be disconnected from the stem.
+ if sym == r'\__sqrt__':
+ alternatives = alternatives[:-1]
+
+ self._size_alternatives[sym] = alternatives
+ return alternatives
+
+class StixSansFonts(StixFonts):
+ """
+ A font handling class for the STIX fonts (that uses sans-serif
+ characters by default).
+ """
+ _sans = True
+
+class StandardPsFonts(Fonts):
+ """
+ Use the standard postscript fonts for rendering to backend_ps
+
+ Unlike the other font classes, BakomaFonts and UnicodeFonts, this
+ one requires the Ps backend.
+ """
+ basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
+
+ fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
+ 'rm' : 'pncr8a', # New Century Schoolbook
+ 'tt' : 'pcrr8a', # Courier
+ 'it' : 'pncri8a', # New Century Schoolbook Italic
+ 'sf' : 'phvr8a', # Helvetica
+ 'bf' : 'pncb8a', # New Century Schoolbook Bold
+ None : 'psyr' # Symbol
+ }
+
+ def __init__(self, default_font_prop):
+ Fonts.__init__(self, default_font_prop, MathtextBackendPs())
+ self.glyphd = {}
+ self.fonts = {}
+
+ filename = findfont(default_font_prop, fontext='afm',
+ directory=self.basepath)
+ if filename is None:
+ filename = findfont('Helvetica', fontext='afm',
+ directory=self.basepath)
+ with open(filename, 'rb') as fd:
+ default_font = AFM(fd)
+ default_font.fname = filename
+
+ self.fonts['default'] = default_font
+ self.fonts['regular'] = default_font
+ self.pswriter = six.moves.cStringIO()
+
+ def _get_font(self, font):
+ if font in self.fontmap:
+ basename = self.fontmap[font]
+ else:
+ basename = font
+
+ cached_font = self.fonts.get(basename)
+ if cached_font is None:
+ fname = os.path.join(self.basepath, basename + ".afm")
+ with open(fname, 'rb') as fd:
+ cached_font = AFM(fd)
+ cached_font.fname = fname
+ self.fonts[basename] = cached_font
+ self.fonts[cached_font.get_fontname()] = cached_font
+ return cached_font
+
+ def _get_info (self, fontname, font_class, sym, fontsize, dpi, math=True):
+ 'load the cmfont, metrics and glyph with caching'
+ key = fontname, sym, fontsize, dpi
+ tup = self.glyphd.get(key)
+
+ if tup is not None:
+ return tup
+
+ # Only characters in the "Letter" class should really be italicized.
+ # This class includes greek letters, so we're ok
+ if (fontname == 'it' and
+ (len(sym) > 1 or
+ not unicodedata.category(six.text_type(sym)).startswith("L"))):
+ fontname = 'rm'
+
+ found_symbol = False
+
+ if sym in latex_to_standard:
+ fontname, num = latex_to_standard[sym]
+ glyph = chr(num)
+ found_symbol = True
+ elif len(sym) == 1:
+ glyph = sym
+ num = ord(glyph)
+ found_symbol = True
+ else:
+ warn("No TeX to built-in Postscript mapping for {!r}".format(sym),
+ MathTextWarning)
+
+ slanted = (fontname == 'it')
+ font = self._get_font(fontname)
+
+ if found_symbol:
+ try:
+ symbol_name = font.get_name_char(glyph)
+ except KeyError:
+ warn("No glyph in standard Postscript font {!r} for {!r}"
+ .format(font.get_fontname(), sym), MathTextWarning)
+ found_symbol = False
+
+ if not found_symbol:
+ glyph = sym = '?'
+ num = ord(glyph)
+ symbol_name = font.get_name_char(glyph)
+
+ offset = 0
+
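+ # AFM metrics are expressed in 1/1000 of an em, so 0.001 * fontsize
+ # converts them to points at the requested size.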
+ scale = 0.001 * fontsize
+
+ xmin, ymin, xmax, ymax = [val * scale
+ for val in font.get_bbox_char(glyph)]
+ metrics = Bunch(
+ advance = font.get_width_char(glyph) * scale,
+ width = font.get_width_char(glyph) * scale,
+ height = font.get_height_char(glyph) * scale,
+ xmin = xmin,
+ xmax = xmax,
+ ymin = ymin+offset,
+ ymax = ymax+offset,
+ # iceberg is the equivalent of TeX's "height"
+ iceberg = ymax + offset,
+ slanted = slanted
+ )
+
+ self.glyphd[key] = Bunch(
+ font = font,
+ fontsize = fontsize,
+ postscript_name = font.get_fontname(),
+ metrics = metrics,
+ symbol_name = symbol_name,
+ num = num,
+ glyph = glyph,
+ offset = offset
+ )
+
+ return self.glyphd[key]
+
+ def get_kern(self, font1, fontclass1, sym1, fontsize1,
+ font2, fontclass2, sym2, fontsize2, dpi):
+ if font1 == font2 and fontsize1 == fontsize2:
+ info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
+ info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
+ font = info1.font
+ return (font.get_kern_dist(info1.glyph, info2.glyph)
+ * 0.001 * fontsize1)
+ return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
+ font2, fontclass2, sym2, fontsize2, dpi)
+
+ def get_xheight(self, font, fontsize, dpi):
+ font = self._get_font(font)
+ return font.get_xheight() * 0.001 * fontsize
+
+ def get_underline_thickness(self, font, fontsize, dpi):
+ font = self._get_font(font)
+ return font.get_underline_thickness() * 0.001 * fontsize
+
+
+##############################################################################
+# TeX-LIKE BOX MODEL
+
+# The following is based directly on the document 'woven' from the
+# TeX82 source code. This information is also available in printed
+# form:
+#
+# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
+# TeX: The Program. Addison-Wesley Professional.
+#
+# The most relevant "chapters" are:
+# Data structures for boxes and their friends
+# Shipping pages out (Ship class)
+# Packaging (hpack and vpack)
+# Data structures for math mode
+# Subroutines for math mode
+# Typesetting math formulas
+#
+# Many of the docstrings below refer to a numbered "node" in that
+# book, e.g., node123
+#
+# Note that (as TeX) y increases downward, unlike many other parts of
+# matplotlib.
+
+# How much text shrinks when going to the next-smallest level. GROW_FACTOR
+# must be the inverse of SHRINK_FACTOR.
+SHRINK_FACTOR = 0.7
+GROW_FACTOR = 1.0 / SHRINK_FACTOR
+# The number of different sizes of chars to use, beyond which they will not
+# get any smaller
+NUM_SIZE_LEVELS = 6
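+# For example, a character shrunk three levels (e.g. a deeply nested
+# script) is drawn at roughly 0.7 ** 3 ~= 0.34 of its original size;
+# grow() undoes one level by multiplying by 1 / 0.7.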
+
+
+class FontConstantsBase(object):
+ """
+ A set of constants that controls how certain things, such as sub-
+ and superscripts are laid out. These are all metrics that can't
+ be reliably retrieved from the font metrics in the font itself.
+ """
+ # Percentage of x-height of additional horiz. space after sub/superscripts
+ script_space = 0.05
+
+ # Percentage of x-height that sub/superscripts drop below the baseline
+ subdrop = 0.4
+
+ # Percentage of x-height that superscripts are raised from the baseline
+ sup1 = 0.7
+
+ # Percentage of x-height that subscripts drop below the baseline
+ sub1 = 0.3
+
+ # Percentage of x-height that subscripts drop below the baseline when a
+ # superscript is present
+ sub2 = 0.5
+
+ # Percentage of x-height that sub/superscripts are offset relative to the
+ # nucleus edge for non-slanted nuclei
+ delta = 0.025
+
+ # Additional percentage of last character height above 2/3 of the
+ # x-height that superscripts are offset relative to the subscript
+ # for slanted nuclei
+ delta_slanted = 0.2
+
+ # Percentage of x-height that superscripts and subscripts are offset for
+ # integrals
+ delta_integral = 0.1
+
+
+class ComputerModernFontConstants(FontConstantsBase):
+ script_space = 0.075
+ subdrop = 0.2
+ sup1 = 0.45
+ sub1 = 0.2
+ sub2 = 0.3
+ delta = 0.075
+ delta_slanted = 0.3
+ delta_integral = 0.3
+
+
+class STIXFontConstants(FontConstantsBase):
+ script_space = 0.1
+ sup1 = 0.8
+ sub2 = 0.6
+ delta = 0.05
+ delta_slanted = 0.3
+ delta_integral = 0.3
+
+
+class STIXSansFontConstants(FontConstantsBase):
+ script_space = 0.05
+ sup1 = 0.8
+ delta_slanted = 0.6
+ delta_integral = 0.3
+
+
+class DejaVuSerifFontConstants(FontConstantsBase):
+ pass
+
+
+class DejaVuSansFontConstants(FontConstantsBase):
+ pass
+
+
+# Maps font family names to the FontConstantBase subclass to use
+_font_constant_mapping = {
+ 'DejaVu Sans': DejaVuSansFontConstants,
+ 'DejaVu Sans Mono': DejaVuSansFontConstants,
+ 'DejaVu Serif': DejaVuSerifFontConstants,
+ 'cmb10': ComputerModernFontConstants,
+ 'cmex10': ComputerModernFontConstants,
+ 'cmmi10': ComputerModernFontConstants,
+ 'cmr10': ComputerModernFontConstants,
+ 'cmss10': ComputerModernFontConstants,
+ 'cmsy10': ComputerModernFontConstants,
+ 'cmtt10': ComputerModernFontConstants,
+ 'STIXGeneral': STIXFontConstants,
+ 'STIXNonUnicode': STIXFontConstants,
+ 'STIXSizeFiveSym': STIXFontConstants,
+ 'STIXSizeFourSym': STIXFontConstants,
+ 'STIXSizeThreeSym': STIXFontConstants,
+ 'STIXSizeTwoSym': STIXFontConstants,
+ 'STIXSizeOneSym': STIXFontConstants,
+ # Map the fonts we used to ship, just for good measure
+ 'Bitstream Vera Sans': DejaVuSansFontConstants,
+ 'Bitstream Vera': DejaVuSansFontConstants,
+ }
+
+
+def _get_font_constant_set(state):
+ constants = _font_constant_mapping.get(
+ state.font_output._get_font(state.font).family_name,
+ FontConstantsBase)
+ # STIX sans isn't really its own font, just different code points
+ # in the STIX fonts, so we have to detect this one separately.
+ if (constants is STIXFontConstants and
+ isinstance(state.font_output, StixSansFonts)):
+ return STIXSansFontConstants
+ return constants
+
+
+class MathTextWarning(Warning):
+ pass
+
+class Node(object):
+ """
+ A node in the TeX box model
+ """
+ def __init__(self):
+ self.size = 0
+
+ def __repr__(self):
+ return self.__internal_repr__()
+
+ def __internal_repr__(self):
+ return self.__class__.__name__
+
+ def get_kerning(self, next):
+ return 0.0
+
+ def shrink(self):
+ """
+ Shrinks one level smaller. There are only three levels of
+ sizes, after which things will no longer get smaller.
+ """
+ self.size += 1
+
+ def grow(self):
+ """
+ Grows one level larger. There is no limit to how big
+ something can get.
+ """
+ self.size -= 1
+
+ def render(self, x, y):
+ pass
+
+class Box(Node):
+ """
+ Represents any node with a physical location.
+ """
+ def __init__(self, width, height, depth):
+ Node.__init__(self)
+ self.width = width
+ self.height = height
+ self.depth = depth
+
+ def shrink(self):
+ Node.shrink(self)
+ if self.size < NUM_SIZE_LEVELS:
+ self.width *= SHRINK_FACTOR
+ self.height *= SHRINK_FACTOR
+ self.depth *= SHRINK_FACTOR
+
+ def grow(self):
+ Node.grow(self)
+ self.width *= GROW_FACTOR
+ self.height *= GROW_FACTOR
+ self.depth *= GROW_FACTOR
+
+ def render(self, x1, y1, x2, y2):
+ pass
+
+class Vbox(Box):
+ """
+ A box with only height (zero width).
+ """
+ def __init__(self, height, depth):
+ Box.__init__(self, 0., height, depth)
+
+class Hbox(Box):
+ """
+ A box with only width (zero height and depth).
+ """
+ def __init__(self, width):
+ Box.__init__(self, width, 0., 0.)
+
+class Char(Node):
+ """
+ Represents a single character. Unlike TeX, the font information
+ and metrics are stored with each :class:`Char` to make it easier
+ to lookup the font metrics when needed. Note that TeX boxes have
+ a width, height, and depth, unlike Type1 and Truetype which use a
+ full bounding box and an advance in the x-direction. The metrics
+ must be converted to the TeX way, and the advance (if different
+ from width) must be converted into a :class:`Kern` node when the
+ :class:`Char` is added to its parent :class:`Hlist`.
+ """
+ def __init__(self, c, state, math=True):
+ Node.__init__(self)
+ self.c = c
+ self.font_output = state.font_output
+ self.font = state.font
+ self.font_class = state.font_class
+ self.fontsize = state.fontsize
+ self.dpi = state.dpi
+ self.math = math
+ # The real width, height and depth will be set during the
+ # pack phase, after we know the real fontsize
+ self._update_metrics()
+
+ def __internal_repr__(self):
+ return '`%s`' % self.c
+
+ def _update_metrics(self):
+ metrics = self._metrics = self.font_output.get_metrics(
+ self.font, self.font_class, self.c, self.fontsize, self.dpi, self.math)
+ if self.c == ' ':
+ self.width = metrics.advance
+ else:
+ self.width = metrics.width
+ self.height = metrics.iceberg
+ self.depth = -(metrics.iceberg - metrics.height)
+
+ def is_slanted(self):
+ return self._metrics.slanted
+
+ def get_kerning(self, next):
+ """
+ Return the amount of kerning between this and the given
+ character. Called when characters are strung together into
+ :class:`Hlist` to create :class:`Kern` nodes.
+ """
+ advance = self._metrics.advance - self.width
+ kern = 0.
+ if isinstance(next, Char):
+ kern = self.font_output.get_kern(
+ self.font, self.font_class, self.c, self.fontsize,
+ next.font, next.font_class, next.c, next.fontsize,
+ self.dpi)
+ return advance + kern
+
+ def render(self, x, y):
+ """
+ Render the character to the canvas
+ """
+ self.font_output.render_glyph(
+ x, y,
+ self.font, self.font_class, self.c, self.fontsize, self.dpi)
+
+ def shrink(self):
+ Node.shrink(self)
+ if self.size < NUM_SIZE_LEVELS:
+ self.fontsize *= SHRINK_FACTOR
+ self.width *= SHRINK_FACTOR
+ self.height *= SHRINK_FACTOR
+ self.depth *= SHRINK_FACTOR
+
+ def grow(self):
+ Node.grow(self)
+ self.fontsize *= GROW_FACTOR
+ self.width *= GROW_FACTOR
+ self.height *= GROW_FACTOR
+ self.depth *= GROW_FACTOR
+
+class Accent(Char):
+ """
+ The font metrics need to be dealt with differently for accents,
+ since they are already offset correctly from the baseline in
+ TrueType fonts.
+ """
+ def _update_metrics(self):
+ metrics = self._metrics = self.font_output.get_metrics(
+ self.font, self.font_class, self.c, self.fontsize, self.dpi)
+ self.width = metrics.xmax - metrics.xmin
+ self.height = metrics.ymax - metrics.ymin
+ self.depth = 0
+
+ def shrink(self):
+ Char.shrink(self)
+ self._update_metrics()
+
+ def grow(self):
+ Char.grow(self)
+ self._update_metrics()
+
+ def render(self, x, y):
+ """
+ Render the character to the canvas.
+ """
+ self.font_output.render_glyph(
+ x - self._metrics.xmin, y + self._metrics.ymin,
+ self.font, self.font_class, self.c, self.fontsize, self.dpi)
+
+class List(Box):
+ """
+ A list of nodes (either horizontal or vertical).
+ """
+ def __init__(self, elements):
+ Box.__init__(self, 0., 0., 0.)
+ self.shift_amount = 0. # An arbitrary offset
+ self.children = elements # The child nodes of this list
+ # The following parameters are set in the vpack and hpack functions
+ self.glue_set = 0. # The glue setting of this list
+ self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
+ self.glue_order = 0 # The order of infinity (0 - 3) for the glue
+
+ def __repr__(self):
+ return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
+ self.__internal_repr__(),
+ self.width, self.height,
+ self.depth, self.shift_amount,
+ ' '.join([repr(x) for x in self.children]))
+
+ def _determine_order(self, totals):
+ """
+ A helper function to determine the highest order of glue
+ used by the members of this list. Used by vpack and hpack.
+ """
+ o = 0
+ for i in range(len(totals) - 1, 0, -1):
+ if totals[i] != 0.0:
+ o = i
+ break
+ return o
+
+ def _set_glue(self, x, sign, totals, error_type):
+ o = self._determine_order(totals)
+ self.glue_order = o
+ self.glue_sign = sign
+ if totals[o] != 0.:
+ self.glue_set = x / totals[o]
+ else:
+ self.glue_sign = 0
+ self.glue_ratio = 0.
+ if o == 0:
+ if len(self.children):
+ warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
+ MathTextWarning)
+
+ def shrink(self):
+ for child in self.children:
+ child.shrink()
+ Box.shrink(self)
+ if self.size < NUM_SIZE_LEVELS:
+ self.shift_amount *= SHRINK_FACTOR
+ self.glue_set *= SHRINK_FACTOR
+
+ def grow(self):
+ for child in self.children:
+ child.grow()
+ Box.grow(self)
+ self.shift_amount *= GROW_FACTOR
+ self.glue_set *= GROW_FACTOR
+
+class Hlist(List):
+ """
+ A horizontal list of boxes.
+ """
+ def __init__(self, elements, w=0., m='additional', do_kern=True):
+ List.__init__(self, elements)
+ if do_kern:
+ self.kern()
+ self.hpack()
+
+ def kern(self):
+ """
+ Insert :class:`Kern` nodes between :class:`Char` nodes to set
+ kerning. The :class:`Char` nodes themselves determine the
+ amount of kerning they need (in :meth:`~Char.get_kerning`),
+ and this function just creates the linked list in the correct
+ way.
+ """
+ new_children = []
+ num_children = len(self.children)
+ if num_children:
+ for i in range(num_children):
+ elem = self.children[i]
+ if i < num_children - 1:
+ next = self.children[i + 1]
+ else:
+ next = None
+
+ new_children.append(elem)
+ kerning_distance = elem.get_kerning(next)
+ if kerning_distance != 0.:
+ kern = Kern(kerning_distance)
+ new_children.append(kern)
+ self.children = new_children
+
+ # This is a failed experiment to fake cross-font kerning.
+# def get_kerning(self, next):
+# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
+# if isinstance(next, Char):
+# print "CASE A"
+# return self.children[-2].get_kerning(next)
+# elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
+# print "CASE B"
+# result = self.children[-2].get_kerning(next.children[0])
+# print result
+# return result
+# return 0.0
+
+ def hpack(self, w=0., m='additional'):
+ """
+ The main duty of :meth:`hpack` is to compute the dimensions of
+ the resulting boxes, and to adjust the glue if one of those
+ dimensions is pre-specified. The computed sizes normally
+ enclose all of the material inside the new box; but some items
+ may stick out if negative glue is used, if the box is
+ overfull, or if a ``\\vbox`` includes other boxes that have
+ been shifted left.
+
+ - *w*: specifies a width
+
+ - *m*: is either 'exactly' or 'additional'.
+
+ Thus, ``hpack(w, 'exactly')`` produces a box whose width is
+ exactly *w*, while ``hpack(w, 'additional')`` yields a box
+ whose width is the natural width plus *w*. The default values
+ produce a box with the natural width.
+ """
+ # I don't know why these get reset in TeX. Shift_amount is pretty
+ # much useless if we do.
+ #self.shift_amount = 0.
+ h = 0.
+ d = 0.
+ x = 0.
+ total_stretch = [0.] * 4
+ total_shrink = [0.] * 4
+ for p in self.children:
+ if isinstance(p, Char):
+ x += p.width
+ h = max(h, p.height)
+ d = max(d, p.depth)
+ elif isinstance(p, Box):
+ x += p.width
+ if not np.isinf(p.height) and not np.isinf(p.depth):
+ s = getattr(p, 'shift_amount', 0.)
+ h = max(h, p.height - s)
+ d = max(d, p.depth + s)
+ elif isinstance(p, Glue):
+ glue_spec = p.glue_spec
+ x += glue_spec.width
+ total_stretch[glue_spec.stretch_order] += glue_spec.stretch
+ total_shrink[glue_spec.shrink_order] += glue_spec.shrink
+ elif isinstance(p, Kern):
+ x += p.width
+ self.height = h
+ self.depth = d
+
+ if m == 'additional':
+ w += x
+ self.width = w
+ x = w - x
+
+ if x == 0.:
+ self.glue_sign = 0
+ self.glue_order = 0
+ self.glue_ratio = 0.
+ return
+ if x > 0.:
+ self._set_glue(x, 1, total_stretch, "Overfull")
+ else:
+ self._set_glue(x, -1, total_shrink, "Underfull")
+
+class Vlist(List):
+ """
+ A vertical list of boxes.
+ """
+ def __init__(self, elements, h=0., m='additional'):
+ List.__init__(self, elements)
+ self.vpack()
+
+ def vpack(self, h=0., m='additional', l=np.inf):
+ """
+ The main duty of :meth:`vpack` is to compute the dimensions of
+ the resulting boxes, and to adjust the glue if one of those
+ dimensions is pre-specified.
+
+ - *h*: specifies a height
+ - *m*: is either 'exactly' or 'additional'.
+ - *l*: a maximum height
+
+ Thus, ``vpack(h, 'exactly')`` produces a box whose height is
+ exactly *h*, while ``vpack(h, 'additional')`` yields a box
+ whose height is the natural height plus *h*. The default
+ values produce a box with the natural height.
+ """
+ # I don't know why these get reset in TeX. Shift_amount is pretty
+ # much useless if we do.
+ # self.shift_amount = 0.
+ w = 0.
+ d = 0.
+ x = 0.
+ total_stretch = [0.] * 4
+ total_shrink = [0.] * 4
+ for p in self.children:
+ if isinstance(p, Box):
+ x += d + p.height
+ d = p.depth
+ if not np.isinf(p.width):
+ s = getattr(p, 'shift_amount', 0.)
+ w = max(w, p.width + s)
+ elif isinstance(p, Glue):
+ x += d
+ d = 0.
+ glue_spec = p.glue_spec
+ x += glue_spec.width
+ total_stretch[glue_spec.stretch_order] += glue_spec.stretch
+ total_shrink[glue_spec.shrink_order] += glue_spec.shrink
+ elif isinstance(p, Kern):
+ x += d + p.width
+ d = 0.
+ elif isinstance(p, Char):
+ raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
+
+ self.width = w
+ if d > l:
+ x += d - l
+ self.depth = l
+ else:
+ self.depth = d
+
+ if m == 'additional':
+ h += x
+ self.height = h
+ x = h - x
+
+ if x == 0:
+ self.glue_sign = 0
+ self.glue_order = 0
+ self.glue_ratio = 0.
+ return
+
+ if x > 0.:
+ self._set_glue(x, 1, total_stretch, "Overfull")
+ else:
+ self._set_glue(x, -1, total_shrink, "Underfull")
+
+class Rule(Box):
+ """
+ A :class:`Rule` node stands for a solid black rectangle; it has
+ *width*, *depth*, and *height* fields just as in an
+ :class:`Hlist`. However, if any of these dimensions is inf, the
+ actual value will be determined by running the rule up to the
+ boundary of the innermost enclosing box. This is called a "running
+ dimension." The width is never running in an :class:`Hlist`; the
+ height and depth are never running in a :class:`Vlist`.
+ """
+ def __init__(self, width, height, depth, state):
+ Box.__init__(self, width, height, depth)
+ self.font_output = state.font_output
+
+ def render(self, x, y, w, h):
+ self.font_output.render_rect_filled(x, y, x + w, y + h)
+
+class Hrule(Rule):
+ """
+ Convenience class to create a horizontal rule.
+ """
+ def __init__(self, state, thickness=None):
+ if thickness is None:
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ height = depth = thickness * 0.5
+ Rule.__init__(self, np.inf, height, depth, state)
+
+class Vrule(Rule):
+ """
+ Convenience class to create a vertical rule.
+ """
+ def __init__(self, state):
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ Rule.__init__(self, thickness, np.inf, np.inf, state)
+
+class Glue(Node):
+ """
+ Most of the information in this object is stored in the underlying
+ :class:`GlueSpec` class, which is shared between multiple glue objects. (This
+ is a memory optimization which probably doesn't matter anymore, but it's
+ easier to stick to what TeX does.)
+ """
+ def __init__(self, glue_type, copy=False):
+ Node.__init__(self)
+ self.glue_subtype = 'normal'
+ if isinstance(glue_type, six.string_types):
+ glue_spec = GlueSpec.factory(glue_type)
+ elif isinstance(glue_type, GlueSpec):
+ glue_spec = glue_type
+ else:
+ raise ValueError("glue_type must be a glue spec name or instance.")
+ if copy:
+ glue_spec = glue_spec.copy()
+ self.glue_spec = glue_spec
+
+ def shrink(self):
+ Node.shrink(self)
+ if self.size < NUM_SIZE_LEVELS:
+ if self.glue_spec.width != 0.:
+ self.glue_spec = self.glue_spec.copy()
+ self.glue_spec.width *= SHRINK_FACTOR
+
+ def grow(self):
+ Node.grow(self)
+ if self.glue_spec.width != 0.:
+ self.glue_spec = self.glue_spec.copy()
+ self.glue_spec.width *= GROW_FACTOR
+
+class GlueSpec(object):
+ """
+ See :class:`Glue`.
+ """
+ def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
+ self.width = width
+ self.stretch = stretch
+ self.stretch_order = stretch_order
+ self.shrink = shrink
+ self.shrink_order = shrink_order
+
+ def copy(self):
+ return GlueSpec(
+ self.width,
+ self.stretch,
+ self.stretch_order,
+ self.shrink,
+ self.shrink_order)
+
+ def factory(cls, glue_type):
+ return cls._types[glue_type]
+ factory = classmethod(factory)
+
+GlueSpec._types = {
+ 'fil': GlueSpec(0., 1., 1, 0., 0),
+ 'fill': GlueSpec(0., 1., 2, 0., 0),
+ 'filll': GlueSpec(0., 1., 3, 0., 0),
+ 'neg_fil': GlueSpec(0., 0., 0, 1., 1),
+ 'neg_fill': GlueSpec(0., 0., 0, 1., 2),
+ 'neg_filll': GlueSpec(0., 0., 0, 1., 3),
+ 'empty': GlueSpec(0., 0., 0, 0., 0),
+ 'ss': GlueSpec(0., 1., 1, -1., 1)
+}
+
+# Some convenient ways to get common kinds of glue
+
+class Fil(Glue):
+ def __init__(self):
+ Glue.__init__(self, 'fil')
+
+class Fill(Glue):
+ def __init__(self):
+ Glue.__init__(self, 'fill')
+
+class Filll(Glue):
+ def __init__(self):
+ Glue.__init__(self, 'filll')
+
+class NegFil(Glue):
+ def __init__(self):
+ Glue.__init__(self, 'neg_fil')
+
+class NegFill(Glue):
+ def __init__(self):
+ Glue.__init__(self, 'neg_fill')
+
+class NegFilll(Glue):
+ def __init__(self):
+ Glue.__init__(self, 'neg_filll')
+
+class SsGlue(Glue):
+ def __init__(self):
+ Glue.__init__(self, 'ss')
+
+class HCentered(Hlist):
+ """
+ A convenience class to create an :class:`Hlist` whose contents are
+ centered within its enclosing box.
+ """
+ def __init__(self, elements):
+ Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
+ do_kern=False)
+
+class VCentered(Vlist):
+ """
+ A convenience class to create a :class:`Vlist` whose contents are
+ centered within its enclosing box.
+ """
+ def __init__(self, elements):
+ Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
+
+class Kern(Node):
+ """
+ A :class:`Kern` node has a width field to specify a (normally
+ negative) amount of spacing. This spacing correction appears in
+ horizontal lists between letters like A and V when the font
+ designer said that it looks better to move them closer together or
+ further apart. A kern node can also appear in a vertical list,
+ when its *width* denotes additional spacing in the vertical
+ direction.
+ """
+ height = 0
+ depth = 0
+
+ def __init__(self, width):
+ Node.__init__(self)
+ self.width = width
+
+ def __repr__(self):
+ return "k%.02f" % self.width
+
+ def shrink(self):
+ Node.shrink(self)
+ if self.size < NUM_SIZE_LEVELS:
+ self.width *= SHRINK_FACTOR
+
+ def grow(self):
+ Node.grow(self)
+ self.width *= GROW_FACTOR
+
+class SubSuperCluster(Hlist):
+ """
+ :class:`SubSuperCluster` is a sort of hack to get around the fact
+ that this code does not do a two-pass parse like TeX. This lets us store
+ enough information in the hlist itself, namely the nucleus, sub-
+ and super-script, such that if another script follows that needs
+ to be attached, it can be reconfigured on the fly.
+ """
+ def __init__(self):
+ self.nucleus = None
+ self.sub = None
+ self.super = None
+ Hlist.__init__(self, [])
+
+class AutoHeightChar(Hlist):
+ """
+ :class:`AutoHeightChar` will create a character as close to the
+ given height and depth as possible. When using a font with
+ multiple height versions of some characters (such as the BaKoMa
+ fonts), the correct glyph will be selected, otherwise this will
+ always just return a scaled version of the glyph.
+ """
+ def __init__(self, c, height, depth, state, always=False, factor=None):
+ alternatives = state.font_output.get_sized_alternatives_for_symbol(
+ state.font, c)
+
+ xHeight = state.font_output.get_xheight(
+ state.font, state.fontsize, state.dpi)
+
+ state = state.copy()
+ target_total = height + depth
+ for fontname, sym in alternatives:
+ state.font = fontname
+ char = Char(sym, state)
+ # Ensure that size 0 is chosen when the text is regular sized but
+ # with descender glyphs by subtracting 0.2 * xHeight
+ if char.height + char.depth >= target_total - 0.2 * xHeight:
+ break
+
+ shift = 0
+ if state.font != 0:
+ if factor is None:
+ factor = (target_total) / (char.height + char.depth)
+ state.fontsize *= factor
+ char = Char(sym, state)
+
+ shift = (depth - char.depth)
+
+ Hlist.__init__(self, [char])
+ self.shift_amount = shift
+
+class AutoWidthChar(Hlist):
+ """
+ :class:`AutoWidthChar` will create a character as close to the
+ given width as possible. When using a font with multiple width
+ versions of some characters (such as the BaKoMa fonts), the
+ correct glyph will be selected, otherwise this will always just
+ return a scaled version of the glyph.
+ """
+ def __init__(self, c, width, state, always=False, char_class=Char):
+ alternatives = state.font_output.get_sized_alternatives_for_symbol(
+ state.font, c)
+
+ state = state.copy()
+ for fontname, sym in alternatives:
+ state.font = fontname
+ char = char_class(sym, state)
+ if char.width >= width:
+ break
+
+ factor = width / char.width
+ state.fontsize *= factor
+ char = char_class(sym, state)
+
+ Hlist.__init__(self, [char])
+ self.width = char.width
+
+
+class Ship(object):
+ """
+ Once the boxes have been set up, this sends them to output. Since
+ boxes can be inside of boxes inside of boxes, the main work of
+ :class:`Ship` is done by two mutually recursive routines,
+ :meth:`hlist_out` and :meth:`vlist_out`, which traverse the
+ :class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
+ and vertical boxes. The global variables used in TeX to store
+ state as it processes have become member variables here.
+ """
+ def __call__(self, ox, oy, box):
+ self.max_push = 0 # Deepest nesting of push commands so far
+ self.cur_s = 0
+ self.cur_v = 0.
+ self.cur_h = 0.
+ self.off_h = ox
+ self.off_v = oy + box.height
+ self.hlist_out(box)
+
+ def clamp(value):
+ if value < -1000000000.:
+ return -1000000000.
+ if value > 1000000000.:
+ return 1000000000.
+ return value
+ clamp = staticmethod(clamp)
+
+ def hlist_out(self, box):
+ cur_g = 0
+ cur_glue = 0.
+ glue_order = box.glue_order
+ glue_sign = box.glue_sign
+ base_line = self.cur_v
+ left_edge = self.cur_h
+ self.cur_s += 1
+ self.max_push = max(self.cur_s, self.max_push)
+ clamp = self.clamp
+
+ for p in box.children:
+ if isinstance(p, Char):
+ p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
+ self.cur_h += p.width
+ elif isinstance(p, Kern):
+ self.cur_h += p.width
+ elif isinstance(p, List):
+ # node623
+ if len(p.children) == 0:
+ self.cur_h += p.width
+ else:
+ edge = self.cur_h
+ self.cur_v = base_line + p.shift_amount
+ if isinstance(p, Hlist):
+ self.hlist_out(p)
+ else:
+ # p.vpack(box.height + box.depth, 'exactly')
+ self.vlist_out(p)
+ self.cur_h = edge + p.width
+ self.cur_v = base_line
+ elif isinstance(p, Box):
+ # node624
+ rule_height = p.height
+ rule_depth = p.depth
+ rule_width = p.width
+ if np.isinf(rule_height):
+ rule_height = box.height
+ if np.isinf(rule_depth):
+ rule_depth = box.depth
+ if rule_height > 0 and rule_width > 0:
+ self.cur_v = base_line + rule_depth
+ p.render(self.cur_h + self.off_h,
+ self.cur_v + self.off_v,
+ rule_width, rule_height)
+ self.cur_v = base_line
+ self.cur_h += rule_width
+ elif isinstance(p, Glue):
+ # node625
+ glue_spec = p.glue_spec
+ rule_width = glue_spec.width - cur_g
+ if glue_sign != 0: # normal
+ if glue_sign == 1: # stretching
+ if glue_spec.stretch_order == glue_order:
+ cur_glue += glue_spec.stretch
+ cur_g = np.round(clamp(float(box.glue_set) * cur_glue))
+ elif glue_spec.shrink_order == glue_order:
+ cur_glue += glue_spec.shrink
+ cur_g = np.round(clamp(float(box.glue_set) * cur_glue))
+ rule_width += cur_g
+ self.cur_h += rule_width
+ self.cur_s -= 1
+
+ def vlist_out(self, box):
+ cur_g = 0
+ cur_glue = 0.
+ glue_order = box.glue_order
+ glue_sign = box.glue_sign
+ self.cur_s += 1
+ self.max_push = max(self.max_push, self.cur_s)
+ left_edge = self.cur_h
+ self.cur_v -= box.height
+ top_edge = self.cur_v
+ clamp = self.clamp
+
+ for p in box.children:
+ if isinstance(p, Kern):
+ self.cur_v += p.width
+ elif isinstance(p, List):
+ if len(p.children) == 0:
+ self.cur_v += p.height + p.depth
+ else:
+ self.cur_v += p.height
+ self.cur_h = left_edge + p.shift_amount
+ save_v = self.cur_v
+ p.width = box.width
+ if isinstance(p, Hlist):
+ self.hlist_out(p)
+ else:
+ self.vlist_out(p)
+ self.cur_v = save_v + p.depth
+ self.cur_h = left_edge
+ elif isinstance(p, Box):
+ rule_height = p.height
+ rule_depth = p.depth
+ rule_width = p.width
+ if np.isinf(rule_width):
+ rule_width = box.width
+ rule_height += rule_depth
+ if rule_height > 0 and rule_depth > 0:
+ self.cur_v += rule_height
+ p.render(self.cur_h + self.off_h,
+ self.cur_v + self.off_v,
+ rule_width, rule_height)
+ elif isinstance(p, Glue):
+ glue_spec = p.glue_spec
+ rule_height = glue_spec.width - cur_g
+ if glue_sign != 0: # normal
+ if glue_sign == 1: # stretching
+ if glue_spec.stretch_order == glue_order:
+ cur_glue += glue_spec.stretch
+ cur_g = np.round(clamp(float(box.glue_set) * cur_glue))
+ elif glue_spec.shrink_order == glue_order: # shrinking
+ cur_glue += glue_spec.shrink
+ cur_g = np.round(clamp(float(box.glue_set) * cur_glue))
+ rule_height += cur_g
+ self.cur_v += rule_height
+ elif isinstance(p, Char):
+ raise RuntimeError("Internal mathtext error: Char node found in vlist")
+ self.cur_s -= 1
+
+
+ship = Ship()
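+
+# A hedged usage sketch (not part of the original source): after boxes have
+# been packed, backends hand them to the module-level ``ship`` instance,
+# roughly as
+#
+#     ship(ox, oy, box)   # walks the box tree and renders each glyph/rule
+#
+# with ``ox``/``oy`` giving the offset of the output origin.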
+
+##############################################################################
+# PARSER
+
+def Error(msg):
+ """
+    Helper function to raise parser errors.
+ """
+ def raise_error(s, loc, toks):
+ raise ParseFatalException(s, loc, msg)
+
+ empty = Empty()
+ empty.setParseAction(raise_error)
+ return empty
+
+class Parser(object):
+ """
+ This is the pyparsing-based parser for math expressions. It
+ actually parses full strings *containing* math expressions, in
+ that raw text may also appear outside of pairs of ``$``.
+
+ The grammar is based directly on that in TeX, though it cuts a few
+ corners.
+ """
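+
+    # Illustrative note (not from the original source): an input such as
+    # "half-life: $t_{1/2}$" is accepted here; the text outside the dollar
+    # signs is laid out as ordinary characters and only the ``$``-delimited
+    # part is typeset as math.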
+
+ _math_style_dict = dict(displaystyle=0, textstyle=1,
+ scriptstyle=2, scriptscriptstyle=3)
+
+ _binary_operators = set('''
+ + * -
+ \\pm \\sqcap \\rhd
+ \\mp \\sqcup \\unlhd
+ \\times \\vee \\unrhd
+ \\div \\wedge \\oplus
+ \\ast \\setminus \\ominus
+ \\star \\wr \\otimes
+ \\circ \\diamond \\oslash
+ \\bullet \\bigtriangleup \\odot
+ \\cdot \\bigtriangledown \\bigcirc
+ \\cap \\triangleleft \\dagger
+ \\cup \\triangleright \\ddagger
+ \\uplus \\lhd \\amalg'''.split())
+
+ _relation_symbols = set('''
+ = < > :
+ \\leq \\geq \\equiv \\models
+ \\prec \\succ \\sim \\perp
+ \\preceq \\succeq \\simeq \\mid
+ \\ll \\gg \\asymp \\parallel
+ \\subset \\supset \\approx \\bowtie
+ \\subseteq \\supseteq \\cong \\Join
+ \\sqsubset \\sqsupset \\neq \\smile
+ \\sqsubseteq \\sqsupseteq \\doteq \\frown
+ \\in \\ni \\propto \\vdash
+ \\dashv \\dots \\dotplus \\doteqdot'''.split())
+
+ _arrow_symbols = set('''
+ \\leftarrow \\longleftarrow \\uparrow
+ \\Leftarrow \\Longleftarrow \\Uparrow
+ \\rightarrow \\longrightarrow \\downarrow
+ \\Rightarrow \\Longrightarrow \\Downarrow
+ \\leftrightarrow \\longleftrightarrow \\updownarrow
+ \\Leftrightarrow \\Longleftrightarrow \\Updownarrow
+ \\mapsto \\longmapsto \\nearrow
+ \\hookleftarrow \\hookrightarrow \\searrow
+ \\leftharpoonup \\rightharpoonup \\swarrow
+ \\leftharpoondown \\rightharpoondown \\nwarrow
+ \\rightleftharpoons \\leadsto'''.split())
+
+ _spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
+
+ _punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
+
+ _overunder_symbols = set(r'''
+ \sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
+ \bigwedge \bigodot \bigotimes \bigoplus \biguplus
+ '''.split())
+
+ _overunder_functions = set(
+ r"lim liminf limsup sup max min".split())
+
+ _dropsub_symbols = set(r'''\int \oint'''.split())
+
+ _fontnames = set("rm cal it tt sf bf default bb frak circled scr regular".split())
+
+ _function_names = set("""
+ arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
+ liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
+ coth inf max tanh""".split())
+
+ _ambi_delim = set("""
+ | \\| / \\backslash \\uparrow \\downarrow \\updownarrow \\Uparrow
+ \\Downarrow \\Updownarrow . \\vert \\Vert \\\\|""".split())
+
+ _left_delim = set(r"( [ \{ < \lfloor \langle \lceil".split())
+
+ _right_delim = set(r") ] \} > \rfloor \rangle \rceil".split())
+
+ def __init__(self):
+ p = Bunch()
+ # All forward declarations are here
+ p.accent = Forward()
+ p.ambi_delim = Forward()
+ p.apostrophe = Forward()
+ p.auto_delim = Forward()
+ p.binom = Forward()
+ p.bslash = Forward()
+ p.c_over_c = Forward()
+ p.customspace = Forward()
+ p.end_group = Forward()
+ p.float_literal = Forward()
+ p.font = Forward()
+ p.frac = Forward()
+ p.dfrac = Forward()
+ p.function = Forward()
+ p.genfrac = Forward()
+ p.group = Forward()
+ p.int_literal = Forward()
+ p.latexfont = Forward()
+ p.lbracket = Forward()
+ p.left_delim = Forward()
+ p.lbrace = Forward()
+ p.main = Forward()
+ p.math = Forward()
+ p.math_string = Forward()
+ p.non_math = Forward()
+ p.operatorname = Forward()
+ p.overline = Forward()
+ p.placeable = Forward()
+ p.rbrace = Forward()
+ p.rbracket = Forward()
+ p.required_group = Forward()
+ p.right_delim = Forward()
+ p.right_delim_safe = Forward()
+ p.simple = Forward()
+ p.simple_group = Forward()
+ p.single_symbol = Forward()
+ p.snowflake = Forward()
+ p.space = Forward()
+ p.sqrt = Forward()
+ p.stackrel = Forward()
+ p.start_group = Forward()
+ p.subsuper = Forward()
+ p.subsuperop = Forward()
+ p.symbol = Forward()
+ p.symbol_name = Forward()
+ p.token = Forward()
+ p.unknown_symbol = Forward()
+
+ # Set names on everything -- very useful for debugging
+ for key, val in vars(p).items():
+ if not key.startswith('_'):
+ val.setName(key)
+
+ p.float_literal <<= Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
+ p.int_literal <<= Regex("[-+]?[0-9]+")
+
+ p.lbrace <<= Literal('{').suppress()
+ p.rbrace <<= Literal('}').suppress()
+ p.lbracket <<= Literal('[').suppress()
+ p.rbracket <<= Literal(']').suppress()
+ p.bslash <<= Literal('\\')
+
+ p.space <<= oneOf(list(self._space_widths))
+ p.customspace <<= (Suppress(Literal(r'\hspace'))
+ - ((p.lbrace + p.float_literal + p.rbrace)
+ | Error(r"Expected \hspace{n}")))
+
+ unicode_range = "\U00000080-\U0001ffff"
+ p.single_symbol <<= Regex(r"([a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|%s])|(\\[%%${}\[\]_|])" %
+ unicode_range)
+ p.snowflake <<= Suppress(p.bslash) + oneOf(self._snowflake)
+ p.symbol_name <<= (Combine(p.bslash + oneOf(list(tex2uni))) +
+ FollowedBy(Regex("[^A-Za-z]").leaveWhitespace() | StringEnd()))
+ p.symbol <<= (p.single_symbol | p.symbol_name).leaveWhitespace()
+
+ p.apostrophe <<= Regex("'+")
+
+ p.c_over_c <<= Suppress(p.bslash) + oneOf(list(self._char_over_chars))
+
+ p.accent <<= Group(
+ Suppress(p.bslash)
+ + oneOf(list(self._accent_map) + list(self._wide_accents))
+ - p.placeable
+ )
+
+ p.function <<= Suppress(p.bslash) + oneOf(list(self._function_names))
+
+ p.start_group <<= Optional(p.latexfont) + p.lbrace
+ p.end_group <<= p.rbrace.copy()
+ p.simple_group <<= Group(p.lbrace + ZeroOrMore(p.token) + p.rbrace)
+ p.required_group<<= Group(p.lbrace + OneOrMore(p.token) + p.rbrace)
+ p.group <<= Group(p.start_group + ZeroOrMore(p.token) + p.end_group)
+
+ p.font <<= Suppress(p.bslash) + oneOf(list(self._fontnames))
+ p.latexfont <<= Suppress(p.bslash) + oneOf(['math' + x for x in self._fontnames])
+
+ p.frac <<= Group(
+ Suppress(Literal(r"\frac"))
+ - ((p.required_group + p.required_group) | Error(r"Expected \frac{num}{den}"))
+ )
+
+ p.dfrac <<= Group(
+ Suppress(Literal(r"\dfrac"))
+ - ((p.required_group + p.required_group) | Error(r"Expected \dfrac{num}{den}"))
+ )
+
+ p.stackrel <<= Group(
+ Suppress(Literal(r"\stackrel"))
+ - ((p.required_group + p.required_group) | Error(r"Expected \stackrel{num}{den}"))
+ )
+
+ p.binom <<= Group(
+ Suppress(Literal(r"\binom"))
+ - ((p.required_group + p.required_group) | Error(r"Expected \binom{num}{den}"))
+ )
+
+ p.ambi_delim <<= oneOf(list(self._ambi_delim))
+ p.left_delim <<= oneOf(list(self._left_delim))
+ p.right_delim <<= oneOf(list(self._right_delim))
+ p.right_delim_safe <<= oneOf(list(self._right_delim - {'}'}) + [r'\}'])
+
+ p.genfrac <<= Group(
+ Suppress(Literal(r"\genfrac"))
+ - (((p.lbrace + Optional(p.ambi_delim | p.left_delim, default='') + p.rbrace)
+ + (p.lbrace + Optional(p.ambi_delim | p.right_delim_safe, default='') + p.rbrace)
+ + (p.lbrace + p.float_literal + p.rbrace)
+ + p.simple_group + p.required_group + p.required_group)
+ | Error(r"Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}"))
+ )
+
+ p.sqrt <<= Group(
+ Suppress(Literal(r"\sqrt"))
+ - ((Optional(p.lbracket + p.int_literal + p.rbracket, default=None)
+ + p.required_group)
+ | Error("Expected \\sqrt{value}"))
+ )
+
+ p.overline <<= Group(
+ Suppress(Literal(r"\overline"))
+ - (p.required_group | Error("Expected \\overline{value}"))
+ )
+
+ p.unknown_symbol<<= Combine(p.bslash + Regex("[A-Za-z]*"))
+
+ p.operatorname <<= Group(
+ Suppress(Literal(r"\operatorname"))
+ - ((p.lbrace + ZeroOrMore(p.simple | p.unknown_symbol) + p.rbrace)
+ | Error("Expected \\operatorname{value}"))
+ )
+
+ p.placeable <<= ( p.snowflake # this needs to be before accent so named symbols
+ # that are prefixed with an accent name work
+ | p.accent # Must be before symbol as all accents are symbols
+ | p.symbol # Must be third to catch all named symbols and single chars not in a group
+ | p.c_over_c
+ | p.function
+ | p.group
+ | p.frac
+ | p.dfrac
+ | p.stackrel
+ | p.binom
+ | p.genfrac
+ | p.sqrt
+ | p.overline
+ | p.operatorname
+ )
+
+ p.simple <<= ( p.space
+ | p.customspace
+ | p.font
+ | p.subsuper
+ )
+
+ p.subsuperop <<= oneOf(["_", "^"])
+
+ p.subsuper <<= Group(
+ (Optional(p.placeable) + OneOrMore(p.subsuperop - p.placeable) + Optional(p.apostrophe))
+ | (p.placeable + Optional(p.apostrophe))
+ | p.apostrophe
+ )
+
+ p.token <<= ( p.simple
+ | p.auto_delim
+ | p.unknown_symbol # Must be last
+ )
+
+ p.auto_delim <<= (Suppress(Literal(r"\left"))
+ - ((p.left_delim | p.ambi_delim) | Error("Expected a delimiter"))
+ + Group(ZeroOrMore(p.simple | p.auto_delim))
+ + Suppress(Literal(r"\right"))
+ - ((p.right_delim | p.ambi_delim) | Error("Expected a delimiter"))
+ )
+
+ p.math <<= OneOrMore(p.token)
+
+ p.math_string <<= QuotedString('$', '\\', unquoteResults=False)
+
+ p.non_math <<= Regex(r"(?:(?:\\[$])|[^$])*").leaveWhitespace()
+
+ p.main <<= (p.non_math + ZeroOrMore(p.math_string + p.non_math)) + StringEnd()
+
+ # Set actions
+ for key, val in vars(p).items():
+ if not key.startswith('_'):
+ if hasattr(self, key):
+ val.setParseAction(getattr(self, key))
+
+ self._expression = p.main
+ self._math_expression = p.math
+
+ def parse(self, s, fonts_object, fontsize, dpi):
+ """
+ Parse expression *s* using the given *fonts_object* for
+ output, at the given *fontsize* and *dpi*.
+
+ Returns the parse tree of :class:`Node` instances.
+ """
+ self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
+ self._em_width_cache = {}
+ try:
+ result = self._expression.parseString(s)
+ except ParseBaseException as err:
+ raise ValueError("\n".join([
+ "",
+ err.line,
+ " " * (err.column - 1) + "^",
+ six.text_type(err)]))
+ self._state_stack = None
+ self._em_width_cache = {}
+ self._expression.resetCache()
+ return result[0]
+
+ # The state of the parser is maintained in a stack. Upon
+ # entering and leaving a group { } or math/non-math, the stack
+ # is pushed and popped accordingly. The current state always
+ # exists in the top element of the stack.
+ class State(object):
+ """
+ Stores the state of the parser.
+
+ States are pushed and popped from a stack as necessary, and
+ the "current" state is always at the top of the stack.
+ """
+ def __init__(self, font_output, font, font_class, fontsize, dpi):
+ self.font_output = font_output
+ self._font = font
+ self.font_class = font_class
+ self.fontsize = fontsize
+ self.dpi = dpi
+
+ def copy(self):
+ return Parser.State(
+ self.font_output,
+ self.font,
+ self.font_class,
+ self.fontsize,
+ self.dpi)
+
+ def _get_font(self):
+ return self._font
+ def _set_font(self, name):
+ if name in ('rm', 'it', 'bf'):
+ self.font_class = name
+ self._font = name
+ font = property(_get_font, _set_font)
+
+ def get_state(self):
+ """
+ Get the current :class:`State` of the parser.
+ """
+ return self._state_stack[-1]
+
+ def pop_state(self):
+ """
+ Pop a :class:`State` off of the stack.
+ """
+ self._state_stack.pop()
+
+ def push_state(self):
+ """
+ Push a new :class:`State` onto the stack which is just a copy
+ of the current state.
+ """
+ self._state_stack.append(self.get_state().copy())
+
+ def main(self, s, loc, toks):
+ return [Hlist(toks)]
+
+ def math_string(self, s, loc, toks):
+ return self._math_expression.parseString(toks[0][1:-1])
+
+ def math(self, s, loc, toks):
+ hlist = Hlist(toks)
+ self.pop_state()
+ return [hlist]
+
+ def non_math(self, s, loc, toks):
+ s = toks[0].replace(r'\$', '$')
+ symbols = [Char(c, self.get_state(), math=False) for c in s]
+ hlist = Hlist(symbols)
+ # We're going into math now, so set font to 'it'
+ self.push_state()
+ self.get_state().font = rcParams['mathtext.default']
+ return [hlist]
+
+ def _make_space(self, percentage):
+ # All spaces are relative to em width
+ state = self.get_state()
+ key = (state.font, state.fontsize, state.dpi)
+ width = self._em_width_cache.get(key)
+ if width is None:
+ metrics = state.font_output.get_metrics(
+ state.font, rcParams['mathtext.default'], 'm', state.fontsize, state.dpi)
+ width = metrics.advance
+ self._em_width_cache[key] = width
+ return Kern(width * percentage)
+
+ _space_widths = { r'\,' : 0.16667, # 3/18 em = 3 mu
+ r'\thinspace' : 0.16667, # 3/18 em = 3 mu
+ r'\/' : 0.16667, # 3/18 em = 3 mu
+ r'\>' : 0.22222, # 4/18 em = 4 mu
+ r'\:' : 0.22222, # 4/18 em = 4 mu
+ r'\;' : 0.27778, # 5/18 em = 5 mu
+ r'\ ' : 0.33333, # 6/18 em = 6 mu
+ r'\enspace' : 0.5, # 9/18 em = 9 mu
+ r'\quad' : 1, # 1 em = 18 mu
+ r'\qquad' : 2, # 2 em = 36 mu
+ r'\!' : -0.16667, # -3/18 em = -3 mu
+ }
+ def space(self, s, loc, toks):
+ assert len(toks)==1
+ num = self._space_widths[toks[0]]
+ box = self._make_space(num)
+ return [box]
+
+ def customspace(self, s, loc, toks):
+ return [self._make_space(float(toks[0]))]
+
+ def symbol(self, s, loc, toks):
+ c = toks[0]
+ try:
+ char = Char(c, self.get_state())
+ except ValueError:
+ raise ParseFatalException(s, loc, "Unknown symbol: %s" % c)
+
+ if c in self._spaced_symbols:
+ # iterate until we find previous character, needed for cases
+ # such as ${ -2}$, $ -2$, or $ -2$.
+ for i in six.moves.xrange(1, loc + 1):
+ prev_char = s[loc-i]
+ if prev_char != ' ':
+ break
+ # Binary operators at start of string should not be spaced
+ if (c in self._binary_operators and
+ (len(s[:loc].split()) == 0 or prev_char == '{' or
+ prev_char in self._left_delim)):
+ return [char]
+ else:
+ return [Hlist([self._make_space(0.2),
+ char,
+ self._make_space(0.2)] ,
+ do_kern = True)]
+ elif c in self._punctuation_symbols:
+
+ # Do not space commas between brackets
+ if c == ',':
+ prev_char, next_char = '', ''
+ for i in six.moves.xrange(1, loc + 1):
+ prev_char = s[loc - i]
+ if prev_char != ' ':
+ break
+ for i in six.moves.xrange(1, len(s) - loc):
+ next_char = s[loc + i]
+ if next_char != ' ':
+ break
+ if (prev_char == '{' and next_char == '}'):
+ return [char]
+
+ # Do not space dots as decimal separators
+ if (c == '.' and s[loc - 1].isdigit() and s[loc + 1].isdigit()):
+ return [char]
+ else:
+ return [Hlist([char,
+ self._make_space(0.2)],
+ do_kern = True)]
+ return [char]
+
+ snowflake = symbol
+
+ def unknown_symbol(self, s, loc, toks):
+ c = toks[0]
+ raise ParseFatalException(s, loc, "Unknown symbol: %s" % c)
+
+ _char_over_chars = {
+ # The first 2 entries in the tuple are (font, char, sizescale) for
+ # the two symbols under and over. The third element is the space
+ # (in multiples of underline height)
+ r'AA': (('it', 'A', 1.0), (None, '\\circ', 0.5), 0.0),
+ }
+
+ def c_over_c(self, s, loc, toks):
+ sym = toks[0]
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+
+ under_desc, over_desc, space = \
+ self._char_over_chars.get(sym, (None, None, 0.0))
+ if under_desc is None:
+ raise ParseFatalException("Error parsing symbol")
+
+ over_state = state.copy()
+ if over_desc[0] is not None:
+ over_state.font = over_desc[0]
+ over_state.fontsize *= over_desc[2]
+ over = Accent(over_desc[1], over_state)
+
+ under_state = state.copy()
+ if under_desc[0] is not None:
+ under_state.font = under_desc[0]
+ under_state.fontsize *= under_desc[2]
+ under = Char(under_desc[1], under_state)
+
+ width = max(over.width, under.width)
+
+ over_centered = HCentered([over])
+ over_centered.hpack(width, 'exactly')
+
+ under_centered = HCentered([under])
+ under_centered.hpack(width, 'exactly')
+
+ return Vlist([
+ over_centered,
+ Vbox(0., thickness * space),
+ under_centered
+ ])
+
+ _accent_map = {
+ r'hat' : r'\circumflexaccent',
+ r'breve' : r'\combiningbreve',
+ r'bar' : r'\combiningoverline',
+ r'grave' : r'\combininggraveaccent',
+ r'acute' : r'\combiningacuteaccent',
+ r'tilde' : r'\combiningtilde',
+ r'dot' : r'\combiningdotabove',
+ r'ddot' : r'\combiningdiaeresis',
+ r'vec' : r'\combiningrightarrowabove',
+ r'"' : r'\combiningdiaeresis',
+ r"`" : r'\combininggraveaccent',
+ r"'" : r'\combiningacuteaccent',
+ r'~' : r'\combiningtilde',
+ r'.' : r'\combiningdotabove',
+ r'^' : r'\circumflexaccent',
+ r'overrightarrow' : r'\rightarrow',
+ r'overleftarrow' : r'\leftarrow',
+ r'mathring' : r'\circ'
+ }
+
+ _wide_accents = set(r"widehat widetilde widebar".split())
+
+ # make a lambda and call it to get the namespace right
+ _snowflake = (lambda am: [p for p in tex2uni if
+ any(p.startswith(a) and a != p for a in am)]
+ ) (set(_accent_map))
+
+ def accent(self, s, loc, toks):
+ assert len(toks)==1
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ if len(toks[0]) != 2:
+ raise ParseFatalException("Error parsing accent")
+ accent, sym = toks[0]
+ if accent in self._wide_accents:
+ accent_box = AutoWidthChar(
+ '\\' + accent, sym.width, state, char_class=Accent)
+ else:
+ accent_box = Accent(self._accent_map[accent], state)
+ if accent == 'mathring':
+ accent_box.shrink()
+ accent_box.shrink()
+ centered = HCentered([Hbox(sym.width / 4.0), accent_box])
+ centered.hpack(sym.width, 'exactly')
+ return Vlist([
+ centered,
+ Vbox(0., thickness * 2.0),
+ Hlist([sym])
+ ])
+
+ def function(self, s, loc, toks):
+ self.push_state()
+ state = self.get_state()
+ state.font = 'rm'
+ hlist = Hlist([Char(c, state) for c in toks[0]])
+ self.pop_state()
+ hlist.function_name = toks[0]
+ return hlist
+
+ def operatorname(self, s, loc, toks):
+ self.push_state()
+ state = self.get_state()
+ state.font = 'rm'
+ # Change the font of Chars, but leave Kerns alone
+ for c in toks[0]:
+ if isinstance(c, Char):
+ c.font = 'rm'
+ c._update_metrics()
+ self.pop_state()
+ return Hlist(toks[0])
+
+ def start_group(self, s, loc, toks):
+ self.push_state()
+ # Deal with LaTeX-style font tokens
+ if len(toks):
+ self.get_state().font = toks[0][4:]
+ return []
+
+ def group(self, s, loc, toks):
+ grp = Hlist(toks[0])
+ return [grp]
+ required_group = simple_group = group
+
+ def end_group(self, s, loc, toks):
+ self.pop_state()
+ return []
+
+ def font(self, s, loc, toks):
+ assert len(toks)==1
+ name = toks[0]
+ self.get_state().font = name
+ return []
+
+ def is_overunder(self, nucleus):
+ if isinstance(nucleus, Char):
+ return nucleus.c in self._overunder_symbols
+ elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
+ return nucleus.function_name in self._overunder_functions
+ return False
+
+ def is_dropsub(self, nucleus):
+ if isinstance(nucleus, Char):
+ return nucleus.c in self._dropsub_symbols
+ return False
+
+ def is_slanted(self, nucleus):
+ if isinstance(nucleus, Char):
+ return nucleus.is_slanted()
+ return False
+
+ def is_between_brackets(self, s, loc):
+ return False
+
+ def subsuper(self, s, loc, toks):
+ assert len(toks)==1
+
+ nucleus = None
+ sub = None
+ super = None
+
+ # Pick all of the apostrophes out, including first apostrophes that have
+ # been parsed as characters
+ napostrophes = 0
+ new_toks = []
+ for tok in toks[0]:
+ if isinstance(tok, six.string_types) and tok not in ('^', '_'):
+ napostrophes += len(tok)
+ elif isinstance(tok, Char) and tok.c == "'":
+ napostrophes += 1
+ else:
+ new_toks.append(tok)
+ toks = new_toks
+
+ if len(toks) == 0:
+ assert napostrophes
+ nucleus = Hbox(0.0)
+ elif len(toks) == 1:
+ if not napostrophes:
+ return toks[0] # .asList()
+ else:
+ nucleus = toks[0]
+ elif len(toks) in (2, 3):
+ # single subscript or superscript
+ nucleus = toks[0] if len(toks) == 3 else Hbox(0.0)
+ op, next = toks[-2:]
+ if op == '_':
+ sub = next
+ else:
+ super = next
+ elif len(toks) in (4, 5):
+ # subscript and superscript
+ nucleus = toks[0] if len(toks) == 5 else Hbox(0.0)
+ op1, next1, op2, next2 = toks[-4:]
+ if op1 == op2:
+ if op1 == '_':
+ raise ParseFatalException("Double subscript")
+ else:
+ raise ParseFatalException("Double superscript")
+ if op1 == '_':
+ sub = next1
+ super = next2
+ else:
+ super = next1
+ sub = next2
+ else:
+ raise ParseFatalException(
+ "Subscript/superscript sequence is too long. "
+ "Use braces { } to remove ambiguity.")
+
+ state = self.get_state()
+ rule_thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ xHeight = state.font_output.get_xheight(
+ state.font, state.fontsize, state.dpi)
+
+ if napostrophes:
+ if super is None:
+ super = Hlist([])
+ for i in range(napostrophes):
+ super.children.extend(self.symbol(s, loc, ['\\prime']))
+ # kern() and hpack() needed to get the metrics right after extending
+ super.kern()
+ super.hpack()
+
+ # Handle over/under symbols, such as sum or integral
+ if self.is_overunder(nucleus):
+ vlist = []
+ shift = 0.
+ width = nucleus.width
+ if super is not None:
+ super.shrink()
+ width = max(width, super.width)
+ if sub is not None:
+ sub.shrink()
+ width = max(width, sub.width)
+
+ if super is not None:
+ hlist = HCentered([super])
+ hlist.hpack(width, 'exactly')
+ vlist.extend([hlist, Kern(rule_thickness * 3.0)])
+ hlist = HCentered([nucleus])
+ hlist.hpack(width, 'exactly')
+ vlist.append(hlist)
+ if sub is not None:
+ hlist = HCentered([sub])
+ hlist.hpack(width, 'exactly')
+ vlist.extend([Kern(rule_thickness * 3.0), hlist])
+ shift = hlist.height
+ vlist = Vlist(vlist)
+ vlist.shift_amount = shift + nucleus.depth
+ result = Hlist([vlist])
+ return [result]
+
+ # We remove kerning on the last character for consistency (otherwise it
+    # will compute kerning based on unshrunk characters and may put them
+ # too close together when superscripted)
+ # We change the width of the last character to match the advance to
+ # consider some fonts with weird metrics: e.g. stix's f has a width of
+ # 7.75 and a kerning of -4.0 for an advance of 3.72, and we want to put
+ # the superscript at the advance
+ last_char = nucleus
+ if isinstance(nucleus, Hlist):
+ new_children = nucleus.children
+ if len(new_children):
+ # remove last kern
+ if (isinstance(new_children[-1],Kern) and
+ hasattr(new_children[-2], '_metrics')):
+ new_children = new_children[:-1]
+ last_char = new_children[-1]
+ if hasattr(last_char, '_metrics'):
+ last_char.width = last_char._metrics.advance
+ # create new Hlist without kerning
+ nucleus = Hlist(new_children, do_kern=False)
+ else:
+ if isinstance(nucleus, Char):
+ last_char.width = last_char._metrics.advance
+ nucleus = Hlist([nucleus])
+
+ # Handle regular sub/superscripts
+ constants = _get_font_constant_set(state)
+ lc_height = last_char.height
+ lc_baseline = 0
+ if self.is_dropsub(last_char):
+ lc_baseline = last_char.depth
+
+ # Compute kerning for sub and super
+ superkern = constants.delta * xHeight
+ subkern = constants.delta * xHeight
+ if self.is_slanted(last_char):
+ superkern += constants.delta * xHeight
+ superkern += (constants.delta_slanted *
+ (lc_height - xHeight * 2. / 3.))
+ if self.is_dropsub(last_char):
+ subkern = (3 * constants.delta -
+ constants.delta_integral) * lc_height
+ superkern = (3 * constants.delta +
+ constants.delta_integral) * lc_height
+ else:
+ subkern = 0
+
+ if super is None:
+ # node757
+ x = Hlist([Kern(subkern), sub])
+ x.shrink()
+ if self.is_dropsub(last_char):
+ shift_down = lc_baseline + constants.subdrop * xHeight
+ else:
+ shift_down = constants.sub1 * xHeight
+ x.shift_amount = shift_down
+ else:
+ x = Hlist([Kern(superkern), super])
+ x.shrink()
+ if self.is_dropsub(last_char):
+ shift_up = lc_height - constants.subdrop * xHeight
+ else:
+ shift_up = constants.sup1 * xHeight
+ if sub is None:
+ x.shift_amount = -shift_up
+ else: # Both sub and superscript
+ y = Hlist([Kern(subkern),sub])
+ y.shrink()
+ if self.is_dropsub(last_char):
+ shift_down = lc_baseline + constants.subdrop * xHeight
+ else:
+ shift_down = constants.sub2 * xHeight
+ # If sub and superscript collide, move super up
+ clr = (2.0 * rule_thickness -
+ ((shift_up - x.depth) - (y.height - shift_down)))
+ if clr > 0.:
+ shift_up += clr
+ x = Vlist([x,
+ Kern((shift_up - x.depth) - (y.height - shift_down)),
+ y])
+ x.shift_amount = shift_down
+
+ if not self.is_dropsub(last_char):
+ x.width += constants.script_space * xHeight
+ result = Hlist([nucleus, x])
+
+ return [result]
+
+ def _genfrac(self, ldelim, rdelim, rule, style, num, den):
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+
+ rule = float(rule)
+
+ # If style != displaystyle == 0, shrink the num and den
+ if style != self._math_style_dict['displaystyle']:
+ num.shrink()
+ den.shrink()
+ cnum = HCentered([num])
+ cden = HCentered([den])
+ width = max(num.width, den.width)
+ cnum.hpack(width, 'exactly')
+ cden.hpack(width, 'exactly')
+ vlist = Vlist([cnum, # numerator
+ Vbox(0, thickness * 2.0), # space
+ Hrule(state, rule), # rule
+ Vbox(0, thickness * 2.0), # space
+ cden # denominator
+ ])
+
+ # Shift so the fraction line sits in the middle of the
+ # equals sign
+ metrics = state.font_output.get_metrics(
+ state.font, rcParams['mathtext.default'],
+ '=', state.fontsize, state.dpi)
+ shift = (cden.height -
+ ((metrics.ymax + metrics.ymin) / 2 -
+ thickness * 3.0))
+ vlist.shift_amount = shift
+
+ result = [Hlist([vlist, Hbox(thickness * 2.)])]
+ if ldelim or rdelim:
+ if ldelim == '':
+ ldelim = '.'
+ if rdelim == '':
+ rdelim = '.'
+ return self._auto_sized_delimiter(ldelim, result, rdelim)
+ return result
+
+ def genfrac(self, s, loc, toks):
+ assert len(toks) == 1
+ assert len(toks[0]) == 6
+
+ return self._genfrac(*tuple(toks[0]))
+
+ def frac(self, s, loc, toks):
+ assert len(toks) == 1
+ assert len(toks[0]) == 2
+ state = self.get_state()
+
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ num, den = toks[0]
+
+ return self._genfrac('', '', thickness,
+ self._math_style_dict['textstyle'], num, den)
+
+ def dfrac(self, s, loc, toks):
+ assert len(toks) == 1
+ assert len(toks[0]) == 2
+ state = self.get_state()
+
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ num, den = toks[0]
+
+ return self._genfrac('', '', thickness,
+ self._math_style_dict['displaystyle'], num, den)
+
+ def stackrel(self, s, loc, toks):
+ assert len(toks) == 1
+ assert len(toks[0]) == 2
+ num, den = toks[0]
+
+ return self._genfrac('', '', 0.0,
+ self._math_style_dict['textstyle'], num, den)
+
+ def binom(self, s, loc, toks):
+ assert len(toks) == 1
+ assert len(toks[0]) == 2
+ num, den = toks[0]
+
+ return self._genfrac('(', ')', 0.0,
+ self._math_style_dict['textstyle'], num, den)
+
+ def sqrt(self, s, loc, toks):
+ root, body = toks[0]
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+
+ # Determine the height of the body, and add a little extra to
+ # the height so it doesn't seem cramped
+ height = body.height - body.shift_amount + thickness * 5.0
+ depth = body.depth + body.shift_amount
+ check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
+ height = check.height - check.shift_amount
+ depth = check.depth + check.shift_amount
+
+ # Put a little extra space to the left and right of the body
+ padded_body = Hlist([Hbox(thickness * 2.0),
+ body,
+ Hbox(thickness * 2.0)])
+ rightside = Vlist([Hrule(state),
+ Fill(),
+ padded_body])
+ # Stretch the glue between the hrule and the body
+ rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
+ 'exactly', depth)
+
+ # Add the root and shift it upward so it is above the tick.
+ # The value of 0.6 is a hard-coded hack ;)
+ if root is None:
+ root = Box(check.width * 0.5, 0., 0.)
+ else:
+ root = Hlist([Char(x, state) for x in root])
+ root.shrink()
+ root.shrink()
+
+ root_vlist = Vlist([Hlist([root])])
+ root_vlist.shift_amount = -height * 0.6
+
+ hlist = Hlist([root_vlist, # Root
+ # Negative kerning to put root over tick
+ Kern(-check.width * 0.5),
+ check, # Check
+ rightside]) # Body
+ return [hlist]
+
+ def overline(self, s, loc, toks):
+ assert len(toks)==1
+ assert len(toks[0])==1
+
+ body = toks[0][0]
+
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+
+ height = body.height - body.shift_amount + thickness * 3.0
+ depth = body.depth + body.shift_amount
+
+ # Place overline above body
+ rightside = Vlist([Hrule(state),
+ Fill(),
+ Hlist([body])])
+
+ # Stretch the glue between the hrule and the body
+ rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
+ 'exactly', depth)
+
+ hlist = Hlist([rightside])
+ return [hlist]
+
+ def _auto_sized_delimiter(self, front, middle, back):
+ state = self.get_state()
+ if len(middle):
+ height = max(x.height for x in middle)
+ depth = max(x.depth for x in middle)
+ factor = None
+ else:
+ height = 0
+ depth = 0
+ factor = 1.0
+ parts = []
+ # \left. and \right. aren't supposed to produce any symbols
+ if front != '.':
+ parts.append(AutoHeightChar(front, height, depth, state, factor=factor))
+ parts.extend(middle)
+ if back != '.':
+ parts.append(AutoHeightChar(back, height, depth, state, factor=factor))
+ hlist = Hlist(parts)
+ return hlist
+
+ def auto_delim(self, s, loc, toks):
+ front, middle, back = toks
+
+ return self._auto_sized_delimiter(front, middle.asList(), back)
+
+###
+
+##############################################################################
+# MAIN
+
+class MathTextParser(object):
+ _parser = None
+
+ _backend_mapping = {
+ 'bitmap': MathtextBackendBitmap,
+ 'agg' : MathtextBackendAgg,
+ 'ps' : MathtextBackendPs,
+ 'pdf' : MathtextBackendPdf,
+ 'svg' : MathtextBackendSvg,
+ 'path' : MathtextBackendPath,
+ 'cairo' : MathtextBackendCairo,
+ 'macosx': MathtextBackendAgg,
+ }
+
+ _font_type_mapping = {
+ 'cm' : BakomaFonts,
+ 'dejavuserif' : DejaVuSerifFonts,
+ 'dejavusans' : DejaVuSansFonts,
+ 'stix' : StixFonts,
+ 'stixsans' : StixSansFonts,
+ 'custom' : UnicodeFonts
+ }
+
+ def __init__(self, output):
+ """
+ Create a MathTextParser for the given backend *output*.
+ """
+ self._output = output.lower()
+ self._cache = maxdict(50)
+
+ def parse(self, s, dpi = 72, prop = None):
+ """
+ Parse the given math expression *s* at the given *dpi*. If
+ *prop* is provided, it is a
+ :class:`~matplotlib.font_manager.FontProperties` object
+ specifying the "default" font to use in the math expression,
+ used for all non-math text.
+
+ The results are cached, so multiple calls to :meth:`parse`
+ with the same expression should be fast.
+ """
+ # There is a bug in Python 3.x where it leaks frame references,
+ # and therefore can't handle this caching
+ if prop is None:
+ prop = FontProperties()
+
+ cacheKey = (s, dpi, hash(prop))
+ result = self._cache.get(cacheKey)
+ if result is not None:
+ return result
+
+ if self._output == 'ps' and rcParams['ps.useafm']:
+ font_output = StandardPsFonts(prop)
+ else:
+ backend = self._backend_mapping[self._output]()
+ fontset = rcParams['mathtext.fontset']
+ fontset_class = self._font_type_mapping.get(fontset.lower())
+ if fontset_class is not None:
+ font_output = fontset_class(prop, backend)
+ else:
+ raise ValueError(
+ "mathtext.fontset must be either 'cm', 'dejavuserif', "
+ "'dejavusans', 'stix', 'stixsans', or 'custom'")
+
+ fontsize = prop.get_size_in_points()
+
+ # This is a class variable so we don't rebuild the parser
+ # with each request.
+ if self._parser is None:
+ self.__class__._parser = Parser()
+
+ box = self._parser.parse(s, font_output, fontsize, dpi)
+ font_output.set_canvas_size(box.width, box.height, box.depth)
+ result = font_output.get_results(box)
+ self._cache[cacheKey] = result
+ return result
+
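+    # A hedged usage sketch (not part of the original docstring): with the
+    # 'bitmap' output, parse() returns the pair produced by the font set's
+    # get_results(), roughly
+    #
+    #     parser = MathTextParser('bitmap')
+    #     ftimage, depth = parser.parse(r'$\sqrt{x}$', dpi=100)
+    #
+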
+ def to_mask(self, texstr, dpi=120, fontsize=14):
+ """
+ *texstr*
+ A valid mathtext string, e.g., r'IQ: $\\sigma_i=15$'
+
+ *dpi*
+ The dots-per-inch to render the text
+
+ *fontsize*
+ The font size in points
+
+ Returns a tuple (*array*, *depth*)
+
+ - *array* is an NxM uint8 alpha ubyte mask array of
+ rasterized tex.
+
+ - depth is the offset of the baseline from the bottom of the
+ image in pixels.
+ """
+ assert self._output == "bitmap"
+ prop = FontProperties(size=fontsize)
+ ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
+
+ x = ftimage.as_array()
+ return x, depth
+
+ def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
+ """
+ *texstr*
+ A valid mathtext string, e.g., r'IQ: $\\sigma_i=15$'
+
+ *color*
+ Any matplotlib color argument
+
+ *dpi*
+ The dots-per-inch to render the text
+
+ *fontsize*
+ The font size in points
+
+ Returns a tuple (*array*, *depth*)
+
+ - *array* is an NxM uint8 alpha ubyte mask array of
+ rasterized tex.
+
+ - depth is the offset of the baseline from the bottom of the
+ image in pixels.
+ """
+ x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
+
+ r, g, b, a = mcolors.to_rgba(color)
+ RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
+ RGBA[:, :, 0] = 255 * r
+ RGBA[:, :, 1] = 255 * g
+ RGBA[:, :, 2] = 255 * b
+ RGBA[:, :, 3] = x
+ return RGBA, depth
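+
+    # Hedged example (assumed call, not from the original docs): with a
+    # 'bitmap' parser one could write
+    #
+    #     rgba, depth = MathTextParser('bitmap').to_rgba(r'$\pi$', color='red')
+    #
+    # where ``rgba`` is an (M, N, 4) uint8 array and ``depth`` the baseline
+    # offset in pixels.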
+
+ def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
+ """
+ Writes a tex expression to a PNG file.
+
+ Returns the offset of the baseline from the bottom of the
+ image in pixels.
+
+ *filename*
+ A writable filename or fileobject
+
+ *texstr*
+ A valid mathtext string, e.g., r'IQ: $\\sigma_i=15$'
+
+ *color*
+ A valid matplotlib color argument
+
+ *dpi*
+ The dots-per-inch to render the text
+
+ *fontsize*
+ The font size in points
+
+ Returns the offset of the baseline from the bottom of the
+ image in pixels.
+ """
+ rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
+ _png.write_png(rgba, filename)
+ return depth
+
+ def get_depth(self, texstr, dpi=120, fontsize=14):
+ """
+ Returns the offset of the baseline from the bottom of the
+ image in pixels.
+
+ *texstr*
+ A valid mathtext string, e.g., r'IQ: $\\sigma_i=15$'
+
+ *dpi*
+ The dots-per-inch to render the text
+
+ *fontsize*
+ The font size in points
+ """
+ assert self._output=="bitmap"
+ prop = FontProperties(size=fontsize)
+ ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
+ return depth
+
+def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None):
+ """
+ Given a math expression, renders it in a closely-clipped bounding
+ box to an image file.
+
+ *s*
+ A math expression. The math portion should be enclosed in
+ dollar signs.
+
+ *filename_or_obj*
+ A filepath or writable file-like object to write the image data
+ to.
+
+ *prop*
+ If provided, a FontProperties() object describing the size and
+ style of the text.
+
+ *dpi*
+ Override the output dpi, otherwise use the default associated
+ with the output format.
+
+ *format*
+ The output format, e.g., 'svg', 'pdf', 'ps' or 'png'. If not
+ provided, will be deduced from the filename.
+ """
+ from matplotlib import figure
+ # backend_agg supports all of the core output formats
+ from matplotlib.backends import backend_agg
+
+ if prop is None:
+ prop = FontProperties()
+
+ parser = MathTextParser('path')
+ width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
+
+ fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
+ fig.text(0, depth/height, s, fontproperties=prop)
+ backend_agg.FigureCanvasAgg(fig)
+ fig.savefig(filename_or_obj, dpi=dpi, format=format)
+
+ return depth
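+
+# Hedged usage sketch for math_to_image (not part of the original module):
+#
+#     from matplotlib.mathtext import math_to_image
+#     baseline = math_to_image(r'$E = mc^2$', 'emc2.png', dpi=200)
+#
+# The math portion must be wrapped in dollar signs; the return value is the
+# baseline depth reported by the parser.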
diff --git a/contrib/python/matplotlib/py2/matplotlib/mlab.py b/contrib/python/matplotlib/py2/matplotlib/mlab.py
new file mode 100644
index 00000000000..bf4bc52a931
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/mlab.py
@@ -0,0 +1,4041 @@
+"""
+
+Numerical python functions written for compatibility with MATLAB
+commands with the same names.
+
+MATLAB compatible functions
+---------------------------
+
+:func:`cohere`
+ Coherence (normalized cross spectral density)
+
+:func:`csd`
+ Cross spectral density using Welch's average periodogram
+
+:func:`detrend`
+ Remove the mean or best fit line from an array
+
+:func:`find`
+ Return the indices where some condition is true;
+ numpy.nonzero is similar but more general.
+
+:func:`griddata`
+ Interpolate irregularly distributed data to a
+ regular grid.
+
+:func:`prctile`
+ Find the percentiles of a sequence
+
+:func:`prepca`
+ Principal Component Analysis
+
+:func:`psd`
+ Power spectral density using Welch's average periodogram
+
+:func:`rk4`
+    A 4th order Runge-Kutta integrator for 1D or ND systems
+
+:func:`specgram`
+ Spectrogram (spectrum over segments of time)
+
+Miscellaneous functions
+-----------------------
+
+Functions that don't exist in MATLAB, but are useful anyway:
+
+:func:`cohere_pairs`
+ Coherence over all pairs. This is not a MATLAB function, but we
+ compute coherence a lot in my lab, and we compute it for a lot of
+ pairs. This function is optimized to do this efficiently by
+ caching the direct FFTs.
+
+:func:`rk4`
+ A 4th order Runge-Kutta ODE integrator in case you ever find
+ yourself stranded without scipy (and the far superior
+ scipy.integrate tools)
+
+:func:`contiguous_regions`
+ Return the indices of the regions spanned by some logical mask
+
+:func:`cross_from_below`
+ Return the indices where a 1D array crosses a threshold from below
+
+:func:`cross_from_above`
+ Return the indices where a 1D array crosses a threshold from above
+
+:func:`complex_spectrum`
+ Return the complex-valued frequency spectrum of a signal
+
+:func:`magnitude_spectrum`
+ Return the magnitude of the frequency spectrum of a signal
+
+:func:`angle_spectrum`
+ Return the angle (wrapped phase) of the frequency spectrum of a signal
+
+:func:`phase_spectrum`
+ Return the phase (unwrapped angle) of the frequency spectrum of a signal
+
+:func:`detrend_mean`
+ Remove the mean from a line.
+
+:func:`demean`
+ Remove the mean from a line. This function is the same as
+ :func:`detrend_mean` except for the default *axis*.
+
+:func:`detrend_linear`
+ Remove the best fit line from a line.
+
+:func:`detrend_none`
+ Return the original line.
+
+:func:`stride_windows`
+ Get all windows in an array in a memory-efficient manner
+
+:func:`stride_repeat`
+ Repeat an array in a memory-efficient manner
+
+:func:`apply_window`
+ Apply a window along a given axis
+
+
+record array helper functions
+-----------------------------
+
+A collection of helper methods for numpy record arrays
+
+.. _htmlonly:
+
+ See :ref:`misc-examples-index`
+
+:func:`rec2txt`
+ Pretty print a record array
+
+:func:`rec2csv`
+ Store record array in CSV file
+
+:func:`csv2rec`
+ Import record array from CSV file with type inspection
+
+:func:`rec_append_fields`
+ Adds field(s)/array(s) to record array
+
+:func:`rec_drop_fields`
+ Drop fields from record array
+
+:func:`rec_join`
+ Join two record arrays on sequence of fields
+
+:func:`recs_join`
+ A simple join of multiple recarrays using a single column as a key
+
+:func:`rec_groupby`
+ Summarize data by groups (similar to SQL GROUP BY)
+
+:func:`rec_summarize`
+ Helper code to filter rec array fields into new fields
+
+For the rec viewer functions (e.g., rec2csv), there are a bunch of Format
+objects you can pass into the functions that will do things like color
+negative values red, set percent formatting and scaling, etc.
+
+Example usage::
+
+ r = csv2rec('somefile.csv', checkrows=0)
+
+ formatd = dict(
+ weight = FormatFloat(2),
+ change = FormatPercent(2),
+ cost = FormatThousands(2),
+ )
+
+
+ rec2excel(r, 'test.xls', formatd=formatd)
+ rec2csv(r, 'test.csv', formatd=formatd)
+ scroll = rec2gtk(r, formatd=formatd)
+
+ win = gtk.Window()
+ win.set_size_request(600,800)
+ win.add(scroll)
+ win.show_all()
+ gtk.main()
+
+
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import map, xrange, zip
+
+import copy
+import csv
+import operator
+import os
+import warnings
+
+import numpy as np
+
+import matplotlib.cbook as cbook
+from matplotlib import docstring
+from matplotlib.path import Path
+import math
+
+
+if six.PY3:
+ long = int
+
+
+@cbook.deprecated("2.2", alternative='numpy.logspace or numpy.geomspace')
+def logspace(xmin, xmax, N):
+ '''
+ Return N values logarithmically spaced between xmin and xmax.
+
+ '''
+ return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
+
+
+def window_hanning(x):
+ '''
+ Return x times the hanning window of len(x).
+
+ See Also
+ --------
+ :func:`window_none`
+ :func:`window_none` is another window algorithm.
+ '''
+ return np.hanning(len(x))*x
+
+
+def window_none(x):
+ '''
+ No window function; simply return x.
+
+ See Also
+ --------
+ :func:`window_hanning`
+ :func:`window_hanning` is another window algorithm.
+ '''
+ return x
+
+
+def apply_window(x, window, axis=0, return_window=None):
+ '''
+ Apply the given window to the given 1D or 2D array along the given axis.
+
+ Parameters
+ ----------
+ x : 1D or 2D array or sequence
+ Array or sequence containing the data.
+
+ window : function or array.
+ Either a function to generate a window or an array with length
+ *x*.shape[*axis*]
+
+ axis : integer
+ The axis over which to do the repetition.
+ Must be 0 or 1. The default is 0
+
+ return_window : bool
+ If true, also return the 1D values of the window that was applied
+ '''
+ x = np.asarray(x)
+
+ if x.ndim < 1 or x.ndim > 2:
+ raise ValueError('only 1D or 2D arrays can be used')
+ if axis+1 > x.ndim:
+ raise ValueError('axis(=%s) out of bounds' % axis)
+
+ xshape = list(x.shape)
+ xshapetarg = xshape.pop(axis)
+
+ if cbook.iterable(window):
+ if len(window) != xshapetarg:
+ raise ValueError('The len(window) must be the same as the shape '
+ 'of x for the chosen axis')
+ windowVals = window
+ else:
+ windowVals = window(np.ones(xshapetarg, dtype=x.dtype))
+
+ if x.ndim == 1:
+ if return_window:
+ return windowVals * x, windowVals
+ else:
+ return windowVals * x
+
+ xshapeother = xshape.pop()
+
+ otheraxis = (axis+1) % 2
+
+ windowValsRep = stride_repeat(windowVals, xshapeother, axis=otheraxis)
+
+ if return_window:
+ return windowValsRep * x, windowVals
+ else:
+ return windowValsRep * x
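+
+# Hedged example (not from the original docstring): for a 1D signal and the
+# default Hanning window,
+#
+#     x = np.arange(8.0)
+#     xw, w = apply_window(x, window_hanning, return_window=True)
+#
+# xw equals np.hanning(8) * x and w is the 1D window that was applied.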
+
+
+def detrend(x, key=None, axis=None):
+ '''
+ Return x with its trend removed.
+
+ Parameters
+ ----------
+ x : array or sequence
+ Array or sequence containing the data.
+
+ key : [ 'default' | 'constant' | 'mean' | 'linear' | 'none'] or function
+ Specifies the detrend algorithm to use. 'default' is 'mean', which is
+        the same as :func:`detrend_mean`. 'constant' is the same as 'mean'. 'linear' is
+ the same as :func:`detrend_linear`. 'none' is the same as
+ :func:`detrend_none`. The default is 'mean'. See the corresponding
+ functions for more details regarding the algorithms. Can also be a
+ function that carries out the detrend operation.
+
+ axis : integer
+ The axis along which to do the detrending.
+
+ See Also
+ --------
+ :func:`detrend_mean`
+ :func:`detrend_mean` implements the 'mean' algorithm.
+
+ :func:`detrend_linear`
+ :func:`detrend_linear` implements the 'linear' algorithm.
+
+ :func:`detrend_none`
+ :func:`detrend_none` implements the 'none' algorithm.
+ '''
+ if key is None or key in ['constant', 'mean', 'default']:
+ return detrend(x, key=detrend_mean, axis=axis)
+ elif key == 'linear':
+ return detrend(x, key=detrend_linear, axis=axis)
+ elif key == 'none':
+ return detrend(x, key=detrend_none, axis=axis)
+ elif isinstance(key, six.string_types):
+ raise ValueError("Unknown value for key %s, must be one of: "
+ "'default', 'constant', 'mean', "
+ "'linear', or a function" % key)
+
+ if not callable(key):
+ raise ValueError("Unknown value for key %s, must be one of: "
+ "'default', 'constant', 'mean', "
+ "'linear', or a function" % key)
+
+ x = np.asarray(x)
+
+ if axis is not None and axis+1 > x.ndim:
+ raise ValueError('axis(=%s) out of bounds' % axis)
+
+ if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1):
+ return key(x)
+
+ # try to use the 'axis' argument if the function supports it,
+ # otherwise use apply_along_axis to do it
+ try:
+ return key(x, axis=axis)
+ except TypeError:
+ return np.apply_along_axis(key, axis=axis, arr=x)
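+
+# Hedged example (not from the original docstring): detrending a pure ramp with
+# key='linear' leaves (numerically) zeros, because the best-fit line is the
+# data itself:
+#
+#     y = np.arange(10.0)
+#     np.allclose(detrend(y, key='linear'), 0.0)   # True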
+
+
+def demean(x, axis=0):
+ '''
+ Return x minus its mean along the specified axis.
+
+ Parameters
+ ----------
+ x : array or sequence
+ Array or sequence containing the data
+ Can have any dimensionality
+
+ axis : integer
+ The axis along which to take the mean. See numpy.mean for a
+ description of this argument.
+
+ See Also
+ --------
+    :func:`detrend_linear`
+
+    :func:`detrend_none`
+        :func:`detrend_linear` and :func:`detrend_none` are other detrend
+        algorithms.
+
+ :func:`detrend_mean`
+ This function is the same as :func:`detrend_mean` except for the
+ default *axis*.
+ '''
+ return detrend_mean(x, axis=axis)
+
+
+def detrend_mean(x, axis=None):
+ '''
+ Return x minus the mean(x).
+
+ Parameters
+ ----------
+ x : array or sequence
+ Array or sequence containing the data
+ Can have any dimensionality
+
+ axis : integer
+ The axis along which to take the mean. See numpy.mean for a
+ description of this argument.
+
+ See Also
+ --------
+ :func:`demean`
+ This function is the same as :func:`demean` except for the default
+ *axis*.
+
+ :func:`detrend_linear`
+
+ :func:`detrend_none`
+ :func:`detrend_linear` and :func:`detrend_none` are other detrend
+ algorithms.
+
+ :func:`detrend`
+ :func:`detrend` is a wrapper around all the detrend algorithms.
+ '''
+ x = np.asarray(x)
+
+ if axis is not None and axis+1 > x.ndim:
+ raise ValueError('axis(=%s) out of bounds' % axis)
+
+ # short-circuit 0-D array.
+ if not x.ndim:
+ return np.array(0., dtype=x.dtype)
+
+ # short-circuit simple operations
+ if axis == 0 or axis is None or x.ndim <= 1:
+ return x - x.mean(axis)
+
+ ind = [slice(None)] * x.ndim
+ ind[axis] = np.newaxis
+ return x - x.mean(axis)[ind]
+
+
+def detrend_none(x, axis=None):
+ '''
+ Return x: no detrending.
+
+ Parameters
+ ----------
+ x : any object
+ An object containing the data
+
+ axis : integer
+ This parameter is ignored.
+ It is included for compatibility with detrend_mean
+
+ See Also
+ --------
+ :func:`detrend_mean`
+
+ :func:`detrend_linear`
+ :func:`detrend_mean` and :func:`detrend_linear` are other detrend
+ algorithms.
+
+ :func:`detrend`
+ :func:`detrend` is a wrapper around all the detrend algorithms.
+ '''
+ return x
+
+
+def detrend_linear(y):
+ '''
+    Return y minus its best-fit line; 'linear' detrending.
+
+ Parameters
+ ----------
+ y : 0-D or 1-D array or sequence
+ Array or sequence containing the data
+
+ See Also
+ --------
+ :func:`detrend_mean`
+
+ :func:`detrend_none`
+ :func:`detrend_mean` and :func:`detrend_none` are other detrend
+ algorithms.
+
+ :func:`detrend`
+ :func:`detrend` is a wrapper around all the detrend algorithms.
+ '''
+ # This is faster than an algorithm based on linalg.lstsq.
+ y = np.asarray(y)
+
+ if y.ndim > 1:
+ raise ValueError('y cannot have ndim > 1')
+
+ # short-circuit 0-D array.
+ if not y.ndim:
+ return np.array(0., dtype=y.dtype)
+
+ x = np.arange(y.size, dtype=float)
+
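+    # Closed-form least squares: slope b = cov(x, y) / var(x) and intercept
+    # a = mean(y) - b * mean(x); with bias=1 the 1/N factors cancel in the
+    # ratio, so the slope is unchanged.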
+ C = np.cov(x, y, bias=1)
+ b = C[0, 1]/C[0, 0]
+
+ a = y.mean() - b*x.mean()
+ return y - (b*x + a)
+
+
+def stride_windows(x, n, noverlap=None, axis=0):
+ '''
+ Get all windows of x with length n as a single array,
+ using strides to avoid data duplication.
+
+ .. warning::
+
+ It is not safe to write to the output array. Multiple
+ elements may point to the same piece of memory,
+ so modifying one value may change others.
+
+ Parameters
+ ----------
+ x : 1D array or sequence
+ Array or sequence containing the data.
+
+ n : integer
+ The number of data points in each window.
+
+ noverlap : integer
+ The overlap between adjacent windows.
+ Default is 0 (no overlap)
+
+ axis : integer
+ The axis along which the windows will run.
+
+ References
+ ----------
+ `stackoverflow: Rolling window for 1D arrays in Numpy?
+ <http://stackoverflow.com/a/6811241>`_
+ `stackoverflow: Using strides for an efficient moving average filter
+ <http://stackoverflow.com/a/4947453>`_
+ '''
+ if noverlap is None:
+ noverlap = 0
+
+ if noverlap >= n:
+ raise ValueError('noverlap must be less than n')
+ if n < 1:
+ raise ValueError('n cannot be less than 1')
+
+ x = np.asarray(x)
+
+ if x.ndim != 1:
+ raise ValueError('only 1-dimensional arrays can be used')
+ if n == 1 and noverlap == 0:
+ if axis == 0:
+ return x[np.newaxis]
+ else:
+ return x[np.newaxis].transpose()
+ if n > x.size:
+ raise ValueError('n cannot be greater than the length of x')
+
+ # np.lib.stride_tricks.as_strided easily leads to memory corruption for
+ # non integer shape and strides, i.e. noverlap or n. See #3845.
+ noverlap = int(noverlap)
+ n = int(n)
+
+ step = n - noverlap
+ if axis == 0:
+ shape = (n, (x.shape[-1]-noverlap)//step)
+ strides = (x.strides[0], step*x.strides[0])
+ else:
+ shape = ((x.shape[-1]-noverlap)//step, n)
+ strides = (step*x.strides[0], x.strides[0])
+ return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
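+
+# Hedged example (not from the original docstring): stride_windows(np.arange(6),
+# n=4, noverlap=2) is a (4, 2) view whose columns are [0, 1, 2, 3] and
+# [2, 3, 4, 5]; no data is copied, so writing to the result is unsafe.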
+
+
+def stride_repeat(x, n, axis=0):
+ '''
+ Repeat the values in an array in a memory-efficient manner. Array x is
+ stacked vertically n times.
+
+ .. warning::
+
+ It is not safe to write to the output array. Multiple
+ elements may point to the same piece of memory, so
+ modifying one value may change others.
+
+ Parameters
+ ----------
+ x : 1D array or sequence
+ Array or sequence containing the data.
+
+ n : integer
+        The number of times to repeat the array.
+
+ axis : integer
+ The axis along which the data will run.
+
+ References
+ ----------
+ `stackoverflow: Repeat NumPy array without replicating data?
+ <http://stackoverflow.com/a/5568169>`_
+ '''
+ if axis not in [0, 1]:
+ raise ValueError('axis must be 0 or 1')
+ x = np.asarray(x)
+ if x.ndim != 1:
+ raise ValueError('only 1-dimensional arrays can be used')
+
+ if n == 1:
+ if axis == 0:
+ return np.atleast_2d(x)
+ else:
+ return np.atleast_2d(x).T
+ if n < 1:
+ raise ValueError('n cannot be less than 1')
+
+ # np.lib.stride_tricks.as_strided easily leads to memory corruption for
+ # non integer shape and strides, i.e. n. See #3845.
+ n = int(n)
+
+ if axis == 0:
+ shape = (n, x.size)
+ strides = (0, x.strides[0])
+ else:
+ shape = (x.size, n)
+ strides = (x.strides[0], 0)
+
+ return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
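+
+# Hedged example (not from the original docstring): stride_repeat(np.arange(3), 2)
+# is [[0, 1, 2], [0, 1, 2]] backed by a single copy of the data (the repeated
+# axis has stride 0).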
+
+
+def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None,
+ window=None, noverlap=None, pad_to=None,
+ sides=None, scale_by_freq=None, mode=None):
+ '''
+ This is a helper function that implements the commonality between the
+ psd, csd, spectrogram and complex, magnitude, angle, and phase spectrums.
+ It is *NOT* meant to be used outside of mlab and may change at any time.
+ '''
+ if y is None:
+ # if y is None use x for y
+ same_data = True
+ else:
+ # The checks for if y is x are so that we can use the same function to
+ # implement the core of psd(), csd(), and spectrogram() without doing
+ # extra calculations. We return the unaveraged Pxy, freqs, and t.
+ same_data = y is x
+
+ if Fs is None:
+ Fs = 2
+ if noverlap is None:
+ noverlap = 0
+ if detrend_func is None:
+ detrend_func = detrend_none
+ if window is None:
+ window = window_hanning
+
+    # if NFFT was not specified, fall back to the default of 256
+ if NFFT is None:
+ NFFT = 256
+
+ if mode is None or mode == 'default':
+ mode = 'psd'
+ elif mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
+ raise ValueError("Unknown value for mode %s, must be one of: "
+ "'default', 'psd', 'complex', "
+ "'magnitude', 'angle', 'phase'" % mode)
+
+ if not same_data and mode != 'psd':
+ raise ValueError("x and y must be equal if mode is not 'psd'")
+
+ # Make sure we're dealing with a numpy array. If y and x were the same
+ # object to start with, keep them that way
+ x = np.asarray(x)
+ if not same_data:
+ y = np.asarray(y)
+
+ if sides is None or sides == 'default':
+ if np.iscomplexobj(x):
+ sides = 'twosided'
+ else:
+ sides = 'onesided'
+ elif sides not in ['onesided', 'twosided']:
+ raise ValueError("Unknown value for sides %s, must be one of: "
+ "'default', 'onesided', or 'twosided'" % sides)
+
+ # zero pad x and y up to NFFT if they are shorter than NFFT
+ if len(x) < NFFT:
+ n = len(x)
+ x = np.resize(x, (NFFT,))
+ x[n:] = 0
+
+ if not same_data and len(y) < NFFT:
+ n = len(y)
+ y = np.resize(y, (NFFT,))
+ y[n:] = 0
+
+ if pad_to is None:
+ pad_to = NFFT
+
+ if mode != 'psd':
+ scale_by_freq = False
+ elif scale_by_freq is None:
+ scale_by_freq = True
+
+ # For real x, ignore the negative frequencies unless told otherwise
+ if sides == 'twosided':
+ numFreqs = pad_to
+ if pad_to % 2:
+ freqcenter = (pad_to - 1)//2 + 1
+ else:
+ freqcenter = pad_to//2
+ scaling_factor = 1.
+ elif sides == 'onesided':
+ if pad_to % 2:
+ numFreqs = (pad_to + 1)//2
+ else:
+ numFreqs = pad_to//2 + 1
+ scaling_factor = 2.
+
+ result = stride_windows(x, NFFT, noverlap, axis=0)
+ result = detrend(result, detrend_func, axis=0)
+ result, windowVals = apply_window(result, window, axis=0,
+ return_window=True)
+ result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
+ freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]
+
+ if not same_data:
+ # if same_data is False, mode must be 'psd'
+ resultY = stride_windows(y, NFFT, noverlap)
+ resultY = detrend(resultY, detrend_func, axis=0)
+ resultY = apply_window(resultY, window, axis=0)
+ resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :]
+ result = np.conj(result) * resultY
+ elif mode == 'psd':
+ result = np.conj(result) * result
+ elif mode == 'magnitude':
+ result = np.abs(result) / np.abs(windowVals).sum()
+ elif mode == 'angle' or mode == 'phase':
+ # we unwrap the phase later to handle the onesided vs. twosided case
+ result = np.angle(result)
+ elif mode == 'complex':
+ result /= np.abs(windowVals).sum()
+
+ if mode == 'psd':
+
+ # Also include scaling factors for one-sided densities and dividing by
+ # the sampling frequency, if desired. Scale everything, except the DC
+ # component and the NFFT/2 component:
+
+ # if we have an even number of frequencies, don't scale NFFT/2
+ if not NFFT % 2:
+ slc = slice(1, -1, None)
+ # if we have an odd number, just don't scale DC
+ else:
+ slc = slice(1, None, None)
+
+ result[slc] *= scaling_factor
+
+ # MATLAB divides by the sampling frequency so that density function
+ # has units of dB/Hz and can be integrated by the plotted frequency
+ # values. Perform the same scaling here.
+ if scale_by_freq:
+ result /= Fs
+ # Scale the spectrum by the norm of the window to compensate for
+ # windowing loss; see Bendat & Piersol Sec 11.5.2.
+ result /= (np.abs(windowVals)**2).sum()
+ else:
+ # In this case, preserve power in the segment, not amplitude
+ result /= np.abs(windowVals).sum()**2
+
+ t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs
+
+ if sides == 'twosided':
+ # center the frequency range at zero
+ freqs = np.concatenate((freqs[freqcenter:], freqs[:freqcenter]))
+ result = np.concatenate((result[freqcenter:, :],
+ result[:freqcenter, :]), 0)
+ elif not pad_to % 2:
+ # get the last value correctly, it is negative otherwise
+ freqs[-1] *= -1
+
+ # we unwrap the phase here to handle the onesided vs. twosided case
+ if mode == 'phase':
+ result = np.unwrap(result, axis=0)
+
+ return result, freqs, t
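+
+
+# Editorial note (illustrative, not part of matplotlib.mlab): with a signal of
+# length len(x), segments of length NFFT and an overlap of noverlap points,
+# _spectral_helper produces
+#     n_segments = (len(x) - noverlap) // (NFFT - noverlap)
+# columns, with segment midpoints t_k = (NFFT/2 + k*(NFFT - noverlap))/Fs.
+# For example, len(x) = 1024, NFFT = 256 and noverlap = 128 give 7 segments
+# with midpoints at 128/Fs, 256/Fs, ..., 896/Fs.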
+
+
+def _single_spectrum_helper(x, mode, Fs=None, window=None, pad_to=None,
+ sides=None):
+ '''
+ This is a helper function that implements the commonality between the
+ complex, magnitude, angle, and phase spectra.
+ It is *NOT* meant to be used outside of mlab and may change at any time.
+ '''
+ if mode is None or mode == 'psd' or mode == 'default':
+ raise ValueError('_single_spectrum_helper does not work with %s mode'
+ % mode)
+
+ if pad_to is None:
+ pad_to = len(x)
+
+ spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs,
+ detrend_func=detrend_none, window=window,
+ noverlap=0, pad_to=pad_to,
+ sides=sides,
+ scale_by_freq=False,
+ mode=mode)
+ if mode != 'complex':
+ spec = spec.real
+
+ if spec.ndim == 2 and spec.shape[1] == 1:
+ spec = spec[:, 0]
+
+ return spec, freqs
+
+
+# Split out these keyword docs so that they can be used elsewhere
+docstring.interpd.update(Spectral=cbook.dedent("""
+ Fs : scalar
+ The sampling frequency (samples per time unit). It is used
+ to calculate the Fourier frequencies, freqs, in cycles per time
+ unit. The default value is 2.
+
+ window : callable or ndarray
+ A function or a vector of length *NFFT*. To create window
+ vectors see :func:`window_hanning`, :func:`window_none`,
+ :func:`numpy.blackman`, :func:`numpy.hamming`,
+ :func:`numpy.bartlett`, :func:`scipy.signal`,
+ :func:`scipy.signal.get_window`, etc. The default is
+ :func:`window_hanning`. If a function is passed as the
+ argument, it must take a data segment as an argument and
+ return the windowed version of the segment.
+
+ sides : [ 'default' | 'onesided' | 'twosided' ]
+ Specifies which sides of the spectrum to return. Default gives the
+ default behavior, which returns one-sided for real data and both
+ for complex data. 'onesided' forces the return of a one-sided
+ spectrum, while 'twosided' forces two-sided.
+"""))
+
+
+docstring.interpd.update(Single_Spectrum=cbook.dedent("""
+ pad_to : integer
+ The number of points to which the data segment is padded when
+ performing the FFT. While not increasing the actual resolution of
+ the spectrum (the minimum distance between resolvable peaks),
+ this can give more points in the plot, allowing for more
+ detail. This corresponds to the *n* parameter in the call to fft().
+ The default is None, which sets *pad_to* equal to the length of the
+ input signal (i.e. no padding).
+"""))
+
+
+docstring.interpd.update(PSD=cbook.dedent("""
+ pad_to : integer
+ The number of points to which the data segment is padded when
+ performing the FFT. This can be different from *NFFT*, which
+ specifies the number of data points used. While not increasing
+ the actual resolution of the spectrum (the minimum distance between
+ resolvable peaks), this can give more points in the plot,
+ allowing for more detail. This corresponds to the *n* parameter
+ in the call to fft(). The default is None, which sets *pad_to*
+ equal to *NFFT*
+
+ NFFT : integer
+ The number of data points used in each block for the FFT.
+ A power of 2 is most efficient. The default value is 256.
+ This should *NOT* be used to get zero padding, or the scaling of the
+ result will be incorrect. Use *pad_to* for this instead.
+
+ detrend : {'default', 'constant', 'mean', 'linear', 'none'} or callable
+ The function applied to each segment before fft-ing,
+ designed to remove the mean or linear trend. Unlike in
+ MATLAB, where the *detrend* parameter is a vector, in
+ matplotlib it is a function. The :mod:`~matplotlib.pylab`
+ module defines :func:`~matplotlib.pylab.detrend_none`,
+ :func:`~matplotlib.pylab.detrend_mean`, and
+ :func:`~matplotlib.pylab.detrend_linear`, but you can use
+ a custom function as well. You can also use a string to choose
+ one of the functions. 'default', 'constant', and 'mean' call
+ :func:`~matplotlib.pylab.detrend_mean`. 'linear' calls
+ :func:`~matplotlib.pylab.detrend_linear`. 'none' calls
+ :func:`~matplotlib.pylab.detrend_none`.
+
+ scale_by_freq : boolean, optional
+ Specifies whether the resulting density values should be scaled
+ by the scaling frequency, which gives density in units of Hz^-1.
+ This allows for integration over the returned frequency values.
+ The default is True for MATLAB compatibility.
+"""))
+
+
+@docstring.dedent_interpd
+def psd(x, NFFT=None, Fs=None, detrend=None, window=None,
+ noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
+ r"""
+ Compute the power spectral density.
+
+ Call signature::
+
+ psd(x, NFFT=256, Fs=2, detrend=mlab.detrend_none,
+ window=mlab.window_hanning, noverlap=0, pad_to=None,
+ sides='default', scale_by_freq=None)
+
+ The power spectral density :math:`P_{xx}` is computed by Welch's average
+ periodogram method. The vector *x* is divided into *NFFT* length
+ segments. Each segment is detrended by function *detrend* and
+ windowed by function *window*. *noverlap* gives the length of
+ the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
+ of each segment :math:`i` are averaged to compute :math:`P_{xx}`.
+
+ If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
+
+ Parameters
+ ----------
+ x : 1-D array or sequence
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(PSD)s
+
+ noverlap : integer
+ The number of points of overlap between segments.
+ The default value is 0 (no overlap).
+
+ Returns
+ -------
+ Pxx : 1-D array
+ The values for the power spectrum `P_{xx}` (real valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *Pxx*
+
+ References
+ ----------
+ Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
+ Wiley & Sons (1986)
+
+ See Also
+ --------
+ :func:`specgram`
+ :func:`specgram` differs in the default overlap; in not returning the
+ mean of the segment periodograms; and in returning the times of the
+ segments.
+
+ :func:`magnitude_spectrum`
+ :func:`magnitude_spectrum` returns the magnitude spectrum.
+
+ :func:`csd`
+ :func:`csd` returns the spectral density between two signals.
+ """
+ Pxx, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend,
+ window=window, noverlap=noverlap, pad_to=pad_to,
+ sides=sides, scale_by_freq=scale_by_freq)
+ return Pxx.real, freqs
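+
+
+# Editorial sketch (not part of matplotlib.mlab; the function name and
+# parameter values are illustrative only): estimating the PSD of a noisy
+# 50 Hz sine with Welch averaging.  numpy is assumed to be imported as ``np``.
+def _example_psd():
+    fs = 1000.0
+    t = np.arange(0, 10, 1/fs)
+    sig = np.sin(2*np.pi*50*t) + 0.5*np.random.randn(t.size)
+    Pxx, freqs = psd(sig, NFFT=1024, Fs=fs, noverlap=512)
+    return freqs[np.argmax(Pxx)]   # close to 50 Hz for this signal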
+
+
+@docstring.dedent_interpd
+def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None,
+ noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
+ """
+ Compute the cross-spectral density.
+
+ Call signature::
+
+ csd(x, y, NFFT=256, Fs=2, detrend=mlab.detrend_none,
+ window=mlab.window_hanning, noverlap=0, pad_to=None,
+ sides='default', scale_by_freq=None)
+
+ The cross spectral density :math:`P_{xy}` is computed by Welch's average
+ periodogram method. The vectors *x* and *y* are divided into
+ *NFFT* length segments. Each segment is detrended by function
+ *detrend* and windowed by function *window*. *noverlap* gives
+ the length of the overlap between segments. The product of
+ the direct FFTs of *x* and *y* are averaged over each segment
+ to compute :math:`P_{xy}`, with a scaling to correct for power
+ loss due to windowing.
+
+ If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
+ padded to *NFFT*.
+
+ Parameters
+ ----------
+ x, y : 1-D arrays or sequences
+ Arrays or sequences containing the data
+
+ %(Spectral)s
+
+ %(PSD)s
+
+ noverlap : integer
+ The number of points of overlap between segments.
+ The default value is 0 (no overlap).
+
+ Returns
+ -------
+ Pxy : 1-D array
+ The values for the cross spectrum `P_{xy}` before scaling (complex valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *Pxy*
+
+ References
+ ----------
+ Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
+ Wiley & Sons (1986)
+
+ See Also
+ --------
+ :func:`psd`
+ :func:`psd` is the equivalent to setting y=x.
+ """
+ if NFFT is None:
+ NFFT = 256
+ Pxy, freqs, _ = _spectral_helper(x=x, y=y, NFFT=NFFT, Fs=Fs,
+ detrend_func=detrend, window=window,
+ noverlap=noverlap, pad_to=pad_to,
+ sides=sides, scale_by_freq=scale_by_freq,
+ mode='psd')
+
+ if Pxy.ndim == 2:
+ if Pxy.shape[1] > 1:
+ Pxy = Pxy.mean(axis=1)
+ else:
+ Pxy = Pxy[:, 0]
+ return Pxy, freqs
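+
+
+# Editorial sketch (not part of matplotlib.mlab; names are illustrative only):
+# cross-spectral density of a tone and a slightly delayed, noisy copy of it.
+def _example_csd():
+    fs = 500.0
+    t = np.arange(0, 4, 1/fs)
+    x = np.sin(2*np.pi*10*t) + 0.1*np.random.randn(t.size)
+    y = np.roll(x, 12)                    # delay of 12 samples (24 ms)
+    Pxy, freqs = csd(x, y, NFFT=256, Fs=fs, noverlap=128)
+    # the phase of Pxy near 10 Hz reflects the imposed delay
+    return np.angle(Pxy[np.argmin(np.abs(freqs - 10))])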
+
+
+@docstring.dedent_interpd
+def complex_spectrum(x, Fs=None, window=None, pad_to=None,
+ sides=None):
+ """
+ Compute the complex-valued frequency spectrum of *x*. Data is padded to a
+ length of *pad_to* and the windowing function *window* is applied to the
+ signal.
+
+ Parameters
+ ----------
+ x : 1-D array or sequence
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(Single_Spectrum)s
+
+ Returns
+ -------
+ spectrum : 1-D array
+ The values for the complex spectrum (complex valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *spectrum*
+
+ See Also
+ --------
+ :func:`magnitude_spectrum`
+ :func:`magnitude_spectrum` returns the absolute value of this function.
+
+ :func:`angle_spectrum`
+ :func:`angle_spectrum` returns the angle of this function.
+
+ :func:`phase_spectrum`
+ :func:`phase_spectrum` returns the phase (unwrapped angle) of this
+ function.
+
+ :func:`specgram`
+ :func:`specgram` can return the complex spectrum of segments within the
+ signal.
+ """
+ return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
+ sides=sides, mode='complex')
+
+
+@docstring.dedent_interpd
+def magnitude_spectrum(x, Fs=None, window=None, pad_to=None,
+ sides=None):
+ """
+ Compute the magnitude (absolute value) of the frequency spectrum of
+ *x*. Data is padded to a length of *pad_to* and the windowing function
+ *window* is applied to the signal.
+
+ Parameters
+ ----------
+ x : 1-D array or sequence
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(Single_Spectrum)s
+
+ Returns
+ -------
+ spectrum : 1-D array
+ The values for the magnitude spectrum (real valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *spectrum*
+
+ See Also
+ --------
+ :func:`psd`
+ :func:`psd` returns the power spectral density.
+
+ :func:`complex_spectrum`
+ This function returns the absolute value of :func:`complex_spectrum`.
+
+ :func:`angle_spectrum`
+ :func:`angle_spectrum` returns the angles of the corresponding
+ frequencies.
+
+ :func:`phase_spectrum`
+ :func:`phase_spectrum` returns the phase (unwrapped angle) of the
+ corresponding frequencies.
+
+ :func:`specgram`
+ :func:`specgram` can return the magnitude spectrum of segments within
+ the signal.
+ """
+ return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
+ sides=sides, mode='magnitude')
+
+
+@docstring.dedent_interpd
+def angle_spectrum(x, Fs=None, window=None, pad_to=None,
+ sides=None):
+ """
+ Compute the angle of the frequency spectrum (wrapped phase spectrum) of
+ *x*. Data is padded to a length of *pad_to* and the windowing function
+ *window* is applied to the signal.
+
+ Parameters
+ ----------
+ x : 1-D array or sequence
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(Single_Spectrum)s
+
+ Returns
+ -------
+ spectrum : 1-D array
+ The values for the angle spectrum in radians (real valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *spectrum*
+
+ See Also
+ --------
+ :func:`complex_spectrum`
+ This function returns the angle value of :func:`complex_spectrum`.
+
+ :func:`magnitude_spectrum`
+ :func:`magnitude_spectrum` returns the magnitudes of the corresponding
+ frequencies.
+
+ :func:`phase_spectrum`
+ :func:`phase_spectrum` returns the unwrapped version of this function.
+
+ :func:`specgram`
+ :func:`specgram` can return the angle spectrum of segments within the
+ signal.
+ """
+ return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
+ sides=sides, mode='angle')
+
+
+@docstring.dedent_interpd
+def phase_spectrum(x, Fs=None, window=None, pad_to=None,
+ sides=None):
+ """
+ Compute the phase of the frequency spectrum (unwrapped angle spectrum) of
+ *x*. Data is padded to a length of *pad_to* and the windowing function
+ *window* is applied to the signal.
+
+ Parameters
+ ----------
+ x : 1-D array or sequence
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(Single_Spectrum)s
+
+ Returns
+ -------
+ spectrum : 1-D array
+ The values for the phase spectrum in radians (real valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *spectrum*
+
+ See Also
+ --------
+ :func:`complex_spectrum`
+ This function returns the unwrapped angle (phase) of :func:`complex_spectrum`.
+
+ :func:`magnitude_spectrum`
+ :func:`magnitude_spectrum` returns the magnitudes of the corresponding
+ frequencies.
+
+ :func:`angle_spectrum`
+ :func:`angle_spectrum` returns the wrapped version of this function.
+
+ :func:`specgram`
+ :func:`specgram` can return the phase spectrum of segments within the
+ signal.
+ """
+ return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
+ sides=sides, mode='phase')
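+
+
+# Editorial sketch (not part of matplotlib.mlab; names are illustrative only):
+# the four single-spectrum helpers applied to the same 5 Hz cosine.
+def _example_single_spectra():
+    fs = 100.0
+    t = np.arange(0, 1, 1/fs)
+    sig = np.cos(2*np.pi*5*t)
+    spec_c, freqs = complex_spectrum(sig, Fs=fs)
+    spec_m, _ = magnitude_spectrum(sig, Fs=fs)    # == np.abs(spec_c)
+    spec_a, _ = angle_spectrum(sig, Fs=fs)        # wrapped phase
+    spec_p, _ = phase_spectrum(sig, Fs=fs)        # unwrapped phase
+    return freqs[np.argmax(spec_m)]               # close to 5 Hz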
+
+
+@docstring.dedent_interpd
+def specgram(x, NFFT=None, Fs=None, detrend=None, window=None,
+ noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
+ mode=None):
+ """
+ Compute a spectrogram.
+
+ Compute and plot a spectrogram of data in x. Data are split into
+ NFFT length segments and the spectrum of each section is
+ computed. The windowing function window is applied to each
+ segment, and the amount of overlap of each segment is
+ specified with noverlap.
+
+ Parameters
+ ----------
+ x : array_like
+ 1-D array or sequence.
+
+ %(Spectral)s
+
+ %(PSD)s
+
+ noverlap : int, optional
+ The number of points of overlap between blocks. The default
+ value is 128.
+ mode : str, optional
+ What sort of spectrum to use, default is 'psd'.
+ 'psd'
+ Returns the power spectral density.
+
+ 'complex'
+ Returns the complex-valued frequency spectrum.
+
+ 'magnitude'
+ Returns the magnitude spectrum.
+
+ 'angle'
+ Returns the phase spectrum without unwrapping.
+
+ 'phase'
+ Returns the phase spectrum with unwrapping.
+
+ Returns
+ -------
+ spectrum : array_like
+ 2-D array, columns are the periodograms of successive segments.
+
+ freqs : array_like
+ 1-D array, frequencies corresponding to the rows in *spectrum*.
+
+ t : array_like
+ 1-D array, the times corresponding to midpoints of segments
+ (i.e. the columns in *spectrum*).
+
+ See Also
+ --------
+ psd : differs in the overlap and in the return values.
+ complex_spectrum : similar to a single segment when mode is 'complex'.
+ magnitude_spectrum : similar to a single segment when mode is 'magnitude'.
+ angle_spectrum : similar to a single segment when mode is 'angle'.
+ phase_spectrum : similar to a single segment when mode is 'phase'.
+
+ Notes
+ -----
+ detrend and scale_by_freq only apply when *mode* is set to 'psd'.
+
+ """
+ if noverlap is None:
+ noverlap = 128 # default in _spectral_helper() is noverlap = 0
+ if NFFT is None:
+ NFFT = 256 # same default as in _spectral_helper()
+ if len(x) <= NFFT:
+ warnings.warn("Only one segment is calculated since parameter NFFT " +
+ "(=%d) >= signal length (=%d)." % (NFFT, len(x)))
+
+ spec, freqs, t = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs,
+ detrend_func=detrend, window=window,
+ noverlap=noverlap, pad_to=pad_to,
+ sides=sides,
+ scale_by_freq=scale_by_freq,
+ mode=mode)
+
+ if mode != 'complex':
+ spec = spec.real # Needed since helper implements generically
+
+ return spec, freqs, t
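+
+
+# Editorial sketch (not part of matplotlib.mlab; names and parameter values
+# are illustrative only): spectrogram of a tone whose frequency rises in time.
+def _example_specgram():
+    fs = 1000.0
+    t = np.arange(0, 5, 1/fs)
+    sig = np.sin(2*np.pi*(50 + 20*t)*t)   # instantaneous frequency rises
+    spec, freqs, times = specgram(sig, NFFT=256, Fs=fs, noverlap=128)
+    # one column per segment; freqs and times label the rows and columns
+    return spec.shape, freqs.shape, times.shape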
+
+
+_coh_error = """Coherence is calculated by averaging over *NFFT*
+length segments. Your signal is too short for your choice of *NFFT*.
+"""
+
+
+@docstring.dedent_interpd
+def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
+ noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
+ """
+ The coherence between *x* and *y*. Coherence is the normalized
+ cross spectral density:
+
+ .. math::
+
+ C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
+
+ Parameters
+ ----------
+ x, y
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(PSD)s
+
+ noverlap : integer
+ The number of points of overlap between blocks. The default value
+ is 0 (no overlap).
+
+ Returns
+ -------
+ The return value is the tuple (*Cxy*, *f*), where *f* are the
+ frequencies of the coherence vector. For cohere, scaling the
+ individual densities by the sampling frequency has no effect,
+ since the factors cancel out.
+
+ See Also
+ --------
+ :func:`psd`, :func:`csd` :
+ For information about the methods used to compute :math:`P_{xy}`,
+ :math:`P_{xx}` and :math:`P_{yy}`.
+ """
+
+ if len(x) < 2 * NFFT:
+ raise ValueError(_coh_error)
+ Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
+ scale_by_freq)
+ Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
+ scale_by_freq)
+ Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
+ scale_by_freq)
+ Cxy = np.abs(Pxy) ** 2 / (Pxx * Pyy)
+ return Cxy, f
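+
+
+# Editorial sketch (not part of matplotlib.mlab; names are illustrative only):
+# coherence between two noisy recordings of the same 20 Hz tone is close to 1
+# at 20 Hz and small elsewhere.  The signals must be at least 2*NFFT long.
+def _example_cohere():
+    fs = 256.0
+    t = np.arange(0, 8, 1/fs)
+    x = np.sin(2*np.pi*20*t) + np.random.randn(t.size)
+    y = np.sin(2*np.pi*20*t) + np.random.randn(t.size)
+    Cxy, freqs = cohere(x, y, NFFT=256, Fs=fs, noverlap=128)
+    return Cxy[np.argmin(np.abs(freqs - 20))]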
+
+
+@cbook.deprecated('2.2')
+def donothing_callback(*args):
+ pass
+
+
+@cbook.deprecated('2.2', 'scipy.signal.coherence')
+def cohere_pairs(X, ij, NFFT=256, Fs=2, detrend=detrend_none,
+ window=window_hanning, noverlap=0,
+ preferSpeedOverMemory=True,
+ progressCallback=donothing_callback,
+ returnPxx=False):
+
+ """
+ Compute the coherence and phase for all pairs *ij*, in *X*.
+
+ *X* is a *numSamples* * *numCols* array
+
+ *ij* is a list of tuples. Each tuple is a pair of indexes into
+ the columns of X for which you want to compute coherence. For
+ example, if *X* has 64 columns, and you want to compute all
+ nonredundant pairs, define *ij* as::
+
+ ij = []
+ for i in range(64):
+ for j in range(i+1,64):
+ ij.append( (i,j) )
+
+ *preferSpeedOverMemory* is an optional bool. Defaults to true. If
+ False, limits the caching by only making one, rather than two,
+ complex cache arrays. This is useful if memory becomes critical.
+ Even when *preferSpeedOverMemory* is False, :func:`cohere_pairs`
+ will still give significant performance gains over calling
+ :func:`cohere` for each pair, and will use substantially less
+ memory than if *preferSpeedOverMemory* is True. In my tests with
+ a 43000,64 array over all nonredundant pairs,
+ *preferSpeedOverMemory* = True delivered a 33% performance boost
+ on a 1.7GHZ Athlon with 512MB RAM compared with
+ *preferSpeedOverMemory* = False. But both solutions were more
+ than 10x faster than naively crunching all possible pairs through
+ :func:`cohere`.
+
+ Returns
+ -------
+ Cxy : dictionary of (*i*, *j*) tuples -> coherence vector for
+ that pair. i.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``.
+ Number of dictionary keys is ``len(ij)``.
+
+ Phase : dictionary of phases of the cross spectral density at
+ each frequency for each pair. Keys are (*i*, *j*).
+
+ freqs : vector of frequencies, equal in length to either the
+ coherence or phase vectors for any (*i*, *j*) key.
+
+ e.g., to make a coherence Bode plot::
+
+ subplot(211)
+ plot( freqs, Cxy[(12,19)])
+ subplot(212)
+ plot( freqs, Phase[(12,19)])
+
+ For a large number of pairs, :func:`cohere_pairs` can be much more
+ efficient than just calling :func:`cohere` for each pair, because
+ it caches most of the intensive computations. If :math:`N` is the
+ number of pairs, this function is :math:`O(N)` for most of the
+ heavy lifting, whereas calling cohere for each pair is
+ :math:`O(N^2)`. However, because of the caching, it is also more
+ memory intensive, making 2 additional complex arrays with
+ approximately the same number of elements as *X*.
+
+ See :file:`test/cohere_pairs_test.py` in the src tree for an
+ example script that shows that this :func:`cohere_pairs` and
+ :func:`cohere` give the same results for a given pair.
+
+ See Also
+ --------
+ :func:`psd`
+ For information about the methods used to compute :math:`P_{xy}`,
+ :math:`P_{xx}` and :math:`P_{yy}`.
+ """
+ numRows, numCols = X.shape
+
+ # zero pad if X is too short
+ if numRows < NFFT:
+ tmp = X
+ X = np.zeros((NFFT, numCols), X.dtype)
+ X[:numRows, :] = tmp
+ del tmp
+
+ numRows, numCols = X.shape
+ # get all the columns of X that we are interested in by checking
+ # the ij tuples
+ allColumns = set()
+ for i, j in ij:
+ allColumns.add(i)
+ allColumns.add(j)
+ Ncols = len(allColumns)
+
+ # for real X, ignore the negative frequencies
+ if np.iscomplexobj(X):
+ numFreqs = NFFT
+ else:
+ numFreqs = NFFT//2+1
+
+ # cache the FFT of every windowed, detrended NFFT length segment
+ # of every channel. If preferSpeedOverMemory, cache the conjugate
+ # as well
+ if cbook.iterable(window):
+ if len(window) != NFFT:
+ raise ValueError("The length of the window must be equal to NFFT")
+ windowVals = window
+ else:
+ windowVals = window(np.ones(NFFT, X.dtype))
+ ind = list(xrange(0, numRows-NFFT+1, NFFT-noverlap))
+ numSlices = len(ind)
+ FFTSlices = {}
+ FFTConjSlices = {}
+ Pxx = {}
+ slices = range(numSlices)
+ normVal = np.linalg.norm(windowVals)**2
+ for icount, iCol in enumerate(allColumns):
+ progressCallback(icount/Ncols, 'Caching FFTs')
+ Slices = np.zeros((numSlices, numFreqs), dtype=np.complex_)
+ for iSlice in slices:
+ thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
+ thisSlice = windowVals*detrend(thisSlice)
+ Slices[iSlice, :] = np.fft.fft(thisSlice)[:numFreqs]
+
+ FFTSlices[iCol] = Slices
+ if preferSpeedOverMemory:
+ FFTConjSlices[iCol] = np.conj(Slices)
+ Pxx[iCol] = np.divide(np.mean(abs(Slices)**2, axis=0), normVal)
+ del Slices, ind, windowVals
+
+ # compute the coherences and phases for all pairs using the
+ # cached FFTs
+ Cxy = {}
+ Phase = {}
+ count = 0
+ N = len(ij)
+ for i, j in ij:
+ count += 1
+ if count % 10 == 0:
+ progressCallback(count/N, 'Computing coherences')
+
+ if preferSpeedOverMemory:
+ Pxy = FFTSlices[i] * FFTConjSlices[j]
+ else:
+ Pxy = FFTSlices[i] * np.conj(FFTSlices[j])
+ if numSlices > 1:
+ Pxy = np.mean(Pxy, axis=0)
+# Pxy = np.divide(Pxy, normVal)
+ Pxy /= normVal
+# Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
+ Cxy[i, j] = abs(Pxy)**2 / (Pxx[i]*Pxx[j])
+ Phase[i, j] = np.arctan2(Pxy.imag, Pxy.real)
+
+ freqs = Fs/NFFT*np.arange(numFreqs)
+ if returnPxx:
+ return Cxy, Phase, freqs, Pxx
+ else:
+ return Cxy, Phase, freqs
+
+
+@cbook.deprecated('2.2', 'scipy.stats.entropy')
+def entropy(y, bins):
+ r"""
+ Return the entropy of the data in *y* in units of nat.
+
+ .. math::
+
+ -\sum p_i \ln(p_i)
+
+ where :math:`p_i` is the probability of observing *y* in the
+ :math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
+ range of bins; see :func:`numpy.histogram`.
+
+ Compare *S* with analytic calculation for a Gaussian::
+
+ x = mu + sigma * randn(200000)
+ Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
+ """
+ n, bins = np.histogram(y, bins)
+ n = n.astype(float)
+
+ n = np.take(n, np.nonzero(n)[0]) # get the positive
+
+ p = np.divide(n, len(y))
+
+ delta = bins[1] - bins[0]
+ S = -1.0 * np.sum(p * np.log(p)) + np.log(delta)
+ return S
+
+
+@cbook.deprecated('2.2', 'scipy.stats.norm.pdf')
+def normpdf(x, *args):
+ "Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
+ mu, sigma = args
+ return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
+
+
+@cbook.deprecated('2.2')
+def find(condition):
+ "Return the indices where ravel(condition) is true"
+ res, = np.nonzero(np.ravel(condition))
+ return res
+
+
+@cbook.deprecated('2.2')
+def longest_contiguous_ones(x):
+ """
+ Return the indices of the longest stretch of contiguous ones in *x*,
+ assuming *x* is a vector of zeros and ones. If there are two
+ equally long stretches, pick the first.
+ """
+ x = np.ravel(x)
+ if len(x) == 0:
+ return np.array([])
+
+ ind = (x == 0).nonzero()[0]
+ if len(ind) == 0:
+ return np.arange(len(x))
+ if len(ind) == len(x):
+ return np.array([])
+
+ y = np.zeros((len(x)+2,), x.dtype)
+ y[1:-1] = x
+ dif = np.diff(y)
+ up = (dif == 1).nonzero()[0]
+ dn = (dif == -1).nonzero()[0]
+ i = (dn-up == max(dn - up)).nonzero()[0][0]
+ ind = np.arange(up[i], dn[i])
+
+ return ind
+
+
+@cbook.deprecated('2.2')
+def longest_ones(x):
+ '''alias for longest_contiguous_ones'''
+ return longest_contiguous_ones(x)
+
+
+@cbook.deprecated('2.2')
+class PCA(object):
+ def __init__(self, a, standardize=True):
+ """
+ Compute the SVD of *a* and store data for PCA. Use ``project`` to
+ project the data onto a reduced set of dimensions.
+
+ Parameters
+ ----------
+ a : np.ndarray
+ A numobservations x numdims array
+ standardize : bool
+ True if input data are to be standardized. If False, only centering
+ will be carried out.
+
+ Attributes
+ ----------
+ a
+ A centered unit sigma version of input ``a``.
+
+ numrows, numcols
+ The dimensions of ``a``.
+
+ mu
+ A numdims array of means of ``a``. This is the vector that points
+ to the origin of PCA space.
+
+ sigma
+ A numdims array of standard deviation of ``a``.
+
+ fracs
+ The proportion of variance of each of the principal components.
+
+ s
+ The actual eigenvalues of the decomposition.
+
+ Wt
+ The weight vector for projecting a numdims point or array into
+ PCA space.
+
+ Y
+ A projected into PCA space.
+
+ Notes
+ -----
+ The factor loadings are in the ``Wt`` factor, i.e., the factor loadings
+ for the first principal component are given by ``Wt[0]``. This row is
+ also the first eigenvector.
+
+ """
+ n, m = a.shape
+ if n < m:
+ raise RuntimeError('we assume data in a is organized with '
+ 'numrows>numcols')
+
+ self.numrows, self.numcols = n, m
+ self.mu = a.mean(axis=0)
+ self.sigma = a.std(axis=0)
+ self.standardize = standardize
+
+ a = self.center(a)
+
+ self.a = a
+
+ U, s, Vh = np.linalg.svd(a, full_matrices=False)
+
+ # Note: .H indicates the conjugate transposed / Hermitian.
+
+ # The SVD is commonly written as a = U s V.H.
+ # If U is a unitary matrix, it means that it satisfies U.H = inv(U).
+
+ # The rows of Vh are the eigenvectors of a.H a.
+ # The columns of U are the eigenvectors of a a.H.
+ # For row i in Vh and column i in U, the corresponding eigenvalue is
+ # s[i]**2.
+
+ self.Wt = Vh
+
+ # save the transposed coordinates
+ Y = np.dot(Vh, a.T).T
+ self.Y = Y
+
+ # save the eigenvalues
+ self.s = s**2
+
+ # and now the contribution of the individual components
+ vars = self.s / len(s)
+ self.fracs = vars/vars.sum()
+
+ def project(self, x, minfrac=0.):
+ '''
+ Project *x* onto the principal axes, dropping any axes where the
+ fraction of variance is less than *minfrac*.
+ '''
+ x = np.asarray(x)
+ if x.shape[-1] != self.numcols:
+ raise ValueError('Expected an array with dims[-1]==%d' %
+ self.numcols)
+ Y = np.dot(self.Wt, self.center(x).T).T
+ mask = self.fracs >= minfrac
+ if x.ndim == 2:
+ Yreduced = Y[:, mask]
+ else:
+ Yreduced = Y[mask]
+ return Yreduced
+
+ def center(self, x):
+ '''
+ center and optionally standardize the data using the mean and sigma
+ from training set a
+ '''
+ if self.standardize:
+ return (x - self.mu)/self.sigma
+ else:
+ return (x - self.mu)
+
+ @staticmethod
+ def _get_colinear():
+ c0 = np.array([
+ 0.19294738, 0.6202667, 0.45962655, 0.07608613, 0.135818,
+ 0.83580842, 0.07218851, 0.48318321, 0.84472463, 0.18348462,
+ 0.81585306, 0.96923926, 0.12835919, 0.35075355, 0.15807861,
+ 0.837437, 0.10824303, 0.1723387, 0.43926494, 0.83705486])
+
+ c1 = np.array([
+ -1.17705601, -0.513883, -0.26614584, 0.88067144, 1.00474954,
+ -1.1616545, 0.0266109, 0.38227157, 1.80489433, 0.21472396,
+ -1.41920399, -2.08158544, -0.10559009, 1.68999268, 0.34847107,
+ -0.4685737, 1.23980423, -0.14638744, -0.35907697, 0.22442616])
+
+ c2 = c0 + 2*c1
+ c3 = -3*c0 + 4*c1
+ a = np.array([c3, c0, c1, c2]).T
+ return a
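+
+
+# Editorial sketch (not part of matplotlib.mlab; the name is illustrative
+# only): fitting the deprecated PCA helper to its built-in collinear test
+# data and projecting onto the dominant components.
+def _example_pca():
+    a = PCA._get_colinear()                   # (20, 4), correlated columns
+    pca = PCA(a, standardize=True)
+    reduced = pca.project(a, minfrac=0.01)    # drop near-zero-variance axes
+    return pca.fracs, reduced.shape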
+
+
+@cbook.deprecated('2.2', 'numpy.percentile')
+def prctile(x, p=(0.0, 25.0, 50.0, 75.0, 100.0)):
+ """
+ Return the percentiles of *x*. *p* can either be a sequence of
+ percentile values or a scalar. If *p* is a sequence, the ith
+ element of the return sequence is the *p*(i)-th percentile of *x*.
+ If *p* is a scalar, the largest value of *x* less than or equal to
+ the *p* percentage point in the sequence is returned.
+ """
+
+ # This implementation derived from scipy.stats.scoreatpercentile
+ def _interpolate(a, b, fraction):
+ """Returns the point at the given fraction between a and b, where
+ 'fraction' must be between 0 and 1.
+ """
+ return a + (b - a) * fraction
+
+ per = np.array(p)
+ values = np.sort(x, axis=None)
+
+ idxs = per / 100 * (values.shape[0] - 1)
+ ai = idxs.astype(int)
+ bi = ai + 1
+ frac = idxs % 1
+
+ # handle cases where attempting to interpolate past last index
+ cond = bi >= len(values)
+ if per.ndim:
+ ai[cond] -= 1
+ bi[cond] -= 1
+ frac[cond] += 1
+ else:
+ if cond:
+ ai -= 1
+ bi -= 1
+ frac += 1
+
+ return _interpolate(values[ai], values[bi], frac)
+
+
+@cbook.deprecated('2.2')
+def prctile_rank(x, p):
+ """
+ Return the rank of each element in *x*; ranks run from 0 to
+ len(*p*). e.g., if *p* = (25, 50, 75), the return value will be a
+ len(*x*) array with values in [0,1,2,3] where 0 indicates the
+ value is less than the 25th percentile, 1 indicates the value is
+ >= the 25th and < 50th percentile, ... and 3 indicates the value
+ is above the 75th percentile cutoff.
+
+ *p* is either an array of percentiles in [0..100] or a scalar which
+ indicates how many quantiles of data you want ranked.
+ """
+
+ if not cbook.iterable(p):
+ p = np.arange(100.0/p, 100.0, 100.0/p)
+ else:
+ p = np.asarray(p)
+
+ if p.max() <= 1 or p.min() < 0 or p.max() > 100:
+ raise ValueError('percentiles should be in range 0..100, not 0..1')
+
+ ptiles = prctile(x, p)
+ return np.searchsorted(ptiles, x)
+
+
+@cbook.deprecated('2.2')
+def center_matrix(M, dim=0):
+ """
+ Return the matrix *M* with each row having zero mean and unit std.
+
+ If *dim* = 1 operate on columns instead of rows. (*dim* is
+ opposite to the numpy axis kwarg.)
+ """
+ M = np.asarray(M, float)
+ if dim:
+ M = (M - M.mean(axis=0)) / M.std(axis=0)
+ else:
+ M = (M - M.mean(axis=1)[:, np.newaxis])
+ M = M / M.std(axis=1)[:, np.newaxis]
+ return M
+
+
+@cbook.deprecated('2.2', 'scipy.integrate.ode')
+def rk4(derivs, y0, t):
+ """
+ Integrate a 1D or ND system of ODEs using 4th-order Runge-Kutta.
+ This is a toy implementation which may be useful if you find
+ yourself stranded on a system w/o scipy. Otherwise use
+ :func:`scipy.integrate`.
+
+ Parameters
+ ----------
+ y0
+ initial state vector
+
+ t
+ sample times
+
+ derivs
+ returns the derivative of the system and has the
+ signature ``dy = derivs(yi, ti)``
+
+ Examples
+ --------
+
+ A 2D system::
+
+ def derivs6(x,t):
+ d1 = x[0] + 2*x[1]
+ d2 = -3*x[0] + 4*x[1]
+ return (d1, d2)
+ dt = 0.0005
+ t = arange(0.0, 2.0, dt)
+ y0 = (1,2)
+ yout = rk4(derivs6, y0, t)
+
+ A 1D system::
+
+ alpha = 2
+ def derivs(x,t):
+ return -alpha*x + exp(-t)
+
+ y0 = 1
+ yout = rk4(derivs, y0, t)
+
+ If you have access to scipy, you should probably be using the
+ scipy.integrate tools rather than this function.
+ """
+
+ try:
+ Ny = len(y0)
+ except TypeError:
+ yout = np.zeros((len(t),), float)
+ else:
+ yout = np.zeros((len(t), Ny), float)
+
+ yout[0] = y0
+ i = 0
+
+ for i in np.arange(len(t)-1):
+
+ thist = t[i]
+ dt = t[i+1] - thist
+ dt2 = dt/2.0
+ y0 = yout[i]
+
+ k1 = np.asarray(derivs(y0, thist))
+ k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
+ k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
+ k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
+ yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
+ return yout
+
+
+@cbook.deprecated('2.2')
+def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
+ mux=0.0, muy=0.0, sigmaxy=0.0):
+ """
+ Bivariate Gaussian distribution for equal shape *X*, *Y*.
+
+ See `bivariate normal
+ <http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
+ at mathworld.
+ """
+ Xmu = X-mux
+ Ymu = Y-muy
+
+ rho = sigmaxy/(sigmax*sigmay)
+ z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
+ denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
+ return np.exp(-z/(2*(1-rho**2))) / denom
+
+
+@cbook.deprecated('2.2')
+def get_xyz_where(Z, Cond):
+ """
+ *Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
+ a boolean matrix where some condition is satisfied. Return value
+ is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
+ *z* are the values of *Z* at those indices. *x*, *y*, and *z* are
+ 1D arrays.
+ """
+ X, Y = np.indices(Z.shape)
+ return X[Cond], Y[Cond], Z[Cond]
+
+
+@cbook.deprecated('2.2')
+def get_sparse_matrix(M, N, frac=0.1):
+ """
+ Return a *M* x *N* sparse matrix with *frac* elements randomly
+ filled.
+ """
+ data = np.zeros((M, N))
+ for i in range(int(M*N*frac)):
+ x = np.random.randint(0, M-1)
+ y = np.random.randint(0, N-1)
+ data[x, y] = np.random.rand()
+ return data
+
+
+@cbook.deprecated('2.2', 'numpy.hypot')
+def dist(x, y):
+ """
+ Return the distance between two points.
+ """
+ d = x-y
+ return np.sqrt(np.dot(d, d))
+
+
+@cbook.deprecated('2.2')
+def dist_point_to_segment(p, s0, s1):
+ """
+ Get the distance of a point to a segment.
+
+ *p*, *s0*, *s1* are *xy* sequences
+
+ This algorithm from
+ http://geomalgorithms.com/a02-_lines.html
+ """
+ p = np.asarray(p, float)
+ s0 = np.asarray(s0, float)
+ s1 = np.asarray(s1, float)
+ v = s1 - s0
+ w = p - s0
+
+ c1 = np.dot(w, v)
+ if c1 <= 0:
+ return dist(p, s0)
+
+ c2 = np.dot(v, v)
+ if c2 <= c1:
+ return dist(p, s1)
+
+ b = c1 / c2
+ pb = s0 + b * v
+ return dist(p, pb)
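+
+
+# Editorial sketch (not part of matplotlib.mlab): the point (1, 1) lies at
+# distance 1 from the segment joining (0, 0) and (2, 0); its projection falls
+# inside the segment, so the perpendicular distance is returned.
+def _example_dist_point_to_segment():
+    return dist_point_to_segment((1.0, 1.0), (0.0, 0.0), (2.0, 0.0))  # 1.0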
+
+
+@cbook.deprecated('2.2')
+def segments_intersect(s1, s2):
+ """
+ Return *True* if *s1* and *s2* intersect.
+ *s1* and *s2* are defined as::
+
+ s1: (x1, y1), (x2, y2)
+ s2: (x3, y3), (x4, y4)
+ """
+ (x1, y1), (x2, y2) = s1
+ (x3, y3), (x4, y4) = s2
+
+ den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
+
+ n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
+ n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
+
+ if den == 0:
+ # lines parallel
+ return False
+
+ u1 = n1/den
+ u2 = n2/den
+
+ return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
+
+
+@cbook.deprecated('2.2')
+def fftsurr(x, detrend=detrend_none, window=window_none):
+ """
+ Compute an FFT phase randomized surrogate of *x*.
+ """
+ if cbook.iterable(window):
+ x = window*detrend(x)
+ else:
+ x = window(detrend(x))
+ z = np.fft.fft(x)
+ a = 2.*np.pi*1j
+ phase = a * np.random.rand(len(x))
+ z = z*np.exp(phase)
+ return np.fft.ifft(z).real
+
+
+@cbook.deprecated('2.2')
+def movavg(x, n):
+ """
+ Compute the length-*n* moving average of *x*.
+ """
+ w = np.empty((n,), dtype=float)
+ w[:] = 1.0/n
+ return np.convolve(x, w, mode='valid')
+
+
+# the following code was written and submitted by Fernando Perez
+# from the ipython numutils package under a BSD license
+# begin fperez functions
+
+"""
+A set of convenient utilities for numerical work.
+
+Most of this module requires numpy or is meant to be used with it.
+
+Copyright (c) 2001-2004, Fernando Perez. <Fernando.Perez@colorado.edu>
+All rights reserved.
+
+This license was generated from the BSD license template as found in:
+http://www.opensource.org/licenses/bsd-license.php
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the IPython project nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+
+
+# *****************************************************************************
+# Globals
+# ****************************************************************************
+# function definitions
+exp_safe_MIN = math.log(2.2250738585072014e-308)
+exp_safe_MAX = 1.7976931348623157e+308
+
+
+@cbook.deprecated("2.2", 'numpy.exp')
+def exp_safe(x):
+ """
+ Compute exponentials which safely underflow to zero.
+
+ Slow, but convenient to use. Note that numpy provides proper
+ floating point exception handling with access to the underlying
+ hardware.
+ """
+
+ if type(x) is np.ndarray:
+ return np.exp(np.clip(x, exp_safe_MIN, exp_safe_MAX))
+ else:
+ return math.exp(x)
+
+
+@cbook.deprecated("2.2", alternative='numpy.array(list(map(...)))')
+def amap(fn, *args):
+ """
+ amap(function, sequence[, sequence, ...]) -> array.
+
+ Works like :func:`map`, but it returns an array. This is just a
+ convenient shorthand for ``numpy.array(map(...))``.
+ """
+ return np.array(list(map(fn, *args)))
+
+
+@cbook.deprecated("2.2")
+def rms_flat(a):
+ """
+ Return the root mean square of all the elements of *a*, flattened out.
+ """
+ return np.sqrt(np.mean(np.abs(a) ** 2))
+
+
+@cbook.deprecated("2.2", alternative='numpy.linalg.norm(a, ord=1)')
+def l1norm(a):
+ """
+ Return the *l1* norm of *a*, flattened out.
+
+ Implemented as a separate function (not a call to :func:`norm` for speed).
+ """
+ return np.sum(np.abs(a))
+
+
+@cbook.deprecated("2.2", alternative='numpy.linalg.norm(a, ord=2)')
+def l2norm(a):
+ """
+ Return the *l2* norm of *a*, flattened out.
+
+ Implemented as a separate function (not a call to :func:`norm` for speed).
+ """
+ return np.sqrt(np.sum(np.abs(a) ** 2))
+
+
+@cbook.deprecated("2.2", alternative='numpy.linalg.norm(a.flat, ord=p)')
+def norm_flat(a, p=2):
+ """
+ norm(a,p=2) -> l-p norm of a.flat
+
+ Return the l-p norm of *a*, considered as a flat array. This is NOT a true
+ matrix norm, since arrays of arbitrary rank are always flattened.
+
+ *p* can be a number or the string 'Infinity' to get the L-infinity norm.
+ """
+ # This function was being masked by a more general norm later in
+ # the file. We may want to simply delete it.
+ if p == 'Infinity':
+ return np.max(np.abs(a))
+ else:
+ return np.sum(np.abs(a) ** p) ** (1 / p)
+
+
+@cbook.deprecated("2.2", 'numpy.arange')
+def frange(xini, xfin=None, delta=None, **kw):
+ """
+ frange([start,] stop[, step, keywords]) -> array of floats
+
+ Return a numpy ndarray containing a progression of floats. Similar to
+ :func:`numpy.arange`, but defaults to a closed interval.
+
+ ``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
+ defaults to 0, and the endpoint *is included*. This behavior is
+ different from that of :func:`range` and
+ :func:`numpy.arange`. This is deliberate, since :func:`frange`
+ will probably be more useful for generating lists of points for
+ function evaluation, and endpoints are often desired in this
+ use. The usual behavior of :func:`range` can be obtained by
+ setting the keyword *closed* = 0, in this case, :func:`frange`
+ basically becomes :func:`numpy.arange`.
+
+ When *step* is given, it specifies the increment (or
+ decrement). All arguments can be floating point numbers.
+
+ ``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
+ *xfin* <= *x1*.
+
+ :func:`frange` can also be called with the keyword *npts*. This
+ sets the number of points the list should contain (and overrides
+ the value *step* might have been given). :func:`numpy.arange`
+ doesn't offer this option.
+
+ Examples::
+
+ >>> frange(3)
+ array([ 0., 1., 2., 3.])
+ >>> frange(3,closed=0)
+ array([ 0., 1., 2.])
+ >>> frange(1,6,2)
+ array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
+ >>> frange(1,6.5,npts=5)
+ array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
+ """
+
+ # defaults
+ kw.setdefault('closed', 1)
+ endpoint = kw['closed'] != 0
+
+ # funny logic to allow the *first* argument to be optional (like range())
+ # This was modified with a simpler version from a similar frange() found
+ # at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
+ if xfin is None:
+ xfin = xini + 0.0
+ xini = 0.0
+
+ if delta is None:
+ delta = 1.0
+
+ # compute # of points, spacing and return final list
+ try:
+ npts = kw['npts']
+ delta = (xfin-xini) / (npts-endpoint)
+ except KeyError:
+ npts = int(np.round((xfin-xini)/delta)) + endpoint
+ # round finds the nearest, so the endpoint can be up to
+ # delta/2 larger than xfin.
+
+ return np.arange(npts)*delta+xini
+# end frange()
+
+
+@cbook.deprecated("2.2", 'numpy.identity')
+def identity(n, rank=2, dtype='l', typecode=None):
+ """
+ Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
+
+ For ranks higher than 2, this object is simply a multi-index Kronecker
+ delta::
+
+ / 1 if i0=i1=...=iR,
+ id[i0,i1,...,iR] = -|
+ \\ 0 otherwise.
+
+ Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
+
+ Since rank defaults to 2, this function behaves in the default case (when
+ only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
+ much faster.
+ """
+ if typecode is not None:
+ dtype = typecode
+ iden = np.zeros((n,)*rank, dtype)
+ for i in range(n):
+ idx = (i,)*rank
+ iden[idx] = 1
+ return iden
+
+
+@cbook.deprecated("2.2")
+def base_repr(number, base=2, padding=0):
+ """
+ Return the representation of a *number* in any given *base*.
+ """
+ chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ if number < base:
+ return (padding - 1) * chars[0] + chars[int(number)]
+ max_exponent = int(math.log(number)/math.log(base))
+ max_power = long(base) ** max_exponent
+ lead_digit = int(number/max_power)
+ return (chars[lead_digit] +
+ base_repr(number - max_power * lead_digit, base,
+ max(padding - 1, max_exponent)))
+
+
+@cbook.deprecated("2.2")
+def binary_repr(number, max_length=1025):
+ """
+ Return the binary representation of the input *number* as a
+ string.
+
+ This is more efficient than using :func:`base_repr` with base 2.
+
+ Increase the value of max_length for very large numbers. Note that
+ on 32-bit machines, 2**1023 is the largest integer power of 2
+ which can be converted to a Python float.
+ """
+
+# assert number < 2L << max_length
+ shifts = map(operator.rshift, max_length * [number],
+ range(max_length - 1, -1, -1))
+ digits = list(map(operator.mod, shifts, max_length * [2]))
+ if not digits.count(1):
+ return 0
+ digits = digits[digits.index(1):]
+ return ''.join(map(repr, digits)).replace('L', '')
+
+
+@cbook.deprecated("2.2", 'numpy.log2')
+def log2(x, ln2=math.log(2.0)):
+ """
+ Return the log(*x*) in base 2.
+
+ This is a _slow_ function but which is guaranteed to return the correct
+ integer value if the input is an integer exact power of 2.
+ """
+ try:
+ bin_n = binary_repr(x)[1:]
+ except (AssertionError, TypeError):
+ return math.log(x)/ln2
+ else:
+ if '1' in bin_n:
+ return math.log(x)/ln2
+ else:
+ return len(bin_n)
+
+
+@cbook.deprecated("2.2")
+def ispower2(n):
+ """
+ Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
+
+ Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
+ """
+
+ bin_n = binary_repr(n)[1:]
+ if '1' in bin_n:
+ return 0
+ else:
+ return len(bin_n)
+
+
+@cbook.deprecated("2.2")
+def isvector(X):
+ """
+ Like the MATLAB function with the same name, returns *True*
+ if the supplied numpy array or matrix *X* looks like a vector,
+ meaning it has one non-singleton axis (i.e., it can have
+ multiple axes, but all must have length 1, except for one of
+ them).
+
+ If you just want to see if the array has 1 axis, use X.ndim == 1.
+ """
+ return np.prod(X.shape) == np.max(X.shape)
+
+# end fperez numutils code
+
+
+# helpers for loading, saving, manipulating and viewing numpy record arrays
+@cbook.deprecated("2.2", 'numpy.isnan')
+def safe_isnan(x):
+ ':func:`numpy.isnan` for arbitrary types'
+ if isinstance(x, six.string_types):
+ return False
+ try:
+ b = np.isnan(x)
+ except NotImplementedError:
+ return False
+ except TypeError:
+ return False
+ else:
+ return b
+
+
+@cbook.deprecated("2.2", 'numpy.isinf')
+def safe_isinf(x):
+ ':func:`numpy.isinf` for arbitrary types'
+ if isinstance(x, six.string_types):
+ return False
+ try:
+ b = np.isinf(x)
+ except NotImplementedError:
+ return False
+ except TypeError:
+ return False
+ else:
+ return b
+
+
+@cbook.deprecated("2.2")
+def rec_append_fields(rec, names, arrs, dtypes=None):
+ """
+ Return a new record array with field names populated with data
+ from arrays in *arrs*. If appending a single field, then *names*,
+ *arrs* and *dtypes* do not have to be lists. They can just be the
+ values themselves.
+ """
+ if (not isinstance(names, six.string_types) and cbook.iterable(names)
+ and len(names) and isinstance(names[0], six.string_types)):
+ if len(names) != len(arrs):
+ raise ValueError("number of arrays do not match number of names")
+ else: # we have only 1 name and 1 array
+ names = [names]
+ arrs = [arrs]
+ arrs = list(map(np.asarray, arrs))
+ if dtypes is None:
+ dtypes = [a.dtype for a in arrs]
+ elif not cbook.iterable(dtypes):
+ dtypes = [dtypes]
+ if len(arrs) != len(dtypes):
+ if len(dtypes) == 1:
+ dtypes = dtypes * len(arrs)
+ else:
+ raise ValueError("dtypes must be None, a single dtype or a list")
+ old_dtypes = rec.dtype.descr
+ if six.PY2:
+ old_dtypes = [(name.encode('utf-8'), dt) for name, dt in old_dtypes]
+ newdtype = np.dtype(old_dtypes + list(zip(names, dtypes)))
+ newrec = np.recarray(rec.shape, dtype=newdtype)
+ for field in rec.dtype.fields:
+ newrec[field] = rec[field]
+ for name, arr in zip(names, arrs):
+ newrec[name] = arr
+ return newrec
+
+
+@cbook.deprecated("2.2")
+def rec_drop_fields(rec, names):
+ """
+ Return a new numpy record array with fields in *names* dropped.
+ """
+
+ names = set(names)
+
+ newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
+ if name not in names])
+
+ newrec = np.recarray(rec.shape, dtype=newdtype)
+ for field in newdtype.names:
+ newrec[field] = rec[field]
+
+ return newrec
+
+
+@cbook.deprecated("2.2")
+def rec_keep_fields(rec, names):
+ """
+ Return a new numpy record array with only fields listed in names
+ """
+
+ if isinstance(names, six.string_types):
+ names = names.split(',')
+
+ arrays = []
+ for name in names:
+ arrays.append(rec[name])
+
+ return np.rec.fromarrays(arrays, names=names)
+
+
+@cbook.deprecated("2.2")
+def rec_groupby(r, groupby, stats):
+ """
+ *r* is a numpy record array
+
+ *groupby* is a sequence of record array attribute names that
+ together form the grouping key. e.g., ('date', 'productcode')
+
+ *stats* is a sequence of (*attr*, *func*, *outname*) tuples which
+ will call ``x = func(attr)`` and assign *x* to the record array
+ output with attribute *outname*. For example::
+
+ stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
+
+ The returned record array has *dtype* names for each attribute name in
+ the *groupby* argument, with the associated group values, and
+ for each outname name in the *stats* argument, with the associated
+ stat summary output.
+ """
+ # build a dictionary from groupby keys-> list of indices into r with
+ # those keys
+ rowd = {}
+ for i, row in enumerate(r):
+ key = tuple([row[attr] for attr in groupby])
+ rowd.setdefault(key, []).append(i)
+
+ rows = []
+ # sort the output by groupby keys
+ for key in sorted(rowd):
+ row = list(key)
+ # get the indices for this groupby key
+ ind = rowd[key]
+ thisr = r[ind]
+ # call each stat function for this groupby slice
+ row.extend([func(thisr[attr]) for attr, func, outname in stats])
+ rows.append(row)
+
+ # build the output record array with groupby and outname attributes
+ attrs, funcs, outnames = list(zip(*stats))
+ names = list(groupby)
+ names.extend(outnames)
+ return np.rec.fromrecords(rows, names=names)
+
+
+@cbook.deprecated("2.2")
+def rec_summarize(r, summaryfuncs):
+ """
+ *r* is a numpy record array
+
+ *summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
+ which will apply *func* to the array *r*[attr] and assign the
+ output to a new attribute name *outname*. The returned record
+ array is identical to *r*, with extra arrays for each element in
+ *summaryfuncs*.
+
+ """
+
+ names = list(r.dtype.names)
+ arrays = [r[name] for name in names]
+
+ for attr, func, outname in summaryfuncs:
+ names.append(outname)
+ arrays.append(np.asarray(func(r[attr])))
+
+ return np.rec.fromarrays(arrays, names=names)
+
+
+@cbook.deprecated("2.2")
+def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1',
+ r2postfix='2'):
+ """
+ Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
+ field names -- if *key* is a string it is assumed to be a single
+ attribute name. If *r1* and *r2* have equal values on all the keys
+ in the *key* tuple, then their fields will be merged into a new
+ record array containing the intersection of the fields of *r1* and
+ *r2*.
+
+ *r1* (also *r2*) must not have any duplicate keys.
+
+ The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
+ do a rightouter join just reverse *r1* and *r2*.
+
+ The *defaults* keyword is a dictionary filled with
+ ``{column_name:default_value}`` pairs.
+
+ The keywords *r1postfix* and *r2postfix* are postfixed to column names
+ (other than keys) that are both in *r1* and *r2*.
+ """
+
+ if isinstance(key, six.string_types):
+ key = (key, )
+
+ for name in key:
+ if name not in r1.dtype.names:
+ raise ValueError('r1 does not have key field %s' % name)
+ if name not in r2.dtype.names:
+ raise ValueError('r2 does not have key field %s' % name)
+
+ def makekey(row):
+ return tuple([row[name] for name in key])
+
+ r1d = {makekey(row): i for i, row in enumerate(r1)}
+ r2d = {makekey(row): i for i, row in enumerate(r2)}
+
+ r1keys = set(r1d)
+ r2keys = set(r2d)
+
+ common_keys = r1keys & r2keys
+
+ r1ind = np.array([r1d[k] for k in common_keys])
+ r2ind = np.array([r2d[k] for k in common_keys])
+
+ common_len = len(common_keys)
+ left_len = right_len = 0
+ if jointype == "outer" or jointype == "leftouter":
+ left_keys = r1keys.difference(r2keys)
+ left_ind = np.array([r1d[k] for k in left_keys])
+ left_len = len(left_ind)
+ if jointype == "outer":
+ right_keys = r2keys.difference(r1keys)
+ right_ind = np.array([r2d[k] for k in right_keys])
+ right_len = len(right_ind)
+
+ def key_desc(name):
+ '''
+ if name is a string key, use the larger size of r1 or r2 before
+ merging
+ '''
+ dt1 = r1.dtype[name]
+ if dt1.type != np.string_:
+ return (name, dt1.descr[0][1])
+
+ dt2 = r2.dtype[name]
+ if dt1 != dt2:
+ raise ValueError("The '{}' fields in arrays 'r1' and 'r2' must "
+ "have the same dtype".format(name))
+ if dt1.num > dt2.num:
+ return (name, dt1.descr[0][1])
+ else:
+ return (name, dt2.descr[0][1])
+
+ keydesc = [key_desc(name) for name in key]
+
+ def mapped_r1field(name):
+ """
+ The column name in *newrec* that corresponds to the column in *r1*.
+ """
+ if name in key or name not in r2.dtype.names:
+ return name
+ else:
+ return name + r1postfix
+
+ def mapped_r2field(name):
+ """
+ The column name in *newrec* that corresponds to the column in *r2*.
+ """
+ if name in key or name not in r1.dtype.names:
+ return name
+ else:
+ return name + r2postfix
+
+ r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr
+ if desc[0] not in key]
+ r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr
+ if desc[0] not in key]
+ all_dtypes = keydesc + r1desc + r2desc
+ if six.PY2:
+ all_dtypes = [(name.encode('utf-8'), dt) for name, dt in all_dtypes]
+ newdtype = np.dtype(all_dtypes)
+ newrec = np.recarray((common_len + left_len + right_len,), dtype=newdtype)
+
+ if defaults is not None:
+ for thiskey in defaults:
+ if thiskey not in newdtype.names:
+ warnings.warn('rec_join defaults key="%s" not in new dtype '
+ 'names "%s"' % (thiskey, newdtype.names))
+
+ for name in newdtype.names:
+ dt = newdtype[name]
+ if dt.kind in ('f', 'i'):
+ newrec[name] = 0
+
+ if jointype != 'inner' and defaults is not None:
+ # fill in the defaults en masse
+ newrec_fields = list(newrec.dtype.fields)
+ for k, v in six.iteritems(defaults):
+ if k in newrec_fields:
+ newrec[k] = v
+
+ for field in r1.dtype.names:
+ newfield = mapped_r1field(field)
+ if common_len:
+ newrec[newfield][:common_len] = r1[field][r1ind]
+ if (jointype == "outer" or jointype == "leftouter") and left_len:
+ newrec[newfield][common_len:(common_len+left_len)] = (
+ r1[field][left_ind]
+ )
+
+ for field in r2.dtype.names:
+ newfield = mapped_r2field(field)
+ if field not in key and common_len:
+ newrec[newfield][:common_len] = r2[field][r2ind]
+ if jointype == "outer" and right_len:
+ newrec[newfield][-right_len:] = r2[field][right_ind]
+
+ newrec.sort(order=key)
+
+ return newrec
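+
+
+# Editorial sketch (not part of matplotlib.mlab; field names are illustrative
+# only): joining two small record arrays on a shared 'id' column.
+def _example_rec_join():
+    r1 = np.rec.fromrecords([(1, 10.0), (2, 20.0)], names='id,price')
+    r2 = np.rec.fromrecords([(2, 5), (3, 7)], names='id,qty')
+    inner = rec_join('id', r1, r2, jointype='inner')       # shared key 2 only
+    outer = rec_join('id', r1, r2, jointype='outer',
+                     defaults={'price': 0.0})               # keys 1, 2 and 3
+    return inner.dtype.names, len(outer)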
+
+
+@cbook.deprecated("2.2")
+def recs_join(key, name, recs, jointype='outer', missing=0., postfixes=None):
+ """
+ Join a sequence of record arrays on single column key.
+
+ This function only joins a single column of the multiple record arrays
+
+ *key*
+ is the column name that acts as a key
+
+ *name*
+ is the name of the column that we want to join
+
+ *recs*
+ is a list of record arrays to join
+
+ *jointype*
+ is a string 'inner' or 'outer'
+
+ *missing*
+ is what any missing field is replaced by
+
+ *postfixes*
+      if not None, a sequence of postfixes, one per record array in *recs*
+
+    Returns a record array with columns [rowkey, name0, name1, ..., nameN-1],
+    or, if postfixes [PF0, PF1, ..., PFN-1] are supplied,
+    [rowkey, namePF0, namePF1, ..., namePFN-1].
+
+ Example::
+
+ r = recs_join("date", "close", recs=[r0, r1], missing=0.)
+
+ """
+ results = []
+ aligned_iters = cbook.align_iterators(operator.attrgetter(key),
+ *[iter(r) for r in recs])
+
+ def extract(r):
+ if r is None:
+ return missing
+ else:
+ return r[name]
+
+ if jointype == "outer":
+ for rowkey, row in aligned_iters:
+ results.append([rowkey] + list(map(extract, row)))
+ elif jointype == "inner":
+ for rowkey, row in aligned_iters:
+ if None not in row: # throw out any Nones
+ results.append([rowkey] + list(map(extract, row)))
+
+ if postfixes is None:
+ postfixes = ['%d' % i for i in range(len(recs))]
+ names = ",".join([key] + ["%s%s" % (name, postfix)
+ for postfix in postfixes])
+ return np.rec.fromrecords(results, names=names)
+
+
+@cbook.deprecated("2.2")
+def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
+ converterd=None, names=None, missing='', missingd=None,
+ use_mrecords=False, dayfirst=False, yearfirst=False):
+ """
+    Load data from a comma-, space-, or tab-delimited file in *fname* into a
+ numpy record array and return the record array.
+
+ If *names* is *None*, a header row is required to automatically
+ assign the recarray names. The headers will be lower cased,
+ spaces will be converted to underscores, and illegal attribute
+ name characters removed. If *names* is not *None*, it is a
+ sequence of names to use for the column names. In this case, it
+ is assumed there is no header row.
+
+
+ - *fname*: can be a filename or a file handle. Support for gzipped
+ files is automatic, if the filename ends in '.gz'
+
+ - *comments*: the character used to indicate the start of a comment
+ in the file, or *None* to switch off the removal of comments
+
+ - *skiprows*: is the number of rows from the top to skip
+
+ - *checkrows*: is the number of rows to check to validate the column
+ data type. When set to zero all rows are validated.
+
+ - *converterd*: if not *None*, is a dictionary mapping column number or
+ munged column name to a converter function.
+
+ - *names*: if not None, is a list of header names. In this case, no
+ header will be read from the file
+
+ - *missingd* is a dictionary mapping munged column names to field values
+ which signify that the field does not contain actual data and should
+ be masked, e.g., '0000-00-00' or 'unused'
+
+ - *missing*: a string whose value signals a missing field regardless of
+ the column it appears in
+
+ - *use_mrecords*: if True, return an mrecords.fromrecords record array if
+ any of the data are missing
+
+ - *dayfirst*: default is False so that MM-DD-YY has precedence over
+ DD-MM-YY. See
+ http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
+ for further information.
+
+ - *yearfirst*: default is False so that MM-DD-YY has precedence over
+ YY-MM-DD. See
+ http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
+ for further information.
+
+ If no rows are found, *None* is returned
+ """
+
+ if converterd is None:
+ converterd = dict()
+
+ if missingd is None:
+ missingd = {}
+
+ import dateutil.parser
+ import datetime
+
+ fh = cbook.to_filehandle(fname)
+
+ delimiter = str(delimiter)
+
+ class FH:
+ """
+ For space-delimited files, we want different behavior than
+ comma or tab. Generally, we want multiple spaces to be
+ treated as a single separator, whereas with comma and tab we
+ want multiple commas to return multiple (empty) fields. The
+ join/strip trick below effects this.
+ """
+ def __init__(self, fh):
+ self.fh = fh
+
+ def close(self):
+ self.fh.close()
+
+ def seek(self, arg):
+ self.fh.seek(arg)
+
+ def fix(self, s):
+ return ' '.join(s.split())
+
+ def __next__(self):
+ return self.fix(next(self.fh))
+
+ def __iter__(self):
+ for line in self.fh:
+ yield self.fix(line)
+
+ if delimiter == ' ':
+ fh = FH(fh)
+
+ reader = csv.reader(fh, delimiter=delimiter)
+
+ def process_skiprows(reader):
+ if skiprows:
+ for i, row in enumerate(reader):
+ if i >= (skiprows-1):
+ break
+
+ return fh, reader
+
+ process_skiprows(reader)
+
+ def ismissing(name, val):
+ "Should the value val in column name be masked?"
+ return val == missing or val == missingd.get(name) or val == ''
+
+ def with_default_value(func, default):
+ def newfunc(name, val):
+ if ismissing(name, val):
+ return default
+ else:
+ return func(val)
+ return newfunc
+
+ def mybool(x):
+ if x == 'True':
+ return True
+ elif x == 'False':
+ return False
+ else:
+ raise ValueError('invalid bool')
+
+ dateparser = dateutil.parser.parse
+
+ def mydateparser(x):
+ # try and return a datetime object
+ d = dateparser(x, dayfirst=dayfirst, yearfirst=yearfirst)
+ return d
+
+ mydateparser = with_default_value(mydateparser, datetime.datetime(1, 1, 1))
+
+ myfloat = with_default_value(float, np.nan)
+ myint = with_default_value(int, -1)
+ mystr = with_default_value(str, '')
+ mybool = with_default_value(mybool, None)
+
+ def mydate(x):
+ # try and return a date object
+ d = dateparser(x, dayfirst=dayfirst, yearfirst=yearfirst)
+
+ if d.hour > 0 or d.minute > 0 or d.second > 0:
+ raise ValueError('not a date')
+ return d.date()
+ mydate = with_default_value(mydate, datetime.date(1, 1, 1))
+
+ def get_func(name, item, func):
+ # promote functions in this order
+ funcs = [mybool, myint, myfloat, mydate, mydateparser, mystr]
+ for func in funcs[funcs.index(func):]:
+ try:
+ func(name, item)
+ except Exception:
+ continue
+ return func
+ raise ValueError('Could not find a working conversion function')
+
+ # map column names that clash with builtins -- TODO - extend this list
+ itemd = {
+ 'return': 'return_',
+ 'file': 'file_',
+ 'print': 'print_',
+ }
+
+ def get_converters(reader, comments):
+
+ converters = None
+ i = 0
+ for row in reader:
+ if (len(row) and comments is not None and
+ row[0].startswith(comments)):
+ continue
+ if i == 0:
+ converters = [mybool]*len(row)
+ if checkrows and i > checkrows:
+ break
+ i += 1
+
+ for j, (name, item) in enumerate(zip(names, row)):
+ func = converterd.get(j)
+ if func is None:
+ func = converterd.get(name)
+ if func is None:
+ func = converters[j]
+ if len(item.strip()):
+ func = get_func(name, item, func)
+ else:
+ # how should we handle custom converters and defaults?
+ func = with_default_value(func, None)
+ converters[j] = func
+ return converters
+
+ # Get header and remove invalid characters
+ needheader = names is None
+
+ if needheader:
+ for row in reader:
+ if (len(row) and comments is not None and
+ row[0].startswith(comments)):
+ continue
+ headers = row
+ break
+
+ # remove these chars
+ delete = set(r"""~!@#$%^&*()-=+~\|}[]{';: /?.>,<""")
+ delete.add('"')
+
+ names = []
+ seen = dict()
+ for i, item in enumerate(headers):
+ item = item.strip().lower().replace(' ', '_')
+ item = ''.join([c for c in item if c not in delete])
+ if not len(item):
+ item = 'column%d' % i
+
+ item = itemd.get(item, item)
+ cnt = seen.get(item, 0)
+ if cnt > 0:
+ names.append(item + '_%d' % cnt)
+ else:
+ names.append(item)
+ seen[item] = cnt+1
+
+ else:
+ if isinstance(names, six.string_types):
+ names = [n.strip() for n in names.split(',')]
+
+ # get the converter functions by inspecting checkrows
+ converters = get_converters(reader, comments)
+ if converters is None:
+ raise ValueError('Could not find any valid data in CSV file')
+
+ # reset the reader and start over
+ fh.seek(0)
+ reader = csv.reader(fh, delimiter=delimiter)
+ process_skiprows(reader)
+
+ if needheader:
+ while True:
+ # skip past any comments and consume one line of column header
+ row = next(reader)
+ if (len(row) and comments is not None and
+ row[0].startswith(comments)):
+ continue
+ break
+
+ # iterate over the remaining rows and convert the data to date
+ # objects, ints, or floats as appropriate
+ rows = []
+ rowmasks = []
+ for i, row in enumerate(reader):
+ if not len(row):
+ continue
+ if comments is not None and row[0].startswith(comments):
+ continue
+ # Ensure that the row returned always has the same nr of elements
+ row.extend([''] * (len(converters) - len(row)))
+ rows.append([func(name, val)
+ for func, name, val in zip(converters, names, row)])
+ rowmasks.append([ismissing(name, val)
+ for name, val in zip(names, row)])
+ fh.close()
+
+ if not len(rows):
+ return None
+
+ if use_mrecords and np.any(rowmasks):
+ r = np.ma.mrecords.fromrecords(rows, names=names, mask=rowmasks)
+ else:
+ r = np.rec.fromrecords(rows, names=names)
+ return r
+
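+# A minimal usage sketch of csv2rec (illustrative; 'prices.csv' is a
+# hypothetical path, not part of the original module):
+#
+#     with open('prices.csv', 'w') as f:
+#         f.write('date,close\n2006-01-03,10.5\n2006-01-04,11.2\n')
+#     r = csv2rec('prices.csv')
+#     # r.dtype.names == ('date', 'close'); the date column is parsed into
+#     # datetime.date objects and close into floats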
+
+# a series of classes for describing the format intentions of various rec views
+@cbook.deprecated("2.2")
+class FormatObj(object):
+ def tostr(self, x):
+ return self.toval(x)
+
+ def toval(self, x):
+ return str(x)
+
+ def fromstr(self, s):
+ return s
+
+ def __hash__(self):
+ """
+ override the hash function of any of the formatters, so that we don't
+ create duplicate excel format styles
+ """
+ return hash(self.__class__)
+
+
+@cbook.deprecated("2.2")
+class FormatString(FormatObj):
+ def tostr(self, x):
+ val = repr(x)
+ return val[1:-1]
+
+
+@cbook.deprecated("2.2")
+class FormatFormatStr(FormatObj):
+ def __init__(self, fmt):
+ self.fmt = fmt
+
+ def tostr(self, x):
+ if x is None:
+ return 'None'
+ return self.fmt % self.toval(x)
+
+
+@cbook.deprecated("2.2")
+class FormatFloat(FormatFormatStr):
+ def __init__(self, precision=4, scale=1.):
+ FormatFormatStr.__init__(self, '%%1.%df' % precision)
+ self.precision = precision
+ self.scale = scale
+
+ def __hash__(self):
+ return hash((self.__class__, self.precision, self.scale))
+
+ def toval(self, x):
+ if x is not None:
+ x = x * self.scale
+ return x
+
+ def fromstr(self, s):
+ return float(s)/self.scale
+
+
+@cbook.deprecated("2.2")
+class FormatInt(FormatObj):
+
+ def tostr(self, x):
+ return '%d' % int(x)
+
+ def toval(self, x):
+ return int(x)
+
+ def fromstr(self, s):
+ return int(s)
+
+
+@cbook.deprecated("2.2")
+class FormatBool(FormatObj):
+ def toval(self, x):
+ return str(x)
+
+ def fromstr(self, s):
+ return bool(s)
+
+
+@cbook.deprecated("2.2")
+class FormatPercent(FormatFloat):
+ def __init__(self, precision=4):
+ FormatFloat.__init__(self, precision, scale=100.)
+
+
+@cbook.deprecated("2.2")
+class FormatThousands(FormatFloat):
+ def __init__(self, precision=4):
+ FormatFloat.__init__(self, precision, scale=1e-3)
+
+
+@cbook.deprecated("2.2")
+class FormatMillions(FormatFloat):
+ def __init__(self, precision=4):
+ FormatFloat.__init__(self, precision, scale=1e-6)
+
+
+@cbook.deprecated("2.2", alternative='date.strftime')
+class FormatDate(FormatObj):
+ def __init__(self, fmt):
+ self.fmt = fmt
+
+ def __hash__(self):
+ return hash((self.__class__, self.fmt))
+
+ def toval(self, x):
+ if x is None:
+ return 'None'
+ return x.strftime(self.fmt)
+
+ def fromstr(self, x):
+ import dateutil.parser
+ return dateutil.parser.parse(x).date()
+
+
+@cbook.deprecated("2.2", alternative='datetime.strftime')
+class FormatDatetime(FormatDate):
+ def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
+ FormatDate.__init__(self, fmt)
+
+ def fromstr(self, x):
+ import dateutil.parser
+ return dateutil.parser.parse(x)
+
+
+@cbook.deprecated("2.2")
+def get_formatd(r, formatd=None):
+ 'build a formatd guaranteed to have a key for every dtype name'
+ defaultformatd = {
+ np.bool_: FormatBool(),
+ np.int16: FormatInt(),
+ np.int32: FormatInt(),
+ np.int64: FormatInt(),
+ np.float32: FormatFloat(),
+ np.float64: FormatFloat(),
+ np.object_: FormatObj(),
+ np.string_: FormatString()}
+
+ if formatd is None:
+ formatd = dict()
+
+ for i, name in enumerate(r.dtype.names):
+ dt = r.dtype[name]
+ format = formatd.get(name)
+ if format is None:
+ format = defaultformatd.get(dt.type, FormatObj())
+ formatd[name] = format
+ return formatd
+
+
+@cbook.deprecated("2.2")
+def csvformat_factory(format):
+ format = copy.deepcopy(format)
+ if isinstance(format, FormatFloat):
+ format.scale = 1. # override scaling for storage
+ format.fmt = '%r'
+ return format
+
+
+@cbook.deprecated("2.2", alternative='numpy.recarray.tofile')
+def rec2txt(r, header=None, padding=3, precision=3, fields=None):
+ """
+ Returns a textual representation of a record array.
+
+ Parameters
+ ----------
+    r : numpy recarray
+
+    header : list
+        column headers
+
+    padding : int
+        space between each column
+
+    precision : int or list of ints
+        number of decimal places to use for floats.
+        Set to an integer to apply to all floats. Set to a
+        list of integers to apply precision individually.
+        Precision for non-floats is simply ignored.
+
+ fields : list
+ If not None, a list of field names to print. fields
+ can be a list of strings like ['field1', 'field2'] or a single
+ comma separated string like 'field1,field2'
+
+ Examples
+ --------
+
+ For ``precision=[0,2,3]``, the output is ::
+
+ ID Price Return
+ ABC 12.54 0.234
+ XYZ 6.32 -0.076
+ """
+
+ if fields is not None:
+ r = rec_keep_fields(r, fields)
+
+ if cbook.is_numlike(precision):
+ precision = [precision]*len(r.dtype)
+
+ def get_type(item, atype=int):
+ tdict = {None: int, int: float, float: str}
+ try:
+ atype(str(item))
+        except Exception:
+ return get_type(item, tdict[atype])
+ return atype
+
+ def get_justify(colname, column, precision):
+ ntype = column.dtype
+
+ if np.issubdtype(ntype, np.character):
+ fixed_width = int(ntype.str[2:])
+ length = max(len(colname), fixed_width)
+ return 0, length+padding, "%s" # left justify
+
+ if np.issubdtype(ntype, np.integer):
+ length = max(len(colname),
+ np.max(list(map(len, list(map(str, column))))))
+ return 1, length+padding, "%d" # right justify
+
+ if np.issubdtype(ntype, np.floating):
+ fmt = "%." + str(precision) + "f"
+ length = max(
+ len(colname),
+ np.max(list(map(len, list(map(lambda x: fmt % x, column)))))
+ )
+ return 1, length+padding, fmt # right justify
+
+ return (0,
+ max(len(colname),
+ np.max(list(map(len, list(map(str, column))))))+padding,
+ "%s")
+
+ if header is None:
+ header = r.dtype.names
+
+ justify_pad_prec = [get_justify(header[i], r.__getitem__(colname),
+ precision[i])
+ for i, colname in enumerate(r.dtype.names)]
+
+ justify_pad_prec_spacer = []
+ for i in range(len(justify_pad_prec)):
+ just, pad, prec = justify_pad_prec[i]
+ if i == 0:
+ justify_pad_prec_spacer.append((just, pad, prec, 0))
+ else:
+ pjust, ppad, pprec = justify_pad_prec[i-1]
+ if pjust == 0 and just == 1:
+ justify_pad_prec_spacer.append((just, pad-padding, prec, 0))
+ elif pjust == 1 and just == 0:
+ justify_pad_prec_spacer.append((just, pad, prec, padding))
+ else:
+ justify_pad_prec_spacer.append((just, pad, prec, 0))
+
+ def format(item, just_pad_prec_spacer):
+ just, pad, prec, spacer = just_pad_prec_spacer
+ if just == 0:
+ return spacer*' ' + str(item).ljust(pad)
+ else:
+ if get_type(item) == float:
+ item = (prec % float(item))
+ elif get_type(item) == int:
+ item = (prec % int(item))
+
+ return item.rjust(pad)
+
+ textl = []
+ textl.append(''.join([format(colitem, justify_pad_prec_spacer[j])
+ for j, colitem in enumerate(header)]))
+ for i, row in enumerate(r):
+ textl.append(''.join([format(colitem, justify_pad_prec_spacer[j])
+ for j, colitem in enumerate(row)]))
+ if i == 0:
+ textl[0] = textl[0].rstrip()
+
+ text = os.linesep.join(textl)
+ return text
+
+
+@cbook.deprecated("2.2", alternative='numpy.recarray.tofile')
+def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
+ missingd=None, withheader=True):
+ """
+ Save the data from numpy recarray *r* into a
+ comma-/space-/tab-delimited file. The record array dtype names
+ will be used for column headers.
+
+ *fname*: can be a filename or a file handle. Support for gzipped
+ files is automatic, if the filename ends in '.gz'
+
+ *withheader*: if withheader is False, do not write the attribute
+ names in the first row
+
+    For formatd entries of type FormatFloat, the precision is overridden to
+    store full-precision floats in the CSV file.
+
+ See Also
+ --------
+ :func:`csv2rec`
+ For information about *missing* and *missingd*, which can be used to
+ fill in masked values into your CSV file.
+ """
+
+ delimiter = str(delimiter)
+
+ if missingd is None:
+ missingd = dict()
+
+ def with_mask(func):
+ def newfunc(val, mask, mval):
+ if mask:
+ return mval
+ else:
+ return func(val)
+ return newfunc
+
+ if r.ndim != 1:
+ raise ValueError('rec2csv only operates on 1 dimensional recarrays')
+
+ formatd = get_formatd(r, formatd)
+ funcs = []
+ for i, name in enumerate(r.dtype.names):
+ funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
+
+ fh, opened = cbook.to_filehandle(fname, 'wb', return_opened=True)
+ writer = csv.writer(fh, delimiter=delimiter)
+ header = r.dtype.names
+ if withheader:
+ writer.writerow(header)
+
+ # Our list of specials for missing values
+ mvals = []
+ for name in header:
+ mvals.append(missingd.get(name, missing))
+
+ ismasked = False
+ if len(r):
+ row = r[0]
+ ismasked = hasattr(row, '_fieldmask')
+
+ for row in r:
+ if ismasked:
+ row, rowmask = row.item(), row._fieldmask.item()
+ else:
+ rowmask = [False] * len(row)
+ writer.writerow([func(val, mask, mval) for func, val, mask, mval
+ in zip(funcs, row, rowmask, mvals)])
+ if opened:
+ fh.close()
+
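+# A minimal usage sketch of rec2csv (illustrative; 'out.csv' is a hypothetical
+# path):
+#
+#     r = np.rec.fromrecords([(1, 2.5), (2, 3.25)], names='a,b')
+#     rec2csv(r, 'out.csv')        # writes a header row 'a,b' plus two rows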
+
+@cbook.deprecated('2.2', alternative='scipy.interpolate.griddata')
+def griddata(x, y, z, xi, yi, interp='nn'):
+ """
+ Interpolates from a nonuniformly spaced grid to some other grid.
+
+ Fits a surface of the form z = f(`x`, `y`) to the data in the
+ (usually) nonuniformly spaced vectors (`x`, `y`, `z`), then
+ interpolates this surface at the points specified by
+ (`xi`, `yi`) to produce `zi`.
+
+ Parameters
+ ----------
+ x, y, z : 1d array_like
+ Coordinates of grid points to interpolate from.
+ xi, yi : 1d or 2d array_like
+ Coordinates of grid points to interpolate to.
+ interp : string key from {'nn', 'linear'}
+ Interpolation algorithm, either 'nn' for natural neighbor, or
+ 'linear' for linear interpolation.
+
+ Returns
+ -------
+ 2d float array
+        Array of values interpolated at (`xi`, `yi`) points. The array
+        will be masked if any of (`xi`, `yi`) are outside the convex
+        hull of (`x`, `y`).
+
+ Notes
+ -----
+ If `interp` is 'nn' (the default), uses natural neighbor
+ interpolation based on Delaunay triangulation. This option is
+ only available if the mpl_toolkits.natgrid module is installed.
+ This can be downloaded from https://github.com/matplotlib/natgrid.
+ The (`xi`, `yi`) grid must be regular and monotonically increasing
+ in this case.
+
+ If `interp` is 'linear', linear interpolation is used via
+ matplotlib.tri.LinearTriInterpolator.
+
+ Instead of using `griddata`, more flexible functionality and other
+ interpolation options are available using a
+ matplotlib.tri.Triangulation and a matplotlib.tri.TriInterpolator.
+ """
+ # Check input arguments.
+ x = np.asanyarray(x, dtype=np.float64)
+ y = np.asanyarray(y, dtype=np.float64)
+ z = np.asanyarray(z, dtype=np.float64)
+ if x.shape != y.shape or x.shape != z.shape or x.ndim != 1:
+ raise ValueError("x, y and z must be equal-length 1-D arrays")
+
+ xi = np.asanyarray(xi, dtype=np.float64)
+ yi = np.asanyarray(yi, dtype=np.float64)
+ if xi.ndim != yi.ndim:
+ raise ValueError("xi and yi must be arrays with the same number of "
+ "dimensions (1 or 2)")
+ if xi.ndim == 2 and xi.shape != yi.shape:
+ raise ValueError("if xi and yi are 2D arrays, they must have the same "
+ "shape")
+ if xi.ndim == 1:
+ xi, yi = np.meshgrid(xi, yi)
+
+ if interp == 'nn':
+ use_nn_interpolation = True
+ elif interp == 'linear':
+ use_nn_interpolation = False
+ else:
+ raise ValueError("interp keyword must be one of 'linear' (for linear "
+ "interpolation) or 'nn' (for natural neighbor "
+ "interpolation). Default is 'nn'.")
+
+ # Remove masked points.
+ mask = np.ma.getmask(z)
+ if mask is not np.ma.nomask:
+ x = x.compress(~mask)
+ y = y.compress(~mask)
+ z = z.compressed()
+
+ if use_nn_interpolation:
+ try:
+ from mpl_toolkits.natgrid import _natgrid
+ except ImportError:
+ raise RuntimeError(
+ "To use interp='nn' (Natural Neighbor interpolation) in "
+ "griddata, natgrid must be installed. Either install it "
+ "from http://github.com/matplotlib/natgrid or use "
+ "interp='linear' instead.")
+
+ if xi.ndim == 2:
+ # natgrid expects 1D xi and yi arrays.
+ xi = xi[0, :]
+ yi = yi[:, 0]
+
+ # Override default natgrid internal parameters.
+ _natgrid.seti(b'ext', 0)
+ _natgrid.setr(b'nul', np.nan)
+
+ if np.min(np.diff(xi)) < 0 or np.min(np.diff(yi)) < 0:
+ raise ValueError("Output grid defined by xi,yi must be monotone "
+ "increasing")
+
+ # Allocate array for output (buffer will be overwritten by natgridd)
+ zi = np.empty((yi.shape[0], xi.shape[0]), np.float64)
+
+ # Natgrid requires each array to be contiguous rather than e.g. a view
+ # that is a non-contiguous slice of another array. Use numpy.require
+ # to deal with this, which will copy if necessary.
+ x = np.require(x, requirements=['C'])
+ y = np.require(y, requirements=['C'])
+ z = np.require(z, requirements=['C'])
+ xi = np.require(xi, requirements=['C'])
+ yi = np.require(yi, requirements=['C'])
+ _natgrid.natgridd(x, y, z, xi, yi, zi)
+
+ # Mask points on grid outside convex hull of input data.
+ if np.any(np.isnan(zi)):
+ zi = np.ma.masked_where(np.isnan(zi), zi)
+ return zi
+ else:
+ # Linear interpolation performed using a matplotlib.tri.Triangulation
+ # and a matplotlib.tri.LinearTriInterpolator.
+ from .tri import Triangulation, LinearTriInterpolator
+ triang = Triangulation(x, y)
+ interpolator = LinearTriInterpolator(triang, z)
+ return interpolator(xi, yi)
+
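+# A minimal usage sketch of griddata (illustrative), using the 'linear' path
+# so that the optional natgrid package is not required:
+#
+#     x, y = np.random.rand(2, 200)
+#     z = np.sin(np.pi * x) * np.cos(np.pi * y)
+#     xi = np.linspace(0., 1., 50)
+#     yi = np.linspace(0., 1., 60)
+#     zi = griddata(x, y, z, xi, yi, interp='linear')   # masked, shape (60, 50)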
+
+##################################################
+# Linear interpolation algorithms
+##################################################
+@cbook.deprecated("2.2", alternative="numpy.interp")
+def less_simple_linear_interpolation(x, y, xi, extrap=False):
+ """
+ This function provides simple (but somewhat less so than
+ :func:`cbook.simple_linear_interpolation`) linear interpolation.
+    :func:`simple_linear_interpolation` will give a list of points
+    between a start and an end, while this does true linear
+    interpolation at an arbitrary set of points.
+
+ This is very inefficient linear interpolation meant to be used
+ only for a small number of points in relatively non-intensive use
+ cases. For real linear interpolation, use scipy.
+ """
+ x = np.asarray(x)
+ y = np.asarray(y)
+ xi = np.atleast_1d(xi)
+
+ s = list(y.shape)
+ s[0] = len(xi)
+ yi = np.tile(np.nan, s)
+
+ for ii, xx in enumerate(xi):
+ bb = x == xx
+ if np.any(bb):
+ jj, = np.nonzero(bb)
+ yi[ii] = y[jj[0]]
+ elif xx < x[0]:
+ if extrap:
+ yi[ii] = y[0]
+ elif xx > x[-1]:
+ if extrap:
+ yi[ii] = y[-1]
+ else:
+ jj, = np.nonzero(x < xx)
+ jj = max(jj)
+
+ yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
+
+ return yi
+
+
+@cbook.deprecated("2.2")
+def slopes(x, y):
+ """
+ :func:`slopes` calculates the slope *y*'(*x*)
+
+    The slope is estimated from the slope of a parabola through any
+    three consecutive points.
+
+ This method should be superior to that described in the appendix
+ of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
+ W. Stineman (Creative Computing July 1980) in at least one aspect:
+
+ Circles for interpolation demand a known aspect ratio between
+ *x*- and *y*-values. For many functions, however, the abscissa
+ are given in different dimensions, so an aspect ratio is
+ completely arbitrary.
+
+ The parabola method gives very similar results to the circle
+ method for most regular cases but behaves much better in special
+ cases.
+
+    Norbert Nemec, Institute of Theoretical Physics, University of
+    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
+
+    (inspired by an original implementation by Halldor Bjornsson,
+ Icelandic Meteorological Office, March 2006 halldor at vedur.is)
+ """
+ # Cast key variables as float.
+ x = np.asarray(x, float)
+ y = np.asarray(y, float)
+
+ yp = np.zeros(y.shape, float)
+
+ dx = x[1:] - x[:-1]
+ dy = y[1:] - y[:-1]
+ dydx = dy/dx
+ yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
+ yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
+ yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
+ return yp
+
+
+@cbook.deprecated("2.2")
+def stineman_interp(xi, x, y, yp=None):
+ """
+ Given data vectors *x* and *y*, the slope vector *yp* and a new
+ abscissa vector *xi*, the function :func:`stineman_interp` uses
+ Stineman interpolation to calculate a vector *yi* corresponding to
+ *xi*.
+
+ Here's an example that generates a coarse sine curve, then
+ interpolates over a finer abscissa::
+
+ x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
+ xi = linspace(0,2*pi,40);
+ yi = stineman_interp(xi,x,y,yp);
+ plot(x,y,'o',xi,yi)
+
+ The interpolation method is described in the article A
+ CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
+ W. Stineman. The article appeared in the July 1980 issue of
+    Creative Computing with a note from the editor stating that while
+    they were "not an academic journal", once in a while "something
+    serious and original comes in", adding that this was
+    "apparently a real solution" to a well-known problem.
+
+ For *yp* = *None*, the routine automatically determines the slopes
+ using the :func:`slopes` routine.
+
+ *x* is assumed to be sorted in increasing order.
+
+ For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
+ tries an extrapolation. The relevance of the data obtained from
+ this, of course, is questionable...
+
+ Original implementation by Halldor Bjornsson, Icelandic
+    Meteorological Office, March 2006 halldor at vedur.is
+
+ Completely reworked and optimized for Python by Norbert Nemec,
+    Institute of Theoretical Physics, University of Regensburg, April
+ 2006 Norbert.Nemec at physik.uni-regensburg.de
+ """
+
+ # Cast key variables as float.
+ x = np.asarray(x, float)
+ y = np.asarray(y, float)
+ if x.shape != y.shape:
+ raise ValueError("'x' and 'y' must be of same shape")
+
+ if yp is None:
+ yp = slopes(x, y)
+ else:
+ yp = np.asarray(yp, float)
+
+ xi = np.asarray(xi, float)
+ yi = np.zeros(xi.shape, float)
+
+ # calculate linear slopes
+ dx = x[1:] - x[:-1]
+ dy = y[1:] - y[:-1]
+ s = dy/dx # note length of s is N-1 so last element is #N-2
+
+ # find the segment each xi is in
+ # this line actually is the key to the efficiency of this implementation
+ idx = np.searchsorted(x[1:-1], xi)
+
+ # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
+ # except at the boundaries, where it may be that xi[j] < x[0] or
+ # xi[j] > x[-1]
+
+ # the y-values that would come out from a linear interpolation:
+ sidx = s.take(idx)
+ xidx = x.take(idx)
+ yidx = y.take(idx)
+ xidxp1 = x.take(idx+1)
+ yo = yidx + sidx * (xi - xidx)
+
+ # the difference that comes when using the slopes given in yp
+ # using the yp slope of the left point
+ dy1 = (yp.take(idx) - sidx) * (xi - xidx)
+ # using the yp slope of the right point
+ dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1)
+
+ dy1dy2 = dy1*dy2
+ # The following is optimized for Python. The solution actually
+ # does more calculations than necessary but exploiting the power
+ # of numpy, this is far more efficient than coding a loop by hand
+ # in Python
+ yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
+ ((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
+ 0.0,
+ 1/(dy1+dy2),))
+ return yi
+
+
+class GaussianKDE(object):
+ """
+ Representation of a kernel-density estimate using Gaussian kernels.
+
+ Parameters
+ ----------
+ dataset : array_like
+ Datapoints to estimate from. In case of univariate data this is a 1-D
+ array, otherwise a 2-D array with shape (# of dims, # of data).
+
+ bw_method : str, scalar or callable, optional
+ The method used to calculate the estimator bandwidth. This can be
+ 'scott', 'silverman', a scalar constant or a callable. If a
+ scalar, this will be used directly as `kde.factor`. If a
+ callable, it should take a `GaussianKDE` instance as only
+ parameter and return a scalar. If None (default), 'scott' is used.
+
+ Attributes
+ ----------
+ dataset : ndarray
+        The dataset with which `GaussianKDE` was initialized.
+
+ dim : int
+ Number of dimensions.
+
+ num_dp : int
+ Number of datapoints.
+
+ factor : float
+ The bandwidth factor, obtained from `kde.covariance_factor`, with which
+ the covariance matrix is multiplied.
+
+ covariance : ndarray
+ The covariance matrix of `dataset`, scaled by the calculated bandwidth
+ (`kde.factor`).
+
+ inv_cov : ndarray
+ The inverse of `covariance`.
+
+ Methods
+ -------
+ kde.evaluate(points) : ndarray
+ Evaluate the estimated pdf on a provided set of points.
+
+ kde(points) : ndarray
+ Same as kde.evaluate(points)
+
+ """
+
+ # This implementation with minor modification was too good to pass up.
+ # from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py
+
+ def __init__(self, dataset, bw_method=None):
+ self.dataset = np.atleast_2d(dataset)
+ if not np.array(self.dataset).size > 1:
+ raise ValueError("`dataset` input should have multiple elements.")
+
+ self.dim, self.num_dp = np.array(self.dataset).shape
+ isString = isinstance(bw_method, six.string_types)
+
+ if bw_method is None:
+ pass
+ elif (isString and bw_method == 'scott'):
+ self.covariance_factor = self.scotts_factor
+ elif (isString and bw_method == 'silverman'):
+ self.covariance_factor = self.silverman_factor
+ elif (np.isscalar(bw_method) and not isString):
+ self._bw_method = 'use constant'
+ self.covariance_factor = lambda: bw_method
+ elif callable(bw_method):
+ self._bw_method = bw_method
+ self.covariance_factor = lambda: self._bw_method(self)
+ else:
+ raise ValueError("`bw_method` should be 'scott', 'silverman', a "
+ "scalar or a callable")
+
+ # Computes the covariance matrix for each Gaussian kernel using
+ # covariance_factor().
+
+ self.factor = self.covariance_factor()
+ # Cache covariance and inverse covariance of the data
+ if not hasattr(self, '_data_inv_cov'):
+ self.data_covariance = np.atleast_2d(
+ np.cov(
+ self.dataset,
+ rowvar=1,
+ bias=False))
+ self.data_inv_cov = np.linalg.inv(self.data_covariance)
+
+ self.covariance = self.data_covariance * self.factor ** 2
+ self.inv_cov = self.data_inv_cov / self.factor ** 2
+ self.norm_factor = np.sqrt(
+ np.linalg.det(
+ 2 * np.pi * self.covariance)) * self.num_dp
+
+ def scotts_factor(self):
+ return np.power(self.num_dp, -1. / (self.dim + 4))
+
+ def silverman_factor(self):
+ return np.power(
+ self.num_dp * (self.dim + 2.0) / 4.0, -1. / (self.dim + 4))
+
+ # Default method to calculate bandwidth, can be overwritten by subclass
+ covariance_factor = scotts_factor
+
+ def evaluate(self, points):
+ """Evaluate the estimated pdf on a set of points.
+
+ Parameters
+ ----------
+ points : (# of dimensions, # of points)-array
+ Alternatively, a (# of dimensions,) vector can be passed in and
+ treated as a single point.
+
+ Returns
+ -------
+ values : (# of points,)-array
+ The values at each point.
+
+ Raises
+ ------
+        ValueError : if the dimensionality of the input points is different
+                     from the dimensionality of the KDE.
+
+ """
+ points = np.atleast_2d(points)
+
+ dim, num_m = np.array(points).shape
+ if dim != self.dim:
+ raise ValueError("points have dimension {}, dataset has dimension "
+ "{}".format(dim, self.dim))
+
+ result = np.zeros((num_m,), dtype=float)
+
+ if num_m >= self.num_dp:
+ # there are more points than data, so loop over data
+ for i in range(self.num_dp):
+ diff = self.dataset[:, i, np.newaxis] - points
+ tdiff = np.dot(self.inv_cov, diff)
+ energy = np.sum(diff * tdiff, axis=0) / 2.0
+ result = result + np.exp(-energy)
+ else:
+ # loop over points
+ for i in range(num_m):
+ diff = self.dataset - points[:, i, np.newaxis]
+ tdiff = np.dot(self.inv_cov, diff)
+ energy = np.sum(diff * tdiff, axis=0) / 2.0
+ result[i] = np.sum(np.exp(-energy), axis=0)
+
+ result = result / self.norm_factor
+
+ return result
+
+ __call__ = evaluate
+
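+# A minimal usage sketch of GaussianKDE (illustrative), estimating a 1-D
+# density on a regular grid:
+#
+#     samples = np.random.randn(1000)
+#     kde = GaussianKDE(samples, bw_method='silverman')
+#     grid = np.linspace(-3., 3., 200)
+#     density = kde(grid)          # equivalent to kde.evaluate(grid)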
+
+##################################################
+# Code related to things in and around polygons
+##################################################
+@cbook.deprecated("2.2")
+def inside_poly(points, verts):
+ """
+ *points* is a sequence of *x*, *y* points.
+ *verts* is a sequence of *x*, *y* vertices of a polygon.
+
+ Return value is a sequence of indices into points for the points
+ that are inside the polygon.
+ """
+ # Make a closed polygon path
+ poly = Path(verts)
+
+ # Check to see which points are contained within the Path
+ return [idx for idx, p in enumerate(points) if poly.contains_point(p)]
+
+
+@cbook.deprecated("2.2")
+def poly_below(xmin, xs, ys):
+ """
+ Given a sequence of *xs* and *ys*, return the vertices of a
+ polygon that has a horizontal base at *xmin* and an upper bound at
+ the *ys*. *xmin* is a scalar.
+
+ Intended for use with :meth:`matplotlib.axes.Axes.fill`, e.g.,::
+
+ xv, yv = poly_below(0, x, y)
+ ax.fill(xv, yv)
+ """
+ if any(isinstance(var, np.ma.MaskedArray) for var in [xs, ys]):
+ numpy = np.ma
+ else:
+ numpy = np
+
+ xs = numpy.asarray(xs)
+ ys = numpy.asarray(ys)
+ Nx = len(xs)
+ Ny = len(ys)
+ if Nx != Ny:
+ raise ValueError("'xs' and 'ys' must have the same length")
+ x = xmin*numpy.ones(2*Nx)
+ y = numpy.ones(2*Nx)
+ x[:Nx] = xs
+ y[:Nx] = ys
+ y[Nx:] = ys[::-1]
+ return x, y
+
+
+@cbook.deprecated("2.2")
+def poly_between(x, ylower, yupper):
+ """
+ Given a sequence of *x*, *ylower* and *yupper*, return the polygon
+ that fills the regions between them. *ylower* or *yupper* can be
+ scalar or iterable. If they are iterable, they must be equal in
+ length to *x*.
+
+ Return value is *x*, *y* arrays for use with
+ :meth:`matplotlib.axes.Axes.fill`.
+ """
+ if any(isinstance(var, np.ma.MaskedArray) for var in [ylower, yupper, x]):
+ numpy = np.ma
+ else:
+ numpy = np
+
+ Nx = len(x)
+ if not cbook.iterable(ylower):
+ ylower = ylower*numpy.ones(Nx)
+
+ if not cbook.iterable(yupper):
+ yupper = yupper*numpy.ones(Nx)
+
+ x = numpy.concatenate((x, x[::-1]))
+ y = numpy.concatenate((yupper, ylower[::-1]))
+ return x, y
+
+
+@cbook.deprecated('2.2')
+def is_closed_polygon(X):
+ """
+ Tests whether first and last object in a sequence are the same. These are
+ presumably coordinates on a polygonal curve, in which case this function
+ tests if that curve is closed.
+ """
+ return np.all(X[0] == X[-1])
+
+
+@cbook.deprecated("2.2", message='Moved to matplotlib.cbook')
+def contiguous_regions(mask):
+ """
+ return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
+ True and we cover all such regions
+ """
+ return cbook.contiguous_regions(mask)
+
+
+@cbook.deprecated("2.2")
+def cross_from_below(x, threshold):
+ """
+ return the indices into *x* where *x* crosses some threshold from
+ below, e.g., the i's where::
+
+ x[i-1]<threshold and x[i]>=threshold
+
+ Example code::
+
+ import matplotlib.pyplot as plt
+
+ t = np.arange(0.0, 2.0, 0.1)
+ s = np.sin(2*np.pi*t)
+
+ fig, ax = plt.subplots()
+ ax.plot(t, s, '-o')
+ ax.axhline(0.5)
+ ax.axhline(-0.5)
+
+ ind = cross_from_below(s, 0.5)
+ ax.vlines(t[ind], -1, 1)
+
+ ind = cross_from_above(s, -0.5)
+ ax.vlines(t[ind], -1, 1)
+
+ plt.show()
+
+ See Also
+ --------
+ :func:`cross_from_above` and :func:`contiguous_regions`
+
+ """
+ x = np.asarray(x)
+ ind = np.nonzero((x[:-1] < threshold) & (x[1:] >= threshold))[0]
+ if len(ind):
+ return ind+1
+ else:
+ return ind
+
+
+@cbook.deprecated("2.2")
+def cross_from_above(x, threshold):
+ """
+    return the indices into *x* where *x* crosses some threshold from
+    above, e.g., the i's where::
+
+      x[i-1]>=threshold and x[i]<threshold
+
+ See Also
+ --------
+ :func:`cross_from_below` and :func:`contiguous_regions`
+
+ """
+ x = np.asarray(x)
+ ind = np.nonzero((x[:-1] >= threshold) & (x[1:] < threshold))[0]
+ if len(ind):
+ return ind+1
+ else:
+ return ind
+
+
+##################################################
+# Vector and path length geometry calculations
+##################################################
+@cbook.deprecated('2.2')
+def vector_lengths(X, P=2., axis=None):
+ """
+ Finds the length of a set of vectors in *n* dimensions. This is
+    like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
+ work over a particular axis of the supplied array or matrix.
+
+ Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
+ elements of *X* along the given axis. If *axis* is *None*,
+ compute over all elements of *X*.
+ """
+ X = np.asarray(X)
+ return (np.sum(X**(P), axis=axis))**(1./P)
+
+
+@cbook.deprecated('2.2')
+def distances_along_curve(X):
+ """
+ Computes the distance between a set of successive points in *N* dimensions.
+
+ Where *X* is an *M* x *N* array or matrix. The distances between
+ successive rows is computed. Distance is the standard Euclidean
+ distance.
+ """
+ X = np.diff(X, axis=0)
+ return vector_lengths(X, axis=1)
+
+
+@cbook.deprecated('2.2')
+def path_length(X):
+ """
+ Computes the distance travelled along a polygonal curve in *N* dimensions.
+
+ Where *X* is an *M* x *N* array or matrix. Returns an array of
+ length *M* consisting of the distance along the curve at each point
+ (i.e., the rows of *X*).
+ """
+ X = distances_along_curve(X)
+ return np.concatenate((np.zeros(1), np.cumsum(X)))
+
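+# A small worked example of path_length (illustrative): the polyline below has
+# successive segment lengths 5 and 4, so the cumulative path length is
+# [0, 5, 9]:
+#
+#     pts = np.array([[0., 0.], [3., 4.], [3., 8.]])
+#     path_length(pts)             # -> array([0., 5., 9.])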
+
+@cbook.deprecated('2.2')
+def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
+ """
+ Converts a quadratic Bezier curve to a cubic approximation.
+
+ The inputs are the *x* and *y* coordinates of the three control
+ points of a quadratic curve, and the output is a tuple of *x* and
+ *y* coordinates of the four control points of the cubic curve.
+ """
+ # TODO: Candidate for deprecation -- no longer used internally
+
+ # c0x, c0y = q0x, q0y
+ c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
+ c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
+ # c3x, c3y = q2x, q2y
+ return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
+
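+# A small worked example of the degree elevation above (illustrative), i.e.
+# c1 = q0 + 2/3*(q1 - q0) and c2 = q2 + 2/3*(q1 - q2):
+#
+#     quad2cubic(0., 0., 1., 2., 2., 0.)
+#     # -> (0.0, 0.0, 0.666..., 1.333..., 1.333..., 1.333..., 2.0, 0.0)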
+
+@cbook.deprecated("2.2")
+def offset_line(y, yerr):
+ """
+ Offsets an array *y* by +/- an error and returns a tuple
+ (y - err, y + err).
+
+ The error term can be:
+
+ * A scalar. In this case, the returned tuple is obvious.
+ * A vector of the same length as *y*. The quantities y +/- err are computed
+ component-wise.
+ * A tuple of length 2. In this case, yerr[0] is the error below *y* and
+ yerr[1] is error above *y*. For example::
+
+ from pylab import *
+ x = linspace(0, 2*pi, num=100, endpoint=True)
+ y = sin(x)
+ y_minus, y_plus = mlab.offset_line(y, 0.1)
+ plot(x, y)
+          fill_between(x, y_minus, y2=y_plus)
+ show()
+
+ """
+ if cbook.is_numlike(yerr) or (cbook.iterable(yerr) and
+ len(yerr) == len(y)):
+ ymin = y - yerr
+ ymax = y + yerr
+ elif len(yerr) == 2:
+ ymin, ymax = y - yerr[0], y + yerr[1]
+ else:
+ raise ValueError("yerr must be scalar, 1xN or 2xN")
+ return ymin, ymax
diff --git a/contrib/python/matplotlib/py2/matplotlib/offsetbox.py b/contrib/python/matplotlib/py2/matplotlib/offsetbox.py
new file mode 100644
index 00000000000..86c3a0d525a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/offsetbox.py
@@ -0,0 +1,1811 @@
+"""
+The OffsetBox is a simple container artist. Its child artists are meant
+to be drawn at a position relative to their parent. The [VH]Packer,
+DrawingArea and TextArea are derived from the OffsetBox.
+
+The [VH]Packer classes automatically adjust the relative positions of
+their children, which should be instances of the OffsetBox. This is used
+to align similar artists together, e.g., in a legend.
+
+The DrawingArea can contain any Artist as a child. The
+DrawingArea has a fixed width and height. The position of children
+relative to the parent is fixed. The TextArea contains a single
+Text instance. The width and height of the TextArea instance are the
+width and height of its child text.
+"""
+
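+# A minimal usage sketch (illustrative) of the containers defined below:
+#
+#     from matplotlib.offsetbox import TextArea, DrawingArea, VPacker
+#     box = VPacker(children=[TextArea("label"), DrawingArea(20, 20)],
+#                   align="center", pad=2, sep=4)
+#     # 'box' can then be placed on a figure, e.g. via an AnchoredOffsetbox.
+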
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange, zip
+
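+# A small worked example of the scaled formatters above (illustrative):
+#
+#     FormatPercent(2).tostr(0.1234)       # -> '12.34'
+#     FormatThousands(1).tostr(12500.)     # -> '12.5'
+#     FormatMillions(1).tostr(2.5e6)       # -> '2.5'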
+import warnings
+import matplotlib.transforms as mtransforms
+import matplotlib.artist as martist
+import matplotlib.text as mtext
+import matplotlib.path as mpath
+import numpy as np
+from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
+
+from matplotlib.font_manager import FontProperties
+from matplotlib.patches import FancyBboxPatch, FancyArrowPatch
+from matplotlib import rcParams
+
+from matplotlib import docstring
+
+from matplotlib.image import BboxImage
+
+from matplotlib.patches import bbox_artist as mbbox_artist
+from matplotlib.text import _AnnotationBase
+
+
+DEBUG = False
+
+
+# for debugging use
+def bbox_artist(*args, **kwargs):
+ if DEBUG:
+ mbbox_artist(*args, **kwargs)
+
+# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
+# that we are packing boxes horizontally. But the same functions are
+# used for vertical packing as well.
+
+
+def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
+ """
+    Given a list of (width, xdescent) of each box, calculate the
+    total width and the x-offset positions of each item according to
+ *mode*. xdescent is analogous to the usual descent, but along the
+ x-direction. xdescent values are currently ignored.
+
+ *wd_list* : list of (width, xdescent) of boxes to be packed.
+ *sep* : spacing between boxes
+ *total* : Intended total length. None if not used.
+ *mode* : packing mode. 'fixed', 'expand', or 'equal'.
+ """
+
+ w_list, d_list = zip(*wd_list)
+ # d_list is currently not used.
+
+ if mode == "fixed":
+ offsets_ = np.cumsum([0] + [w + sep for w in w_list])
+ offsets = offsets_[:-1]
+ if total is None:
+ total = offsets_[-1] - sep
+ return total, offsets
+
+ elif mode == "expand":
+ # This is a bit of a hack to avoid a TypeError when *total*
+        # is None and used in conjunction with tight layout.
+ if total is None:
+ total = 1
+ if len(w_list) > 1:
+ sep = (total - sum(w_list)) / (len(w_list) - 1)
+ else:
+ sep = 0
+ offsets_ = np.cumsum([0] + [w + sep for w in w_list])
+ offsets = offsets_[:-1]
+ return total, offsets
+
+ elif mode == "equal":
+ maxh = max(w_list)
+ if total is None:
+ total = (maxh + sep) * len(w_list)
+ else:
+ sep = total / len(w_list) - maxh
+ offsets = (maxh + sep) * np.arange(len(w_list))
+ return total, offsets
+
+ else:
+ raise ValueError("Unknown mode : %s" % (mode,))
+
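+# A small worked example of "fixed" packing (illustrative): three boxes of
+# widths 2, 3 and 1 with sep=1 get offsets 0, 3 and 7 and a total width of 8:
+#
+#     _get_packed_offsets([(2, 0), (3, 0), (1, 0)], total=None, sep=1)
+#     # -> (8, array([0, 3, 7]))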
+
+def _get_aligned_offsets(hd_list, height, align="baseline"):
+ """
+    Given a list of (height, descent) of each box, align the boxes
+    with *align* and calculate the y-offsets of each box.
+
+    *hd_list* : list of (height, descent) of boxes to be aligned.
+    *height* : Intended total height. None if not used.
+    *align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
+ """
+
+ if height is None:
+ height = max(h for h, d in hd_list)
+
+ if align == "baseline":
+ height_descent = max(h - d for h, d in hd_list)
+ descent = max(d for h, d in hd_list)
+ height = height_descent + descent
+ offsets = [0. for h, d in hd_list]
+ elif align in ["left", "top"]:
+ descent = 0.
+ offsets = [d for h, d in hd_list]
+ elif align in ["right", "bottom"]:
+ descent = 0.
+ offsets = [height - h + d for h, d in hd_list]
+ elif align == "center":
+ descent = 0.
+ offsets = [(height - h) * .5 + d for h, d in hd_list]
+ else:
+ raise ValueError("Unknown Align mode : %s" % (align,))
+
+ return height, descent, offsets
+
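+# A small worked example of "center" alignment (illustrative): boxes with
+# (height, descent) of (3, 1) and (2, 0) inside a height-3 slot get offsets
+# 1.0 and 0.5 and a common descent of 0:
+#
+#     _get_aligned_offsets([(3, 1), (2, 0)], height=None, align="center")
+#     # -> (3, 0.0, [1.0, 0.5])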
+
+class OffsetBox(martist.Artist):
+ """
+    The OffsetBox is a simple container artist. Its child artists are meant
+    to be drawn at a position relative to their parent.
+ """
+ def __init__(self, *args, **kwargs):
+
+ super(OffsetBox, self).__init__(*args, **kwargs)
+
+        # Clipping has not been implemented in the OffsetBox family, so
+ # disable the clip flag for consistency. It can always be turned back
+ # on to zero effect.
+ self.set_clip_on(False)
+
+ self._children = []
+ self._offset = (0, 0)
+
+ def __getstate__(self):
+ state = martist.Artist.__getstate__(self)
+
+ # pickle cannot save instancemethods, so handle them here
+ from .cbook import _InstanceMethodPickler
+ import inspect
+
+ offset = state['_offset']
+ if inspect.ismethod(offset):
+ state['_offset'] = _InstanceMethodPickler(offset)
+ return state
+
+ def __setstate__(self, state):
+ self.__dict__ = state
+ from .cbook import _InstanceMethodPickler
+ if isinstance(self._offset, _InstanceMethodPickler):
+ self._offset = self._offset.get_instancemethod()
+ self.stale = True
+
+ def set_figure(self, fig):
+ """
+ Set the figure
+
+        accepts a :class:`~matplotlib.figure.Figure` instance
+ """
+ martist.Artist.set_figure(self, fig)
+ for c in self.get_children():
+ c.set_figure(fig)
+
+ @martist.Artist.axes.setter
+ def axes(self, ax):
+ # TODO deal with this better
+ martist.Artist.axes.fset(self, ax)
+ for c in self.get_children():
+ if c is not None:
+ c.axes = ax
+
+ def contains(self, mouseevent):
+ for c in self.get_children():
+ a, b = c.contains(mouseevent)
+ if a:
+ return a, b
+ return False, {}
+
+ def set_offset(self, xy):
+ """
+ Set the offset
+
+        accepts an (x, y) tuple or a callable object.
+ """
+ self._offset = xy
+ self.stale = True
+
+ def get_offset(self, width, height, xdescent, ydescent, renderer):
+ """
+ Get the offset
+
+ accepts extent of the box
+ """
+ return (self._offset(width, height, xdescent, ydescent, renderer)
+ if callable(self._offset)
+ else self._offset)
+
+ def set_width(self, width):
+ """
+ Set the width
+
+ accepts float
+ """
+ self.width = width
+ self.stale = True
+
+ def set_height(self, height):
+ """
+ Set the height
+
+ accepts float
+ """
+ self.height = height
+ self.stale = True
+
+ def get_visible_children(self):
+ """
+ Return a list of visible artists it contains.
+ """
+ return [c for c in self._children if c.get_visible()]
+
+ def get_children(self):
+ """
+ Return a list of artists it contains.
+ """
+ return self._children
+
+ def get_extent_offsets(self, renderer):
+        raise NotImplementedError(
+            "get_extent_offsets must be overridden in derived classes")
+
+ def get_extent(self, renderer):
+ """
+        Return width, height, xdescent, ydescent of the box
+ """
+ w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
+ return w, h, xd, yd
+
+ def get_window_extent(self, renderer):
+ '''
+ get the bounding box in display space.
+ '''
+ w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
+ px, py = self.get_offset(w, h, xd, yd, renderer)
+ return mtransforms.Bbox.from_bounds(px - xd, py - yd, w, h)
+
+ def draw(self, renderer):
+ """
+ Update the location of children if necessary and draw them
+ to the given *renderer*.
+ """
+
+ width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
+ renderer)
+
+ px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
+
+ for c, (ox, oy) in zip(self.get_visible_children(), offsets):
+ c.set_offset((px + ox, py + oy))
+ c.draw(renderer)
+
+ bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
+ self.stale = False
+
+
+class PackerBase(OffsetBox):
+ def __init__(self, pad=None, sep=None, width=None, height=None,
+ align=None, mode=None,
+ children=None):
+ """
+ Parameters
+ ----------
+ pad : float, optional
+ Boundary pad.
+
+ sep : float, optional
+ Spacing between items.
+
+ width : float, optional
+
+ height : float, optional
+ Width and height of the container box, calculated if
+ `None`.
+
+ align : str, optional
+ Alignment of boxes. Can be one of ``top``, ``bottom``,
+ ``left``, ``right``, ``center`` and ``baseline``
+
+ mode : str, optional
+ Packing mode.
+
+ Notes
+ -----
+        *pad* and *sep* need to be given in points and will be scaled
+        with the renderer dpi, while *width* and *height* need to be in
+ pixels.
+ """
+ super(PackerBase, self).__init__()
+
+ self.height = height
+ self.width = width
+ self.sep = sep
+ self.pad = pad
+ self.mode = mode
+ self.align = align
+
+ self._children = children
+
+
+class VPacker(PackerBase):
+ """
+ The VPacker has its children packed vertically. It automatically
+    adjusts the relative positions of its children at draw time.
+ """
+ def __init__(self, pad=None, sep=None, width=None, height=None,
+ align="baseline", mode="fixed",
+ children=None):
+ """
+ Parameters
+ ----------
+ pad : float, optional
+ Boundary pad.
+
+ sep : float, optional
+ Spacing between items.
+
+ width : float, optional
+
+ height : float, optional
+
+ width and height of the container box, calculated if
+ `None`.
+
+ align : str, optional
+ Alignment of boxes.
+
+ mode : str, optional
+ Packing mode.
+
+ Notes
+ -----
+        *pad* and *sep* need to be given in points and will be scaled
+        with the renderer dpi, while *width* and *height* need to be in
+ pixels.
+ """
+ super(VPacker, self).__init__(pad, sep, width, height,
+ align, mode,
+ children)
+
+ def get_extent_offsets(self, renderer):
+ """
+        Update the offsets of the children and return the extents of the box.
+ """
+
+ dpicor = renderer.points_to_pixels(1.)
+ pad = self.pad * dpicor
+ sep = self.sep * dpicor
+
+ if self.width is not None:
+ for c in self.get_visible_children():
+ if isinstance(c, PackerBase) and c.mode == "expand":
+ c.set_width(self.width)
+
+ whd_list = [c.get_extent(renderer)
+ for c in self.get_visible_children()]
+ whd_list = [(w, h, xd, (h - yd)) for w, h, xd, yd in whd_list]
+
+ wd_list = [(w, xd) for w, h, xd, yd in whd_list]
+ width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
+ self.width,
+ self.align)
+
+ pack_list = [(h, yd) for w, h, xd, yd in whd_list]
+ height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
+ sep, self.mode)
+
+ yoffsets = yoffsets_ + [yd for w, h, xd, yd in whd_list]
+ ydescent = height - yoffsets[0]
+ yoffsets = height - yoffsets
+
+ yoffsets = yoffsets - ydescent
+
+ return width + 2 * pad, height + 2 * pad, \
+ xdescent + pad, ydescent + pad, \
+ list(zip(xoffsets, yoffsets))
+
+
+class HPacker(PackerBase):
+ """
+ The HPacker has its children packed horizontally. It automatically
+ adjusts the relative positions of children at draw time.
+ """
+ def __init__(self, pad=None, sep=None, width=None, height=None,
+ align="baseline", mode="fixed",
+ children=None):
+ """
+ Parameters
+ ----------
+ pad : float, optional
+ Boundary pad.
+
+ sep : float, optional
+ Spacing between items.
+
+ width : float, optional
+
+ height : float, optional
+ Width and height of the container box, calculated if
+ `None`.
+
+ align : str
+ Alignment of boxes.
+
+ mode : str
+ Packing mode.
+
+ Notes
+ -----
+        *pad* and *sep* need to be given in points and will be scaled
+        with the renderer dpi, while *width* and *height* need to be in
+ pixels.
+ """
+ super(HPacker, self).__init__(pad, sep, width, height,
+ align, mode, children)
+
+ def get_extent_offsets(self, renderer):
+ """
+        Update the offsets of the children and return the extents of the box.
+ """
+ dpicor = renderer.points_to_pixels(1.)
+ pad = self.pad * dpicor
+ sep = self.sep * dpicor
+
+ whd_list = [c.get_extent(renderer)
+ for c in self.get_visible_children()]
+
+ if not whd_list:
+ return 2 * pad, 2 * pad, pad, pad, []
+
+ if self.height is None:
+ height_descent = max(h - yd for w, h, xd, yd in whd_list)
+ ydescent = max(yd for w, h, xd, yd in whd_list)
+ height = height_descent + ydescent
+ else:
+            height = self.height - 2 * pad  # height w/o pad
+
+ hd_list = [(h, yd) for w, h, xd, yd in whd_list]
+ height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
+ self.height,
+ self.align)
+
+ pack_list = [(w, xd) for w, h, xd, yd in whd_list]
+
+ width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
+ sep, self.mode)
+
+ xoffsets = xoffsets_ + [xd for w, h, xd, yd in whd_list]
+
+ xdescent = whd_list[0][2]
+ xoffsets = xoffsets - xdescent
+
+ return width + 2 * pad, height + 2 * pad, \
+ xdescent + pad, ydescent + pad, \
+ list(zip(xoffsets, yoffsets))
+
+
+class PaddedBox(OffsetBox):
+ def __init__(self, child, pad=None, draw_frame=False, patch_attrs=None):
+ """
+ *pad* : boundary pad
+
+ .. note::
+            *pad* needs to be given in points and will be
+            scaled with the renderer dpi, while *width* and *height*
+ need to be in pixels.
+ """
+
+ super(PaddedBox, self).__init__()
+
+ self.pad = pad
+ self._children = [child]
+
+ self.patch = FancyBboxPatch(
+ xy=(0.0, 0.0), width=1., height=1.,
+ facecolor='w', edgecolor='k',
+ mutation_scale=1, # self.prop.get_size_in_points(),
+ snap=True
+ )
+
+ self.patch.set_boxstyle("square", pad=0)
+
+ if patch_attrs is not None:
+ self.patch.update(patch_attrs)
+
+ self._drawFrame = draw_frame
+
+ def get_extent_offsets(self, renderer):
+ """
+        Update the offsets of the children and return the extents of the box.
+ """
+
+ dpicor = renderer.points_to_pixels(1.)
+ pad = self.pad * dpicor
+
+ w, h, xd, yd = self._children[0].get_extent(renderer)
+
+ return w + 2 * pad, h + 2 * pad, \
+ xd + pad, yd + pad, \
+ [(0, 0)]
+
+ def draw(self, renderer):
+ """
+ Update the location of children if necessary and draw them
+ to the given *renderer*.
+ """
+
+ width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
+ renderer)
+
+ px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
+
+ for c, (ox, oy) in zip(self.get_visible_children(), offsets):
+ c.set_offset((px + ox, py + oy))
+
+ self.draw_frame(renderer)
+
+ for c in self.get_visible_children():
+ c.draw(renderer)
+
+ #bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
+ self.stale = False
+
+ def update_frame(self, bbox, fontsize=None):
+ self.patch.set_bounds(bbox.x0, bbox.y0,
+ bbox.width, bbox.height)
+
+ if fontsize:
+ self.patch.set_mutation_scale(fontsize)
+ self.stale = True
+
+ def draw_frame(self, renderer):
+ # update the location and size of the legend
+ bbox = self.get_window_extent(renderer)
+ self.update_frame(bbox)
+
+ if self._drawFrame:
+ self.patch.draw(renderer)
+
+
+class DrawingArea(OffsetBox):
+ """
+ The DrawingArea can contain any Artist as a child. The DrawingArea
+ has a fixed width and height. The position of children relative to
+ the parent is fixed. The children can be clipped at the
+ boundaries of the parent.
+ """
+
+ def __init__(self, width, height, xdescent=0.,
+ ydescent=0., clip=False):
+ """
+ *width*, *height* : width and height of the container box.
+ *xdescent*, *ydescent* : descent of the box in x- and y-direction.
+ *clip* : Whether to clip the children
+ """
+
+ super(DrawingArea, self).__init__()
+
+ self.width = width
+ self.height = height
+ self.xdescent = xdescent
+ self.ydescent = ydescent
+ self._clip_children = clip
+
+ self.offset_transform = mtransforms.Affine2D()
+ self.offset_transform.clear()
+ self.offset_transform.translate(0, 0)
+
+ self.dpi_transform = mtransforms.Affine2D()
+
+ @property
+ def clip_children(self):
+ """
+        Whether the children of this DrawingArea should be clipped
+        by the DrawingArea bounding box.
+ """
+ return self._clip_children
+
+ @clip_children.setter
+ def clip_children(self, val):
+ self._clip_children = bool(val)
+ self.stale = True
+
+ def get_transform(self):
+ """
+ Return the :class:`~matplotlib.transforms.Transform` applied
+ to the children
+ """
+ return self.dpi_transform + self.offset_transform
+
+ def set_transform(self, t):
+ """
+ set_transform is ignored.
+ """
+ pass
+
+ def set_offset(self, xy):
+ """
+ set offset of the container.
+
+        Accepts a tuple of (x, y) coordinates in display units.
+ """
+ self._offset = xy
+
+ self.offset_transform.clear()
+ self.offset_transform.translate(xy[0], xy[1])
+ self.stale = True
+
+ def get_offset(self):
+ """
+ return offset of the container.
+ """
+ return self._offset
+
+ def get_window_extent(self, renderer):
+ '''
+ get the bounding box in display space.
+ '''
+ w, h, xd, yd = self.get_extent(renderer)
+ ox, oy = self.get_offset() # w, h, xd, yd)
+
+ return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
+
+ def get_extent(self, renderer):
+ """
+ Return with, height, xdescent, ydescent of box
+ """
+
+ dpi_cor = renderer.points_to_pixels(1.)
+ return self.width * dpi_cor, self.height * dpi_cor, \
+ self.xdescent * dpi_cor, self.ydescent * dpi_cor
+
+ def add_artist(self, a):
+ 'Add any :class:`~matplotlib.artist.Artist` to the container box'
+ self._children.append(a)
+ if not a.is_transform_set():
+ a.set_transform(self.get_transform())
+ if self.axes is not None:
+ a.axes = self.axes
+ fig = self.figure
+ if fig is not None:
+ a.set_figure(fig)
+
+ def draw(self, renderer):
+ """
+ Draw the children
+ """
+
+ dpi_cor = renderer.points_to_pixels(1.)
+ self.dpi_transform.clear()
+ self.dpi_transform.scale(dpi_cor, dpi_cor)
+
+ # At this point the DrawingArea has a transform
+ # to the display space so the path created is
+ # good for clipping children
+ tpath = mtransforms.TransformedPath(
+ mpath.Path([[0, 0], [0, self.height],
+ [self.width, self.height],
+ [self.width, 0]]),
+ self.get_transform())
+ for c in self._children:
+ if self._clip_children and not (c.clipbox or c._clippath):
+ c.set_clip_path(tpath)
+ c.draw(renderer)
+
+ bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
+ self.stale = False
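+
+
+# Illustrative usage sketch (not part of the upstream matplotlib source):
+# a DrawingArea holding a Circle patch, anchored on an axes through an
+# AnnotationBbox (defined later in this module).  Children are positioned
+# in the box's own coordinates and optionally clipped to its bounds.
+def _demo_drawing_area(ax):
+    from matplotlib.patches import Circle
+    da = DrawingArea(20, 20, xdescent=0., ydescent=0., clip=True)
+    da.add_artist(Circle((10, 10), 10, fc="r"))
+    # Pin the box to the centre of the axes without drawing a frame.
+    ab = AnnotationBbox(da, (0.5, 0.5), xycoords="axes fraction", frameon=False)
+    ax.add_artist(ab)
+    return ab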
+
+
+class TextArea(OffsetBox):
+ """
+ The TextArea contains a single Text instance. The text is
+ placed at (0, 0) with baseline+left alignment. The width and height
+ of the TextArea instance are the width and height of its child
+ text.
+ """
+ def __init__(self, s,
+ textprops=None,
+ multilinebaseline=None,
+ minimumdescent=True,
+ ):
+ """
+ Parameters
+ ----------
+ s : str
+ a string to be displayed.
+
+ textprops : `~matplotlib.font_manager.FontProperties`, optional
+
+ multilinebaseline : bool, optional
+ If `True`, the baseline for multiline text is adjusted so that
+ it is (approximately) center-aligned with single-line
+ text.
+
+ minimumdescent : bool, optional
+ If `True`, the box has a minimum descent of "p".
+ """
+ if textprops is None:
+ textprops = {}
+
+ if "va" not in textprops:
+ textprops["va"] = "baseline"
+
+ self._text = mtext.Text(0, 0, s, **textprops)
+
+ OffsetBox.__init__(self)
+
+ self._children = [self._text]
+
+ self.offset_transform = mtransforms.Affine2D()
+ self.offset_transform.clear()
+ self.offset_transform.translate(0, 0)
+ self._baseline_transform = mtransforms.Affine2D()
+ self._text.set_transform(self.offset_transform +
+ self._baseline_transform)
+
+ self._multilinebaseline = multilinebaseline
+ self._minimumdescent = minimumdescent
+
+ def set_text(self, s):
+ "Set the text of this area as a string."
+ self._text.set_text(s)
+ self.stale = True
+
+ def get_text(self):
+ "Returns the string representation of this area's text"
+ return self._text.get_text()
+
+ def set_multilinebaseline(self, t):
+ """
+ Set multilinebaseline.
+
+ If True, the baseline for multiline text is
+ adjusted so that it is (approximately) center-aligned with
+ single-line text.
+ """
+ self._multilinebaseline = t
+ self.stale = True
+
+ def get_multilinebaseline(self):
+ """
+ Get multilinebaseline.
+ """
+ return self._multilinebaseline
+
+ def set_minimumdescent(self, t):
+ """
+ Set minimumdescent.
+
+ If True, the extent of single-line text is adjusted so that
+ it has a minimum descent of "p".
+ """
+ self._minimumdescent = t
+ self.stale = True
+
+ def get_minimumdescent(self):
+ """
+ get minimumdescent.
+ """
+ return self._minimumdescent
+
+ def set_transform(self, t):
+ """
+ set_transform is ignored.
+ """
+ pass
+
+ def set_offset(self, xy):
+ """
+ set offset of the container.
+
+ Accept : tuple of x,y coordinates in display units.
+ """
+ self._offset = xy
+
+ self.offset_transform.clear()
+ self.offset_transform.translate(xy[0], xy[1])
+ self.stale = True
+
+ def get_offset(self):
+ """
+ return offset of the container.
+ """
+ return self._offset
+
+ def get_window_extent(self, renderer):
+ '''
+ get the bounding box in display space.
+ '''
+ w, h, xd, yd = self.get_extent(renderer)
+ ox, oy = self.get_offset() # w, h, xd, yd)
+ return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
+
+ def get_extent(self, renderer):
+ clean_line, ismath = self._text.is_math_text(self._text._text)
+ _, h_, d_ = renderer.get_text_width_height_descent(
+ "lp", self._text._fontproperties, ismath=False)
+
+ bbox, info, d = self._text._get_layout(renderer)
+ w, h = bbox.width, bbox.height
+
+ line = info[-1][0] # last line
+
+ self._baseline_transform.clear()
+
+ if len(info) > 1 and self._multilinebaseline:
+ d_new = 0.5 * h - 0.5 * (h_ - d_)
+ self._baseline_transform.translate(0, d - d_new)
+ d = d_new
+
+ else: # single line
+
+ h_d = max(h_ - d_, h - d)
+
+ if self.get_minimumdescent():
+ ## to have a minimum descent, i.e., "l" and "p" have the same
+ ## descents.
+ d = max(d, d_)
+ #else:
+ # d = d
+
+ h = h_d + d
+
+ return w, h, 0., d
+
+ def draw(self, renderer):
+ """
+ Draw the children
+ """
+
+ self._text.draw(renderer)
+
+ bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
+ self.stale = False
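+
+
+# Illustrative usage sketch (not part of the upstream matplotlib source):
+# a TextArea is usually wrapped in an AnchoredOffsetbox (defined below) so
+# that the text can be pinned, legend-style, to a corner of the axes.
+def _demo_text_area(ax):
+    ta = TextArea("a text box", textprops=dict(color="k", size=10))
+    box = AnchoredOffsetbox(loc=1, child=ta, frameon=True, pad=0.4, borderpad=0.5)
+    ax.add_artist(box)
+    return box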
+
+
+class AuxTransformBox(OffsetBox):
+ """
+ Offset box with an aux_transform. Its children will be
+ transformed with the aux_transform first and then
+ offset. The absolute coordinate of the aux_transform is meaningless,
+ as it will be automatically adjusted so that the lower-left corner
+ of the bounding box of the children will be set to (0, 0) before the
+ offset transform.
+
+ It is similar to DrawingArea, except that the extent of the box
+ is not predetermined but calculated from the window extent of its
+ children. Furthermore, the extent of the children will be
+ calculated in the transformed coordinates.
+ """
+ def __init__(self, aux_transform):
+ self.aux_transform = aux_transform
+ OffsetBox.__init__(self)
+
+ self.offset_transform = mtransforms.Affine2D()
+ self.offset_transform.clear()
+ self.offset_transform.translate(0, 0)
+
+ # ref_offset_transform is used to make the offset_transform
+ # always reference the lower-left corner of the bbox of its
+ # children.
+ self.ref_offset_transform = mtransforms.Affine2D()
+ self.ref_offset_transform.clear()
+
+ def add_artist(self, a):
+ 'Add any :class:`~matplotlib.artist.Artist` to the container box'
+ self._children.append(a)
+ a.set_transform(self.get_transform())
+ self.stale = True
+
+ def get_transform(self):
+ """
+ Return the :class:`~matplotlib.transforms.Transform` applied
+ to the children
+ """
+ return self.aux_transform + \
+ self.ref_offset_transform + \
+ self.offset_transform
+
+ def set_transform(self, t):
+ """
+ set_transform is ignored.
+ """
+ pass
+
+ def set_offset(self, xy):
+ """
+ set offset of the container.
+
+ Accept : tuple of x, y coordinates in display units.
+ """
+ self._offset = xy
+
+ self.offset_transform.clear()
+ self.offset_transform.translate(xy[0], xy[1])
+ self.stale = True
+
+ def get_offset(self):
+ """
+ return offset of the container.
+ """
+ return self._offset
+
+ def get_window_extent(self, renderer):
+ '''
+ get the bounding box in display space.
+ '''
+ w, h, xd, yd = self.get_extent(renderer)
+ ox, oy = self.get_offset() # w, h, xd, yd)
+ return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
+
+ def get_extent(self, renderer):
+
+ # clear the offset transforms
+ _off = self.offset_transform.to_values() # to be restored later
+ self.ref_offset_transform.clear()
+ self.offset_transform.clear()
+
+ # calculate the extent
+ bboxes = [c.get_window_extent(renderer) for c in self._children]
+ ub = mtransforms.Bbox.union(bboxes)
+
+ # adjust ref_offset_transform
+ self.ref_offset_transform.translate(-ub.x0, -ub.y0)
+
+ # restore the offset transform
+ mtx = self.offset_transform.matrix_from_values(*_off)
+ self.offset_transform.set_matrix(mtx)
+
+ return ub.width, ub.height, 0., 0.
+
+ def draw(self, renderer):
+ """
+ Draw the children
+ """
+
+ for c in self._children:
+ c.draw(renderer)
+
+ bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
+ self.stale = False
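+
+
+# Illustrative usage sketch (not part of the upstream matplotlib source):
+# the aux_transform below is a plain rotation, so the ellipse is drawn
+# rotated and the box extent is computed from the rotated bounds of the
+# children, as described in the class docstring.
+def _demo_aux_transform_box(ax):
+    from matplotlib.patches import Ellipse
+    box = AuxTransformBox(mtransforms.Affine2D().rotate_deg(30))
+    box.add_artist(Ellipse((0, 0), width=30, height=10, fc="none", ec="b"))
+    anchored = AnchoredOffsetbox(loc=3, child=box, frameon=True)
+    ax.add_artist(anchored)
+    return anchored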
+
+
+class AnchoredOffsetbox(OffsetBox):
+ """
+ An offset box placed according to the legend location
+ loc. AnchoredOffsetbox has a single child. When multiple children
+ are needed, use another OffsetBox class to enclose them. By default,
+ the offset box is anchored against its parent axes. You may
+ explicitly specify the bbox_to_anchor.
+ """
+ zorder = 5 # zorder of the legend
+
+ # Location codes
+ codes = {'upper right': 1,
+ 'upper left': 2,
+ 'lower left': 3,
+ 'lower right': 4,
+ 'right': 5,
+ 'center left': 6,
+ 'center right': 7,
+ 'lower center': 8,
+ 'upper center': 9,
+ 'center': 10,
+ }
+
+ def __init__(self, loc,
+ pad=0.4, borderpad=0.5,
+ child=None, prop=None, frameon=True,
+ bbox_to_anchor=None,
+ bbox_transform=None,
+ **kwargs):
+ """
+ loc is a string or an integer specifying the legend location.
+ The valid location codes are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4,
+ 'right' : 5, (same as 'center right', for back-compatibility)
+ 'center left' : 6,
+ 'center right' : 7,
+ 'lower center' : 8,
+ 'upper center' : 9,
+ 'center' : 10,
+
+ pad : padding around the child for drawing a frame, given in
+ fraction of fontsize.
+
+ borderpad : padding between the offsetbox frame and the bbox_to_anchor.
+
+ child : OffsetBox instance that will be anchored.
+
+ prop : font property. This is only used as a reference for paddings.
+
+ frameon : draw a frame box if True.
+
+ bbox_to_anchor : bbox to anchor. Use self.axes.bbox if None.
+
+ bbox_transform : transform with which the bbox_to_anchor will be
+ transformed.
+
+ """
+ super(AnchoredOffsetbox, self).__init__(**kwargs)
+
+ self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
+ self.set_child(child)
+
+ if isinstance(loc, six.string_types):
+ try:
+ loc = self.codes[loc]
+ except KeyError:
+ raise ValueError('Unrecognized location "%s". Valid '
+ 'locations are\n\t%s\n'
+ % (loc, '\n\t'.join(self.codes)))
+
+ self.loc = loc
+ self.borderpad = borderpad
+ self.pad = pad
+
+ if prop is None:
+ self.prop = FontProperties(size=rcParams["legend.fontsize"])
+ elif isinstance(prop, dict):
+ self.prop = FontProperties(**prop)
+ if "size" not in prop:
+ self.prop.set_size(rcParams["legend.fontsize"])
+ else:
+ self.prop = prop
+
+ self.patch = FancyBboxPatch(
+ xy=(0.0, 0.0), width=1., height=1.,
+ facecolor='w', edgecolor='k',
+ mutation_scale=self.prop.get_size_in_points(),
+ snap=True
+ )
+ self.patch.set_boxstyle("square", pad=0)
+ self._drawFrame = frameon
+
+ def set_child(self, child):
+ "set the child to be anchored"
+ self._child = child
+ if child is not None:
+ child.axes = self.axes
+ self.stale = True
+
+ def get_child(self):
+ "return the child"
+ return self._child
+
+ def get_children(self):
+ "return the list of children"
+ return [self._child]
+
+ def get_extent(self, renderer):
+ """
+ Return the extent of the artist: the extent of the child
+ plus the padding.
+ """
+ w, h, xd, yd = self.get_child().get_extent(renderer)
+ fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
+ pad = self.pad * fontsize
+
+ return w + 2 * pad, h + 2 * pad, xd + pad, yd + pad
+
+ def get_bbox_to_anchor(self):
+ """
+ Return the bbox that the legend will be anchored to.
+ """
+ if self._bbox_to_anchor is None:
+ return self.axes.bbox
+ else:
+ transform = self._bbox_to_anchor_transform
+ if transform is None:
+ return self._bbox_to_anchor
+ else:
+ return TransformedBbox(self._bbox_to_anchor,
+ transform)
+
+ def set_bbox_to_anchor(self, bbox, transform=None):
+ """
+ Set the bbox that the child will be anchored to.
+
+ *bbox* can be a Bbox instance, a list of [left, bottom, width,
+ height], or a list of [left, bottom] where the width and
+ height will be assumed to be zero. The bbox will be
+ transformed to display coordinates by the given transform.
+ """
+ if bbox is None or isinstance(bbox, BboxBase):
+ self._bbox_to_anchor = bbox
+ else:
+ try:
+ l = len(bbox)
+ except TypeError:
+ raise ValueError("Invalid argument for bbox : %s" % str(bbox))
+
+ if l == 2:
+ bbox = [bbox[0], bbox[1], 0, 0]
+
+ self._bbox_to_anchor = Bbox.from_bounds(*bbox)
+
+ self._bbox_to_anchor_transform = transform
+ self.stale = True
+
+ def get_window_extent(self, renderer):
+ '''
+ get the bounding box in display space.
+ '''
+ self._update_offset_func(renderer)
+ w, h, xd, yd = self.get_extent(renderer)
+ ox, oy = self.get_offset(w, h, xd, yd, renderer)
+ return Bbox.from_bounds(ox - xd, oy - yd, w, h)
+
+ def _update_offset_func(self, renderer, fontsize=None):
+ """
+ Update the offset func which depends on the dpi of the
+ renderer (because of the padding).
+ """
+ if fontsize is None:
+ fontsize = renderer.points_to_pixels(
+ self.prop.get_size_in_points())
+
+ def _offset(w, h, xd, yd, renderer, fontsize=fontsize, self=self):
+ bbox = Bbox.from_bounds(0, 0, w, h)
+ borderpad = self.borderpad * fontsize
+ bbox_to_anchor = self.get_bbox_to_anchor()
+
+ x0, y0 = self._get_anchored_bbox(self.loc,
+ bbox,
+ bbox_to_anchor,
+ borderpad)
+ return x0 + xd, y0 + yd
+
+ self.set_offset(_offset)
+
+ def update_frame(self, bbox, fontsize=None):
+ self.patch.set_bounds(bbox.x0, bbox.y0,
+ bbox.width, bbox.height)
+
+ if fontsize:
+ self.patch.set_mutation_scale(fontsize)
+
+ def draw(self, renderer):
+ "draw the artist"
+
+ if not self.get_visible():
+ return
+
+ fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
+ self._update_offset_func(renderer, fontsize)
+
+ if self._drawFrame:
+ # update the location and size of the legend
+ bbox = self.get_window_extent(renderer)
+ self.update_frame(bbox, fontsize)
+ self.patch.draw(renderer)
+
+ width, height, xdescent, ydescent = self.get_extent(renderer)
+
+ px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
+
+ self.get_child().set_offset((px, py))
+ self.get_child().draw(renderer)
+ self.stale = False
+
+ def _get_anchored_bbox(self, loc, bbox, parentbbox, borderpad):
+ """
+ return the position of the bbox anchored at the parentbbox
+ with the loc code, with the borderpad.
+ """
+ assert loc in range(1, 11) # called only internally
+
+ BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = xrange(11)
+
+ anchor_coefs = {UR: "NE",
+ UL: "NW",
+ LL: "SW",
+ LR: "SE",
+ R: "E",
+ CL: "W",
+ CR: "E",
+ LC: "S",
+ UC: "N",
+ C: "C"}
+
+ c = anchor_coefs[loc]
+
+ container = parentbbox.padded(-borderpad)
+ anchored_box = bbox.anchored(c, container=container)
+ return anchored_box.x0, anchored_box.y0
+
+
+class AnchoredText(AnchoredOffsetbox):
+ """
+ AnchoredOffsetbox with Text.
+ """
+
+ def __init__(self, s, loc, pad=0.4, borderpad=0.5, prop=None, **kwargs):
+ """
+ Parameters
+ ----------
+ s : string
+ Text.
+
+ loc : str
+ Location code.
+
+ pad : float, optional
+ Pad between the text and the frame as fraction of the font
+ size.
+
+ borderpad : float, optional
+ Pad between the frame and the axes (or *bbox_to_anchor*).
+
+ prop : `matplotlib.font_manager.FontProperties`
+ Font properties.
+
+ Notes
+ -----
+ Other keyword parameters of `AnchoredOffsetbox` are also
+ allowed.
+ """
+
+ if prop is None:
+ prop = {}
+ badkwargs = {'ha', 'horizontalalignment', 'va', 'verticalalignment'}
+ if badkwargs & set(prop):
+ warnings.warn("Mixing horizontalalignment or verticalalignment "
+ "with AnchoredText is not supported.")
+
+ self.txt = TextArea(s, textprops=prop, minimumdescent=False)
+ fp = self.txt._text.get_fontproperties()
+ super(AnchoredText, self).__init__(
+ loc, pad=pad, borderpad=borderpad, child=self.txt, prop=fp,
+ **kwargs)
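+
+
+# Illustrative usage sketch (not part of the upstream matplotlib source):
+# AnchoredText is a convenient way to pin a short label to a corner of the
+# axes, legend-style.
+def _demo_anchored_text(ax):
+    at = AnchoredText("panel (a)", loc=2, prop=dict(size=9), frameon=True)
+    ax.add_artist(at)
+    return at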
+
+
+class OffsetImage(OffsetBox):
+ def __init__(self, arr,
+ zoom=1,
+ cmap=None,
+ norm=None,
+ interpolation=None,
+ origin=None,
+ filternorm=1,
+ filterrad=4.0,
+ resample=False,
+ dpi_cor=True,
+ **kwargs
+ ):
+
+ OffsetBox.__init__(self)
+ self._dpi_cor = dpi_cor
+
+ self.image = BboxImage(bbox=self.get_window_extent,
+ cmap=cmap,
+ norm=norm,
+ interpolation=interpolation,
+ origin=origin,
+ filternorm=filternorm,
+ filterrad=filterrad,
+ resample=resample,
+ **kwargs
+ )
+
+ self._children = [self.image]
+
+ self.set_zoom(zoom)
+ self.set_data(arr)
+
+ def set_data(self, arr):
+ self._data = np.asarray(arr)
+ self.image.set_data(self._data)
+ self.stale = True
+
+ def get_data(self):
+ return self._data
+
+ def set_zoom(self, zoom):
+ self._zoom = zoom
+ self.stale = True
+
+ def get_zoom(self):
+ return self._zoom
+
+# def set_axes(self, axes):
+# self.image.set_axes(axes)
+# martist.Artist.set_axes(self, axes)
+
+# def set_offset(self, xy):
+# """
+# set offset of the container.
+
+# Accept : tuple of x,y coordinate in display units.
+# """
+# self._offset = xy
+
+# self.offset_transform.clear()
+# self.offset_transform.translate(xy[0], xy[1])
+
+ def get_offset(self):
+ """
+ return offset of the container.
+ """
+ return self._offset
+
+ def get_children(self):
+ return [self.image]
+
+ def get_window_extent(self, renderer):
+ '''
+ get the bounding box in display space.
+ '''
+ w, h, xd, yd = self.get_extent(renderer)
+ ox, oy = self.get_offset()
+ return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
+
+ def get_extent(self, renderer):
+ if self._dpi_cor: # True, do correction
+ dpi_cor = renderer.points_to_pixels(1.)
+ else:
+ dpi_cor = 1.
+
+ zoom = self.get_zoom()
+ data = self.get_data()
+ ny, nx = data.shape[:2]
+ w, h = dpi_cor * nx * zoom, dpi_cor * ny * zoom
+
+ return w, h, 0, 0
+
+ def draw(self, renderer):
+ """
+ Draw the children
+ """
+ self.image.draw(renderer)
+ # bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
+ self.stale = False
+
+
+class AnnotationBbox(martist.Artist, _AnnotationBase):
+ """
+ Annotation-like class, but with offsetbox instead of Text.
+ """
+ zorder = 3
+
+ def __str__(self):
+ return "AnnotationBbox(%g,%g)" % (self.xy[0], self.xy[1])
+
+ @docstring.dedent_interpd
+ def __init__(self, offsetbox, xy,
+ xybox=None,
+ xycoords='data',
+ boxcoords=None,
+ frameon=True, pad=0.4, # BboxPatch
+ annotation_clip=None,
+ box_alignment=(0.5, 0.5),
+ bboxprops=None,
+ arrowprops=None,
+ fontsize=None,
+ **kwargs):
+ """
+ *offsetbox* : OffsetBox instance
+
+ *xycoords* : same as Annotation but can be a tuple of two
+ strings which are interpreted as x and y coordinates.
+
+ *boxcoords* : similar to textcoords as Annotation but can be a
+ tuple of two strings which are interpreted as x and y
+ coordinates.
+
+ *box_alignment* : a tuple of two floats for the horizontal and
+ vertical alignment of the offset box w.r.t. the *boxcoords*.
+ The lower-left corner is (0, 0) and the upper-right corner is (1, 1).
+
+ Other parameters are identical to those of Annotation.
+ """
+
+ martist.Artist.__init__(self, **kwargs)
+ _AnnotationBase.__init__(self,
+ xy,
+ xycoords=xycoords,
+ annotation_clip=annotation_clip)
+
+ self.offsetbox = offsetbox
+
+ self.arrowprops = arrowprops
+
+ self.set_fontsize(fontsize)
+
+ if xybox is None:
+ self.xybox = xy
+ else:
+ self.xybox = xybox
+
+ if boxcoords is None:
+ self.boxcoords = xycoords
+ else:
+ self.boxcoords = boxcoords
+
+ if arrowprops is not None:
+ self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
+ self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
+ **self.arrowprops)
+ else:
+ self._arrow_relpos = None
+ self.arrow_patch = None
+
+ #self._fw, self._fh = 0., 0. # for alignment
+ self._box_alignment = box_alignment
+
+ # frame
+ self.patch = FancyBboxPatch(
+ xy=(0.0, 0.0), width=1., height=1.,
+ facecolor='w', edgecolor='k',
+ mutation_scale=self.prop.get_size_in_points(),
+ snap=True
+ )
+ self.patch.set_boxstyle("square", pad=pad)
+ if bboxprops:
+ self.patch.set(**bboxprops)
+ self._drawFrame = frameon
+
+ @property
+ def xyann(self):
+ return self.xybox
+
+ @xyann.setter
+ def xyann(self, xyann):
+ self.xybox = xyann
+ self.stale = True
+
+ @property
+ def anncoords(self):
+ return self.boxcoords
+
+ @anncoords.setter
+ def anncoords(self, coords):
+ self.boxcoords = coords
+ self.stale = True
+
+ def contains(self, event):
+ t, tinfo = self.offsetbox.contains(event)
+ #if self.arrow_patch is not None:
+ # a,ainfo=self.arrow_patch.contains(event)
+ # t = t or a
+
+ # self.arrow_patch is currently not checked as this can be a line - JJ
+
+ return t, tinfo
+
+ def get_children(self):
+ children = [self.offsetbox, self.patch]
+ if self.arrow_patch:
+ children.append(self.arrow_patch)
+ return children
+
+ def set_figure(self, fig):
+
+ if self.arrow_patch is not None:
+ self.arrow_patch.set_figure(fig)
+ self.offsetbox.set_figure(fig)
+ martist.Artist.set_figure(self, fig)
+
+ def set_fontsize(self, s=None):
+ """
+ set fontsize in points
+ """
+ if s is None:
+ s = rcParams["legend.fontsize"]
+
+ self.prop = FontProperties(size=s)
+ self.stale = True
+
+ def get_fontsize(self, s=None):
+ """
+ return fontsize in points
+ """
+ return self.prop.get_size_in_points()
+
+ def update_positions(self, renderer):
+ """
+ Update the pixel positions of the annotated point and the text.
+ """
+ xy_pixel = self._get_position_xy(renderer)
+ self._update_position_xybox(renderer, xy_pixel)
+
+ mutation_scale = renderer.points_to_pixels(self.get_fontsize())
+ self.patch.set_mutation_scale(mutation_scale)
+
+ if self.arrow_patch:
+ self.arrow_patch.set_mutation_scale(mutation_scale)
+
+ def _update_position_xybox(self, renderer, xy_pixel):
+ """
+ Update the pixel positions of the annotation text and the arrow
+ patch.
+ """
+
+ x, y = self.xybox
+ if isinstance(self.boxcoords, tuple):
+ xcoord, ycoord = self.boxcoords
+ x1, y1 = self._get_xy(renderer, x, y, xcoord)
+ x2, y2 = self._get_xy(renderer, x, y, ycoord)
+ ox0, oy0 = x1, y2
+ else:
+ ox0, oy0 = self._get_xy(renderer, x, y, self.boxcoords)
+
+ w, h, xd, yd = self.offsetbox.get_extent(renderer)
+
+ _fw, _fh = self._box_alignment
+ self.offsetbox.set_offset((ox0 - _fw * w + xd, oy0 - _fh * h + yd))
+
+ # update patch position
+ bbox = self.offsetbox.get_window_extent(renderer)
+ #self.offsetbox.set_offset((ox0-_fw*w, oy0-_fh*h))
+ self.patch.set_bounds(bbox.x0, bbox.y0,
+ bbox.width, bbox.height)
+
+ x, y = xy_pixel
+
+ ox1, oy1 = x, y
+
+ if self.arrowprops:
+ x0, y0 = x, y
+
+ d = self.arrowprops.copy()
+
+ # Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
+
+ # adjust the starting point of the arrow relative to
+ # the textbox.
+ # TODO : Rotation needs to be accounted for.
+ relpos = self._arrow_relpos
+
+ ox0 = bbox.x0 + bbox.width * relpos[0]
+ oy0 = bbox.y0 + bbox.height * relpos[1]
+
+ # The arrow will be drawn from (ox0, oy0) to (ox1,
+ # oy1). It will be first clipped by patchA and patchB.
+ # Then it will be shrunk by shrinkA and shrinkB
+ # (in points). If patch A is not set, self.bbox_patch
+ # is used.
+
+ self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
+ fs = self.prop.get_size_in_points()
+ mutation_scale = d.pop("mutation_scale", fs)
+ mutation_scale = renderer.points_to_pixels(mutation_scale)
+ self.arrow_patch.set_mutation_scale(mutation_scale)
+
+ patchA = d.pop("patchA", self.patch)
+ self.arrow_patch.set_patchA(patchA)
+
+ def draw(self, renderer):
+ """
+ Draw the :class:`Annotation` object to the given *renderer*.
+ """
+
+ if renderer is not None:
+ self._renderer = renderer
+ if not self.get_visible():
+ return
+
+ xy_pixel = self._get_position_xy(renderer)
+
+ if not self._check_xy(renderer, xy_pixel):
+ return
+
+ self.update_positions(renderer)
+
+ if self.arrow_patch is not None:
+ if self.arrow_patch.figure is None and self.figure is not None:
+ self.arrow_patch.figure = self.figure
+ self.arrow_patch.draw(renderer)
+
+ if self._drawFrame:
+ self.patch.draw(renderer)
+
+ self.offsetbox.draw(renderer)
+ self.stale = False
+
+
+class DraggableBase(object):
+ """
+ Helper base class for a draggable artist (legend, offsetbox).
+ The derived class must override the following two methods.
+
+ def save_offset(self):
+ pass
+
+ def update_offset(self, dx, dy):
+ pass
+
+ *save_offset* is called when the object is picked for dragging; it
+ is meant to save the reference position of the artist.
+
+ *update_offset* is called during the dragging. dx and dy are the pixel
+ offsets from the point where the mouse drag started.
+
+ Optionally, you may override the following two methods.
+
+ def artist_picker(self, artist, evt):
+ return self.ref_artist.contains(evt)
+
+ def finalize_offset(self):
+ pass
+
+ *artist_picker* is the picker method that will be
+ used. *finalize_offset* is called when the mouse is released. In
+ the current implementation of DraggableLegend and DraggableAnnotation,
+ *update_offset* simply places the artists in display
+ coordinates, and *finalize_offset* recalculates their position in
+ normalized axes coordinates and sets the relevant attribute.
+
+ """
+ def __init__(self, ref_artist, use_blit=False):
+ self.ref_artist = ref_artist
+ self.got_artist = False
+
+ self.canvas = self.ref_artist.figure.canvas
+ self._use_blit = use_blit and self.canvas.supports_blit
+
+ c2 = self.canvas.mpl_connect('pick_event', self.on_pick)
+ c3 = self.canvas.mpl_connect('button_release_event', self.on_release)
+
+ ref_artist.set_picker(self.artist_picker)
+ self.cids = [c2, c3]
+
+ def on_motion(self, evt):
+ if self.got_artist:
+ dx = evt.x - self.mouse_x
+ dy = evt.y - self.mouse_y
+ self.update_offset(dx, dy)
+ self.canvas.draw()
+
+ def on_motion_blit(self, evt):
+ if self.got_artist:
+ dx = evt.x - self.mouse_x
+ dy = evt.y - self.mouse_y
+ self.update_offset(dx, dy)
+ self.canvas.restore_region(self.background)
+ self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
+ self.canvas.blit(self.ref_artist.figure.bbox)
+
+ def on_pick(self, evt):
+ if evt.artist == self.ref_artist:
+
+ self.mouse_x = evt.mouseevent.x
+ self.mouse_y = evt.mouseevent.y
+ self.got_artist = True
+
+ if self._use_blit:
+ self.ref_artist.set_animated(True)
+ self.canvas.draw()
+ self.background = self.canvas.copy_from_bbox(
+ self.ref_artist.figure.bbox)
+ self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
+ self.canvas.blit(self.ref_artist.figure.bbox)
+ self._c1 = self.canvas.mpl_connect('motion_notify_event',
+ self.on_motion_blit)
+ else:
+ self._c1 = self.canvas.mpl_connect('motion_notify_event',
+ self.on_motion)
+ self.save_offset()
+
+ def on_release(self, event):
+ if self.got_artist:
+ self.finalize_offset()
+ self.got_artist = False
+ self.canvas.mpl_disconnect(self._c1)
+
+ if self._use_blit:
+ self.ref_artist.set_animated(False)
+
+ def disconnect(self):
+ """disconnect the callbacks"""
+ for cid in self.cids:
+ self.canvas.mpl_disconnect(cid)
+ try:
+ c1 = self._c1
+ except AttributeError:
+ pass
+ else:
+ self.canvas.mpl_disconnect(c1)
+
+ def artist_picker(self, artist, evt):
+ return self.ref_artist.contains(evt)
+
+ def save_offset(self):
+ pass
+
+ def update_offset(self, dx, dy):
+ pass
+
+ def finalize_offset(self):
+ pass
+
+
+class DraggableOffsetBox(DraggableBase):
+ def __init__(self, ref_artist, offsetbox, use_blit=False):
+ DraggableBase.__init__(self, ref_artist, use_blit=use_blit)
+ self.offsetbox = offsetbox
+
+ def save_offset(self):
+ offsetbox = self.offsetbox
+ renderer = offsetbox.figure._cachedRenderer
+ w, h, xd, yd = offsetbox.get_extent(renderer)
+ offset = offsetbox.get_offset(w, h, xd, yd, renderer)
+ self.offsetbox_x, self.offsetbox_y = offset
+ self.offsetbox.set_offset(offset)
+
+ def update_offset(self, dx, dy):
+ loc_in_canvas = self.offsetbox_x + dx, self.offsetbox_y + dy
+ self.offsetbox.set_offset(loc_in_canvas)
+
+ def get_loc_in_canvas(self):
+
+ offsetbox = self.offsetbox
+ renderer = offsetbox.figure._cachedRenderer
+ w, h, xd, yd = offsetbox.get_extent(renderer)
+ ox, oy = offsetbox._offset
+ loc_in_canvas = (ox - xd, oy - yd)
+
+ return loc_in_canvas
+
+
+class DraggableAnnotation(DraggableBase):
+ def __init__(self, annotation, use_blit=False):
+ DraggableBase.__init__(self, annotation, use_blit=use_blit)
+ self.annotation = annotation
+
+ def save_offset(self):
+ ann = self.annotation
+ self.ox, self.oy = ann.get_transform().transform(ann.xyann)
+
+ def update_offset(self, dx, dy):
+ ann = self.annotation
+ ann.xyann = ann.get_transform().inverted().transform(
+ (self.ox + dx, self.oy + dy))
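+
+
+# Illustrative usage sketch (not part of the upstream matplotlib source):
+# wrapping an existing annotation in DraggableAnnotation lets the user move
+# its text with the mouse; keep a reference to the wrapper so the canvas
+# callbacks stay connected.
+def _demo_draggable_annotation(ax):
+    ann = ax.annotate("drag me", xy=(0.5, 0.5), xytext=(0.7, 0.7),
+                      xycoords="axes fraction", textcoords="axes fraction",
+                      arrowprops=dict(arrowstyle="->"))
+    return DraggableAnnotation(ann, use_blit=False)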
+
+
+if __name__ == "__main__":
+ import matplotlib.pyplot as plt
+ fig = plt.figure(1)
+ fig.clf()
+ ax = plt.subplot(121)
+
+ #txt = ax.text(0.5, 0.5, "Test", size=30, ha="center", color="w")
+ kwargs = dict()
+
+ a = np.arange(256).reshape(16, 16) / 256.
+ myimage = OffsetImage(a,
+ zoom=2,
+ norm=None,
+ origin=None,
+ **kwargs
+ )
+ ax.add_artist(myimage)
+
+ myimage.set_offset((100, 100))
+
+ myimage2 = OffsetImage(a,
+ zoom=2,
+ norm=None,
+ origin=None,
+ **kwargs
+ )
+ ann = AnnotationBbox(myimage2, (0.5, 0.5),
+ xybox=(30, 30),
+ xycoords='data',
+ boxcoords="offset points",
+ frameon=True, pad=0.4, # BboxPatch
+ bboxprops=dict(boxstyle="round", fc="y"),
+ fontsize=None,
+ arrowprops=dict(arrowstyle="->"),
+ )
+
+ ax.add_artist(ann)
+
+ plt.draw()
+ plt.show()
diff --git a/contrib/python/matplotlib/py2/matplotlib/patches.py b/contrib/python/matplotlib/py2/matplotlib/patches.py
new file mode 100644
index 00000000000..1d66125561b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/patches.py
@@ -0,0 +1,4720 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import map, zip
+
+import math
+import warnings
+
+import numpy as np
+
+import matplotlib as mpl
+from . import artist, cbook, colors, docstring, lines as mlines, transforms
+from .bezier import (
+ concatenate_paths, get_cos_sin, get_intersection, get_parallels,
+ inside_circle, make_path_regular, make_wedged_bezier2,
+ split_bezier_intersecting_with_closedpath, split_path_inout)
+from .path import Path
+
+_patch_alias_map = {
+ 'antialiased': ['aa'],
+ 'edgecolor': ['ec'],
+ 'facecolor': ['fc'],
+ 'linewidth': ['lw'],
+ 'linestyle': ['ls']
+ }
+
+
+class Patch(artist.Artist):
+ """
+ A patch is a 2D artist with a face color and an edge color.
+
+ If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
+ are *None*, they default to their rc params setting.
+ """
+ zorder = 1
+ validCap = ('butt', 'round', 'projecting')
+ validJoin = ('miter', 'round', 'bevel')
+
+ # Whether to draw an edge by default. Set on a
+ # subclass-by-subclass basis.
+ _edge_default = False
+
+ def __str__(self):
+ return str(self.__class__).split('.')[-1]
+
+ def __init__(self,
+ edgecolor=None,
+ facecolor=None,
+ color=None,
+ linewidth=None,
+ linestyle=None,
+ antialiased=None,
+ hatch=None,
+ fill=True,
+ capstyle=None,
+ joinstyle=None,
+ **kwargs):
+ """
+ The following kwarg properties are supported
+
+ %(Patch)s
+ """
+ artist.Artist.__init__(self)
+
+ if linewidth is None:
+ linewidth = mpl.rcParams['patch.linewidth']
+ if linestyle is None:
+ linestyle = "solid"
+ if capstyle is None:
+ capstyle = 'butt'
+ if joinstyle is None:
+ joinstyle = 'miter'
+ if antialiased is None:
+ antialiased = mpl.rcParams['patch.antialiased']
+
+ self._hatch_color = colors.to_rgba(mpl.rcParams['hatch.color'])
+ self._fill = True # needed for set_facecolor call
+ if color is not None:
+ if (edgecolor is not None or facecolor is not None):
+ warnings.warn("Setting the 'color' property will override "
+ "the edgecolor or facecolor properties.")
+ self.set_color(color)
+ else:
+ self.set_edgecolor(edgecolor)
+ self.set_facecolor(facecolor)
+ # unscaled dashes. Needed to scale dash patterns by lw
+ self._us_dashes = None
+ self._linewidth = 0
+
+ self.set_fill(fill)
+ self.set_linestyle(linestyle)
+ self.set_linewidth(linewidth)
+ self.set_antialiased(antialiased)
+ self.set_hatch(hatch)
+ self.set_capstyle(capstyle)
+ self.set_joinstyle(joinstyle)
+ self._combined_transform = transforms.IdentityTransform()
+
+ if len(kwargs):
+ self.update(kwargs)
+
+ def get_verts(self):
+ """
+ Return a copy of the vertices used in this patch
+
+ If the patch contains Bezier curves, the curves will be
+ interpolated by line segments. To access the curves as
+ curves, use :meth:`get_path`.
+ """
+ trans = self.get_transform()
+ path = self.get_path()
+ polygons = path.to_polygons(trans)
+ if len(polygons):
+ return polygons[0]
+ return []
+
+ def _process_radius(self, radius):
+ if radius is not None:
+ return radius
+ if cbook.is_numlike(self._picker):
+ _radius = self._picker
+ else:
+ if self.get_edgecolor()[3] == 0:
+ _radius = 0
+ else:
+ _radius = self.get_linewidth()
+ return _radius
+
+ def contains(self, mouseevent, radius=None):
+ """Test whether the mouse event occurred in the patch.
+
+ Returns T/F, {}
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+ radius = self._process_radius(radius)
+ inside = self.get_path().contains_point(
+ (mouseevent.x, mouseevent.y), self.get_transform(), radius)
+ return inside, {}
+
+ def contains_point(self, point, radius=None):
+ """
+ Returns ``True`` if the given *point* is inside the path
+ (transformed with its transform attribute).
+
+ *radius* allows the path to be made slightly larger or smaller.
+ """
+ radius = self._process_radius(radius)
+ return self.get_path().contains_point(point,
+ self.get_transform(),
+ radius)
+
+ def contains_points(self, points, radius=None):
+ """
+ Returns a bool array which is ``True`` if the (closed) path
+ (transformed with its transform attribute) contains the
+ corresponding point.
+
+ *points* must be Nx2 array.
+ *radius* allows the path to be made slightly larger or smaller.
+ """
+ radius = self._process_radius(radius)
+ return self.get_path().contains_points(points,
+ self.get_transform(),
+ radius)
+
+ def update_from(self, other):
+ """
+ Updates this :class:`Patch` from the properties of *other*.
+ """
+ artist.Artist.update_from(self, other)
+ # For some properties we don't need or don't want to go through the
+ # getters/setters, so we just copy them directly.
+ self._edgecolor = other._edgecolor
+ self._facecolor = other._facecolor
+ self._fill = other._fill
+ self._hatch = other._hatch
+ self._hatch_color = other._hatch_color
+ # copy the unscaled dash pattern
+ self._us_dashes = other._us_dashes
+ self.set_linewidth(other._linewidth) # also sets dash properties
+ self.set_transform(other.get_data_transform())
+
+ def get_extents(self):
+ """
+ Return a :class:`~matplotlib.transforms.Bbox` object defining
+ the axis-aligned extents of the :class:`Patch`.
+ """
+ return self.get_path().get_extents(self.get_transform())
+
+ def get_transform(self):
+ """
+ Return the :class:`~matplotlib.transforms.Transform` applied
+ to the :class:`Patch`.
+ """
+ return self.get_patch_transform() + artist.Artist.get_transform(self)
+
+ def get_data_transform(self):
+ """
+ Return the :class:`~matplotlib.transforms.Transform` instance which
+ maps data coordinates to physical coordinates.
+ """
+ return artist.Artist.get_transform(self)
+
+ def get_patch_transform(self):
+ """
+ Return the :class:`~matplotlib.transforms.Transform` instance which
+ takes patch coordinates to data coordinates.
+
+ For example, one may define a patch of a circle which represents a
+ radius of 5 by providing coordinates for a unit circle, and a
+ transform which scales the coordinates (the patch coordinate) by 5.
+ """
+ return transforms.IdentityTransform()
+
+ def get_antialiased(self):
+ """
+ Returns True if the :class:`Patch` is to be drawn with antialiasing.
+ """
+ return self._antialiased
+ get_aa = get_antialiased
+
+ def get_edgecolor(self):
+ """
+ Return the edge color of the :class:`Patch`.
+ """
+ return self._edgecolor
+ get_ec = get_edgecolor
+
+ def get_facecolor(self):
+ """
+ Return the face color of the :class:`Patch`.
+ """
+ return self._facecolor
+ get_fc = get_facecolor
+
+ def get_linewidth(self):
+ """
+ Return the line width in points.
+ """
+ return self._linewidth
+ get_lw = get_linewidth
+
+ def get_linestyle(self):
+ """
+ Return the linestyle. Will be one of ['solid' | 'dashed' |
+ 'dashdot' | 'dotted']
+ """
+ return self._linestyle
+ get_ls = get_linestyle
+
+ def set_antialiased(self, aa):
+ """
+ Set whether to use antialiased rendering.
+
+ Parameters
+ ----------
+ aa : bool or None
+ .. ACCEPTS: bool or None
+ """
+ if aa is None:
+ aa = mpl.rcParams['patch.antialiased']
+ self._antialiased = aa
+ self.stale = True
+
+ def set_aa(self, aa):
+ """alias for set_antialiased"""
+ return self.set_antialiased(aa)
+
+ def _set_edgecolor(self, color):
+ set_hatch_color = True
+ if color is None:
+ if (mpl.rcParams['patch.force_edgecolor'] or
+ not self._fill or self._edge_default):
+ color = mpl.rcParams['patch.edgecolor']
+ else:
+ color = 'none'
+ set_hatch_color = False
+
+ self._edgecolor = colors.to_rgba(color, self._alpha)
+ if set_hatch_color:
+ self._hatch_color = self._edgecolor
+ self.stale = True
+
+ def set_edgecolor(self, color):
+ """
+ Set the patch edge color
+
+ ACCEPTS: mpl color spec, None, 'none', or 'auto'
+ """
+ self._original_edgecolor = color
+ self._set_edgecolor(color)
+
+ def set_ec(self, color):
+ """alias for set_edgecolor"""
+ return self.set_edgecolor(color)
+
+ def _set_facecolor(self, color):
+ if color is None:
+ color = mpl.rcParams['patch.facecolor']
+ alpha = self._alpha if self._fill else 0
+ self._facecolor = colors.to_rgba(color, alpha)
+ self.stale = True
+
+ def set_facecolor(self, color):
+ """
+ Set the patch face color
+
+ ACCEPTS: mpl color spec, or None for default, or 'none' for no color
+ """
+ self._original_facecolor = color
+ self._set_facecolor(color)
+
+ def set_fc(self, color):
+ """alias for set_facecolor"""
+ return self.set_facecolor(color)
+
+ def set_color(self, c):
+ """
+ Set both the edgecolor and the facecolor.
+
+ ACCEPTS: matplotlib color spec
+
+ .. seealso::
+
+ :meth:`set_facecolor`, :meth:`set_edgecolor`
+ For setting the edge or face color individually.
+ """
+ self.set_facecolor(c)
+ self.set_edgecolor(c)
+
+ def set_alpha(self, alpha):
+ """
+ Set the alpha transparency of the patch.
+
+ ACCEPTS: float or None
+ """
+ if alpha is not None:
+ try:
+ float(alpha)
+ except TypeError:
+ raise TypeError('alpha must be a float or None')
+ artist.Artist.set_alpha(self, alpha)
+ self._set_facecolor(self._original_facecolor)
+ self._set_edgecolor(self._original_edgecolor)
+ # stale is already True
+
+ def set_linewidth(self, w):
+ """
+ Set the patch linewidth in points
+
+ ACCEPTS: float or None for default
+ """
+ if w is None:
+ w = mpl.rcParams['patch.linewidth']
+ if w is None:
+ w = mpl.rcParams['axes.linewidth']
+
+ self._linewidth = float(w)
+ # scale the dash pattern by the linewidth
+ offset, ls = self._us_dashes
+ self._dashoffset, self._dashes = mlines._scale_dashes(
+ offset, ls, self._linewidth)
+ self.stale = True
+
+ def set_lw(self, lw):
+ """alias for set_linewidth"""
+ return self.set_linewidth(lw)
+
+ def set_linestyle(self, ls):
+ """
+ Set the patch linestyle
+
+ =========================== =================
+ linestyle description
+ =========================== =================
+ ``'-'`` or ``'solid'`` solid line
+ ``'--'`` or ``'dashed'`` dashed line
+ ``'-.'`` or ``'dashdot'`` dash-dotted line
+ ``':'`` or ``'dotted'`` dotted line
+ =========================== =================
+
+ Alternatively a dash tuple of the following form can be provided::
+
+ (offset, onoffseq),
+
+ where ``onoffseq`` is an even length tuple of on and off ink
+ in points.
+
+ ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
+ (offset, on-off-dash-seq) |
+ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` |
+ ``' '`` | ``''``]
+
+ Parameters
+ ----------
+ ls : { '-', '--', '-.', ':'} and more see description
+ The line style.
+ """
+ if ls is None:
+ ls = "solid"
+ self._linestyle = ls
+ # get the unscaled dash pattern
+ offset, ls = self._us_dashes = mlines._get_dash_pattern(ls)
+ # scale the dash pattern by the linewidth
+ self._dashoffset, self._dashes = mlines._scale_dashes(
+ offset, ls, self._linewidth)
+ self.stale = True
+
+ def set_ls(self, ls):
+ """alias for set_linestyle"""
+ return self.set_linestyle(ls)
+
+ def set_fill(self, b):
+ """
+ Set whether to fill the patch.
+
+ Parameters
+ ----------
+ b : bool
+ .. ACCEPTS: bool
+ """
+ self._fill = bool(b)
+ self._set_facecolor(self._original_facecolor)
+ self._set_edgecolor(self._original_edgecolor)
+ self.stale = True
+
+ def get_fill(self):
+ 'return whether fill is set'
+ return self._fill
+
+ # Make fill a property so as to preserve the long-standing
+ # but somewhat inconsistent behavior in which fill was an
+ # attribute.
+ fill = property(get_fill, set_fill)
+
+ def set_capstyle(self, s):
+ """
+ Set the patch capstyle
+
+ ACCEPTS: ['butt' | 'round' | 'projecting']
+ """
+ s = s.lower()
+ if s not in self.validCap:
+ raise ValueError('set_capstyle passed "%s";\n' % (s,) +
+ 'valid capstyles are %s' % (self.validCap,))
+ self._capstyle = s
+ self.stale = True
+
+ def get_capstyle(self):
+ "Return the current capstyle"
+ return self._capstyle
+
+ def set_joinstyle(self, s):
+ """
+ Set the patch joinstyle
+
+ ACCEPTS: ['miter' | 'round' | 'bevel']
+ """
+ s = s.lower()
+ if s not in self.validJoin:
+ raise ValueError('set_joinstyle passed "%s";\n' % (s,) +
+ 'valid joinstyles are %s' % (self.validJoin,))
+ self._joinstyle = s
+ self.stale = True
+
+ def get_joinstyle(self):
+ "Return the current joinstyle"
+ return self._joinstyle
+
+ def set_hatch(self, hatch):
+ """
+ Set the hatching pattern
+
+ *hatch* can be one of::
+
+ / - diagonal hatching
+ \\ - back diagonal
+ | - vertical
+ - - horizontal
+ + - crossed
+ x - crossed diagonal
+ o - small circle
+ O - large circle
+ . - dots
+ * - stars
+
+ Letters can be combined, in which case all the specified
+ hatchings are done. If the same letter repeats, it increases the
+ density of hatching of that pattern.
+
+ Hatching is supported in the PostScript, PDF, SVG and Agg
+ backends only.
+
+ ACCEPTS: ['/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*']
+ """
+ self._hatch = hatch
+ self.stale = True
+
+ def get_hatch(self):
+ 'Return the current hatching pattern'
+ return self._hatch
+
+ @artist.allow_rasterization
+ def draw(self, renderer):
+ 'Draw the :class:`Patch` to the given *renderer*.'
+ if not self.get_visible():
+ return
+
+ renderer.open_group('patch', self.get_gid())
+ gc = renderer.new_gc()
+
+ gc.set_foreground(self._edgecolor, isRGBA=True)
+
+ lw = self._linewidth
+ if self._edgecolor[3] == 0:
+ lw = 0
+ gc.set_linewidth(lw)
+ gc.set_dashes(0, self._dashes)
+ gc.set_capstyle(self._capstyle)
+ gc.set_joinstyle(self._joinstyle)
+
+ gc.set_antialiased(self._antialiased)
+ self._set_gc_clip(gc)
+ gc.set_url(self._url)
+ gc.set_snap(self.get_snap())
+
+ rgbFace = self._facecolor
+ if rgbFace[3] == 0:
+ rgbFace = None # (some?) renderers expect this as no-fill signal
+
+ gc.set_alpha(self._alpha)
+
+ if self._hatch:
+ gc.set_hatch(self._hatch)
+ try:
+ gc.set_hatch_color(self._hatch_color)
+ except AttributeError:
+ # if we end up with a GC that does not have this method
+ warnings.warn("Your backend does not have support for "
+ "setting the hatch color.")
+
+ if self.get_sketch_params() is not None:
+ gc.set_sketch_params(*self.get_sketch_params())
+
+ path = self.get_path()
+ transform = self.get_transform()
+ tpath = transform.transform_path_non_affine(path)
+ affine = transform.get_affine()
+
+ if self.get_path_effects():
+ from matplotlib.patheffects import PathEffectRenderer
+ renderer = PathEffectRenderer(self.get_path_effects(), renderer)
+
+ renderer.draw_path(gc, tpath, affine, rgbFace)
+
+ gc.restore()
+ renderer.close_group('patch')
+ self.stale = False
+
+ def get_path(self):
+ """
+ Return the path of this patch
+ """
+ raise NotImplementedError('Derived must override')
+
+ def get_window_extent(self, renderer=None):
+ return self.get_path().get_extents(self.get_transform())
+
+
+patchdoc = artist.kwdoc(Patch)
+for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
+ 'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
+ 'FancyBboxPatch', 'Patch'):
+ docstring.interpd.update({k: patchdoc})
+
+# define Patch.__init__ docstring after the class has been added to interpd
+docstring.dedent_interpd(Patch.__init__)
+
+
+class Shadow(Patch):
+ def __str__(self):
+ return "Shadow(%s)" % (str(self.patch))
+
+ @docstring.dedent_interpd
+ def __init__(self, patch, ox, oy, props=None, **kwargs):
+ """
+ Create a shadow of the given *patch* offset by *ox*, *oy*.
+ *props*, if not *None*, is a patch property update dictionary.
+ If *None*, the shadow will have the same color as the face,
+ but darkened.
+
+ kwargs are
+ %(Patch)s
+ """
+ Patch.__init__(self)
+ self.patch = patch
+ self.props = props
+ self._ox, self._oy = ox, oy
+ self._shadow_transform = transforms.Affine2D()
+ self._update()
+
+ def _update(self):
+ self.update_from(self.patch)
+
+ # Place the shadow patch directly behind the inherited patch.
+ self.set_zorder(np.nextafter(self.patch.zorder, -np.inf))
+
+ if self.props is not None:
+ self.update(self.props)
+ else:
+ r, g, b, a = colors.to_rgba(self.patch.get_facecolor())
+ rho = 0.3
+ r = rho * r
+ g = rho * g
+ b = rho * b
+
+ self.set_facecolor((r, g, b, 0.5))
+ self.set_edgecolor((r, g, b, 0.5))
+ self.set_alpha(0.5)
+
+ def _update_transform(self, renderer):
+ ox = renderer.points_to_pixels(self._ox)
+ oy = renderer.points_to_pixels(self._oy)
+ self._shadow_transform.clear().translate(ox, oy)
+
+ def _get_ox(self):
+ return self._ox
+
+ def _set_ox(self, ox):
+ self._ox = ox
+
+ def _get_oy(self):
+ return self._oy
+
+ def _set_oy(self, oy):
+ self._oy = oy
+
+ def get_path(self):
+ return self.patch.get_path()
+
+ def get_patch_transform(self):
+ return self.patch.get_patch_transform() + self._shadow_transform
+
+ def draw(self, renderer):
+ self._update_transform(renderer)
+ Patch.draw(self, renderer)
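+
+
+# Illustrative usage sketch (not part of the upstream matplotlib source):
+# a Shadow copies the geometry of an existing patch and draws a darkened
+# copy behind it, offset by (ox, oy) given in points (see _update_transform).
+def _demo_shadow(ax):
+    circle = Circle((0.5, 0.5), 0.2, fc="c", ec="k")
+    shadow = Shadow(circle, 3, -3)
+    ax.add_patch(shadow)
+    ax.add_patch(circle)
+    return circle, shadow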
+
+
+class Rectangle(Patch):
+ """
+ Draw a rectangle with lower left at *xy* = (*x*, *y*) with
+ specified *width*, *height* and rotation *angle*.
+ """
+
+ def __str__(self):
+ pars = self._x0, self._y0, self._width, self._height, self.angle
+ fmt = "Rectangle(xy=(%g, %g), width=%g, height=%g, angle=%g)"
+ return fmt % pars
+
+ @docstring.dedent_interpd
+ def __init__(self, xy, width, height, angle=0.0, **kwargs):
+ """
+ Parameters
+ ----------
+ xy: length-2 tuple
+ The (x, y) coordinates of the rectangle's bottom-left corner
+ width:
+ Rectangle width
+ height:
+ Rectangle height
+ angle: float, optional
+ rotation in degrees anti-clockwise about *xy* (default is 0.0)
+ fill: bool, optional
+ Whether to fill the rectangle (default is ``True``)
+
+ Notes
+ -----
+ Valid kwargs are:
+ %(Patch)s
+ """
+
+ Patch.__init__(self, **kwargs)
+
+ self._x0 = xy[0]
+ self._y0 = xy[1]
+
+ self._width = width
+ self._height = height
+
+ self._x1 = self._x0 + self._width
+ self._y1 = self._y0 + self._height
+
+ self.angle = float(angle)
+ # Note: This cannot be calculated until this is added to an Axes
+ self._rect_transform = transforms.IdentityTransform()
+
+ def get_path(self):
+ """
+ Return the vertices of the rectangle
+ """
+ return Path.unit_rectangle()
+
+ def _update_patch_transform(self):
+ """NOTE: This cannot be called until after this has been added
+ to an Axes, otherwise unit conversion will fail. This
+ makes it very important to call the accessor method and
+ not directly access the transformation member variable.
+ """
+ x0, y0, x1, y1 = self._convert_units()
+ bbox = transforms.Bbox.from_extents(x0, y0, x1, y1)
+ rot_trans = transforms.Affine2D()
+ rot_trans.rotate_deg_around(x0, y0, self.angle)
+ self._rect_transform = transforms.BboxTransformTo(bbox)
+ self._rect_transform += rot_trans
+
+ def _update_x1(self):
+ self._x1 = self._x0 + self._width
+
+ def _update_y1(self):
+ self._y1 = self._y0 + self._height
+
+ def _convert_units(self):
+ '''
+ Convert bounds of the rectangle
+ '''
+ x0 = self.convert_xunits(self._x0)
+ y0 = self.convert_yunits(self._y0)
+ x1 = self.convert_xunits(self._x1)
+ y1 = self.convert_yunits(self._y1)
+ return x0, y0, x1, y1
+
+ def get_patch_transform(self):
+ self._update_patch_transform()
+ return self._rect_transform
+
+ def get_x(self):
+ "Return the left coord of the rectangle"
+ return self._x0
+
+ def get_y(self):
+ "Return the bottom coord of the rectangle"
+ return self._y0
+
+ def get_xy(self):
+ "Return the left and bottom coords of the rectangle"
+ return self._x0, self._y0
+
+ def get_width(self):
+ "Return the width of the rectangle"
+ return self._width
+
+ def get_height(self):
+ "Return the height of the rectangle"
+ return self._height
+
+ def set_x(self, x):
+ "Set the left coord of the rectangle"
+ self._x0 = x
+ self._update_x1()
+ self.stale = True
+
+ def set_y(self, y):
+ "Set the bottom coord of the rectangle"
+ self._y0 = y
+ self._update_y1()
+ self.stale = True
+
+ def set_xy(self, xy):
+ """
+ Set the left and bottom coords of the rectangle
+
+ ACCEPTS: 2-item sequence
+ """
+ self._x0, self._y0 = xy
+ self._update_x1()
+ self._update_y1()
+ self.stale = True
+
+ def set_width(self, w):
+ "Set the width of the rectangle"
+ self._width = w
+ self._update_x1()
+ self.stale = True
+
+ def set_height(self, h):
+ "Set the height of the rectangle"
+ self._height = h
+ self._update_y1()
+ self.stale = True
+
+ def set_bounds(self, *args):
+ """
+ Set the bounds of the rectangle: l,b,w,h
+
+ ACCEPTS: (left, bottom, width, height)
+ """
+ if len(args) == 1:
+ l, b, w, h = args[0]
+ else:
+ l, b, w, h = args
+ self._x0 = l
+ self._y0 = b
+ self._width = w
+ self._height = h
+ self._update_x1()
+ self._update_y1()
+ self.stale = True
+
+ def get_bbox(self):
+ x0, y0, x1, y1 = self._convert_units()
+ return transforms.Bbox.from_extents(x0, y0, x1, y1)
+
+ xy = property(get_xy, set_xy)
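+
+
+# Illustrative usage sketch (not part of the upstream matplotlib source):
+# the rectangle is rotated anti-clockwise about its bottom-left corner *xy*,
+# as described in the constructor docstring.
+def _demo_rectangle(ax):
+    rect = Rectangle((0.2, 0.2), width=0.4, height=0.2, angle=30.,
+                     facecolor="0.8", edgecolor="k")
+    ax.add_patch(rect)
+    return rect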
+
+
+class RegularPolygon(Patch):
+ """
+ A regular polygon patch.
+ """
+ def __str__(self):
+ return "Poly%d(%g,%g)" % (self._numVertices, self._xy[0], self._xy[1])
+
+ @docstring.dedent_interpd
+ def __init__(self, xy, numVertices, radius=5, orientation=0,
+ **kwargs):
+ """
+ Constructor arguments:
+
+ *xy*
+ A length 2 tuple (*x*, *y*) of the center.
+
+ *numVertices*
+ the number of vertices.
+
+ *radius*
+ The distance from the center to each of the vertices.
+
+ *orientation*
+ rotates the polygon (in radians).
+
+ Valid kwargs are:
+ %(Patch)s
+ """
+ self._xy = xy
+ self._numVertices = numVertices
+ self._orientation = orientation
+ self._radius = radius
+ self._path = Path.unit_regular_polygon(numVertices)
+ self._poly_transform = transforms.Affine2D()
+ self._update_transform()
+
+ Patch.__init__(self, **kwargs)
+
+ def _update_transform(self):
+ self._poly_transform.clear() \
+ .scale(self.radius) \
+ .rotate(self.orientation) \
+ .translate(*self.xy)
+
+ def _get_xy(self):
+ return self._xy
+
+ def _set_xy(self, xy):
+ self._xy = xy
+ self._update_transform()
+ xy = property(_get_xy, _set_xy)
+
+ def _get_orientation(self):
+ return self._orientation
+
+ def _set_orientation(self, orientation):
+ self._orientation = orientation
+ self._update_transform()
+ orientation = property(_get_orientation, _set_orientation)
+
+ def _get_radius(self):
+ return self._radius
+
+ def _set_radius(self, radius):
+ self._radius = radius
+ self._update_transform()
+ radius = property(_get_radius, _set_radius)
+
+ def _get_numvertices(self):
+ return self._numVertices
+
+ def _set_numvertices(self, numVertices):
+ self._numVertices = numVertices
+
+ numvertices = property(_get_numvertices, _set_numvertices)
+
+ def get_path(self):
+ return self._path
+
+ def get_patch_transform(self):
+ self._update_transform()
+ return self._poly_transform
+
+
+class PathPatch(Patch):
+ """
+ A general polycurve path patch.
+ """
+ _edge_default = True
+
+ def __str__(self):
+ return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
+
+ @docstring.dedent_interpd
+ def __init__(self, path, **kwargs):
+ """
+ *path* is a :class:`matplotlib.path.Path` object.
+
+ Valid kwargs are:
+ %(Patch)s
+
+ .. seealso::
+
+ :class:`Patch`
+ For additional kwargs
+
+ """
+ Patch.__init__(self, **kwargs)
+ self._path = path
+
+ def get_path(self):
+ return self._path
+
+
+class Polygon(Patch):
+ """
+ A general polygon patch.
+ """
+ def __str__(self):
+ return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
+
+ @docstring.dedent_interpd
+ def __init__(self, xy, closed=True, **kwargs):
+ """
+ *xy* is a numpy array with shape Nx2.
+
+ If *closed* is *True*, the polygon will be closed so the
+ starting and ending points are the same.
+
+ Valid kwargs are:
+ %(Patch)s
+
+ .. seealso::
+
+ :class:`Patch`
+ For additional kwargs
+
+ """
+ Patch.__init__(self, **kwargs)
+ self._closed = closed
+ self.set_xy(xy)
+
+ def get_path(self):
+ """
+ Get the path of the polygon
+
+ Returns
+ -------
+ path : Path
+ The :class:`~matplotlib.path.Path` object for
+ the polygon
+ """
+ return self._path
+
+ def get_closed(self):
+ """
+ Returns whether the polygon is closed
+
+ Returns
+ -------
+ closed : bool
+ If the path is closed
+ """
+ return self._closed
+
+ def set_closed(self, closed):
+ """
+ Set whether the polygon is closed
+
+ Parameters
+ ----------
+ closed : bool
+ True if the polygon is closed
+ """
+ if self._closed == bool(closed):
+ return
+ self._closed = bool(closed)
+ self.set_xy(self.get_xy())
+ self.stale = True
+
+ def get_xy(self):
+ """
+ Get the vertices of the path
+
+ Returns
+ -------
+ vertices : numpy array
+ The coordinates of the vertices as a Nx2
+ ndarray.
+ """
+ return self._path.vertices
+
+ def set_xy(self, xy):
+ """
+ Set the vertices of the polygon
+
+ Parameters
+ ----------
+ xy : numpy array or iterable of pairs
+ The coordinates of the vertices as a Nx2
+ ndarray or iterable of pairs.
+ """
+ xy = np.asarray(xy)
+ if self._closed:
+ if len(xy) and (xy[0] != xy[-1]).any():
+ xy = np.concatenate([xy, [xy[0]]])
+ else:
+ if len(xy) > 2 and (xy[0] == xy[-1]).all():
+ xy = xy[:-1]
+ self._path = Path(xy, closed=self._closed)
+ self.stale = True
+
+ _get_xy = get_xy
+ _set_xy = set_xy
+ xy = property(
+ get_xy, set_xy, None,
+ """Set/get the vertices of the polygon. This property is
+ provided for backward compatibility with matplotlib 0.91.x
+ only. New code should use
+ :meth:`~matplotlib.patches.Polygon.get_xy` and
+ :meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
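+
+
+# Illustrative usage sketch (not part of the upstream matplotlib source):
+# with closed=True the first vertex is re-appended internally, so a triangle
+# needs only its three corner points.
+def _demo_polygon(ax):
+    tri = Polygon([[0.1, 0.1], [0.9, 0.1], [0.5, 0.8]], closed=True,
+                  facecolor="y", edgecolor="k")
+    ax.add_patch(tri)
+    return tri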
+
+
+class Wedge(Patch):
+ """
+ Wedge shaped patch.
+ """
+ def __str__(self):
+ pars = (self.center[0], self.center[1], self.r,
+ self.theta1, self.theta2, self.width)
+ fmt = "Wedge(center=(%g, %g), r=%g, theta1=%g, theta2=%g, width=%s)"
+ return fmt % pars
+
+ @docstring.dedent_interpd
+ def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
+ """
+ Draw a wedge centered at (*x*, *y*) with radius *r* that
+ sweeps from *theta1* to *theta2* (in degrees). If *width* is given,
+ then a partial wedge is drawn from inner radius *r* - *width*
+ to outer radius *r*.
+
+ Valid kwargs are:
+
+ %(Patch)s
+ """
+ Patch.__init__(self, **kwargs)
+ self.center = center
+ self.r, self.width = r, width
+ self.theta1, self.theta2 = theta1, theta2
+ self._patch_transform = transforms.IdentityTransform()
+ self._recompute_path()
+
+ def _recompute_path(self):
+ # Inner and outer rings are connected unless the annulus is complete
+ if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
+ theta1, theta2 = 0, 360
+ connector = Path.MOVETO
+ else:
+ theta1, theta2 = self.theta1, self.theta2
+ connector = Path.LINETO
+
+ # Form the outer ring
+ arc = Path.arc(theta1, theta2)
+
+ if self.width is not None:
+ # Partial annulus needs to draw the outer ring
+ # followed by a reversed and scaled inner ring
+ v1 = arc.vertices
+ v2 = arc.vertices[::-1] * (self.r - self.width) / self.r
+ v = np.vstack([v1, v2, v1[0, :], (0, 0)])
+ c = np.hstack([arc.codes, arc.codes, connector, Path.CLOSEPOLY])
+ c[len(arc.codes)] = connector
+ else:
+ # Wedge doesn't need an inner ring
+ v = np.vstack([arc.vertices, [(0, 0), arc.vertices[0, :], (0, 0)]])
+ c = np.hstack([arc.codes, [connector, connector, Path.CLOSEPOLY]])
+
+ # Shift and scale the wedge to the final location.
+ v *= self.r
+ v += np.asarray(self.center)
+ self._path = Path(v, c)
+
+ def set_center(self, center):
+ self._path = None
+ self.center = center
+ self.stale = True
+
+ def set_radius(self, radius):
+ self._path = None
+ self.r = radius
+ self.stale = True
+
+ def set_theta1(self, theta1):
+ self._path = None
+ self.theta1 = theta1
+ self.stale = True
+
+ def set_theta2(self, theta2):
+ self._path = None
+ self.theta2 = theta2
+ self.stale = True
+
+ def set_width(self, width):
+ self._path = None
+ self.width = width
+ self.stale = True
+
+ def get_path(self):
+ if self._path is None:
+ self._recompute_path()
+ return self._path
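+
+
+# Illustrative usage sketch (not part of the upstream matplotlib source):
+# passing width turns the wedge into a partial annulus (ring segment)
+# spanning theta1..theta2 in degrees.
+def _demo_wedge(ax):
+    ring = Wedge((0.5, 0.5), r=0.4, theta1=30, theta2=300, width=0.1, fc="g")
+    ax.add_patch(ring)
+    return ring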
+
+
+# COVERAGE NOTE: Not used internally or from examples
+class Arrow(Patch):
+ """
+ An arrow patch.
+ """
+ def __str__(self):
+ return "Arrow()"
+
+ _path = Path([[0.0, 0.1], [0.0, -0.1],
+ [0.8, -0.1], [0.8, -0.3],
+ [1.0, 0.0], [0.8, 0.3],
+ [0.8, 0.1], [0.0, 0.1]],
+ closed=True)
+
+ @docstring.dedent_interpd
+ def __init__(self, x, y, dx, dy, width=1.0, **kwargs):
+ """
+ Draws an arrow from (*x*, *y*) to (*x* + *dx*, *y* + *dy*).
+ The width of the arrow is scaled by *width*.
+
+ Parameters
+ ----------
+ x : scalar
+ x coordinate of the arrow tail
+ y : scalar
+ y coordinate of the arrow tail
+ dx : scalar
+ Arrow length in the x direction
+ dy : scalar
+ Arrow length in the y direction
+ width : scalar, optional (default: 1)
+ Scale factor for the width of the arrow. With a default value of
+ 1, the tail width is 0.2 and head width is 0.6.
+ **kwargs :
+ Keyword arguments control the :class:`~matplotlib.patches.Patch`
+ properties:
+
+ %(Patch)s
+
+ See Also
+ --------
+ :class:`FancyArrow` :
+ Patch that allows independent control of the head and tail
+ properties
+ """
+ Patch.__init__(self, **kwargs)
+ L = np.hypot(dx, dy)
+
+ if L != 0:
+ cx = dx / L
+ sx = dy / L
+ else:
+ # Account for division by zero
+ cx, sx = 0, 1
+
+ trans1 = transforms.Affine2D().scale(L, width)
+ trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, 0.0, 0.0)
+ trans3 = transforms.Affine2D().translate(x, y)
+ trans = trans1 + trans2 + trans3
+ self._patch_transform = trans.frozen()
+
+ def get_path(self):
+ return self._path
+
+ def get_patch_transform(self):
+ return self._patch_transform
+
+
+class FancyArrow(Polygon):
+ """
+ Like Arrow, but lets you set head width and head height independently.
+ """
+
+ _edge_default = True
+
+ def __str__(self):
+ return "FancyArrow()"
+
+ @docstring.dedent_interpd
+ def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,
+ head_width=None, head_length=None, shape='full', overhang=0,
+ head_starts_at_zero=False, **kwargs):
+ """
+ Constructor arguments
+ *width*: float (default: 0.001)
+ width of full arrow tail
+
+ *length_includes_head*: bool (default: False)
+ True if head is to be counted in calculating the length.
+
+ *head_width*: float or None (default: 3*width)
+ total width of the full arrow head
+
+ *head_length*: float or None (default: 1.5 * head_width)
+ length of arrow head
+
+ *shape*: ['full', 'left', 'right'] (default: 'full')
+ draw the left-half, right-half, or full arrow
+
+ *overhang*: float (default: 0)
+ fraction that the arrow is swept back (0 overhang means
+ triangular shape). Can be negative or greater than one.
+
+ *head_starts_at_zero*: bool (default: False)
+ if True, the head starts being drawn at coordinate 0
+ instead of ending at coordinate 0.
+
+ Other valid kwargs (inherited from :class:`Patch`) are:
+ %(Patch)s
+
+ """
+ if head_width is None:
+ head_width = 3 * width
+ if head_length is None:
+ head_length = 1.5 * head_width
+
+ distance = np.hypot(dx, dy)
+
+ if length_includes_head:
+ length = distance
+ else:
+ length = distance + head_length
+ if not length:
+ verts = [] # display nothing if empty
+ else:
+ # start by drawing horizontal arrow, point at (0,0)
+ hw, hl, hs, lw = head_width, head_length, overhang, width
+ left_half_arrow = np.array([
+ [0.0, 0.0], # tip
+ [-hl, -hw / 2.0], # leftmost
+ [-hl * (1 - hs), -lw / 2.0], # meets stem
+ [-length, -lw / 2.0], # bottom left
+ [-length, 0],
+ ])
+ # if we're not including the head, shift up by head length
+ if not length_includes_head:
+ left_half_arrow += [head_length, 0]
+ # if the head starts at 0, shift up by another head length
+ if head_starts_at_zero:
+ left_half_arrow += [head_length / 2.0, 0]
+ # figure out the shape, and complete accordingly
+ if shape == 'left':
+ coords = left_half_arrow
+ else:
+ right_half_arrow = left_half_arrow * [1, -1]
+ if shape == 'right':
+ coords = right_half_arrow
+ elif shape == 'full':
+ # The half-arrows contain the midpoint of the stem,
+ # which we can omit from the full arrow. Including it
+ # twice caused a problem with xpdf.
+ coords = np.concatenate([left_half_arrow[:-1],
+ right_half_arrow[-2::-1]])
+ else:
+ raise ValueError("Got unknown shape: %s" % shape)
+ if distance != 0:
+ cx = dx / distance
+ sx = dy / distance
+ else:
+ # Account for division by zero
+ cx, sx = 0, 1
+ M = [[cx, sx], [-sx, cx]]
+ verts = np.dot(coords, M) + (x + dx, y + dy)
+
+ Polygon.__init__(self, list(map(tuple, verts)), closed=True, **kwargs)
+
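+ # Usage sketch (assumes the usual pyplot workflow): ``Axes.arrow`` is the
+ # common entry point and forwards its keyword arguments to FancyArrow:
+ #
+ #     import matplotlib.pyplot as plt
+ #     fig, ax = plt.subplots()
+ #     ax.arrow(0.1, 0.1, 0.6, 0.4, width=0.02, head_width=0.08,
+ #              head_length=0.1, length_includes_head=True)
+ #     plt.show()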
+
+docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})
+
+
+class YAArrow(Patch):
+ """
+ Yet another arrow class.
+
+ This is an arrow that is defined in display space and has a tip at
+ *x1*, *y1* and a base at *x2*, *y2*.
+ """
+ def __str__(self):
+ return "YAArrow()"
+
+ @docstring.dedent_interpd
+ def __init__(self, figure, xytip, xybase,
+ width=4, frac=0.1, headwidth=12, **kwargs):
+ """
+ Constructor arguments:
+
+ *xytip*
+ (*x*, *y*) location of arrow tip
+
+ *xybase*
+ (*x*, *y*) location the arrow base mid point
+
+ *figure*
+ The :class:`~matplotlib.figure.Figure` instance
+ (fig.dpi)
+
+ *width*
+ The width of the arrow in points
+
+ *frac*
+ The fraction of the arrow length occupied by the head
+
+ *headwidth*
+ The width of the base of the arrow head in points
+
+ Valid kwargs are:
+ %(Patch)s
+
+ """
+ self.xytip = xytip
+ self.xybase = xybase
+ self.width = width
+ self.frac = frac
+ self.headwidth = headwidth
+ Patch.__init__(self, **kwargs)
+ # Set self.figure after Patch.__init__, since it sets self.figure to
+ # None
+ self.figure = figure
+
+ def get_path(self):
+ # Since this is dpi dependent, we need to recompute the path
+ # every time.
+
+ # the base vertices
+ x1, y1 = self.xytip
+ x2, y2 = self.xybase
+ k1 = self.width * self.figure.dpi / 72. / 2.
+ k2 = self.headwidth * self.figure.dpi / 72. / 2.
+ xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)
+
+ # a point on the segment 20% of the distance from the tip to the base
+ theta = math.atan2(y2 - y1, x2 - x1)
+ r = math.sqrt((y2 - y1) ** 2. + (x2 - x1) ** 2.)
+ xm = x1 + self.frac * r * math.cos(theta)
+ ym = y1 + self.frac * r * math.sin(theta)
+ xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
+ xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)
+
+ xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
+ ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])
+
+ return Path(np.column_stack([xs, ys]), closed=True)
+
+ def get_patch_transform(self):
+ return transforms.IdentityTransform()
+
+ def getpoints(self, x1, y1, x2, y2, k):
+ """
+ For the line segment defined by (*x1*, *y1*) and (*x2*, *y2*),
+ return the two points on the line perpendicular to that segment
+ and passing through (*x2*, *y2*), at distance *k* from (*x2*, *y2*).
+ """
+ x1, y1, x2, y2, k = map(float, (x1, y1, x2, y2, k))
+
+ if y2 - y1 == 0:
+ return x2, y2 + k, x2, y2 - k
+ elif x2 - x1 == 0:
+ return x2 + k, y2, x2 - k, y2
+
+ m = (y2 - y1) / (x2 - x1)
+ pm = -1. / m
+ a = 1
+ b = -2 * y2
+ c = y2 ** 2. - k ** 2. * pm ** 2. / (1. + pm ** 2.)
+
+ y3a = (-b + math.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
+ x3a = (y3a - y2) / pm + x2
+
+ y3b = (-b - math.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
+ x3b = (y3b - y2) / pm + x2
+ return x3a, y3a, x3b, y3b
+
+
+class CirclePolygon(RegularPolygon):
+ """
+ A polygon-approximation of a circle patch.
+ """
+ def __str__(self):
+ return "CirclePolygon(%d,%d)" % self.center
+
+ @docstring.dedent_interpd
+ def __init__(self, xy, radius=5,
+ resolution=20, # the number of vertices
+ ** kwargs):
+ """
+ Create a circle at *xy* = (*x*, *y*) with given *radius*.
+ This circle is approximated by a regular polygon with
+ *resolution* sides. For a smoother circle drawn with splines,
+ see :class:`~matplotlib.patches.Circle`.
+
+ Valid kwargs are:
+ %(Patch)s
+
+ """
+ RegularPolygon.__init__(self, xy,
+ resolution,
+ radius,
+ orientation=0,
+ **kwargs)
+
+
+class Ellipse(Patch):
+ """
+ A scale-free ellipse.
+ """
+ def __str__(self):
+ pars = (self.center[0], self.center[1],
+ self.width, self.height, self.angle)
+ fmt = "Ellipse(xy=(%s, %s), width=%s, height=%s, angle=%s)"
+ return fmt % pars
+
+ @docstring.dedent_interpd
+ def __init__(self, xy, width, height, angle=0.0, **kwargs):
+ """
+ *xy*
+ center of ellipse
+
+ *width*
+ total length (diameter) of horizontal axis
+
+ *height*
+ total length (diameter) of vertical axis
+
+ *angle*
+ rotation in degrees (anti-clockwise)
+
+ Valid kwargs are:
+ %(Patch)s
+ """
+ Patch.__init__(self, **kwargs)
+
+ self.center = xy
+ self.width, self.height = width, height
+ self.angle = angle
+ self._path = Path.unit_circle()
+ # Note: This cannot be calculated until this is added to an Axes
+ self._patch_transform = transforms.IdentityTransform()
+
+ def _recompute_transform(self):
+ """NOTE: This cannot be called until after this has been added
+ to an Axes, otherwise unit conversion will fail. This
+ makes it very important to call the accessor method and
+ not directly access the transformation member variable.
+ """
+ center = (self.convert_xunits(self.center[0]),
+ self.convert_yunits(self.center[1]))
+ width = self.convert_xunits(self.width)
+ height = self.convert_yunits(self.height)
+ self._patch_transform = transforms.Affine2D() \
+ .scale(width * 0.5, height * 0.5) \
+ .rotate_deg(self.angle) \
+ .translate(*center)
+
+ def get_path(self):
+ """
+ Return the path of the ellipse
+ """
+ return self._path
+
+ def get_patch_transform(self):
+ self._recompute_transform()
+ return self._patch_transform
+
+
+class Circle(Ellipse):
+ """
+ A circle patch.
+ """
+ def __str__(self):
+ pars = self.center[0], self.center[1], self.radius
+ fmt = "Circle(xy=(%g, %g), radius=%g)"
+ return fmt % pars
+
+ @docstring.dedent_interpd
+ def __init__(self, xy, radius=5, **kwargs):
+ """
+ Create true circle at center *xy* = (*x*, *y*) with given
+ *radius*. Unlike :class:`~matplotlib.patches.CirclePolygon`
+ which is a polygonal approximation, this uses Bézier splines
+ and is much closer to a scale-free circle.
+
+ Valid kwargs are:
+ %(Patch)s
+
+ """
+ Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs)
+ self.radius = radius
+
+ def set_radius(self, radius):
+ """
+ Set the radius of the circle
+
+ ACCEPTS: float
+ """
+ self.width = self.height = 2 * radius
+ self.stale = True
+
+ def get_radius(self):
+ 'return the radius of the circle'
+ return self.width / 2.
+
+ radius = property(get_radius, set_radius)
+
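+ # Usage sketch contrasting the two circle patches (assumes the usual
+ # pyplot workflow):
+ #
+ #     import matplotlib.pyplot as plt
+ #     from matplotlib.patches import Circle, CirclePolygon
+ #     fig, ax = plt.subplots()
+ #     ax.add_patch(Circle((0.3, 0.5), 0.2))                       # spline-based
+ #     ax.add_patch(CirclePolygon((0.7, 0.5), 0.2, resolution=8))  # octagon
+ #     ax.set_aspect('equal')
+ #     plt.show()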
+
+class Arc(Ellipse):
+ """
+ An elliptical arc. Because it performs various optimizations, it
+ can not be filled.
+
+ The arc must be used in an :class:`~matplotlib.axes.Axes`
+ instance---it can not be added directly to a
+ :class:`~matplotlib.figure.Figure`---because it is optimized to
+ only render the segments that are inside the axes bounding box
+ with high resolution.
+ """
+ def __str__(self):
+ pars = (self.center[0], self.center[1], self.width,
+ self.height, self.angle, self.theta1, self.theta2)
+ fmt = ("Arc(xy=(%g, %g), width=%g, "
+ "height=%g, angle=%g, theta1=%g, theta2=%g)")
+ return fmt % pars
+
+ @docstring.dedent_interpd
+ def __init__(self, xy, width, height, angle=0.0,
+ theta1=0.0, theta2=360.0, **kwargs):
+ """
+ The following args are supported:
+
+ *xy*
+ center of ellipse
+
+ *width*
+ length of horizontal axis
+
+ *height*
+ length of vertical axis
+
+ *angle*
+ rotation in degrees (anti-clockwise)
+
+ *theta1*
+ starting angle of the arc in degrees
+
+ *theta2*
+ ending angle of the arc in degrees
+
+ If *theta1* and *theta2* are not provided, the arc will form a
+ complete ellipse.
+
+ Valid kwargs are:
+
+ %(Patch)s
+ """
+ fill = kwargs.setdefault('fill', False)
+ if fill:
+ raise ValueError("Arc objects can not be filled")
+
+ Ellipse.__init__(self, xy, width, height, angle, **kwargs)
+
+ self.theta1 = theta1
+ self.theta2 = theta2
+
+ @artist.allow_rasterization
+ def draw(self, renderer):
+ """
+ Ellipses are normally drawn using an approximation that uses
+ eight cubic bezier splines. The error of this approximation
+ is 1.89818e-6, according to this unverified source:
+
+ Lancaster, Don. Approximating a Circle or an Ellipse Using
+ Four Bezier Cubic Splines.
+
+ http://www.tinaja.com/glib/ellipse4.pdf
+
+ There is a use case where very large ellipses must be drawn
+ with very high accuracy, and it is too expensive to render the
+ entire ellipse with enough segments (either splines or line
+ segments). Therefore, in the case where either radius of the
+ ellipse is large enough that the error of the spline
+ approximation will be visible (greater than one pixel offset
+ from the ideal), a different technique is used.
+
+ In that case, only the visible parts of the ellipse are drawn,
+ with each visible arc using a fixed number of spline segments
+ (8). The algorithm proceeds as follows:
+
+ 1. The points where the ellipse intersects the axes bounding
+ box are located. (This is done by performing an inverse
+ transformation on the axes bbox such that it is relative
+ to the unit circle -- this makes the intersection
+ calculation much easier than doing rotated ellipse
+ intersection directly).
+
+ This uses the "line intersecting a circle" algorithm
+ from:
+
+ Vince, John. Geometry for Computer Graphics: Formulae,
+ Examples & Proofs. London: Springer-Verlag, 2005.
+
+ 2. The angles of each of the intersection points are
+ calculated.
+
+ 3. Proceeding counterclockwise starting in the positive
+ x-direction, each of the visible arc-segments between the
+ pairs of vertices are drawn using the bezier arc
+ approximation technique implemented in
+ :meth:`matplotlib.path.Path.arc`.
+ """
+ if not hasattr(self, 'axes'):
+ raise RuntimeError('Arcs can only be used in Axes instances')
+
+ self._recompute_transform()
+
+ width = self.convert_xunits(self.width)
+ height = self.convert_yunits(self.height)
+
+ # If the width and height of ellipse are not equal, take into account
+ # stretching when calculating angles to draw between
+ def theta_stretch(theta, scale):
+ theta = np.deg2rad(theta)
+ x = np.cos(theta)
+ y = np.sin(theta)
+ return np.rad2deg(np.arctan2(scale * y, x))
+ theta1 = theta_stretch(self.theta1, width / height)
+ theta2 = theta_stretch(self.theta2, width / height)
+
+ # Get width and height in pixels
+ width, height = self.get_transform().transform_point((width, height))
+ inv_error = (1.0 / 1.89818e-6) * 0.5
+ if width < inv_error and height < inv_error:
+ self._path = Path.arc(theta1, theta2)
+ return Patch.draw(self, renderer)
+
+ def iter_circle_intersect_on_line(x0, y0, x1, y1):
+ dx = x1 - x0
+ dy = y1 - y0
+ dr2 = dx * dx + dy * dy
+ D = x0 * y1 - x1 * y0
+ D2 = D * D
+ discrim = dr2 - D2
+
+ # Single (tangential) intersection
+ if discrim == 0.0:
+ x = (D * dy) / dr2
+ y = (-D * dx) / dr2
+ yield x, y
+ elif discrim > 0.0:
+ # The definition of "sign" here is different from
+ # np.sign: we never want to get 0.0
+ if dy < 0.0:
+ sign_dy = -1.0
+ else:
+ sign_dy = 1.0
+ sqrt_discrim = np.sqrt(discrim)
+ for sign in (1., -1.):
+ x = (D * dy + sign * sign_dy * dx * sqrt_discrim) / dr2
+ y = (-D * dx + sign * np.abs(dy) * sqrt_discrim) / dr2
+ yield x, y
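+ # Example: for the vertical segment x = 0.5 running from y = -2 to
+ # y = 2, dx = 0, dy = 4, dr2 = 16 and D = 0.5*2 - 0.5*(-2) = 2, so
+ # discrim = 16 - 4 = 12 > 0 and the two intersections with the unit
+ # circle come out as (0.5, 0.866...) and (0.5, -0.866...), i.e. the
+ # circle is crossed at theta = 60 and 300 degrees.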
+
+ def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
+ epsilon = 1e-9
+ if x1 < x0:
+ x0e, x1e = x1, x0
+ else:
+ x0e, x1e = x0, x1
+ if y1 < y0:
+ y0e, y1e = y1, y0
+ else:
+ y0e, y1e = y0, y1
+ x0e -= epsilon
+ y0e -= epsilon
+ x1e += epsilon
+ y1e += epsilon
+ for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
+ if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
+ yield x, y
+
+ # Transforms the axes box_path so that it is relative to the unit
+ # circle in the same way that it is relative to the desired
+ # ellipse.
+ box_path = Path.unit_rectangle()
+ box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
+ self.get_transform().inverted()
+ box_path = box_path.transformed(box_path_transform)
+
+ thetas = set()
+ # For each of the point pairs, there is a line segment
+ for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
+ x0, y0 = p0
+ x1, y1 = p1
+ for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
+ theta = np.arccos(x)
+ if y < 0:
+ theta = 2 * np.pi - theta
+ # Convert radians to degrees
+ theta = np.rad2deg(theta)
+ if theta1 < theta < theta2:
+ thetas.add(theta)
+ thetas = sorted(thetas) + [theta2]
+
+ last_theta = theta1
+ theta1_rad = np.deg2rad(theta1)
+ inside = box_path.contains_point((np.cos(theta1_rad),
+ np.sin(theta1_rad)))
+
+ # save original path
+ path_original = self._path
+ for theta in thetas:
+ if inside:
+ self._path = Path.arc(last_theta, theta, 8)
+ Patch.draw(self, renderer)
+ inside = False
+ else:
+ inside = True
+ last_theta = theta
+
+ # restore original path
+ self._path = path_original
+
+
+def bbox_artist(artist, renderer, props=None, fill=True):
+ """
+ This is a debug function to draw a rectangle around the bounding
+ box returned by
+ :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
+ to test whether the artist is returning the correct bbox.
+
+ *props* is a dict of rectangle props with the additional property
+ 'pad' that sets the padding around the bbox in points.
+ """
+ if props is None:
+ props = {}
+ props = props.copy() # don't want to alter the pad externally
+ pad = props.pop('pad', 4)
+ pad = renderer.points_to_pixels(pad)
+ bbox = artist.get_window_extent(renderer)
+ l, b, w, h = bbox.bounds
+ l -= pad / 2.
+ b -= pad / 2.
+ w += pad
+ h += pad
+ r = Rectangle(xy=(l, b),
+ width=w,
+ height=h,
+ fill=fill,
+ )
+ r.set_transform(transforms.IdentityTransform())
+ r.set_clip_on(False)
+ r.update(props)
+ r.draw(renderer)
+
+
+def draw_bbox(bbox, renderer, color='k', trans=None):
+ """
+ This is a debug function to draw a rectangle around the bounding
+ box returned by
+ :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
+ to test whether the artist is returning the correct bbox.
+ """
+
+ l, b, w, h = bbox.bounds
+ r = Rectangle(xy=(l, b),
+ width=w,
+ height=h,
+ edgecolor=color,
+ fill=False,
+ )
+ if trans is not None:
+ r.set_transform(trans)
+ r.set_clip_on(False)
+ r.draw(renderer)
+
+
+def _pprint_table(_table, leadingspace=2):
+ """
+ Given a list of lists of strings, return a string in reST table format.
+ """
+ if leadingspace:
+ pad = ' ' * leadingspace
+ else:
+ pad = ''
+
+ columns = [[] for cell in _table[0]]
+
+ for row in _table:
+ for column, cell in zip(columns, row):
+ column.append(cell)
+
+ col_len = [max(len(cell) for cell in column) for column in columns]
+
+ lines = []
+ table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])
+
+ lines.append('')
+ lines.append(table_formatstr)
+ lines.append(pad + ' '.join([cell.ljust(cl)
+ for cell, cl
+ in zip(_table[0], col_len)]))
+ lines.append(table_formatstr)
+
+ lines.extend([(pad + ' '.join([cell.ljust(cl)
+ for cell, cl
+ in zip(row, col_len)]))
+ for row in _table[1:]])
+
+ lines.append(table_formatstr)
+ lines.append('')
+ return "\n".join(lines)
+
+
+def _pprint_styles(_styles):
+ """
+ A helper function for the _Style class. Given the dictionary of
+ (stylename : styleclass), return a formatted string listing all the
+ styles. Used to update the documentation.
+ """
+ import inspect
+
+ _table = [["Class", "Name", "Attrs"]]
+
+ for name, cls in sorted(_styles.items()):
+ if six.PY2:
+ args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
+ else:
+ (args, varargs, varkw, defaults, kwonlyargs, kwonlydefs,
+ annotations) = inspect.getfullargspec(cls.__init__)
+ if defaults:
+ args = [(argname, argdefault)
+ for argname, argdefault in zip(args[1:], defaults)]
+ else:
+ args = None
+
+ if args is None:
+ argstr = 'None'
+ else:
+ argstr = ",".join([("%s=%s" % (an, av))
+ for an, av
+ in args])
+
+ # adding ``quotes`` since - and | have special meaning in reST
+ _table.append([cls.__name__, "``%s``" % name, argstr])
+
+ return _pprint_table(_table)
+
+
+def _simpleprint_styles(_styles):
+ """
+ A helper function for the _Style class. Given the dictionary of
+ (stylename : styleclass), return a string rep of the list of keys.
+ Used to update the documentation.
+ """
+ return "[{}]".format("|".join(map(" '{}' ".format, sorted(_styles))))
+
+
+class _Style(object):
+ """
+ A base class for the Styles. It is meant to be a container class,
+ where actual styles are declared as subclass of it, and it
+ provides some helper functions.
+ """
+ def __new__(self, stylename, **kw):
+ """
+ return the instance of the subclass with the given style name.
+ """
+
+ # the "class" should have the _style_list attribute, which is
+ # a dictionary of stylename, style class pairs.
+
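+ # For instance, BoxStyle("round, pad=0.2") is split into
+ # ["round", "pad=0.2"], "round" is looked up in _style_list, and the
+ # call resolves to BoxStyle.Round(pad=0.2); keyword arguments passed
+ # directly to this constructor override the ones parsed from the string.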
+ _list = stylename.replace(" ", "").split(",")
+ _name = _list[0].lower()
+ try:
+ _cls = self._style_list[_name]
+ except KeyError:
+ raise ValueError("Unknown style : %s" % stylename)
+
+ try:
+ _args_pair = [cs.split("=") for cs in _list[1:]]
+ _args = {k: float(v) for k, v in _args_pair}
+ except ValueError:
+ raise ValueError("Incorrect style argument : %s" % stylename)
+ _args.update(kw)
+
+ return _cls(**_args)
+
+ @classmethod
+ def get_styles(klass):
+ """
+ A class method which returns a dictionary of available styles.
+ """
+ return klass._style_list
+
+ @classmethod
+ def pprint_styles(klass):
+ """
+ A class method which returns a string of the available styles.
+ """
+ return _pprint_styles(klass._style_list)
+
+ @classmethod
+ def register(klass, name, style):
+ """
+ Register a new style.
+ """
+
+ if not issubclass(style, klass._Base):
+ raise ValueError("%s must be a subclass of %s" % (style,
+ klass._Base))
+ klass._style_list[name] = style
+
+
+class BoxStyle(_Style):
+ """
+ :class:`BoxStyle` is a container class which defines several
+ boxstyle classes, which are used for :class:`FancyBboxPatch`.
+
+ A style object can be created as::
+
+ BoxStyle.Round(pad=0.2)
+
+ or::
+
+ BoxStyle("Round", pad=0.2)
+
+ or::
+
+ BoxStyle("Round, pad=0.2")
+
+ The following boxstyle classes are defined.
+
+ %(AvailableBoxstyles)s
+
+ An instance of any boxstyle class is a callable object,
+ whose call signature is::
+
+ __call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
+
+ and returns a :class:`Path` instance. *x0*, *y0*, *width* and
+ *height* specify the location and size of the box to be
+ drawn. *mutation_size* determines the overall size of the
+ mutation (that is, the transformation of the rectangle into
+ the fancy box). *aspect_ratio* determines the aspect ratio of
+ the mutation.
+ """
+
+ _style_list = {}
+
+ class _Base(object):
+ """
+ :class:`BBoxTransmuterBase` and its derivatives are used to make a
+ fancy box around a given rectangle. The :meth:`__call__` method
+ returns the :class:`~matplotlib.path.Path` of the fancy box. This
+ class is not an artist and actual drawing of the fancy box is done
+ by the :class:`FancyBboxPatch` class.
+ """
+
+ # The derived classes are required to be able to be initialized
+ # w/o arguments, i.e., all its argument (except self) must have
+ # the default values.
+
+ def __init__(self):
+ """
+ Initialization.
+ """
+ super(BoxStyle._Base, self).__init__()
+
+ def transmute(self, x0, y0, width, height, mutation_size):
+ """
+ The transmute method is the very core of the
+ :class:`BboxTransmuter` class and must be overridden in the
+ subclasses. It receives the location and size of the
+ rectangle, and the mutation_size, with which the amount of
+ padding and etc. will be scaled. It returns a
+ :class:`~matplotlib.path.Path` instance.
+ """
+ raise NotImplementedError('Derived must override')
+
+ def __call__(self, x0, y0, width, height, mutation_size,
+ aspect_ratio=1.):
+ """
+ Given the location and size of the box, return the path of
+ the box around it.
+
+ - *x0*, *y0*, *width*, *height* : location and size of the box
+ - *mutation_size* : a reference scale for the mutation.
+ - *aspect_ratio* : aspect ratio for the mutation.
+ """
+ # The __call__ method is a thin wrapper around the transmute method
+ # and takes care of the aspect ratio.
+
+ if aspect_ratio is not None:
+ # Squeeze the given height by the aspect_ratio
+ y0, height = y0 / aspect_ratio, height / aspect_ratio
+ # call transmute method with squeezed height.
+ path = self.transmute(x0, y0, width, height, mutation_size)
+ vertices, codes = path.vertices, path.codes
+ # Restore the height
+ vertices[:, 1] = vertices[:, 1] * aspect_ratio
+ return Path(vertices, codes)
+ else:
+ return self.transmute(x0, y0, width, height, mutation_size)
+
+ def __reduce__(self):
+ # because we have decided to nest these classes, we need to
+ # add some more information to allow instance pickling.
+ return (cbook._NestedClassGetter(),
+ (BoxStyle, self.__class__.__name__),
+ self.__dict__
+ )
+
+ class Square(_Base):
+ """
+ A simple square box.
+ """
+
+ def __init__(self, pad=0.3):
+ """
+ *pad*
+ amount of padding
+ """
+
+ self.pad = pad
+ super(BoxStyle.Square, self).__init__()
+
+ def transmute(self, x0, y0, width, height, mutation_size):
+ pad = mutation_size * self.pad
+
+ # width and height with padding added.
+ width, height = width + 2*pad, height + 2*pad
+
+ # boundary of the padded box
+ x0, y0 = x0 - pad, y0 - pad,
+ x1, y1 = x0 + width, y0 + height
+
+ vertices = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
+ codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
+ return Path(vertices, codes)
+
+ _style_list["square"] = Square
+
+ class Circle(_Base):
+ """A simple circle box."""
+ def __init__(self, pad=0.3):
+ """
+ Parameters
+ ----------
+ pad : float
+ The amount of padding around the original box.
+ """
+ self.pad = pad
+ super(BoxStyle.Circle, self).__init__()
+
+ def transmute(self, x0, y0, width, height, mutation_size):
+ pad = mutation_size * self.pad
+ width, height = width + 2 * pad, height + 2 * pad
+
+ # boundary of the padded box
+ x0, y0 = x0 - pad, y0 - pad,
+ return Path.circle((x0 + width / 2, y0 + height / 2),
+ max(width, height) / 2)
+
+ _style_list["circle"] = Circle
+
+ class LArrow(_Base):
+ """
+ (left) Arrow Box
+ """
+ def __init__(self, pad=0.3):
+ self.pad = pad
+ super(BoxStyle.LArrow, self).__init__()
+
+ def transmute(self, x0, y0, width, height, mutation_size):
+ # padding
+ pad = mutation_size * self.pad
+
+ # width and height with padding added.
+ width, height = width + 2. * pad, height + 2. * pad
+
+ # boundary of the padded box
+ x0, y0 = x0 - pad, y0 - pad,
+ x1, y1 = x0 + width, y0 + height
+
+ dx = (y1 - y0) / 2.
+ dxx = dx * .5
+ # adjust x0. 1.4 <- sqrt(2)
+ x0 = x0 + pad / 1.4
+
+ cp = [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
+ (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
+ (x0 + dxx, y0 - dxx), # arrow
+ (x0 + dxx, y0), (x0 + dxx, y0)]
+
+ com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.CLOSEPOLY]
+
+ path = Path(cp, com)
+
+ return path
+ _style_list["larrow"] = LArrow
+
+ class RArrow(LArrow):
+ """
+ (right) Arrow Box
+ """
+
+ def __init__(self, pad=0.3):
+ super(BoxStyle.RArrow, self).__init__(pad)
+
+ def transmute(self, x0, y0, width, height, mutation_size):
+
+ p = BoxStyle.LArrow.transmute(self, x0, y0,
+ width, height, mutation_size)
+
+ p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]
+
+ return p
+
+ _style_list["rarrow"] = RArrow
+
+ class DArrow(_Base):
+ """
+ (Double) Arrow Box
+ """
+ # This source is copied from LArrow,
+ # modified to add a right arrow to the bbox.
+
+ def __init__(self, pad=0.3):
+ self.pad = pad
+ super(BoxStyle.DArrow, self).__init__()
+
+ def transmute(self, x0, y0, width, height, mutation_size):
+
+ # padding
+ pad = mutation_size * self.pad
+
+ # width and height with padding added.
+ # The width is padded by the arrows, so we don't need to pad it.
+ height = height + 2. * pad
+
+ # boundary of the padded box
+ x0, y0 = x0 - pad, y0 - pad
+ x1, y1 = x0 + width, y0 + height
+
+ dx = (y1 - y0)/2.
+ dxx = dx * .5
+ # adjust x0. 1.4 <- sqrt(2)
+ x0 = x0 + pad / 1.4
+
+ cp = [(x0 + dxx, y0), (x1, y0), # bot-segment
+ (x1, y0 - dxx), (x1 + dx + dxx, y0 + dx),
+ (x1, y1 + dxx), # right-arrow
+ (x1, y1), (x0 + dxx, y1), # top-segment
+ (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
+ (x0 + dxx, y0 - dxx), # left-arrow
+ (x0 + dxx, y0), (x0 + dxx, y0)] # close-poly
+
+ com = [Path.MOVETO, Path.LINETO,
+ Path.LINETO, Path.LINETO,
+ Path.LINETO,
+ Path.LINETO, Path.LINETO,
+ Path.LINETO, Path.LINETO,
+ Path.LINETO,
+ Path.LINETO, Path.CLOSEPOLY]
+
+ path = Path(cp, com)
+
+ return path
+
+ _style_list['darrow'] = DArrow
+
+ class Round(_Base):
+ """
+ A box with round corners.
+ """
+
+ def __init__(self, pad=0.3, rounding_size=None):
+ """
+ *pad*
+ amount of padding
+
+ *rounding_size*
+ rounding radius of the corners; defaults to *pad* if None
+ """
+ self.pad = pad
+ self.rounding_size = rounding_size
+ super(BoxStyle.Round, self).__init__()
+
+ def transmute(self, x0, y0, width, height, mutation_size):
+
+ # padding
+ pad = mutation_size * self.pad
+
+ # size of the rounding corner
+ if self.rounding_size:
+ dr = mutation_size * self.rounding_size
+ else:
+ dr = pad
+
+ width, height = width + 2. * pad, height + 2. * pad
+
+ x0, y0 = x0 - pad, y0 - pad,
+ x1, y1 = x0 + width, y0 + height
+
+ # Round corners are implemented as quadratic bezier. e.g.,
+ # [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
+ cp = [(x0 + dr, y0),
+ (x1 - dr, y0),
+ (x1, y0), (x1, y0 + dr),
+ (x1, y1 - dr),
+ (x1, y1), (x1 - dr, y1),
+ (x0 + dr, y1),
+ (x0, y1), (x0, y1 - dr),
+ (x0, y0 + dr),
+ (x0, y0), (x0 + dr, y0),
+ (x0 + dr, y0)]
+
+ com = [Path.MOVETO,
+ Path.LINETO,
+ Path.CURVE3, Path.CURVE3,
+ Path.LINETO,
+ Path.CURVE3, Path.CURVE3,
+ Path.LINETO,
+ Path.CURVE3, Path.CURVE3,
+ Path.LINETO,
+ Path.CURVE3, Path.CURVE3,
+ Path.CLOSEPOLY]
+
+ path = Path(cp, com)
+
+ return path
+
+ _style_list["round"] = Round
+
+ class Round4(_Base):
+ """
+ Another box with round edges.
+ """
+
+ def __init__(self, pad=0.3, rounding_size=None):
+ """
+ *pad*
+ amount of padding
+
+ *rounding_size*
+ rounding size of the edges; defaults to half of *pad* if None
+ """
+
+ self.pad = pad
+ self.rounding_size = rounding_size
+ super(BoxStyle.Round4, self).__init__()
+
+ def transmute(self, x0, y0, width, height, mutation_size):
+
+ # padding
+ pad = mutation_size * self.pad
+
+ # rounding size; use half of the pad if not set.
+ if self.rounding_size:
+ dr = mutation_size * self.rounding_size
+ else:
+ dr = pad / 2.
+
+ width, height = (width + 2. * pad - 2 * dr,
+ height + 2. * pad - 2 * dr)
+
+ x0, y0 = x0 - pad + dr, y0 - pad + dr,
+ x1, y1 = x0 + width, y0 + height
+
+ cp = [(x0, y0),
+ (x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
+ (x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
+ (x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
+ (x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
+ (x0, y0)]
+
+ com = [Path.MOVETO,
+ Path.CURVE4, Path.CURVE4, Path.CURVE4,
+ Path.CURVE4, Path.CURVE4, Path.CURVE4,
+ Path.CURVE4, Path.CURVE4, Path.CURVE4,
+ Path.CURVE4, Path.CURVE4, Path.CURVE4,
+ Path.CLOSEPOLY]
+
+ path = Path(cp, com)
+
+ return path
+
+ _style_list["round4"] = Round4
+
+ class Sawtooth(_Base):
+ """
+ A sawtooth box.
+ """
+
+ def __init__(self, pad=0.3, tooth_size=None):
+ """
+ *pad*
+ amount of padding
+
+ *tooth_size*
+ size of the sawtooth; defaults to half of *pad* if None
+ """
+ self.pad = pad
+ self.tooth_size = tooth_size
+ super(BoxStyle.Sawtooth, self).__init__()
+
+ def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
+
+ # padding
+ pad = mutation_size * self.pad
+
+ # size of sawtooth
+ if self.tooth_size is None:
+ tooth_size = self.pad * .5 * mutation_size
+ else:
+ tooth_size = self.tooth_size * mutation_size
+
+ tooth_size2 = tooth_size / 2.
+ width, height = (width + 2. * pad - tooth_size,
+ height + 2. * pad - tooth_size)
+
+ # the sizes of the vertical and horizontal sawtooth are
+ # separately adjusted to fit the given box size.
+ dsx_n = int(np.round((width - tooth_size) / (tooth_size * 2))) * 2
+ dsx = (width - tooth_size) / dsx_n
+ dsy_n = int(np.round((height - tooth_size) / (tooth_size * 2))) * 2
+ dsy = (height - tooth_size) / dsy_n
+
+ x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2
+ x1, y1 = x0 + width, y0 + height
+
+ bottom_saw_x = [x0] + \
+ [x0 + tooth_size2 + dsx * .5 * i
+ for i
+ in range(dsx_n * 2)] + \
+ [x1 - tooth_size2]
+
+ bottom_saw_y = [y0] + \
+ [y0 - tooth_size2, y0,
+ y0 + tooth_size2, y0] * dsx_n + \
+ [y0 - tooth_size2]
+
+ right_saw_x = [x1] + \
+ [x1 + tooth_size2,
+ x1,
+ x1 - tooth_size2,
+ x1] * dsx_n + \
+ [x1 + tooth_size2]
+
+ right_saw_y = [y0] + \
+ [y0 + tooth_size2 + dsy * .5 * i
+ for i
+ in range(dsy_n * 2)] + \
+ [y1 - tooth_size2]
+
+ top_saw_x = [x1] + \
+ [x1 - tooth_size2 - dsx * .5 * i
+ for i
+ in range(dsx_n * 2)] + \
+ [x0 + tooth_size2]
+
+ top_saw_y = [y1] + \
+ [y1 + tooth_size2,
+ y1,
+ y1 - tooth_size2,
+ y1] * dsx_n + \
+ [y1 + tooth_size2]
+
+ left_saw_x = [x0] + \
+ [x0 - tooth_size2,
+ x0,
+ x0 + tooth_size2,
+ x0] * dsy_n + \
+ [x0 - tooth_size2]
+
+ left_saw_y = [y1] + \
+ [y1 - tooth_size2 - dsy * .5 * i
+ for i
+ in range(dsy_n * 2)] + \
+ [y0 + tooth_size2]
+
+ saw_vertices = (list(zip(bottom_saw_x, bottom_saw_y)) +
+ list(zip(right_saw_x, right_saw_y)) +
+ list(zip(top_saw_x, top_saw_y)) +
+ list(zip(left_saw_x, left_saw_y)) +
+ [(bottom_saw_x[0], bottom_saw_y[0])])
+
+ return saw_vertices
+
+ def transmute(self, x0, y0, width, height, mutation_size):
+
+ saw_vertices = self._get_sawtooth_vertices(x0, y0, width,
+ height, mutation_size)
+ path = Path(saw_vertices, closed=True)
+ return path
+
+ _style_list["sawtooth"] = Sawtooth
+
+ class Roundtooth(Sawtooth):
+ """A rounded tooth box."""
+ def __init__(self, pad=0.3, tooth_size=None):
+ """
+ *pad*
+ amount of padding
+
+ *tooth_size*
+ size of the sawtooth; defaults to half of *pad* if None
+ """
+ super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)
+
+ def transmute(self, x0, y0, width, height, mutation_size):
+ saw_vertices = self._get_sawtooth_vertices(x0, y0,
+ width, height,
+ mutation_size)
+ # Add a trailing vertex to allow us to close the polygon correctly
+ saw_vertices = np.concatenate([np.array(saw_vertices),
+ [saw_vertices[0]]], axis=0)
+ codes = ([Path.MOVETO] +
+ [Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2) +
+ [Path.CLOSEPOLY])
+ return Path(saw_vertices, codes)
+
+ _style_list["roundtooth"] = Roundtooth
+
+ if __doc__: # __doc__ could be None if -OO optimization is enabled
+ __doc__ = cbook.dedent(__doc__) % \
+ {"AvailableBoxstyles": _pprint_styles(_style_list)}
+
+docstring.interpd.update(
+ AvailableBoxstyles=_pprint_styles(BoxStyle._style_list),
+ ListBoxstyles=_simpleprint_styles(BoxStyle._style_list))
+
+
+class FancyBboxPatch(Patch):
+ """
+ Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
+ *y*) with specified width and height.
+
+ :class:`FancyBboxPatch` class is similar to :class:`Rectangle`
+ class, but it draws a fancy box around the rectangle. The
+ transformation of the rectangle box to the fancy box is delegated
+ to the :class:`BoxTransmuterBase` and its derived classes.
+
+ """
+
+ _edge_default = True
+
+ def __str__(self):
+ return self.__class__.__name__ \
+ + "(%g,%g;%gx%g)" % (self._x, self._y,
+ self._width, self._height)
+
+ @docstring.dedent_interpd
+ def __init__(self, xy, width, height,
+ boxstyle="round",
+ bbox_transmuter=None,
+ mutation_scale=1.,
+ mutation_aspect=None,
+ **kwargs):
+ """
+ *xy* = lower left corner
+
+ *width*, *height*
+
+ *boxstyle* determines what kind of fancy box will be drawn. It
+ can be a string of the style name with comma-separated
+ attributes, or an instance of :class:`BoxStyle`. The following box
+ styles are available.
+
+ %(AvailableBoxstyles)s
+
+ *mutation_scale* : a value with which attributes of boxstyle
+ (e.g., pad) will be scaled. default=1.
+
+ *mutation_aspect* : The height of the rectangle will be
+ squeezed by this value before the mutation and the mutated
+ box will be stretched by the inverse of it. default=None.
+
+ Valid kwargs are:
+ %(Patch)s
+ """
+
+ Patch.__init__(self, **kwargs)
+
+ self._x = xy[0]
+ self._y = xy[1]
+ self._width = width
+ self._height = height
+
+ if boxstyle == "custom":
+ if bbox_transmuter is None:
+ raise ValueError("bbox_transmuter argument is needed with "
+ "custom boxstyle")
+ self._bbox_transmuter = bbox_transmuter
+ else:
+ self.set_boxstyle(boxstyle)
+
+ self._mutation_scale = mutation_scale
+ self._mutation_aspect = mutation_aspect
+
+ self.stale = True
+
+ @docstring.dedent_interpd
+ def set_boxstyle(self, boxstyle=None, **kw):
+ """
+ Set the box style.
+
+ *boxstyle* can be a string with boxstyle name with optional
+ comma-separated attributes. Alternatively, the attrs can
+ be provided as keywords::
+
+ set_boxstyle("round,pad=0.2")
+ set_boxstyle("round", pad=0.2)
+
+ Old attrs are simply forgotten.
+
+ Without argument (or with *boxstyle* = None), it returns
+ available box styles.
+
+ The following boxstyles are available:
+ %(AvailableBoxstyles)s
+
+ ACCEPTS: %(ListBoxstyles)s
+
+ """
+ if boxstyle is None:
+ return BoxStyle.pprint_styles()
+
+ if isinstance(boxstyle, BoxStyle._Base) or callable(boxstyle):
+ self._bbox_transmuter = boxstyle
+ else:
+ self._bbox_transmuter = BoxStyle(boxstyle, **kw)
+ self.stale = True
+
+ def set_mutation_scale(self, scale):
+ """
+ Set the mutation scale.
+
+ ACCEPTS: float
+ """
+ self._mutation_scale = scale
+ self.stale = True
+
+ def get_mutation_scale(self):
+ """
+ Return the mutation scale.
+ """
+ return self._mutation_scale
+
+ def set_mutation_aspect(self, aspect):
+ """
+ Set the aspect ratio of the bbox mutation.
+
+ ACCEPTS: float
+ """
+ self._mutation_aspect = aspect
+ self.stale = True
+
+ def get_mutation_aspect(self):
+ """
+ Return the aspect ratio of the bbox mutation.
+ """
+ return self._mutation_aspect
+
+ def get_boxstyle(self):
+ "Return the boxstyle object"
+ return self._bbox_transmuter
+
+ def get_path(self):
+ """
+ Return the mutated path of the rectangle
+ """
+
+ _path = self.get_boxstyle()(self._x, self._y,
+ self._width, self._height,
+ self.get_mutation_scale(),
+ self.get_mutation_aspect())
+ return _path
+
+ # Following methods are borrowed from the Rectangle class.
+
+ def get_x(self):
+ "Return the left coord of the rectangle"
+ return self._x
+
+ def get_y(self):
+ "Return the bottom coord of the rectangle"
+ return self._y
+
+ def get_width(self):
+ "Return the width of the rectangle"
+ return self._width
+
+ def get_height(self):
+ "Return the height of the rectangle"
+ return self._height
+
+ def set_x(self, x):
+ """
+ Set the left coord of the rectangle
+
+ ACCEPTS: float
+ """
+ self._x = x
+ self.stale = True
+
+ def set_y(self, y):
+ """
+ Set the bottom coord of the rectangle
+
+ ACCEPTS: float
+ """
+ self._y = y
+ self.stale = True
+
+ def set_width(self, w):
+ """
+ Set the width of the rectangle
+
+ ACCEPTS: float
+ """
+ self._width = w
+ self.stale = True
+
+ def set_height(self, h):
+ """
+ Set the height of the rectangle
+
+ ACCEPTS: float
+ """
+ self._height = h
+ self.stale = True
+
+ def set_bounds(self, *args):
+ """
+ Set the bounds of the rectangle: l,b,w,h
+
+ ACCEPTS: (left, bottom, width, height)
+ """
+ if len(args) == 1:
+ l, b, w, h = args[0]
+ else:
+ l, b, w, h = args
+ self._x = l
+ self._y = b
+ self._width = w
+ self._height = h
+ self.stale = True
+
+ def get_bbox(self):
+ return transforms.Bbox.from_bounds(self._x, self._y,
+ self._width, self._height)
+
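+ # Usage sketch (assumes the usual pyplot workflow); the boxstyle string is
+ # parsed by BoxStyle via _Style.__new__ as described above:
+ #
+ #     import matplotlib.pyplot as plt
+ #     from matplotlib.patches import FancyBboxPatch
+ #     fig, ax = plt.subplots()
+ #     ax.add_patch(FancyBboxPatch((0.2, 0.4), 0.6, 0.2,
+ #                                  boxstyle="round,pad=0.05"))
+ #     plt.show()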
+
+class ConnectionStyle(_Style):
+ """
+ :class:`ConnectionStyle` is a container class which defines
+ several connectionstyle classes, which are used to create a path
+ between two points. These are mainly used with
+ :class:`FancyArrowPatch`.
+
+ A connectionstyle object can be created either as::
+
+ ConnectionStyle.Arc3(rad=0.2)
+
+ or::
+
+ ConnectionStyle("Arc3", rad=0.2)
+
+ or::
+
+ ConnectionStyle("Arc3, rad=0.2")
+
+ The following classes are defined
+
+ %(AvailableConnectorstyles)s
+
+
+ An instance of any connection style class is a callable object,
+ whose call signature is::
+
+ __call__(self, posA, posB,
+ patchA=None, patchB=None,
+ shrinkA=2., shrinkB=2.)
+
+ and it returns a :class:`Path` instance. *posA* and *posB* are
+ tuples of x,y coordinates of the two points to be
+ connected. If *patchA* (or *patchB*) is given, the returned path is
+ clipped so that it starts (or ends) at the boundary of the
+ patch. The path is further shrunk by *shrinkA* (or *shrinkB*)
+ which is given in points.
+
+ """
+
+ _style_list = {}
+
+ class _Base(object):
+ """
+ A base class for connectionstyle classes. The subclass needs
+ to implement a *connect* method whose call signature is::
+
+ connect(posA, posB)
+
+ where posA and posB are tuples of x, y coordinates to be
+ connected. The method needs to return a path connecting two
+ points. This base class defines a __call__ method, and a few
+ helper methods.
+ """
+
+ class SimpleEvent:
+ def __init__(self, xy):
+ self.x, self.y = xy
+
+ def _clip(self, path, patchA, patchB):
+ """
+ Clip the path to the boundaries of patchA and patchB.
+ The starting point of the path needs to be inside patchA and
+ the end point inside patchB. The *contains* method of each
+ patch object is used to test whether a point is inside the
+ patch.
+ """
+
+ if patchA:
+ def insideA(xy_display):
+ xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
+ return patchA.contains(xy_event)[0]
+
+ try:
+ left, right = split_path_inout(path, insideA)
+ except ValueError:
+ right = path
+
+ path = right
+
+ if patchB:
+ def insideB(xy_display):
+ xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
+ return patchB.contains(xy_event)[0]
+
+ try:
+ left, right = split_path_inout(path, insideB)
+ except ValueError:
+ left = path
+
+ path = left
+
+ return path
+
+ def _shrink(self, path, shrinkA, shrinkB):
+ """
+ Shrink the path by fixed size (in points) with shrinkA and shrinkB
+ """
+ if shrinkA:
+ x, y = path.vertices[0]
+ insideA = inside_circle(x, y, shrinkA)
+
+ try:
+ left, right = split_path_inout(path, insideA)
+ path = right
+ except ValueError:
+ pass
+
+ if shrinkB:
+ x, y = path.vertices[-1]
+ insideB = inside_circle(x, y, shrinkB)
+
+ try:
+ left, right = split_path_inout(path, insideB)
+ path = left
+ except ValueError:
+ pass
+
+ return path
+
+ def __call__(self, posA, posB,
+ shrinkA=2., shrinkB=2., patchA=None, patchB=None):
+ """
+ Calls the *connect* method to create a path between *posA*
+ and *posB*. The path is clipped and shrunken.
+ """
+
+ path = self.connect(posA, posB)
+
+ clipped_path = self._clip(path, patchA, patchB)
+ shrunk_path = self._shrink(clipped_path, shrinkA, shrinkB)
+
+ return shrunk_path
+
+ def __reduce__(self):
+ # because we have decided to nest these classes, we need to
+ # add some more information to allow instance pickling.
+ return (cbook._NestedClassGetter(),
+ (ConnectionStyle, self.__class__.__name__),
+ self.__dict__
+ )
+
+ class Arc3(_Base):
+ """
+ Creates a simple quadratic bezier curve between two
+ points. The curve is created so that the middle control point
+ (C1) is located at the same distance from the start (C0) and
+ end (C2) points, and the distance from C1 to the line
+ connecting C0 and C2 is *rad* times the distance of C0-C2.
+ """
+
+ def __init__(self, rad=0.):
+ """
+ *rad*
+ curvature of the curve.
+ """
+ self.rad = rad
+
+ def connect(self, posA, posB):
+ x1, y1 = posA
+ x2, y2 = posB
+ x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
+ dx, dy = x2 - x1, y2 - y1
+
+ f = self.rad
+
+ cx, cy = x12 + f * dy, y12 - f * dx
+
+ vertices = [(x1, y1),
+ (cx, cy),
+ (x2, y2)]
+ codes = [Path.MOVETO,
+ Path.CURVE3,
+ Path.CURVE3]
+
+ return Path(vertices, codes)
+
+ _style_list["arc3"] = Arc3
+
+ class Angle3(_Base):
+ """
+ Creates a simple quadratic bezier curve between two
+ points. The middle control point is placed at the intersection
+ of two lines which cross the start (or end) point and have an
+ angle of angleA (or angleB).
+ """
+
+ def __init__(self, angleA=90, angleB=0):
+ """
+ *angleA*
+ starting angle of the path
+
+ *angleB*
+ ending angle of the path
+ """
+
+ self.angleA = angleA
+ self.angleB = angleB
+
+ def connect(self, posA, posB):
+ x1, y1 = posA
+ x2, y2 = posB
+
+ cosA = math.cos(math.radians(self.angleA))
+ sinA = math.sin(math.radians(self.angleA))
+ cosB = math.cos(math.radians(self.angleB))
+ sinB = math.sin(math.radians(self.angleB))
+
+ cx, cy = get_intersection(x1, y1, cosA, sinA,
+ x2, y2, cosB, sinB)
+
+ vertices = [(x1, y1), (cx, cy), (x2, y2)]
+ codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
+
+ return Path(vertices, codes)
+
+ _style_list["angle3"] = Angle3
+
+ class Angle(_Base):
+ """
+ Creates a piecewise continuous quadratic bezier path between
+ two points. The path has one passing-through point placed at
+ the intersection of two lines which cross the start (or end)
+ point and have an angle of angleA (or angleB). The
+ connecting edges are rounded with *rad*.
+ """
+
+ def __init__(self, angleA=90, angleB=0, rad=0.):
+ """
+ *angleA*
+ starting angle of the path
+
+ *angleB*
+ ending angle of the path
+
+ *rad*
+ rounding radius of the edge
+ """
+
+ self.angleA = angleA
+ self.angleB = angleB
+
+ self.rad = rad
+
+ def connect(self, posA, posB):
+ x1, y1 = posA
+ x2, y2 = posB
+
+ cosA = math.cos(math.radians(self.angleA))
+ sinA = math.sin(math.radians(self.angleA))
+ cosB = math.cos(math.radians(self.angleB))
+ sinB = math.sin(math.radians(self.angleB))
+
+ cx, cy = get_intersection(x1, y1, cosA, sinA,
+ x2, y2, cosB, sinB)
+
+ vertices = [(x1, y1)]
+ codes = [Path.MOVETO]
+
+ if self.rad == 0.:
+ vertices.append((cx, cy))
+ codes.append(Path.LINETO)
+ else:
+ dx1, dy1 = x1 - cx, y1 - cy
+ d1 = (dx1 ** 2 + dy1 ** 2) ** .5
+ f1 = self.rad / d1
+ dx2, dy2 = x2 - cx, y2 - cy
+ d2 = (dx2 ** 2 + dy2 ** 2) ** .5
+ f2 = self.rad / d2
+ vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
+ (cx, cy),
+ (cx + dx2 * f2, cy + dy2 * f2)])
+ codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])
+
+ vertices.append((x2, y2))
+ codes.append(Path.LINETO)
+
+ return Path(vertices, codes)
+
+ _style_list["angle"] = Angle
+
+ class Arc(_Base):
+ """
+ Creates a piecewise continuous quadratic bezier path between
+ two points. The path can have two passing-through points: one
+ placed at distance *armA* and angle *angleA* from point A, and
+ another placed similarly with respect to point B. The edges are
+ rounded with *rad*.
+ """
+
+ def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
+ """
+ *angleA* :
+ starting angle of the path
+
+ *angleB* :
+ ending angle of the path
+
+ *armA* :
+ length of the starting arm
+
+ *armB* :
+ length of the ending arm
+
+ *rad* :
+ rounding radius of the edges
+ """
+
+ self.angleA = angleA
+ self.angleB = angleB
+ self.armA = armA
+ self.armB = armB
+
+ self.rad = rad
+
+ def connect(self, posA, posB):
+ x1, y1 = posA
+ x2, y2 = posB
+
+ vertices = [(x1, y1)]
+ rounded = []
+ codes = [Path.MOVETO]
+
+ if self.armA:
+ cosA = math.cos(math.radians(self.angleA))
+ sinA = math.sin(math.radians(self.angleA))
+ # x_armA, y_armA
+ d = self.armA - self.rad
+ rounded.append((x1 + d * cosA, y1 + d * sinA))
+ d = self.armA
+ rounded.append((x1 + d * cosA, y1 + d * sinA))
+
+ if self.armB:
+ cosB = math.cos(math.radians(self.angleB))
+ sinB = math.sin(math.radians(self.angleB))
+ x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB
+
+ if rounded:
+ xp, yp = rounded[-1]
+ dx, dy = x_armB - xp, y_armB - yp
+ dd = (dx * dx + dy * dy) ** .5
+
+ rounded.append((xp + self.rad * dx / dd,
+ yp + self.rad * dy / dd))
+ vertices.extend(rounded)
+ codes.extend([Path.LINETO,
+ Path.CURVE3,
+ Path.CURVE3])
+ else:
+ xp, yp = vertices[-1]
+ dx, dy = x_armB - xp, y_armB - yp
+ dd = (dx * dx + dy * dy) ** .5
+
+ d = dd - self.rad
+ rounded = [(xp + d * dx / dd, yp + d * dy / dd),
+ (x_armB, y_armB)]
+
+ if rounded:
+ xp, yp = rounded[-1]
+ dx, dy = x2 - xp, y2 - yp
+ dd = (dx * dx + dy * dy) ** .5
+
+ rounded.append((xp + self.rad * dx / dd,
+ yp + self.rad * dy / dd))
+ vertices.extend(rounded)
+ codes.extend([Path.LINETO,
+ Path.CURVE3,
+ Path.CURVE3])
+
+ vertices.append((x2, y2))
+ codes.append(Path.LINETO)
+
+ return Path(vertices, codes)
+
+ _style_list["arc"] = Arc
+
+ class Bar(_Base):
+ """
+ A line with *angle* between A and B, with *armA* and
+ *armB*. One of the arms is extended so that they are connected at
+ a right angle. The length of armA is determined by (*armA*
+ + *fraction* x AB distance). Same for armB.
+ """
+
+ def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
+ """
+ Parameters
+ ----------
+ armA : float
+ minimum length of armA
+
+ armB : float
+ minimum length of armB
+
+ fraction : float
+ a fraction of the distance between two points that
+ will be added to armA and armB.
+
+ angle : float or None
+ angle of the connecting line (if None, parallel
+ to A and B)
+ """
+ self.armA = armA
+ self.armB = armB
+ self.fraction = fraction
+ self.angle = angle
+
+ def connect(self, posA, posB):
+ x1, y1 = posA
+ x20, y20 = x2, y2 = posB
+
+ theta1 = math.atan2(y2 - y1, x2 - x1)
+ dx, dy = x2 - x1, y2 - y1
+ dd = (dx * dx + dy * dy) ** .5
+ ddx, ddy = dx / dd, dy / dd
+
+ armA, armB = self.armA, self.armB
+
+ if self.angle is not None:
+ theta0 = np.deg2rad(self.angle)
+ dtheta = theta1 - theta0
+ dl = dd * math.sin(dtheta)
+ dL = dd * math.cos(dtheta)
+ x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)
+ armB = armB - dl
+
+ # update
+ dx, dy = x2 - x1, y2 - y1
+ dd2 = (dx * dx + dy * dy) ** .5
+ ddx, ddy = dx / dd2, dy / dd2
+
+ else:
+ dl = 0.
+
+ arm = max(armA, armB)
+ f = self.fraction * dd + arm
+
+ cx1, cy1 = x1 + f * ddy, y1 - f * ddx
+ cx2, cy2 = x2 + f * ddy, y2 - f * ddx
+
+ vertices = [(x1, y1),
+ (cx1, cy1),
+ (cx2, cy2),
+ (x20, y20)]
+ codes = [Path.MOVETO,
+ Path.LINETO,
+ Path.LINETO,
+ Path.LINETO]
+
+ return Path(vertices, codes)
+
+ _style_list["bar"] = Bar
+
+ if __doc__:
+ __doc__ = cbook.dedent(__doc__) % \
+ {"AvailableConnectorstyles": _pprint_styles(_style_list)}
+
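+ # Usage sketch (assumes the usual pyplot workflow): connection styles are
+ # typically selected by name through the arrowprops of annotate, which
+ # builds a FancyArrowPatch under the hood:
+ #
+ #     import matplotlib.pyplot as plt
+ #     fig, ax = plt.subplots()
+ #     ax.annotate("peak", xy=(0.2, 0.2), xytext=(0.7, 0.7),
+ #                 arrowprops=dict(arrowstyle="->",
+ #                                 connectionstyle="arc3,rad=0.3"))
+ #     plt.show()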
+
+def _point_along_a_line(x0, y0, x1, y1, d):
+ """
+ find a point along a line connecting (x0, y0) -- (x1, y1) whose
+ distance from (x0, y0) is d.
+ """
+ dx, dy = x0 - x1, y0 - y1
+ ff = d / (dx * dx + dy * dy) ** .5
+ x2, y2 = x0 - ff * dx, y0 - ff * dy
+
+ return x2, y2
+
+
+class ArrowStyle(_Style):
+ """
+ :class:`ArrowStyle` is a container class which defines several
+ arrowstyle classes, which are used to create an arrow path along a
+ given path. These are mainly used with :class:`FancyArrowPatch`.
+
+ An arrowstyle object can be created either as::
+
+ ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
+
+ or::
+
+ ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
+
+ or::
+
+ ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
+
+ The following classes are defined
+
+ %(AvailableArrowstyles)s
+
+
+ An instance of any arrow style class is a callable object,
+ whose call signature is::
+
+ __call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
+
+ and it returns a tuple of a :class:`Path` instance and a boolean
+ value. *path* is a :class:`Path` instance along which the arrow
+ will be drawn. *mutation_size* and *aspect_ratio* have the same
+ meaning as in :class:`BoxStyle`. *linewidth* is a line width to be
+ stroked. This is meant to be used to correct the location of the
+ head so that it does not overshoot the destination point, but not all
+ classes support it.
+ """
+
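+ # Usage sketch: arrow styles are normally selected by name through
+ # FancyArrowPatch, e.g. FancyArrowPatch((0, 0), (1, 1), arrowstyle="-|>",
+ # mutation_scale=20) resolves "-|>" to CurveFilledB via _style_list; the
+ # same style object can also be built directly as ArrowStyle("-|>").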
+ _style_list = {}
+
+ class _Base(object):
+ """
+ Arrow Transmuter Base class
+
+ ArrowTransmuterBase and its derivatives are used to make a fancy
+ arrow around a given path. The __call__ method returns a path
+ (which will be used to create a PathPatch instance) and a boolean
+ value indicating the path is open therefore is not fillable. This
+ class is not an artist and actual drawing of the fancy arrow is
+ done by the FancyArrowPatch class.
+
+ """
+
+ # The derived classes are required to be able to be initialized
+ # w/o arguments, i.e., all its argument (except self) must have
+ # the default values.
+
+ @staticmethod
+ def ensure_quadratic_bezier(path):
+ """ Some ArrowStyle class only wokrs with a simple
+ quaratic bezier curve (created with Arc3Connetion or
+ Angle3Connector). This static method is to check if the
+ provided path is a simple quadratic bezier curve and returns
+ its control points if true.
+ """
+ segments = list(path.iter_segments())
+ if (len(segments) != 2 or segments[0][1] != Path.MOVETO or
+ segments[1][1] != Path.CURVE3):
+ raise ValueError(
+ "'path' it's not a valid quadratic Bezier curve")
+
+ return list(segments[0][0]) + list(segments[1][0])
+
+ def transmute(self, path, mutation_size, linewidth):
+ """
+ The transmute method is the very core of the ArrowStyle
+ class and must be overridden in the subclasses. It receives
+ the path object along which the arrow will be drawn, and
+ the mutation_size, with which the arrow head etc.
+ will be scaled. The linewidth may be used to adjust
+ the path so that it does not pass beyond the given
+ points. It returns a tuple of a Path instance and a
+ boolean. The boolean value indicates whether the path can
+ be filled or not. The return value can also be a list of paths
+ and a list of booleans of the same length.
+ """
+
+ raise NotImplementedError('Derived must override')
+
+ def __call__(self, path, mutation_size, linewidth,
+ aspect_ratio=1.):
+ """
+ The __call__ method is a thin wrapper around the transmute method
+ and takes care of the aspect ratio.
+ """
+
+ path = make_path_regular(path)
+
+ if aspect_ratio is not None:
+ # Squeeze the given height by the aspect_ratio
+
+ vertices, codes = path.vertices[:], path.codes[:]
+ # Squeeze the height
+ vertices[:, 1] = vertices[:, 1] / aspect_ratio
+ path_shrunk = Path(vertices, codes)
+ # call transmute method with squeezed height.
+ path_mutated, fillable = self.transmute(path_shrunk,
+ mutation_size,
+ linewidth)
+ if cbook.iterable(fillable):
+ path_list = []
+ for p in path_mutated:
+ v, c = p.vertices, p.codes
+ # Restore the height
+ v[:, 1] = v[:, 1] * aspect_ratio
+ path_list.append(Path(v, c))
+ return path_list, fillable
+ else:
+ return path_mutated, fillable
+ else:
+ return self.transmute(path, mutation_size, linewidth)
+
+ def __reduce__(self):
+ # because we have decided to nest these classes, we need to
+ # add some more information to allow instance pickling.
+ return (cbook._NestedClassGetter(),
+ (ArrowStyle, self.__class__.__name__),
+ self.__dict__
+ )
+
+ class _Curve(_Base):
+ """
+ A simple arrow which will work with any path instance. The
+ returned path is simply the concatenation of the original path and at
+ most two paths representing the arrow heads at the begin point and
+ at the end point. The arrow heads can be either open or closed.
+ """
+
+ def __init__(self, beginarrow=None, endarrow=None,
+ fillbegin=False, fillend=False,
+ head_length=.2, head_width=.1):
+ """
+ The arrows are drawn if *beginarrow* and/or *endarrow* are
+ true. *head_length* and *head_width* determine the size
+ of the arrow relative to the *mutation scale*. The
+ arrowhead at the begin (or end) is closed if fillbegin (or
+ fillend) is True.
+ """
+ self.beginarrow, self.endarrow = beginarrow, endarrow
+ self.head_length, self.head_width = head_length, head_width
+ self.fillbegin, self.fillend = fillbegin, fillend
+ super(ArrowStyle._Curve, self).__init__()
+
+ def _get_arrow_wedge(self, x0, y0, x1, y1,
+ head_dist, cos_t, sin_t, linewidth
+ ):
+ """
+ Return the paths for arrow heads. Since arrow lines are
+ drawn with capstyle=projected, the arrow goes beyond the
+ desired point. This method also returns the amount of the path
+ to be shrunken so that it does not overshoot.
+ """
+
+ # arrow from x0, y0 to x1, y1
+ dx, dy = x0 - x1, y0 - y1
+
+ cp_distance = np.hypot(dx, dy)
+
+ # pad_projected : amount of pad to account for the
+ # overshooting of the projection of the wedge
+ pad_projected = (.5 * linewidth / sin_t)
+
+ # Account for division by zero
+ if cp_distance == 0:
+ cp_distance = 1
+
+ # apply pad for projected edge
+ ddx = pad_projected * dx / cp_distance
+ ddy = pad_projected * dy / cp_distance
+
+ # offset for arrow wedge
+ dx = dx / cp_distance * head_dist
+ dy = dy / cp_distance * head_dist
+
+ dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
+ dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
+
+ vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
+ (x1 + ddx, y1 + ddy),
+ (x1 + ddx + dx2, y1 + ddy + dy2)]
+ codes_arrow = [Path.MOVETO,
+ Path.LINETO,
+ Path.LINETO]
+
+ return vertices_arrow, codes_arrow, ddx, ddy
+
+ def transmute(self, path, mutation_size, linewidth):
+
+ head_length = self.head_length * mutation_size
+ head_width = self.head_width * mutation_size
+ head_dist = math.sqrt(head_length ** 2 + head_width ** 2)
+ cos_t, sin_t = head_length / head_dist, head_width / head_dist
+
+ # begin arrow
+ x0, y0 = path.vertices[0]
+ x1, y1 = path.vertices[1]
+
+ # If there is no room for an arrow and a line, then skip the arrow
+ has_begin_arrow = self.beginarrow and not (x0 == x1 and y0 == y1)
+ if has_begin_arrow:
+ verticesA, codesA, ddxA, ddyA = \
+ self._get_arrow_wedge(x1, y1, x0, y0,
+ head_dist, cos_t, sin_t,
+ linewidth)
+ else:
+ verticesA, codesA = [], []
+ ddxA, ddyA = 0., 0.
+
+ # end arrow
+ x2, y2 = path.vertices[-2]
+ x3, y3 = path.vertices[-1]
+
+ # If there is no room for an arrow and a line, then skip the arrow
+ has_end_arrow = (self.endarrow and not ((x2 == x3) and (y2 == y3)))
+ if has_end_arrow:
+ verticesB, codesB, ddxB, ddyB = \
+ self._get_arrow_wedge(x2, y2, x3, y3,
+ head_dist, cos_t, sin_t,
+ linewidth)
+ else:
+ verticesB, codesB = [], []
+ ddxB, ddyB = 0., 0.
+
+ # this simple code will not work if ddx, ddy is greater than the
+ # separation between vertices.
+ _path = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
+ path.vertices[1:-1],
+ [(x3 + ddxB, y3 + ddyB)]]),
+ path.codes)]
+ _fillable = [False]
+
+ if has_begin_arrow:
+ if self.fillbegin:
+ p = np.concatenate([verticesA, [verticesA[0],
+ verticesA[0]], ])
+ c = np.concatenate([codesA, [Path.LINETO, Path.CLOSEPOLY]])
+ _path.append(Path(p, c))
+ _fillable.append(True)
+ else:
+ _path.append(Path(verticesA, codesA))
+ _fillable.append(False)
+
+ if has_end_arrow:
+ if self.fillend:
+ _fillable.append(True)
+ p = np.concatenate([verticesB, [verticesB[0],
+ verticesB[0]], ])
+ c = np.concatenate([codesB, [Path.LINETO, Path.CLOSEPOLY]])
+ _path.append(Path(p, c))
+ else:
+ _fillable.append(False)
+ _path.append(Path(verticesB, codesB))
+
+ return _path, _fillable
+
+ class Curve(_Curve):
+ """
+ A simple curve without any arrow head.
+ """
+
+ def __init__(self):
+ super(ArrowStyle.Curve, self).__init__(
+ beginarrow=False, endarrow=False)
+
+ _style_list["-"] = Curve
+
+ class CurveA(_Curve):
+ """
+ An arrow with a head at its begin point.
+ """
+
+ def __init__(self, head_length=.4, head_width=.2):
+ """
+ Parameters
+ ----------
+ head_length : float, optional, default : 0.4
+ Length of the arrow head
+
+ head_width : float, optional, default : 0.2
+ Width of the arrow head
+ """
+
+ super(ArrowStyle.CurveA, self).__init__(
+ beginarrow=True, endarrow=False,
+ head_length=head_length, head_width=head_width)
+
+ _style_list["<-"] = CurveA
+
+ class CurveB(_Curve):
+ """
+ An arrow with a head at its end point.
+ """
+
+ def __init__(self, head_length=.4, head_width=.2):
+ """
+ Parameters
+ ----------
+ head_length : float, optional, default : 0.4
+ Length of the arrow head
+
+ head_width : float, optional, default : 0.2
+ Width of the arrow head
+ """
+
+ super(ArrowStyle.CurveB, self).__init__(
+ beginarrow=False, endarrow=True,
+ head_length=head_length, head_width=head_width)
+
+ _style_list["->"] = CurveB
+
+ class CurveAB(_Curve):
+ """
+ An arrow with heads both at the begin and the end point.
+ """
+
+ def __init__(self, head_length=.4, head_width=.2):
+ """
+ Parameters
+ ----------
+ head_length : float, optional, default : 0.4
+ Length of the arrow head
+
+ head_width : float, optional, default : 0.2
+ Width of the arrow head
+ """
+
+ super(ArrowStyle.CurveAB, self).__init__(
+ beginarrow=True, endarrow=True,
+ head_length=head_length, head_width=head_width)
+
+ _style_list["<->"] = CurveAB
+
+ class CurveFilledA(_Curve):
+ """
+ An arrow with filled triangle head at the begin.
+ """
+
+ def __init__(self, head_length=.4, head_width=.2):
+ """
+ Parameters
+ ----------
+ head_length : float, optional, default : 0.4
+ Length of the arrow head
+
+ head_width : float, optional, default : 0.2
+ Width of the arrow head
+ """
+
+ super(ArrowStyle.CurveFilledA, self).__init__(
+ beginarrow=True, endarrow=False,
+ fillbegin=True, fillend=False,
+ head_length=head_length, head_width=head_width)
+
+ _style_list["<|-"] = CurveFilledA
+
+ class CurveFilledB(_Curve):
+ """
+ An arrow with filled triangle head at the end.
+ """
+
+ def __init__(self, head_length=.4, head_width=.2):
+ """
+ Parameters
+ ----------
+ head_length : float, optional, default : 0.4
+ Length of the arrow head
+
+ head_width : float, optional, default : 0.2
+ Width of the arrow head
+ """
+
+ super(ArrowStyle.CurveFilledB, self).__init__(
+ beginarrow=False, endarrow=True,
+ fillbegin=False, fillend=True,
+ head_length=head_length, head_width=head_width)
+
+ _style_list["-|>"] = CurveFilledB
+
+ class CurveFilledAB(_Curve):
+ """
+ An arrow with filled triangle heads at both ends.
+ """
+
+ def __init__(self, head_length=.4, head_width=.2):
+ """
+ Parameters
+ ----------
+ head_length : float, optional, default : 0.4
+ Length of the arrow head
+
+ head_width : float, optional, default : 0.2
+ Width of the arrow head
+ """
+
+ super(ArrowStyle.CurveFilledAB, self).__init__(
+ beginarrow=True, endarrow=True,
+ fillbegin=True, fillend=True,
+ head_length=head_length, head_width=head_width)
+
+ _style_list["<|-|>"] = CurveFilledAB
+
+ class _Bracket(_Base):
+
+ def __init__(self, bracketA=None, bracketB=None,
+ widthA=1., widthB=1.,
+ lengthA=0.2, lengthB=0.2,
+ angleA=None, angleB=None,
+ scaleA=None, scaleB=None):
+ self.bracketA, self.bracketB = bracketA, bracketB
+ self.widthA, self.widthB = widthA, widthB
+ self.lengthA, self.lengthB = lengthA, lengthB
+ self.angleA, self.angleB = angleA, angleB
+ self.scaleA, self.scaleB = scaleA, scaleB
+
+ def _get_bracket(self, x0, y0,
+ cos_t, sin_t, width, length):
+
+ # arrow from x0, y0 to x1, y1
+ from matplotlib.bezier import get_normal_points
+ x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
+
+ dx, dy = length * cos_t, length * sin_t
+
+ vertices_arrow = [(x1 + dx, y1 + dy),
+ (x1, y1),
+ (x2, y2),
+ (x2 + dx, y2 + dy)]
+ codes_arrow = [Path.MOVETO,
+ Path.LINETO,
+ Path.LINETO,
+ Path.LINETO]
+
+ return vertices_arrow, codes_arrow
+
+ def transmute(self, path, mutation_size, linewidth):
+
+ if self.scaleA is None:
+ scaleA = mutation_size
+ else:
+ scaleA = self.scaleA
+
+ if self.scaleB is None:
+ scaleB = mutation_size
+ else:
+ scaleB = self.scaleB
+
+ vertices_list, codes_list = [], []
+
+ if self.bracketA:
+ x0, y0 = path.vertices[0]
+ x1, y1 = path.vertices[1]
+ cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
+ verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
+ self.widthA * scaleA,
+ self.lengthA * scaleA)
+ vertices_list.append(verticesA)
+ codes_list.append(codesA)
+
+ vertices_list.append(path.vertices)
+ codes_list.append(path.codes)
+
+ if self.bracketB:
+ x0, y0 = path.vertices[-1]
+ x1, y1 = path.vertices[-2]
+ cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
+ verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
+ self.widthB * scaleB,
+ self.lengthB * scaleB)
+ vertices_list.append(verticesB)
+ codes_list.append(codesB)
+
+ vertices = np.concatenate(vertices_list)
+ codes = np.concatenate(codes_list)
+
+ p = Path(vertices, codes)
+
+ return p, False
+
+ class BracketAB(_Bracket):
+ """
+        An arrow with a bracket (]) at both ends.
+ """
+
+ def __init__(self,
+ widthA=1., lengthA=0.2, angleA=None,
+ widthB=1., lengthB=0.2, angleB=None):
+ """
+ Parameters
+ ----------
+ widthA : float, optional, default : 1.0
+ Width of the bracket
+
+ lengthA : float, optional, default : 0.2
+ Length of the bracket
+
+ angleA : float, optional, default : None
+ Angle between the bracket and the line
+
+ widthB : float, optional, default : 1.0
+ Width of the bracket
+
+ lengthB : float, optional, default : 0.2
+ Length of the bracket
+
+ angleB : float, optional, default : None
+ Angle between the bracket and the line
+ """
+
+ super(ArrowStyle.BracketAB, self).__init__(
+ True, True, widthA=widthA, lengthA=lengthA,
+ angleA=angleA, widthB=widthB, lengthB=lengthB,
+ angleB=angleB)
+
+ _style_list["]-["] = BracketAB
+
+ class BracketA(_Bracket):
+ """
+        An arrow with a bracket (]) at its begin point.
+ """
+
+ def __init__(self, widthA=1., lengthA=0.2, angleA=None):
+ """
+ Parameters
+ ----------
+ widthA : float, optional, default : 1.0
+ Width of the bracket
+
+ lengthA : float, optional, default : 0.2
+ Length of the bracket
+
+ angleA : float, optional, default : None
+ Angle between the bracket and the line
+ """
+
+ super(ArrowStyle.BracketA, self).__init__(True, None,
+ widthA=widthA,
+ lengthA=lengthA,
+ angleA=angleA)
+
+ _style_list["]-"] = BracketA
+
+ class BracketB(_Bracket):
+ """
+        An arrow with a bracket ([) at its end.
+ """
+
+ def __init__(self, widthB=1., lengthB=0.2, angleB=None):
+ """
+ Parameters
+ ----------
+ widthB : float, optional, default : 1.0
+ Width of the bracket
+
+ lengthB : float, optional, default : 0.2
+ Length of the bracket
+
+ angleB : float, optional, default : None
+ Angle between the bracket and the line
+ """
+
+ super(ArrowStyle.BracketB, self).__init__(None, True,
+ widthB=widthB,
+ lengthB=lengthB,
+ angleB=angleB)
+
+ _style_list["-["] = BracketB
+
+ class BarAB(_Bracket):
+ """
+        An arrow with a bar (|) at both ends.
+ """
+
+ def __init__(self,
+ widthA=1., angleA=None,
+ widthB=1., angleB=None):
+ """
+ Parameters
+ ----------
+ widthA : float, optional, default : 1.0
+ Width of the bracket
+
+ angleA : float, optional, default : None
+ Angle between the bracket and the line
+
+ widthB : float, optional, default : 1.0
+ Width of the bracket
+
+ angleB : float, optional, default : None
+ Angle between the bracket and the line
+ """
+
+ super(ArrowStyle.BarAB, self).__init__(
+ True, True, widthA=widthA, lengthA=0, angleA=angleA,
+ widthB=widthB, lengthB=0, angleB=angleB)
+
+ _style_list["|-|"] = BarAB
+
+ class Simple(_Base):
+ """
+ A simple arrow. Only works with a quadratic bezier curve.
+ """
+
+ def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
+ """
+ Parameters
+ ----------
+ head_length : float, optional, default : 0.5
+ Length of the arrow head
+
+ head_width : float, optional, default : 0.5
+ Width of the arrow head
+
+ tail_width : float, optional, default : 0.2
+ Width of the arrow tail
+ """
+
+ self.head_length, self.head_width, self.tail_width = \
+ head_length, head_width, tail_width
+ super(ArrowStyle.Simple, self).__init__()
+
+ def transmute(self, path, mutation_size, linewidth):
+
+ x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
+
+ # divide the path into a head and a tail
+ head_length = self.head_length * mutation_size
+ in_f = inside_circle(x2, y2, head_length)
+ arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
+
+ from .bezier import NonIntersectingPathException
+
+ try:
+ arrow_out, arrow_in = \
+ split_bezier_intersecting_with_closedpath(arrow_path,
+ in_f,
+ tolerence=0.01)
+ except NonIntersectingPathException:
+                # if this happens, fall back to a straight line of length
+                # head_length.
+ x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
+ x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
+ arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
+ arrow_out = None
+
+ # head
+ head_width = self.head_width * mutation_size
+ head_left, head_right = make_wedged_bezier2(arrow_in,
+ head_width / 2., wm=.5)
+
+ # tail
+ if arrow_out is not None:
+ tail_width = self.tail_width * mutation_size
+ tail_left, tail_right = get_parallels(arrow_out,
+ tail_width / 2.)
+
+ patch_path = [(Path.MOVETO, tail_right[0]),
+ (Path.CURVE3, tail_right[1]),
+ (Path.CURVE3, tail_right[2]),
+ (Path.LINETO, head_right[0]),
+ (Path.CURVE3, head_right[1]),
+ (Path.CURVE3, head_right[2]),
+ (Path.CURVE3, head_left[1]),
+ (Path.CURVE3, head_left[0]),
+ (Path.LINETO, tail_left[2]),
+ (Path.CURVE3, tail_left[1]),
+ (Path.CURVE3, tail_left[0]),
+ (Path.LINETO, tail_right[0]),
+ (Path.CLOSEPOLY, tail_right[0]),
+ ]
+ else:
+ patch_path = [(Path.MOVETO, head_right[0]),
+ (Path.CURVE3, head_right[1]),
+ (Path.CURVE3, head_right[2]),
+ (Path.CURVE3, head_left[1]),
+ (Path.CURVE3, head_left[0]),
+ (Path.CLOSEPOLY, head_left[0]),
+ ]
+
+ path = Path([p for c, p in patch_path], [c for c, p in patch_path])
+
+ return path, True
+
+ _style_list["simple"] = Simple
+
+ class Fancy(_Base):
+ """
+ A fancy arrow. Only works with a quadratic bezier curve.
+ """
+
+ def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
+ """
+ Parameters
+ ----------
+ head_length : float, optional, default : 0.4
+ Length of the arrow head
+
+ head_width : float, optional, default : 0.4
+ Width of the arrow head
+
+ tail_width : float, optional, default : 0.4
+ Width of the arrow tail
+ """
+
+ self.head_length, self.head_width, self.tail_width = \
+ head_length, head_width, tail_width
+ super(ArrowStyle.Fancy, self).__init__()
+
+ def transmute(self, path, mutation_size, linewidth):
+
+ x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
+
+ # divide the path into a head and a tail
+ head_length = self.head_length * mutation_size
+ arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
+
+ from .bezier import NonIntersectingPathException
+
+ # path for head
+ in_f = inside_circle(x2, y2, head_length)
+ try:
+ path_out, path_in = \
+ split_bezier_intersecting_with_closedpath(
+ arrow_path,
+ in_f,
+ tolerence=0.01)
+ except NonIntersectingPathException:
+                # if this happens, fall back to a straight line of length
+                # head_length.
+ x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
+ x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
+ arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
+ path_head = arrow_path
+ else:
+ path_head = path_in
+
+            # path for tail (use a circle slightly smaller than the head so they overlap)
+ in_f = inside_circle(x2, y2, head_length * .8)
+ path_out, path_in = split_bezier_intersecting_with_closedpath(
+ arrow_path,
+ in_f,
+ tolerence=0.01
+ )
+ path_tail = path_out
+
+ # head
+ head_width = self.head_width * mutation_size
+ head_l, head_r = make_wedged_bezier2(path_head,
+ head_width / 2.,
+ wm=.6)
+
+ # tail
+ tail_width = self.tail_width * mutation_size
+ tail_left, tail_right = make_wedged_bezier2(path_tail,
+ tail_width * .5,
+ w1=1., wm=0.6, w2=0.3)
+
+            # start point of the tail, offset slightly from the path's begin point
+ in_f = inside_circle(x0, y0, tail_width * .3)
+ path_in, path_out = split_bezier_intersecting_with_closedpath(
+ arrow_path,
+ in_f,
+ tolerence=0.01
+ )
+ tail_start = path_in[-1]
+
+ head_right, head_left = head_r, head_l
+ patch_path = [(Path.MOVETO, tail_start),
+ (Path.LINETO, tail_right[0]),
+ (Path.CURVE3, tail_right[1]),
+ (Path.CURVE3, tail_right[2]),
+ (Path.LINETO, head_right[0]),
+ (Path.CURVE3, head_right[1]),
+ (Path.CURVE3, head_right[2]),
+ (Path.CURVE3, head_left[1]),
+ (Path.CURVE3, head_left[0]),
+ (Path.LINETO, tail_left[2]),
+ (Path.CURVE3, tail_left[1]),
+ (Path.CURVE3, tail_left[0]),
+ (Path.LINETO, tail_start),
+ (Path.CLOSEPOLY, tail_start),
+ ]
+ path = Path([p for c, p in patch_path], [c for c, p in patch_path])
+
+ return path, True
+
+ _style_list["fancy"] = Fancy
+
+ class Wedge(_Base):
+ """
+        Wedge shape. Only works with a quadratic bezier curve. The
+        begin point has a width of the tail_width and the end point has a
+        width of 0. At the middle, the width is shrink_factor*tail_width.
+ """
+
+ def __init__(self, tail_width=.3, shrink_factor=0.5):
+ """
+ Parameters
+ ----------
+ tail_width : float, optional, default : 0.3
+ Width of the tail
+
+ shrink_factor : float, optional, default : 0.5
+ Fraction of the arrow width at the middle point
+ """
+
+ self.tail_width = tail_width
+ self.shrink_factor = shrink_factor
+ super(ArrowStyle.Wedge, self).__init__()
+
+ def transmute(self, path, mutation_size, linewidth):
+
+ x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
+
+ arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
+ b_plus, b_minus = make_wedged_bezier2(
+ arrow_path,
+ self.tail_width * mutation_size / 2.,
+ wm=self.shrink_factor)
+
+ patch_path = [(Path.MOVETO, b_plus[0]),
+ (Path.CURVE3, b_plus[1]),
+ (Path.CURVE3, b_plus[2]),
+ (Path.LINETO, b_minus[2]),
+ (Path.CURVE3, b_minus[1]),
+ (Path.CURVE3, b_minus[0]),
+ (Path.CLOSEPOLY, b_minus[0]),
+ ]
+ path = Path([p for c, p in patch_path], [c for c, p in patch_path])
+
+ return path, True
+
+ _style_list["wedge"] = Wedge
+
+ if __doc__:
+ __doc__ = cbook.dedent(__doc__) % \
+ {"AvailableArrowstyles": _pprint_styles(_style_list)}
+
+
+docstring.interpd.update(
+ AvailableArrowstyles=_pprint_styles(ArrowStyle._style_list),
+ AvailableConnectorstyles=_pprint_styles(ConnectionStyle._style_list),
+)
+
+
+class FancyArrowPatch(Patch):
+ """
+ A fancy arrow patch. It draws an arrow using the :class:`ArrowStyle`.
+
+ The head and tail positions are fixed at the specified start and end points
+ of the arrow, but the size and shape (in display coordinates) of the arrow
+    do not change when the axis is moved or zoomed.
+ """
+ _edge_default = True
+
+ def __str__(self):
+
+ if self._posA_posB is not None:
+ (x1, y1), (x2, y2) = self._posA_posB
+ return self.__class__.__name__ \
+ + "(%g,%g->%g,%g)" % (x1, y1, x2, y2)
+ else:
+ return self.__class__.__name__ \
+ + "(%s)" % (str(self._path_original),)
+
+ @docstring.dedent_interpd
+ def __init__(self, posA=None, posB=None,
+ path=None,
+ arrowstyle="simple",
+ arrow_transmuter=None,
+ connectionstyle="arc3",
+ connector=None,
+ patchA=None,
+ patchB=None,
+ shrinkA=2,
+ shrinkB=2,
+ mutation_scale=1,
+ mutation_aspect=None,
+ dpi_cor=1,
+ **kwargs):
+ """
+ If *posA* and *posB* are given, a path connecting two points is
+ created according to *connectionstyle*. The path will be
+ clipped with *patchA* and *patchB* and further shrunken by
+ *shrinkA* and *shrinkB*. An arrow is drawn along this
+ resulting path using the *arrowstyle* parameter.
+
+ Alternatively if *path* is provided, an arrow is drawn along this path
+ and *patchA*, *patchB*, *shrinkA*, and *shrinkB* are ignored.
+
+ Parameters
+ ----------
+
+ posA, posB : None, tuple, optional (default: None)
+ (x,y) coordinates of arrow tail and arrow head respectively.
+
+ path : None, Path (default: None)
+ :class:`matplotlib.path.Path` instance. If provided, an arrow is
+ drawn along this path and *patchA*, *patchB*, *shrinkA*, and
+ *shrinkB* are ignored.
+
+ arrowstyle : str or ArrowStyle, optional (default: 'simple')
+ Describes how the fancy arrow will be
+            drawn. It can be a string with one of the available arrowstyle
+            names, with optional comma-separated attributes, or an
+ :class:`ArrowStyle` instance. The optional attributes are meant to
+ be scaled with the *mutation_scale*. The following arrow styles are
+ available:
+
+ %(AvailableArrowstyles)s
+
+ arrow_transmuter :
+ Ignored
+
+ connectionstyle : str, ConnectionStyle, or None, optional
+ (default: 'arc3')
+ Describes how *posA* and *posB* are connected. It can be an
+ instance of the :class:`ConnectionStyle` class or a string of the
+ connectionstyle name, with optional comma-separated attributes. The
+ following connection styles are available:
+
+ %(AvailableConnectorstyles)s
+
+ connector :
+ Ignored
+
+ patchA, patchB : None, Patch, optional (default: None)
+ Head and tail patch respectively. :class:`matplotlib.patch.Patch`
+ instance.
+
+ shrinkA, shrinkB : scalar, optional (default: 2)
+ Shrinking factor of the tail and head of the arrow respectively
+
+ mutation_scale : scalar, optional (default: 1)
+ Value with which attributes of *arrowstyle* (e.g., *head_length*)
+ will be scaled.
+
+ mutation_aspect : None, scalar, optional (default: None)
+ The height of the rectangle will be squeezed by this value before
+ the mutation and the mutated box will be stretched by the inverse
+ of it.
+
+ dpi_cor : scalar, optional (default: 1)
+ dpi_cor is currently used for linewidth-related things and shrink
+ factor. Mutation scale is affected by this.
+
+ Notes
+ -----
+ Valid kwargs are:
+ %(Patch)s
+ """
+ Patch.__init__(self, **kwargs)
+
+ if posA is not None and posB is not None and path is None:
+ self._posA_posB = [posA, posB]
+
+ if connectionstyle is None:
+ connectionstyle = "arc3"
+ self.set_connectionstyle(connectionstyle)
+
+ elif posA is None and posB is None and path is not None:
+ self._posA_posB = None
+ self._connetors = None
+ else:
+            raise ValueError("either posA and posB, or path need to be provided")
+
+ self.patchA = patchA
+ self.patchB = patchB
+ self.shrinkA = shrinkA
+ self.shrinkB = shrinkB
+
+ self._path_original = path
+
+ self.set_arrowstyle(arrowstyle)
+
+ self._mutation_scale = mutation_scale
+ self._mutation_aspect = mutation_aspect
+
+ self.set_dpi_cor(dpi_cor)
+
+ def set_dpi_cor(self, dpi_cor):
+ """
+ dpi_cor is currently used for linewidth-related things and
+ shrink factor. Mutation scale is affected by this.
+
+ Parameters
+ ----------
+ dpi_cor : scalar
+ """
+ self._dpi_cor = dpi_cor
+ self.stale = True
+
+ def get_dpi_cor(self):
+ """
+ dpi_cor is currently used for linewidth-related things and
+ shrink factor. Mutation scale is affected by this.
+
+ Returns
+ -------
+ dpi_cor : scalar
+ """
+ return self._dpi_cor
+
+ def set_positions(self, posA, posB):
+ """
+ Set the begin and end positions of the connecting path.
+
+ Parameters
+ ----------
+ posA, posB : None, tuple
+ (x,y) coordinates of arrow tail and arrow head respectively. If
+            `None`, use the current value.
+ """
+ if posA is not None:
+ self._posA_posB[0] = posA
+ if posB is not None:
+ self._posA_posB[1] = posB
+ self.stale = True
+
+ def set_patchA(self, patchA):
+ """
+ Set the tail patch.
+
+ Parameters
+ ----------
+ patchA : Patch
+ :class:`matplotlib.patch.Patch` instance.
+ """
+ self.patchA = patchA
+ self.stale = True
+
+ def set_patchB(self, patchB):
+ """
+ Set the head patch.
+
+ Parameters
+ ----------
+ patchB : Patch
+ :class:`matplotlib.patch.Patch` instance.
+ """
+ self.patchB = patchB
+ self.stale = True
+
+ def set_connectionstyle(self, connectionstyle, **kw):
+ """
+ Set the connection style. Old attributes are forgotten.
+
+ Parameters
+ ----------
+ connectionstyle : None, ConnectionStyle instance, or string
+ Can be a string with connectionstyle name with
+ optional comma-separated attributes, e.g.::
+
+ set_connectionstyle("arc,angleA=0,armA=30,rad=10")
+
+ Alternatively, the attributes can be provided as keywords, e.g.::
+
+ set_connectionstyle("arc", angleA=0,armA=30,rad=10)
+
+ Without any arguments (or with ``connectionstyle=None``), return
+ available styles as a list of strings.
+ """
+
+ if connectionstyle is None:
+ return ConnectionStyle.pprint_styles()
+
+ if (isinstance(connectionstyle, ConnectionStyle._Base) or
+ callable(connectionstyle)):
+ self._connector = connectionstyle
+ else:
+ self._connector = ConnectionStyle(connectionstyle, **kw)
+ self.stale = True
+
+ def get_connectionstyle(self):
+ """
+ Return the :class:`ConnectionStyle` instance.
+ """
+ return self._connector
+
+ def set_arrowstyle(self, arrowstyle=None, **kw):
+ """
+ Set the arrow style. Old attributes are forgotten. Without arguments
+        (or with ``arrowstyle=None``) returns available arrow styles as a list of
+ strings.
+
+ Parameters
+ ----------
+ arrowstyle : None, ArrowStyle, str, optional (default: None)
+ Can be a string with arrowstyle name with optional comma-separated
+ attributes, e.g.::
+
+ set_arrowstyle("Fancy,head_length=0.2")
+
+ Alternatively attributes can be provided as keywords, e.g.::
+
+ set_arrowstyle("fancy", head_length=0.2)
+
+ """
+
+ if arrowstyle is None:
+ return ArrowStyle.pprint_styles()
+
+ if isinstance(arrowstyle, ArrowStyle._Base):
+ self._arrow_transmuter = arrowstyle
+ else:
+ self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)
+ self.stale = True
+
+ def get_arrowstyle(self):
+ """
+ Return the arrowstyle object.
+ """
+ return self._arrow_transmuter
+
+ def set_mutation_scale(self, scale):
+ """
+ Set the mutation scale.
+
+ Parameters
+ ----------
+ scale : scalar
+ """
+ self._mutation_scale = scale
+ self.stale = True
+
+ def get_mutation_scale(self):
+ """
+ Return the mutation scale.
+
+ Returns
+ -------
+ scale : scalar
+ """
+ return self._mutation_scale
+
+ def set_mutation_aspect(self, aspect):
+ """
+ Set the aspect ratio of the bbox mutation.
+
+ Parameters
+ ----------
+ aspect : scalar
+ """
+ self._mutation_aspect = aspect
+ self.stale = True
+
+ def get_mutation_aspect(self):
+ """
+ Return the aspect ratio of the bbox mutation.
+ """
+ return self._mutation_aspect
+
+ def get_path(self):
+ """
+        Return the path of the arrow in data coordinates. Use the
+        get_path_in_displaycoord() method to retrieve the arrow path
+ in display coordinates.
+ """
+ _path, fillable = self.get_path_in_displaycoord()
+
+ if cbook.iterable(fillable):
+ _path = concatenate_paths(_path)
+
+ return self.get_transform().inverted().transform_path(_path)
+
+ def get_path_in_displaycoord(self):
+ """
+ Return the mutated path of the arrow in display coordinates.
+ """
+
+ dpi_cor = self.get_dpi_cor()
+
+ if self._posA_posB is not None:
+ posA = self.get_transform().transform_point(self._posA_posB[0])
+ posB = self.get_transform().transform_point(self._posA_posB[1])
+ _path = self.get_connectionstyle()(posA, posB,
+ patchA=self.patchA,
+ patchB=self.patchB,
+ shrinkA=self.shrinkA * dpi_cor,
+ shrinkB=self.shrinkB * dpi_cor
+ )
+ else:
+ _path = self.get_transform().transform_path(self._path_original)
+
+ _path, fillable = self.get_arrowstyle()(
+ _path,
+ self.get_mutation_scale() * dpi_cor,
+ self.get_linewidth() * dpi_cor,
+ self.get_mutation_aspect())
+
+ # if not fillable:
+ # self._fill = False
+
+ return _path, fillable
+
+ def draw(self, renderer):
+ if not self.get_visible():
+ return
+
+ renderer.open_group('patch', self.get_gid())
+ gc = renderer.new_gc()
+
+ gc.set_foreground(self._edgecolor, isRGBA=True)
+
+ lw = self._linewidth
+ if self._edgecolor[3] == 0:
+ lw = 0
+ gc.set_linewidth(lw)
+ gc.set_dashes(self._dashoffset, self._dashes)
+
+ gc.set_antialiased(self._antialiased)
+ self._set_gc_clip(gc)
+ gc.set_capstyle('round')
+ gc.set_snap(self.get_snap())
+
+ rgbFace = self._facecolor
+ if rgbFace[3] == 0:
+ rgbFace = None # (some?) renderers expect this as no-fill signal
+
+ gc.set_alpha(self._alpha)
+
+ if self._hatch:
+ gc.set_hatch(self._hatch)
+ if self._hatch_color is not None:
+ try:
+ gc.set_hatch_color(self._hatch_color)
+ except AttributeError:
+ # if we end up with a GC that does not have this method
+ warnings.warn("Your backend does not support setting the "
+ "hatch color.")
+
+ if self.get_sketch_params() is not None:
+ gc.set_sketch_params(*self.get_sketch_params())
+
+        # FIXME : dpi_cor is for the dpi-dependency of the
+ # linewidth. There could be room for improvement.
+ #
+ # dpi_cor = renderer.points_to_pixels(1.)
+ self.set_dpi_cor(renderer.points_to_pixels(1.))
+ path, fillable = self.get_path_in_displaycoord()
+
+ if not cbook.iterable(fillable):
+ path = [path]
+ fillable = [fillable]
+
+ affine = transforms.IdentityTransform()
+
+ if self.get_path_effects():
+ from matplotlib.patheffects import PathEffectRenderer
+ renderer = PathEffectRenderer(self.get_path_effects(), renderer)
+
+ for p, f in zip(path, fillable):
+ if f:
+ renderer.draw_path(gc, p, affine, rgbFace)
+ else:
+ renderer.draw_path(gc, p, affine, None)
+
+ gc.restore()
+ renderer.close_group('patch')
+ self.stale = False
+
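+# A minimal usage sketch for FancyArrowPatch (illustrative only, not
+# executed as part of this module), assuming a standard pyplot setup:
+#
+#     import matplotlib.pyplot as plt
+#     from matplotlib.patches import Circle, FancyArrowPatch
+#
+#     fig, ax = plt.subplots()
+#     c = Circle((0.8, 0.8), 0.05, fill=False)
+#     ax.add_patch(c)
+#     # the connecting path is clipped by patchB and shrunk by shrinkA/B
+#     arrow = FancyArrowPatch((0.2, 0.2), (0.8, 0.8),
+#                             arrowstyle="-|>",
+#                             connectionstyle="arc3,rad=0.3",
+#                             patchB=c, shrinkA=5, shrinkB=5,
+#                             mutation_scale=20)
+#     ax.add_patch(arrow)
+#     plt.show()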
+
+class ConnectionPatch(FancyArrowPatch):
+ """
+    A :class:`~matplotlib.patches.ConnectionPatch` draws a connecting
+    line between two points (possibly in different axes).
+ """
+ def __str__(self):
+ return "ConnectionPatch((%g,%g),(%g,%g))" % \
+ (self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])
+
+ @docstring.dedent_interpd
+ def __init__(self, xyA, xyB, coordsA, coordsB=None,
+ axesA=None, axesB=None,
+ arrowstyle="-",
+ arrow_transmuter=None,
+ connectionstyle="arc3",
+ connector=None,
+ patchA=None,
+ patchB=None,
+ shrinkA=0.,
+ shrinkB=0.,
+ mutation_scale=10.,
+ mutation_aspect=None,
+ clip_on=False,
+ dpi_cor=1.,
+ **kwargs):
+ """
+ Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*
+
+
+ Valid keys are
+
+
+ =============== ======================================================
+ Key Description
+ =============== ======================================================
+ arrowstyle the arrow style
+ connectionstyle the connection style
+ relpos default is (0.5, 0.5)
+ patchA default is bounding box of the text
+ patchB default is None
+ shrinkA default is 2 points
+ shrinkB default is 2 points
+ mutation_scale default is text size (in points)
+ mutation_aspect default is 1.
+ ? any key for :class:`matplotlib.patches.PathPatch`
+ =============== ======================================================
+
+
+ *coordsA* and *coordsB* are strings that indicate the
+ coordinates of *xyA* and *xyB*.
+
+ ================= ===================================================
+ Property Description
+ ================= ===================================================
+ 'figure points' points from the lower left corner of the figure
+ 'figure pixels' pixels from the lower left corner of the figure
+        'figure fraction'  0,0 is lower left of figure and 1,1 is upper right
+ 'axes points' points from lower left corner of axes
+ 'axes pixels' pixels from lower left corner of axes
+        'axes fraction'    0,0 is lower left of axes and 1,1 is upper right
+ 'data' use the coordinate system of the object being
+ annotated (default)
+ 'offset points' Specify an offset (in points) from the *xy* value
+
+ 'polar' you can specify *theta*, *r* for the annotation,
+ even in cartesian plots. Note that if you
+ are using a polar axes, you do not need
+ to specify polar for the coordinate
+ system since that is the native "data" coordinate
+ system.
+ ================= ===================================================
+
+ """
+ if coordsB is None:
+ coordsB = coordsA
+ # we'll draw ourself after the artist we annotate by default
+ self.xy1 = xyA
+ self.xy2 = xyB
+ self.coords1 = coordsA
+ self.coords2 = coordsB
+
+ self.axesA = axesA
+ self.axesB = axesB
+
+ FancyArrowPatch.__init__(self,
+ posA=(0, 0), posB=(1, 1),
+ arrowstyle=arrowstyle,
+ arrow_transmuter=arrow_transmuter,
+ connectionstyle=connectionstyle,
+ connector=connector,
+ patchA=patchA,
+ patchB=patchB,
+ shrinkA=shrinkA,
+ shrinkB=shrinkB,
+ mutation_scale=mutation_scale,
+ mutation_aspect=mutation_aspect,
+ clip_on=clip_on,
+ dpi_cor=dpi_cor,
+ **kwargs)
+
+ # if True, draw annotation only if self.xy is inside the axes
+ self._annotation_clip = None
+
+ def _get_xy(self, x, y, s, axes=None):
+ """
+        Calculate the pixel position of the given point.
+ """
+
+ if axes is None:
+ axes = self.axes
+
+ if s == 'data':
+ trans = axes.transData
+ x = float(self.convert_xunits(x))
+ y = float(self.convert_yunits(y))
+ return trans.transform_point((x, y))
+ elif s == 'offset points':
+ # convert the data point
+ dx, dy = self.xy
+
+ # prevent recursion
+ if self.xycoords == 'offset points':
+ return self._get_xy(dx, dy, 'data')
+
+ dx, dy = self._get_xy(dx, dy, self.xycoords)
+
+ # convert the offset
+ dpi = self.figure.get_dpi()
+ x *= dpi / 72.
+ y *= dpi / 72.
+
+ # add the offset to the data point
+ x += dx
+ y += dy
+
+ return x, y
+ elif s == 'polar':
+ theta, r = x, y
+ x = r * np.cos(theta)
+ y = r * np.sin(theta)
+ trans = axes.transData
+ return trans.transform_point((x, y))
+ elif s == 'figure points':
+ # points from the lower left corner of the figure
+ dpi = self.figure.dpi
+ l, b, w, h = self.figure.bbox.bounds
+ r = l + w
+ t = b + h
+
+ x *= dpi / 72.
+ y *= dpi / 72.
+ if x < 0:
+ x = r + x
+ if y < 0:
+ y = t + y
+ return x, y
+ elif s == 'figure pixels':
+ # pixels from the lower left corner of the figure
+ l, b, w, h = self.figure.bbox.bounds
+ r = l + w
+ t = b + h
+ if x < 0:
+ x = r + x
+ if y < 0:
+ y = t + y
+ return x, y
+ elif s == 'figure fraction':
+ # (0,0) is lower left, (1,1) is upper right of figure
+ trans = self.figure.transFigure
+ return trans.transform_point((x, y))
+ elif s == 'axes points':
+ # points from the lower left corner of the axes
+ dpi = self.figure.dpi
+ l, b, w, h = axes.bbox.bounds
+ r = l + w
+ t = b + h
+ if x < 0:
+ x = r + x * dpi / 72.
+ else:
+ x = l + x * dpi / 72.
+ if y < 0:
+ y = t + y * dpi / 72.
+ else:
+ y = b + y * dpi / 72.
+ return x, y
+ elif s == 'axes pixels':
+            # pixels from the lower left corner of the axes
+
+ l, b, w, h = axes.bbox.bounds
+ r = l + w
+ t = b + h
+ if x < 0:
+ x = r + x
+ else:
+ x = l + x
+ if y < 0:
+ y = t + y
+ else:
+ y = b + y
+ return x, y
+ elif s == 'axes fraction':
+            # (0,0) is lower left, (1,1) is upper right of axes
+ trans = axes.transAxes
+ return trans.transform_point((x, y))
+
+ def set_annotation_clip(self, b):
+ """
+        Set the *annotation_clip* attribute.
+
+ * True: the annotation will only be drawn when self.xy is inside the
+ axes.
+ * False: the annotation will always be drawn regardless of its
+ position.
+ * None: the self.xy will be checked only if *xycoords* is "data"
+ """
+ self._annotation_clip = b
+ self.stale = True
+
+ def get_annotation_clip(self):
+ """
+ Return *annotation_clip* attribute.
+ See :meth:`set_annotation_clip` for the meaning of return values.
+ """
+ return self._annotation_clip
+
+ def get_path_in_displaycoord(self):
+ """
+        Return the mutated path of the arrow in display coordinates.
+ """
+
+ dpi_cor = self.get_dpi_cor()
+
+ x, y = self.xy1
+ posA = self._get_xy(x, y, self.coords1, self.axesA)
+
+ x, y = self.xy2
+ posB = self._get_xy(x, y, self.coords2, self.axesB)
+
+ _path = self.get_connectionstyle()(posA, posB,
+ patchA=self.patchA,
+ patchB=self.patchB,
+ shrinkA=self.shrinkA * dpi_cor,
+ shrinkB=self.shrinkB * dpi_cor
+ )
+
+ _path, fillable = self.get_arrowstyle()(
+ _path,
+ self.get_mutation_scale() * dpi_cor,
+ self.get_linewidth() * dpi_cor,
+ self.get_mutation_aspect()
+ )
+
+ return _path, fillable
+
+ def _check_xy(self, renderer):
+ """
+        Check whether the annotation needs to be drawn.
+ """
+
+ b = self.get_annotation_clip()
+
+ if b or (b is None and self.coords1 == "data"):
+ x, y = self.xy1
+ xy_pixel = self._get_xy(x, y, self.coords1, self.axesA)
+ if not self.axes.contains_point(xy_pixel):
+ return False
+
+ if b or (b is None and self.coords2 == "data"):
+ x, y = self.xy2
+ xy_pixel = self._get_xy(x, y, self.coords2, self.axesB)
+ if self.axesB is None:
+ axes = self.axes
+ else:
+ axes = self.axesB
+ if not axes.contains_point(xy_pixel):
+ return False
+
+ return True
+
+ def draw(self, renderer):
+ """
+ Draw.
+ """
+
+ if renderer is not None:
+ self._renderer = renderer
+ if not self.get_visible():
+ return
+
+ if not self._check_xy(renderer):
+ return
+
+ FancyArrowPatch.draw(self, renderer)
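+
+# A minimal usage sketch for ConnectionPatch (illustrative only, not
+# executed here), connecting a data point in one axes to a data point in
+# another, assuming a standard pyplot setup:
+#
+#     import matplotlib.pyplot as plt
+#     from matplotlib.patches import ConnectionPatch
+#
+#     fig, (ax1, ax2) = plt.subplots(1, 2)
+#     con = ConnectionPatch(xyA=(0.3, 0.4), coordsA="data", axesA=ax1,
+#                           xyB=(0.6, 0.7), coordsB="data", axesB=ax2,
+#                           arrowstyle="->")
+#     ax2.add_artist(con)
+#     plt.show()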
diff --git a/contrib/python/matplotlib/py2/matplotlib/path.py b/contrib/python/matplotlib/py2/matplotlib/path.py
new file mode 100644
index 00000000000..77d752ec240
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/path.py
@@ -0,0 +1,1028 @@
+r"""
+A module for dealing with the polylines used throughout Matplotlib.
+
+The primary class for polyline handling in Matplotlib is `Path`. Almost all
+vector drawing makes use of `Path`\s somewhere in the drawing pipeline.
+
+Whilst a `Path` instance itself cannot be drawn, some `.Artist` subclasses,
+such as `.PathPatch` and `.PathCollection`, can be used for convenient `Path`
+visualisation.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from weakref import WeakValueDictionary
+
+import numpy as np
+
+from . import _path, rcParams
+from .cbook import (_to_unmasked_float_array, simple_linear_interpolation,
+ maxdict)
+
+
+class Path(object):
+ """
+ :class:`Path` represents a series of possibly disconnected,
+ possibly closed, line and curve segments.
+
+ The underlying storage is made up of two parallel numpy arrays:
+ - *vertices*: an Nx2 float array of vertices
+ - *codes*: an N-length uint8 array of vertex types
+
+ These two arrays always have the same length in the first
+ dimension. For example, to represent a cubic curve, you must
+    provide three vertices as well as three codes ``CURVE4``.
+
+ The code types are:
+
+ - ``STOP`` : 1 vertex (ignored)
+ A marker for the end of the entire path (currently not
+ required and ignored)
+
+ - ``MOVETO`` : 1 vertex
+ Pick up the pen and move to the given vertex.
+
+ - ``LINETO`` : 1 vertex
+ Draw a line from the current position to the given vertex.
+
+ - ``CURVE3`` : 1 control point, 1 endpoint
+ Draw a quadratic Bezier curve from the current position,
+ with the given control point, to the given end point.
+
+ - ``CURVE4`` : 2 control points, 1 endpoint
+ Draw a cubic Bezier curve from the current position, with
+ the given control points, to the given end point.
+
+ - ``CLOSEPOLY`` : 1 vertex (ignored)
+ Draw a line segment to the start point of the current
+ polyline.
+
+ Users of Path objects should not access the vertices and codes
+ arrays directly. Instead, they should use :meth:`iter_segments`
+ or :meth:`cleaned` to get the vertex/code pairs. This is important,
+ since many :class:`Path` objects, as an optimization, do not store a
+    *codes* array at all, but have a default one provided for them by
+ :meth:`iter_segments`.
+
+ Some behavior of Path objects can be controlled by rcParams. See
+ the rcParams whose keys contain 'path.'.
+
+ .. note::
+
+ The vertices and codes arrays should be treated as
+ immutable -- there are a number of optimizations and assumptions
+ made up front in the constructor that will not change when the
+ data changes.
+
+ """
+
+ # Path codes
+ STOP = 0 # 1 vertex
+ MOVETO = 1 # 1 vertex
+ LINETO = 2 # 1 vertex
+ CURVE3 = 3 # 2 vertices
+ CURVE4 = 4 # 3 vertices
+ CLOSEPOLY = 79 # 1 vertex
+
+ #: A dictionary mapping Path codes to the number of vertices that the
+ #: code expects.
+ NUM_VERTICES_FOR_CODE = {STOP: 1,
+ MOVETO: 1,
+ LINETO: 1,
+ CURVE3: 2,
+ CURVE4: 3,
+ CLOSEPOLY: 1}
+
+ code_type = np.uint8
+
+ def __init__(self, vertices, codes=None, _interpolation_steps=1,
+ closed=False, readonly=False):
+ """
+ Create a new path with the given vertices and codes.
+
+ Parameters
+ ----------
+ vertices : array_like
+ The ``(n, 2)`` float array, masked array or sequence of pairs
+ representing the vertices of the path.
+
+ If *vertices* contains masked values, they will be converted
+ to NaNs which are then handled correctly by the Agg
+ PathIterator and other consumers of path data, such as
+ :meth:`iter_segments`.
+ codes : {None, array_like}, optional
+            n-length array of integers representing the codes of the path.
+ If not None, codes must be the same length as vertices.
+ If None, *vertices* will be treated as a series of line segments.
+ _interpolation_steps : int, optional
+ Used as a hint to certain projections, such as Polar, that this
+ path should be linearly interpolated immediately before drawing.
+ This attribute is primarily an implementation detail and is not
+ intended for public use.
+ closed : bool, optional
+ If *codes* is None and closed is True, vertices will be treated as
+ line segments of a closed polygon.
+ readonly : bool, optional
+ Makes the path behave in an immutable way and sets the vertices
+ and codes as read-only arrays.
+ """
+ vertices = _to_unmasked_float_array(vertices)
+ if (vertices.ndim != 2) or (vertices.shape[1] != 2):
+ raise ValueError(
+ "'vertices' must be a 2D list or array with shape Nx2")
+
+ if codes is not None:
+ codes = np.asarray(codes, self.code_type)
+ if (codes.ndim != 1) or len(codes) != len(vertices):
+ raise ValueError("'codes' must be a 1D list or array with the "
+ "same length of 'vertices'")
+ if len(codes) and codes[0] != self.MOVETO:
+ raise ValueError("The first element of 'code' must be equal "
+ "to 'MOVETO' ({})".format(self.MOVETO))
+ elif closed:
+ codes = np.empty(len(vertices), dtype=self.code_type)
+ codes[0] = self.MOVETO
+ codes[1:-1] = self.LINETO
+ codes[-1] = self.CLOSEPOLY
+
+ self._vertices = vertices
+ self._codes = codes
+ self._interpolation_steps = _interpolation_steps
+ self._update_values()
+
+ if readonly:
+ self._vertices.flags.writeable = False
+ if self._codes is not None:
+ self._codes.flags.writeable = False
+ self._readonly = True
+ else:
+ self._readonly = False
+
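+    # A minimal construction sketch (illustrative only, not executed here):
+    # *vertices* and *codes* are parallel arrays; with ``codes=None`` the
+    # vertices are treated as a simple polyline.
+    #
+    #     from matplotlib.path import Path
+    #
+    #     verts = [(0., 0.), (1., 0.), (1., 1.), (0., 0.)]
+    #     codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
+    #     triangle = Path(verts, codes)
+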
+ @classmethod
+ def _fast_from_codes_and_verts(cls, verts, codes, internals=None):
+ """
+ Creates a Path instance without the expense of calling the constructor
+
+ Parameters
+ ----------
+ verts : numpy array
+ codes : numpy array
+ internals : dict or None
+ The attributes that the resulting path should have.
+ Allowed keys are ``readonly``, ``should_simplify``,
+ ``simplify_threshold``, ``has_nonfinite`` and
+ ``interpolation_steps``.
+
+ """
+ internals = internals or {}
+ pth = cls.__new__(cls)
+ pth._vertices = _to_unmasked_float_array(verts)
+ pth._codes = codes
+ pth._readonly = internals.pop('readonly', False)
+ pth.should_simplify = internals.pop('should_simplify', True)
+ pth.simplify_threshold = (
+ internals.pop('simplify_threshold',
+ rcParams['path.simplify_threshold'])
+ )
+ pth._has_nonfinite = internals.pop('has_nonfinite', False)
+ pth._interpolation_steps = internals.pop('interpolation_steps', 1)
+ if internals:
+ raise ValueError('Unexpected internals provided to '
+ '_fast_from_codes_and_verts: '
+ '{0}'.format('\n *'.join(internals)))
+ return pth
+
+ def _update_values(self):
+ self._simplify_threshold = rcParams['path.simplify_threshold']
+ self._should_simplify = (
+ self._simplify_threshold > 0 and
+ rcParams['path.simplify'] and
+ len(self._vertices) >= 128 and
+ (self._codes is None or np.all(self._codes <= Path.LINETO))
+ )
+ self._has_nonfinite = not np.isfinite(self._vertices).all()
+
+ @property
+ def vertices(self):
+ """
+ The list of vertices in the `Path` as an Nx2 numpy array.
+ """
+ return self._vertices
+
+ @vertices.setter
+ def vertices(self, vertices):
+ if self._readonly:
+ raise AttributeError("Can't set vertices on a readonly Path")
+ self._vertices = vertices
+ self._update_values()
+
+ @property
+ def codes(self):
+ """
+ The list of codes in the `Path` as a 1-D numpy array. Each
+ code is one of `STOP`, `MOVETO`, `LINETO`, `CURVE3`, `CURVE4`
+ or `CLOSEPOLY`. For codes that correspond to more than one
+ vertex (`CURVE3` and `CURVE4`), that code will be repeated so
+ that the length of `self.vertices` and `self.codes` is always
+ the same.
+ """
+ return self._codes
+
+ @codes.setter
+ def codes(self, codes):
+ if self._readonly:
+ raise AttributeError("Can't set codes on a readonly Path")
+ self._codes = codes
+ self._update_values()
+
+ @property
+ def simplify_threshold(self):
+ """
+ The fraction of a pixel difference below which vertices will
+ be simplified out.
+ """
+ return self._simplify_threshold
+
+ @simplify_threshold.setter
+ def simplify_threshold(self, threshold):
+ self._simplify_threshold = threshold
+
+ @property
+ def has_nonfinite(self):
+ """
+ `True` if the vertices array has nonfinite values.
+ """
+ return self._has_nonfinite
+
+ @property
+ def should_simplify(self):
+ """
+ `True` if the vertices array should be simplified.
+ """
+ return self._should_simplify
+
+ @should_simplify.setter
+ def should_simplify(self, should_simplify):
+ self._should_simplify = should_simplify
+
+ @property
+ def readonly(self):
+ """
+ `True` if the `Path` is read-only.
+ """
+ return self._readonly
+
+ def __copy__(self):
+ """
+ Returns a shallow copy of the `Path`, which will share the
+ vertices and codes with the source `Path`.
+ """
+ import copy
+ return copy.copy(self)
+
+ copy = __copy__
+
+ def __deepcopy__(self, memo=None):
+ """
+ Returns a deepcopy of the `Path`. The `Path` will not be
+ readonly, even if the source `Path` is.
+ """
+ try:
+ codes = self.codes.copy()
+ except AttributeError:
+ codes = None
+ return self.__class__(
+ self.vertices.copy(), codes,
+ _interpolation_steps=self._interpolation_steps)
+
+ deepcopy = __deepcopy__
+
+ @classmethod
+ def make_compound_path_from_polys(cls, XY):
+ """
+        Make a compound path object to draw a number
+        of polygons with equal numbers of sides. XY is a (numpolys x
+        numsides x 2) numpy array of vertices. The returned object is a
+        :class:`Path`.
+
+ .. plot:: gallery/api/histogram_path.py
+
+ """
+
+ # for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for
+ # the CLOSEPOLY; the vert for the closepoly is ignored but we still
+ # need it to keep the codes aligned with the vertices
+ numpolys, numsides, two = XY.shape
+ if two != 2:
+ raise ValueError("The third dimension of 'XY' must be 2")
+ stride = numsides + 1
+ nverts = numpolys * stride
+ verts = np.zeros((nverts, 2))
+ codes = np.ones(nverts, int) * cls.LINETO
+ codes[0::stride] = cls.MOVETO
+ codes[numsides::stride] = cls.CLOSEPOLY
+ for i in range(numsides):
+ verts[i::stride] = XY[:, i]
+
+ return cls(verts, codes)
+
+ @classmethod
+ def make_compound_path(cls, *args):
+ """Make a compound path from a list of Path objects."""
+ # Handle an empty list in args (i.e. no args).
+ if not args:
+ return Path(np.empty([0, 2], dtype=np.float32))
+
+ lengths = [len(x) for x in args]
+ total_length = sum(lengths)
+
+ vertices = np.vstack([x.vertices for x in args])
+ vertices.reshape((total_length, 2))
+
+ codes = np.empty(total_length, dtype=cls.code_type)
+ i = 0
+ for path in args:
+ if path.codes is None:
+ codes[i] = cls.MOVETO
+ codes[i + 1:i + len(path.vertices)] = cls.LINETO
+ else:
+ codes[i:i + len(path.codes)] = path.codes
+ i += len(path.vertices)
+
+ return cls(vertices, codes)
+
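+    # An illustrative sketch (not executed here): separate paths can be merged
+    # into one compound Path, e.g. two rectangles drawn as a single patch.
+    #
+    #     from matplotlib.path import Path
+    #
+    #     r1 = Path.unit_rectangle()
+    #     r2 = Path(r1.vertices + 2, r1.codes)  # same shape, shifted by (2, 2)
+    #     compound = Path.make_compound_path(r1, r2)
+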
+ def __repr__(self):
+ return "Path(%r, %r)" % (self.vertices, self.codes)
+
+ def __len__(self):
+ return len(self.vertices)
+
+ def iter_segments(self, transform=None, remove_nans=True, clip=None,
+ snap=False, stroke_width=1.0, simplify=None,
+ curves=True, sketch=None):
+ """
+ Iterates over all of the curve segments in the path. Each
+ iteration returns a 2-tuple (*vertices*, *code*), where
+        *vertices* is a sequence of 1 to 3 coordinate pairs, and *code* is
+ one of the :class:`Path` codes.
+
+ Additionally, this method can provide a number of standard
+ cleanups and conversions to the path.
+
+ Parameters
+ ----------
+ transform : None or :class:`~matplotlib.transforms.Transform` instance
+ If not None, the given affine transformation will
+ be applied to the path.
+ remove_nans : {False, True}, optional
+ If True, will remove all NaNs from the path and
+ insert MOVETO commands to skip over them.
+ clip : None or sequence, optional
+ If not None, must be a four-tuple (x1, y1, x2, y2)
+ defining a rectangle in which to clip the path.
+ snap : None or bool, optional
+ If None, auto-snap to pixels, to reduce
+ fuzziness of rectilinear lines. If True, force snapping, and
+ if False, don't snap.
+ stroke_width : float, optional
+ The width of the stroke being drawn. Needed
+ as a hint for the snapping algorithm.
+ simplify : None or bool, optional
+ If True, perform simplification, to remove
+ vertices that do not affect the appearance of the path. If
+ False, perform no simplification. If None, use the
+ should_simplify member variable. See also the rcParams
+ path.simplify and path.simplify_threshold.
+ curves : {True, False}, optional
+ If True, curve segments will be returned as curve
+ segments. If False, all curves will be converted to line
+ segments.
+ sketch : None or sequence, optional
+ If not None, must be a 3-tuple of the form
+ (scale, length, randomness), representing the sketch
+ parameters.
+ """
+ if not len(self):
+ return
+
+ cleaned = self.cleaned(transform=transform,
+ remove_nans=remove_nans, clip=clip,
+ snap=snap, stroke_width=stroke_width,
+ simplify=simplify, curves=curves,
+ sketch=sketch)
+ vertices = cleaned.vertices
+ codes = cleaned.codes
+ len_vertices = vertices.shape[0]
+
+ # Cache these object lookups for performance in the loop.
+ NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE
+ STOP = self.STOP
+
+ i = 0
+ while i < len_vertices:
+ code = codes[i]
+ if code == STOP:
+ return
+ else:
+ num_vertices = NUM_VERTICES_FOR_CODE[code]
+ curr_vertices = vertices[i:i+num_vertices].flatten()
+ yield curr_vertices, code
+ i += num_vertices
+
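+    # A small illustrative sketch (not executed here), assuming ``path`` is an
+    # existing Path instance: iter_segments yields (vertices, code) pairs that
+    # can be consumed directly, e.g. to count the segment types.
+    #
+    #     from collections import Counter
+    #     counts = Counter(code for _verts, code in path.iter_segments())
+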
+ def cleaned(self, transform=None, remove_nans=False, clip=None,
+ quantize=False, simplify=False, curves=False,
+ stroke_width=1.0, snap=False, sketch=None):
+ """
+        Cleans up the path according to the parameters, returning a new
+ Path instance.
+
+ .. seealso::
+
+ See :meth:`iter_segments` for details of the keyword arguments.
+
+ Returns
+ -------
+ Path instance with cleaned up vertices and codes.
+
+ """
+ vertices, codes = _path.cleanup_path(self, transform,
+ remove_nans, clip,
+ snap, stroke_width,
+ simplify, curves, sketch)
+ internals = {'should_simplify': self.should_simplify and not simplify,
+ 'has_nonfinite': self.has_nonfinite and not remove_nans,
+ 'simplify_threshold': self.simplify_threshold,
+ 'interpolation_steps': self._interpolation_steps}
+ return Path._fast_from_codes_and_verts(vertices, codes, internals)
+
+ def transformed(self, transform):
+ """
+ Return a transformed copy of the path.
+
+ .. seealso::
+
+ :class:`matplotlib.transforms.TransformedPath`
+ A specialized path class that will cache the
+ transformed result and automatically update when the
+ transform changes.
+ """
+ return Path(transform.transform(self.vertices), self.codes,
+ self._interpolation_steps)
+
+ def contains_point(self, point, transform=None, radius=0.0):
+ """
+ Returns whether the (closed) path contains the given point.
+
+ If *transform* is not ``None``, the path will be transformed before
+ performing the test.
+
+ *radius* allows the path to be made slightly larger or smaller.
+ """
+ if transform is not None:
+ transform = transform.frozen()
+ # `point_in_path` does not handle nonlinear transforms, so we
+ # transform the path ourselves. If `transform` is affine, letting
+ # `point_in_path` handle the transform avoids allocating an extra
+ # buffer.
+ if transform and not transform.is_affine:
+ self = transform.transform_path(self)
+ transform = None
+ return _path.point_in_path(point[0], point[1], radius, self, transform)
+
+ def contains_points(self, points, transform=None, radius=0.0):
+ """
+ Returns a bool array which is ``True`` if the (closed) path contains
+ the corresponding point.
+
+ If *transform* is not ``None``, the path will be transformed before
+ performing the test.
+
+ *radius* allows the path to be made slightly larger or smaller.
+ """
+ if transform is not None:
+ transform = transform.frozen()
+ result = _path.points_in_path(points, radius, self, transform)
+ return result.astype('bool')
+
+ def contains_path(self, path, transform=None):
+ """
+ Returns whether this (closed) path completely contains the given path.
+
+ If *transform* is not ``None``, the path will be transformed before
+ performing the test.
+ """
+ if transform is not None:
+ transform = transform.frozen()
+ return _path.path_in_path(self, None, path, transform)
+
+ def get_extents(self, transform=None):
+ """
+ Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the
+ path.
+
+ Unlike computing the extents on the *vertices* alone, this
+ algorithm will take into account the curves and deal with
+ control points appropriately.
+ """
+ from .transforms import Bbox
+ path = self
+ if transform is not None:
+ transform = transform.frozen()
+ if not transform.is_affine:
+ path = self.transformed(transform)
+ transform = None
+ return Bbox(_path.get_path_extents(path, transform))
+
+ def intersects_path(self, other, filled=True):
+ """
+ Returns *True* if this path intersects another given path.
+
+ *filled*, when True, treats the paths as if they were filled.
+ That is, if one path completely encloses the other,
+ :meth:`intersects_path` will return True.
+ """
+ return _path.path_intersects_path(self, other, filled)
+
+ def intersects_bbox(self, bbox, filled=True):
+ """
+ Returns *True* if this path intersects a given
+ :class:`~matplotlib.transforms.Bbox`.
+
+ *filled*, when True, treats the path as if it was filled.
+ That is, if the path completely encloses the bounding box,
+ :meth:`intersects_bbox` will return True.
+
+ The bounding box is always considered filled.
+ """
+ return _path.path_intersects_rectangle(self,
+ bbox.x0, bbox.y0, bbox.x1, bbox.y1, filled)
+
+ def interpolated(self, steps):
+ """
+ Returns a new path resampled to length N x steps. Does not
+ currently handle interpolating curves.
+ """
+ if steps == 1:
+ return self
+
+ vertices = simple_linear_interpolation(self.vertices, steps)
+ codes = self.codes
+ if codes is not None:
+ new_codes = Path.LINETO * np.ones(((len(codes) - 1) * steps + 1, ))
+ new_codes[0::steps] = codes
+ else:
+ new_codes = None
+ return Path(vertices, new_codes)
+
+ def to_polygons(self, transform=None, width=0, height=0, closed_only=True):
+ """
+ Convert this path to a list of polygons or polylines. Each
+ polygon/polyline is an Nx2 array of vertices. In other words,
+ each polygon has no ``MOVETO`` instructions or curves. This
+ is useful for displaying in backends that do not support
+ compound paths or Bezier curves, such as GDK.
+
+ If *width* and *height* are both non-zero then the lines will
+ be simplified so that vertices outside of (0, 0), (width,
+ height) will be clipped.
+
+ If *closed_only* is `True` (default), only closed polygons,
+ with the last point being the same as the first point, will be
+ returned. Any unclosed polylines in the path will be
+ explicitly closed. If *closed_only* is `False`, any unclosed
+ polygons in the path will be returned as unclosed polygons,
+ and the closed polygons will be returned explicitly closed by
+ setting the last point to the same as the first point.
+ """
+ if len(self.vertices) == 0:
+ return []
+
+ if transform is not None:
+ transform = transform.frozen()
+
+ if self.codes is None and (width == 0 or height == 0):
+ vertices = self.vertices
+ if closed_only:
+ if len(vertices) < 3:
+ return []
+ elif np.any(vertices[0] != vertices[-1]):
+ vertices = list(vertices) + [vertices[0]]
+
+ if transform is None:
+ return [vertices]
+ else:
+ return [transform.transform(vertices)]
+
+ # Deal with the case where there are curves and/or multiple
+ # subpaths (using extension code)
+ return _path.convert_path_to_polygons(
+ self, transform, width, height, closed_only)
+
+ _unit_rectangle = None
+
+ @classmethod
+ def unit_rectangle(cls):
+ """
+ Return a :class:`Path` instance of the unit rectangle
+ from (0, 0) to (1, 1).
+ """
+ if cls._unit_rectangle is None:
+ cls._unit_rectangle = \
+ cls([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0],
+ [0.0, 0.0]],
+ [cls.MOVETO, cls.LINETO, cls.LINETO, cls.LINETO,
+ cls.CLOSEPOLY],
+ readonly=True)
+ return cls._unit_rectangle
+
+ _unit_regular_polygons = WeakValueDictionary()
+
+ @classmethod
+ def unit_regular_polygon(cls, numVertices):
+ """
+ Return a :class:`Path` instance for a unit regular
+ polygon with the given *numVertices* and radius of 1.0,
+ centered at (0, 0).
+ """
+ if numVertices <= 16:
+ path = cls._unit_regular_polygons.get(numVertices)
+ else:
+ path = None
+ if path is None:
+ theta = (2*np.pi/numVertices *
+ np.arange(numVertices + 1).reshape((numVertices + 1, 1)))
+ # This initial rotation is to make sure the polygon always
+ # "points-up"
+ theta += np.pi / 2.0
+ verts = np.concatenate((np.cos(theta), np.sin(theta)), 1)
+ codes = np.empty((numVertices + 1,))
+ codes[0] = cls.MOVETO
+ codes[1:-1] = cls.LINETO
+ codes[-1] = cls.CLOSEPOLY
+ path = cls(verts, codes, readonly=True)
+ if numVertices <= 16:
+ cls._unit_regular_polygons[numVertices] = path
+ return path
+
+ _unit_regular_stars = WeakValueDictionary()
+
+ @classmethod
+ def unit_regular_star(cls, numVertices, innerCircle=0.5):
+ """
+ Return a :class:`Path` for a unit regular star
+ with the given numVertices and radius of 1.0, centered at (0,
+ 0).
+ """
+ if numVertices <= 16:
+ path = cls._unit_regular_stars.get((numVertices, innerCircle))
+ else:
+ path = None
+ if path is None:
+ ns2 = numVertices * 2
+ theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
+ # This initial rotation is to make sure the polygon always
+ # "points-up"
+ theta += np.pi / 2.0
+ r = np.ones(ns2 + 1)
+ r[1::2] = innerCircle
+ verts = np.vstack((r*np.cos(theta), r*np.sin(theta))).transpose()
+ codes = np.empty((ns2 + 1,))
+ codes[0] = cls.MOVETO
+ codes[1:-1] = cls.LINETO
+ codes[-1] = cls.CLOSEPOLY
+ path = cls(verts, codes, readonly=True)
+ if numVertices <= 16:
+ cls._unit_regular_stars[(numVertices, innerCircle)] = path
+ return path
+
+ @classmethod
+ def unit_regular_asterisk(cls, numVertices):
+ """
+ Return a :class:`Path` for a unit regular
+ asterisk with the given numVertices and radius of 1.0,
+ centered at (0, 0).
+ """
+ return cls.unit_regular_star(numVertices, 0.0)
+
+ _unit_circle = None
+
+ @classmethod
+ def unit_circle(cls):
+ """
+ Return the readonly :class:`Path` of the unit circle.
+
+ For most cases, :func:`Path.circle` will be what you want.
+
+ """
+ if cls._unit_circle is None:
+ cls._unit_circle = cls.circle(center=(0, 0), radius=1,
+ readonly=True)
+ return cls._unit_circle
+
+ @classmethod
+ def circle(cls, center=(0., 0.), radius=1., readonly=False):
+ """
+ Return a Path representing a circle of a given radius and center.
+
+ Parameters
+ ----------
+ center : pair of floats
+ The center of the circle. Default ``(0, 0)``.
+ radius : float
+ The radius of the circle. Default is 1.
+ readonly : bool
+ Whether the created path should have the "readonly" argument
+ set when creating the Path instance.
+
+ Notes
+ -----
+ The circle is approximated using cubic Bezier curves. This
+ uses 8 splines around the circle using the approach presented
+ here:
+
+ Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
+ Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
+
+ """
+ MAGIC = 0.2652031
+ SQRTHALF = np.sqrt(0.5)
+ MAGIC45 = SQRTHALF * MAGIC
+
+ vertices = np.array([[0.0, -1.0],
+
+ [MAGIC, -1.0],
+ [SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
+ [SQRTHALF, -SQRTHALF],
+
+ [SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
+ [1.0, -MAGIC],
+ [1.0, 0.0],
+
+ [1.0, MAGIC],
+ [SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
+ [SQRTHALF, SQRTHALF],
+
+ [SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
+ [MAGIC, 1.0],
+ [0.0, 1.0],
+
+ [-MAGIC, 1.0],
+ [-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
+ [-SQRTHALF, SQRTHALF],
+
+ [-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
+ [-1.0, MAGIC],
+ [-1.0, 0.0],
+
+ [-1.0, -MAGIC],
+ [-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
+ [-SQRTHALF, -SQRTHALF],
+
+ [-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
+ [-MAGIC, -1.0],
+ [0.0, -1.0],
+
+ [0.0, -1.0]],
+ dtype=float)
+
+ codes = [cls.CURVE4] * 26
+ codes[0] = cls.MOVETO
+ codes[-1] = cls.CLOSEPOLY
+ return Path(vertices * radius + center, codes, readonly=readonly)
+
+ _unit_circle_righthalf = None
+
+ @classmethod
+ def unit_circle_righthalf(cls):
+ """
+ Return a :class:`Path` of the right half
+ of a unit circle. The circle is approximated using cubic Bezier
+ curves. This uses 4 splines around the circle using the approach
+ presented here:
+
+ Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
+ Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
+ """
+ if cls._unit_circle_righthalf is None:
+ MAGIC = 0.2652031
+ SQRTHALF = np.sqrt(0.5)
+ MAGIC45 = SQRTHALF * MAGIC
+
+ vertices = np.array(
+ [[0.0, -1.0],
+
+ [MAGIC, -1.0],
+ [SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
+ [SQRTHALF, -SQRTHALF],
+
+ [SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
+ [1.0, -MAGIC],
+ [1.0, 0.0],
+
+ [1.0, MAGIC],
+ [SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
+ [SQRTHALF, SQRTHALF],
+
+ [SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
+ [MAGIC, 1.0],
+ [0.0, 1.0],
+
+ [0.0, -1.0]],
+
+ float)
+
+ codes = cls.CURVE4 * np.ones(14)
+ codes[0] = cls.MOVETO
+ codes[-1] = cls.CLOSEPOLY
+
+ cls._unit_circle_righthalf = cls(vertices, codes, readonly=True)
+ return cls._unit_circle_righthalf
+
+ @classmethod
+ def arc(cls, theta1, theta2, n=None, is_wedge=False):
+ """
+ Return an arc on the unit circle from angle
+ *theta1* to angle *theta2* (in degrees).
+
+ *theta2* is unwrapped to produce the shortest arc within 360 degrees.
+ That is, if *theta2* > *theta1* + 360, the arc will be from *theta1* to
+ *theta2* - 360 and not a full circle plus some extra overlap.
+
+ If *n* is provided, it is the number of spline segments to make.
+ If *n* is not provided, the number of spline segments is
+ determined based on the delta between *theta1* and *theta2*.
+
+        Maisonobe, L. 2003. `Drawing an elliptical arc using
+ polylines, quadratic or cubic Bezier curves
+ <http://www.spaceroots.org/documents/ellipse/index.html>`_.
+ """
+ halfpi = np.pi * 0.5
+
+ eta1 = theta1
+ eta2 = theta2 - 360 * np.floor((theta2 - theta1) / 360)
+ # Ensure 2pi range is not flattened to 0 due to floating-point errors,
+ # but don't try to expand existing 0 range.
+ if theta2 != theta1 and eta2 <= eta1:
+ eta2 += 360
+ eta1, eta2 = np.deg2rad([eta1, eta2])
+
+ # number of curve segments to make
+ if n is None:
+ n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
+ if n < 1:
+ raise ValueError("n must be >= 1 or None")
+
+ deta = (eta2 - eta1) / n
+ t = np.tan(0.5 * deta)
+ alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0
+
+ steps = np.linspace(eta1, eta2, n + 1, True)
+ cos_eta = np.cos(steps)
+ sin_eta = np.sin(steps)
+
+ xA = cos_eta[:-1]
+ yA = sin_eta[:-1]
+ xA_dot = -yA
+ yA_dot = xA
+
+ xB = cos_eta[1:]
+ yB = sin_eta[1:]
+ xB_dot = -yB
+ yB_dot = xB
+
+ if is_wedge:
+ length = n * 3 + 4
+ vertices = np.zeros((length, 2), float)
+ codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
+ vertices[1] = [xA[0], yA[0]]
+ codes[0:2] = [cls.MOVETO, cls.LINETO]
+ codes[-2:] = [cls.LINETO, cls.CLOSEPOLY]
+ vertex_offset = 2
+ end = length - 2
+ else:
+ length = n * 3 + 1
+ vertices = np.empty((length, 2), float)
+ codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
+ vertices[0] = [xA[0], yA[0]]
+ codes[0] = cls.MOVETO
+ vertex_offset = 1
+ end = length
+
+ vertices[vertex_offset:end:3, 0] = xA + alpha * xA_dot
+ vertices[vertex_offset:end:3, 1] = yA + alpha * yA_dot
+ vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
+ vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
+ vertices[vertex_offset+2:end:3, 0] = xB
+ vertices[vertex_offset+2:end:3, 1] = yB
+
+ return cls(vertices, codes, readonly=True)
+
+ @classmethod
+ def wedge(cls, theta1, theta2, n=None):
+ """
+ Return a wedge of the unit circle from angle
+ *theta1* to angle *theta2* (in degrees).
+
+ *theta2* is unwrapped to produce the shortest wedge within 360 degrees.
+ That is, if *theta2* > *theta1* + 360, the wedge will be from *theta1*
+ to *theta2* - 360 and not a full circle plus some extra overlap.
+
+ If *n* is provided, it is the number of spline segments to make.
+ If *n* is not provided, the number of spline segments is
+ determined based on the delta between *theta1* and *theta2*.
+ """
+ return cls.arc(theta1, theta2, n, True)
+
+ _hatch_dict = maxdict(8)
+
+ @classmethod
+ def hatch(cls, hatchpattern, density=6):
+ """
+ Given a hatch specifier, *hatchpattern*, generates a Path that
+ can be used in a repeated hatching pattern. *density* is the
+ number of lines per unit square.
+ """
+ from matplotlib.hatch import get_path
+
+ if hatchpattern is None:
+ return None
+
+ hatch_path = cls._hatch_dict.get((hatchpattern, density))
+ if hatch_path is not None:
+ return hatch_path
+
+ hatch_path = get_path(hatchpattern, density)
+ cls._hatch_dict[(hatchpattern, density)] = hatch_path
+ return hatch_path
+
+ def clip_to_bbox(self, bbox, inside=True):
+ """
+ Clip the path to the given bounding box.
+
+ The path must be made up of one or more closed polygons. This
+ algorithm will not behave correctly for unclosed paths.
+
+ If *inside* is `True`, clip to the inside of the box, otherwise
+ to the outside of the box.
+ """
+ # Use make_compound_path_from_polys
+ verts = _path.clip_path_to_rect(self, bbox, inside)
+ paths = [Path(poly) for poly in verts]
+ return self.make_compound_path(*paths)
+
+
+def get_path_collection_extents(
+ master_transform, paths, transforms, offsets, offset_transform):
+ """
+ Given a sequence of :class:`Path` objects,
+ :class:`~matplotlib.transforms.Transform` objects and offsets, as
+ found in a :class:`~matplotlib.collections.PathCollection`,
+ returns the bounding box that encapsulates all of them.
+
+ *master_transform* is a global transformation to apply to all paths
+
+ *paths* is a sequence of :class:`Path` instances.
+
+ *transforms* is a sequence of
+ :class:`~matplotlib.transforms.Affine2D` instances.
+
+ *offsets* is a sequence of (x, y) offsets (or an Nx2 array)
+
+ *offset_transform* is a :class:`~matplotlib.transforms.Affine2D`
+ to apply to the offsets before applying the offset to the path.
+
+ The way that *paths*, *transforms* and *offsets* are combined
+ follows the same method as for collections. Each is iterated over
+ independently, so if you have 3 paths, 2 transforms and 1 offset,
+ their combinations are as follows:
+
+ (A, A, A), (B, B, A), (C, A, A)
+ """
+ from .transforms import Bbox
+ if len(paths) == 0:
+ raise ValueError("No paths provided")
+ return Bbox.from_extents(*_path.get_path_collection_extents(
+ master_transform, paths, np.atleast_3d(transforms),
+ offsets, offset_transform))
+
+
+def get_paths_extents(paths, transforms=[]):
+ """
+ Given a sequence of :class:`Path` objects and optional
+ :class:`~matplotlib.transforms.Transform` objects, returns the
+ bounding box that encapsulates all of them.
+
+ *paths* is a sequence of :class:`Path` instances.
+
+ *transforms* is an optional sequence of
+ :class:`~matplotlib.transforms.Affine2D` instances to apply to
+ each path.
+ """
+ from .transforms import Bbox, Affine2D
+ if len(paths) == 0:
+ raise ValueError("No paths provided")
+ return Bbox.from_extents(*_path.get_path_collection_extents(
+ Affine2D(), paths, transforms, [], Affine2D()))
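+
+
+def _demo_path_factories():
+    # Illustrative sketch only, not part of matplotlib's API; the helper name
+    # ``_demo_path_factories`` is hypothetical.  It shows how the Path
+    # factory classmethods above can be combined and measured.
+    star = Path.unit_regular_star(5, innerCircle=0.5)
+    wedge = Path.wedge(0, 90)             # quarter-circle wedge, in degrees
+    circle = Path.circle(center=(2, 0), radius=0.5)
+    # Bounding box covering all three paths (no extra transforms applied).
+    bbox = get_paths_extents([star, wedge, circle])
+    return star, wedge, circle, bbox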
diff --git a/contrib/python/matplotlib/py2/matplotlib/patheffects.py b/contrib/python/matplotlib/py2/matplotlib/patheffects.py
new file mode 100644
index 00000000000..c0265ec7191
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/patheffects.py
@@ -0,0 +1,393 @@
+"""
+Defines classes for path effects. The path effects are supported in
+:class:`~matplotlib.text.Text`, :class:`~matplotlib.lines.Line2D`
+and :class:`~matplotlib.patches.Patch`.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from matplotlib.backend_bases import RendererBase
+from matplotlib import colors as mcolors
+from matplotlib import patches as mpatches
+from matplotlib import transforms as mtransforms
+
+
+class AbstractPathEffect(object):
+ """
+ A base class for path effects.
+
+ Subclasses should override the ``draw_path`` method to add effect
+ functionality.
+
+ """
+ def __init__(self, offset=(0., 0.)):
+ """
+ Parameters
+ ----------
+ offset : pair of floats
+ The offset to apply to the path, measured in points.
+ """
+ self._offset = offset
+ self._offset_trans = mtransforms.Affine2D()
+
+ def _offset_transform(self, renderer, transform):
+ """Apply the offset to the given transform."""
+ offset_x = renderer.points_to_pixels(self._offset[0])
+ offset_y = renderer.points_to_pixels(self._offset[1])
+ return transform + self._offset_trans.clear().translate(offset_x,
+ offset_y)
+
+ def _update_gc(self, gc, new_gc_dict):
+ """
+        Update the given graphics context (gc) with the given
+        dictionary of properties. The keys in the dictionary are used to
+        identify the appropriate ``set_<key>`` method on the gc.
+
+ """
+ new_gc_dict = new_gc_dict.copy()
+
+ dashes = new_gc_dict.pop("dashes", None)
+ if dashes:
+ gc.set_dashes(**dashes)
+
+ for k, v in six.iteritems(new_gc_dict):
+ set_method = getattr(gc, 'set_' + k, None)
+ if not callable(set_method):
+ raise AttributeError('Unknown property {0}'.format(k))
+ set_method(v)
+ return gc
+
+ def draw_path(self, renderer, gc, tpath, affine, rgbFace=None):
+ """
+        Derived classes should override this method. The arguments are the same
+ as :meth:`matplotlib.backend_bases.RendererBase.draw_path`
+ except the first argument is a renderer.
+
+ """
+ # Get the real renderer, not a PathEffectRenderer.
+ if isinstance(renderer, PathEffectRenderer):
+ renderer = renderer._renderer
+ return renderer.draw_path(gc, tpath, affine, rgbFace)
+
+
+class PathEffectRenderer(RendererBase):
+ """
+ Implements a Renderer which contains another renderer.
+
+ This proxy then intercepts draw calls, calling the appropriate
+ :class:`AbstractPathEffect` draw method.
+
+ .. note::
+ Not all methods have been overridden on this RendererBase subclass.
+ It may be necessary to add further methods to extend the PathEffects
+ capabilities further.
+
+ """
+ def __init__(self, path_effects, renderer):
+ """
+ Parameters
+ ----------
+ path_effects : iterable of :class:`AbstractPathEffect`
+ The path effects which this renderer represents.
+ renderer : :class:`matplotlib.backend_bases.RendererBase` instance
+
+ """
+ self._path_effects = path_effects
+ self._renderer = renderer
+
+ def new_gc(self):
+ return self._renderer.new_gc()
+
+ def copy_with_path_effect(self, path_effects):
+ return self.__class__(path_effects, self._renderer)
+
+ def draw_path(self, gc, tpath, affine, rgbFace=None):
+ for path_effect in self._path_effects:
+ path_effect.draw_path(self._renderer, gc, tpath, affine,
+ rgbFace)
+
+ def draw_markers(self, gc, marker_path, marker_trans, path, *args,
+ **kwargs):
+ # We do a little shimmy so that all markers are drawn for each path
+ # effect in turn. Essentially, we induce recursion (depth 1) which is
+ # terminated once we have just a single path effect to work with.
+ if len(self._path_effects) == 1:
+ # Call the base path effect function - this uses the unoptimised
+ # approach of calling "draw_path" multiple times.
+ return RendererBase.draw_markers(self, gc, marker_path,
+ marker_trans, path, *args,
+ **kwargs)
+
+ for path_effect in self._path_effects:
+ renderer = self.copy_with_path_effect([path_effect])
+ # Recursively call this method, only next time we will only have
+ # one path effect.
+ renderer.draw_markers(gc, marker_path, marker_trans, path,
+ *args, **kwargs)
+
+ def draw_path_collection(self, gc, master_transform, paths, *args,
+ **kwargs):
+ # We do a little shimmy so that all paths are drawn for each path
+ # effect in turn. Essentially, we induce recursion (depth 1) which is
+ # terminated once we have just a single path effect to work with.
+ if len(self._path_effects) == 1:
+ # Call the base path effect function - this uses the unoptimised
+ # approach of calling "draw_path" multiple times.
+ return RendererBase.draw_path_collection(self, gc,
+ master_transform, paths,
+ *args, **kwargs)
+
+ for path_effect in self._path_effects:
+ renderer = self.copy_with_path_effect([path_effect])
+ # Recursively call this method, only next time we will only have
+ # one path effect.
+ renderer.draw_path_collection(gc, master_transform, paths,
+ *args, **kwargs)
+
+ def points_to_pixels(self, points):
+ return self._renderer.points_to_pixels(points)
+
+ def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
+ # Implements the naive text drawing as is found in RendererBase.
+ path, transform = self._get_text_path_transform(x, y, s, prop,
+ angle, ismath)
+ color = gc.get_rgb()
+ gc.set_linewidth(0.0)
+ self.draw_path(gc, path, transform, rgbFace=color)
+
+ def __getattribute__(self, name):
+ if name in ['_text2path', 'flipy', 'height', 'width']:
+ return getattr(self._renderer, name)
+ else:
+ return object.__getattribute__(self, name)
+
+
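+class _ExampleDimEffect(AbstractPathEffect):
+    # Illustrative sketch only, not part of matplotlib; the class name is
+    # hypothetical.  Subclasses of ``AbstractPathEffect`` only need to
+    # override ``draw_path``; this one redraws the path with reduced alpha,
+    # using the same copy-then-modify gc pattern as the effects below.
+    def draw_path(self, renderer, gc, tpath, affine, rgbFace=None):
+        gc0 = renderer.new_gc()
+        gc0.copy_properties(gc)
+        gc0.set_alpha(0.3)
+        trans = self._offset_transform(renderer, affine)
+        renderer.draw_path(gc0, tpath, trans, rgbFace)
+        gc0.restore()
+
+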
+class Normal(AbstractPathEffect):
+ """
+ The "identity" PathEffect.
+
+ The Normal PathEffect's sole purpose is to draw the original artist with
+ no special path effect.
+ """
+ pass
+
+
+class Stroke(AbstractPathEffect):
+ """A line based PathEffect which re-draws a stroke."""
+ def __init__(self, offset=(0, 0), **kwargs):
+ """
+ The path will be stroked with its gc updated with the given
+ keyword arguments, i.e., the keyword arguments should be valid
+ gc parameter values.
+ """
+ super(Stroke, self).__init__(offset)
+ self._gc = kwargs
+
+ def draw_path(self, renderer, gc, tpath, affine, rgbFace):
+ """
+        Draw the path with the updated gc.
+ """
+ # Do not modify the input! Use copy instead.
+
+ gc0 = renderer.new_gc()
+ gc0.copy_properties(gc)
+
+ gc0 = self._update_gc(gc0, self._gc)
+ trans = self._offset_transform(renderer, affine)
+ renderer.draw_path(gc0, tpath, trans, rgbFace)
+ gc0.restore()
+
+
+class withStroke(Stroke):
+ """
+ Adds a simple :class:`Stroke` and then draws the
+ original Artist to avoid needing to call :class:`Normal`.
+
+ """
+ def draw_path(self, renderer, gc, tpath, affine, rgbFace):
+ Stroke.draw_path(self, renderer, gc, tpath, affine, rgbFace)
+ renderer.draw_path(gc, tpath, affine, rgbFace)
+
+
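+def _demo_with_stroke():
+    # Illustrative usage sketch, not part of this module; the helper name is
+    # hypothetical.  ``withStroke`` outlines the artist and then draws it
+    # normally; its keywords are applied to the gc (here ``linewidth`` and
+    # ``foreground``).
+    import matplotlib.pyplot as plt
+    fig, ax = plt.subplots()
+    ax.plot([0, 1], [0, 1], linewidth=4,
+            path_effects=[withStroke(linewidth=8, foreground='w')])
+    return fig
+
+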
+class SimplePatchShadow(AbstractPathEffect):
+ """A simple shadow via a filled patch."""
+ def __init__(self, offset=(2, -2),
+ shadow_rgbFace=None, alpha=None,
+ rho=0.3, **kwargs):
+ """
+ Parameters
+ ----------
+ offset : pair of floats
+ The offset of the shadow in points.
+ shadow_rgbFace : color
+ The shadow color.
+        alpha : float
+            The alpha transparency of the created shadow patch.
+            Default is 0.3. For background, see
+            http://matplotlib.1069221.n5.nabble.com/path-effects-question-td27630.html
+ rho : float
+ A scale factor to apply to the rgbFace color if `shadow_rgbFace`
+ is not specified. Default is 0.3.
+ **kwargs
+ Extra keywords are stored and passed through to
+ :meth:`AbstractPathEffect._update_gc`.
+
+ """
+ super(SimplePatchShadow, self).__init__(offset)
+
+ if shadow_rgbFace is None:
+ self._shadow_rgbFace = shadow_rgbFace
+ else:
+ self._shadow_rgbFace = mcolors.to_rgba(shadow_rgbFace)
+
+ if alpha is None:
+ alpha = 0.3
+
+ self._alpha = alpha
+ self._rho = rho
+
+ #: The dictionary of keywords to update the graphics collection with.
+ self._gc = kwargs
+
+ #: The offset transform object. The offset isn't calculated yet
+ #: as we don't know how big the figure will be in pixels.
+ self._offset_tran = mtransforms.Affine2D()
+
+ def draw_path(self, renderer, gc, tpath, affine, rgbFace):
+ """
+ Overrides the standard draw_path to add the shadow offset and
+ necessary color changes for the shadow.
+
+ """
+ # IMPORTANT: Do not modify the input - we copy everything instead.
+ affine0 = self._offset_transform(renderer, affine)
+ gc0 = renderer.new_gc()
+ gc0.copy_properties(gc)
+
+ if self._shadow_rgbFace is None:
+ r,g,b = (rgbFace or (1., 1., 1.))[:3]
+ # Scale the colors by a factor to improve the shadow effect.
+ shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)
+ else:
+ shadow_rgbFace = self._shadow_rgbFace
+
+ gc0.set_foreground("none")
+ gc0.set_alpha(self._alpha)
+ gc0.set_linewidth(0)
+
+ gc0 = self._update_gc(gc0, self._gc)
+ renderer.draw_path(gc0, tpath, affine0, shadow_rgbFace)
+ gc0.restore()
+
+
+class withSimplePatchShadow(SimplePatchShadow):
+ """
+ Adds a simple :class:`SimplePatchShadow` and then draws the
+ original Artist to avoid needing to call :class:`Normal`.
+
+ """
+ def draw_path(self, renderer, gc, tpath, affine, rgbFace):
+ SimplePatchShadow.draw_path(self, renderer, gc, tpath, affine, rgbFace)
+ renderer.draw_path(gc, tpath, affine, rgbFace)
+
+
+class SimpleLineShadow(AbstractPathEffect):
+ """A simple shadow via a line."""
+ def __init__(self, offset=(2,-2),
+ shadow_color='k', alpha=0.3, rho=0.3, **kwargs):
+ """
+ Parameters
+ ----------
+ offset : pair of floats
+ The offset to apply to the path, in points.
+ shadow_color : color
+ The shadow color. Default is black.
+ A value of ``None`` takes the original artist's color
+ with a scale factor of `rho`.
+ alpha : float
+ The alpha transparency of the created shadow patch.
+ Default is 0.3.
+        rho : float
+            A scale factor to apply to the line color if *shadow_color*
+            is ``None``. Default is 0.3.
+ **kwargs
+ Extra keywords are stored and passed through to
+ :meth:`AbstractPathEffect._update_gc`.
+
+ """
+ super(SimpleLineShadow, self).__init__(offset)
+ if shadow_color is None:
+ self._shadow_color = shadow_color
+ else:
+ self._shadow_color = mcolors.to_rgba(shadow_color)
+ self._alpha = alpha
+ self._rho = rho
+
+ #: The dictionary of keywords to update the graphics collection with.
+ self._gc = kwargs
+
+ #: The offset transform object. The offset isn't calculated yet
+ #: as we don't know how big the figure will be in pixels.
+ self._offset_tran = mtransforms.Affine2D()
+
+ def draw_path(self, renderer, gc, tpath, affine, rgbFace):
+ """
+ Overrides the standard draw_path to add the shadow offset and
+ necessary color changes for the shadow.
+
+ """
+ # IMPORTANT: Do not modify the input - we copy everything instead.
+ affine0 = self._offset_transform(renderer, affine)
+ gc0 = renderer.new_gc()
+ gc0.copy_properties(gc)
+
+ if self._shadow_color is None:
+ r,g,b = (gc0.get_foreground() or (1., 1., 1.))[:3]
+ # Scale the colors by a factor to improve the shadow effect.
+ shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)
+ else:
+ shadow_rgbFace = self._shadow_color
+
+ fill_color = None
+
+ gc0.set_foreground(shadow_rgbFace)
+ gc0.set_alpha(self._alpha)
+
+ gc0 = self._update_gc(gc0, self._gc)
+ renderer.draw_path(gc0, tpath, affine0, fill_color)
+ gc0.restore()
+
+
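+def _demo_line_shadow():
+    # Illustrative usage sketch, not part of this module; the helper name is
+    # hypothetical.  ``SimpleLineShadow`` only draws the shadow, so it is
+    # combined with ``Normal`` to also draw the line itself.
+    import numpy as np
+    import matplotlib.pyplot as plt
+    fig, ax = plt.subplots()
+    x = np.linspace(0, 2 * np.pi, 100)
+    ax.plot(x, np.sin(x), linewidth=3,
+            path_effects=[SimpleLineShadow(offset=(2, -2)), Normal()])
+    return fig
+
+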
+class PathPatchEffect(AbstractPathEffect):
+ """
+ Draws a :class:`~matplotlib.patches.PathPatch` instance whose Path
+ comes from the original PathEffect artist.
+
+ """
+ def __init__(self, offset=(0, 0), **kwargs):
+ """
+ Parameters
+ ----------
+ offset : pair of floats
+ The offset to apply to the path, in points.
+ **kwargs :
+ All keyword arguments are passed through to the
+ :class:`~matplotlib.patches.PathPatch` constructor. The
+            properties which cannot be overridden are "path", "clip_box",
+            "transform" and "clip_path".
+ """
+ super(PathPatchEffect, self).__init__(offset=offset)
+ self.patch = mpatches.PathPatch([], **kwargs)
+
+ def draw_path(self, renderer, gc, tpath, affine, rgbFace):
+ affine = self._offset_transform(renderer, affine)
+ self.patch._path = tpath
+ self.patch.set_transform(affine)
+ self.patch.set_clip_box(gc.get_clip_rectangle())
+ clip_path = gc.get_clip_path()
+ if clip_path:
+ self.patch.set_clip_path(*clip_path)
+ self.patch.draw(renderer)
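+
+
+def _demo_path_patch_effect():
+    # Illustrative usage sketch, not part of this module; the helper name is
+    # hypothetical.  ``PathPatchEffect`` redraws the artist's path as a
+    # PathPatch, so patch keywords such as ``hatch`` and ``facecolor`` can be
+    # layered underneath the normal rendering.
+    import matplotlib.pyplot as plt
+    fig, ax = plt.subplots()
+    text = ax.text(0.5, 0.5, 'Hatched shadow', size=40, ha='center')
+    text.set_path_effects([
+        PathPatchEffect(offset=(4, -4), hatch='xxxx', facecolor='gray'),
+        PathPatchEffect(edgecolor='white', linewidth=1.1, facecolor='black'),
+    ])
+    return fig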
diff --git a/contrib/python/matplotlib/py2/matplotlib/projections/__init__.py b/contrib/python/matplotlib/py2/matplotlib/projections/__init__.py
new file mode 100644
index 00000000000..1e423420b0b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/projections/__init__.py
@@ -0,0 +1,110 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from .geo import AitoffAxes, HammerAxes, LambertAxes, MollweideAxes
+from .polar import PolarAxes
+from matplotlib import axes
+
+class ProjectionRegistry(object):
+ """
+ Manages the set of projections available to the system.
+ """
+ def __init__(self):
+ self._all_projection_types = {}
+
+ def register(self, *projections):
+ """
+ Register a new set of projection(s).
+ """
+ for projection in projections:
+ name = projection.name
+ self._all_projection_types[name] = projection
+
+ def get_projection_class(self, name):
+ """
+ Get a projection class from its *name*.
+ """
+ return self._all_projection_types[name]
+
+ def get_projection_names(self):
+ """
+ Get a list of the names of all projections currently
+ registered.
+ """
+ return sorted(self._all_projection_types)
+projection_registry = ProjectionRegistry()
+
+projection_registry.register(
+ axes.Axes,
+ PolarAxes,
+ AitoffAxes,
+ HammerAxes,
+ LambertAxes,
+ MollweideAxes)
+
+
+def register_projection(cls):
+ projection_registry.register(cls)
+
+
+def get_projection_class(projection=None):
+ """
+ Get a projection class from its name.
+
+ If *projection* is None, a standard rectilinear projection is
+ returned.
+ """
+ if projection is None:
+ projection = 'rectilinear'
+
+ try:
+ return projection_registry.get_projection_class(projection)
+ except KeyError:
+ raise ValueError("Unknown projection '%s'" % projection)
+
+
+def process_projection_requirements(figure, *args, **kwargs):
+ """
+    Handle the args/kwargs for add_axes/add_subplot/gca,
+ returning::
+
+ (axes_proj_class, proj_class_kwargs, proj_stack_key)
+
+ Which can be used for new axes initialization/identification.
+
+ .. note:: **kwargs** is modified in place.
+
+ """
+ ispolar = kwargs.pop('polar', False)
+ projection = kwargs.pop('projection', None)
+ if ispolar:
+ if projection is not None and projection != 'polar':
+ raise ValueError(
+ "polar=True, yet projection=%r. "
+ "Only one of these arguments should be supplied." %
+ projection)
+ projection = 'polar'
+
+ if isinstance(projection, six.string_types) or projection is None:
+ projection_class = get_projection_class(projection)
+ elif hasattr(projection, '_as_mpl_axes'):
+ projection_class, extra_kwargs = projection._as_mpl_axes()
+ kwargs.update(**extra_kwargs)
+ else:
+ raise TypeError('projection must be a string, None or implement a '
+ '_as_mpl_axes method. Got %r' % projection)
+
+ # Make the key without projection kwargs, this is used as a unique
+ # lookup for axes instances
+ key = figure._make_key(*args, **kwargs)
+
+ return projection_class, kwargs, key
+
+
+def get_projection_names():
+ """
+ Get a list of acceptable projection names.
+ """
+ return projection_registry.get_projection_names()
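+
+
+def _demo_register_projection():
+    # Illustrative sketch only, not part of this module; ``MyAxes`` and the
+    # helper name are hypothetical.  A third-party Axes subclass becomes
+    # available to ``plt.subplot(projection=...)`` once it defines a ``name``
+    # attribute and is passed to ``register_projection``.
+    from matplotlib.axes import Axes
+
+    class MyAxes(Axes):
+        name = 'my-projection'
+
+    register_projection(MyAxes)
+    return get_projection_class('my-projection')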
diff --git a/contrib/python/matplotlib/py2/matplotlib/projections/geo.py b/contrib/python/matplotlib/py2/matplotlib/projections/geo.py
new file mode 100644
index 00000000000..3ed5dc74564
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/projections/geo.py
@@ -0,0 +1,547 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+
+import matplotlib
+from matplotlib import rcParams
+from matplotlib.axes import Axes
+import matplotlib.axis as maxis
+from matplotlib.patches import Circle
+from matplotlib.path import Path
+import matplotlib.spines as mspines
+from matplotlib.ticker import (
+ Formatter, NullLocator, FixedLocator, NullFormatter)
+from matplotlib.transforms import Affine2D, BboxTransformTo, Transform
+
+
+class GeoAxes(Axes):
+ """An abstract base class for geographic projections."""
+ class ThetaFormatter(Formatter):
+ """
+ Used to format the theta tick labels. Converts the native
+ unit of radians into degrees and adds a degree symbol.
+ """
+ def __init__(self, round_to=1.0):
+ self._round_to = round_to
+
+ def __call__(self, x, pos=None):
+ degrees = (x / np.pi) * 180.0
+ degrees = np.round(degrees / self._round_to) * self._round_to
+ if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
+ return r"$%0.0f^\circ$" % degrees
+ else:
+ return "%0.0f\N{DEGREE SIGN}" % degrees
+
+ RESOLUTION = 75
+
+ def _init_axis(self):
+ self.xaxis = maxis.XAxis(self)
+ self.yaxis = maxis.YAxis(self)
+ # Do not register xaxis or yaxis with spines -- as done in
+ # Axes._init_axis() -- until GeoAxes.xaxis.cla() works.
+ # self.spines['geo'].register_axis(self.yaxis)
+ self._update_transScale()
+
+ def cla(self):
+ Axes.cla(self)
+
+ self.set_longitude_grid(30)
+ self.set_latitude_grid(15)
+ self.set_longitude_grid_ends(75)
+ self.xaxis.set_minor_locator(NullLocator())
+ self.yaxis.set_minor_locator(NullLocator())
+ self.xaxis.set_ticks_position('none')
+ self.yaxis.set_ticks_position('none')
+ self.yaxis.set_tick_params(label1On=True)
+ # Why do we need to turn on yaxis tick labels, but
+ # xaxis tick labels are already on?
+
+ self.grid(rcParams['axes.grid'])
+
+ Axes.set_xlim(self, -np.pi, np.pi)
+ Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
+
+ def _set_lim_and_transforms(self):
+ # A (possibly non-linear) projection on the (already scaled) data
+ self.transProjection = self._get_core_transform(self.RESOLUTION)
+
+ self.transAffine = self._get_affine_transform()
+
+ self.transAxes = BboxTransformTo(self.bbox)
+
+ # The complete data transformation stack -- from data all the
+ # way to display coordinates
+ self.transData = \
+ self.transProjection + \
+ self.transAffine + \
+ self.transAxes
+
+ # This is the transform for longitude ticks.
+ self._xaxis_pretransform = \
+ Affine2D() \
+ .scale(1, self._longitude_cap * 2) \
+ .translate(0, -self._longitude_cap)
+ self._xaxis_transform = \
+ self._xaxis_pretransform + \
+ self.transData
+ self._xaxis_text1_transform = \
+ Affine2D().scale(1, 0) + \
+ self.transData + \
+ Affine2D().translate(0, 4)
+ self._xaxis_text2_transform = \
+ Affine2D().scale(1, 0) + \
+ self.transData + \
+ Affine2D().translate(0, -4)
+
+ # This is the transform for latitude ticks.
+ yaxis_stretch = Affine2D().scale(np.pi * 2, 1).translate(-np.pi, 0)
+ yaxis_space = Affine2D().scale(1, 1.1)
+ self._yaxis_transform = \
+ yaxis_stretch + \
+ self.transData
+ yaxis_text_base = \
+ yaxis_stretch + \
+ self.transProjection + \
+ (yaxis_space + \
+ self.transAffine + \
+ self.transAxes)
+ self._yaxis_text1_transform = \
+ yaxis_text_base + \
+ Affine2D().translate(-8, 0)
+ self._yaxis_text2_transform = \
+ yaxis_text_base + \
+ Affine2D().translate(8, 0)
+
+ def _get_affine_transform(self):
+ transform = self._get_core_transform(1)
+ xscale, _ = transform.transform_point((np.pi, 0))
+ _, yscale = transform.transform_point((0, np.pi / 2))
+ return Affine2D() \
+ .scale(0.5 / xscale, 0.5 / yscale) \
+ .translate(0.5, 0.5)
+
+    def get_xaxis_transform(self, which='grid'):
+ if which not in ['tick1', 'tick2', 'grid']:
+ raise ValueError(
+ "'which' must be one of 'tick1', 'tick2', or 'grid'")
+ return self._xaxis_transform
+
+ def get_xaxis_text1_transform(self, pad):
+ return self._xaxis_text1_transform, 'bottom', 'center'
+
+ def get_xaxis_text2_transform(self, pad):
+ return self._xaxis_text2_transform, 'top', 'center'
+
+    def get_yaxis_transform(self, which='grid'):
+ if which not in ['tick1', 'tick2', 'grid']:
+ raise ValueError(
+ "'which' must be one of 'tick1', 'tick2', or 'grid'")
+ return self._yaxis_transform
+
+ def get_yaxis_text1_transform(self, pad):
+ return self._yaxis_text1_transform, 'center', 'right'
+
+ def get_yaxis_text2_transform(self, pad):
+ return self._yaxis_text2_transform, 'center', 'left'
+
+ def _gen_axes_patch(self):
+ return Circle((0.5, 0.5), 0.5)
+
+ def _gen_axes_spines(self):
+ return {'geo':mspines.Spine.circular_spine(self,
+ (0.5, 0.5), 0.5)}
+
+ def set_yscale(self, *args, **kwargs):
+ if args[0] != 'linear':
+ raise NotImplementedError
+
+ set_xscale = set_yscale
+
+ def set_xlim(self, *args, **kwargs):
+ raise TypeError("It is not possible to change axes limits "
+ "for geographic projections. Please consider "
+ "using Basemap or Cartopy.")
+
+ set_ylim = set_xlim
+
+ def format_coord(self, lon, lat):
+ 'return a format string formatting the coordinate'
+ lon, lat = np.rad2deg([lon, lat])
+ if lat >= 0.0:
+ ns = 'N'
+ else:
+ ns = 'S'
+ if lon >= 0.0:
+ ew = 'E'
+ else:
+ ew = 'W'
+ return ('%f\N{DEGREE SIGN}%s, %f\N{DEGREE SIGN}%s'
+ % (abs(lat), ns, abs(lon), ew))
+
+ def set_longitude_grid(self, degrees):
+ """
+ Set the number of degrees between each longitude grid.
+ """
+ # Skip -180 and 180, which are the fixed limits.
+ grid = np.arange(-180 + degrees, 180, degrees)
+ self.xaxis.set_major_locator(FixedLocator(np.deg2rad(grid)))
+ self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
+
+ def set_latitude_grid(self, degrees):
+ """
+ Set the number of degrees between each latitude grid.
+ """
+ # Skip -90 and 90, which are the fixed limits.
+ grid = np.arange(-90 + degrees, 90, degrees)
+ self.yaxis.set_major_locator(FixedLocator(np.deg2rad(grid)))
+ self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
+
+ def set_longitude_grid_ends(self, degrees):
+ """
+ Set the latitude(s) at which to stop drawing the longitude grids.
+ """
+ self._longitude_cap = np.deg2rad(degrees)
+ self._xaxis_pretransform \
+ .clear() \
+ .scale(1.0, self._longitude_cap * 2.0) \
+ .translate(0.0, -self._longitude_cap)
+
+ def get_data_ratio(self):
+ '''
+ Return the aspect ratio of the data itself.
+ '''
+ return 1.0
+
+ ### Interactive panning
+
+ def can_zoom(self):
+ """
+ Return *True* if this axes supports the zoom box button functionality.
+
+ This axes object does not support interactive zoom box.
+ """
+ return False
+
+    def can_pan(self):
+ """
+ Return *True* if this axes supports the pan/zoom button functionality.
+
+ This axes object does not support interactive pan/zoom.
+ """
+ return False
+
+ def start_pan(self, x, y, button):
+ pass
+
+ def end_pan(self):
+ pass
+
+ def drag_pan(self, button, key, x, y):
+ pass
+
+
+class _GeoTransform(Transform):
+ # Factoring out some common functionality.
+ input_dims = 2
+ output_dims = 2
+ is_separable = False
+
+ def __init__(self, resolution):
+ """
+ Create a new geographical transform.
+
+ Resolution is the number of steps to interpolate between each input
+ line segment to approximate its path in curved space.
+ """
+ Transform.__init__(self)
+ self._resolution = resolution
+
+ def __str__(self):
+ return "{}({})".format(type(self).__name__, self._resolution)
+
+ def transform_path_non_affine(self, path):
+ vertices = path.vertices
+ ipath = path.interpolated(self._resolution)
+ return Path(self.transform(ipath.vertices), ipath.codes)
+ transform_path_non_affine.__doc__ = \
+ Transform.transform_path_non_affine.__doc__
+
+
+class AitoffAxes(GeoAxes):
+ name = 'aitoff'
+
+ class AitoffTransform(_GeoTransform):
+ """The base Aitoff transform."""
+
+ def transform_non_affine(self, ll):
+ longitude = ll[:, 0]
+ latitude = ll[:, 1]
+
+ # Pre-compute some values
+ half_long = longitude / 2.0
+ cos_latitude = np.cos(latitude)
+
+ alpha = np.arccos(cos_latitude * np.cos(half_long))
+ # Avoid divide-by-zero errors using same method as NumPy.
+ alpha[alpha == 0.0] = 1e-20
+            # We want unnormalized sinc; numpy.sinc gives us the normalized form.
+ sinc_alpha = np.sin(alpha) / alpha
+
+ xy = np.empty_like(ll, float)
+ xy[:, 0] = (cos_latitude * np.sin(half_long)) / sinc_alpha
+ xy[:, 1] = np.sin(latitude) / sinc_alpha
+ return xy
+ transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
+
+ def inverted(self):
+ return AitoffAxes.InvertedAitoffTransform(self._resolution)
+ inverted.__doc__ = Transform.inverted.__doc__
+
+ class InvertedAitoffTransform(_GeoTransform):
+
+ def transform_non_affine(self, xy):
+ # MGDTODO: Math is hard ;(
+ return xy
+ transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
+
+ def inverted(self):
+ return AitoffAxes.AitoffTransform(self._resolution)
+ inverted.__doc__ = Transform.inverted.__doc__
+
+ def __init__(self, *args, **kwargs):
+ self._longitude_cap = np.pi / 2.0
+ GeoAxes.__init__(self, *args, **kwargs)
+ self.set_aspect(0.5, adjustable='box', anchor='C')
+ self.cla()
+
+ def _get_core_transform(self, resolution):
+ return self.AitoffTransform(resolution)
+
+
+class HammerAxes(GeoAxes):
+ name = 'hammer'
+
+ class HammerTransform(_GeoTransform):
+ """The base Hammer transform."""
+
+ def transform_non_affine(self, ll):
+ longitude = ll[:, 0:1]
+ latitude = ll[:, 1:2]
+
+ # Pre-compute some values
+ half_long = longitude / 2.0
+ cos_latitude = np.cos(latitude)
+ sqrt2 = np.sqrt(2.0)
+
+ alpha = np.sqrt(1.0 + cos_latitude * np.cos(half_long))
+ x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
+ y = (sqrt2 * np.sin(latitude)) / alpha
+ return np.concatenate((x, y), 1)
+ transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
+
+ def inverted(self):
+ return HammerAxes.InvertedHammerTransform(self._resolution)
+ inverted.__doc__ = Transform.inverted.__doc__
+
+ class InvertedHammerTransform(_GeoTransform):
+
+ def transform_non_affine(self, xy):
+ x, y = xy.T
+ z = np.sqrt(1 - (x / 4) ** 2 - (y / 2) ** 2)
+ longitude = 2 * np.arctan((z * x) / (2 * (2 * z ** 2 - 1)))
+ latitude = np.arcsin(y*z)
+ return np.column_stack([longitude, latitude])
+ transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
+
+ def inverted(self):
+ return HammerAxes.HammerTransform(self._resolution)
+ inverted.__doc__ = Transform.inverted.__doc__
+
+ def __init__(self, *args, **kwargs):
+ self._longitude_cap = np.pi / 2.0
+ GeoAxes.__init__(self, *args, **kwargs)
+ self.set_aspect(0.5, adjustable='box', anchor='C')
+ self.cla()
+
+ def _get_core_transform(self, resolution):
+ return self.HammerTransform(resolution)
+
+
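+def _demo_hammer_round_trip():
+    # Illustrative numeric sketch, not part of this module; the helper name
+    # is hypothetical.  The forward and inverse Hammer transforms should
+    # round-trip a (longitude, latitude) point away from the map boundary.
+    ll = np.array([[0.5, 0.3]])           # (longitude, latitude) in radians
+    forward = HammerAxes.HammerTransform(resolution=1)
+    inverse = HammerAxes.InvertedHammerTransform(resolution=1)
+    xy = forward.transform_non_affine(ll)
+    ll_back = inverse.transform_non_affine(xy)
+    return np.allclose(ll, ll_back)
+
+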
+class MollweideAxes(GeoAxes):
+ name = 'mollweide'
+
+ class MollweideTransform(_GeoTransform):
+ """The base Mollweide transform."""
+
+ def transform_non_affine(self, ll):
+ def d(theta):
+ delta = (-(theta + np.sin(theta) - pi_sin_l)
+ / (1 + np.cos(theta)))
+ return delta, np.abs(delta) > 0.001
+
+ longitude = ll[:, 0]
+ latitude = ll[:, 1]
+
+ clat = np.pi/2 - np.abs(latitude)
+ ihigh = clat < 0.087 # within 5 degrees of the poles
+ ilow = ~ihigh
+ aux = np.empty(latitude.shape, dtype=float)
+
+ if ilow.any(): # Newton-Raphson iteration
+ pi_sin_l = np.pi * np.sin(latitude[ilow])
+ theta = 2.0 * latitude[ilow]
+ delta, large_delta = d(theta)
+ while np.any(large_delta):
+ theta[large_delta] += delta[large_delta]
+ delta, large_delta = d(theta)
+ aux[ilow] = theta / 2
+
+ if ihigh.any(): # Taylor series-based approx. solution
+ e = clat[ihigh]
+ d = 0.5 * (3 * np.pi * e**2) ** (1.0/3)
+ aux[ihigh] = (np.pi/2 - d) * np.sign(latitude[ihigh])
+
+ xy = np.empty(ll.shape, dtype=float)
+ xy[:,0] = (2.0 * np.sqrt(2.0) / np.pi) * longitude * np.cos(aux)
+ xy[:,1] = np.sqrt(2.0) * np.sin(aux)
+
+ return xy
+ transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
+
+ def inverted(self):
+ return MollweideAxes.InvertedMollweideTransform(self._resolution)
+ inverted.__doc__ = Transform.inverted.__doc__
+
+ class InvertedMollweideTransform(_GeoTransform):
+
+ def transform_non_affine(self, xy):
+ x = xy[:, 0:1]
+ y = xy[:, 1:2]
+
+ # from Equations (7, 8) of
+ # http://mathworld.wolfram.com/MollweideProjection.html
+ theta = np.arcsin(y / np.sqrt(2))
+ lon = (np.pi / (2 * np.sqrt(2))) * x / np.cos(theta)
+ lat = np.arcsin((2 * theta + np.sin(2 * theta)) / np.pi)
+
+ return np.concatenate((lon, lat), 1)
+ transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
+
+ def inverted(self):
+ return MollweideAxes.MollweideTransform(self._resolution)
+ inverted.__doc__ = Transform.inverted.__doc__
+
+ def __init__(self, *args, **kwargs):
+ self._longitude_cap = np.pi / 2.0
+ GeoAxes.__init__(self, *args, **kwargs)
+ self.set_aspect(0.5, adjustable='box', anchor='C')
+ self.cla()
+
+ def _get_core_transform(self, resolution):
+ return self.MollweideTransform(resolution)
+
+
+class LambertAxes(GeoAxes):
+ name = 'lambert'
+
+ class LambertTransform(_GeoTransform):
+ """The base Lambert transform."""
+
+ def __init__(self, center_longitude, center_latitude, resolution):
+ """
+ Create a new Lambert transform. Resolution is the number of steps
+ to interpolate between each input line segment to approximate its
+ path in curved Lambert space.
+ """
+ _GeoTransform.__init__(self, resolution)
+ self._center_longitude = center_longitude
+ self._center_latitude = center_latitude
+
+ def transform_non_affine(self, ll):
+ longitude = ll[:, 0:1]
+ latitude = ll[:, 1:2]
+ clong = self._center_longitude
+ clat = self._center_latitude
+ cos_lat = np.cos(latitude)
+ sin_lat = np.sin(latitude)
+ diff_long = longitude - clong
+ cos_diff_long = np.cos(diff_long)
+
+ inner_k = (1.0 +
+ np.sin(clat)*sin_lat +
+ np.cos(clat)*cos_lat*cos_diff_long)
+ # Prevent divide-by-zero problems
+ inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
+ k = np.sqrt(2.0 / inner_k)
+ x = k*cos_lat*np.sin(diff_long)
+ y = k*(np.cos(clat)*sin_lat -
+ np.sin(clat)*cos_lat*cos_diff_long)
+
+ return np.concatenate((x, y), 1)
+ transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
+
+ def inverted(self):
+ return LambertAxes.InvertedLambertTransform(
+ self._center_longitude,
+ self._center_latitude,
+ self._resolution)
+ inverted.__doc__ = Transform.inverted.__doc__
+
+ class InvertedLambertTransform(_GeoTransform):
+
+ def __init__(self, center_longitude, center_latitude, resolution):
+ _GeoTransform.__init__(self, resolution)
+ self._center_longitude = center_longitude
+ self._center_latitude = center_latitude
+
+ def transform_non_affine(self, xy):
+ x = xy[:, 0:1]
+ y = xy[:, 1:2]
+ clong = self._center_longitude
+ clat = self._center_latitude
+ p = np.sqrt(x*x + y*y)
+ p = np.where(p == 0.0, 1e-9, p)
+ c = 2.0 * np.arcsin(0.5 * p)
+ sin_c = np.sin(c)
+ cos_c = np.cos(c)
+
+ lat = np.arcsin(cos_c*np.sin(clat) +
+ ((y*sin_c*np.cos(clat)) / p))
+ lon = clong + np.arctan(
+ (x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
+
+ return np.concatenate((lon, lat), 1)
+ transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
+
+ def inverted(self):
+ return LambertAxes.LambertTransform(
+ self._center_longitude,
+ self._center_latitude,
+ self._resolution)
+ inverted.__doc__ = Transform.inverted.__doc__
+
+ def __init__(self, *args, **kwargs):
+ self._longitude_cap = np.pi / 2.0
+ self._center_longitude = kwargs.pop("center_longitude", 0.0)
+ self._center_latitude = kwargs.pop("center_latitude", 0.0)
+ GeoAxes.__init__(self, *args, **kwargs)
+ self.set_aspect('equal', adjustable='box', anchor='C')
+ self.cla()
+
+ def cla(self):
+ GeoAxes.cla(self)
+ self.yaxis.set_major_formatter(NullFormatter())
+
+ def _get_core_transform(self, resolution):
+ return self.LambertTransform(
+ self._center_longitude,
+ self._center_latitude,
+ resolution)
+
+ def _get_affine_transform(self):
+ return Affine2D() \
+ .scale(0.25) \
+ .translate(0.5, 0.5)
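+
+
+def _demo_geo_projections():
+    # Illustrative usage sketch, not part of this module; the helper name is
+    # hypothetical.  The GeoAxes subclasses above are registered under the
+    # names 'aitoff', 'hammer', 'lambert' and 'mollweide' (see
+    # projections/__init__.py), so they can be selected by name.
+    import matplotlib.pyplot as plt
+    fig = plt.figure()
+    ax = fig.add_subplot(111, projection='mollweide')
+    # Geographic axes expect longitude/latitude in radians.
+    ax.plot(np.deg2rad([-60, 0, 60]), np.deg2rad([-30, 0, 30]), 'o-')
+    ax.grid(True)
+    return fig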
diff --git a/contrib/python/matplotlib/py2/matplotlib/projections/polar.py b/contrib/python/matplotlib/py2/matplotlib/projections/polar.py
new file mode 100644
index 00000000000..62fc0f9ac60
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/projections/polar.py
@@ -0,0 +1,1537 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from collections import OrderedDict
+
+import numpy as np
+
+from matplotlib.axes import Axes
+import matplotlib.axis as maxis
+from matplotlib import cbook
+from matplotlib import docstring
+import matplotlib.markers as mmarkers
+import matplotlib.patches as mpatches
+import matplotlib.path as mpath
+from matplotlib import rcParams
+import matplotlib.ticker as mticker
+import matplotlib.transforms as mtransforms
+import matplotlib.spines as mspines
+
+
+class PolarTransform(mtransforms.Transform):
+ """
+ The base polar transform. This handles projection *theta* and
+ *r* into Cartesian coordinate space *x* and *y*, but does not
+ perform the ultimate affine transformation into the correct
+ position.
+ """
+ input_dims = 2
+ output_dims = 2
+ is_separable = False
+
+ def __init__(self, axis=None, use_rmin=True,
+ _apply_theta_transforms=True):
+ mtransforms.Transform.__init__(self)
+ self._axis = axis
+ self._use_rmin = use_rmin
+ self._apply_theta_transforms = _apply_theta_transforms
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ " use_rmin={},\n"
+ " _apply_theta_transforms={})"
+ .format(type(self).__name__,
+ mtransforms._indent_str(self._axis),
+ self._use_rmin,
+ self._apply_theta_transforms))
+
+ def transform_non_affine(self, tr):
+ xy = np.empty(tr.shape, float)
+
+ t = tr[:, 0:1]
+ r = tr[:, 1:2]
+ x = xy[:, 0:1]
+ y = xy[:, 1:2]
+
+ # PolarAxes does not use the theta transforms here, but apply them for
+ # backwards-compatibility if not being used by it.
+ if self._apply_theta_transforms and self._axis is not None:
+ t *= self._axis.get_theta_direction()
+ t += self._axis.get_theta_offset()
+
+ if self._use_rmin and self._axis is not None:
+ r = r - self._axis.get_rorigin()
+ mask = r < 0
+ x[:] = np.where(mask, np.nan, r * np.cos(t))
+ y[:] = np.where(mask, np.nan, r * np.sin(t))
+
+ return xy
+ transform_non_affine.__doc__ = \
+ mtransforms.Transform.transform_non_affine.__doc__
+
+ def transform_path_non_affine(self, path):
+ vertices = path.vertices
+ if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
+ return mpath.Path(self.transform(vertices), path.codes)
+ ipath = path.interpolated(path._interpolation_steps)
+ return mpath.Path(self.transform(ipath.vertices), ipath.codes)
+ transform_path_non_affine.__doc__ = \
+ mtransforms.Transform.transform_path_non_affine.__doc__
+
+ def inverted(self):
+ return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin,
+ self._apply_theta_transforms)
+ inverted.__doc__ = mtransforms.Transform.inverted.__doc__
+
+
+class PolarAffine(mtransforms.Affine2DBase):
+ """
+ The affine part of the polar projection. Scales the output so
+ that maximum radius rests on the edge of the axes circle.
+ """
+ def __init__(self, scale_transform, limits):
+ """
+ *limits* is the view limit of the data. The only part of
+ its bounds that is used is the y limits (for the radius limits).
+ The theta range is handled by the non-affine transform.
+ """
+ mtransforms.Affine2DBase.__init__(self)
+ self._scale_transform = scale_transform
+ self._limits = limits
+ self.set_children(scale_transform, limits)
+ self._mtx = None
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ "{})"
+ .format(type(self).__name__,
+ mtransforms._indent_str(self._scale_transform),
+ mtransforms._indent_str(self._limits)))
+
+ def get_matrix(self):
+ if self._invalid:
+ limits_scaled = self._limits.transformed(self._scale_transform)
+ yscale = limits_scaled.ymax - limits_scaled.ymin
+ affine = mtransforms.Affine2D() \
+ .scale(0.5 / yscale) \
+ .translate(0.5, 0.5)
+ self._mtx = affine.get_matrix()
+ self._inverted = None
+ self._invalid = 0
+ return self._mtx
+ get_matrix.__doc__ = mtransforms.Affine2DBase.get_matrix.__doc__
+
+
+class InvertedPolarTransform(mtransforms.Transform):
+ """
+ The inverse of the polar transform, mapping Cartesian
+ coordinate space *x* and *y* back to *theta* and *r*.
+ """
+ input_dims = 2
+ output_dims = 2
+ is_separable = False
+
+ def __init__(self, axis=None, use_rmin=True,
+ _apply_theta_transforms=True):
+ mtransforms.Transform.__init__(self)
+ self._axis = axis
+ self._use_rmin = use_rmin
+ self._apply_theta_transforms = _apply_theta_transforms
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ " use_rmin={},\n"
+ " _apply_theta_transforms={})"
+ .format(type(self).__name__,
+ mtransforms._indent_str(self._axis),
+ self._use_rmin,
+ self._apply_theta_transforms))
+
+ def transform_non_affine(self, xy):
+ x = xy[:, 0:1]
+ y = xy[:, 1:]
+ r = np.sqrt(x*x + y*y)
+ with np.errstate(invalid='ignore'):
+ # At x=y=r=0 this will raise an
+ # invalid value warning when doing 0/0
+ # Divide by zero warnings are only raised when
+ # the numerator is different from 0. That
+ # should not happen here.
+ theta = np.arccos(x / r)
+ theta = np.where(y < 0, 2 * np.pi - theta, theta)
+
+ # PolarAxes does not use the theta transforms here, but apply them for
+ # backwards-compatibility if not being used by it.
+ if self._apply_theta_transforms and self._axis is not None:
+ theta -= self._axis.get_theta_offset()
+ theta *= self._axis.get_theta_direction()
+ theta %= 2 * np.pi
+
+ if self._use_rmin and self._axis is not None:
+ r += self._axis.get_rorigin()
+
+ return np.concatenate((theta, r), 1)
+ transform_non_affine.__doc__ = \
+ mtransforms.Transform.transform_non_affine.__doc__
+
+ def inverted(self):
+ return PolarAxes.PolarTransform(self._axis, self._use_rmin,
+ self._apply_theta_transforms)
+ inverted.__doc__ = mtransforms.Transform.inverted.__doc__
+
+
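+def _demo_polar_round_trip():
+    # Illustrative numeric sketch, not part of this module; the helper name
+    # is hypothetical.  With no axis attached, ``PolarTransform`` maps
+    # (theta, r) to (x, y) and ``InvertedPolarTransform`` maps back.
+    tr = np.array([[np.pi / 4, 2.0]])     # (theta, r)
+    xy = PolarTransform().transform_non_affine(tr)
+    tr_back = InvertedPolarTransform().transform_non_affine(xy)
+    return np.allclose(tr, tr_back)
+
+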
+class ThetaFormatter(mticker.Formatter):
+ """
+ Used to format the *theta* tick labels. Converts the native
+ unit of radians into degrees and adds a degree symbol.
+ """
+ def __call__(self, x, pos=None):
+ vmin, vmax = self.axis.get_view_interval()
+ d = np.rad2deg(abs(vmax - vmin))
+ digits = max(-int(np.log10(d) - 1.5), 0)
+
+ if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
+ format_str = r"${value:0.{digits:d}f}^\circ$"
+ return format_str.format(value=np.rad2deg(x), digits=digits)
+ else:
+ # we use unicode, rather than mathtext with \circ, so
+ # that it will work correctly with any arbitrary font
+ # (assuming it has a degree sign), whereas $5\circ$
+ # will only work correctly with one of the supported
+ # math fonts (Computer Modern and STIX)
+ format_str = "{value:0.{digits:d}f}\N{DEGREE SIGN}"
+ return format_str.format(value=np.rad2deg(x), digits=digits)
+
+
+class _AxisWrapper(object):
+ def __init__(self, axis):
+ self._axis = axis
+
+ def get_view_interval(self):
+ return np.rad2deg(self._axis.get_view_interval())
+
+ def set_view_interval(self, vmin, vmax):
+ self._axis.set_view_interval(*np.deg2rad((vmin, vmax)))
+
+ def get_minpos(self):
+ return np.rad2deg(self._axis.get_minpos())
+
+ def get_data_interval(self):
+ return np.rad2deg(self._axis.get_data_interval())
+
+ def set_data_interval(self, vmin, vmax):
+ self._axis.set_data_interval(*np.deg2rad((vmin, vmax)))
+
+ def get_tick_space(self):
+ return self._axis.get_tick_space()
+
+
+class ThetaLocator(mticker.Locator):
+ """
+ Used to locate theta ticks.
+
+ This will work the same as the base locator except in the case that the
+ view spans the entire circle. In such cases, the previously used default
+ locations of every 45 degrees are returned.
+ """
+ def __init__(self, base):
+ self.base = base
+ self.axis = self.base.axis = _AxisWrapper(self.base.axis)
+
+ def set_axis(self, axis):
+ self.axis = _AxisWrapper(axis)
+ self.base.set_axis(self.axis)
+
+ def __call__(self):
+ lim = self.axis.get_view_interval()
+ if _is_full_circle_deg(lim[0], lim[1]):
+ return np.arange(8) * 2 * np.pi / 8
+ else:
+ return np.deg2rad(self.base())
+
+ def autoscale(self):
+ return self.base.autoscale()
+
+ def pan(self, numsteps):
+ return self.base.pan(numsteps)
+
+ def refresh(self):
+ return self.base.refresh()
+
+ def view_limits(self, vmin, vmax):
+ vmin, vmax = np.rad2deg((vmin, vmax))
+ return np.deg2rad(self.base.view_limits(vmin, vmax))
+
+ def zoom(self, direction):
+ return self.base.zoom(direction)
+
+
+class ThetaTick(maxis.XTick):
+ """
+ A theta-axis tick.
+
+ This subclass of `XTick` provides angular ticks with some small
+ modification to their re-positioning such that ticks are rotated based on
+ tick location. This results in ticks that are correctly perpendicular to
+ the arc spine.
+
+ When 'auto' rotation is enabled, labels are also rotated to be parallel to
+ the spine. The label padding is also applied here since it's not possible
+ to use a generic axes transform to produce tick-specific padding.
+ """
+ def __init__(self, axes, *args, **kwargs):
+ self._text1_translate = mtransforms.ScaledTranslation(
+ 0, 0,
+ axes.figure.dpi_scale_trans)
+ self._text2_translate = mtransforms.ScaledTranslation(
+ 0, 0,
+ axes.figure.dpi_scale_trans)
+ super(ThetaTick, self).__init__(axes, *args, **kwargs)
+
+ def _get_text1(self):
+ t = super(ThetaTick, self)._get_text1()
+ t.set_rotation_mode('anchor')
+ t.set_transform(t.get_transform() + self._text1_translate)
+ return t
+
+ def _get_text2(self):
+ t = super(ThetaTick, self)._get_text2()
+ t.set_rotation_mode('anchor')
+ t.set_transform(t.get_transform() + self._text2_translate)
+ return t
+
+ def _apply_params(self, **kw):
+ super(ThetaTick, self)._apply_params(**kw)
+
+ # Ensure transform is correct; sometimes this gets reset.
+ trans = self.label1.get_transform()
+ if not trans.contains_branch(self._text1_translate):
+ self.label1.set_transform(trans + self._text1_translate)
+ trans = self.label2.get_transform()
+ if not trans.contains_branch(self._text2_translate):
+ self.label2.set_transform(trans + self._text2_translate)
+
+ def _update_padding(self, pad, angle):
+ padx = pad * np.cos(angle) / 72
+ pady = pad * np.sin(angle) / 72
+ self._text1_translate._t = (padx, pady)
+ self._text1_translate.invalidate()
+ self._text2_translate._t = (-padx, -pady)
+ self._text2_translate.invalidate()
+
+ def update_position(self, loc):
+ super(ThetaTick, self).update_position(loc)
+ axes = self.axes
+ angle = loc * axes.get_theta_direction() + axes.get_theta_offset()
+ text_angle = np.rad2deg(angle) % 360 - 90
+ angle -= np.pi / 2
+
+ if self.tick1On:
+ marker = self.tick1line.get_marker()
+ if marker in (mmarkers.TICKUP, '|'):
+ trans = mtransforms.Affine2D().scale(1.0, 1.0).rotate(angle)
+ elif marker == mmarkers.TICKDOWN:
+ trans = mtransforms.Affine2D().scale(1.0, -1.0).rotate(angle)
+ else:
+ # Don't modify custom tick line markers.
+ trans = self.tick1line._marker._transform
+ self.tick1line._marker._transform = trans
+ if self.tick2On:
+ marker = self.tick2line.get_marker()
+ if marker in (mmarkers.TICKUP, '|'):
+ trans = mtransforms.Affine2D().scale(1.0, 1.0).rotate(angle)
+ elif marker == mmarkers.TICKDOWN:
+ trans = mtransforms.Affine2D().scale(1.0, -1.0).rotate(angle)
+ else:
+ # Don't modify custom tick line markers.
+ trans = self.tick2line._marker._transform
+ self.tick2line._marker._transform = trans
+
+ mode, user_angle = self._labelrotation
+ if mode == 'default':
+ text_angle = user_angle
+ else:
+ if text_angle > 90:
+ text_angle -= 180
+ elif text_angle < -90:
+ text_angle += 180
+ text_angle += user_angle
+ if self.label1On:
+ self.label1.set_rotation(text_angle)
+ if self.label2On:
+ self.label2.set_rotation(text_angle)
+
+ # This extra padding helps preserve the look from previous releases but
+ # is also needed because labels are anchored to their center.
+ pad = self._pad + 7
+ self._update_padding(pad,
+ self._loc * axes.get_theta_direction() +
+ axes.get_theta_offset())
+
+
+class ThetaAxis(maxis.XAxis):
+ """
+ A theta Axis.
+
+ This overrides certain properties of an `XAxis` to provide special-casing
+ for an angular axis.
+ """
+ __name__ = 'thetaaxis'
+ axis_name = 'theta'
+
+ def _get_tick(self, major):
+ if major:
+ tick_kw = self._major_tick_kw
+ else:
+ tick_kw = self._minor_tick_kw
+ return ThetaTick(self.axes, 0, '', major=major, **tick_kw)
+
+ def _wrap_locator_formatter(self):
+ self.set_major_locator(ThetaLocator(self.get_major_locator()))
+ self.set_major_formatter(ThetaFormatter())
+ self.isDefault_majloc = True
+ self.isDefault_majfmt = True
+
+ def cla(self):
+ super(ThetaAxis, self).cla()
+ self.set_ticks_position('none')
+ self._wrap_locator_formatter()
+
+ def _set_scale(self, value, **kwargs):
+ super(ThetaAxis, self)._set_scale(value, **kwargs)
+ self._wrap_locator_formatter()
+
+ def _copy_tick_props(self, src, dest):
+ 'Copy the props from src tick to dest tick'
+ if src is None or dest is None:
+ return
+ super(ThetaAxis, self)._copy_tick_props(src, dest)
+
+ # Ensure that tick transforms are independent so that padding works.
+ trans = dest._get_text1_transform()[0]
+ dest.label1.set_transform(trans + dest._text1_translate)
+ trans = dest._get_text2_transform()[0]
+ dest.label2.set_transform(trans + dest._text2_translate)
+
+
+class RadialLocator(mticker.Locator):
+ """
+ Used to locate radius ticks.
+
+ Ensures that all ticks are strictly positive. For all other
+ tasks, it delegates to the base
+ :class:`~matplotlib.ticker.Locator` (which may be different
+    depending on the scale of the *r*-axis).
+ """
+ def __init__(self, base, axes=None):
+ self.base = base
+ self._axes = axes
+
+ def __call__(self):
+ show_all = True
+ # Ensure previous behaviour with full circle non-annular views.
+ if self._axes:
+ if _is_full_circle_rad(*self._axes.viewLim.intervalx):
+ rorigin = self._axes.get_rorigin()
+ if self._axes.get_rmin() <= rorigin:
+ show_all = False
+
+ if show_all:
+ return self.base()
+ else:
+ return [tick for tick in self.base() if tick > rorigin]
+
+ def autoscale(self):
+ return self.base.autoscale()
+
+ def pan(self, numsteps):
+ return self.base.pan(numsteps)
+
+ def zoom(self, direction):
+ return self.base.zoom(direction)
+
+ def refresh(self):
+ return self.base.refresh()
+
+ def view_limits(self, vmin, vmax):
+ vmin, vmax = self.base.view_limits(vmin, vmax)
+ return mtransforms.nonsingular(min(0, vmin), vmax)
+
+
+class _ThetaShift(mtransforms.ScaledTranslation):
+ """
+ Apply a padding shift based on axes theta limits.
+
+ This is used to create padding for radial ticks.
+
+ Parameters
+ ----------
+ axes : matplotlib.axes.Axes
+ The owning axes; used to determine limits.
+ pad : float
+ The padding to apply, in points.
+    mode : str, {'min', 'max', 'rlabel'}
+ Whether to shift away from the start (``'min'``) or the end (``'max'``)
+ of the axes, or using the rlabel position (``'rlabel'``).
+ """
+ def __init__(self, axes, pad, mode):
+ mtransforms.ScaledTranslation.__init__(self, pad, pad,
+ axes.figure.dpi_scale_trans)
+ self.set_children(axes._realViewLim)
+ self.axes = axes
+ self.mode = mode
+ self.pad = pad
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ "{},\n"
+ "{})"
+ .format(type(self).__name__,
+ mtransforms._indent_str(self.axes),
+ mtransforms._indent_str(self.pad),
+ mtransforms._indent_str(repr(self.mode))))
+
+ def get_matrix(self):
+ if self._invalid:
+ if self.mode == 'rlabel':
+ angle = (
+ np.deg2rad(self.axes.get_rlabel_position()) *
+ self.axes.get_theta_direction() +
+ self.axes.get_theta_offset()
+ )
+ else:
+ if self.mode == 'min':
+ angle = self.axes._realViewLim.xmin
+ elif self.mode == 'max':
+ angle = self.axes._realViewLim.xmax
+
+ if self.mode in ('rlabel', 'min'):
+ padx = np.cos(angle - np.pi / 2)
+ pady = np.sin(angle - np.pi / 2)
+ else:
+ padx = np.cos(angle + np.pi / 2)
+ pady = np.sin(angle + np.pi / 2)
+
+ self._t = (self.pad * padx / 72, self.pad * pady / 72)
+ return mtransforms.ScaledTranslation.get_matrix(self)
+
+
+class RadialTick(maxis.YTick):
+ """
+ A radial-axis tick.
+
+ This subclass of `YTick` provides radial ticks with some small modification
+ to their re-positioning such that ticks are rotated based on axes limits.
+ This results in ticks that are correctly perpendicular to the spine. Labels
+ are also rotated to be perpendicular to the spine, when 'auto' rotation is
+ enabled.
+ """
+ def _get_text1(self):
+ t = super(RadialTick, self)._get_text1()
+ t.set_rotation_mode('anchor')
+ return t
+
+ def _get_text2(self):
+ t = super(RadialTick, self)._get_text2()
+ t.set_rotation_mode('anchor')
+ return t
+
+ def _determine_anchor(self, mode, angle, start):
+ # Note: angle is the (spine angle - 90) because it's used for the tick
+ # & text setup, so all numbers below are -90 from (normed) spine angle.
+ if mode == 'auto':
+ if start:
+ if -90 <= angle <= 90:
+ return 'left', 'center'
+ else:
+ return 'right', 'center'
+ else:
+ if -90 <= angle <= 90:
+ return 'right', 'center'
+ else:
+ return 'left', 'center'
+ else:
+ if start:
+ if angle < -68.5:
+ return 'center', 'top'
+ elif angle < -23.5:
+ return 'left', 'top'
+ elif angle < 22.5:
+ return 'left', 'center'
+ elif angle < 67.5:
+ return 'left', 'bottom'
+ elif angle < 112.5:
+ return 'center', 'bottom'
+ elif angle < 157.5:
+ return 'right', 'bottom'
+ elif angle < 202.5:
+ return 'right', 'center'
+ elif angle < 247.5:
+ return 'right', 'top'
+ else:
+ return 'center', 'top'
+ else:
+ if angle < -68.5:
+ return 'center', 'bottom'
+ elif angle < -23.5:
+ return 'right', 'bottom'
+ elif angle < 22.5:
+ return 'right', 'center'
+ elif angle < 67.5:
+ return 'right', 'top'
+ elif angle < 112.5:
+ return 'center', 'top'
+ elif angle < 157.5:
+ return 'left', 'top'
+ elif angle < 202.5:
+ return 'left', 'center'
+ elif angle < 247.5:
+ return 'left', 'bottom'
+ else:
+ return 'center', 'bottom'
+
+ def update_position(self, loc):
+ super(RadialTick, self).update_position(loc)
+ axes = self.axes
+ thetamin = axes.get_thetamin()
+ thetamax = axes.get_thetamax()
+ direction = axes.get_theta_direction()
+ offset_rad = axes.get_theta_offset()
+ offset = np.rad2deg(offset_rad)
+ full = _is_full_circle_deg(thetamin, thetamax)
+
+ if full:
+ angle = (axes.get_rlabel_position() * direction +
+ offset) % 360 - 90
+ tick_angle = 0
+ if angle > 90:
+ text_angle = angle - 180
+ elif angle < -90:
+ text_angle = angle + 180
+ else:
+ text_angle = angle
+ else:
+ angle = (thetamin * direction + offset) % 360 - 90
+ if direction > 0:
+ tick_angle = np.deg2rad(angle)
+ else:
+ tick_angle = np.deg2rad(angle + 180)
+ if angle > 90:
+ text_angle = angle - 180
+ elif angle < -90:
+ text_angle = angle + 180
+ else:
+ text_angle = angle
+ mode, user_angle = self._labelrotation
+ if mode == 'auto':
+ text_angle += user_angle
+ else:
+ text_angle = user_angle
+ if self.label1On:
+ if full:
+ ha = self.label1.get_ha()
+ va = self.label1.get_va()
+ else:
+ ha, va = self._determine_anchor(mode, angle, direction > 0)
+ self.label1.set_ha(ha)
+ self.label1.set_va(va)
+ self.label1.set_rotation(text_angle)
+ if self.tick1On:
+ marker = self.tick1line.get_marker()
+ if marker == mmarkers.TICKLEFT:
+ trans = (mtransforms.Affine2D()
+ .scale(1.0, 1.0)
+ .rotate(tick_angle))
+ elif marker == '_':
+ trans = (mtransforms.Affine2D()
+ .scale(1.0, 1.0)
+ .rotate(tick_angle + np.pi / 2))
+ elif marker == mmarkers.TICKRIGHT:
+ trans = (mtransforms.Affine2D()
+ .scale(-1.0, 1.0)
+ .rotate(tick_angle))
+ else:
+ # Don't modify custom tick line markers.
+ trans = self.tick1line._marker._transform
+ self.tick1line._marker._transform = trans
+
+ if full:
+ self.label2On = False
+ self.tick2On = False
+ else:
+ angle = (thetamax * direction + offset) % 360 - 90
+ if direction > 0:
+ tick_angle = np.deg2rad(angle)
+ else:
+ tick_angle = np.deg2rad(angle + 180)
+ if angle > 90:
+ text_angle = angle - 180
+ elif angle < -90:
+ text_angle = angle + 180
+ else:
+ text_angle = angle
+ mode, user_angle = self._labelrotation
+ if mode == 'auto':
+ text_angle += user_angle
+ else:
+ text_angle = user_angle
+ if self.label2On:
+ ha, va = self._determine_anchor(mode, angle, direction < 0)
+ self.label2.set_ha(ha)
+ self.label2.set_va(va)
+ self.label2.set_rotation(text_angle)
+ if self.tick2On:
+ marker = self.tick2line.get_marker()
+ if marker == mmarkers.TICKLEFT:
+ trans = (mtransforms.Affine2D()
+ .scale(1.0, 1.0)
+ .rotate(tick_angle))
+ elif marker == '_':
+ trans = (mtransforms.Affine2D()
+ .scale(1.0, 1.0)
+ .rotate(tick_angle + np.pi / 2))
+ elif marker == mmarkers.TICKRIGHT:
+ trans = (mtransforms.Affine2D()
+ .scale(-1.0, 1.0)
+ .rotate(tick_angle))
+ else:
+ # Don't modify custom tick line markers.
+ trans = self.tick2line._marker._transform
+ self.tick2line._marker._transform = trans
+
+
+class RadialAxis(maxis.YAxis):
+ """
+ A radial Axis.
+
+ This overrides certain properties of a `YAxis` to provide special-casing
+ for a radial axis.
+ """
+ __name__ = 'radialaxis'
+ axis_name = 'radius'
+
+ def __init__(self, *args, **kwargs):
+ super(RadialAxis, self).__init__(*args, **kwargs)
+ self.sticky_edges.y.append(0)
+
+ def _get_tick(self, major):
+ if major:
+ tick_kw = self._major_tick_kw
+ else:
+ tick_kw = self._minor_tick_kw
+ return RadialTick(self.axes, 0, '', major=major, **tick_kw)
+
+ def _wrap_locator_formatter(self):
+ self.set_major_locator(RadialLocator(self.get_major_locator(),
+ self.axes))
+ self.isDefault_majloc = True
+
+ def cla(self):
+ super(RadialAxis, self).cla()
+ self.set_ticks_position('none')
+ self._wrap_locator_formatter()
+
+ def _set_scale(self, value, **kwargs):
+ super(RadialAxis, self)._set_scale(value, **kwargs)
+ self._wrap_locator_formatter()
+
+
+def _is_full_circle_deg(thetamin, thetamax):
+ """
+ Determine if a wedge (in degrees) spans the full circle.
+
+ The condition is derived from :class:`~matplotlib.patches.Wedge`.
+ """
+ return abs(abs(thetamax - thetamin) - 360.0) < 1e-12
+
+
+def _is_full_circle_rad(thetamin, thetamax):
+ """
+ Determine if a wedge (in radians) spans the full circle.
+
+ The condition is derived from :class:`~matplotlib.patches.Wedge`.
+ """
+ return abs(abs(thetamax - thetamin) - 2 * np.pi) < 1.74e-14
+
+
+class _WedgeBbox(mtransforms.Bbox):
+ """
+ Transform (theta,r) wedge Bbox into axes bounding box.
+
+ Parameters
+ ----------
+ center : tuple of float
+ Center of the wedge
+ viewLim : `~matplotlib.transforms.Bbox`
+ Bbox determining the boundaries of the wedge
+ originLim : `~matplotlib.transforms.Bbox`
+ Bbox determining the origin for the wedge, if different from *viewLim*
+ """
+ def __init__(self, center, viewLim, originLim, **kwargs):
+ mtransforms.Bbox.__init__(self,
+ np.array([[0.0, 0.0], [1.0, 1.0]], np.float),
+ **kwargs)
+ self._center = center
+ self._viewLim = viewLim
+ self._originLim = originLim
+ self.set_children(viewLim, originLim)
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ "{},\n"
+ "{})"
+ .format(type(self).__name__,
+ mtransforms._indent_str(self._center),
+ mtransforms._indent_str(self._viewLim),
+ mtransforms._indent_str(self._originLim)))
+
+ def get_points(self):
+ if self._invalid:
+ points = self._viewLim.get_points().copy()
+
+ # Scale angular limits to work with Wedge.
+ points[:, 0] *= 180 / np.pi
+ if points[0, 0] > points[1, 0]:
+ points[:, 0] = points[::-1, 0]
+
+ # Scale radial limits based on origin radius.
+ points[:, 1] -= self._originLim.y0
+
+ # Scale radial limits to match axes limits.
+ rscale = 0.5 / points[1, 1]
+ points[:, 1] *= rscale
+ width = min(points[1, 1] - points[0, 1], 0.5)
+
+ # Generate bounding box for wedge.
+ wedge = mpatches.Wedge(self._center, points[1, 1],
+ points[0, 0], points[1, 0],
+ width=width)
+ self.update_from_path(wedge.get_path())
+
+ # Ensure equal aspect ratio.
+ w, h = self._points[1] - self._points[0]
+ if h < w:
+ deltah = (w - h) / 2.0
+ deltaw = 0.0
+ elif w < h:
+ deltah = 0.0
+ deltaw = (h - w) / 2.0
+ else:
+ deltah = 0.0
+ deltaw = 0.0
+ self._points += np.array([[-deltaw, -deltah], [deltaw, deltah]])
+
+ self._invalid = 0
+
+ return self._points
+ get_points.__doc__ = mtransforms.Bbox.get_points.__doc__
+
+
+class PolarAxes(Axes):
+ """
+ A polar graph projection, where the input dimensions are *theta*, *r*.
+
+ Theta starts pointing east and goes anti-clockwise.
+ """
+ name = 'polar'
+
+ def __init__(self, *args, **kwargs):
+ """
+ Create a new Polar Axes for a polar plot.
+ """
+ self._default_theta_offset = kwargs.pop('theta_offset', 0)
+ self._default_theta_direction = kwargs.pop('theta_direction', 1)
+ self._default_rlabel_position = np.deg2rad(
+ kwargs.pop('rlabel_position', 22.5))
+
+ Axes.__init__(self, *args, **kwargs)
+ self.use_sticky_edges = True
+ self.set_aspect('equal', adjustable='box', anchor='C')
+ self.cla()
+ __init__.__doc__ = Axes.__init__.__doc__
+
+ def cla(self):
+ Axes.cla(self)
+
+ self.title.set_y(1.05)
+
+ start = self.spines.get('start', None)
+ if start:
+ start.set_visible(False)
+ end = self.spines.get('end', None)
+ if end:
+ end.set_visible(False)
+ self.set_xlim(0.0, 2 * np.pi)
+
+ self.grid(rcParams['polaraxes.grid'])
+ inner = self.spines.get('inner', None)
+ if inner:
+ inner.set_visible(False)
+
+ self.set_rorigin(None)
+ self.set_theta_offset(self._default_theta_offset)
+ self.set_theta_direction(self._default_theta_direction)
+
+ def _init_axis(self):
+ "move this out of __init__ because non-separable axes don't use it"
+ self.xaxis = ThetaAxis(self)
+ self.yaxis = RadialAxis(self)
+ # Calling polar_axes.xaxis.cla() or polar_axes.yaxis.cla()
+ # results in weird artifacts. Therefore we disable this for
+ # now.
+ # self.spines['polar'].register_axis(self.yaxis)
+ self._update_transScale()
+
+ def _set_lim_and_transforms(self):
+ # A view limit where the minimum radius can be locked if the user
+ # specifies an alternate origin.
+ self._originViewLim = mtransforms.LockableBbox(self.viewLim)
+
+ # Handle angular offset and direction.
+ self._direction = mtransforms.Affine2D() \
+ .scale(self._default_theta_direction, 1.0)
+ self._theta_offset = mtransforms.Affine2D() \
+ .translate(self._default_theta_offset, 0.0)
+ self.transShift = mtransforms.composite_transform_factory(
+ self._direction,
+ self._theta_offset)
+ # A view limit shifted to the correct location after accounting for
+ # orientation and offset.
+ self._realViewLim = mtransforms.TransformedBbox(self.viewLim,
+ self.transShift)
+
+ # Transforms the x and y axis separately by a scale factor
+ # It is assumed that this part will have non-linear components
+ self.transScale = mtransforms.TransformWrapper(
+ mtransforms.IdentityTransform())
+
+ # Scale view limit into a bbox around the selected wedge. This may be
+ # smaller than the usual unit axes rectangle if not plotting the full
+ # circle.
+ self.axesLim = _WedgeBbox((0.5, 0.5),
+ self._realViewLim, self._originViewLim)
+
+ # Scale the wedge to fill the axes.
+ self.transWedge = mtransforms.BboxTransformFrom(self.axesLim)
+
+ # Scale the axes to fill the figure.
+ self.transAxes = mtransforms.BboxTransformTo(self.bbox)
+
+ # A (possibly non-linear) projection on the (already scaled)
+ # data. This one is aware of rmin
+ self.transProjection = self.PolarTransform(
+ self,
+ _apply_theta_transforms=False)
+ # Add dependency on rorigin.
+ self.transProjection.set_children(self._originViewLim)
+
+ # An affine transformation on the data, generally to limit the
+ # range of the axes
+ self.transProjectionAffine = self.PolarAffine(self.transScale,
+ self._originViewLim)
+
+ # The complete data transformation stack -- from data all the
+ # way to display coordinates
+ self.transData = (
+ self.transScale + self.transShift + self.transProjection +
+ (self.transProjectionAffine + self.transWedge + self.transAxes))
+
+ # This is the transform for theta-axis ticks. It is
+ # equivalent to transData, except it always puts r == 0.0 and r == 1.0
+ # at the edge of the axis circles.
+ self._xaxis_transform = (
+ mtransforms.blended_transform_factory(
+ mtransforms.IdentityTransform(),
+ mtransforms.BboxTransformTo(self.viewLim)) +
+ self.transData)
+ # The theta labels are flipped along the radius, so that text 1 is on
+ # the outside by default. This should work the same as before.
+ flipr_transform = mtransforms.Affine2D() \
+ .translate(0.0, -0.5) \
+ .scale(1.0, -1.0) \
+ .translate(0.0, 0.5)
+ self._xaxis_text_transform = flipr_transform + self._xaxis_transform
+
+ # This is the transform for r-axis ticks. It scales the theta
+ # axis so that gridlines from 0.0 to 1.0 now go from thetamin to
+ # thetamax.
+ self._yaxis_transform = (
+ mtransforms.blended_transform_factory(
+ mtransforms.BboxTransformTo(self.viewLim),
+ mtransforms.IdentityTransform()) +
+ self.transData)
+ # The r-axis labels are put at an angle and padded in the r-direction
+ self._r_label_position = mtransforms.Affine2D() \
+ .translate(self._default_rlabel_position, 0.0)
+ self._yaxis_text_transform = mtransforms.TransformWrapper(
+ self._r_label_position + self.transData)
+
+ def get_xaxis_transform(self, which='grid'):
+ if which not in ['tick1', 'tick2', 'grid']:
+ raise ValueError(
+ "'which' must be one of 'tick1', 'tick2', or 'grid'")
+ return self._xaxis_transform
+
+ def get_xaxis_text1_transform(self, pad):
+ return self._xaxis_text_transform, 'center', 'center'
+
+ def get_xaxis_text2_transform(self, pad):
+ return self._xaxis_text_transform, 'center', 'center'
+
+ def get_yaxis_transform(self, which='grid'):
+ if which in ('tick1', 'tick2'):
+ return self._yaxis_text_transform
+ elif which == 'grid':
+ return self._yaxis_transform
+ else:
+ raise ValueError(
+ "'which' must be one of 'tick1', 'tick2', or 'grid'")
+
+ def get_yaxis_text1_transform(self, pad):
+ thetamin, thetamax = self._realViewLim.intervalx
+ if _is_full_circle_rad(thetamin, thetamax):
+ return self._yaxis_text_transform, 'bottom', 'left'
+ elif self.get_theta_direction() > 0:
+ halign = 'left'
+ pad_shift = _ThetaShift(self, pad, 'min')
+ else:
+ halign = 'right'
+ pad_shift = _ThetaShift(self, pad, 'max')
+ return self._yaxis_text_transform + pad_shift, 'center', halign
+
+ def get_yaxis_text2_transform(self, pad):
+ if self.get_theta_direction() > 0:
+ halign = 'right'
+ pad_shift = _ThetaShift(self, pad, 'max')
+ else:
+ halign = 'left'
+ pad_shift = _ThetaShift(self, pad, 'min')
+ return self._yaxis_text_transform + pad_shift, 'center', halign
+
+ def draw(self, *args, **kwargs):
+ thetamin, thetamax = np.rad2deg(self._realViewLim.intervalx)
+ if thetamin > thetamax:
+ thetamin, thetamax = thetamax, thetamin
+ rmin, rmax = self._realViewLim.intervaly - self.get_rorigin()
+
+ if isinstance(self.patch, mpatches.Wedge):
+ # Backwards-compatibility: Any subclassed Axes might override the
+ # patch to not be the Wedge that PolarAxes uses.
+ center = self.transWedge.transform_point((0.5, 0.5))
+ self.patch.set_center(center)
+ self.patch.set_theta1(thetamin)
+ self.patch.set_theta2(thetamax)
+
+ edge, _ = self.transWedge.transform_point((1, 0))
+ radius = edge - center[0]
+ width = min(radius * (rmax - rmin) / rmax, radius)
+ self.patch.set_radius(radius)
+ self.patch.set_width(width)
+
+ inner_width = radius - width
+ inner = self.spines.get('inner', None)
+ if inner:
+ inner.set_visible(inner_width != 0.0)
+
+ visible = not _is_full_circle_deg(thetamin, thetamax)
+ # For backwards compatibility, any subclassed Axes might override the
+ # spines to not include start/end that PolarAxes uses.
+ start = self.spines.get('start', None)
+ end = self.spines.get('end', None)
+ if start:
+ start.set_visible(visible)
+ if end:
+ end.set_visible(visible)
+ if visible:
+ yaxis_text_transform = self._yaxis_transform
+ else:
+ yaxis_text_transform = self._r_label_position + self.transData
+ if self._yaxis_text_transform != yaxis_text_transform:
+ self._yaxis_text_transform.set(yaxis_text_transform)
+ self.yaxis.reset_ticks()
+ self.yaxis.set_clip_path(self.patch)
+
+ Axes.draw(self, *args, **kwargs)
+
+ def _gen_axes_patch(self):
+ return mpatches.Wedge((0.5, 0.5), 0.5, 0.0, 360.0)
+
+ def _gen_axes_spines(self):
+ spines = OrderedDict([
+ ('polar', mspines.Spine.arc_spine(self, 'top',
+ (0.5, 0.5), 0.5, 0.0, 360.0)),
+ ('start', mspines.Spine.linear_spine(self, 'left')),
+ ('end', mspines.Spine.linear_spine(self, 'right')),
+ ('inner', mspines.Spine.arc_spine(self, 'bottom',
+ (0.5, 0.5), 0.0, 0.0, 360.0))
+ ])
+ spines['polar'].set_transform(self.transWedge + self.transAxes)
+ spines['inner'].set_transform(self.transWedge + self.transAxes)
+ spines['start'].set_transform(self._yaxis_transform)
+ spines['end'].set_transform(self._yaxis_transform)
+ return spines
+
+ def set_thetamax(self, thetamax):
+ self.viewLim.x1 = np.deg2rad(thetamax)
+
+ def get_thetamax(self):
+ return np.rad2deg(self.viewLim.xmax)
+
+ def set_thetamin(self, thetamin):
+ self.viewLim.x0 = np.deg2rad(thetamin)
+
+ def get_thetamin(self):
+ return np.rad2deg(self.viewLim.xmin)
+
+ def set_thetalim(self, *args, **kwargs):
+ if 'thetamin' in kwargs:
+ kwargs['xmin'] = np.deg2rad(kwargs.pop('thetamin'))
+ if 'thetamax' in kwargs:
+ kwargs['xmax'] = np.deg2rad(kwargs.pop('thetamax'))
+ return tuple(np.rad2deg(self.set_xlim(*args, **kwargs)))
+
+ def set_theta_offset(self, offset):
+ """
+ Set the offset for the location of 0 in radians.
+ """
+ mtx = self._theta_offset.get_matrix()
+ mtx[0, 2] = offset
+ self._theta_offset.invalidate()
+
+ def get_theta_offset(self):
+ """
+ Get the offset for the location of 0 in radians.
+ """
+ return self._theta_offset.get_matrix()[0, 2]
+
+ def set_theta_zero_location(self, loc, offset=0.0):
+ """
+ Set the location of theta's zero. (Calls set_theta_offset
+ with the correct value in radians under the hood.)
+
+ loc : str
+ May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
+
+ offset : float, optional
+ An offset in degrees to apply from the specified `loc`. **Note:**
+ this offset is *always* applied counter-clockwise regardless of
+ the direction setting.
+ """
+ mapping = {
+ 'N': np.pi * 0.5,
+ 'NW': np.pi * 0.75,
+ 'W': np.pi,
+ 'SW': np.pi * 1.25,
+ 'S': np.pi * 1.5,
+ 'SE': np.pi * 1.75,
+ 'E': 0,
+ 'NE': np.pi * 0.25}
+ return self.set_theta_offset(mapping[loc] + np.deg2rad(offset))
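+ # For example (a sketch; ``ax`` is assumed to be a PolarAxes):
+ #
+ #     ax.set_theta_zero_location("N")       # 0 degrees at the top
+ #     ax.set_theta_zero_location("W", 10)   # west, then 10 degrees counter-clockwise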
+
+ def set_theta_direction(self, direction):
+ """
+ Set the direction in which theta increases.
+
+ clockwise, -1:
+ Theta increases in the clockwise direction
+
+ counterclockwise, anticlockwise, 1:
+ Theta increases in the counterclockwise direction
+ """
+ mtx = self._direction.get_matrix()
+ if direction in ('clockwise',):
+ mtx[0, 0] = -1
+ elif direction in ('counterclockwise', 'anticlockwise'):
+ mtx[0, 0] = 1
+ elif direction in (1, -1):
+ mtx[0, 0] = direction
+ else:
+ raise ValueError(
+ "direction must be 1, -1, clockwise or counterclockwise")
+ self._direction.invalidate()
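+ # For example, compass-style orientation (a sketch; ``ax`` is a PolarAxes):
+ #
+ #     ax.set_theta_zero_location("N")
+ #     ax.set_theta_direction(-1)            # theta increases clockwise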
+
+ def get_theta_direction(self):
+ """
+ Get the direction in which theta increases.
+
+ -1:
+ Theta increases in the clockwise direction
+
+ 1:
+ Theta increases in the counterclockwise direction
+ """
+ return self._direction.get_matrix()[0, 0]
+
+ def set_rmax(self, rmax):
+ self.viewLim.y1 = rmax
+
+ def get_rmax(self):
+ return self.viewLim.ymax
+
+ def set_rmin(self, rmin):
+ self.viewLim.y0 = rmin
+
+ def get_rmin(self):
+ return self.viewLim.ymin
+
+ def set_rorigin(self, rorigin):
+ self._originViewLim.locked_y0 = rorigin
+
+ def get_rorigin(self):
+ return self._originViewLim.y0
+
+ def set_rlim(self, *args, **kwargs):
+ if 'rmin' in kwargs:
+ kwargs['ymin'] = kwargs.pop('rmin')
+ if 'rmax' in kwargs:
+ kwargs['ymax'] = kwargs.pop('rmax')
+ return self.set_ylim(*args, **kwargs)
+
+ def get_rlabel_position(self):
+ """
+ Returns
+ -------
+ float
+ The theta position of the radius labels in degrees.
+ """
+ return np.rad2deg(self._r_label_position.get_matrix()[0, 2])
+
+ def set_rlabel_position(self, value):
+ """Updates the theta position of the radius labels.
+
+ Parameters
+ ----------
+ value : number
+ The angular position of the radius labels in degrees.
+ """
+ self._r_label_position.clear().translate(np.deg2rad(value), 0.0)
+
+ def set_yscale(self, *args, **kwargs):
+ Axes.set_yscale(self, *args, **kwargs)
+ self.yaxis.set_major_locator(
+ self.RadialLocator(self.yaxis.get_major_locator(), self))
+
+ def set_rscale(self, *args, **kwargs):
+ return Axes.set_yscale(self, *args, **kwargs)
+
+ def set_rticks(self, *args, **kwargs):
+ return Axes.set_yticks(self, *args, **kwargs)
+
+ @docstring.dedent_interpd
+ def set_thetagrids(self, angles, labels=None, frac=None, fmt=None,
+ **kwargs):
+ """
+ Set the angles at which to place the theta grids (each of these
+ gridlines is a line of constant theta). *angles* is in
+ degrees.
+
+ *labels*, if not None, is a ``len(angles)`` list of strings of
+ the labels to use at each angle.
+
+ If *labels* is None, the labels will be ``fmt %% angle``
+
+ *frac* is the fraction of the polar axes radius at which to
+ place the label (1 is the edge). e.g., 1.05 is outside the axes
+ and 0.95 is inside the axes.
+
+ Return value is a tuple (*lines*, *labels*), where *lines* is a
+ list of :class:`~matplotlib.lines.Line2D` instances and *labels*
+ is a list of :class:`~matplotlib.text.Text` instances.
+
+ kwargs are optional text properties for the labels:
+
+ %(Text)s
+
+ ACCEPTS: sequence of floats
+ """
+ if frac is not None:
+ cbook.warn_deprecated('2.1', name='frac', obj_type='parameter',
+ alternative='tick padding via '
+ 'Axes.tick_params')
+
+ # Make sure we take into account unitized data
+ angles = self.convert_yunits(angles)
+ angles = np.deg2rad(angles)
+ self.set_xticks(angles)
+ if labels is not None:
+ self.set_xticklabels(labels)
+ elif fmt is not None:
+ self.xaxis.set_major_formatter(mticker.FormatStrFormatter(fmt))
+ for t in self.xaxis.get_ticklabels():
+ t.update(kwargs)
+ return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
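+ # For example, labelling every 45 degrees (a sketch; ``ax`` is a PolarAxes):
+ #
+ #     lines, labels = ax.set_thetagrids(range(0, 360, 45),
+ #                                       ['E', 'NE', 'N', 'NW',
+ #                                        'W', 'SW', 'S', 'SE'])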
+
+ @docstring.dedent_interpd
+ def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
+ **kwargs):
+ """
+ Set the radial locations and labels of the *r* grids.
+
+ The labels will appear at radial distances *radii* at the
+ given *angle* in degrees.
+
+ *labels*, if not None, is a ``len(radii)`` list of strings of the
+ labels to use at each radius.
+
+ If *labels* is None, the built-in formatter will be used.
+
+ Return value is a tuple (*lines*, *labels*), where *lines* is a
+ list of :class:`~matplotlib.lines.Line2D` instances and *labels*
+ is a list of :class:`~matplotlib.text.Text` instances.
+
+ kwargs are optional text properties for the labels:
+
+ %(Text)s
+
+ ACCEPTS: sequence of floats
+ """
+ # Make sure we take into account unitized data
+ radii = self.convert_xunits(radii)
+ radii = np.asarray(radii)
+
+ self.set_yticks(radii)
+ if labels is not None:
+ self.set_yticklabels(labels)
+ elif fmt is not None:
+ self.yaxis.set_major_formatter(mticker.FormatStrFormatter(fmt))
+ if angle is None:
+ angle = self.get_rlabel_position()
+ self.set_rlabel_position(angle)
+ for t in self.yaxis.get_ticklabels():
+ t.update(kwargs)
+ return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
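+ # For example (a sketch; ``ax`` is a PolarAxes whose rmax is at least 3):
+ #
+ #     lines, labels = ax.set_rgrids([1, 2, 3], angle=22.5)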
+
+ def set_xscale(self, scale, *args, **kwargs):
+ if scale != 'linear':
+ raise NotImplementedError(
+ "You can not set the xscale on a polar plot.")
+
+ def format_coord(self, theta, r):
+ """
+ Return a format string formatting the coordinate using Unicode
+ characters.
+ """
+ if theta < 0:
+ theta += 2 * np.pi
+ theta /= np.pi
+ return ('\N{GREEK SMALL LETTER THETA}=%0.3f\N{GREEK SMALL LETTER PI} '
+ '(%0.3f\N{DEGREE SIGN}), r=%0.3f') % (theta, theta * 180.0, r)
+
+ def get_data_ratio(self):
+ '''
+ Return the aspect ratio of the data itself. For a polar plot,
+ this should always be 1.0
+ '''
+ return 1.0
+
+ # # # Interactive panning
+
+ def can_zoom(self):
+ """
+ Return *True* if this axes supports the zoom box button functionality.
+
+ Polar axes do not support zoom boxes.
+ """
+ return False
+
+ def can_pan(self):
+ """
+ Return *True* if this axes supports the pan/zoom button functionality.
+
+ For polar axes, this is slightly misleading. Both panning and
+ zooming are performed by the same button. Panning is performed
+ in azimuth while zooming is done along the radial direction.
+ """
+ return True
+
+ def start_pan(self, x, y, button):
+ angle = np.deg2rad(self.get_rlabel_position())
+ mode = ''
+ if button == 1:
+ epsilon = np.pi / 45.0
+ t, r = self.transData.inverted().transform_point((x, y))
+ if t >= angle - epsilon and t <= angle + epsilon:
+ mode = 'drag_r_labels'
+ elif button == 3:
+ mode = 'zoom'
+
+ self._pan_start = cbook.Bunch(
+ rmax=self.get_rmax(),
+ trans=self.transData.frozen(),
+ trans_inverse=self.transData.inverted().frozen(),
+ r_label_angle=self.get_rlabel_position(),
+ x=x,
+ y=y,
+ mode=mode)
+
+ def end_pan(self):
+ del self._pan_start
+
+ def drag_pan(self, button, key, x, y):
+ p = self._pan_start
+
+ if p.mode == 'drag_r_labels':
+ startt, startr = p.trans_inverse.transform_point((p.x, p.y))
+ t, r = p.trans_inverse.transform_point((x, y))
+
+ # Deal with theta
+ dt0 = t - startt
+ dt1 = startt - t
+ if abs(dt1) < abs(dt0):
+ dt = abs(dt1) * np.sign(dt0) * -1.0
+ else:
+ dt = dt0 * -1.0
+ dt = (dt / np.pi) * 180.0
+ self.set_rlabel_position(p.r_label_angle - dt)
+
+ trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
+ trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
+ for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
+ t.label1.set_va(vert1)
+ t.label1.set_ha(horiz1)
+ t.label2.set_va(vert2)
+ t.label2.set_ha(horiz2)
+
+ elif p.mode == 'zoom':
+ startt, startr = p.trans_inverse.transform_point((p.x, p.y))
+ t, r = p.trans_inverse.transform_point((x, y))
+
+ # Deal with r
+ scale = r / startr
+ self.set_rmax(p.rmax / scale)
+
+
+# to keep things all self contained, we can put aliases to the Polar classes
+# defined above. This isn't strictly necessary, but it makes some of the
+# code more readable (and provides a backwards compatible Polar API)
+PolarAxes.PolarTransform = PolarTransform
+PolarAxes.PolarAffine = PolarAffine
+PolarAxes.InvertedPolarTransform = InvertedPolarTransform
+PolarAxes.ThetaFormatter = ThetaFormatter
+PolarAxes.RadialLocator = RadialLocator
+PolarAxes.ThetaLocator = ThetaLocator
+
+
+# These are a couple of aborted attempts to project a polar plot using
+# cubic bezier curves.
+
+# def transform_path(self, path):
+# twopi = 2.0 * np.pi
+# halfpi = 0.5 * np.pi
+
+# vertices = path.vertices
+# t0 = vertices[0:-1, 0]
+# t1 = vertices[1: , 0]
+# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
+# maxtd = td.max()
+# interpolate = np.ceil(maxtd / halfpi)
+# if interpolate > 1.0:
+# vertices = self.interpolate(vertices, interpolate)
+
+# vertices = self.transform(vertices)
+
+# result = np.zeros((len(vertices) * 3 - 2, 2), float)
+# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ),
+# mpath.Path.code_type)
+# result[0] = vertices[0]
+# codes[0] = mpath.Path.MOVETO
+
+# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
+# kappa = 0.5
+
+# p0 = vertices[0:-1]
+# p1 = vertices[1: ]
+
+# x0 = p0[:, 0:1]
+# y0 = p0[:, 1: ]
+# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
+# a0 = y0 - b0*x0
+
+# x1 = p1[:, 0:1]
+# y1 = p1[:, 1: ]
+# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
+# a1 = y1 - b1*x1
+
+# x = -(a0-a1) / (b0-b1)
+# y = a0 + b0*x
+
+# xk = (x - x0) * kappa + x0
+# yk = (y - y0) * kappa + y0
+
+# result[1::3, 0:1] = xk
+# result[1::3, 1: ] = yk
+
+# xk = (x - x1) * kappa + x1
+# yk = (y - y1) * kappa + y1
+
+# result[2::3, 0:1] = xk
+# result[2::3, 1: ] = yk
+
+# result[3::3] = p1
+
+# print(vertices[-2:])
+# print(result[-2:])
+
+# return mpath.Path(result, codes)
+
+# twopi = 2.0 * np.pi
+# halfpi = 0.5 * np.pi
+
+# vertices = path.vertices
+# t0 = vertices[0:-1, 0]
+# t1 = vertices[1: , 0]
+# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
+# maxtd = td.max()
+# interpolate = np.ceil(maxtd / halfpi)
+
+# print("interpolate", interpolate)
+# if interpolate > 1.0:
+# vertices = self.interpolate(vertices, interpolate)
+
+# result = np.zeros((len(vertices) * 3 - 2, 2), float)
+# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ),
+# mpath.Path.code_type)
+# result[0] = vertices[0]
+# codes[0] = mpath.Path.MOVETO
+
+# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
+# tkappa = np.arctan(kappa)
+# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
+
+# t0 = vertices[0:-1, 0]
+# t1 = vertices[1: , 0]
+# r0 = vertices[0:-1, 1]
+# r1 = vertices[1: , 1]
+
+# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
+# td_scaled = td / (np.pi * 0.5)
+# rd = r1 - r0
+# r0kappa = r0 * kappa * td_scaled
+# r1kappa = r1 * kappa * td_scaled
+# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
+
+# result[1::3, 0] = t0 + (tkappa * td_scaled)
+# result[1::3, 1] = r0*hyp_kappa
+# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled)
+# # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
+
+# result[2::3, 0] = t1 - (tkappa * td_scaled)
+# result[2::3, 1] = r1*hyp_kappa
+# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled)
+# # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
+
+# result[3::3, 0] = t1
+# result[3::3, 1] = r1
+
+# print(vertices[:6], result[:6], t0[:6], t1[:6], td[:6],
+# td_scaled[:6], tkappa)
+# result = self.transform(result)
+# return mpath.Path(result, codes)
+# transform_path_non_affine = transform_path
diff --git a/contrib/python/matplotlib/py2/matplotlib/pylab.py b/contrib/python/matplotlib/py2/matplotlib/pylab.py
new file mode 100644
index 00000000000..67bb7fa1f1c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/pylab.py
@@ -0,0 +1,268 @@
+"""
+This is a procedural interface to the matplotlib object-oriented
+plotting library.
+
+The following plotting commands are provided; the majority have
+MATLAB |reg| [*]_ analogs and similar arguments.
+
+.. |reg| unicode:: 0xAE
+
+_Plotting commands
+ acorr - plot the autocorrelation function
+ annotate - annotate something in the figure
+ arrow - add an arrow to the axes
+ axes - Create a new axes
+ axhline - draw a horizontal line across axes
+ axvline - draw a vertical line across axes
+ axhspan - draw a horizontal bar across axes
+ axvspan - draw a vertical bar across axes
+ axis - Set or return the current axis limits
+ autoscale - turn axis autoscaling on or off, and apply it
+ bar - make a bar chart
+ barh - a horizontal bar chart
+ broken_barh - a set of horizontal bars with gaps
+ box - set the axes frame on/off state
+ boxplot - make a box and whisker plot
+ violinplot - make a violin plot
+ cla - clear current axes
+ clabel - label a contour plot
+ clf - clear a figure window
+ clim - adjust the color limits of the current image
+ close - close a figure window
+ colorbar - add a colorbar to the current figure
+ cohere - make a plot of coherence
+ contour - make a contour plot
+ contourf - make a filled contour plot
+ csd - make a plot of cross spectral density
+ delaxes - delete an axes from the current figure
+ draw - Force a redraw of the current figure
+ errorbar - make an errorbar graph
+ figlegend - make legend on the figure rather than the axes
+ figimage - make a figure image
+ figtext - add text in figure coords
+ figure - create or change active figure
+ fill - make filled polygons
+ findobj - recursively find all objects matching some criteria
+ gca - return the current axes
+ gcf - return the current figure
+ gci - get the current image, or None
+ getp - get a graphics property
+ grid - set whether gridding is on
+ hist - make a histogram
+ ioff - turn interaction mode off
+ ion - turn interaction mode on
+ isinteractive - return True if interaction mode is on
+ imread - load image file into array
+ imsave - save array as an image file
+ imshow - plot image data
+ legend - make an axes legend
+ locator_params - adjust parameters used in locating axis ticks
+ loglog - a log log plot
+ matshow - display a matrix in a new figure preserving aspect
+ margins - set margins used in autoscaling
+ pause - pause for a specified interval
+ pcolor - make a pseudocolor plot
+ pcolormesh - make a pseudocolor plot using a quadrilateral mesh
+ pie - make a pie chart
+ plot - make a line plot
+ plot_date - plot dates
+ plotfile - plot column data from an ASCII tab/space/comma delimited file
+ pie - pie charts
+ polar - make a polar plot on a PolarAxes
+ psd - make a plot of power spectral density
+ quiver - make a direction field (arrows) plot
+ rc - control the default params
+ rgrids - customize the radial grids and labels for polar
+ savefig - save the current figure
+ scatter - make a scatter plot
+ setp - set a graphics property
+ semilogx - log x axis
+ semilogy - log y axis
+ show - show the figures
+ specgram - a spectrogram plot
+ spy - plot sparsity pattern using markers or image
+ stem - make a stem plot
+ subplot - make one subplot (numrows, numcols, axesnum)
+ subplots - make a figure with a set of (numrows, numcols) subplots
+ subplots_adjust - change the params controlling the subplot positions of current figure
+ subplot_tool - launch the subplot configuration tool
+ suptitle - add a figure title
+ table - add a table to the plot
+ text - add some text at location x,y to the current axes
+ thetagrids - customize the radial theta grids and labels for polar
+ tick_params - control the appearance of ticks and tick labels
+ ticklabel_format - control the format of tick labels
+ title - add a title to the current axes
+ tricontour - make a contour plot on a triangular grid
+ tricontourf - make a filled contour plot on a triangular grid
+ tripcolor - make a pseudocolor plot on a triangular grid
+ triplot - plot a triangular grid
+ xcorr - plot the autocorrelation function of x and y
+ xlim - set/get the xlimits
+ ylim - set/get the ylimits
+ xticks - set/get the xticks
+ yticks - set/get the yticks
+ xlabel - add an xlabel to the current axes
+ ylabel - add a ylabel to the current axes
+
+ autumn - set the default colormap to autumn
+ bone - set the default colormap to bone
+ cool - set the default colormap to cool
+ copper - set the default colormap to copper
+ flag - set the default colormap to flag
+ gray - set the default colormap to gray
+ hot - set the default colormap to hot
+ hsv - set the default colormap to hsv
+ jet - set the default colormap to jet
+ pink - set the default colormap to pink
+ prism - set the default colormap to prism
+ spring - set the default colormap to spring
+ summer - set the default colormap to summer
+ winter - set the default colormap to winter
+
+_Event handling
+
+ connect - register an event handler
+ disconnect - remove a connected event handler
+
+_Matrix commands
+
+ cumprod - the cumulative product along a dimension
+ cumsum - the cumulative sum along a dimension
+ detrend - remove the mean or best fit line from an array
+ diag - the k-th diagonal of matrix
+ diff - the n-th difference of an array
+ eig - the eigenvalues and eigenvectors of v
+ eye - a matrix where the k-th diagonal is ones, else zero
+ find - return the indices where a condition is nonzero
+ fliplr - flip a matrix left/right (reverse the order of columns)
+ flipud - flip a matrix up/down (reverse the order of rows)
+ linspace - a linearly spaced vector of N values from min to max inclusive
+ logspace - a log-spaced vector of N values from min to max inclusive
+ meshgrid - repeat x and y to make regular matrices
+ ones - an array of ones
+ rand - an array from the uniform distribution [0,1]
+ randn - an array from the normal distribution
+ rot90 - rotate matrix k*90 degrees counterclockwise
+ squeeze - squeeze an array removing any dimensions of length 1
+ tri - a triangular matrix
+ tril - a lower triangular matrix
+ triu - an upper triangular matrix
+ vander - the Vandermonde matrix of vector x
+ svd - singular value decomposition
+ zeros - a matrix of zeros
+
+_Probability
+
+ normpdf - The Gaussian probability density function
+ rand - random numbers from the uniform distribution
+ randn - random numbers from the normal distribution
+
+_Statistics
+
+ amax - the maximum along dimension m
+ amin - the minimum along dimension m
+ corrcoef - correlation coefficient
+ cov - covariance matrix
+ mean - the mean along dimension m
+ median - the median along dimension m
+ norm - the norm of vector x
+ prod - the product along dimension m
+ ptp - the max-min along dimension m
+ std - the standard deviation along dimension m
+ asum - the sum along dimension m
+ ksdensity - the kernel density estimate
+
+_Time series analysis
+
+ bartlett - M-point Bartlett window
+ blackman - M-point Blackman window
+ cohere - the coherence using average periodogram
+ csd - the cross spectral density using average periodogram
+ fft - the fast Fourier transform of vector x
+ hamming - M-point Hamming window
+ hanning - M-point Hanning window
+ hist - compute the histogram of x
+ kaiser - M length Kaiser window
+ psd - the power spectral density using average periodogram
+ sinc - the sinc function of array x
+
+_Dates
+
+ date2num - convert python datetimes to numeric representation
+ drange - create an array of numbers for date plots
+ num2date - convert numeric type (float days since 0001) to datetime
+
+_Other
+
+ angle - the angle of a complex array
+ griddata - interpolate irregularly distributed data to a regular grid
+ load - Deprecated--please use loadtxt.
+ loadtxt - load ASCII data into array.
+ polyfit - fit x, y to an n-th order polynomial
+ polyval - evaluate an n-th order polynomial
+ roots - the roots of the polynomial coefficients in p
+ save - Deprecated--please use savetxt.
+ savetxt - save an array to an ASCII file.
+ trapz - trapezoidal integration
+
+__end
+
+.. [*] MATLAB is a registered trademark of The MathWorks, Inc.
+
+
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import warnings
+
+from matplotlib.cbook import (
+ flatten, exception_to_str, silent_list, iterable, dedent)
+
+import matplotlib as mpl
+
+from matplotlib.dates import (
+ date2num, num2date, datestr2num, strpdate2num, drange, epoch2num,
+ num2epoch, mx2num, DateFormatter, IndexDateFormatter, DateLocator,
+ RRuleLocator, YearLocator, MonthLocator, WeekdayLocator, DayLocator,
+ HourLocator, MinuteLocator, SecondLocator, rrule, MO, TU, WE, TH, FR,
+ SA, SU, YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY,
+ relativedelta)
+
+# bring all the symbols in so folks can import them from
+# pylab in one fell swoop
+
+## We are still importing too many things from mlab; more cleanup is needed.
+
+from matplotlib.mlab import (
+ amap, base_repr, binary_repr, bivariate_normal, center_matrix, csv2rec,
+ demean, detrend, detrend_linear, detrend_mean, detrend_none, dist,
+ dist_point_to_segment, distances_along_curve, entropy, exp_safe,
+ fftsurr, find, frange, get_sparse_matrix, get_xyz_where, griddata,
+ identity, inside_poly, is_closed_polygon, ispower2, isvector, l1norm,
+ l2norm, log2, longest_contiguous_ones, longest_ones, movavg, norm_flat,
+ normpdf, path_length, poly_below, poly_between, prctile, prctile_rank,
+ rec2csv, rec_append_fields, rec_drop_fields, rec_join, rk4, rms_flat,
+ segments_intersect, slopes, stineman_interp, vector_lengths,
+ window_hanning, window_none)
+
+from matplotlib import cbook, mlab, pyplot as plt
+from matplotlib.pyplot import *
+
+from numpy import *
+from numpy.fft import *
+from numpy.random import *
+from numpy.linalg import *
+
+import numpy as np
+import numpy.ma as ma
+
+# don't let numpy's datetime hide stdlib
+import datetime
+
+# This is needed, or bytes will be numpy.random.bytes from
+# "from numpy.random import *" above
+bytes = six.moves.builtins.bytes
diff --git a/contrib/python/matplotlib/py2/matplotlib/pyplot.py b/contrib/python/matplotlib/py2/matplotlib/pyplot.py
new file mode 100644
index 00000000000..fb5928dc65f
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/pyplot.py
@@ -0,0 +1,4099 @@
+# Note: The first part of this file can be modified in place, but the latter
+# part is autogenerated by the boilerplate.py script.
+
+"""
+`matplotlib.pyplot` is a state-based interface to matplotlib. It provides
+a MATLAB-like way of plotting.
+
+pyplot is mainly intended for interactive plots and simple cases of programmatic
+plot generation::
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ x = np.arange(0, 5, 0.1)
+ y = np.sin(x)
+ plt.plot(x, y)
+
+The object-oriented API is recommended for more complex plots.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import sys
+import time
+import warnings
+
+from cycler import cycler
+import matplotlib
+import matplotlib.colorbar
+from matplotlib import style
+from matplotlib import _pylab_helpers, interactive
+from matplotlib.cbook import dedent, silent_list, is_numlike
+from matplotlib.cbook import _string_to_bool
+from matplotlib.cbook import deprecated, warn_deprecated
+from matplotlib import docstring
+from matplotlib.backend_bases import FigureCanvasBase
+from matplotlib.figure import Figure, figaspect
+from matplotlib.gridspec import GridSpec
+from matplotlib.image import imread as _imread
+from matplotlib.image import imsave as _imsave
+from matplotlib import rcParams, rcParamsDefault, get_backend
+from matplotlib import rc_context
+from matplotlib.rcsetup import interactive_bk as _interactive_bk
+from matplotlib.artist import getp, get, Artist
+from matplotlib.artist import setp as _setp
+from matplotlib.axes import Axes, Subplot
+from matplotlib.projections import PolarAxes
+from matplotlib import mlab # for csv2rec, detrend_none, window_hanning
+from matplotlib.scale import get_scale_docs, get_scale_names
+
+from matplotlib import cm
+from matplotlib.cm import get_cmap, register_cmap
+
+import numpy as np
+
+# We may not need the following imports here:
+from matplotlib.colors import Normalize
+from matplotlib.lines import Line2D
+from matplotlib.text import Text, Annotation
+from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
+from matplotlib.widgets import SubplotTool, Button, Slider, Widget
+
+from .ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
+ FuncFormatter, FormatStrFormatter, ScalarFormatter,\
+ LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
+ Locator, IndexLocator, FixedLocator, NullLocator,\
+ LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
+ MaxNLocator
+from matplotlib.backends import pylab_setup
+
+## Backend detection ##
+def _backend_selection():
+ """ If rcParams['backend_fallback'] is true, check to see if the
+ current backend is compatible with the currently running event
+ loop, and if not, switch to a compatible one.
+ """
+ backend = rcParams['backend']
+ if not rcParams['backend_fallback'] or backend not in _interactive_bk:
+ return
+ is_agg_backend = rcParams['backend'].endswith('Agg')
+ if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
+ import wx
+ if wx.App.IsMainLoopRunning():
+ rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
+ elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
+ import PyQt4.QtGui
+ if not PyQt4.QtGui.qApp.startingUp():
+ # The mainloop is running.
+ rcParams['backend'] = 'qt4Agg'
+ elif 'PyQt5.QtCore' in sys.modules and not backend == 'Qt5Agg':
+ import PyQt5.QtWidgets
+ if not PyQt5.QtWidgets.qApp.startingUp():
+ # The mainloop is running.
+ rcParams['backend'] = 'qt5Agg'
+ elif ('gtk' in sys.modules and
+ backend not in ('GTK', 'GTKAgg', 'GTKCairo')):
+ if 'gi' in sys.modules:
+ from gi.repository import GObject
+ ml = GObject.MainLoop
+ else:
+ import gobject
+ ml = gobject.MainLoop
+ if ml().is_running():
+ rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
+ elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
+ # import Tkinter
+ pass # what if anything do we need to do for tkinter?
+
+_backend_selection()
+
+## Global ##
+
+_backend_mod, new_figure_manager, draw_if_interactive, _show = pylab_setup()
+
+_IP_REGISTERED = None
+_INSTALL_FIG_OBSERVER = False
+
+
+def install_repl_displayhook():
+ """
+ Install a repl display hook so that any stale figures are automatically
+ redrawn when control is returned to the repl.
+
+ This works with IPython terminals and kernels,
+ as well as vanilla python shells.
+ """
+ global _IP_REGISTERED
+ global _INSTALL_FIG_OBSERVER
+
+ class _NotIPython(Exception):
+ pass
+
+ # see if we have IPython hooks around; if so, use them
+
+ try:
+ if 'IPython' in sys.modules:
+ from IPython import get_ipython
+ ip = get_ipython()
+ if ip is None:
+ raise _NotIPython()
+
+ if _IP_REGISTERED:
+ return
+
+ def post_execute():
+ if matplotlib.is_interactive():
+ draw_all()
+
+ # IPython >= 2
+ try:
+ ip.events.register('post_execute', post_execute)
+ except AttributeError:
+ # IPython 1.x
+ ip.register_post_execute(post_execute)
+
+ _IP_REGISTERED = post_execute
+ _INSTALL_FIG_OBSERVER = False
+
+ # trigger IPython's eventloop integration, if available
+ from IPython.core.pylabtools import backend2gui
+
+ ipython_gui_name = backend2gui.get(get_backend())
+ if ipython_gui_name:
+ ip.enable_gui(ipython_gui_name)
+ else:
+ _INSTALL_FIG_OBSERVER = True
+
+ # import failed or ipython is not running
+ except (ImportError, _NotIPython):
+ _INSTALL_FIG_OBSERVER = True
+
+
+def uninstall_repl_displayhook():
+ """
+ Uninstall the matplotlib display hook.
+
+ .. warning::
+
+ This requires IPython >= 2; for IPython < 2 it will raise a
+ ``NotImplementedError``.
+
+ .. warning::
+
+ If you are using vanilla Python and have installed another
+ display hook, this will reset ``sys.displayhook`` to whatever
+ function was there when matplotlib installed its display hook,
+ possibly discarding your changes.
+ """
+ global _IP_REGISTERED
+ global _INSTALL_FIG_OBSERVER
+ if _IP_REGISTERED:
+ from IPython import get_ipython
+ ip = get_ipython()
+ try:
+ ip.events.unregister('post_execute', _IP_REGISTERED)
+ except AttributeError:
+ raise NotImplementedError("Can not unregister events "
+ "in IPython < 2.0")
+ _IP_REGISTERED = None
+
+ if _INSTALL_FIG_OBSERVER:
+ _INSTALL_FIG_OBSERVER = False
+
+
+draw_all = _pylab_helpers.Gcf.draw_all
+
+
+@docstring.copy_dedent(Artist.findobj)
+def findobj(o=None, match=None, include_self=True):
+ if o is None:
+ o = gcf()
+ return o.findobj(match, include_self=include_self)
+
+
+def switch_backend(newbackend):
+ """
+ Switch the default backend. This feature is **experimental**, and
+ is only expected to work switching to an image backend. e.g., if
+ you have a bunch of PostScript scripts that you want to run from
+ an interactive ipython session, you may want to switch to the PS
+ backend before running them to avoid having a bunch of GUI windows
+ popup. If you try to interactively switch from one GUI backend to
+ another, you will explode.
+
+ Calling this command will close all open windows.
+ """
+ close('all')
+ global _backend_mod, new_figure_manager, draw_if_interactive, _show
+ matplotlib.use(newbackend, warn=False, force=True)
+ from matplotlib.backends import pylab_setup
+ _backend_mod, new_figure_manager, draw_if_interactive, _show = pylab_setup()
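+ # For example, switching to a non-interactive backend for batch rendering
+ # (a sketch; ``plt`` stands for this module, ``matplotlib.pyplot``):
+ #
+ #     plt.switch_backend('agg')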
+
+
+def show(*args, **kw):
+ """
+ Display a figure.
+ When running in ipython with its pylab mode, display all
+ figures and return to the ipython prompt.
+
+ In non-interactive mode, display all figures and block until
+ the figures have been closed; in interactive mode it has no
+ effect unless figures were created prior to a change from
+ non-interactive to interactive mode (not recommended). In
+ that case it displays the figures but does not block.
+
+ A single experimental keyword argument, *block*, may be
+ set to True or False to override the blocking behavior
+ described above.
+ """
+ global _show
+ return _show(*args, **kw)
+
+
+def isinteractive():
+ """
+ Return status of interactive mode.
+ """
+ return matplotlib.is_interactive()
+
+
+def ioff():
+ """Turn interactive mode off."""
+ matplotlib.interactive(False)
+ uninstall_repl_displayhook()
+
+
+def ion():
+ """Turn interactive mode on."""
+ matplotlib.interactive(True)
+ install_repl_displayhook()
+
+
+def pause(interval):
+ """
+ Pause for *interval* seconds.
+
+ If there is an active figure, it will be updated and displayed before the
+ pause, and the GUI event loop (if any) will run during the pause.
+
+ This can be used for crude animation. For more complex animation, see
+ :mod:`matplotlib.animation`.
+
+ Notes
+ -----
+ This function is experimental; its behavior may be changed or extended in a
+ future release.
+ """
+ manager = _pylab_helpers.Gcf.get_active()
+ if manager is not None:
+ canvas = manager.canvas
+ if canvas.figure.stale:
+ canvas.draw_idle()
+ show(block=False)
+ canvas.start_event_loop(interval)
+ else:
+ time.sleep(interval)
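+ # For example, a crude animation loop (a sketch; ``plt`` is this module):
+ #
+ #     for i in range(50):
+ #         plt.plot([i], [i ** 2], 'o')
+ #         plt.pause(0.05)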
+
+
+@docstring.copy_dedent(matplotlib.rc)
+def rc(*args, **kwargs):
+ matplotlib.rc(*args, **kwargs)
+
+
+@docstring.copy_dedent(matplotlib.rc_context)
+def rc_context(rc=None, fname=None):
+ return matplotlib.rc_context(rc, fname)
+
+
+@docstring.copy_dedent(matplotlib.rcdefaults)
+def rcdefaults():
+ matplotlib.rcdefaults()
+ if matplotlib.is_interactive():
+ draw_all()
+
+
+# The current "image" (ScalarMappable) is retrieved or set
+# only via the pyplot interface using the following two
+# functions:
+def gci():
+ """
+ Get the current colorable artist. Specifically, returns the
+ current :class:`~matplotlib.cm.ScalarMappable` instance (image or
+ patch collection), or *None* if no images or patch collections
+ have been defined. The commands :func:`~matplotlib.pyplot.imshow`
+ and :func:`~matplotlib.pyplot.figimage` create
+ :class:`~matplotlib.image.Image` instances, and the commands
+ :func:`~matplotlib.pyplot.pcolor` and
+ :func:`~matplotlib.pyplot.scatter` create
+ :class:`~matplotlib.collections.Collection` instances. The
+ current image is an attribute of the current axes, or the nearest
+ earlier axes in the current figure that contains an image.
+ """
+ return gcf()._gci()
+
+
+def sci(im):
+ """
+ Set the current image. This image will be the target of colormap
+ commands like :func:`~matplotlib.pyplot.jet`,
+ :func:`~matplotlib.pyplot.hot` or
+ :func:`~matplotlib.pyplot.clim`. The current image is an
+ attribute of the current axes.
+ """
+ gca()._sci(im)
+
+
+## Any Artist ##
+# (getp is simply imported)
+@docstring.copy(_setp)
+def setp(*args, **kwargs):
+ return _setp(*args, **kwargs)
+
+
+def xkcd(scale=1, length=100, randomness=2):
+ """
+ Turns on `xkcd <https://xkcd.com/>`_ sketch-style drawing mode.
+ This will only have effect on things drawn after this function is
+ called.
+
+ For best results, the "Humor Sans" font should be installed: it is
+ not included with matplotlib.
+
+ Parameters
+ ----------
+ scale : float, optional
+ The amplitude of the wiggle perpendicular to the source line.
+ length : float, optional
+ The length of the wiggle along the line.
+ randomness : float, optional
+ The scale factor by which the length is shrunken or expanded.
+
+ Notes
+ -----
+ This function works by setting a number of rcParams, so it will probably
+ override others you have set before.
+
+ If you want the effects of this function to be temporary, it can
+ be used as a context manager, for example::
+
+ with plt.xkcd():
+ # This figure will be in XKCD-style
+ fig1 = plt.figure()
+ # ...
+
+ # This figure will be in regular style
+ fig2 = plt.figure()
+ """
+ if rcParams['text.usetex']:
+ raise RuntimeError(
+ "xkcd mode is not compatible with text.usetex = True")
+
+ from matplotlib import patheffects
+ return rc_context({
+ 'font.family': ['xkcd', 'Humor Sans', 'Comic Sans MS'],
+ 'font.size': 14.0,
+ 'path.sketch': (scale, length, randomness),
+ 'path.effects': [patheffects.withStroke(linewidth=4, foreground="w")],
+ 'axes.linewidth': 1.5,
+ 'lines.linewidth': 2.0,
+ 'figure.facecolor': 'white',
+ 'grid.linewidth': 0.0,
+ 'axes.grid': False,
+ 'axes.unicode_minus': False,
+ 'axes.edgecolor': 'black',
+ 'xtick.major.size': 8,
+ 'xtick.major.width': 3,
+ 'ytick.major.size': 8,
+ 'ytick.major.width': 3,
+ })
+
+
+## Figures ##
+
+def figure(num=None, # autoincrement if None, else integer from 1-N
+ figsize=None, # defaults to rc figure.figsize
+ dpi=None, # defaults to rc figure.dpi
+ facecolor=None, # defaults to rc figure.facecolor
+ edgecolor=None, # defaults to rc figure.edgecolor
+ frameon=True,
+ FigureClass=Figure,
+ clear=False,
+ **kwargs
+ ):
+ """
+ Creates a new figure.
+
+ Parameters
+ ----------
+
+ num : integer or string, optional, default: None
+ If not provided, a new figure will be created, and the figure number
+ will be incremented. The figure object holds this number in a `number`
+ attribute.
+ If num is provided, and a figure with this id already exists, make
+ it active and return a reference to it. If a figure with this id does
+ not exist, create it and return it.
+ If num is a string, the window title will be set to this figure's
+ `num`.
+
+ figsize : tuple of integers, optional, default: None
+ width, height in inches. If not provided, defaults to rc
+ figure.figsize.
+
+ dpi : integer, optional, default: None
+ resolution of the figure. If not provided, defaults to rc figure.dpi.
+
+ facecolor :
+ the background color. If not provided, defaults to rc figure.facecolor.
+
+ edgecolor :
+ the border color. If not provided, defaults to rc figure.edgecolor.
+
+ frameon : bool, optional, default: True
+ If False, suppress drawing the figure frame.
+
+ FigureClass : class derived from matplotlib.figure.Figure
+ Optionally use a custom Figure instance.
+
+ clear : bool, optional, default: False
+ If True and the figure already exists, then it is cleared.
+
+ Returns
+ -------
+ figure : Figure
+ The Figure instance returned will also be passed to new_figure_manager
+ in the backends, which allows hooking custom Figure classes into the
+ pylab interface. Additional kwargs will be passed to the figure init
+ function.
+
+ Notes
+ -----
+ If you are creating many figures, make sure you explicitly call "close"
+ on the figures you are not using, because this will enable pylab
+ to properly clean up the memory.
+
+ rcParams defines the default values, which can be modified in the
+ matplotlibrc file
+
+ """
+
+ if figsize is None:
+ figsize = rcParams['figure.figsize']
+ if dpi is None:
+ dpi = rcParams['figure.dpi']
+ if facecolor is None:
+ facecolor = rcParams['figure.facecolor']
+ if edgecolor is None:
+ edgecolor = rcParams['figure.edgecolor']
+
+ allnums = get_fignums()
+ next_num = max(allnums) + 1 if allnums else 1
+ figLabel = ''
+ if num is None:
+ num = next_num
+ elif isinstance(num, six.string_types):
+ figLabel = num
+ allLabels = get_figlabels()
+ if figLabel not in allLabels:
+ if figLabel == 'all':
+ warnings.warn("close('all') closes all existing figures")
+ num = next_num
+ else:
+ inum = allLabels.index(figLabel)
+ num = allnums[inum]
+ else:
+ num = int(num) # crude validation of num argument
+
+ figManager = _pylab_helpers.Gcf.get_fig_manager(num)
+ if figManager is None:
+ max_open_warning = rcParams['figure.max_open_warning']
+
+ if (max_open_warning >= 1 and len(allnums) >= max_open_warning):
+ warnings.warn(
+ "More than %d figures have been opened. Figures "
+ "created through the pyplot interface "
+ "(`matplotlib.pyplot.figure`) are retained until "
+ "explicitly closed and may consume too much memory. "
+ "(To control this warning, see the rcParam "
+ "`figure.max_open_warning`)." %
+ max_open_warning, RuntimeWarning)
+
+ if get_backend().lower() == 'ps':
+ dpi = 72
+
+ figManager = new_figure_manager(num, figsize=figsize,
+ dpi=dpi,
+ facecolor=facecolor,
+ edgecolor=edgecolor,
+ frameon=frameon,
+ FigureClass=FigureClass,
+ **kwargs)
+
+ if figLabel:
+ figManager.set_window_title(figLabel)
+ figManager.canvas.figure.set_label(figLabel)
+
+ # make this figure current on button press event
+ def make_active(event):
+ _pylab_helpers.Gcf.set_active(figManager)
+
+ cid = figManager.canvas.mpl_connect('button_press_event', make_active)
+ figManager._cidgcf = cid
+
+ _pylab_helpers.Gcf.set_active(figManager)
+ fig = figManager.canvas.figure
+ fig.number = num
+
+ # Make sure backends that we do not ship (e.g. the inline backend),
+ # which expect this to be called from plotting commands so that the
+ # figure gets shown, still work. There is probably a better way to
+ # do this in the FigureManager base class.
+ if matplotlib.is_interactive():
+ draw_if_interactive()
+
+ if _INSTALL_FIG_OBSERVER:
+ fig.stale_callback = _auto_draw_if_interactive
+
+ if clear:
+ figManager.canvas.figure.clear()
+
+ return figManager.canvas.figure
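+ # For example (a sketch; ``plt`` is this module):
+ #
+ #     fig = plt.figure('signals', figsize=(6, 4))
+ #     same_fig = plt.figure('signals')   # re-activates the existing figure
+ #     assert same_fig is fig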
+
+
+def _auto_draw_if_interactive(fig, val):
+ """
+ This is an internal helper function for making sure that auto-redrawing
+ works as intended in the plain python repl.
+
+ Parameters
+ ----------
+ fig : Figure
+ A figure object which is assumed to be associated with a canvas
+ """
+ if val and matplotlib.is_interactive() and not fig.canvas.is_saving():
+ fig.canvas.draw_idle()
+
+
+def gcf():
+ """Get a reference to the current figure."""
+ figManager = _pylab_helpers.Gcf.get_active()
+ if figManager is not None:
+ return figManager.canvas.figure
+ else:
+ return figure()
+
+
+def fignum_exists(num):
+ return _pylab_helpers.Gcf.has_fignum(num) or num in get_figlabels()
+
+
+def get_fignums():
+ """Return a list of existing figure numbers."""
+ return sorted(_pylab_helpers.Gcf.figs)
+
+
+def get_figlabels():
+ """Return a list of existing figure labels."""
+ figManagers = _pylab_helpers.Gcf.get_all_fig_managers()
+ figManagers.sort(key=lambda m: m.num)
+ return [m.canvas.figure.get_label() for m in figManagers]
+
+
+def get_current_fig_manager():
+ figManager = _pylab_helpers.Gcf.get_active()
+ if figManager is None:
+ gcf() # creates an active figure as a side effect
+ figManager = _pylab_helpers.Gcf.get_active()
+ return figManager
+
+
+@docstring.copy_dedent(FigureCanvasBase.mpl_connect)
+def connect(s, func):
+ return get_current_fig_manager().canvas.mpl_connect(s, func)
+
+
+@docstring.copy_dedent(FigureCanvasBase.mpl_disconnect)
+def disconnect(cid):
+ return get_current_fig_manager().canvas.mpl_disconnect(cid)
+
+
+def close(*args):
+ """
+ Close a figure window.
+
+ ``close()`` by itself closes the current figure
+
+ ``close(fig)`` closes the `.Figure` instance *fig*
+
+ ``close(num)`` closes the figure number *num*
+
+ ``close(name)`` where *name* is a string, closes figure with that label
+
+ ``close('all')`` closes all the figure windows
+ """
+
+ if len(args) == 0:
+ figManager = _pylab_helpers.Gcf.get_active()
+ if figManager is None:
+ return
+ else:
+ _pylab_helpers.Gcf.destroy(figManager.num)
+ elif len(args) == 1:
+ arg = args[0]
+ if arg == 'all':
+ _pylab_helpers.Gcf.destroy_all()
+ elif isinstance(arg, six.integer_types):
+ _pylab_helpers.Gcf.destroy(arg)
+ elif hasattr(arg, 'int'):
+ # if we are dealing with a type UUID, we
+ # can use its integer representation
+ _pylab_helpers.Gcf.destroy(arg.int)
+ elif isinstance(arg, six.string_types):
+ allLabels = get_figlabels()
+ if arg in allLabels:
+ num = get_fignums()[allLabels.index(arg)]
+ _pylab_helpers.Gcf.destroy(num)
+ elif isinstance(arg, Figure):
+ _pylab_helpers.Gcf.destroy_fig(arg)
+ else:
+ raise TypeError('Unrecognized argument type %s to close' % type(arg))
+ else:
+ raise TypeError('close takes 0 or 1 arguments')
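+
+
+# Illustrative sketch (not part of the original module): the argument forms
+# accepted by `close`, as listed in its docstring.  The helper name is
+# hypothetical.
+def _example_close_forms():
+    fig = figure('scratch')
+    close(fig)            # close by Figure instance
+    figure(5)
+    close(5)              # close by figure number
+    figure('scratch')
+    close('scratch')      # close by label
+    close('all')          # close every remaining figure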
+
+
+def clf():
+ """
+ Clear the current figure.
+ """
+ gcf().clf()
+
+
+def draw():
+ """Redraw the current figure.
+
+ This is used to update a figure that has been altered, but not
+ automatically re-drawn. If interactive mode is on (:func:`.ion()`), this
+ should be only rarely needed, but there may be ways to modify the state of
+ a figure without marking it as `stale`. Please report these cases as
+ bugs.
+
+ A more object-oriented alternative, given any
+ :class:`~matplotlib.figure.Figure` instance, :attr:`fig`, that
+ was created using a :mod:`~matplotlib.pyplot` function, is::
+
+ fig.canvas.draw_idle()
+ """
+ get_current_fig_manager().canvas.draw_idle()
+
+
+@docstring.copy_dedent(Figure.savefig)
+def savefig(*args, **kwargs):
+ fig = gcf()
+ res = fig.savefig(*args, **kwargs)
+ fig.canvas.draw_idle() # need this if 'transparent=True' to reset colors
+ return res
+
+
+@docstring.copy_dedent(Figure.ginput)
+def ginput(*args, **kwargs):
+ """
+ Blocking call to interact with the figure.
+
+ This will wait for *n* clicks from the user and return a list of the
+ coordinates of each click.
+
+ If *timeout* is negative, does not timeout.
+ """
+ return gcf().ginput(*args, **kwargs)
+
+
+@docstring.copy_dedent(Figure.waitforbuttonpress)
+def waitforbuttonpress(*args, **kwargs):
+ """
+ Blocking call to interact with the figure.
+
+ This will wait for *n* key or mouse clicks from the user and
+ return a list containing True's for keyboard clicks and False's
+ for mouse clicks.
+
+ If *timeout* is negative, does not timeout.
+ """
+ return gcf().waitforbuttonpress(*args, **kwargs)
+
+
+# Putting things in figures
+
+@docstring.copy_dedent(Figure.text)
+def figtext(*args, **kwargs):
+ return gcf().text(*args, **kwargs)
+
+
+@docstring.copy_dedent(Figure.suptitle)
+def suptitle(*args, **kwargs):
+ return gcf().suptitle(*args, **kwargs)
+
+
+@docstring.copy_dedent(Figure.figimage)
+def figimage(*args, **kwargs):
+ return gcf().figimage(*args, **kwargs)
+
+
+def figlegend(*args, **kwargs):
+ """
+ Place a legend in the figure.
+
+ *labels*
+ a sequence of strings
+
+ *handles*
+ a sequence of :class:`~matplotlib.lines.Line2D` or
+ :class:`~matplotlib.patches.Patch` instances
+
+ *loc*
+ can be a string or an integer specifying the legend
+ location
+
+ A :class:`matplotlib.legend.Legend` instance is returned.
+
+ Examples
+ --------
+
+ To make a legend from existing artists on every axes::
+
+ figlegend()
+
+ To make a legend for a list of lines and labels::
+
+ figlegend( (line1, line2, line3),
+ ('label1', 'label2', 'label3'),
+ 'upper right' )
+
+ .. seealso::
+
+ :func:`~matplotlib.pyplot.legend`
+
+ """
+ return gcf().legend(*args, **kwargs)
+
+
+## Figure and Axes hybrid ##
+
+_hold_msg = """pyplot.hold is deprecated.
+ Future behavior will be consistent with the long-time default:
+ plot commands add elements without first clearing the
+ Axes and/or Figure."""
+
+@deprecated("2.0", message=_hold_msg)
+def hold(b=None):
+ """
+ Set the hold state. If *b* is None (default), toggle the
+ hold state, else set the hold state to boolean value *b*::
+
+ hold() # toggle hold
+ hold(True) # hold is on
+ hold(False) # hold is off
+
+ When *hold* is *True*, subsequent plot commands will add elements to
+ the current axes. When *hold* is *False*, the current axes and
+ figure will be cleared on the next plot command.
+
+ """
+
+ fig = gcf()
+ ax = fig.gca()
+
+ if b is not None:
+ b = bool(b)
+ fig._hold = b
+ ax._hold = b
+
+    # b=None toggles the hold state, so get the current hold state;
+    # pyplot's hold should not toggle the rc setting.
+ b = ax._hold
+
+ # The comment above looks ancient; and probably the line below,
+ # contrary to the comment, is equally ancient. It will trigger
+ # a second warning, but "Oh, well...".
+ rc('axes', hold=b)
+
+@deprecated("2.0", message=_hold_msg)
+def ishold():
+ """
+ Return the hold status of the current axes.
+ """
+ return gca()._hold
+
+
+@deprecated("2.0", message=_hold_msg)
+def over(func, *args, **kwargs):
+ """
+ Call a function with hold(True).
+
+ Calls::
+
+ func(*args, **kwargs)
+
+ with ``hold(True)`` and then restores the hold state.
+
+ """
+ ax = gca()
+ h = ax._hold
+ ax._hold = True
+ func(*args, **kwargs)
+ ax._hold = h
+
+## Axes ##
+
+
+def axes(arg=None, **kwargs):
+ """
+ Add an axes to the current figure and make it the current axes.
+
+ Parameters
+ ----------
+ arg : None or 4-tuple or Axes
+ The exact behavior of this function depends on the type:
+
+ - *None*: A new full window axes is added using
+ ``subplot(111, **kwargs)``
+ - 4-tuple of floats *rect* = ``[left, bottom, width, height]``.
+ A new axes is added with dimensions *rect* in normalized
+ (0, 1) units using `~.Figure.add_axes` on the current figure.
+ - `~matplotlib.axes.Axes`: This is equivalent to `.pyplot.sca`.
+ It sets the current axes to *arg*. Note: This implicitly
+ changes the current figure to the parent of *arg*.
+
+ .. note:: The use of an Axes as an argument is deprecated and will be
+ removed in v3.0. Please use `.pyplot.sca` instead.
+
+ Other Parameters
+ ----------------
+ **kwargs :
+ For allowed keyword arguments see `.pyplot.subplot` and
+ `.Figure.add_axes` respectively. Some common keyword arguments are
+ listed below:
+
+ ========= =========== =================================================
+ kwarg Accepts Description
+ ========= =========== =================================================
+ facecolor color the axes background color
+ frameon bool whether to display the frame
+ sharex otherax share x-axis with *otherax*
+ sharey otherax share y-axis with *otherax*
+ polar bool whether to use polar axes
+ aspect [str | num] ['equal', 'auto'] or a number. If a number, the
+ ratio of y-unit/x-unit in screen-space. See also
+ `~.Axes.set_aspect`.
+ ========= =========== =================================================
+
+ Returns
+ -------
+ axes : Axes
+ The created or activated axes.
+
+ Examples
+ --------
+ Creating a new full window axes::
+
+ >>> plt.axes()
+
+ Creating a new axes with specified dimensions and some kwargs::
+
+ >>> plt.axes((left, bottom, width, height), facecolor='w')
+
+ """
+
+ if arg is None:
+ return subplot(111, **kwargs)
+
+ if isinstance(arg, Axes):
+ warn_deprecated("2.2",
+ message="Using pyplot.axes(ax) with ax an Axes "
+ "argument is deprecated. Please use "
+ "pyplot.sca(ax) instead.")
+ ax = arg
+ sca(ax)
+ return ax
+ else:
+ rect = arg
+ return gcf().add_axes(rect, **kwargs)
+
+
+def delaxes(ax=None):
+ """
+ Remove the given `Axes` *ax* from the current figure. If *ax* is *None*,
+ the current axes is removed. A KeyError is raised if the axes doesn't exist.
+ """
+ if ax is None:
+ ax = gca()
+ gcf().delaxes(ax)
+
+
+def sca(ax):
+ """
+ Set the current Axes instance to *ax*.
+
+ The current Figure is updated to the parent of *ax*.
+ """
+ managers = _pylab_helpers.Gcf.get_all_fig_managers()
+ for m in managers:
+ if ax in m.canvas.figure.axes:
+ _pylab_helpers.Gcf.set_active(m)
+ m.canvas.figure.sca(ax)
+ return
+ raise ValueError("Axes instance argument was not found in a figure.")
+
+
+def gca(**kwargs):
+ """
+ Get the current :class:`~matplotlib.axes.Axes` instance on the
+ current figure matching the given keyword args, or create one.
+
+ Examples
+ --------
+ To get the current polar axes on the current figure::
+
+ plt.gca(projection='polar')
+
+ If the current axes doesn't exist, or isn't a polar one, the appropriate
+ axes will be created and then returned.
+
+ See Also
+ --------
+ matplotlib.figure.Figure.gca : The figure's gca method.
+ """
+ return gcf().gca(**kwargs)
+
+# More ways of creating axes:
+
+
+def subplot(*args, **kwargs):
+ """
+ Return a subplot axes at the given grid position.
+
+ Call signature::
+
+ subplot(nrows, ncols, index, **kwargs)
+
+ In the current figure, create and return an `~matplotlib.axes.Axes`,
+ at position *index* of a (virtual) grid of *nrows* by *ncols* axes.
+ Indexes go from 1 to ``nrows * ncols``, incrementing in row-major order.
+
+ If *nrows*, *ncols* and *index* are all less than 10, they can also be
+ given as a single, concatenated, three-digit number.
+
+ For example, ``subplot(2, 3, 3)`` and ``subplot(233)`` both create an
+ `matplotlib.axes.Axes` at the top right corner of the current figure,
+ occupying half of the figure height and a third of the figure width.
+
+ .. note::
+
+ Creating a subplot will delete any pre-existing subplot that overlaps
+ with it beyond sharing a boundary::
+
+ import matplotlib.pyplot as plt
+ # plot a line, implicitly creating a subplot(111)
+ plt.plot([1,2,3])
+ # now create a subplot which represents the top plot of a grid
+ # with 2 rows and 1 column. Since this subplot will overlap the
+ # first, the plot (and its axes) previously created, will be removed
+ plt.subplot(211)
+ plt.plot(range(12))
+ plt.subplot(212, facecolor='y') # creates 2nd subplot with yellow background
+
+ If you do not want this behavior, use the
+ :meth:`~matplotlib.figure.Figure.add_subplot` method or the
+ :func:`~matplotlib.pyplot.axes` function instead.
+
+ Keyword arguments:
+
+ *facecolor*:
+ The background color of the subplot, which can be any valid
+ color specifier. See :mod:`matplotlib.colors` for more
+ information.
+
+ *polar*:
+        A boolean flag indicating whether the subplot should use a polar
+        projection.  Defaults to *False*.
+
+ *projection*:
+ A string giving the name of a custom projection to be used
+ for the subplot. This projection must have been previously
+ registered. See :mod:`matplotlib.projections`.
+
+ .. seealso::
+
+ :func:`~matplotlib.pyplot.axes`
+ For additional information on :func:`axes` and
+ :func:`subplot` keyword arguments.
+
+ :file:`gallery/pie_and_polar_charts/polar_scatter.py`
+ For an example
+
+ **Example:**
+
+ .. plot:: gallery/subplots_axes_and_figures/subplot.py
+
+ """
+ # if subplot called without arguments, create subplot(1,1,1)
+ if len(args)==0:
+ args=(1,1,1)
+
+ # This check was added because it is very easy to type
+ # subplot(1, 2, False) when subplots(1, 2, False) was intended
+ # (sharex=False, that is). In most cases, no error will
+ # ever occur, but mysterious behavior can result because what was
+ # intended to be the sharex argument is instead treated as a
+ # subplot index for subplot()
+    if len(args) >= 3 and isinstance(args[2], bool):
+ warnings.warn("The subplot index argument to subplot() appears"
+ " to be a boolean. Did you intend to use subplots()?")
+
+ fig = gcf()
+ a = fig.add_subplot(*args, **kwargs)
+ bbox = a.bbox
+ byebye = []
+ for other in fig.axes:
+ if other==a: continue
+ if bbox.fully_overlaps(other.bbox):
+ byebye.append(other)
+ for ax in byebye: delaxes(ax)
+
+ return a
+
+
+def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
+ subplot_kw=None, gridspec_kw=None, **fig_kw):
+ """
+ Create a figure and a set of subplots
+
+ This utility wrapper makes it convenient to create common layouts of
+ subplots, including the enclosing figure object, in a single call.
+
+ Parameters
+ ----------
+ nrows, ncols : int, optional, default: 1
+ Number of rows/columns of the subplot grid.
+
+ sharex, sharey : bool or {'none', 'all', 'row', 'col'}, default: False
+ Controls sharing of properties among x (`sharex`) or y (`sharey`)
+ axes:
+
+ - True or 'all': x- or y-axis will be shared among all
+ subplots.
+ - False or 'none': each subplot x- or y-axis will be
+ independent.
+ - 'row': each subplot row will share an x- or y-axis.
+ - 'col': each subplot column will share an x- or y-axis.
+
+ When subplots have a shared x-axis along a column, only the x tick
+ labels of the bottom subplot are created. Similarly, when subplots
+ have a shared y-axis along a row, only the y tick labels of the first
+ column subplot are created. To later turn other subplots' ticklabels
+ on, use :meth:`~matplotlib.axes.Axes.tick_params`.
+
+ squeeze : bool, optional, default: True
+ - If True, extra dimensions are squeezed out from the returned
+ array of Axes:
+
+ - if only one subplot is constructed (nrows=ncols=1), the
+ resulting single Axes object is returned as a scalar.
+ - for Nx1 or 1xM subplots, the returned object is a 1D numpy
+ object array of Axes objects.
+ - for NxM, subplots with N>1 and M>1 are returned as a 2D array.
+
+ - If False, no squeezing at all is done: the returned Axes object is
+ always a 2D array containing Axes instances, even if it ends up
+ being 1x1.
+
+ subplot_kw : dict, optional
+ Dict with keywords passed to the
+ :meth:`~matplotlib.figure.Figure.add_subplot` call used to create each
+ subplot.
+
+ gridspec_kw : dict, optional
+ Dict with keywords passed to the
+ :class:`~matplotlib.gridspec.GridSpec` constructor used to create the
+ grid the subplots are placed on.
+
+ **fig_kw :
+ All additional keyword arguments are passed to the :func:`figure` call.
+
+ Returns
+ -------
+ fig : :class:`matplotlib.figure.Figure` object
+
+ ax : Axes object or array of Axes objects.
+
+ ax can be either a single :class:`matplotlib.axes.Axes` object or an
+ array of Axes objects if more than one subplot was created. The
+ dimensions of the resulting array can be controlled with the squeeze
+ keyword, see above.
+
+ Examples
+ --------
+ First create some toy data:
+
+ >>> x = np.linspace(0, 2*np.pi, 400)
+ >>> y = np.sin(x**2)
+
+ Creates just a figure and only one subplot
+
+ >>> fig, ax = plt.subplots()
+ >>> ax.plot(x, y)
+ >>> ax.set_title('Simple plot')
+
+ Creates two subplots and unpacks the output array immediately
+
+ >>> f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
+ >>> ax1.plot(x, y)
+ >>> ax1.set_title('Sharing Y axis')
+ >>> ax2.scatter(x, y)
+
+ Creates four polar axes, and accesses them through the returned array
+
+ >>> fig, axes = plt.subplots(2, 2, subplot_kw=dict(polar=True))
+ >>> axes[0, 0].plot(x, y)
+ >>> axes[1, 1].scatter(x, y)
+
+ Share a X axis with each column of subplots
+
+ >>> plt.subplots(2, 2, sharex='col')
+
+ Share a Y axis with each row of subplots
+
+ >>> plt.subplots(2, 2, sharey='row')
+
+ Share both X and Y axes with all subplots
+
+ >>> plt.subplots(2, 2, sharex='all', sharey='all')
+
+ Note that this is the same as
+
+ >>> plt.subplots(2, 2, sharex=True, sharey=True)
+
+ See Also
+ --------
+ figure
+ subplot
+ """
+ fig = figure(**fig_kw)
+ axs = fig.subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey,
+ squeeze=squeeze, subplot_kw=subplot_kw,
+ gridspec_kw=gridspec_kw)
+ return fig, axs
+
+
+def subplot2grid(shape, loc, rowspan=1, colspan=1, fig=None, **kwargs):
+ """
+ Create an axis at specific location inside a regular grid.
+
+ Parameters
+ ----------
+ shape : sequence of 2 ints
+ Shape of grid in which to place axis.
+ First entry is number of rows, second entry is number of columns.
+
+ loc : sequence of 2 ints
+ Location to place axis within grid.
+ First entry is row number, second entry is column number.
+
+    rowspan : int
+        Number of rows for the axis to span downwards.
+
+    colspan : int
+        Number of columns for the axis to span to the right.
+
+ fig : `Figure`, optional
+ Figure to place axis in. Defaults to current figure.
+
+ **kwargs
+ Additional keyword arguments are handed to `add_subplot`.
+
+
+ Notes
+ -----
+ The following call ::
+
+ subplot2grid(shape, loc, rowspan=1, colspan=1)
+
+ is identical to ::
+
+ gridspec=GridSpec(shape[0], shape[1])
+ subplotspec=gridspec.new_subplotspec(loc, rowspan, colspan)
+ subplot(subplotspec)
+ """
+
+ if fig is None:
+ fig = gcf()
+
+ s1, s2 = shape
+ subplotspec = GridSpec(s1, s2).new_subplotspec(loc,
+ rowspan=rowspan,
+ colspan=colspan)
+ a = fig.add_subplot(subplotspec, **kwargs)
+ bbox = a.bbox
+ byebye = []
+ for other in fig.axes:
+ if other == a:
+ continue
+ if bbox.fully_overlaps(other.bbox):
+ byebye.append(other)
+ for ax in byebye:
+ delaxes(ax)
+
+ return a
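+
+
+# Illustrative sketch (not part of the original module): a 3x3 grid with
+# spanning cells, mirroring the GridSpec equivalence described in the Notes
+# above.  The helper name is hypothetical.
+def _example_subplot2grid_layout():
+    ax_top = subplot2grid((3, 3), (0, 0), colspan=3)     # full-width first row
+    ax_left = subplot2grid((3, 3), (1, 0), rowspan=2)    # tall left column
+    ax_main = subplot2grid((3, 3), (1, 1), rowspan=2, colspan=2)
+    return ax_top, ax_left, ax_main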
+
+
+def twinx(ax=None):
+ """
+ Make a second axes that shares the *x*-axis. The new axes will
+ overlay *ax* (or the current axes if *ax* is *None*). The ticks
+ for *ax2* will be placed on the right, and the *ax2* instance is
+ returned.
+
+ .. seealso::
+
+ :file:`examples/api_examples/two_scales.py`
+ For an example
+ """
+ if ax is None:
+ ax=gca()
+ ax1 = ax.twinx()
+ return ax1
+
+
+def twiny(ax=None):
+ """
+    Make a second axes that shares the *y*-axis. The new axes will
+ overlay *ax* (or the current axes if *ax* is *None*). The ticks
+ for *ax2* will be placed on the top, and the *ax2* instance is
+ returned.
+ """
+ if ax is None:
+ ax=gca()
+ ax1 = ax.twiny()
+ return ax1
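+
+
+# Illustrative sketch (not part of the original module): plotting two
+# quantities with different y scales on a shared x-axis using `twinx`.
+# The data and helper name are hypothetical.
+def _example_two_scales():
+    t = np.arange(0.0, 10.0, 0.1)
+    fig, ax_left = subplots()
+    ax_left.plot(t, np.exp(t), color='tab:blue')
+    ax_right = twinx(ax_left)          # second y-axis, ticks on the right
+    ax_right.plot(t, np.sin(t), color='tab:red')
+    return fig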
+
+
+def subplots_adjust(*args, **kwargs):
+ """
+ Tune the subplot layout.
+
+ call signature::
+
+ subplots_adjust(left=None, bottom=None, right=None, top=None,
+ wspace=None, hspace=None)
+
+ The parameter meanings (and suggested defaults) are::
+
+ left = 0.125 # the left side of the subplots of the figure
+ right = 0.9 # the right side of the subplots of the figure
+ bottom = 0.1 # the bottom of the subplots of the figure
+ top = 0.9 # the top of the subplots of the figure
+ wspace = 0.2 # the amount of width reserved for space between subplots,
+ # expressed as a fraction of the average axis width
+ hspace = 0.2 # the amount of height reserved for space between subplots,
+ # expressed as a fraction of the average axis height
+
+ The actual defaults are controlled by the rc file
+ """
+ fig = gcf()
+ fig.subplots_adjust(*args, **kwargs)
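+
+
+# Illustrative sketch (not part of the original module): widening the gaps
+# between subplots with the keyword form of `subplots_adjust`.  The helper
+# name is hypothetical.
+def _example_spread_subplots():
+    fig, axs = subplots(2, 2)
+    subplots_adjust(wspace=0.4, hspace=0.4)
+    return fig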
+
+
+def subplot_tool(targetfig=None):
+ """
+ Launch a subplot tool window for a figure.
+
+ A :class:`matplotlib.widgets.SubplotTool` instance is returned.
+ """
+ tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
+ rcParams['toolbar'] = 'None'
+ if targetfig is None:
+ manager = get_current_fig_manager()
+ targetfig = manager.canvas.figure
+ else:
+ # find the manager for this figure
+ for manager in _pylab_helpers.Gcf._activeQue:
+ if manager.canvas.figure==targetfig: break
+ else: raise RuntimeError('Could not find manager for targetfig')
+
+ toolfig = figure(figsize=(6,3))
+ toolfig.subplots_adjust(top=0.9)
+ ret = SubplotTool(targetfig, toolfig)
+ rcParams['toolbar'] = tbar
+ _pylab_helpers.Gcf.set_active(manager) # restore the current figure
+ return ret
+
+
+def tight_layout(pad=1.08, h_pad=None, w_pad=None, rect=None):
+ """
+ Automatically adjust subplot parameters to give specified padding.
+
+ Parameters
+ ----------
+    pad : float
+        Padding between the figure edge and the edges of subplots,
+        as a fraction of the font size.
+    h_pad, w_pad : float
+        Padding (height/width) between edges of adjacent subplots,
+        as a fraction of the font size.  Defaults to *pad*.
+    rect : tuple (left, bottom, right, top), optional
+        A rectangle in normalized figure coordinates into which the
+        whole subplots area (including labels) will fit.
+        Default is (0, 0, 1, 1).
+
+ """
+ fig = gcf()
+ fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
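+
+
+# Illustrative sketch (not part of the original module): reserving headroom
+# for a figure-level title by passing `rect` to `tight_layout`.  The helper
+# name is hypothetical.
+def _example_tight_layout_with_suptitle():
+    fig, axs = subplots(2, 2)
+    fig.suptitle('shared title')
+    # keep the subplots inside the lower 95% of the figure so the
+    # suptitle is not overlapped
+    tight_layout(rect=(0, 0, 1, 0.95))
+    return fig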
+
+
+def box(on=None):
+ """
+ Turn the axes box on or off on the current axes.
+
+ Parameters
+ ----------
+ on : bool or None
+ The new `~matplotlib.axes.Axes` box state. If ``None``, toggle
+ the state.
+
+ See Also
+ --------
+ :meth:`matplotlib.axes.Axes.set_frame_on`
+ :meth:`matplotlib.axes.Axes.get_frame_on`
+ """
+ ax = gca()
+ if on is None:
+ on = not ax.get_frame_on()
+ on = _string_to_bool(on)
+ ax.set_frame_on(on)
+
+
+def title(s, *args, **kwargs):
+ """
+ Set a title of the current axes.
+
+ Set one of the three available axes titles. The available titles are
+ positioned above the axes in the center, flush with the left edge,
+ and flush with the right edge.
+
+ .. seealso::
+ See :func:`~matplotlib.pyplot.text` for adding text
+ to the current axes
+
+ Parameters
+ ----------
+ label : str
+ Text to use for the title
+
+ fontdict : dict
+ A dictionary controlling the appearance of the title text,
+ the default `fontdict` is:
+
+ {'fontsize': rcParams['axes.titlesize'],
+ 'fontweight' : rcParams['axes.titleweight'],
+ 'verticalalignment': 'baseline',
+ 'horizontalalignment': loc}
+
+ loc : {'center', 'left', 'right'}, str, optional
+ Which title to set, defaults to 'center'
+
+ Returns
+ -------
+ text : :class:`~matplotlib.text.Text`
+ The matplotlib text instance representing the title
+
+ Other parameters
+ ----------------
+ kwargs : text properties
+ Other keyword arguments are text properties, see
+ :class:`~matplotlib.text.Text` for a list of valid text
+ properties.
+
+ """
+ return gca().set_title(s, *args, **kwargs)
+
+## Axis ##
+
+
+def axis(*v, **kwargs):
+ """
+ Convenience method to get or set axis properties.
+
+ Calling with no arguments::
+
+ >>> axis()
+
+ returns the current axes limits ``[xmin, xmax, ymin, ymax]``.::
+
+ >>> axis(v)
+
+ sets the min and max of the x and y axes, with
+ ``v = [xmin, xmax, ymin, ymax]``.::
+
+ >>> axis('off')
+
+ turns off the axis lines and labels.::
+
+ >>> axis('equal')
+
+ changes limits of *x* or *y* axis so that equal increments of *x*
+ and *y* have the same length; a circle is circular.::
+
+ >>> axis('scaled')
+
+ achieves the same result by changing the dimensions of the plot box instead
+ of the axis data limits.::
+
+ >>> axis('tight')
+
+ changes *x* and *y* axis limits such that all data is shown. If
+ all data is already shown, it will move it to the center of the
+ figure without modifying (*xmax* - *xmin*) or (*ymax* -
+ *ymin*). Note this is slightly different than in MATLAB.::
+
+ >>> axis('image')
+
+ is 'scaled' with the axis limits equal to the data limits.::
+
+ >>> axis('auto')
+
+ and::
+
+ >>> axis('normal')
+
+ are deprecated. They restore default behavior; axis limits are automatically
+ scaled to make the data fit comfortably within the plot box.
+
+    If no positional arguments are given, you can pass *xmin*, *xmax*,
+    *ymin*, *ymax* as keyword arguments selectively to alter just those
+    limits without changing the others.
+
+ >>> axis('square')
+
+ changes the limit ranges (*xmax*-*xmin*) and (*ymax*-*ymin*) of
+ the *x* and *y* axes to be the same, and have the same scaling,
+ resulting in a square plot.
+
+ The xmin, xmax, ymin, ymax tuple is returned
+
+ .. seealso::
+
+ :func:`xlim`, :func:`ylim`
+ For setting the x- and y-limits individually.
+ """
+ return gca().axis(*v, **kwargs)
+
+
+def xlabel(s, *args, **kwargs):
+ """
+ Set the x-axis label of the current axes.
+
+ Call signature::
+
+ xlabel(label, fontdict=None, labelpad=None, **kwargs)
+
+ This is the pyplot equivalent of calling `.set_xlabel` on the current axes.
+ See there for a full parameter description.
+ """
+ return gca().set_xlabel(s, *args, **kwargs)
+
+
+def ylabel(s, *args, **kwargs):
+ """
+ Set the y-axis label of the current axes.
+
+ Call signature::
+
+ ylabel(label, fontdict=None, labelpad=None, **kwargs)
+
+ This is the pyplot equivalent of calling `.set_ylabel` on the current axes.
+ See there for a full parameter description.
+ """
+ return gca().set_ylabel(s, *args, **kwargs)
+
+
+def xlim(*args, **kwargs):
+ """
+ Get or set the x limits of the current axes.
+
+ Call signatures::
+
+ xmin, xmax = xlim() # return the current xlim
+ xlim((xmin, xmax)) # set the xlim to xmin, xmax
+ xlim(xmin, xmax) # set the xlim to xmin, xmax
+
+ If you do not specify args, you can pass *xmin* or *xmax* as kwargs, i.e.::
+
+ xlim(xmax=3) # adjust the max leaving min unchanged
+ xlim(xmin=1) # adjust the min leaving max unchanged
+
+ Setting limits turns autoscaling off for the x-axis.
+
+ Returns
+ -------
+ xmin, xmax
+ A tuple of the new x-axis limits.
+
+ Notes
+ -----
+ Calling this function with no arguments (e.g. ``xlim()``) is the pyplot
+ equivalent of calling `~.Axes.get_xlim` on the current axes.
+ Calling this function with arguments is the pyplot equivalent of calling
+    `~.Axes.set_xlim` on the current axes. All arguments are passed through.
+ """
+ ax = gca()
+ if not args and not kwargs:
+ return ax.get_xlim()
+ ret = ax.set_xlim(*args, **kwargs)
+ return ret
+
+
+def ylim(*args, **kwargs):
+ """
+ Get or set the y-limits of the current axes.
+
+ Call signatures::
+
+ ymin, ymax = ylim() # return the current ylim
+ ylim((ymin, ymax)) # set the ylim to ymin, ymax
+ ylim(ymin, ymax) # set the ylim to ymin, ymax
+
+ If you do not specify args, you can alternatively pass *ymin* or *ymax* as
+ kwargs, i.e.::
+
+ ylim(ymax=3) # adjust the max leaving min unchanged
+ ylim(ymin=1) # adjust the min leaving max unchanged
+
+ Setting limits turns autoscaling off for the y-axis.
+
+ Returns
+ -------
+ ymin, ymax
+ A tuple of the new y-axis limits.
+
+ Notes
+ -----
+ Calling this function with no arguments (e.g. ``ylim()``) is the pyplot
+ equivalent of calling `~.Axes.get_ylim` on the current axes.
+ Calling this function with arguments is the pyplot equivalent of calling
+    `~.Axes.set_ylim` on the current axes. All arguments are passed through.
+ """
+ ax = gca()
+ if not args and not kwargs:
+ return ax.get_ylim()
+ ret = ax.set_ylim(*args, **kwargs)
+ return ret
+
+
+@docstring.dedent_interpd
+def xscale(*args, **kwargs):
+ """
+ Set the scaling of the x-axis.
+
+ Call signature::
+
+ xscale(scale, **kwargs)
+
+ Parameters
+ ----------
+ scale : [%(scale)s]
+ The scaling type.
+ **kwargs
+ Additional parameters depend on *scale*. See Notes.
+
+ Notes
+ -----
+ This is the pyplot equivalent of calling `~.Axes.set_xscale` on the
+ current axes.
+
+ Different keywords may be accepted, depending on the scale:
+
+ %(scale_docs)s
+ """
+ gca().set_xscale(*args, **kwargs)
+
+
+@docstring.dedent_interpd
+def yscale(*args, **kwargs):
+ """
+ Set the scaling of the y-axis.
+
+ Call signature::
+
+ yscale(scale, **kwargs)
+
+ Parameters
+ ----------
+ scale : [%(scale)s]
+ The scaling type.
+ **kwargs
+ Additional parameters depend on *scale*. See Notes.
+
+ Notes
+ -----
+ This is the pyplot equivalent of calling `~.Axes.set_yscale` on the
+ current axes.
+
+ Different keywords may be accepted, depending on the scale:
+
+ %(scale_docs)s
+ """
+ gca().set_yscale(*args, **kwargs)
+
+
+def xticks(*args, **kwargs):
+ """
+ Get or set the current tick locations and labels of the x-axis.
+
+ Call signatures::
+
+ locs, labels = xticks() # Get locations and labels
+
+ xticks(locs, [labels], **kwargs) # Set locations and labels
+
+ Parameters
+ ----------
+ locs : array_like
+ A list of positions at which ticks should be placed. You can pass an
+ empty list to disable xticks.
+
+ labels : array_like, optional
+ A list of explicit labels to place at the given *locs*.
+
+ **kwargs
+ :class:`.Text` properties can be used to control the appearance of
+ the labels.
+
+ Returns
+ -------
+ locs
+ An array of label locations.
+ labels
+ A list of `.Text` objects.
+
+ Notes
+ -----
+ Calling this function with no arguments (e.g. ``xticks()``) is the pyplot
+ equivalent of calling `~.Axes.get_xticks` and `~.Axes.get_xticklabels` on
+ the current axes.
+ Calling this function with arguments is the pyplot equivalent of calling
+ `~.Axes.set_xticks` and `~.Axes.set_xticklabels` on the current axes.
+
+ Examples
+ --------
+ Get the current locations and labels:
+
+ >>> locs, labels = xticks()
+
+ Set label locations:
+
+ >>> xticks(np.arange(0, 1, step=0.2))
+
+ Set text labels:
+
+ >>> xticks(np.arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue'))
+
+ Set text labels and properties:
+
+ >>> xticks(np.arange(12), calendar.month_name[1:13], rotation=20)
+
+ Disable xticks:
+
+ >>> xticks([])
+ """
+ ax = gca()
+
+ if len(args)==0:
+ locs = ax.get_xticks()
+ labels = ax.get_xticklabels()
+ elif len(args)==1:
+ locs = ax.set_xticks(args[0])
+ labels = ax.get_xticklabels()
+ elif len(args)==2:
+ locs = ax.set_xticks(args[0])
+ labels = ax.set_xticklabels(args[1], **kwargs)
+ else: raise TypeError('Illegal number of arguments to xticks')
+ if len(kwargs):
+ for l in labels:
+ l.update(kwargs)
+
+ return locs, silent_list('Text xticklabel', labels)
+
+
+def yticks(*args, **kwargs):
+ """
+ Get or set the current tick locations and labels of the y-axis.
+
+ Call signatures::
+
+ locs, labels = yticks() # Get locations and labels
+
+ yticks(locs, [labels], **kwargs) # Set locations and labels
+
+ Parameters
+ ----------
+ locs : array_like
+ A list of positions at which ticks should be placed. You can pass an
+ empty list to disable yticks.
+
+ labels : array_like, optional
+ A list of explicit labels to place at the given *locs*.
+
+ **kwargs
+ :class:`.Text` properties can be used to control the appearance of
+ the labels.
+
+ Returns
+ -------
+ locs
+ An array of label locations.
+ labels
+ A list of `.Text` objects.
+
+ Notes
+ -----
+ Calling this function with no arguments (e.g. ``yticks()``) is the pyplot
+ equivalent of calling `~.Axes.get_yticks` and `~.Axes.get_yticklabels` on
+ the current axes.
+ Calling this function with arguments is the pyplot equivalent of calling
+ `~.Axes.set_yticks` and `~.Axes.set_yticklabels` on the current axes.
+
+ Examples
+ --------
+ Get the current locations and labels:
+
+ >>> locs, labels = yticks()
+
+ Set label locations:
+
+ >>> yticks(np.arange(0, 1, step=0.2))
+
+ Set text labels:
+
+ >>> yticks(np.arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue'))
+
+ Set text labels and properties:
+
+ >>> yticks(np.arange(12), calendar.month_name[1:13], rotation=45)
+
+ Disable yticks:
+
+ >>> yticks([])
+ """
+ ax = gca()
+
+ if len(args)==0:
+ locs = ax.get_yticks()
+ labels = ax.get_yticklabels()
+ elif len(args)==1:
+ locs = ax.set_yticks(args[0])
+ labels = ax.get_yticklabels()
+ elif len(args)==2:
+ locs = ax.set_yticks(args[0])
+ labels = ax.set_yticklabels(args[1], **kwargs)
+ else: raise TypeError('Illegal number of arguments to yticks')
+ if len(kwargs):
+ for l in labels:
+ l.update(kwargs)
+
+    return locs, silent_list('Text yticklabel', labels)
+
+
+def minorticks_on():
+ """
+ Display minor ticks on the current plot.
+
+ Displaying minor ticks reduces performance; turn them off using
+ minorticks_off() if drawing speed is a problem.
+ """
+ gca().minorticks_on()
+
+
+def minorticks_off():
+ """
+ Remove minor ticks from the current plot.
+ """
+ gca().minorticks_off()
+
+
+def rgrids(*args, **kwargs):
+ """
+ Get or set the radial gridlines on a polar plot.
+
+ call signatures::
+
+ lines, labels = rgrids()
+ lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)
+
+    When called with no arguments, :func:`rgrids` simply returns the
+ tuple (*lines*, *labels*), where *lines* is an array of radial
+ gridlines (:class:`~matplotlib.lines.Line2D` instances) and
+ *labels* is an array of tick labels
+ (:class:`~matplotlib.text.Text` instances). When called with
+ arguments, the labels will appear at the specified radial
+ distances and angles.
+
+ *labels*, if not *None*, is a len(*radii*) list of strings of the
+ labels to use at each angle.
+
+ If *labels* is None, the rformatter will be used
+
+ Examples::
+
+ # set the locations of the radial gridlines and labels
+ lines, labels = rgrids( (0.25, 0.5, 1.0) )
+
+ # set the locations and labels of the radial gridlines and labels
+      lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
+
+ """
+ ax = gca()
+ if not isinstance(ax, PolarAxes):
+ raise RuntimeError('rgrids only defined for polar axes')
+ if len(args)==0:
+ lines = ax.yaxis.get_gridlines()
+ labels = ax.yaxis.get_ticklabels()
+ else:
+ lines, labels = ax.set_rgrids(*args, **kwargs)
+
+ return ( silent_list('Line2D rgridline', lines),
+ silent_list('Text rgridlabel', labels) )
+
+
+def thetagrids(*args, **kwargs):
+ """
+ Get or set the theta locations of the gridlines in a polar plot.
+
+ If no arguments are passed, return a tuple (*lines*, *labels*)
+ where *lines* is an array of radial gridlines
+ (:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
+ array of tick labels (:class:`~matplotlib.text.Text` instances)::
+
+ lines, labels = thetagrids()
+
+ Otherwise the syntax is::
+
+ lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)
+
+ set the angles at which to place the theta grids (these gridlines
+ are equal along the theta dimension).
+
+ *angles* is in degrees.
+
+ *labels*, if not *None*, is a len(angles) list of strings of the
+ labels to use at each angle.
+
+ If *labels* is *None*, the labels will be ``fmt%angle``.
+
+ *frac* is the fraction of the polar axes radius at which to place
+ the label (1 is the edge). e.g., 1.05 is outside the axes and 0.95
+ is inside the axes.
+
+ Return value is a list of tuples (*lines*, *labels*):
+
+ - *lines* are :class:`~matplotlib.lines.Line2D` instances
+
+ - *labels* are :class:`~matplotlib.text.Text` instances.
+
+ Note that on input, the *labels* argument is a list of strings,
+ and on output it is a list of :class:`~matplotlib.text.Text`
+ instances.
+
+ Examples::
+
+      # set the locations of the angular gridlines
+      lines, labels = thetagrids( range(45,360,90) )
+
+      # set the locations and labels of the angular gridlines
+      lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
+ """
+ ax = gca()
+ if not isinstance(ax, PolarAxes):
+        raise RuntimeError('thetagrids only defined for polar axes')
+ if len(args)==0:
+ lines = ax.xaxis.get_ticklines()
+ labels = ax.xaxis.get_ticklabels()
+ else:
+ lines, labels = ax.set_thetagrids(*args, **kwargs)
+
+ return (silent_list('Line2D thetagridline', lines),
+ silent_list('Text thetagridlabel', labels)
+ )
+
+
+## Plotting Info ##
+
+def plotting():
+ pass
+
+
+def get_plot_commands():
+ """
+ Get a sorted list of all of the plotting commands.
+ """
+ # This works by searching for all functions in this module and
+ # removing a few hard-coded exclusions, as well as all of the
+ # colormap-setting functions, and anything marked as private with
+ # a preceding underscore.
+
+ import inspect
+
+ exclude = {'colormaps', 'colors', 'connect', 'disconnect',
+ 'get_plot_commands', 'get_current_fig_manager', 'ginput',
+ 'plotting', 'waitforbuttonpress'}
+ exclude |= set(colormaps())
+ this_module = inspect.getmodule(get_plot_commands)
+
+ commands = set()
+ for name, obj in list(six.iteritems(globals())):
+ if name.startswith('_') or name in exclude:
+ continue
+ if inspect.isfunction(obj) and inspect.getmodule(obj) is this_module:
+ commands.add(name)
+
+ return sorted(commands)
+
+
+@deprecated('2.1')
+def colors():
+ """
+ This is a do-nothing function to provide you with help on how
+ matplotlib handles colors.
+
+ Commands which take color arguments can use several formats to
+ specify the colors. For the basic built-in colors, you can use a
+ single letter
+
+ ===== =======
+ Alias Color
+ ===== =======
+ 'b' blue
+ 'g' green
+ 'r' red
+ 'c' cyan
+ 'm' magenta
+ 'y' yellow
+ 'k' black
+ 'w' white
+ ===== =======
+
+ For a greater range of colors, you have two options. You can
+ specify the color using an html hex string, as in::
+
+ color = '#eeefff'
+
+ or you can pass an R,G,B tuple, where each of R,G,B are in the
+ range [0,1].
+
+ You can also use any legal html name for a color, for example::
+
+ color = 'red'
+ color = 'burlywood'
+ color = 'chartreuse'
+
+ The example below creates a subplot with a dark
+ slate gray background::
+
+ subplot(111, facecolor=(0.1843, 0.3098, 0.3098))
+
+ Here is an example that creates a pale turquoise title::
+
+ title('Is this the best color?', color='#afeeee')
+
+ """
+ pass
+
+
+def colormaps():
+ """
+ Matplotlib provides a number of colormaps, and others can be added using
+ :func:`~matplotlib.cm.register_cmap`. This function documents the built-in
+ colormaps, and will also return a list of all registered colormaps if called.
+
+ You can set the colormap for an image, pcolor, scatter, etc,
+ using a keyword argument::
+
+ imshow(X, cmap=cm.hot)
+
+ or using the :func:`set_cmap` function::
+
+ imshow(X)
+ pyplot.set_cmap('hot')
+ pyplot.set_cmap('jet')
+
+ In interactive mode, :func:`set_cmap` will update the colormap post-hoc,
+ allowing you to see which one works best for your data.
+
+ All built-in colormaps can be reversed by appending ``_r``: For instance,
+ ``gray_r`` is the reverse of ``gray``.
+
+ There are several common color schemes used in visualization:
+
+ Sequential schemes
+ for unipolar data that progresses from low to high
+ Diverging schemes
+ for bipolar data that emphasizes positive or negative deviations from a
+ central value
+ Cyclic schemes
+ meant for plotting values that wrap around at the
+ endpoints, such as phase angle, wind direction, or time of day
+ Qualitative schemes
+ for nominal data that has no inherent ordering, where color is used
+ only to distinguish categories
+
+ Matplotlib ships with 4 perceptually uniform color maps which are
+ the recommended color maps for sequential data:
+
+ ========= ===================================================
+ Colormap Description
+ ========= ===================================================
+ inferno perceptually uniform shades of black-red-yellow
+ magma perceptually uniform shades of black-red-white
+ plasma perceptually uniform shades of blue-red-yellow
+ viridis perceptually uniform shades of blue-green-yellow
+ ========= ===================================================
+
+ The following colormaps are based on the `ColorBrewer
+ <http://colorbrewer2.org>`_ color specifications and designs developed by
+ Cynthia Brewer:
+
+ ColorBrewer Diverging (luminance is highest at the midpoint, and
+ decreases towards differently-colored endpoints):
+
+ ======== ===================================
+ Colormap Description
+ ======== ===================================
+ BrBG brown, white, blue-green
+ PiYG pink, white, yellow-green
+ PRGn purple, white, green
+ PuOr orange, white, purple
+ RdBu red, white, blue
+ RdGy red, white, gray
+ RdYlBu red, yellow, blue
+ RdYlGn red, yellow, green
+ Spectral red, orange, yellow, green, blue
+ ======== ===================================
+
+ ColorBrewer Sequential (luminance decreases monotonically):
+
+ ======== ====================================
+ Colormap Description
+ ======== ====================================
+ Blues white to dark blue
+ BuGn white, light blue, dark green
+ BuPu white, light blue, dark purple
+ GnBu white, light green, dark blue
+ Greens white to dark green
+ Greys white to black (not linear)
+ Oranges white, orange, dark brown
+ OrRd white, orange, dark red
+ PuBu white, light purple, dark blue
+ PuBuGn white, light purple, dark green
+ PuRd white, light purple, dark red
+ Purples white to dark purple
+ RdPu white, pink, dark purple
+ Reds white to dark red
+ YlGn light yellow, dark green
+ YlGnBu light yellow, light green, dark blue
+ YlOrBr light yellow, orange, dark brown
+ YlOrRd light yellow, orange, dark red
+ ======== ====================================
+
+ ColorBrewer Qualitative:
+
+ (For plotting nominal data, :class:`ListedColormap` is used,
+ not :class:`LinearSegmentedColormap`. Different sets of colors are
+ recommended for different numbers of categories.)
+
+ * Accent
+ * Dark2
+ * Paired
+ * Pastel1
+ * Pastel2
+ * Set1
+ * Set2
+ * Set3
+
+    A set of colormaps derived from those of the same name provided
+    with Matlab is also included:
+
+ ========= =======================================================
+ Colormap Description
+ ========= =======================================================
+ autumn sequential linearly-increasing shades of red-orange-yellow
+ bone sequential increasing black-white color map with
+ a tinge of blue, to emulate X-ray film
+ cool linearly-decreasing shades of cyan-magenta
+ copper sequential increasing shades of black-copper
+ flag repetitive red-white-blue-black pattern (not cyclic at
+ endpoints)
+ gray sequential linearly-increasing black-to-white
+ grayscale
+ hot sequential black-red-yellow-white, to emulate blackbody
+ radiation from an object at increasing temperatures
+ hsv cyclic red-yellow-green-cyan-blue-magenta-red, formed
+ by changing the hue component in the HSV color space
+ jet a spectral map with dark endpoints, blue-cyan-yellow-red;
+ based on a fluid-jet simulation by NCSA [#]_
+ pink sequential increasing pastel black-pink-white, meant
+ for sepia tone colorization of photographs
+ prism repetitive red-yellow-green-blue-purple-...-green pattern
+ (not cyclic at endpoints)
+ spring linearly-increasing shades of magenta-yellow
+ summer sequential linearly-increasing shades of green-yellow
+ winter linearly-increasing shades of blue-green
+ ========= =======================================================
+
+ A set of palettes from the `Yorick scientific visualisation
+ package <https://dhmunro.github.io/yorick-doc/>`_, an evolution of
+ the GIST package, both by David H. Munro are included:
+
+ ============ =======================================================
+ Colormap Description
+ ============ =======================================================
+ gist_earth mapmaker's colors from dark blue deep ocean to green
+ lowlands to brown highlands to white mountains
+ gist_heat sequential increasing black-red-orange-white, to emulate
+ blackbody radiation from an iron bar as it grows hotter
+ gist_ncar pseudo-spectral black-blue-green-yellow-red-purple-white
+ colormap from National Center for Atmospheric
+ Research [#]_
+ gist_rainbow runs through the colors in spectral order from red to
+ violet at full saturation (like *hsv* but not cyclic)
+ gist_stern "Stern special" color table from Interactive Data
+ Language software
+ ============ =======================================================
+
+
+ Other miscellaneous schemes:
+
+ ============= =======================================================
+ Colormap Description
+ ============= =======================================================
+ afmhot sequential black-orange-yellow-white blackbody
+ spectrum, commonly used in atomic force microscopy
+ brg blue-red-green
+ bwr diverging blue-white-red
+ coolwarm diverging blue-gray-red, meant to avoid issues with 3D
+ shading, color blindness, and ordering of colors [#]_
+ CMRmap "Default colormaps on color images often reproduce to
+ confusing grayscale images. The proposed colormap
+ maintains an aesthetically pleasing color image that
+ automatically reproduces to a monotonic grayscale with
+ discrete, quantifiable saturation levels." [#]_
+ cubehelix Unlike most other color schemes cubehelix was designed
+ by D.A. Green to be monotonically increasing in terms
+ of perceived brightness. Also, when printed on a black
+ and white postscript printer, the scheme results in a
+ greyscale with monotonically increasing brightness.
+ This color scheme is named cubehelix because the r,g,b
+ values produced can be visualised as a squashed helix
+ around the diagonal in the r,g,b color cube.
+ gnuplot gnuplot's traditional pm3d scheme
+ (black-blue-red-yellow)
+ gnuplot2 sequential color printable as gray
+ (black-blue-violet-yellow-white)
+ ocean green-blue-white
+ rainbow spectral purple-blue-green-yellow-orange-red colormap
+ with diverging luminance
+ seismic diverging blue-white-red
+ nipy_spectral black-purple-blue-green-yellow-red-white spectrum,
+ originally from the Neuroimaging in Python project
+ terrain mapmaker's colors, blue-green-yellow-brown-white,
+ originally from IGOR Pro
+ ============= =======================================================
+
+ The following colormaps are redundant and may be removed in future
+ versions. It's recommended to use the names in the descriptions
+ instead, which produce identical output:
+
+ ========= =======================================================
+ Colormap Description
+ ========= =======================================================
+ gist_gray identical to *gray*
+ gist_yarg identical to *gray_r*
+ binary identical to *gray_r*
+ spectral identical to *nipy_spectral* [#]_
+ ========= =======================================================
+
+ .. rubric:: Footnotes
+
+ .. [#] Rainbow colormaps, ``jet`` in particular, are considered a poor
+ choice for scientific visualization by many researchers: `Rainbow Color
+ Map (Still) Considered Harmful
+ <http://ieeexplore.ieee.org/document/4118486/?arnumber=4118486>`_
+
+ .. [#] Resembles "BkBlAqGrYeOrReViWh200" from NCAR Command
+ Language. See `Color Table Gallery
+ <https://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml>`_
+
+ .. [#] See `Diverging Color Maps for Scientific Visualization
+ <http://www.kennethmoreland.com/color-maps/>`_ by Kenneth Moreland.
+
+ .. [#] See `A Color Map for Effective Black-and-White Rendering of
+ Color-Scale Images
+ <https://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m>`_
+ by Carey Rappaport
+
+ .. [#] Changed to distinguish from ColorBrewer's *Spectral* map.
+ :func:`spectral` still works, but
+ ``set_cmap('nipy_spectral')`` is recommended for clarity.
+
+
+ """
+ return sorted(cm.cmap_d)
+
+
+def _setup_pyplot_info_docstrings():
+ """
+    Generate the plotting docstring.
+
+    This must be done after the entire module is imported, so it is
+    called from the end of this module, which is generated by
+    boilerplate.py.
+ """
+ # Generate the plotting docstring
+ import re
+
+ def pad(s, l):
+ """Pad string *s* to length *l*."""
+ if l < len(s):
+ return s[:l]
+ return s + ' ' * (l - len(s))
+
+ commands = get_plot_commands()
+
+ first_sentence = re.compile(r"(?:\s*).+?\.(?:\s+|$)", flags=re.DOTALL)
+
+ # Collect the first sentence of the docstring for all of the
+ # plotting commands.
+ rows = []
+ max_name = 0
+ max_summary = 0
+ for name in commands:
+ doc = globals()[name].__doc__
+ summary = ''
+ if doc is not None:
+ match = first_sentence.match(doc)
+ if match is not None:
+ summary = match.group(0).strip().replace('\n', ' ')
+ name = '`%s`' % name
+ rows.append([name, summary])
+ max_name = max(max_name, len(name))
+ max_summary = max(max_summary, len(summary))
+
+ lines = []
+ sep = '=' * max_name + ' ' + '=' * max_summary
+ lines.append(sep)
+ lines.append(' '.join([pad("Function", max_name),
+ pad("Description", max_summary)]))
+ lines.append(sep)
+ for name, summary in rows:
+ lines.append(' '.join([pad(name, max_name),
+ pad(summary, max_summary)]))
+ lines.append(sep)
+
+ plotting.__doc__ = '\n'.join(lines)
+
+## Plotting part 1: manually generated functions and wrappers ##
+
+def colorbar(mappable=None, cax=None, ax=None, **kw):
+ if mappable is None:
+ mappable = gci()
+ if mappable is None:
+ raise RuntimeError('No mappable was found to use for colorbar '
+ 'creation. First define a mappable such as '
+ 'an image (with imshow) or a contour set ('
+ 'with contourf).')
+ if ax is None:
+ ax = gca()
+
+    ret = gcf().colorbar(mappable, cax=cax, ax=ax, **kw)
+ return ret
+colorbar.__doc__ = matplotlib.colorbar.colorbar_doc
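+
+
+# Illustrative sketch (not part of the original module): attaching a
+# colorbar to an image mappable.  The data and helper name are hypothetical.
+def _example_image_with_colorbar():
+    data = np.random.rand(10, 10)
+    fig, ax = subplots()
+    im = ax.imshow(data, cmap='viridis')
+    colorbar(im, ax=ax)        # the mappable is passed explicitly
+    return fig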
+
+
+def clim(vmin=None, vmax=None):
+ """
+ Set the color limits of the current image.
+
+ To apply clim to all axes images do::
+
+ clim(0, 0.5)
+
+ If either *vmin* or *vmax* is None, the image min/max respectively
+ will be used for color scaling.
+
+ If you want to set the clim of multiple images,
+ use, for example::
+
+ for im in gca().get_images():
+ im.set_clim(0, 0.05)
+
+ """
+ im = gci()
+ if im is None:
+ raise RuntimeError('You must first define an image, e.g., with imshow')
+
+ im.set_clim(vmin, vmax)
+
+
+def set_cmap(cmap):
+ """
+ Set the default colormap. Applies to the current image if any.
+ See help(colormaps) for more information.
+
+ *cmap* must be a :class:`~matplotlib.colors.Colormap` instance, or
+ the name of a registered colormap.
+
+ See :func:`matplotlib.cm.register_cmap` and
+ :func:`matplotlib.cm.get_cmap`.
+ """
+ cmap = cm.get_cmap(cmap)
+
+ rc('image', cmap=cmap.name)
+ im = gci()
+
+ if im is not None:
+ im.set_cmap(cmap)
+
+
+
+@docstring.copy_dedent(_imread)
+def imread(*args, **kwargs):
+ return _imread(*args, **kwargs)
+
+
+@docstring.copy_dedent(_imsave)
+def imsave(*args, **kwargs):
+ return _imsave(*args, **kwargs)
+
+
+def matshow(A, fignum=None, **kwargs):
+ """
+ Display an array as a matrix in a new figure window.
+
+ The origin is set at the upper left hand corner and rows (first
+ dimension of the array) are displayed horizontally. The aspect
+ ratio of the figure window is that of the array, unless this would
+ make an excessively short or narrow figure.
+
+ Tick labels for the xaxis are placed on top.
+
+ Parameters
+ ----------
+ A : array-like(M, N)
+ The matrix to be displayed.
+
+ fignum : None or int or False
+ If *None*, create a new figure window with automatic numbering.
+
+ If *fignum* is an integer, draw into the figure with the given number
+ (create it if it does not exist).
+
+ If 0 or *False*, use the current axes if it exists instead of creating
+ a new figure.
+
+ .. note::
+
+ Because of how `.Axes.matshow` tries to set the figure aspect
+ ratio to be the one of the array, strange things may happen if you
+ reuse an existing figure.
+
+ Returns
+ -------
+ image : `~matplotlib.image.AxesImage`
+
+ Other Parameters
+ ----------------
+ **kwargs : `~matplotlib.axes.Axes.imshow` arguments
+
+ """
+ A = np.asanyarray(A)
+ if fignum is False or fignum is 0:
+ ax = gca()
+ else:
+ # Extract actual aspect ratio of array and make appropriately sized figure
+ fig = figure(fignum, figsize=figaspect(A))
+ ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
+
+ im = ax.matshow(A, **kwargs)
+ sci(im)
+
+ return im
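+
+
+# Illustrative sketch (not part of the original module): displaying a small
+# matrix, either in a fresh window or in the current axes via fignum=0.
+# The helper name is hypothetical.
+def _example_matshow():
+    m = np.diag(np.arange(5))
+    matshow(m)                 # new figure sized to the array's aspect ratio
+    subplots()
+    matshow(m, fignum=0)       # reuse the current axes instead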
+
+
+def polar(*args, **kwargs):
+ """
+ Make a polar plot.
+
+ call signature::
+
+ polar(theta, r, **kwargs)
+
+ Multiple *theta*, *r* arguments are supported, with format
+ strings, as in :func:`~matplotlib.pyplot.plot`.
+
+ """
+ # If an axis already exists, check if it has a polar projection
+ if gcf().get_axes():
+ if not isinstance(gca(), PolarAxes):
+ warnings.warn('Trying to create polar plot on an axis that does '
+ 'not have a polar projection.')
+ ax = gca(polar=True)
+ ret = ax.plot(*args, **kwargs)
+ return ret
+
+
+def plotfile(fname, cols=(0,), plotfuncs=None,
+ comments='#', skiprows=0, checkrows=5, delimiter=',',
+ names=None, subplots=True, newfig=True, **kwargs):
+ """
+ Plot the data in a file.
+
+ *cols* is a sequence of column identifiers to plot. An identifier
+ is either an int or a string. If it is an int, it indicates the
+ column number. If it is a string, it indicates the column header.
+ matplotlib will make column headers lower case, replace spaces with
+ underscores, and remove all illegal characters; so ``'Adj Close*'``
+ will have name ``'adj_close'``.
+
+ - If len(*cols*) == 1, only that column will be plotted on the *y* axis.
+
+ - If len(*cols*) > 1, the first element will be an identifier for
+ data for the *x* axis and the remaining elements will be the
+ column indexes for multiple subplots if *subplots* is *True*
+ (the default), or for lines in a single subplot if *subplots*
+ is *False*.
+
+ *plotfuncs*, if not *None*, is a dictionary mapping identifier to
+ an :class:`~matplotlib.axes.Axes` plotting function as a string.
+ Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
+ etc. You must use the same type of identifier in the *cols*
+ vector as you use in the *plotfuncs* dictionary, e.g., integer
+ column numbers in both or column names in both. If *subplots*
+ is *False*, then including any function such as 'semilogy'
+ that changes the axis scaling will set the scaling for all
+ columns.
+
+ *comments*, *skiprows*, *checkrows*, *delimiter*, and *names*
+ are all passed on to :func:`matplotlib.pylab.csv2rec` to
+ load the data into a record array.
+
+ If *newfig* is *True*, the plot always will be made in a new figure;
+ if *False*, it will be made in the current figure if one exists,
+ else in a new figure.
+
+ kwargs are passed on to plotting functions.
+
+ Example usage::
+
+ # plot the 2nd and 4th column against the 1st in two subplots
+ plotfile(fname, (0,1,3))
+
+ # plot using column names; specify an alternate plot type for volume
+ plotfile(fname, ('date', 'volume', 'adj_close'),
+ plotfuncs={'volume': 'semilogy'})
+
+ Note: plotfile is intended as a convenience for quickly plotting
+ data from flat files; it is not intended as an alternative
+ interface to general plotting with pyplot or matplotlib.
+ """
+
+ if newfig:
+ fig = figure()
+ else:
+ fig = gcf()
+
+ if len(cols)<1:
+ raise ValueError('must have at least one column of data')
+
+ if plotfuncs is None:
+ plotfuncs = dict()
+ from matplotlib.cbook import mplDeprecation
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore', mplDeprecation)
+ r = mlab.csv2rec(fname, comments=comments, skiprows=skiprows,
+ checkrows=checkrows, delimiter=delimiter, names=names)
+
+ def getname_val(identifier):
+ 'return the name and column data for identifier'
+ if isinstance(identifier, six.string_types):
+ return identifier, r[identifier]
+ elif is_numlike(identifier):
+ name = r.dtype.names[int(identifier)]
+ return name, r[name]
+ else:
+ raise TypeError('identifier must be a string or integer')
+
+ xname, x = getname_val(cols[0])
+ ynamelist = []
+
+ if len(cols)==1:
+ ax1 = fig.add_subplot(1,1,1)
+ funcname = plotfuncs.get(cols[0], 'plot')
+ func = getattr(ax1, funcname)
+ func(x, **kwargs)
+ ax1.set_ylabel(xname)
+ else:
+ N = len(cols)
+ for i in range(1,N):
+ if subplots:
+ if i==1:
+ ax = ax1 = fig.add_subplot(N-1,1,i)
+ else:
+ ax = fig.add_subplot(N-1,1,i, sharex=ax1)
+ elif i==1:
+ ax = fig.add_subplot(1,1,1)
+
+ yname, y = getname_val(cols[i])
+ ynamelist.append(yname)
+
+ funcname = plotfuncs.get(cols[i], 'plot')
+ func = getattr(ax, funcname)
+
+ func(x, y, **kwargs)
+ if subplots:
+ ax.set_ylabel(yname)
+ if ax.is_last_row():
+ ax.set_xlabel(xname)
+ else:
+ ax.set_xlabel('')
+
+ if not subplots:
+ ax.legend(ynamelist, loc='best')
+
+ if xname=='date':
+ fig.autofmt_xdate()
+
+
+def _autogen_docstring(base):
+ """Autogenerated wrappers will get their docstring from a base function
+ with an addendum."""
+ #msg = "\n\nAdditional kwargs: hold = [True|False] overrides default hold state"
+ msg = ''
+ addendum = docstring.Appender(msg, '\n\n')
+ return lambda func: addendum(docstring.copy_dedent(base)(func))
+
+# This function cannot be generated by boilerplate.py because it may
+# return an image or a line.
+@_autogen_docstring(Axes.spy)
+def spy(Z, precision=0, marker=None, markersize=None, aspect='equal', **kwargs):
+ ax = gca()
+ hold = kwargs.pop('hold', None)
+ # allow callers to override the hold state by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.spy(Z, precision, marker, markersize, aspect, **kwargs)
+ finally:
+ ax._hold = washold
+ if isinstance(ret, cm.ScalarMappable):
+ sci(ret)
+ return ret
+
+# just to be safe. Interactive mode can be turned on without
+# calling `plt.ion()` so register it again here.
+# This is safe because multiple calls to `install_repl_displayhook`
+# are no-ops and the registered function respect `mpl.is_interactive()`
+# to determine if they should trigger a draw.
+install_repl_displayhook()
+
+################# REMAINING CONTENT GENERATED BY boilerplate.py ##############
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.acorr)
+def acorr(x, hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.acorr(x, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.angle_spectrum)
+def angle_spectrum(x, Fs=None, Fc=None, window=None, pad_to=None, sides=None,
+ hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.angle_spectrum(x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to,
+ sides=sides, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.arrow)
+def arrow(x, y, dx, dy, hold=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.arrow(x, y, dx, dy, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.axhline)
+def axhline(y=0, xmin=0, xmax=1, hold=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.axhline(y=y, xmin=xmin, xmax=xmax, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.axhspan)
+def axhspan(ymin, ymax, xmin=0, xmax=1, hold=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.axhspan(ymin, ymax, xmin=xmin, xmax=xmax, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.axvline)
+def axvline(x=0, ymin=0, ymax=1, hold=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.axvline(x=x, ymin=ymin, ymax=ymax, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.axvspan)
+def axvspan(xmin, xmax, ymin=0, ymax=1, hold=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.axvspan(xmin, xmax, ymin=ymin, ymax=ymax, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.bar)
+def bar(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.bar(*args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.barh)
+def barh(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.barh(*args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.broken_barh)
+def broken_barh(xranges, yrange, hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.broken_barh(xranges, yrange, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.boxplot)
+def boxplot(x, notch=None, sym=None, vert=None, whis=None, positions=None,
+ widths=None, patch_artist=None, bootstrap=None, usermedians=None,
+ conf_intervals=None, meanline=None, showmeans=None, showcaps=None,
+ showbox=None, showfliers=None, boxprops=None, labels=None,
+ flierprops=None, medianprops=None, meanprops=None, capprops=None,
+ whiskerprops=None, manage_xticks=True, autorange=False, zorder=None,
+ hold=None, data=None):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.boxplot(x, notch=notch, sym=sym, vert=vert, whis=whis,
+ positions=positions, widths=widths,
+ patch_artist=patch_artist, bootstrap=bootstrap,
+ usermedians=usermedians,
+ conf_intervals=conf_intervals, meanline=meanline,
+ showmeans=showmeans, showcaps=showcaps,
+ showbox=showbox, showfliers=showfliers,
+ boxprops=boxprops, labels=labels,
+ flierprops=flierprops, medianprops=medianprops,
+ meanprops=meanprops, capprops=capprops,
+ whiskerprops=whiskerprops,
+ manage_xticks=manage_xticks, autorange=autorange,
+ zorder=zorder, data=data)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.cohere)
+def cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
+ window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default',
+ scale_by_freq=None, hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.cohere(x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
+ window=window, noverlap=noverlap, pad_to=pad_to,
+ sides=sides, scale_by_freq=scale_by_freq, data=data,
+ **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.clabel)
+def clabel(CS, *args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.clabel(CS, *args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.contour)
+def contour(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.contour(*args, **kwargs)
+ finally:
+ ax._hold = washold
+ if ret._A is not None: sci(ret)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.contourf)
+def contourf(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.contourf(*args, **kwargs)
+ finally:
+ ax._hold = washold
+ if ret._A is not None: sci(ret)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.csd)
+def csd(x, y, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
+ noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
+ return_line=None, hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.csd(x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
+ window=window, noverlap=noverlap, pad_to=pad_to,
+ sides=sides, scale_by_freq=scale_by_freq,
+ return_line=return_line, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.errorbar)
+def errorbar(x, y, yerr=None, xerr=None, fmt='', ecolor=None, elinewidth=None,
+ capsize=None, barsabove=False, lolims=False, uplims=False,
+ xlolims=False, xuplims=False, errorevery=1, capthick=None,
+ hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.errorbar(x, y, yerr=yerr, xerr=xerr, fmt=fmt, ecolor=ecolor,
+ elinewidth=elinewidth, capsize=capsize,
+ barsabove=barsabove, lolims=lolims, uplims=uplims,
+ xlolims=xlolims, xuplims=xuplims,
+ errorevery=errorevery, capthick=capthick, data=data,
+ **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.eventplot)
+def eventplot(positions, orientation='horizontal', lineoffsets=1, linelengths=1,
+ linewidths=None, colors=None, linestyles='solid', hold=None,
+ data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.eventplot(positions, orientation=orientation,
+ lineoffsets=lineoffsets, linelengths=linelengths,
+ linewidths=linewidths, colors=colors,
+ linestyles=linestyles, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.fill)
+def fill(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.fill(*args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.fill_between)
+def fill_between(x, y1, y2=0, where=None, interpolate=False, step=None,
+ hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.fill_between(x, y1, y2=y2, where=where,
+ interpolate=interpolate, step=step, data=data,
+ **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.fill_betweenx)
+def fill_betweenx(y, x1, x2=0, where=None, step=None, interpolate=False,
+ hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.fill_betweenx(y, x1, x2=x2, where=where, step=step,
+ interpolate=interpolate, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.hexbin)
+def hexbin(x, y, C=None, gridsize=100, bins=None, xscale='linear',
+ yscale='linear', extent=None, cmap=None, norm=None, vmin=None,
+ vmax=None, alpha=None, linewidths=None, edgecolors='face',
+ reduce_C_function=np.mean, mincnt=None, marginals=False, hold=None,
+ data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.hexbin(x, y, C=C, gridsize=gridsize, bins=bins, xscale=xscale,
+ yscale=yscale, extent=extent, cmap=cmap, norm=norm,
+ vmin=vmin, vmax=vmax, alpha=alpha,
+ linewidths=linewidths, edgecolors=edgecolors,
+ reduce_C_function=reduce_C_function, mincnt=mincnt,
+ marginals=marginals, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+ sci(ret)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.hist)
+def hist(x, bins=None, range=None, density=None, weights=None, cumulative=False,
+ bottom=None, histtype='bar', align='mid', orientation='vertical',
+ rwidth=None, log=False, color=None, label=None, stacked=False,
+ normed=None, hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.hist(x, bins=bins, range=range, density=density,
+ weights=weights, cumulative=cumulative, bottom=bottom,
+ histtype=histtype, align=align, orientation=orientation,
+ rwidth=rwidth, log=log, color=color, label=label,
+ stacked=stacked, normed=normed, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.hist2d)
+def hist2d(x, y, bins=10, range=None, normed=False, weights=None, cmin=None,
+ cmax=None, hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.hist2d(x, y, bins=bins, range=range, normed=normed,
+ weights=weights, cmin=cmin, cmax=cmax, data=data,
+ **kwargs)
+ finally:
+ ax._hold = washold
+ sci(ret[-1])
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.hlines)
+def hlines(y, xmin, xmax, colors='k', linestyles='solid', label='', hold=None,
+ data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.hlines(y, xmin, xmax, colors=colors, linestyles=linestyles,
+ label=label, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.imshow)
+def imshow(X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=None,
+ vmin=None, vmax=None, origin=None, extent=None, shape=None,
+ filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None,
+ hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.imshow(X, cmap=cmap, norm=norm, aspect=aspect,
+ interpolation=interpolation, alpha=alpha, vmin=vmin,
+ vmax=vmax, origin=origin, extent=extent, shape=shape,
+ filternorm=filternorm, filterrad=filterrad,
+ imlim=imlim, resample=resample, url=url, data=data,
+ **kwargs)
+ finally:
+ ax._hold = washold
+ sci(ret)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.loglog)
+def loglog(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.loglog(*args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.magnitude_spectrum)
+def magnitude_spectrum(x, Fs=None, Fc=None, window=None, pad_to=None,
+ sides=None, scale=None, hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.magnitude_spectrum(x, Fs=Fs, Fc=Fc, window=window,
+ pad_to=pad_to, sides=sides, scale=scale,
+ data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.pcolor)
+def pcolor(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.pcolor(*args, **kwargs)
+ finally:
+ ax._hold = washold
+ sci(ret)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.pcolormesh)
+def pcolormesh(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.pcolormesh(*args, **kwargs)
+ finally:
+ ax._hold = washold
+ sci(ret)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.phase_spectrum)
+def phase_spectrum(x, Fs=None, Fc=None, window=None, pad_to=None, sides=None,
+ hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.phase_spectrum(x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to,
+ sides=sides, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.pie)
+def pie(x, explode=None, labels=None, colors=None, autopct=None,
+ pctdistance=0.6, shadow=False, labeldistance=1.1, startangle=None,
+ radius=None, counterclock=True, wedgeprops=None, textprops=None,
+ center=(0, 0), frame=False, rotatelabels=False, hold=None, data=None):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.pie(x, explode=explode, labels=labels, colors=colors,
+ autopct=autopct, pctdistance=pctdistance, shadow=shadow,
+ labeldistance=labeldistance, startangle=startangle,
+ radius=radius, counterclock=counterclock,
+ wedgeprops=wedgeprops, textprops=textprops, center=center,
+ frame=frame, rotatelabels=rotatelabels, data=data)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.plot)
+def plot(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.plot(*args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.plot_date)
+def plot_date(x, y, fmt='o', tz=None, xdate=True, ydate=False, hold=None,
+ data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.plot_date(x, y, fmt=fmt, tz=tz, xdate=xdate, ydate=ydate,
+ data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.psd)
+def psd(x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
+ noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
+ return_line=None, hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.psd(x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
+ window=window, noverlap=noverlap, pad_to=pad_to,
+ sides=sides, scale_by_freq=scale_by_freq,
+ return_line=return_line, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.quiver)
+def quiver(*args, **kw):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kw.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.quiver(*args, **kw)
+ finally:
+ ax._hold = washold
+ sci(ret)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.quiverkey)
+def quiverkey(*args, **kw):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kw.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.quiverkey(*args, **kw)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.scatter)
+def scatter(x, y, s=None, c=None, marker=None, cmap=None, norm=None, vmin=None,
+ vmax=None, alpha=None, linewidths=None, verts=None, edgecolors=None,
+ hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.scatter(x, y, s=s, c=c, marker=marker, cmap=cmap, norm=norm,
+ vmin=vmin, vmax=vmax, alpha=alpha,
+ linewidths=linewidths, verts=verts,
+ edgecolors=edgecolors, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+ sci(ret)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.semilogx)
+def semilogx(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.semilogx(*args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.semilogy)
+def semilogy(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.semilogy(*args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.specgram)
+def specgram(x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
+ noverlap=None, cmap=None, xextent=None, pad_to=None, sides=None,
+ scale_by_freq=None, mode=None, scale=None, vmin=None, vmax=None,
+ hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.specgram(x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
+ window=window, noverlap=noverlap, cmap=cmap,
+ xextent=xextent, pad_to=pad_to, sides=sides,
+ scale_by_freq=scale_by_freq, mode=mode, scale=scale,
+ vmin=vmin, vmax=vmax, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+ sci(ret[-1])
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.stackplot)
+def stackplot(x, *args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.stackplot(x, *args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.stem)
+def stem(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.stem(*args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.step)
+def step(x, y, *args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.step(x, y, *args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.streamplot)
+def streamplot(x, y, u, v, density=1, linewidth=None, color=None, cmap=None,
+ norm=None, arrowsize=1, arrowstyle='-|>', minlength=0.1,
+ transform=None, zorder=None, start_points=None, maxlength=4.0,
+ integration_direction='both', hold=None, data=None):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.streamplot(x, y, u, v, density=density, linewidth=linewidth,
+ color=color, cmap=cmap, norm=norm,
+ arrowsize=arrowsize, arrowstyle=arrowstyle,
+ minlength=minlength, transform=transform,
+ zorder=zorder, start_points=start_points,
+ maxlength=maxlength,
+ integration_direction=integration_direction,
+ data=data)
+ finally:
+ ax._hold = washold
+ sci(ret.lines)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.tricontour)
+def tricontour(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.tricontour(*args, **kwargs)
+ finally:
+ ax._hold = washold
+ if ret._A is not None: sci(ret)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.tricontourf)
+def tricontourf(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.tricontourf(*args, **kwargs)
+ finally:
+ ax._hold = washold
+ if ret._A is not None: sci(ret)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.tripcolor)
+def tripcolor(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.tripcolor(*args, **kwargs)
+ finally:
+ ax._hold = washold
+ sci(ret)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.triplot)
+def triplot(*args, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kwargs.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.triplot(*args, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.violinplot)
+def violinplot(dataset, positions=None, vert=True, widths=0.5, showmeans=False,
+ showextrema=True, showmedians=False, points=100, bw_method=None,
+ hold=None, data=None):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.violinplot(dataset, positions=positions, vert=vert,
+ widths=widths, showmeans=showmeans,
+ showextrema=showextrema, showmedians=showmedians,
+ points=points, bw_method=bw_method, data=data)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.vlines)
+def vlines(x, ymin, ymax, colors='k', linestyles='solid', label='', hold=None,
+ data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.vlines(x, ymin, ymax, colors=colors, linestyles=linestyles,
+ label=label, data=data, **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.xcorr)
+def xcorr(x, y, normed=True, detrend=mlab.detrend_none, usevlines=True,
+ maxlags=10, hold=None, data=None, **kwargs):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.xcorr(x, y, normed=normed, detrend=detrend,
+ usevlines=usevlines, maxlags=maxlags, data=data,
+ **kwargs)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@_autogen_docstring(Axes.barbs)
+def barbs(*args, **kw):
+ ax = gca()
+ # Deprecated: allow callers to override the hold state
+ # by passing hold=True|False
+ washold = ax._hold
+ hold = kw.pop('hold', None)
+ if hold is not None:
+ ax._hold = hold
+ from matplotlib.cbook import mplDeprecation
+ warnings.warn("The 'hold' keyword argument is deprecated since 2.0.",
+ mplDeprecation)
+ try:
+ ret = ax.barbs(*args, **kw)
+ finally:
+ ax._hold = washold
+
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@docstring.copy_dedent(Axes.cla)
+def cla():
+ ret = gca().cla()
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@docstring.copy_dedent(Axes.grid)
+def grid(b=None, which='major', axis='both', **kwargs):
+ ret = gca().grid(b=b, which=which, axis=axis, **kwargs)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@docstring.copy_dedent(Axes.legend)
+def legend(*args, **kwargs):
+ ret = gca().legend(*args, **kwargs)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@docstring.copy_dedent(Axes.table)
+def table(**kwargs):
+ ret = gca().table(**kwargs)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@docstring.copy_dedent(Axes.text)
+def text(x, y, s, fontdict=None, withdash=False, **kwargs):
+ ret = gca().text(x, y, s, fontdict=fontdict, withdash=withdash, **kwargs)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@docstring.copy_dedent(Axes.annotate)
+def annotate(*args, **kwargs):
+ ret = gca().annotate(*args, **kwargs)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@docstring.copy_dedent(Axes.ticklabel_format)
+def ticklabel_format(**kwargs):
+ ret = gca().ticklabel_format(**kwargs)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@docstring.copy_dedent(Axes.locator_params)
+def locator_params(axis='both', tight=None, **kwargs):
+ ret = gca().locator_params(axis=axis, tight=tight, **kwargs)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@docstring.copy_dedent(Axes.tick_params)
+def tick_params(axis='both', **kwargs):
+ ret = gca().tick_params(axis=axis, **kwargs)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@docstring.copy_dedent(Axes.margins)
+def margins(*args, **kw):
+ ret = gca().margins(*args, **kw)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+@docstring.copy_dedent(Axes.autoscale)
+def autoscale(enable=True, axis='both', tight=None):
+ ret = gca().autoscale(enable=enable, axis=axis, tight=tight)
+ return ret
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def autumn():
+ """
+ Set the colormap to "autumn".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("autumn")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def bone():
+ """
+ Set the colormap to "bone".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("bone")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def cool():
+ """
+ Set the colormap to "cool".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("cool")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def copper():
+ """
+ Set the colormap to "copper".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("copper")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def flag():
+ """
+ Set the colormap to "flag".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("flag")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def gray():
+ """
+ Set the colormap to "gray".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("gray")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def hot():
+ """
+ Set the colormap to "hot".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("hot")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def hsv():
+ """
+ Set the colormap to "hsv".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("hsv")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def jet():
+ """
+ Set the colormap to "jet".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("jet")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def pink():
+ """
+ Set the colormap to "pink".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("pink")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def prism():
+ """
+ Set the colormap to "prism".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("prism")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def spring():
+ """
+ Set the colormap to "spring".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("spring")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def summer():
+ """
+ Set the colormap to "summer".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("summer")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def winter():
+ """
+ Set the colormap to "winter".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("winter")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def magma():
+ """
+ Set the colormap to "magma".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("magma")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def inferno():
+ """
+ Set the colormap to "inferno".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("inferno")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def plasma():
+ """
+ Set the colormap to "plasma".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("plasma")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def viridis():
+ """
+ Set the colormap to "viridis".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("viridis")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def nipy_spectral():
+ """
+ Set the colormap to "nipy_spectral".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ set_cmap("nipy_spectral")
+
+
+# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
+def spectral():
+ """
+ Set the colormap to "spectral".
+
+ This changes the default colormap as well as the colormap of the current
+ image if there is one. See ``help(colormaps)`` for more information.
+ """
+ from matplotlib.cbook import warn_deprecated
+ warn_deprecated(
+ "2.0",
+ name="spectral",
+ obj_type="colormap"
+ )
+ set_cmap("spectral")
+
+_setup_pyplot_info_docstrings()
diff --git a/contrib/python/matplotlib/py2/matplotlib/quiver.py b/contrib/python/matplotlib/py2/matplotlib/quiver.py
new file mode 100644
index 00000000000..92de37ecb89
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/quiver.py
@@ -0,0 +1,1197 @@
+"""
+Support for plotting vector fields.
+
+Presently this contains Quiver and Barb. Quiver plots an arrow in the
+direction of the vector, with the size of the arrow related to the
+magnitude of the vector.
+
+Barbs are like quiver in that they point along a vector, but
+the magnitude of the vector is given schematically by the presence of barbs
+or flags on the barb.
+
+This will also become a home for things such as standard
+deviation ellipses, which can and will be derived very easily from
+the Quiver code.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import math
+import weakref
+
+import numpy as np
+from numpy import ma
+import matplotlib.collections as mcollections
+import matplotlib.transforms as transforms
+import matplotlib.text as mtext
+import matplotlib.artist as martist
+from matplotlib.artist import allow_rasterization
+from matplotlib import docstring
+import matplotlib.font_manager as font_manager
+from matplotlib.cbook import delete_masked_points
+from matplotlib.patches import CirclePolygon
+
+
+_quiver_doc = """
+Plot a 2-D field of arrows.
+
+Call signatures::
+
+ quiver(U, V, **kw)
+ quiver(U, V, C, **kw)
+ quiver(X, Y, U, V, **kw)
+ quiver(X, Y, U, V, C, **kw)
+
+*U* and *V* are the arrow data, *X* and *Y* set the location of the
+arrows, and *C* sets the color of the arrows. These arguments may be 1-D or
+2-D arrays or sequences.
+
+If *X* and *Y* are absent, they will be generated as a uniform grid.
+If *U* and *V* are 2-D arrays and *X* and *Y* are 1-D, and if ``len(X)`` and
+``len(Y)`` match the column and row dimensions of *U*, then *X* and *Y* will be
+expanded with :func:`numpy.meshgrid`.
+
+The default settings auto-scale the length of the arrows to a reasonable size.
+To change this behavior see the *scale* and *scale_units* kwargs.
+
+The defaults give a slightly swept-back arrow; to make the head a
+triangle, make *headaxislength* the same as *headlength*. To make the
+arrow more pointed, reduce *headwidth* or increase *headlength* and
+*headaxislength*. To make the head smaller relative to the shaft,
+scale down all the head parameters. You will probably do best to leave
+minshaft alone.
+
+*linewidths* and *edgecolors* can be used to customize the arrow
+outlines.
+
+Parameters
+----------
+X : 1D or 2D array, sequence, optional
+ The x coordinates of the arrow locations
+Y : 1D or 2D array, sequence, optional
+ The y coordinates of the arrow locations
+U : 1D or 2D array or masked array, sequence
+ The x components of the arrow vectors
+V : 1D or 2D array or masked array, sequence
+ The y components of the arrow vectors
+C : 1D or 2D array, sequence, optional
+ The arrow colors
+units : [ 'width' | 'height' | 'dots' | 'inches' | 'x' | 'y' | 'xy' ]
+ The arrow dimensions (except for *length*) are measured in multiples of
+ this unit.
+
+ 'width' or 'height': the width or height of the axis
+
+ 'dots' or 'inches': pixels or inches, based on the figure dpi
+
+ 'x', 'y', or 'xy': respectively *X*, *Y*, or :math:`\\sqrt{X^2 + Y^2}`
+ in data units
+
+ The arrows scale differently depending on the units. For
+ 'x' or 'y', the arrows get larger as one zooms in; for other
+ units, the arrow size is independent of the zoom state. For
+    'width' or 'height', the arrow size increases with the width and
+ height of the axes, respectively, when the window is resized;
+ for 'dots' or 'inches', resizing does not change the arrows.
+angles : [ 'uv' | 'xy' ], array, optional
+ Method for determining the angle of the arrows. Default is 'uv'.
+
+ 'uv': the arrow axis aspect ratio is 1 so that
+ if *U*==*V* the orientation of the arrow on the plot is 45 degrees
+ counter-clockwise from the horizontal axis (positive to the right).
+
+ 'xy': arrows point from (x,y) to (x+u, y+v).
+ Use this for plotting a gradient field, for example.
+
+ Alternatively, arbitrary angles may be specified as an array
+ of values in degrees, counter-clockwise from the horizontal axis.
+
+ Note: inverting a data axis will correspondingly invert the
+ arrows only with ``angles='xy'``.
+scale : None, float, optional
+ Number of data units per arrow length unit, e.g., m/s per plot width; a
+ smaller scale parameter makes the arrow longer. Default is *None*.
+
+ If *None*, a simple autoscaling algorithm is used, based on the average
+ vector length and the number of vectors. The arrow length unit is given by
+    the *scale_units* parameter.
+scale_units : [ 'width' | 'height' | 'dots' | 'inches' | 'x' | 'y' | 'xy' ], \
+None, optional
+ If the *scale* kwarg is *None*, the arrow length unit. Default is *None*.
+
+    e.g. if *scale_units* is 'inches', *scale* is 2.0, and
+ ``(u,v) = (1,0)``, then the vector will be 0.5 inches long.
+
+ If *scale_units* is 'width'/'height', then the vector will be half the
+ width/height of the axes.
+
+ If *scale_units* is 'x' then the vector will be 0.5 x-axis
+ units. To plot vectors in the x-y plane, with u and v having
+ the same units as x and y, use
+ ``angles='xy', scale_units='xy', scale=1``.
+width : scalar, optional
+ Shaft width in arrow units; default depends on choice of units,
+ above, and number of vectors; a typical starting value is about
+ 0.005 times the width of the plot.
+headwidth : scalar, optional
+ Head width as multiple of shaft width, default is 3
+headlength : scalar, optional
+ Head length as multiple of shaft width, default is 5
+headaxislength : scalar, optional
+ Head length at shaft intersection, default is 4.5
+minshaft : scalar, optional
+ Length below which arrow scales, in units of head length. Do not
+ set this to less than 1, or small arrows will look terrible!
+ Default is 1
+minlength : scalar, optional
+ Minimum length as a multiple of shaft width; if an arrow length
+ is less than this, plot a dot (hexagon) of this diameter instead.
+ Default is 1.
+pivot : [ 'tail' | 'mid' | 'middle' | 'tip' ], optional
+ The part of the arrow that is at the grid point; the arrow rotates
+ about this point, hence the name *pivot*.
+color : [ color | color sequence ], optional
+ This is a synonym for the
+ :class:`~matplotlib.collections.PolyCollection` facecolor kwarg.
+ If *C* has been set, *color* has no effect.
+
+Notes
+-----
+Additional :class:`~matplotlib.collections.PolyCollection`
+keyword arguments:
+
+%(PolyCollection)s
+
+See Also
+--------
+quiverkey : Add a key to a quiver plot
+""" % docstring.interpd.params
+
+_quiverkey_doc = """
+Add a key to a quiver plot.
+
+Call signature::
+
+ quiverkey(Q, X, Y, U, label, **kw)
+
+Arguments:
+
+ *Q*:
+ The Quiver instance returned by a call to quiver.
+
+ *X*, *Y*:
+ The location of the key; additional explanation follows.
+
+ *U*:
+ The length of the key
+
+ *label*:
+ A string with the length and units of the key
+
+Keyword arguments:
+
+ *angle* = 0
+ The angle of the key arrow. Measured in degrees anti-clockwise from the
+ x-axis.
+
+ *coordinates* = [ 'axes' | 'figure' | 'data' | 'inches' ]
+ Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
+ normalized coordinate systems with 0,0 in the lower left and 1,1
+ in the upper right; 'data' are the axes data coordinates (used for
+ the locations of the vectors in the quiver plot itself); 'inches'
+ is position in the figure in inches, with 0,0 at the lower left
+ corner.
+
+ *color*:
+ overrides face and edge colors from *Q*.
+
+ *labelpos* = [ 'N' | 'S' | 'E' | 'W' ]
+ Position the label above, below, to the right, to the left of the
+ arrow, respectively.
+
+ *labelsep*:
+ Distance in inches between the arrow and the label. Default is
+ 0.1
+
+ *labelcolor*:
+ defaults to default :class:`~matplotlib.text.Text` color.
+
+ *fontproperties*:
+ A dictionary with keyword arguments accepted by the
+ :class:`~matplotlib.font_manager.FontProperties` initializer:
+ *family*, *style*, *variant*, *size*, *weight*
+
+Any additional keyword arguments are used to override vector
+properties taken from *Q*.
+
+The positioning of the key depends on *X*, *Y*, *coordinates*, and
+*labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position
+of the middle of the key arrow. If *labelpos* is 'E', *X*, *Y*
+positions the head, and if *labelpos* is 'W', *X*, *Y* positions the
+tail; in either of these two cases, *X*, *Y* is somewhere in the
+middle of the arrow+label key object.
+"""
+
+
+class QuiverKey(martist.Artist):
+ """ Labelled arrow for use as a quiver plot scale key."""
+ halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
+ valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
+ pivot = {'N': 'middle', 'S': 'middle', 'E': 'tip', 'W': 'tail'}
+
+ def __init__(self, Q, X, Y, U, label, **kw):
+ martist.Artist.__init__(self)
+ self.Q = Q
+ self.X = X
+ self.Y = Y
+ self.U = U
+ self.angle = kw.pop('angle', 0)
+ self.coord = kw.pop('coordinates', 'axes')
+ self.color = kw.pop('color', None)
+ self.label = label
+ self._labelsep_inches = kw.pop('labelsep', 0.1)
+ self.labelsep = (self._labelsep_inches * Q.ax.figure.dpi)
+
+ # try to prevent closure over the real self
+ weak_self = weakref.ref(self)
+
+ def on_dpi_change(fig):
+ self_weakref = weak_self()
+ if self_weakref is not None:
+ self_weakref.labelsep = (self_weakref._labelsep_inches*fig.dpi)
+ self_weakref._initialized = False # simple brute force update
+ # works because _init is
+ # called at the start of
+ # draw.
+
+ self._cid = Q.ax.figure.callbacks.connect('dpi_changed',
+ on_dpi_change)
+
+ self.labelpos = kw.pop('labelpos', 'N')
+ self.labelcolor = kw.pop('labelcolor', None)
+ self.fontproperties = kw.pop('fontproperties', dict())
+ self.kw = kw
+ _fp = self.fontproperties
+ # boxprops = dict(facecolor='red')
+ self.text = mtext.Text(
+ text=label, # bbox=boxprops,
+ horizontalalignment=self.halign[self.labelpos],
+ verticalalignment=self.valign[self.labelpos],
+ fontproperties=font_manager.FontProperties(**_fp))
+
+ if self.labelcolor is not None:
+ self.text.set_color(self.labelcolor)
+ self._initialized = False
+ self.zorder = Q.zorder + 0.1
+
+ def remove(self):
+ """
+ Overload the remove method
+ """
+ self.Q.ax.figure.callbacks.disconnect(self._cid)
+ self._cid = None
+ # pass the remove call up the stack
+ martist.Artist.remove(self)
+
+ __init__.__doc__ = _quiverkey_doc
+
+ def _init(self):
+ if True: # not self._initialized:
+ if not self.Q._initialized:
+ self.Q._init()
+ self._set_transform()
+ _pivot = self.Q.pivot
+ self.Q.pivot = self.pivot[self.labelpos]
+ # Hack: save and restore the Umask
+ _mask = self.Q.Umask
+ self.Q.Umask = ma.nomask
+ self.verts = self.Q._make_verts(np.array([self.U]),
+ np.zeros((1,)),
+ self.angle)
+ self.Q.Umask = _mask
+ self.Q.pivot = _pivot
+ kw = self.Q.polykw
+ kw.update(self.kw)
+ self.vector = mcollections.PolyCollection(
+ self.verts,
+ offsets=[(self.X, self.Y)],
+ transOffset=self.get_transform(),
+ **kw)
+ if self.color is not None:
+ self.vector.set_color(self.color)
+ self.vector.set_transform(self.Q.get_transform())
+ self.vector.set_figure(self.get_figure())
+ self._initialized = True
+
+ def _text_x(self, x):
+ if self.labelpos == 'E':
+ return x + self.labelsep
+ elif self.labelpos == 'W':
+ return x - self.labelsep
+ else:
+ return x
+
+ def _text_y(self, y):
+ if self.labelpos == 'N':
+ return y + self.labelsep
+ elif self.labelpos == 'S':
+ return y - self.labelsep
+ else:
+ return y
+
+ @allow_rasterization
+ def draw(self, renderer):
+ self._init()
+ self.vector.draw(renderer)
+ x, y = self.get_transform().transform_point((self.X, self.Y))
+ self.text.set_x(self._text_x(x))
+ self.text.set_y(self._text_y(y))
+ self.text.draw(renderer)
+ self.stale = False
+
+ def _set_transform(self):
+ if self.coord == 'data':
+ self.set_transform(self.Q.ax.transData)
+ elif self.coord == 'axes':
+ self.set_transform(self.Q.ax.transAxes)
+ elif self.coord == 'figure':
+ self.set_transform(self.Q.ax.figure.transFigure)
+ elif self.coord == 'inches':
+ self.set_transform(self.Q.ax.figure.dpi_scale_trans)
+ else:
+ raise ValueError('unrecognized coordinates')
+
+ def set_figure(self, fig):
+ martist.Artist.set_figure(self, fig)
+ self.text.set_figure(fig)
+
+ def contains(self, mouseevent):
+ # Maybe the dictionary should allow one to
+ # distinguish between a text hit and a vector hit.
+ if (self.text.contains(mouseevent)[0] or
+ self.vector.contains(mouseevent)[0]):
+ return True, {}
+ return False, {}
+
+ quiverkey_doc = _quiverkey_doc
+
+
+# This is a helper function that parses out the various combination of
+# arguments for doing colored vector plots. Pulling it out here
+# allows both Quiver and Barbs to use it
+def _parse_args(*args):
+ X, Y, U, V, C = [None] * 5
+ args = list(args)
+
+ # The use of atleast_1d allows for handling scalar arguments while also
+ # keeping masked arrays
+ if len(args) == 3 or len(args) == 5:
+ C = np.atleast_1d(args.pop(-1))
+ V = np.atleast_1d(args.pop(-1))
+ U = np.atleast_1d(args.pop(-1))
+ if U.ndim == 1:
+ nr, nc = 1, U.shape[0]
+ else:
+ nr, nc = U.shape
+ if len(args) == 2: # remaining after removing U,V,C
+ X, Y = [np.array(a).ravel() for a in args]
+ if len(X) == nc and len(Y) == nr:
+ X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
+ else:
+ indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
+ X, Y = [np.ravel(a) for a in indexgrid]
+ return X, Y, U, V, C
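+
+# Illustrative sketch (hypothetical shapes): given a 2-D *U* of shape (3, 4)
+# and no *X*, *Y*, the helper builds the index grid itself, e.g.
+#
+#     X, Y, U, V, C = _parse_args(np.ones((3, 4)), np.ones((3, 4)))
+#     # X.shape == Y.shape == (12,), from meshgrid(arange(4), arange(3))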
+
+
+def _check_consistent_shapes(*arrays):
+ all_shapes = set(a.shape for a in arrays)
+ if len(all_shapes) != 1:
+ raise ValueError('The shapes of the passed in arrays do not match.')
+
+
+class Quiver(mcollections.PolyCollection):
+ """
+ Specialized PolyCollection for arrows.
+
+ The only API method is set_UVC(), which can be used
+ to change the size, orientation, and color of the
+ arrows; their locations are fixed when the class is
+ instantiated. Possibly this method will be useful
+ in animations.
+
+ Much of the work in this class is done in the draw()
+ method so that as much information as possible is available
+ about the plot. In subsequent draw() calls, recalculation
+ is limited to things that might have changed, so there
+ should be no performance penalty from putting the calculations
+ in the draw() method.
+ """
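+
+ # A minimal usage sketch (hypothetical data); only the public set_UVC()
+ # behavior described above is assumed:
+ #
+ # q = ax.quiver(X, Y, U, V)   # arrow locations fixed here
+ # q.set_UVC(U2, V2, C2)       # later: new sizes, orientations, colors
+ # fig.canvas.draw_idle()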
+
+ _PIVOT_VALS = ('tail', 'mid', 'middle', 'tip')
+
+ @docstring.Substitution(_quiver_doc)
+ def __init__(self, ax, *args, **kw):
+ """
+ The constructor takes one required argument, an Axes
+ instance, followed by the args and kwargs described
+ by the following pylab interface documentation:
+ %s
+ """
+ self.ax = ax
+ X, Y, U, V, C = _parse_args(*args)
+ self.X = X
+ self.Y = Y
+ self.XY = np.hstack((X[:, np.newaxis], Y[:, np.newaxis]))
+ self.N = len(X)
+ self.scale = kw.pop('scale', None)
+ self.headwidth = kw.pop('headwidth', 3)
+ self.headlength = float(kw.pop('headlength', 5))
+ self.headaxislength = kw.pop('headaxislength', 4.5)
+ self.minshaft = kw.pop('minshaft', 1)
+ self.minlength = kw.pop('minlength', 1)
+ self.units = kw.pop('units', 'width')
+ self.scale_units = kw.pop('scale_units', None)
+ self.angles = kw.pop('angles', 'uv')
+ self.width = kw.pop('width', None)
+ self.color = kw.pop('color', 'k')
+
+ pivot = kw.pop('pivot', 'tail').lower()
+ # validate pivot
+ if pivot not in self._PIVOT_VALS:
+ raise ValueError(
+ 'pivot must be one of {keys}, you passed {inp}'.format(
+ keys=self._PIVOT_VALS, inp=pivot))
+ # normalize to 'middle'
+ if pivot == 'mid':
+ pivot = 'middle'
+ self.pivot = pivot
+
+ self.transform = kw.pop('transform', ax.transData)
+ kw.setdefault('facecolors', self.color)
+ kw.setdefault('linewidths', (0,))
+ mcollections.PolyCollection.__init__(self, [], offsets=self.XY,
+ transOffset=self.transform,
+ closed=False,
+ **kw)
+ self.polykw = kw
+ self.set_UVC(U, V, C)
+ self._initialized = False
+
+ self.keyvec = None
+ self.keytext = None
+
+ # try to prevent closure over the real self
+ weak_self = weakref.ref(self)
+
+ def on_dpi_change(fig):
+ self_weakref = weak_self()
+ if self_weakref is not None:
+ self_weakref._new_UV = True # vertices depend on width, span
+ # which in turn depend on dpi
+ self_weakref._initialized = False # simple brute force update
+ # works because _init is
+ # called at the start of
+ # draw.
+
+ self._cid = self.ax.figure.callbacks.connect('dpi_changed',
+ on_dpi_change)
+
+ def remove(self):
+ """
+ Overload the remove method
+ """
+ # disconnect the call back
+ self.ax.figure.callbacks.disconnect(self._cid)
+ self._cid = None
+ # pass the remove call up the stack
+ mcollections.PolyCollection.remove(self)
+
+ def _init(self):
+ """
+ Initialization delayed until first draw;
+ allow time for axes setup.
+ """
+ # It seems that there are not enough event notifications
+ # available to have this work on an as-needed basis at present.
+ if True: # not self._initialized:
+ trans = self._set_transform()
+ ax = self.ax
+ sx, sy = trans.inverted().transform_point(
+ (ax.bbox.width, ax.bbox.height))
+ self.span = sx
+ if self.width is None:
+ sn = np.clip(math.sqrt(self.N), 8, 25)
+ self.width = 0.06 * self.span / sn
+
+ # _make_verts sets self.scale if not already specified
+ if not self._initialized and self.scale is None:
+ self._make_verts(self.U, self.V, self.angles)
+
+ self._initialized = True
+
+ def get_datalim(self, transData):
+ trans = self.get_transform()
+ transOffset = self.get_offset_transform()
+ full_transform = (trans - transData) + (transOffset - transData)
+ XY = full_transform.transform(self.XY)
+ bbox = transforms.Bbox.null()
+ bbox.update_from_data_xy(XY, ignore=True)
+ return bbox
+
+ @allow_rasterization
+ def draw(self, renderer):
+ self._init()
+ verts = self._make_verts(self.U, self.V, self.angles)
+ self.set_verts(verts, closed=False)
+ self._new_UV = False
+ mcollections.PolyCollection.draw(self, renderer)
+ self.stale = False
+
+ def set_UVC(self, U, V, C=None):
+ # We need to ensure we have a copy, not a reference
+ # to an array that might change before draw().
+ U = ma.masked_invalid(U, copy=True).ravel()
+ V = ma.masked_invalid(V, copy=True).ravel()
+ mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
+ if C is not None:
+ C = ma.masked_invalid(C, copy=True).ravel()
+ mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
+ if mask is ma.nomask:
+ C = C.filled()
+ else:
+ C = ma.array(C, mask=mask, copy=False)
+ self.U = U.filled(1)
+ self.V = V.filled(1)
+ self.Umask = mask
+ if C is not None:
+ self.set_array(C)
+ self._new_UV = True
+ self.stale = True
+
+ def _dots_per_unit(self, units):
+ """
+ Return a scale factor for converting from units to pixels
+ """
+ ax = self.ax
+ if units in ('x', 'y', 'xy'):
+ if units == 'x':
+ dx0 = ax.viewLim.width
+ dx1 = ax.bbox.width
+ elif units == 'y':
+ dx0 = ax.viewLim.height
+ dx1 = ax.bbox.height
+ else: # 'xy' is assumed
+ dxx0 = ax.viewLim.width
+ dxx1 = ax.bbox.width
+ dyy0 = ax.viewLim.height
+ dyy1 = ax.bbox.height
+ dx1 = np.hypot(dxx1, dyy1)
+ dx0 = np.hypot(dxx0, dyy0)
+ dx = dx1 / dx0
+ else:
+ if units == 'width':
+ dx = ax.bbox.width
+ elif units == 'height':
+ dx = ax.bbox.height
+ elif units == 'dots':
+ dx = 1.0
+ elif units == 'inches':
+ dx = ax.figure.dpi
+ else:
+ raise ValueError('unrecognized units')
+ return dx
+
+ def _set_transform(self):
+ """
+ Sets the PolygonCollection transform to go
+ from arrow width units to pixels.
+ """
+ dx = self._dots_per_unit(self.units)
+ self._trans_scale = dx # pixels per arrow width unit
+ trans = transforms.Affine2D().scale(dx)
+ self.set_transform(trans)
+ return trans
+
+ def _angles_lengths(self, U, V, eps=1):
+ xy = self.ax.transData.transform(self.XY)
+ uv = np.hstack((U[:, np.newaxis], V[:, np.newaxis]))
+ xyp = self.ax.transData.transform(self.XY + eps * uv)
+ dxy = xyp - xy
+ angles = np.arctan2(dxy[:, 1], dxy[:, 0])
+ lengths = np.hypot(*dxy.T) / eps
+ return angles, lengths
+
+ def _make_verts(self, U, V, angles):
+ uv = (U + V * 1j)
+ str_angles = angles if isinstance(angles, six.string_types) else ''
+ if str_angles == 'xy' and self.scale_units == 'xy':
+ # Here eps is 1 so that if we get U, V by diffing
+ # the X, Y arrays, the vectors will connect the
+ # points, regardless of the axis scaling (including log).
+ angles, lengths = self._angles_lengths(U, V, eps=1)
+ elif str_angles == 'xy' or self.scale_units == 'xy':
+ # Calculate eps based on the extents of the plot
+ # so that we don't end up with roundoff error from
+ # adding a small number to a large.
+ eps = np.abs(self.ax.dataLim.extents).max() * 0.001
+ angles, lengths = self._angles_lengths(U, V, eps=eps)
+ if str_angles and self.scale_units == 'xy':
+ a = lengths
+ else:
+ a = np.abs(uv)
+ if self.scale is None:
+ sn = max(10, math.sqrt(self.N))
+ if self.Umask is not ma.nomask:
+ amean = a[~self.Umask].mean()
+ else:
+ amean = a.mean()
+ # crude auto-scaling
+ # scale is typical arrow length as a multiple of the arrow width
+ scale = 1.8 * amean * sn / self.span
+ if self.scale_units is None:
+ if self.scale is None:
+ self.scale = scale
+ widthu_per_lenu = 1.0
+ else:
+ if self.scale_units == 'xy':
+ dx = 1
+ else:
+ dx = self._dots_per_unit(self.scale_units)
+ widthu_per_lenu = dx / self._trans_scale
+ if self.scale is None:
+ self.scale = scale * widthu_per_lenu
+ length = a * (widthu_per_lenu / (self.scale * self.width))
+ X, Y = self._h_arrows(length)
+ if str_angles == 'xy':
+ theta = angles
+ elif str_angles == 'uv':
+ theta = np.angle(uv)
+ else:
+ theta = ma.masked_invalid(np.deg2rad(angles)).filled(0)
+ theta = theta.reshape((-1, 1)) # for broadcasting
+ xy = (X + Y * 1j) * np.exp(1j * theta) * self.width
+ xy = xy[:, :, np.newaxis]
+ XY = np.concatenate((xy.real, xy.imag), axis=2)
+ if self.Umask is not ma.nomask:
+ XY = ma.array(XY)
+ XY[self.Umask] = ma.masked
+ # This might be handled more efficiently with nans, given
+ # that nans will end up in the paths anyway.
+
+ return XY
+
+ def _h_arrows(self, length):
+ """ length is in arrow width units """
+ # It might be possible to streamline the code
+ # and speed it up a bit by using complex (x,y)
+ # instead of separate arrays; but any gain would be slight.
+ minsh = self.minshaft * self.headlength
+ N = len(length)
+ length = length.reshape(N, 1)
+ # This number is chosen based on when pixel values overflow in Agg
+ # causing rendering errors
+ # length = np.minimum(length, 2 ** 16)
+ np.clip(length, 0, 2 ** 16, out=length)
+ # x, y: normal horizontal arrow
+ x = np.array([0, -self.headaxislength,
+ -self.headlength, 0],
+ np.float64)
+ x = x + np.array([0, 1, 1, 1]) * length
+ y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
+ y = np.repeat(y[np.newaxis, :], N, axis=0)
+ # x0, y0: arrow without shaft, for short vectors
+ x0 = np.array([0, minsh - self.headaxislength,
+ minsh - self.headlength, minsh], np.float64)
+ y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
+ ii = [0, 1, 2, 3, 2, 1, 0, 0]
+ X = x.take(ii, 1)
+ Y = y.take(ii, 1)
+ Y[:, 3:-1] *= -1
+ X0 = x0.take(ii)
+ Y0 = y0.take(ii)
+ Y0[3:-1] *= -1
+ shrink = length / minsh if minsh != 0. else 0.
+ X0 = shrink * X0[np.newaxis, :]
+ Y0 = shrink * Y0[np.newaxis, :]
+ short = np.repeat(length < minsh, 8, axis=1)
+ # Now select X0, Y0 if short, otherwise X, Y
+ np.copyto(X, X0, where=short)
+ np.copyto(Y, Y0, where=short)
+ if self.pivot == 'middle':
+ X -= 0.5 * X[:, 3, np.newaxis]
+ elif self.pivot == 'tip':
+ X = X - X[:, 3, np.newaxis] # numpy bug? using -= does not
+ # work here unless we multiply
+ # by a float first, as with 'mid'.
+ elif self.pivot != 'tail':
+ raise ValueError(("Quiver.pivot must have value in {{'middle', "
+ "'tip', 'tail'}} not {0}").format(self.pivot))
+
+ tooshort = length < self.minlength
+ if tooshort.any():
+ # Use a heptagonal dot:
+ th = np.arange(0, 8, 1, np.float64) * (np.pi / 3.0)
+ x1 = np.cos(th) * self.minlength * 0.5
+ y1 = np.sin(th) * self.minlength * 0.5
+ X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
+ Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
+ tooshort = np.repeat(tooshort, 8, 1)
+ np.copyto(X, X1, where=tooshort)
+ np.copyto(Y, Y1, where=tooshort)
+ # Mask handling is deferred to the caller, _make_verts.
+ return X, Y
+
+ quiver_doc = _quiver_doc
+
+
+_barbs_doc = r"""
+Plot a 2-D field of barbs.
+
+Call signatures::
+
+ barb(U, V, **kw)
+ barb(U, V, C, **kw)
+ barb(X, Y, U, V, **kw)
+ barb(X, Y, U, V, C, **kw)
+
+Arguments:
+
+ *X*, *Y*:
+ The x and y coordinates of the barb locations
+ (default is head of barb; see *pivot* kwarg)
+
+ *U*, *V*:
+ Give the x and y components of the barb shaft
+
+ *C*:
+ An optional array used to map colors to the barbs
+
+All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
+are absent, they will be generated as a uniform grid. If *U* and *V*
+are 2-D arrays but *X* and *Y* are 1-D, and if ``len(X)`` and ``len(Y)``
+match the column and row dimensions of *U*, then *X* and *Y* will be
+expanded with :func:`numpy.meshgrid`.
+
+*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
+supported at present.
+
+Keyword arguments:
+
+ *length*:
+ Length of the barb in points; the other parts of the barb
+ are scaled against this.
+ Default is 7.
+
+ *pivot*: [ 'tip' | 'middle' | float ]
+ The part of the arrow that is at the grid point; the arrow rotates
+ about this point, hence the name *pivot*. Default is 'tip'. Can
+ also be a number, which shifts the start of the barb that many
+ points from the origin.
+
+ *barbcolor*: [ color | color sequence ]
+ Specifies the color of all parts of the barb except any flags. This
+ parameter is analogous to the *edgecolor* parameter for polygons,
+ which can be used instead. However this parameter will override
+ facecolor.
+
+ *flagcolor*: [ color | color sequence ]
+ Specifies the color of any flags on the barb. This parameter is
+ analogous to the *facecolor* parameter for polygons, which can be
+ used instead. However this parameter will override facecolor. If
+ this is not set (and *C* has not either) then *flagcolor* will be
+ this is not set (and *C* has not been set either) then *flagcolor* will be
+ *C* has been set, *flagcolor* has no effect.
+
+ *sizes*:
+ A dictionary of coefficients specifying the ratio of a given
+ feature to the length of the barb. Only those values one wishes to
+ override need to be included. These features include:
+
+ - 'spacing' - space between features (flags, full/half barbs)
+
+ - 'height' - height (distance from shaft to top) of a flag or
+ full barb
+
+ - 'width' - width of a flag, twice the width of a full barb
+
+ - 'emptybarb' - radius of the circle used for low magnitudes
+
+ *fill_empty*:
+ A flag indicating whether the empty barbs (circles) that are drawn should
+ be filled with the flag color. If they are not filled, they will
+ be drawn such that no color is applied to the center. Default is
+ False
+
+ *rounding*:
+ A flag to indicate whether the vector magnitude should be rounded
+ when allocating barb components. If True, the magnitude is
+ rounded to the nearest multiple of the half-barb increment. If
+ False, the magnitude is simply truncated to the next lowest
+ multiple. Default is True
+
+ *barb_increments*:
+ A dictionary of increments specifying values to associate with
+ different parts of the barb. Only those values one wishes to
+ override need to be included.
+
+ - 'half' - half barbs (Default is 5)
+
+ - 'full' - full barbs (Default is 10)
+
+ - 'flag' - flags (default is 50)
+
+ *flip_barb*:
+ Either a single boolean flag or an array of booleans. Single
+ boolean indicates whether the lines and flags should point
+ opposite to normal for all barbs. An array (which should be the
+ same size as the other data arrays) indicates whether to flip for
+ each individual barb. Normal behavior is for the barbs and lines
+ to point right (comes from wind barbs having these features point
+ towards low pressure in the Northern Hemisphere.) Default is
+ False
+
+Barbs are traditionally used in meteorology as a way to plot the speed
+and direction of wind observations, but can technically be used to
+plot any two dimensional vector quantity. As opposed to arrows, which
+give vector magnitude by the length of the arrow, the barbs give more
+quantitative information about the vector magnitude by putting slanted
+lines or a triangle for various increments in magnitude, as shown
+schematically below::
+
+ : /\ \\
+ : / \ \\
+ : / \ \ \\
+ : / \ \ \\
+ : ------------------------------
+
+.. note the double \\ at the end of each line to make the figure
+.. render correctly
+
+The largest increment is given by a triangle (or "flag"). After those
+come full lines (barbs). The smallest increment is a half line. There
+is only, of course, ever at most 1 half line. If the magnitude is
+small and only needs a single half-line and no full lines or
+triangles, the half-line is offset from the end of the barb so that it
+can be easily distinguished from barbs with a single full line. The
+magnitude for the barb shown above would nominally be 65, using the
+standard increments of 50, 10, and 5.
+
+linewidths and edgecolors can be used to customize the barb.
+Additional :class:`~matplotlib.collections.PolyCollection` keyword
+arguments:
+
+%(PolyCollection)s
+""" % docstring.interpd.params
+
+docstring.interpd.update(barbs_doc=_barbs_doc)
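+
+# A minimal plotting sketch for the call signatures documented above
+# (hypothetical wind data; the Axes.barbs wrapper is assumed to construct a
+# Barbs instance):
+#
+#     import numpy as np
+#     import matplotlib.pyplot as plt
+#     x, y = np.meshgrid(np.arange(5), np.arange(5))
+#     u, v = 20 * np.cos(x), 20 * np.sin(y)
+#     fig, ax = plt.subplots()
+#     ax.barbs(x, y, u, v, length=7, pivot='middle')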
+
+
+class Barbs(mcollections.PolyCollection):
+ '''
+ Specialized PolyCollection for barbs.
+
+ The only API method is :meth:`set_UVC`, which can be used to
+ change the size, orientation, and color of the arrows. Locations
+ are changed using the :meth:`set_offsets` collection method.
+ Possibly this method will be useful in animations.
+
+ There is one internal function :meth:`_find_tails` which finds
+ exactly what should be put on the barb given the vector magnitude.
+ From there :meth:`_make_barbs` is used to find the vertices of the
+ polygon to represent the barb based on this information.
+ '''
+ # Using polygons here to render what is essentially one triangle and a
+ # series of lines may be an abuse, but it works fine as far as I can
+ # tell.
+ @docstring.interpd
+ def __init__(self, ax, *args, **kw):
+ """
+ The constructor takes one required argument, an Axes
+ instance, followed by the args and kwargs described
+ by the following pylab interface documentation:
+ %(barbs_doc)s
+ """
+ self._pivot = kw.pop('pivot', 'tip')
+ self._length = kw.pop('length', 7)
+ barbcolor = kw.pop('barbcolor', None)
+ flagcolor = kw.pop('flagcolor', None)
+ self.sizes = kw.pop('sizes', dict())
+ self.fill_empty = kw.pop('fill_empty', False)
+ self.barb_increments = kw.pop('barb_increments', dict())
+ self.rounding = kw.pop('rounding', True)
+ self.flip = kw.pop('flip_barb', False)
+ transform = kw.pop('transform', ax.transData)
+
+ # Flagcolor and barbcolor provide convenience parameters for
+ # setting the facecolor and edgecolor, respectively, of the barb
+ # polygon. We also work here to make the flag the same color as the
+ # rest of the barb by default
+
+ if None in (barbcolor, flagcolor):
+ kw['edgecolors'] = 'face'
+ if flagcolor:
+ kw['facecolors'] = flagcolor
+ elif barbcolor:
+ kw['facecolors'] = barbcolor
+ else:
+ # Set to facecolor passed in or default to black
+ kw.setdefault('facecolors', 'k')
+ else:
+ kw['edgecolors'] = barbcolor
+ kw['facecolors'] = flagcolor
+
+ # Explicitly set a line width if we're not given one, otherwise
+ # polygons are not outlined and we get no barbs
+ if 'linewidth' not in kw and 'lw' not in kw:
+ kw['linewidth'] = 1
+
+ # Parse out the data arrays from the various configurations supported
+ x, y, u, v, c = _parse_args(*args)
+ self.x = x
+ self.y = y
+ xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
+
+ # Make a collection
+ barb_size = self._length ** 2 / 4 # Empirically determined
+ mcollections.PolyCollection.__init__(self, [], (barb_size,),
+ offsets=xy,
+ transOffset=transform, **kw)
+ self.set_transform(transforms.IdentityTransform())
+
+ self.set_UVC(u, v, c)
+
+ def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
+ '''
+ Find how many of each of the tail pieces is necessary. Flag
+ specifies the increment for a flag, barb for a full barb, and half for
+ half a barb. Mag should be the magnitude of a vector (i.e., >= 0).
+
+ This returns a tuple of:
+
+ (*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
+
+ *half_flag* is a boolean whether half of a barb is needed,
+ since there should only ever be one half on a given
+ barb. *empty_flag* is an array of booleans to easily tell if
+ a barb is empty (too low to plot any barbs/flags).
+ '''
+
+ # If rounding, round to the nearest multiple of half, the smallest
+ # increment
+ if rounding:
+ mag = half * (mag / half + 0.5).astype(int)
+
+ num_flags = np.floor(mag / flag).astype(int)
+ mag = np.mod(mag, flag)
+
+ num_barb = np.floor(mag / full).astype(int)
+ mag = np.mod(mag, full)
+
+ half_flag = mag >= half
+ empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
+
+ return num_flags, num_barb, half_flag, empty_flag
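+
+ # Worked example (assuming the default increments half=5, full=10,
+ # flag=50): a magnitude of 65 decomposes into num_flags=1, num_barb=1,
+ # half_flag=True and empty_flag=False, matching the schematic barb in
+ # the barbs documentation above.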
+
+ def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
+ pivot, sizes, fill_empty, flip):
+ '''
+ This function actually creates the wind barbs. *u* and *v*
+ are components of the vector in the *x* and *y* directions,
+ respectively.
+
+ *nflags*, *nbarbs*, *half_barb*, and *empty_flag* are,
+ respectively, the number of flags, number of barbs, flag for
+ half a barb, and flag for empty barb, ostensibly obtained
+ from :meth:`_find_tails`.
+
+ *length* is the length of the barb staff in points.
+
+ *pivot* specifies the point on the barb around which the
+ entire barb should be rotated. Right now, valid options are
+ 'tip' and 'middle'. Can also be a number, which shifts the start
+ of the barb that many points from the origin.
+
+ *sizes* is a dictionary of coefficients specifying the ratio
+ of a given feature to the length of the barb. These features
+ include:
+
+ - *spacing*: space between features (flags, full/half
+ barbs)
+
+ - *height*: distance from shaft to top of a flag or full
+ barb
+
+ - *width*: width of a flag, twice the width of a full barb
+
+ - *emptybarb*: radius of the circle used for low
+ magnitudes
+
+ *fill_empty* specifies whether the circle representing an
+ empty barb should be filled or not (this changes the drawing
+ of the polygon).
+
+ *flip* is a flag indicating whether the features should be flipped to
+ the other side of the barb (useful for winds in the southern
+ hemisphere).
+
+ This function returns list of arrays of vertices, defining a polygon
+ for each of the wind barbs. These polygons have been rotated to
+ properly align with the vector direction.
+ '''
+
+ # These control the spacing and size of barb elements relative to the
+ # length of the shaft
+ spacing = length * sizes.get('spacing', 0.125)
+ full_height = length * sizes.get('height', 0.4)
+ full_width = length * sizes.get('width', 0.25)
+ empty_rad = length * sizes.get('emptybarb', 0.15)
+
+ # Controls y point where to pivot the barb.
+ pivot_points = dict(tip=0.0, middle=-length / 2.)
+
+ # Check for flip
+ if flip:
+ full_height = -full_height
+
+ endx = 0.0
+ try:
+ endy = float(pivot)
+ except ValueError:
+ endy = pivot_points[pivot.lower()]
+
+ # Get the appropriate angle for the vector components. The offset is
+ # due to the way the barb is initially drawn, going down the y-axis.
+ # This makes sense in a meteorological mode of thinking since there 0
+ # degrees corresponds to north (the y-axis traditionally)
+ angles = -(ma.arctan2(v, u) + np.pi / 2)
+
+ # Used for low magnitude. We just get the vertices, so if we make it
+ # out here, it can be reused. The center set here should put the
+ # center of the circle at the location(offset), rather than at the
+ # same point as the barb pivot; this seems more sensible.
+ circ = CirclePolygon((0, 0), radius=empty_rad).get_verts()
+ if fill_empty:
+ empty_barb = circ
+ else:
+ # If we don't want the empty one filled, we make a degenerate
+ # polygon that wraps back over itself
+ empty_barb = np.concatenate((circ, circ[::-1]))
+
+ barb_list = []
+ for index, angle in np.ndenumerate(angles):
+ # If the vector magnitude is too weak to draw anything, plot an
+ # empty circle instead
+ if empty_flag[index]:
+ # We can skip the transform since the circle has no preferred
+ # orientation
+ barb_list.append(empty_barb)
+ continue
+
+ poly_verts = [(endx, endy)]
+ offset = length
+
+ # Add vertices for each flag
+ for i in range(nflags[index]):
+ # The spacing that works for the barbs is a little too much for
+ # the flags, but this only occurs when we have more than 1
+ # flag.
+ if offset != length:
+ offset += spacing / 2.
+ poly_verts.extend(
+ [[endx, endy + offset],
+ [endx + full_height, endy - full_width / 2 + offset],
+ [endx, endy - full_width + offset]])
+
+ offset -= full_width + spacing
+
+ # Add vertices for each barb. These really are lines, but it works
+ # well to add 3 vertices that pull the polygon out and back down
+ # the line.
+ for i in range(nbarbs[index]):
+ poly_verts.extend(
+ [(endx, endy + offset),
+ (endx + full_height, endy + offset + full_width / 2),
+ (endx, endy + offset)])
+
+ offset -= spacing
+
+ # Add the vertices for half a barb, if needed
+ if half_barb[index]:
+ # If the half barb is the first on the staff, traditionally it
+ # is offset from the end to make it easy to distinguish from a
+ # barb with a full one
+ if offset == length:
+ poly_verts.append((endx, endy + offset))
+ offset -= 1.5 * spacing
+ poly_verts.extend(
+ [(endx, endy + offset),
+ (endx + full_height / 2, endy + offset + full_width / 4),
+ (endx, endy + offset)])
+
+ # Rotate the barb according to the angle. Making the barb first and
+ # then rotating it made the math for drawing the barb really easy.
+ # Also, the transform framework makes doing the rotation simple.
+ poly_verts = transforms.Affine2D().rotate(-angle).transform(
+ poly_verts)
+ barb_list.append(poly_verts)
+
+ return barb_list
+
+ def set_UVC(self, U, V, C=None):
+ self.u = ma.masked_invalid(U, copy=False).ravel()
+ self.v = ma.masked_invalid(V, copy=False).ravel()
+ if C is not None:
+ c = ma.masked_invalid(C, copy=False).ravel()
+ x, y, u, v, c = delete_masked_points(self.x.ravel(),
+ self.y.ravel(),
+ self.u, self.v, c)
+ _check_consistent_shapes(x, y, u, v, c)
+ else:
+ x, y, u, v = delete_masked_points(self.x.ravel(), self.y.ravel(),
+ self.u, self.v)
+ _check_consistent_shapes(x, y, u, v)
+
+ magnitude = np.hypot(u, v)
+ flags, barbs, halves, empty = self._find_tails(magnitude,
+ self.rounding,
+ **self.barb_increments)
+
+ # Get the vertices for each of the barbs
+
+ plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
+ self._length, self._pivot, self.sizes,
+ self.fill_empty, self.flip)
+ self.set_verts(plot_barbs)
+
+ # Set the color array
+ if C is not None:
+ self.set_array(c)
+
+ # Update the offsets in case the masked data changed
+ xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
+ self._offsets = xy
+ self.stale = True
+
+ def set_offsets(self, xy):
+ """
+ Set the offsets for the barb polygons. This saves the offsets passed
+ in and sets a version masked as appropriate for the existing
+ U/V data. *offsets* should be a sequence.
+
+ ACCEPTS: sequence of pairs of floats
+ """
+ self.x = xy[:, 0]
+ self.y = xy[:, 1]
+ x, y, u, v = delete_masked_points(self.x.ravel(), self.y.ravel(),
+ self.u, self.v)
+ _check_consistent_shapes(x, y, u, v)
+ xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
+ mcollections.PolyCollection.set_offsets(self, xy)
+ self.stale = True
+
+ set_offsets.__doc__ = mcollections.PolyCollection.set_offsets.__doc__
+
+ barbs_doc = _barbs_doc
diff --git a/contrib/python/matplotlib/py2/matplotlib/rcsetup.py b/contrib/python/matplotlib/py2/matplotlib/rcsetup.py
new file mode 100644
index 00000000000..f8d5ad5036d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/rcsetup.py
@@ -0,0 +1,1450 @@
+"""
+The rcsetup module contains the default values and the validation code for
+customization using matplotlib's rc settings.
+
+Each rc setting is assigned a default value and a function used to validate
+any attempted changes to that setting. The default values and validation
+functions are defined in the rcsetup module, and are used to construct the
+rcParams global object which stores the settings and is referenced throughout
+matplotlib.
+
+These default values should be consistent with the default matplotlibrc file
+that actually reflects the values given here. Any additions or deletions to the
+parameter set listed here should also be reflected in the
+:file:`matplotlibrc.template` in matplotlib's root source directory.
+"""
+from __future__ import absolute_import, division, print_function
+
+import six
+
+from collections import Iterable, Mapping
+from functools import reduce
+import operator
+import os
+import warnings
+import re
+
+from matplotlib import cbook, testing
+from matplotlib.cbook import mplDeprecation, deprecated, ls_mapper
+from matplotlib.fontconfig_pattern import parse_fontconfig_pattern
+from matplotlib.colors import is_color_like
+
+# Don't let the original cycler collide with our validating cycler
+from cycler import Cycler, cycler as ccycler
+
+
+# The capitalized forms are needed for ipython at present; this may
+# change for later versions.
+interactive_bk = ['GTK', 'GTKAgg', 'GTKCairo', 'GTK3Agg', 'GTK3Cairo',
+ 'MacOSX',
+ 'nbAgg',
+ 'Qt4Agg', 'Qt4Cairo', 'Qt5Agg', 'Qt5Cairo',
+ 'TkAgg', 'TkCairo',
+ 'WebAgg',
+ 'WX', 'WXAgg', 'WXCairo']
+non_interactive_bk = ['agg', 'cairo', 'gdk',
+ 'pdf', 'pgf', 'ps', 'svg', 'template']
+all_backends = interactive_bk + non_interactive_bk
+
+
+class ValidateInStrings(object):
+ def __init__(self, key, valid, ignorecase=False):
+ 'valid is a list of legal strings'
+ self.key = key
+ self.ignorecase = ignorecase
+
+ def func(s):
+ if ignorecase:
+ return s.lower()
+ else:
+ return s
+ self.valid = {func(k): k for k in valid}
+
+ def __call__(self, s):
+ if self.ignorecase:
+ s = s.lower()
+ if s in self.valid:
+ return self.valid[s]
+ raise ValueError('Unrecognized %s string "%s": valid strings are %s'
+ % (self.key, s, list(six.itervalues(self.valid))))
+
+
+def _listify_validator(scalar_validator, allow_stringlist=False):
+ def f(s):
+ if isinstance(s, six.string_types):
+ try:
+ return [scalar_validator(v.strip()) for v in s.split(',')
+ if v.strip()]
+ except Exception:
+ if allow_stringlist:
+ # Sometimes, a list of colors might be a single string
+ # of single-letter colornames. So give that a shot.
+ return [scalar_validator(v.strip()) for v in s if v.strip()]
+ else:
+ raise
+ # We should allow any generic sequence type, including generators,
+ # Numpy ndarrays, and pandas data structures. However, unordered
+ # sequences, such as sets, should be allowed but discouraged unless the
+ # user desires pseudorandom behavior.
+ elif isinstance(s, Iterable) and not isinstance(s, Mapping):
+ # The condition on this list comprehension will preserve the
+ # behavior of filtering out any empty strings (behavior was
+ # from the original validate_stringlist()), while allowing
+ # any non-string/text scalar values such as numbers and arrays.
+ return [scalar_validator(v) for v in s
+ if not isinstance(v, six.string_types) or v]
+ else:
+ raise ValueError("{!r} must be of type: string or non-dictionary "
+ "iterable".format(s))
+ try:
+ f.__name__ = "{}list".format(scalar_validator.__name__)
+ except AttributeError: # class instance.
+ f.__name__ = "{}List".format(type(scalar_validator).__name__)
+ f.__doc__ = scalar_validator.__doc__
+ return f
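+
+# Sketch of what the wrapper above produces (hypothetical call): a validator
+# built from validate_float accepts both the rc-file string "1.0, 2.5" and the
+# sequence [1.0, 2.5], returning [1.0, 2.5] in either case.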
+
+
+def validate_any(s):
+ return s
+validate_anylist = _listify_validator(validate_any)
+
+
+def validate_path_exists(s):
+ """If s is a path, return s, else False"""
+ if s is None:
+ return None
+ if os.path.exists(s):
+ return s
+ else:
+ raise RuntimeError('"%s" should be a path but it does not exist' % s)
+
+
+def validate_bool(b):
+ """Convert b to a boolean or raise"""
+ if isinstance(b, six.string_types):
+ b = b.lower()
+ if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
+ return True
+ elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
+ return False
+ else:
+ raise ValueError('Could not convert "%s" to boolean' % b)
+
+
+def validate_bool_maybe_none(b):
+ 'Convert b to a boolean or raise'
+ if isinstance(b, six.string_types):
+ b = b.lower()
+ if b is None or b == 'none':
+ return None
+ if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
+ return True
+ elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
+ return False
+ else:
+ raise ValueError('Could not convert "%s" to boolean' % b)
+
+
+def deprecate_axes_hold(value):
+ if value is None:
+ return None # converted to True where accessed in figure.py,
+ # axes/_base.py
+ warnings.warn("axes.hold is deprecated, will be removed in 3.0",
+ mplDeprecation)
+ return validate_bool(value)
+
+
+def validate_float(s):
+ """convert s to float or raise"""
+ try:
+ return float(s)
+ except ValueError:
+ raise ValueError('Could not convert "%s" to float' % s)
+validate_floatlist = _listify_validator(validate_float)
+
+
+def validate_float_or_None(s):
+ """convert s to float, None or raise"""
+ # values directly from the rc file can only be strings,
+ # so we need to recognize the string "None" and convert
+ # it into the object. We will be case-sensitive here to
+ # avoid confusion between string values of 'none', which
+ # can be a valid string value for some other parameters.
+ if s is None or s == 'None':
+ return None
+ try:
+ return float(s)
+ except ValueError:
+ raise ValueError('Could not convert "%s" to float or None' % s)
+
+
+def validate_string_or_None(s):
+ """convert s to string or raise"""
+ if s is None:
+ return None
+ try:
+ return validate_string(s)
+ except ValueError:
+ raise ValueError('Could not convert "%s" to string' % s)
+
+
+def validate_axisbelow(s):
+ try:
+ return validate_bool(s)
+ except ValueError:
+ if isinstance(s, six.string_types):
+ s = s.lower()
+ if s.startswith('line'):
+ return 'line'
+ raise ValueError('%s cannot be interpreted as'
+ ' True, False, or "line"' % s)
+
+
+def validate_dpi(s):
+ """confirm s is string 'figure' or convert s to float or raise"""
+ if s == 'figure':
+ return s
+ try:
+ return float(s)
+ except ValueError:
+ raise ValueError('"%s" is not string "figure" or'
+ ' could not convert "%s" to float' % (s, s))
+
+
+def validate_int(s):
+ """convert s to int or raise"""
+ try:
+ return int(s)
+ except ValueError:
+ raise ValueError('Could not convert "%s" to int' % s)
+
+
+def validate_int_or_None(s):
+ """if not None, tries to validate as an int"""
+ if s == 'None':
+ s = None
+ if s is None:
+ return None
+ try:
+ return int(s)
+ except ValueError:
+ raise ValueError('Could not convert "%s" to int' % s)
+
+
+def validate_fonttype(s):
+ """
+ confirm that this is a Postscript or PDF font type that we know how to
+ convert to
+ """
+ fonttypes = {'type3': 3,
+ 'truetype': 42}
+ try:
+ fonttype = validate_int(s)
+ except ValueError:
+ try:
+ return fonttypes[s.lower()]
+ except KeyError:
+ raise ValueError(
+ 'Supported Postscript/PDF font types are %s' % list(fonttypes))
+ else:
+ if fonttype not in six.itervalues(fonttypes):
+ raise ValueError(
+ 'Supported Postscript/PDF font types are %s' %
+ list(six.itervalues(fonttypes)))
+ return fonttype
+
+
+_validate_standard_backends = ValidateInStrings(
+ 'backend', all_backends, ignorecase=True)
+
+
+def validate_backend(s):
+ if s.startswith('module://'):
+ return s
+ else:
+ return _validate_standard_backends(s)
+
+
+def validate_qt4(s):
+ if s is None:
+ return None
+ return ValidateInStrings("backend.qt4", ['PyQt4', 'PySide', 'PyQt4v2'])(s)
+
+
+def validate_qt5(s):
+ if s is None:
+ return None
+ return ValidateInStrings("backend.qt5", ['PyQt5', 'PySide2'])(s)
+
+
+def validate_toolbar(s):
+ validator = ValidateInStrings(
+ 'toolbar',
+ ['None', 'toolbar2', 'toolmanager'],
+ ignorecase=True)
+ return validator(s)
+
+
+_seq_err_msg = ('You must supply exactly {n} values, you provided {num} '
+ 'values: {s}')
+
+_str_err_msg = ('You must supply exactly {n} comma-separated values, you '
+ 'provided {num} comma-separated values: {s}')
+
+
+class validate_nseq_float(object):
+ def __init__(self, n=None, allow_none=False):
+ self.n = n
+ self.allow_none = allow_none
+
+ def __call__(self, s):
+ """return a seq of n floats or raise"""
+ if isinstance(s, six.string_types):
+ s = [x.strip() for x in s.split(',')]
+ err_msg = _str_err_msg
+ else:
+ err_msg = _seq_err_msg
+
+ if self.n is not None and len(s) != self.n:
+ raise ValueError(err_msg.format(n=self.n, num=len(s), s=s))
+
+ try:
+ return [float(val)
+ if not self.allow_none or val is not None
+ else val
+ for val in s]
+ except ValueError:
+ raise ValueError('Could not convert all entries to floats')
+
+
+class validate_nseq_int(object):
+ def __init__(self, n=None):
+ self.n = n
+
+ def __call__(self, s):
+ """return a seq of n ints or raise"""
+ if isinstance(s, six.string_types):
+ s = [x.strip() for x in s.split(',')]
+ err_msg = _str_err_msg
+ else:
+ err_msg = _seq_err_msg
+
+ if self.n is not None and len(s) != self.n:
+ raise ValueError(err_msg.format(n=self.n, num=len(s), s=s))
+
+ try:
+ return [int(val) for val in s]
+ except ValueError:
+ raise ValueError('Could not convert all entries to ints')
+
+
+def validate_color_or_inherit(s):
+ 'return a valid color arg'
+ if s == 'inherit':
+ return s
+ return validate_color(s)
+
+
+def validate_color_or_auto(s):
+ if s == 'auto':
+ return s
+ return validate_color(s)
+
+
+def validate_color_for_prop_cycle(s):
+ # Special-case the N-th color cycle syntax; this obviously cannot
+ # go in the color cycle.
+ if isinstance(s, bytes):
+ match = re.match(b'^C[0-9]$', s)
+ if match is not None:
+ raise ValueError('Can not put cycle reference ({cn!r}) in '
+ 'prop_cycler'.format(cn=s))
+ elif isinstance(s, six.string_types):
+ match = re.match('^C[0-9]$', s)
+ if match is not None:
+ raise ValueError('Can not put cycle reference ({cn!r}) in '
+ 'prop_cycler'.format(cn=s))
+ return validate_color(s)
+
+
+def validate_color(s):
+ 'return a valid color arg'
+ try:
+ if s.lower() == 'none':
+ return 'none'
+ except AttributeError:
+ pass
+
+ if isinstance(s, six.string_types):
+ if len(s) == 6 or len(s) == 8:
+ stmp = '#' + s
+ if is_color_like(stmp):
+ return stmp
+
+ if is_color_like(s):
+ return s
+
+ # If it is still valid, it must be a tuple.
+ colorarg = s
+ msg = ''
+ if s.find(',') >= 0:
+ # get rid of grouping symbols
+ stmp = ''.join([c for c in s if c.isdigit() or c == '.' or c == ','])
+ vals = stmp.split(',')
+ if len(vals) not in [3, 4]:
+ msg = '\nColor tuples must be of length 3 or 4'
+ else:
+ try:
+ colorarg = [float(val) for val in vals]
+ except ValueError:
+ msg = '\nCould not convert all entries to floats'
+
+ if not msg and is_color_like(colorarg):
+ return colorarg
+
+ raise ValueError('%s does not look like a color arg%s' % (s, msg))
+
+
+validate_colorlist = _listify_validator(validate_color, allow_stringlist=True)
+validate_colorlist.__doc__ = 'return a list of colorspecs'
+
+def validate_string(s):
+ if isinstance(s, (str, six.text_type)):
+ # Always leave str as str and unicode as unicode
+ return s
+ else:
+ return str(s)
+
+validate_stringlist = _listify_validator(str)
+validate_stringlist.__doc__ = 'return a list'
+
+validate_orientation = ValidateInStrings(
+ 'orientation', ['landscape', 'portrait'])
+
+
+def validate_aspect(s):
+ if s in ('auto', 'equal'):
+ return s
+ try:
+ return float(s)
+ except ValueError:
+ raise ValueError('not a valid aspect specification')
+
+
+def validate_fontsize(s):
+ fontsizes = ['xx-small', 'x-small', 'small', 'medium', 'large',
+ 'x-large', 'xx-large', 'smaller', 'larger']
+ if isinstance(s, six.string_types):
+ s = s.lower()
+ if s in fontsizes:
+ return s
+ try:
+ return float(s)
+ except ValueError:
+ raise ValueError("%s is not a valid font size. Valid font sizes "
+ "are %s." % (s, ", ".join(fontsizes)))
+
+
+validate_fontsizelist = _listify_validator(validate_fontsize)
+
+
+def validate_font_properties(s):
+ parse_fontconfig_pattern(s)
+ return s
+
+
+validate_fontset = ValidateInStrings(
+ 'fontset',
+ ['dejavusans', 'dejavuserif', 'cm', 'stix', 'stixsans', 'custom'])
+
+validate_mathtext_default = ValidateInStrings(
+ 'default',
+ "rm cal it tt sf bf default bb frak circled scr regular".split())
+
+validate_verbose = ValidateInStrings(
+ 'verbose',
+ ['silent', 'helpful', 'debug', 'debug-annoying'])
+
+_validate_alignment = ValidateInStrings(
+ 'alignment',
+ ['center', 'top', 'bottom', 'baseline',
+ 'center_baseline'])
+
+def validate_whiskers(s):
+ if s == 'range':
+ return 'range'
+ else:
+ try:
+ v = validate_nseq_float(2)(s)
+ return v
+ except (TypeError, ValueError):
+ try:
+ v = float(s)
+ return v
+ except ValueError:
+ raise ValueError("Not a valid whisker value ['range', float, "
+ "(float, float)]")
+
+
+def update_savefig_format(value):
+ # The old savefig.extension could also have a value of "auto", but
+ # the new savefig.format does not. We need to fix this here.
+ value = validate_string(value)
+ if value == 'auto':
+ value = 'png'
+ return value
+
+
+validate_ps_papersize = ValidateInStrings(
+ 'ps_papersize',
+ ['auto', 'letter', 'legal', 'ledger',
+ 'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10',
+ 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10',
+ ], ignorecase=True)
+
+
+def validate_ps_distiller(s):
+ if isinstance(s, six.string_types):
+ s = s.lower()
+ if s in ('none', None):
+ return None
+ elif s in ('false', False):
+ return False
+ elif s in ('ghostscript', 'xpdf'):
+ return s
+ else:
+ raise ValueError('matplotlibrc ps.usedistiller must either be none, '
+ 'ghostscript or xpdf')
+
+validate_joinstyle = ValidateInStrings('joinstyle',
+ ['miter', 'round', 'bevel'],
+ ignorecase=True)
+validate_joinstylelist = _listify_validator(validate_joinstyle)
+
+validate_capstyle = ValidateInStrings('capstyle',
+ ['butt', 'round', 'projecting'],
+ ignorecase=True)
+validate_capstylelist = _listify_validator(validate_capstyle)
+
+validate_fillstyle = ValidateInStrings('markers.fillstyle',
+ ['full', 'left', 'right', 'bottom',
+ 'top', 'none'])
+validate_fillstylelist = _listify_validator(validate_fillstyle)
+
+_validate_negative_linestyle = ValidateInStrings('negative_linestyle',
+ ['solid', 'dashed'],
+ ignorecase=True)
+
+
+@deprecated('2.1',
+ addendum=(" See 'validate_negative_linestyle_legacy' " +
+ "deprecation warning for more information."))
+def validate_negative_linestyle(s):
+ return _validate_negative_linestyle(s)
+
+
+@deprecated('2.1',
+ addendum=(" The 'contour.negative_linestyle' rcParam now " +
+ "follows the same validation as the other rcParams " +
+ "that are related to line style."))
+def validate_negative_linestyle_legacy(s):
+ try:
+ res = validate_negative_linestyle(s)
+ return res
+ except ValueError:
+ dashes = validate_nseq_float(2)(s)
+ return (0, dashes) # (offset, (solid, blank))
+
+
+validate_legend_loc = ValidateInStrings(
+ 'legend_loc',
+ ['best',
+ 'upper right',
+ 'upper left',
+ 'lower left',
+ 'lower right',
+ 'right',
+ 'center left',
+ 'center right',
+ 'lower center',
+ 'upper center',
+ 'center'], ignorecase=True)
+
+
+def validate_svg_fonttype(s):
+ if s in ["none", "path"]:
+ return s
+ if s == "svgfont":
+ cbook.warn_deprecated(
+ "2.2", "'svgfont' support for svg.fonttype is deprecated.")
+ return s
+ raise ValueError("Unrecognized svg.fonttype string '{}'; "
+ "valid strings are 'none', 'path'".format(s))
+
+
+def validate_hinting(s):
+ if s in (True, False):
+ return s
+ if s.lower() in ('auto', 'native', 'either', 'none'):
+ return s.lower()
+ raise ValueError("hinting should be 'auto', 'native', 'either' or 'none'")
+
+validate_pgf_texsystem = ValidateInStrings('pgf.texsystem',
+ ['xelatex', 'lualatex', 'pdflatex'])
+
+validate_movie_writer = ValidateInStrings('animation.writer',
+ ['ffmpeg', 'ffmpeg_file',
+ 'avconv', 'avconv_file',
+ 'imagemagick', 'imagemagick_file',
+ 'html'])
+
+validate_movie_frame_fmt = ValidateInStrings('animation.frame_format',
+ ['png', 'jpeg', 'tiff', 'raw', 'rgba'])
+
+validate_axis_locator = ValidateInStrings('major', ['minor', 'both', 'major'])
+
+validate_movie_html_fmt = ValidateInStrings('animation.html',
+ ['html5', 'jshtml', 'none'])
+
+def validate_bbox(s):
+ if isinstance(s, six.string_types):
+ s = s.lower()
+ if s == 'tight':
+ return s
+ if s == 'standard':
+ return None
+ raise ValueError("bbox should be 'tight' or 'standard'")
+ elif s is not None:
+ # Backwards compatibility. None is equivalent to 'standard'.
+ raise ValueError("bbox should be 'tight' or 'standard'")
+ return s
+
+def validate_sketch(s):
+ if isinstance(s, six.string_types):
+ s = s.lower()
+ if s == 'none' or s is None:
+ return None
+ if isinstance(s, six.string_types):
+ result = tuple([float(v.strip()) for v in s.split(',')])
+ elif isinstance(s, (list, tuple)):
+ result = tuple([float(v) for v in s])
+ else:
+ raise ValueError(
+ "path.sketch must be a tuple (scale, length, randomness)")
+ if len(result) != 3:
+ raise ValueError(
+ "path.sketch must be a tuple (scale, length, randomness)")
+ return result
+
+class ValidateInterval(object):
+ """
+ Value must be in interval
+ """
+ def __init__(self, vmin, vmax, closedmin=True, closedmax=True):
+ self.vmin = vmin
+ self.vmax = vmax
+ self.cmin = closedmin
+ self.cmax = closedmax
+
+ def __call__(self, s):
+ try:
+ s = float(s)
+ except ValueError:
+ raise RuntimeError('Value must be a float; found "%s"' % s)
+
+ if self.cmin and s < self.vmin:
+ raise RuntimeError('Value must be >= %f; found "%f"' %
+ (self.vmin, s))
+ elif not self.cmin and s <= self.vmin:
+ raise RuntimeError('Value must be > %f; found "%f"' %
+ (self.vmin, s))
+
+ if self.cmax and s > self.vmax:
+ raise RuntimeError('Value must be <= %f; found "%f"' %
+ (self.vmax, s))
+ elif not self.cmax and s >= self.vmax:
+ raise RuntimeError('Value must be < %f; found "%f"' %
+ (self.vmax, s))
+ return s
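+
+# Example sketch (hypothetical use): ValidateInterval(0, 1)(0.5) returns 0.5,
+# while values outside the closed interval [0, 1] raise RuntimeError.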
+
+validate_grid_axis = ValidateInStrings('axes.grid.axis', ['x', 'y', 'both'])
+
+
+def validate_hatch(s):
+ """
+ Validate a hatch pattern.
+ A hatch pattern string can have any sequence of the following
+ characters: ``\\ / | - + * . x o O``.
+
+ """
+ if not isinstance(s, six.string_types):
+ raise ValueError("Hatch pattern must be a string")
+ unknown = set(s) - {'\\', '/', '|', '-', '+', '*', '.', 'x', 'o', 'O'}
+ if unknown:
+ raise ValueError("Unknown hatch symbol(s): %s" % list(unknown))
+ return s
+validate_hatchlist = _listify_validator(validate_hatch)
+validate_dashlist = _listify_validator(validate_nseq_float(allow_none=True))
+
+_prop_validators = {
+ 'color': _listify_validator(validate_color_for_prop_cycle,
+ allow_stringlist=True),
+ 'linewidth': validate_floatlist,
+ 'linestyle': validate_stringlist,
+ 'facecolor': validate_colorlist,
+ 'edgecolor': validate_colorlist,
+ 'joinstyle': validate_joinstylelist,
+ 'capstyle': validate_capstylelist,
+ 'fillstyle': validate_fillstylelist,
+ 'markerfacecolor': validate_colorlist,
+ 'markersize': validate_floatlist,
+ 'markeredgewidth': validate_floatlist,
+ 'markeredgecolor': validate_colorlist,
+ 'alpha': validate_floatlist,
+ 'marker': validate_stringlist,
+ 'hatch': validate_hatchlist,
+ 'dashes': validate_dashlist,
+ }
+_prop_aliases = {
+ 'c': 'color',
+ 'lw': 'linewidth',
+ 'ls': 'linestyle',
+ 'fc': 'facecolor',
+ 'ec': 'edgecolor',
+ 'mfc': 'markerfacecolor',
+ 'mec': 'markeredgecolor',
+ 'mew': 'markeredgewidth',
+ 'ms': 'markersize',
+ }
+
+
+def cycler(*args, **kwargs):
+ """
+ Creates a `~cycler.Cycler` object much like :func:`cycler.cycler`,
+ but includes input validation.
+
+ Call signatures::
+
+ cycler(cycler)
+ cycler(label=values[, label2=values2[, ...]])
+ cycler(label, values)
+
+ Form 1 copies a given `~cycler.Cycler` object.
+
+ Form 2 creates a `~cycler.Cycler` which cycles over one or more
+ properties simultaneously. If multiple properties are given, their
+ value lists must have the same length.
+
+ Form 3 creates a `~cycler.Cycler` for a single property. This form
+ exists for compatibility with the original cycler. Its use is
+ discouraged in favor of the kwarg form, i.e. ``cycler(label=values)``.
+
+ Parameters
+ ----------
+ cycler : Cycler
+ Copy constructor for Cycler.
+
+ label : str
+ The property key. Must be a valid `.Artist` property.
+ For example, 'color' or 'linestyle'. Aliases are allowed,
+ such as 'c' for 'color' and 'lw' for 'linewidth'.
+
+ values : iterable
+ Finite-length iterable of the property values. These values
+ are validated and will raise a ValueError if invalid.
+
+ Returns
+ -------
+ cycler : Cycler
+ A new :class:`~cycler.Cycler` for the given properties.
+
+ Examples
+ --------
+ Creating a cycler for a single property:
+
+ >>> c = cycler(color=['red', 'green', 'blue'])
+
+ Creating a cycler for simultaneously cycling over multiple properties
+ (e.g. red circle, green plus, blue cross):
+
+ >>> c = cycler(color=['red', 'green', 'blue'],
+ ... marker=['o', '+', 'x'])
+
+ """
+ if args and kwargs:
+ raise TypeError("cycler() can only accept positional OR keyword "
+ "arguments -- not both.")
+ elif not args and not kwargs:
+ raise TypeError("cycler() must have positional OR keyword arguments")
+
+ if len(args) == 1:
+ if not isinstance(args[0], Cycler):
+ raise TypeError("If only one positional argument given, it must "
+ "be a Cycler instance.")
+ return validate_cycler(args[0])
+ elif len(args) == 2:
+ pairs = [(args[0], args[1])]
+ elif len(args) > 2:
+ raise TypeError("No more than 2 positional arguments allowed")
+ else:
+ pairs = six.iteritems(kwargs)
+
+ validated = []
+ for prop, vals in pairs:
+ norm_prop = _prop_aliases.get(prop, prop)
+ validator = _prop_validators.get(norm_prop, None)
+ if validator is None:
+ raise TypeError("Unknown artist property: %s" % prop)
+ vals = validator(vals)
+ # We will normalize the property names as well to reduce
+ # the amount of alias handling code elsewhere.
+ validated.append((norm_prop, vals))
+
+ return reduce(operator.add, (ccycler(k, v) for k, v in validated))
+
+
+def validate_cycler(s):
+ 'return a Cycler object from a string repr or the object itself'
+ if isinstance(s, six.string_types):
+ try:
+ # TODO: We might want to rethink this...
+ # While I think I have it quite locked down,
+ # it is execution of arbitrary code without
+ # sanitation.
+ # Combine this with the possibility that rcparams
+ # might come from the internet (future plans), this
+ # could be downright dangerous.
+ # I locked it down by only having the 'cycler()' function
+ # available.
+ # UPDATE: Partly plugging a security hole.
+ # I really should have read this:
+ # http://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html
+ # We should replace this eval with a combo of PyParsing and
+ # ast.literal_eval()
+ if '.__' in s.replace(' ', ''):
+ raise ValueError("'%s' seems to have dunder methods. Raising"
+ " an exception for your safety" % s)
+ s = eval(s, {'cycler': cycler, '__builtins__': {}})
+ except BaseException as e:
+ raise ValueError("'%s' is not a valid cycler construction: %s" %
+ (s, e))
+ # Should make sure what comes from the above eval()
+ # is a Cycler object.
+ if isinstance(s, Cycler):
+ cycler_inst = s
+ else:
+ raise ValueError("object was not a string or Cycler instance: %s" % s)
+
+ unknowns = cycler_inst.keys - (set(_prop_validators) | set(_prop_aliases))
+ if unknowns:
+ raise ValueError("Unknown artist properties: %s" % unknowns)
+
+ # Not a full validation, but it'll at least normalize property names
+ # A fuller validation would require v0.10 of cycler.
+ checker = set()
+ for prop in cycler_inst.keys:
+ norm_prop = _prop_aliases.get(prop, prop)
+ if norm_prop != prop and norm_prop in cycler_inst.keys:
+ raise ValueError("Cannot specify both '{0}' and alias '{1}'"
+ " in the same prop_cycle".format(norm_prop, prop))
+ if norm_prop in checker:
+ raise ValueError("Another property was already aliased to '{0}'."
+ " Collision normalizing '{1}'.".format(norm_prop,
+ prop))
+ checker.update([norm_prop])
+
+ # This is just an extra-careful check, just in case there is some
+ # edge-case I haven't thought of.
+ assert len(checker) == len(cycler_inst.keys)
+
+ # Now, it should be safe to mutate this cycler
+ for prop in cycler_inst.keys:
+ norm_prop = _prop_aliases.get(prop, prop)
+ cycler_inst.change_key(prop, norm_prop)
+
+ for key, vals in cycler_inst.by_key().items():
+ _prop_validators[key](vals)
+
+ return cycler_inst
+
+
+def validate_hist_bins(s):
+ if isinstance(s, six.string_types) and s == 'auto':
+ return s
+ try:
+ return int(s)
+ except (TypeError, ValueError):
+ pass
+
+ try:
+ return validate_floatlist(s)
+ except ValueError:
+ pass
+
+ raise ValueError("'hist.bins' must be 'auto', an int or " +
+ "a sequence of floats")
+
+def validate_animation_writer_path(p):
+ # Make sure it's a string and then figure out if the animations
+ # are already loaded and reset the writers (which will validate
+ # the path on next call)
+ if not isinstance(p, six.string_types):
+ raise ValueError("path must be a (unicode) string")
+ from sys import modules
+ # set dirty, so that the next call to the registry will re-evaluate
+ # the state.
+ # only set dirty if already loaded. If not loaded, the load will
+ # trigger the checks.
+ if "matplotlib.animation" in modules:
+ modules["matplotlib.animation"].writers.set_dirty()
+ return p
+
+def validate_webagg_address(s):
+ if s is not None:
+ import socket
+ try:
+ socket.inet_aton(s)
+ except socket.error as e:
+ raise ValueError("'webagg.address' is not a valid IP address")
+ return s
+ raise ValueError("'webagg.address' is not a valid IP address")
+
+# A validator dedicated to the named line styles, based on the items in
+# ls_mapper, and a list of possible strings read from Line2D.set_linestyle
+_validate_named_linestyle = ValidateInStrings('linestyle',
+ list(six.iterkeys(ls_mapper)) +
+ list(six.itervalues(ls_mapper)) +
+ ['None', 'none', ' ', ''],
+ ignorecase=True)
+
+
+def _validate_linestyle(ls):
+ """
+ A validator for all possible line styles, the named ones *and*
+ the on-off ink sequences.
+ """
+ # Look first for a valid named line style, like '--' or 'solid'
+ if isinstance(ls, six.string_types):
+ try:
+ return _validate_named_linestyle(ls)
+ except (UnicodeDecodeError, KeyError):
+ # On Python 2, string-like *ls*, like for example
+ # 'solid'.encode('utf-16'), may raise a unicode error.
+ raise ValueError("the linestyle string {!r} is not a valid "
+ "string.".format(ls))
+
+ if isinstance(ls, (bytes, bytearray)):
+ # On Python 2, a string-like *ls* should already have lead to a
+ # successful return or to raising an exception. On Python 3, we have
+ # to manually raise an exception in the case of a byte-like *ls*.
+ # Otherwise, if *ls* is of even-length, it will be passed to the
+ # instance of validate_nseq_float, which will return an absurd on-off
+ # ink sequence...
+ raise ValueError("linestyle {!r} neither looks like an on-off ink "
+ "sequence nor a valid string.".format(ls))
+
+ # Look for an on-off ink sequence (in points) *of even length*.
+ # Offset is set to None.
+ try:
+ if len(ls) % 2 != 0:
+ raise ValueError("the linestyle sequence {!r} is not of even "
+ "length.".format(ls))
+
+ return (None, validate_nseq_float()(ls))
+
+ except (ValueError, TypeError):
+ # TypeError can be raised inside the instance of validate_nseq_float,
+ # by wrong types passed to float(), like NoneType.
+ raise ValueError("linestyle {!r} is not a valid on-off ink "
+ "sequence.".format(ls))
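+
+# Illustrative sketch of the two accepted forms (hypothetical calls):
+#
+#     _validate_linestyle('--')    # -> '--' via the named-style table
+#     _validate_linestyle([3, 1])  # -> (None, [3.0, 1.0]), an on-off sequence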
+
+
+# a map from key -> value, converter
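+# Illustrative pattern (this exact entry appears further down):
+#
+#     'lines.linewidth': [1.5, validate_float],   # line width in points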
+defaultParams = {
+ 'backend': ['Agg', validate_backend], # agg is certainly
+ # present
+ 'backend_fallback': [True, validate_bool], # agg is certainly present
+ 'backend.qt4': [None, validate_qt4],
+ 'backend.qt5': [None, validate_qt5],
+ 'webagg.port': [8988, validate_int],
+ 'webagg.address': ['127.0.0.1', validate_webagg_address],
+ 'webagg.open_in_browser': [True, validate_bool],
+ 'webagg.port_retries': [50, validate_int],
+ 'nbagg.transparent': [True, validate_bool],
+ 'toolbar': ['toolbar2', validate_toolbar],
+ 'datapath': [None, validate_path_exists], # handled by
+ # _get_data_path_cached
+ 'interactive': [False, validate_bool],
+ 'timezone': ['UTC', validate_string],
+
+ # the verbosity setting
+ 'verbose.level': ['silent', validate_verbose],
+ 'verbose.fileo': ['sys.stdout', validate_string],
+
+ # line props
+ 'lines.linewidth': [1.5, validate_float], # line width in points
+ 'lines.linestyle': ['-', _validate_linestyle], # solid line
+ 'lines.color': ['C0', validate_color], # first color in color cycle
+ 'lines.marker': ['None', validate_string], # marker name
+ 'lines.markeredgewidth': [1.0, validate_float],
+ 'lines.markersize': [6, validate_float], # markersize, in points
+ 'lines.antialiased': [True, validate_bool], # antialiased (no jaggies)
+ 'lines.dash_joinstyle': ['round', validate_joinstyle],
+ 'lines.solid_joinstyle': ['round', validate_joinstyle],
+ 'lines.dash_capstyle': ['butt', validate_capstyle],
+ 'lines.solid_capstyle': ['projecting', validate_capstyle],
+ 'lines.dashed_pattern': [[3.7, 1.6], validate_nseq_float(allow_none=True)],
+ 'lines.dashdot_pattern': [[6.4, 1.6, 1, 1.6],
+ validate_nseq_float(allow_none=True)],
+ 'lines.dotted_pattern': [[1, 1.65], validate_nseq_float(allow_none=True)],
+ 'lines.scale_dashes': [True, validate_bool],
+
+ # marker props
+ 'markers.fillstyle': ['full', validate_fillstyle],
+
+ ## patch props
+ 'patch.linewidth': [1.0, validate_float], # line width in points
+ 'patch.edgecolor': ['k', validate_color],
+ 'patch.force_edgecolor' : [False, validate_bool],
+ 'patch.facecolor': ['C0', validate_color], # first color in cycle
+ 'patch.antialiased': [True, validate_bool], # antialiased (no jaggies)
+
+ ## hatch props
+ 'hatch.color': ['k', validate_color],
+ 'hatch.linewidth': [1.0, validate_float],
+
+ ## Histogram properties
+ 'hist.bins': [10, validate_hist_bins],
+
+ ## Boxplot properties
+ 'boxplot.notch': [False, validate_bool],
+ 'boxplot.vertical': [True, validate_bool],
+ 'boxplot.whiskers': [1.5, validate_whiskers],
+ 'boxplot.bootstrap': [None, validate_int_or_None],
+ 'boxplot.patchartist': [False, validate_bool],
+ 'boxplot.showmeans': [False, validate_bool],
+ 'boxplot.showcaps': [True, validate_bool],
+ 'boxplot.showbox': [True, validate_bool],
+ 'boxplot.showfliers': [True, validate_bool],
+ 'boxplot.meanline': [False, validate_bool],
+
+ 'boxplot.flierprops.color': ['k', validate_color],
+ 'boxplot.flierprops.marker': ['o', validate_string],
+ 'boxplot.flierprops.markerfacecolor': ['none', validate_color_or_auto],
+ 'boxplot.flierprops.markeredgecolor': ['k', validate_color],
+ 'boxplot.flierprops.markersize': [6, validate_float],
+ 'boxplot.flierprops.linestyle': ['none', _validate_linestyle],
+ 'boxplot.flierprops.linewidth': [1.0, validate_float],
+
+ 'boxplot.boxprops.color': ['k', validate_color],
+ 'boxplot.boxprops.linewidth': [1.0, validate_float],
+ 'boxplot.boxprops.linestyle': ['-', _validate_linestyle],
+
+ 'boxplot.whiskerprops.color': ['k', validate_color],
+ 'boxplot.whiskerprops.linewidth': [1.0, validate_float],
+ 'boxplot.whiskerprops.linestyle': ['-', _validate_linestyle],
+
+ 'boxplot.capprops.color': ['k', validate_color],
+ 'boxplot.capprops.linewidth': [1.0, validate_float],
+ 'boxplot.capprops.linestyle': ['-', _validate_linestyle],
+
+ 'boxplot.medianprops.color': ['C1', validate_color],
+ 'boxplot.medianprops.linewidth': [1.0, validate_float],
+ 'boxplot.medianprops.linestyle': ['-', _validate_linestyle],
+
+ 'boxplot.meanprops.color': ['C2', validate_color],
+ 'boxplot.meanprops.marker': ['^', validate_string],
+ 'boxplot.meanprops.markerfacecolor': ['C2', validate_color],
+ 'boxplot.meanprops.markeredgecolor': ['C2', validate_color],
+ 'boxplot.meanprops.markersize': [6, validate_float],
+ 'boxplot.meanprops.linestyle': ['--', _validate_linestyle],
+ 'boxplot.meanprops.linewidth': [1.0, validate_float],
+
+ ## font props
+ 'font.family': [['sans-serif'], validate_stringlist], # used by text object
+ 'font.style': ['normal', validate_string],
+ 'font.variant': ['normal', validate_string],
+ 'font.stretch': ['normal', validate_string],
+ 'font.weight': ['normal', validate_string],
+ 'font.size': [10, validate_float], # Base font size in points
+ 'font.serif': [['DejaVu Serif', 'Bitstream Vera Serif',
+ 'Computer Modern Roman',
+ 'New Century Schoolbook', 'Century Schoolbook L',
+ 'Utopia', 'ITC Bookman', 'Bookman',
+ 'Nimbus Roman No9 L', 'Times New Roman',
+ 'Times', 'Palatino', 'Charter', 'serif'],
+ validate_stringlist],
+ 'font.sans-serif': [['DejaVu Sans', 'Bitstream Vera Sans',
+ 'Computer Modern Sans Serif',
+ 'Lucida Grande', 'Verdana', 'Geneva', 'Lucid',
+ 'Arial', 'Helvetica', 'Avant Garde', 'sans-serif'],
+ validate_stringlist],
+ 'font.cursive': [['Apple Chancery', 'Textile', 'Zapf Chancery',
+ 'Sand', 'Script MT', 'Felipa', 'cursive'],
+ validate_stringlist],
+ 'font.fantasy': [['Comic Sans MS', 'Chicago', 'Charcoal', 'Impact',
+ 'Western', 'Humor Sans', 'xkcd', 'fantasy'],
+ validate_stringlist],
+ 'font.monospace': [['DejaVu Sans Mono', 'Bitstream Vera Sans Mono',
+ 'Computer Modern Typewriter',
+ 'Andale Mono', 'Nimbus Mono L', 'Courier New',
+ 'Courier', 'Fixed', 'Terminal', 'monospace'],
+ validate_stringlist],
+
+ # text props
+ 'text.color': ['k', validate_color], # black
+ 'text.usetex': [False, validate_bool],
+ 'text.latex.unicode': [False, validate_bool],
+ 'text.latex.preamble': [[''], validate_stringlist],
+ 'text.latex.preview': [False, validate_bool],
+ 'text.dvipnghack': [None, validate_bool_maybe_none],
+ 'text.hinting': ['auto', validate_hinting],
+ 'text.hinting_factor': [8, validate_int],
+ 'text.antialiased': [True, validate_bool],
+
+ 'mathtext.cal': ['cursive', validate_font_properties],
+ 'mathtext.rm': ['sans', validate_font_properties],
+ 'mathtext.tt': ['monospace', validate_font_properties],
+ 'mathtext.it': ['sans:italic', validate_font_properties],
+ 'mathtext.bf': ['sans:bold', validate_font_properties],
+ 'mathtext.sf': ['sans', validate_font_properties],
+ 'mathtext.fontset': ['dejavusans', validate_fontset],
+ 'mathtext.default': ['it', validate_mathtext_default],
+ 'mathtext.fallback_to_cm': [True, validate_bool],
+
+ 'image.aspect': ['equal', validate_aspect], # equal, auto, a number
+ 'image.interpolation': ['nearest', validate_string],
+ 'image.cmap': ['viridis', validate_string], # one of gray, jet, etc
+ 'image.lut': [256, validate_int], # lookup table
+ 'image.origin': ['upper', validate_string], # 'upper' or 'lower'
+ 'image.resample': [True, validate_bool],
+ # Specify whether vector graphics backends will combine all images on a
+ # set of axes into a single composite image
+ 'image.composite_image': [True, validate_bool],
+
+ # contour props
+ 'contour.negative_linestyle': ['dashed', _validate_linestyle],
+ 'contour.corner_mask': [True, validate_bool],
+
+ # errorbar props
+ 'errorbar.capsize': [0, validate_float],
+
+ # axes props
+ 'axes.axisbelow': ['line', validate_axisbelow],
+ 'axes.hold': [None, deprecate_axes_hold],
+ 'axes.facecolor': ['w', validate_color], # background color; white
+ 'axes.edgecolor': ['k', validate_color], # edge color; black
+ 'axes.linewidth': [0.8, validate_float], # edge linewidth
+
+ 'axes.spines.left': [True, validate_bool], # Set visibility of axes
+ 'axes.spines.right': [True, validate_bool], # 'spines', the lines
+ 'axes.spines.bottom': [True, validate_bool], # around the chart
+ 'axes.spines.top': [True, validate_bool], # denoting data boundary
+
+ 'axes.titlesize': ['large', validate_fontsize], # fontsize of the
+ # axes title
+ 'axes.titleweight': ['normal', validate_string], # font weight of axes title
+ 'axes.titlepad': [6.0, validate_float], # pad from axes top to title in points
+ 'axes.grid': [False, validate_bool], # display grid or not
+ 'axes.grid.which': ['major', validate_axis_locator], # set whether the grid
+ # is drawn by default on
+ # 'major' ticks, 'minor'
+ # ticks, or 'both'
+ 'axes.grid.axis': ['both', validate_grid_axis], # grid type.
+ # Can be 'x', 'y', 'both'
+ 'axes.labelsize': ['medium', validate_fontsize], # fontsize of the
+ # x and y labels
+ 'axes.labelpad': [4.0, validate_float], # space between label and axis
+ 'axes.labelweight': ['normal', validate_string], # font weight of the x and y labels
+ 'axes.labelcolor': ['k', validate_color], # color of axis label
+ 'axes.formatter.limits': [[-7, 7], validate_nseq_int(2)],
+ # use scientific notation if log10
+ # of the axis range is smaller than the
+ # first or larger than the second
+ 'axes.formatter.use_locale': [False, validate_bool],
+ # Use the current locale to format ticks
+ 'axes.formatter.use_mathtext': [False, validate_bool],
+ 'axes.formatter.min_exponent': [0, validate_int], # minimum exponent to format in scientific notation
+ 'axes.formatter.useoffset': [True, validate_bool],
+ 'axes.formatter.offset_threshold': [4, validate_int],
+ 'axes.unicode_minus': [True, validate_bool],
+ # This entry can be either a cycler object or a
+ # string repr of a cycler-object, which gets eval()'ed
+ # to create the object.
+ 'axes.prop_cycle': [
+ ccycler('color',
+ ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
+ '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
+ '#bcbd22', '#17becf']),
+ validate_cycler],
+ # If 'data', axes limits are set close to the data.
+ # If 'round_numbers' axes limits are set to the nearest round numbers.
+ 'axes.autolimit_mode': [
+ 'data',
+ ValidateInStrings('autolimit_mode', ['data', 'round_numbers'])],
+ 'axes.xmargin': [0.05, ValidateInterval(0, 1,
+ closedmin=True,
+ closedmax=True)], # margin added to xaxis
+ 'axes.ymargin': [0.05, ValidateInterval(0, 1,
+ closedmin=True,
+ closedmax=True)],# margin added to yaxis
+
+ 'polaraxes.grid': [True, validate_bool], # display polar grid or
+ # not
+ 'axes3d.grid': [True, validate_bool], # display 3d grid
+
+ # scatter props
+ 'scatter.marker': ['o', validate_string],
+
+ # TODO validate that these are valid datetime format strings
+ 'date.autoformatter.year': ['%Y', validate_string],
+ 'date.autoformatter.month': ['%Y-%m', validate_string],
+ 'date.autoformatter.day': ['%Y-%m-%d', validate_string],
+ 'date.autoformatter.hour': ['%m-%d %H', validate_string],
+ 'date.autoformatter.minute': ['%d %H:%M', validate_string],
+ 'date.autoformatter.second': ['%H:%M:%S', validate_string],
+ 'date.autoformatter.microsecond': ['%M:%S.%f', validate_string],
+
+ #legend properties
+ 'legend.fancybox': [True, validate_bool],
+ 'legend.loc': ['best', validate_legend_loc],
+ # the number of points in the legend line
+ 'legend.numpoints': [1, validate_int],
+ # the number of points in the legend line for scatter
+ 'legend.scatterpoints': [1, validate_int],
+ 'legend.fontsize': ['medium', validate_fontsize],
+ # the relative size of legend markers vs. original
+ 'legend.markerscale': [1.0, validate_float],
+ 'legend.shadow': [False, validate_bool],
+ # whether or not to draw a frame around legend
+ 'legend.frameon': [True, validate_bool],
+ # alpha value of the legend frame
+ 'legend.framealpha': [0.8, validate_float_or_None],
+
+ ## the following dimensions are in fraction of the font size
+ 'legend.borderpad': [0.4, validate_float], # units are fontsize
+ # the vertical space between the legend entries
+ 'legend.labelspacing': [0.5, validate_float],
+ # the length of the legend lines
+ 'legend.handlelength': [2., validate_float],
+ # the length of the legend lines
+ 'legend.handleheight': [0.7, validate_float],
+ # the space between the legend line and legend text
+ 'legend.handletextpad': [.8, validate_float],
+ # the border between the axes and legend edge
+ 'legend.borderaxespad': [0.5, validate_float],
+ # the border between the axes and legend edge
+ 'legend.columnspacing': [2., validate_float],
+ 'legend.facecolor': ['inherit', validate_color_or_inherit],
+ 'legend.edgecolor': ['0.8', validate_color_or_inherit],
+
+ # tick properties
+ 'xtick.top': [False, validate_bool], # draw ticks on the top side
+ 'xtick.bottom': [True, validate_bool], # draw ticks on the bottom side
+ 'xtick.labeltop': [False, validate_bool], # draw label on the top
+ 'xtick.labelbottom': [True, validate_bool], # draw label on the bottom
+ 'xtick.major.size': [3.5, validate_float], # major xtick size in points
+ 'xtick.minor.size': [2, validate_float], # minor xtick size in points
+ 'xtick.major.width': [0.8, validate_float], # major xtick width in points
+ 'xtick.minor.width': [0.6, validate_float], # minor xtick width in points
+ 'xtick.major.pad': [3.5, validate_float], # distance to label in points
+ 'xtick.minor.pad': [3.4, validate_float], # distance to label in points
+ 'xtick.color': ['k', validate_color], # color of the xtick labels
+ 'xtick.minor.visible': [False, validate_bool], # visibility of the x axis minor ticks
+ 'xtick.minor.top': [True, validate_bool], # draw x axis top minor ticks
+ 'xtick.minor.bottom': [True, validate_bool], # draw x axis bottom minor ticks
+ 'xtick.major.top': [True, validate_bool], # draw x axis top major ticks
+ 'xtick.major.bottom': [True, validate_bool], # draw x axis bottom major ticks
+
+ # fontsize of the xtick labels
+ 'xtick.labelsize': ['medium', validate_fontsize],
+ 'xtick.direction': ['out', validate_string], # direction of xticks
+ 'xtick.alignment': ["center", _validate_alignment],
+
+ 'ytick.left': [True, validate_bool], # draw ticks on the left side
+ 'ytick.right': [False, validate_bool], # draw ticks on the right side
+ 'ytick.labelleft': [True, validate_bool], # draw tick labels on the left side
+ 'ytick.labelright': [False, validate_bool], # draw tick labels on the right side
+ 'ytick.major.size': [3.5, validate_float], # major ytick size in points
+ 'ytick.minor.size': [2, validate_float], # minor ytick size in points
+ 'ytick.major.width': [0.8, validate_float], # major ytick width in points
+ 'ytick.minor.width': [0.6, validate_float], # minor ytick width in points
+ 'ytick.major.pad': [3.5, validate_float], # distance to label in points
+ 'ytick.minor.pad': [3.4, validate_float], # distance to label in points
+ 'ytick.color': ['k', validate_color], # color of the ytick labels
+ 'ytick.minor.visible': [False, validate_bool], # visibility of the y axis minor ticks
+ 'ytick.minor.left': [True, validate_bool], # draw y axis left minor ticks
+ 'ytick.minor.right': [True, validate_bool], # draw y axis right minor ticks
+ 'ytick.major.left': [True, validate_bool], # draw y axis left major ticks
+ 'ytick.major.right': [True, validate_bool], # draw y axis right major ticks
+
+ # fontsize of the ytick labels
+ 'ytick.labelsize': ['medium', validate_fontsize],
+ 'ytick.direction': ['out', validate_string], # direction of yticks
+ 'ytick.alignment': ["center_baseline", _validate_alignment],
+
+
+ 'grid.color': ['#b0b0b0', validate_color], # grid color
+ 'grid.linestyle': ['-', _validate_linestyle], # solid
+ 'grid.linewidth': [0.8, validate_float], # in points
+ 'grid.alpha': [1.0, validate_float],
+
+
+ ## figure props
+ # figure title
+ 'figure.titlesize': ['large', validate_fontsize],
+ 'figure.titleweight': ['normal', validate_string],
+
+ # figure size in inches: width by height
+ 'figure.figsize': [[6.4, 4.8], validate_nseq_float(2)],
+ 'figure.dpi': [100, validate_float], # DPI
+ 'figure.facecolor': ['w', validate_color], # facecolor; white
+ 'figure.edgecolor': ['w', validate_color], # edgecolor; white
+ 'figure.frameon': [True, validate_bool],
+ 'figure.autolayout': [False, validate_bool],
+ 'figure.max_open_warning': [20, validate_int],
+
+ 'figure.subplot.left': [0.125, ValidateInterval(0, 1, closedmin=True,
+ closedmax=True)],
+ 'figure.subplot.right': [0.9, ValidateInterval(0, 1, closedmin=True,
+ closedmax=True)],
+ 'figure.subplot.bottom': [0.11, ValidateInterval(0, 1, closedmin=True,
+ closedmax=True)],
+ 'figure.subplot.top': [0.88, ValidateInterval(0, 1, closedmin=True,
+ closedmax=True)],
+ 'figure.subplot.wspace': [0.2, ValidateInterval(0, 1, closedmin=True,
+ closedmax=False)],
+ 'figure.subplot.hspace': [0.2, ValidateInterval(0, 1, closedmin=True,
+ closedmax=False)],
+
+ # do constrained_layout.
+ 'figure.constrained_layout.use': [False, validate_bool],
+ # wspace and hspace are fraction of adjacent subplots to use
+ # for space. Much smaller than above because we don't need
+ # room for the text.
+ 'figure.constrained_layout.hspace': [0.02, ValidateInterval(
+ 0, 1, closedmin=True, closedmax=False)],
+ 'figure.constrained_layout.wspace': [0.02, ValidateInterval(
+ 0, 1, closedmin=True, closedmax=False)],
+ # This is a buffer around the axes in inches. This is 3pts.
+ 'figure.constrained_layout.h_pad': [0.04167, validate_float],
+ 'figure.constrained_layout.w_pad': [0.04167, validate_float],
+
+ ## Saving figure's properties
+ 'savefig.dpi': ['figure', validate_dpi], # DPI
+ 'savefig.facecolor': ['w', validate_color], # facecolor; white
+ 'savefig.edgecolor': ['w', validate_color], # edgecolor; white
+ 'savefig.frameon': [True, validate_bool],
+ 'savefig.orientation': ['portrait', validate_orientation], # orientation of
+ # the saved figure
+ 'savefig.jpeg_quality': [95, validate_int],
+ # value checked by backend at runtime
+ 'savefig.format': ['png', update_savefig_format],
+ # options are 'tight', or 'standard'. 'standard' validates to None.
+ 'savefig.bbox': ['standard', validate_bbox],
+ 'savefig.pad_inches': [0.1, validate_float],
+ # default directory in savefig dialog box
+ 'savefig.directory': ['~', validate_string],
+ 'savefig.transparent': [False, validate_bool],
+
+ # Maintain shell focus for TkAgg
+ 'tk.window_focus': [False, validate_bool],
+
+ # Set the papersize/type
+ 'ps.papersize': ['letter', validate_ps_papersize],
+ 'ps.useafm': [False, validate_bool], # use AFM fonts; results in smaller files
+ # use ghostscript or xpdf to distill ps output
+ 'ps.usedistiller': [False, validate_ps_distiller],
+ 'ps.distiller.res': [6000, validate_int], # dpi
+ 'ps.fonttype': [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
+ # compression level from 0 to 9; 0 to disable
+ 'pdf.compression': [6, validate_int],
+ # ignore any color-setting commands from the frontend
+ 'pdf.inheritcolor': [False, validate_bool],
+ # use only the 14 PDF core fonts embedded in every PDF viewing application
+ 'pdf.use14corefonts': [False, validate_bool],
+ 'pdf.fonttype': [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
+
+ 'pgf.debug': [False, validate_bool], # output debug information
+ # choose latex application for creating pdf files (xelatex/lualatex)
+ 'pgf.texsystem': ['xelatex', validate_pgf_texsystem],
+ # use matplotlib rc settings for font configuration
+ 'pgf.rcfonts': [True, validate_bool],
+ # provide a custom preamble for the latex process
+ 'pgf.preamble': [[''], validate_stringlist],
+
+ # write raster image data directly into the svg file
+ 'svg.image_inline': [True, validate_bool],
+ # True to save all characters as paths in the SVG
+ 'svg.fonttype': ['path', validate_svg_fonttype],
+ 'svg.hashsalt': [None, validate_string_or_None],
+
+ # set this when you want to generate hardcopy docstring
+ 'docstring.hardcopy': [False, validate_bool],
+ # where the plugin directory is located
+ 'plugins.directory': ['.matplotlib_plugins', validate_string],
+
+ 'path.simplify': [True, validate_bool],
+ 'path.simplify_threshold': [1.0 / 9.0, ValidateInterval(0.0, 1.0)],
+ 'path.snap': [True, validate_bool],
+ 'path.sketch': [None, validate_sketch],
+ 'path.effects': [[], validate_any],
+ 'agg.path.chunksize': [0, validate_int], # 0 to disable chunking;
+
+ # key-mappings (multi-character mappings should be a list/tuple)
+ 'keymap.fullscreen': [('f', 'ctrl+f'), validate_stringlist],
+ 'keymap.home': [['h', 'r', 'home'], validate_stringlist],
+ 'keymap.back': [['left', 'c', 'backspace'], validate_stringlist],
+ 'keymap.forward': [['right', 'v'], validate_stringlist],
+ 'keymap.pan': [['p'], validate_stringlist],
+ 'keymap.zoom': [['o'], validate_stringlist],
+ 'keymap.save': [['s', 'ctrl+s'], validate_stringlist],
+ 'keymap.quit': [['ctrl+w', 'cmd+w', 'q'], validate_stringlist],
+ 'keymap.quit_all': [['W', 'cmd+W', 'Q'], validate_stringlist],
+ 'keymap.grid': [['g'], validate_stringlist],
+ 'keymap.grid_minor': [['G'], validate_stringlist],
+ 'keymap.yscale': [['l'], validate_stringlist],
+ 'keymap.xscale': [['k', 'L'], validate_stringlist],
+ 'keymap.all_axes': [['a'], validate_stringlist],
+
+ # sample data
+ 'examples.directory': ['', validate_string],
+
+ # Animation settings
+ 'animation.html': ['none', validate_movie_html_fmt],
+ # Limit, in MB, of size of base64 encoded animation in HTML
+ # (i.e. IPython notebook)
+ 'animation.embed_limit': [20, validate_float],
+ 'animation.writer': ['ffmpeg', validate_movie_writer],
+ 'animation.codec': ['h264', validate_string],
+ 'animation.bitrate': [-1, validate_int],
+ # Controls image format when frames are written to disk
+ 'animation.frame_format': ['png', validate_movie_frame_fmt],
+ # Additional arguments for HTML writer
+ 'animation.html_args': [[], validate_stringlist],
+ # Path to ffmpeg binary. If just binary name, subprocess uses $PATH.
+ 'animation.ffmpeg_path': ['ffmpeg', validate_animation_writer_path],
+ # Additional arguments for ffmpeg movie writer (using pipes)
+ 'animation.ffmpeg_args': [[], validate_stringlist],
+ # Path to AVConv binary. If just binary name, subprocess uses $PATH.
+ 'animation.avconv_path': ['avconv', validate_animation_writer_path],
+ # Additional arguments for avconv movie writer (using pipes)
+ 'animation.avconv_args': [[], validate_stringlist],
+ # Path to convert binary. If just binary name, subprocess uses $PATH.
+ 'animation.convert_path': ['convert', validate_animation_writer_path],
+ # Additional arguments for convert movie writer (using pipes)
+ 'animation.convert_args': [[], validate_stringlist],
+
+ # Classic (pre 2.0) compatibility mode
+ # This is used for things that are hard to make backward compatible
+ # with a sane rcParam alone. This does *not* turn on classic mode
+ # altogether. For that use `matplotlib.style.use('classic')`.
+ '_internal.classic_mode': [False, validate_bool]
+}
+
+
+if __name__ == '__main__':
+ rc = defaultParams
+ rc['datapath'][0] = '/'
+ for key in rc:
+ if not rc[key][1](rc[key][0]) == rc[key][0]:
+ print("%s: %s != %s" % (key, rc[key][1](rc[key][0]), rc[key][0]))
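+
+# Illustrative sketch (editor's note, not part of the upstream module): each
+# entry above pairs a default value with its validator, which coerces and
+# checks values coming from a matplotlibrc file, roughly as follows:
+#
+#     >>> default, validator = defaultParams['lines.linewidth']
+#     >>> default, validator('2.5')
+#     (1.5, 2.5)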
diff --git a/contrib/python/matplotlib/py2/matplotlib/sankey.py b/contrib/python/matplotlib/py2/matplotlib/sankey.py
new file mode 100644
index 00000000000..88def21ce63
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/sankey.py
@@ -0,0 +1,833 @@
+"""
+Module for creating Sankey diagrams using matplotlib
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+import logging
+from six.moves import zip
+import numpy as np
+
+from matplotlib.cbook import iterable, Bunch
+from matplotlib.path import Path
+from matplotlib.patches import PathPatch
+from matplotlib.transforms import Affine2D
+from matplotlib import docstring
+from matplotlib import rcParams
+
+_log = logging.getLogger(__name__)
+
+__author__ = "Kevin L. Davies"
+__credits__ = ["Yannick Copin"]
+__license__ = "BSD"
+__version__ = "2011/09/16"
+
+# Angles [deg/90]
+RIGHT = 0
+UP = 1
+# LEFT = 2
+DOWN = 3
+
+
+class Sankey(object):
+ """
+ Sankey diagram in matplotlib
+
+ Sankey diagrams are a specific type of flow diagram, in which
+ the width of the arrows is shown proportionally to the flow
+ quantity. They are typically used to visualize energy or
+ material or cost transfers between processes.
+ `Wikipedia (6/1/2011) <https://en.wikipedia.org/wiki/Sankey_diagram>`_
+
+ """
+
+ def __init__(self, ax=None, scale=1.0, unit='', format='%G', gap=0.25,
+ radius=0.1, shoulder=0.03, offset=0.15, head_angle=100,
+ margin=0.4, tolerance=1e-6, **kwargs):
+ """
+ Create a new Sankey instance.
+
+ Optional keyword arguments:
+
+ =============== ===================================================
+ Field Description
+ =============== ===================================================
+ *ax* axes onto which the data should be plotted
+ If *ax* isn't provided, new axes will be created.
+ *scale* scaling factor for the flows
+ *scale* sizes the width of the paths in order to
+ maintain proper layout. The same scale is applied
+ to all subdiagrams. The value should be chosen
+ such that the product of the scale and the sum of
+ the inputs is approximately 1.0 (and the product of
+ the scale and the sum of the outputs is
+ approximately -1.0).
+ *unit* string representing the physical unit associated
+ with the flow quantities
+ If *unit* is None, then none of the quantities are
+ labeled.
+ *format* a Python number formatting string to be used in
+ labeling the flow as a quantity (i.e., a number
+ times a unit, where the unit is given)
+ *gap* space between paths that break in/break away
+ to/from the top or bottom
+ *radius* inner radius of the vertical paths
+ *shoulder* size of the shoulders of output arrows
+ *offset* text offset (from the dip or tip of the arrow)
+ *head_angle* angle of the arrow heads (and negative of the angle
+ of the tails) [deg]
+ *margin* minimum space between Sankey outlines and the edge
+ of the plot area
+ *tolerance* acceptable maximum of the magnitude of the sum of
+ flows
+ The magnitude of the sum of connected flows cannot
+ be greater than *tolerance*.
+ =============== ===================================================
+
+ The optional arguments listed above are applied to all subdiagrams so
+ that there is consistent alignment and formatting.
+
+ If :class:`Sankey` is instantiated with any keyword arguments other
+ than those explicitly listed above (``**kwargs``), they will be passed
+ to :meth:`add`, which will create the first subdiagram.
+
+ In order to draw a complex Sankey diagram, create an instance of
+ :class:`Sankey` by calling it without any kwargs::
+
+ sankey = Sankey()
+
+ Then add simple Sankey sub-diagrams::
+
+ sankey.add() # 1
+ sankey.add() # 2
+ #...
+ sankey.add() # n
+
+ Finally, create the full diagram::
+
+ sankey.finish()
+
+ Or, instead, simply daisy-chain those calls::
+
+ Sankey().add().add... .add().finish()
+
+ .. seealso::
+
+ :meth:`add`
+ :meth:`finish`
+
+
+ **Examples:**
+
+ .. plot:: gallery/api/sankey_basics.py
+ """
+ # Check the arguments.
+ if gap < 0:
+ raise ValueError(
+ "The gap is negative.\nThis isn't allowed because it "
+ "would cause the paths to overlap.")
+ if radius > gap:
+ raise ValueError(
+ "The inner radius is greater than the path spacing.\n"
+ "This isn't allowed because it would cause the paths to overlap.")
+ if head_angle < 0:
+ raise ValueError(
+ "The angle is negative.\nThis isn't allowed "
+ "because it would cause inputs to look like "
+ "outputs and vice versa.")
+ if tolerance < 0:
+ raise ValueError(
+ "The tolerance is negative.\nIt must be a magnitude.")
+
+ # Create axes if necessary.
+ if ax is None:
+ import matplotlib.pyplot as plt
+ fig = plt.figure()
+ ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
+
+ self.diagrams = []
+
+ # Store the inputs.
+ self.ax = ax
+ self.unit = unit
+ self.format = format
+ self.scale = scale
+ self.gap = gap
+ self.radius = radius
+ self.shoulder = shoulder
+ self.offset = offset
+ self.margin = margin
+ self.pitch = np.tan(np.pi * (1 - head_angle / 180.0) / 2.0)
+ self.tolerance = tolerance
+
+ # Initialize the vertices of tight box around the diagram(s).
+ self.extent = np.array((np.inf, -np.inf, np.inf, -np.inf))
+
+ # If there are any kwargs, create the first subdiagram.
+ if len(kwargs):
+ self.add(**kwargs)
+
+ def _arc(self, quadrant=0, cw=True, radius=1, center=(0, 0)):
+ """
+ Return the codes and vertices for a rotated, scaled, and translated
+ 90 degree arc.
+
+ Optional keyword arguments:
+
+ =============== ==========================================
+ Keyword Description
+ =============== ==========================================
+ *quadrant* uses 0-based indexing (0, 1, 2, or 3)
+ *cw* if True, clockwise
+ *center* (x, y) tuple of the arc's center
+ =============== ==========================================
+ """
+ # Note: It would be possible to use matplotlib's transforms to rotate,
+ # scale, and translate the arc, but since the angles are discrete,
+ # it's just as easy and maybe more efficient to do it here.
+ ARC_CODES = [Path.LINETO,
+ Path.CURVE4,
+ Path.CURVE4,
+ Path.CURVE4,
+ Path.CURVE4,
+ Path.CURVE4,
+ Path.CURVE4]
+ # Vertices of a cubic Bezier curve approximating a 90 deg arc
+ # These can be determined by Path.arc(0,90).
+ ARC_VERTICES = np.array([[1.00000000e+00, 0.00000000e+00],
+ [1.00000000e+00, 2.65114773e-01],
+ [8.94571235e-01, 5.19642327e-01],
+ [7.07106781e-01, 7.07106781e-01],
+ [5.19642327e-01, 8.94571235e-01],
+ [2.65114773e-01, 1.00000000e+00],
+ # Insignificant
+ # [6.12303177e-17, 1.00000000e+00]])
+ [0.00000000e+00, 1.00000000e+00]])
+ if quadrant == 0 or quadrant == 2:
+ if cw:
+ vertices = ARC_VERTICES
+ else:
+ vertices = ARC_VERTICES[:, ::-1] # Swap x and y.
+ elif quadrant == 1 or quadrant == 3:
+ # Negate x.
+ if cw:
+ # Swap x and y.
+ vertices = np.column_stack((-ARC_VERTICES[:, 1],
+ ARC_VERTICES[:, 0]))
+ else:
+ vertices = np.column_stack((-ARC_VERTICES[:, 0],
+ ARC_VERTICES[:, 1]))
+ if quadrant > 1:
+ radius = -radius # Rotate 180 deg.
+ return list(zip(ARC_CODES, radius * vertices +
+ np.tile(center, (ARC_VERTICES.shape[0], 1))))
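+
+ # Illustrative sketch (editor's note, not part of the upstream module):
+ # _arc(quadrant=0, cw=True, radius=1, center=(0, 0)) would return seven
+ # (code, vertex) pairs -- one LINETO followed by two cubic Bezier
+ # segments -- tracing a quarter circle from (1, 0) to (0, 1).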
+
+ def _add_input(self, path, angle, flow, length):
+ """
+ Add an input to a path and return its tip and label locations.
+ """
+ if angle is None:
+ return [0, 0], [0, 0]
+ else:
+ x, y = path[-1][1] # Use the last point as a reference.
+ dipdepth = (flow / 2) * self.pitch
+ if angle == RIGHT:
+ x -= length
+ dip = [x + dipdepth, y + flow / 2.0]
+ path.extend([(Path.LINETO, [x, y]),
+ (Path.LINETO, dip),
+ (Path.LINETO, [x, y + flow]),
+ (Path.LINETO, [x + self.gap, y + flow])])
+ label_location = [dip[0] - self.offset, dip[1]]
+ else: # Vertical
+ x -= self.gap
+ if angle == UP:
+ sign = 1
+ else:
+ sign = -1
+
+ dip = [x - flow / 2, y - sign * (length - dipdepth)]
+ if angle == DOWN:
+ quadrant = 2
+ else:
+ quadrant = 1
+
+ # Inner arc isn't needed if inner radius is zero
+ if self.radius:
+ path.extend(self._arc(quadrant=quadrant,
+ cw=angle == UP,
+ radius=self.radius,
+ center=(x + self.radius,
+ y - sign * self.radius)))
+ else:
+ path.append((Path.LINETO, [x, y]))
+ path.extend([(Path.LINETO, [x, y - sign * length]),
+ (Path.LINETO, dip),
+ (Path.LINETO, [x - flow, y - sign * length])])
+ path.extend(self._arc(quadrant=quadrant,
+ cw=angle == DOWN,
+ radius=flow + self.radius,
+ center=(x + self.radius,
+ y - sign * self.radius)))
+ path.append((Path.LINETO, [x - flow, y + sign * flow]))
+ label_location = [dip[0], dip[1] - sign * self.offset]
+
+ return dip, label_location
+
+ def _add_output(self, path, angle, flow, length):
+ """
+ Append an output to a path and return its tip and label locations.
+
+ .. note:: *flow* is negative for an output.
+ """
+ if angle is None:
+ return [0, 0], [0, 0]
+ else:
+ x, y = path[-1][1] # Use the last point as a reference.
+ tipheight = (self.shoulder - flow / 2) * self.pitch
+ if angle == RIGHT:
+ x += length
+ tip = [x + tipheight, y + flow / 2.0]
+ path.extend([(Path.LINETO, [x, y]),
+ (Path.LINETO, [x, y + self.shoulder]),
+ (Path.LINETO, tip),
+ (Path.LINETO, [x, y - self.shoulder + flow]),
+ (Path.LINETO, [x, y + flow]),
+ (Path.LINETO, [x - self.gap, y + flow])])
+ label_location = [tip[0] + self.offset, tip[1]]
+ else: # Vertical
+ x += self.gap
+ if angle == UP:
+ sign = 1
+ else:
+ sign = -1
+
+ tip = [x - flow / 2.0, y + sign * (length + tipheight)]
+ if angle == UP:
+ quadrant = 3
+ else:
+ quadrant = 0
+ # Inner arc isn't needed if inner radius is zero
+ if self.radius:
+ path.extend(self._arc(quadrant=quadrant,
+ cw=angle == UP,
+ radius=self.radius,
+ center=(x - self.radius,
+ y + sign * self.radius)))
+ else:
+ path.append((Path.LINETO, [x, y]))
+ path.extend([(Path.LINETO, [x, y + sign * length]),
+ (Path.LINETO, [x - self.shoulder,
+ y + sign * length]),
+ (Path.LINETO, tip),
+ (Path.LINETO, [x + self.shoulder - flow,
+ y + sign * length]),
+ (Path.LINETO, [x - flow, y + sign * length])])
+ path.extend(self._arc(quadrant=quadrant,
+ cw=angle == DOWN,
+ radius=self.radius - flow,
+ center=(x - self.radius,
+ y + sign * self.radius)))
+ path.append((Path.LINETO, [x - flow, y + sign * flow]))
+ label_location = [tip[0], tip[1] + sign * self.offset]
+ return tip, label_location
+
+ def _revert(self, path, first_action=Path.LINETO):
+ """
+ A path cannot simply be reversed with path[::-1], because each code
+ specifies the action to take from the **previous** point.
+ """
+ reverse_path = []
+ next_code = first_action
+ for code, position in path[::-1]:
+ reverse_path.append((next_code, position))
+ next_code = code
+ return reverse_path
+ # This might be more efficient, but it fails because 'tuple' object
+ # doesn't support item assignment:
+ # path[1] = path[1][-1:0:-1]
+ # path[1][0] = first_action
+ # path[2] = path[2][::-1]
+ # return path
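+
+ # Illustrative sketch (editor's note, not part of the upstream module):
+ # for a simple polyline one would expect, e.g.,
+ # _revert([(Path.MOVETO, a), (Path.LINETO, b), (Path.LINETO, c)])
+ # to give [(Path.LINETO, c), (Path.LINETO, b), (Path.LINETO, a)]:
+ # each point is paired with the code that originally led away from it,
+ # with *first_action* used for the original end point.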
+
+ @docstring.dedent_interpd
+ def add(self, patchlabel='', flows=None, orientations=None, labels='',
+ trunklength=1.0, pathlengths=0.25, prior=None, connect=(0, 0),
+ rotation=0, **kwargs):
+ """
+ Add a simple Sankey diagram with flows at the same hierarchical level.
+
+ Return value is the instance of :class:`Sankey`.
+
+ Optional keyword arguments:
+
+ =============== ===================================================
+ Keyword Description
+ =============== ===================================================
+ *patchlabel* label to be placed at the center of the diagram
+ Note: *label* (not *patchlabel*) will be passed to
+ the patch through ``**kwargs`` and can be used to
+ create an entry in the legend.
+ *flows* array of flow values
+ By convention, inputs are positive and outputs are
+ negative.
+ *orientations* list of orientations of the paths
+ Valid values are 1 (from/to the top), 0 (from/to
+ the left or right), or -1 (from/to the bottom). If
+ *orientations* == 0, inputs will break in from the
+ left and outputs will break away to the right.
+ *labels* list of specifications of the labels for the flows
+ Each value may be *None* (no labels), '' (just
+ label the quantities), or a labeling string. If a
+ single value is provided, it will be applied to all
+ flows. If an entry is a non-empty string, then the
+ quantity for the corresponding flow will be shown
+ below the string. However, if the *unit* of the
+ main diagram is None, then quantities are never
+ shown, regardless of the value of this argument.
+ *trunklength* length between the bases of the input and output
+ groups
+ *pathlengths* list of lengths of the arrows before break-in or
+ after break-away
+ If a single value is given, then it will be applied
+ to the first (inside) paths on the top and bottom,
+ and the length of all other arrows will be
+ justified accordingly. The *pathlengths* are not
+ applied to the horizontal inputs and outputs.
+ *prior* index of the prior diagram to which this diagram
+ should be connected
+ *connect* a (prior, this) tuple indexing the flow of the
+ prior diagram and the flow of this diagram which
+ should be connected
+ If this is the first diagram or *prior* is *None*,
+ *connect* will be ignored.
+ *rotation* angle of rotation of the diagram [deg]
+ *rotation* is ignored if this diagram is connected
+ to an existing one (using *prior* and *connect*).
+ The interpretation of the *orientations* argument
+ will be rotated accordingly (e.g., if *rotation*
+ == 90, an *orientations* entry of 1 means to/from
+ the left).
+ =============== ===================================================
+
+ Valid kwargs are :meth:`matplotlib.patches.PathPatch` arguments:
+
+ %(Patch)s
+
+ As examples, ``fill=False`` and ``label='A legend entry'``.
+ By default, ``facecolor='#bfd1d4'`` (light blue) and
+ ``linewidth=0.5``.
+
+ The indexing parameters (*prior* and *connect*) are zero-based.
+
+ The flows are placed along the top of the diagram from the inside out
+ in order of their index within the *flows* list or array. They are
+ placed along the sides of the diagram from the top down and along the
+ bottom from the outside in.
+
+ If the sum of the inputs and outputs is nonzero, the discrepancy
+ will appear as a cubic Bezier curve along the top and bottom edges of
+ the trunk.
+
+ .. seealso::
+
+ :meth:`finish`
+ """
+ # Check and preprocess the arguments.
+ if flows is None:
+ flows = np.array([1.0, -1.0])
+ else:
+ flows = np.array(flows)
+ n = flows.shape[0] # Number of flows
+ if rotation is None:
+ rotation = 0
+ else:
+ # In the code below, angles are expressed in deg/90.
+ rotation /= 90.0
+ if orientations is None:
+ orientations = [0, 0]
+ if len(orientations) != n:
+ raise ValueError(
+ "orientations and flows must have the same length.\n"
+ "orientations has length %d, but flows has length %d."
+ % (len(orientations), n))
+ if labels != '' and getattr(labels, '__iter__', False):
+ # iterable() isn't used because it would give True if labels is a
+ # string
+ if len(labels) != n:
+ raise ValueError(
+ "If labels is a list, then labels and flows must have the "
+ "same length.\nlabels has length %d, but flows has length %d."
+ % (len(labels), n))
+ else:
+ labels = [labels] * n
+ if trunklength < 0:
+ raise ValueError(
+ "trunklength is negative.\nThis isn't allowed, because it would "
+ "cause poor layout.")
+ if np.abs(np.sum(flows)) > self.tolerance:
+ _log.info("The sum of the flows is nonzero (%f).\nIs the "
+ "system not at steady state?", np.sum(flows))
+ scaled_flows = self.scale * flows
+ gain = sum(max(flow, 0) for flow in scaled_flows)
+ loss = sum(min(flow, 0) for flow in scaled_flows)
+ if not (0.5 <= gain <= 2.0):
+ _log.info(
+ "The scaled sum of the inputs is %f.\nThis may "
+ "cause poor layout.\nConsider changing the scale so"
+ " that the scaled sum is approximately 1.0.", gain)
+ if not (-2.0 <= loss <= -0.5):
+ _log.info(
+ "The scaled sum of the outputs is %f.\nThis may "
+ "cause poor layout.\nConsider changing the scale so"
+ " that the scaled sum is approximately -1.0.", loss)
+ if prior is not None:
+ if prior < 0:
+ raise ValueError("The index of the prior diagram is negative.")
+ if min(connect) < 0:
+ raise ValueError(
+ "At least one of the connection indices is negative.")
+ if prior >= len(self.diagrams):
+ raise ValueError(
+ "The index of the prior diagram is %d, but there are "
+ "only %d other diagrams.\nThe index is zero-based."
+ % (prior, len(self.diagrams)))
+ if connect[0] >= len(self.diagrams[prior].flows):
+ raise ValueError(
+ "The connection index to the source diagram is %d, but "
+ "that diagram has only %d flows.\nThe index is zero-based."
+ % (connect[0], len(self.diagrams[prior].flows)))
+ if connect[1] >= n:
+ raise ValueError(
+ "The connection index to this diagram is %d, but this diagram "
+ "has only %d flows.\nThe index is zero-based."
+ % (connect[1], n))
+ if self.diagrams[prior].angles[connect[0]] is None:
+ raise ValueError(
+ "The connection cannot be made. Check that the magnitude "
+ "of flow %d of diagram %d is greater than or equal to the "
+ "specified tolerance." % (connect[0], prior))
+ flow_error = (self.diagrams[prior].flows[connect[0]] +
+ flows[connect[1]])
+ if abs(flow_error) >= self.tolerance:
+ raise ValueError(
+ "The scaled sum of the connected flows is %f, which is not "
+ "within the tolerance (%f)." % (flow_error, self.tolerance))
+
+ # Determine if the flows are inputs.
+ are_inputs = [None] * n
+ for i, flow in enumerate(flows):
+ if flow >= self.tolerance:
+ are_inputs[i] = True
+ elif flow <= -self.tolerance:
+ are_inputs[i] = False
+ else:
+ _log.info(
+ "The magnitude of flow %d (%f) is below the "
+ "tolerance (%f).\nIt will not be shown, and it "
+ "cannot be used in a connection."
+ % (i, flow, self.tolerance))
+
+ # Determine the angles of the arrows (before rotation).
+ angles = [None] * n
+ for i, (orient, is_input) in enumerate(zip(orientations, are_inputs)):
+ if orient == 1:
+ if is_input:
+ angles[i] = DOWN
+ elif not is_input:
+ # Be specific since is_input can be None.
+ angles[i] = UP
+ elif orient == 0:
+ if is_input is not None:
+ angles[i] = RIGHT
+ else:
+ if orient != -1:
+ raise ValueError(
+ "The value of orientations[%d] is %d, "
+ "but it must be [ -1 | 0 | 1 ]." % (i, orient))
+ if is_input:
+ angles[i] = UP
+ elif not is_input:
+ angles[i] = DOWN
+
+ # Justify the lengths of the paths.
+ if iterable(pathlengths):
+ if len(pathlengths) != n:
+ raise ValueError(
+ "If pathlengths is a list, then pathlengths and flows must "
+ "have the same length.\npathlengths has length %d, but flows "
+ "has length %d." % (len(pathlengths), n))
+ else: # Make pathlengths into a list.
+ urlength = pathlengths
+ ullength = pathlengths
+ lrlength = pathlengths
+ lllength = pathlengths
+ d = dict(RIGHT=pathlengths)
+ pathlengths = [d.get(angle, 0) for angle in angles]
+ # Determine the lengths of the top-side arrows
+ # from the middle outwards.
+ for i, (angle, is_input, flow) in enumerate(zip(angles, are_inputs,
+ scaled_flows)):
+ if angle == DOWN and is_input:
+ pathlengths[i] = ullength
+ ullength += flow
+ elif angle == UP and not is_input:
+ pathlengths[i] = urlength
+ urlength -= flow # Flow is negative for outputs.
+ # Determine the lengths of the bottom-side arrows
+ # from the middle outwards.
+ for i, (angle, is_input, flow) in enumerate(reversed(list(zip(
+ angles, are_inputs, scaled_flows)))):
+ if angle == UP and is_input:
+ pathlengths[n - i - 1] = lllength
+ lllength += flow
+ elif angle == DOWN and not is_input:
+ pathlengths[n - i - 1] = lrlength
+ lrlength -= flow
+ # Determine the lengths of the left-side arrows
+ # from the bottom upwards.
+ has_left_input = False
+ for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
+ angles, are_inputs, zip(scaled_flows, pathlengths))))):
+ if angle == RIGHT:
+ if is_input:
+ if has_left_input:
+ pathlengths[n - i - 1] = 0
+ else:
+ has_left_input = True
+ # Determine the lengths of the right-side arrows
+ # from the top downwards.
+ has_right_output = False
+ for i, (angle, is_input, spec) in enumerate(zip(
+ angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
+ if angle == RIGHT:
+ if not is_input:
+ if has_right_output:
+ pathlengths[i] = 0
+ else:
+ has_right_output = True
+
+ # Begin the subpaths, and smooth the transition if the sum of the flows
+ # is nonzero.
+ urpath = [(Path.MOVETO, [(self.gap - trunklength / 2.0), # Upper right
+ gain / 2.0]),
+ (Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
+ gain / 2.0]),
+ (Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
+ gain / 2.0]),
+ (Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
+ -loss / 2.0]),
+ (Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
+ -loss / 2.0]),
+ (Path.LINETO, [(trunklength / 2.0 - self.gap),
+ -loss / 2.0])]
+ llpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower left
+ loss / 2.0]),
+ (Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
+ loss / 2.0]),
+ (Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
+ loss / 2.0]),
+ (Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
+ -gain / 2.0]),
+ (Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
+ -gain / 2.0]),
+ (Path.LINETO, [(self.gap - trunklength / 2.0),
+ -gain / 2.0])]
+ lrpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower right
+ loss / 2.0])]
+ ulpath = [(Path.LINETO, [self.gap - trunklength / 2.0, # Upper left
+ gain / 2.0])]
+
+ # Add the subpaths and assign the locations of the tips and labels.
+ tips = np.zeros((n, 2))
+ label_locations = np.zeros((n, 2))
+ # Add the top-side inputs and outputs from the middle outwards.
+ for i, (angle, is_input, spec) in enumerate(zip(
+ angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
+ if angle == DOWN and is_input:
+ tips[i, :], label_locations[i, :] = self._add_input(
+ ulpath, angle, *spec)
+ elif angle == UP and not is_input:
+ tips[i, :], label_locations[i, :] = self._add_output(
+ urpath, angle, *spec)
+ # Add the bottom-side inputs and outputs from the middle outwards.
+ for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
+ angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
+ if angle == UP and is_input:
+ tip, label_location = self._add_input(llpath, angle, *spec)
+ tips[n - i - 1, :] = tip
+ label_locations[n - i - 1, :] = label_location
+ elif angle == DOWN and not is_input:
+ tip, label_location = self._add_output(lrpath, angle, *spec)
+ tips[n - i - 1, :] = tip
+ label_locations[n - i - 1, :] = label_location
+ # Add the left-side inputs from the bottom upwards.
+ has_left_input = False
+ for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
+ angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
+ if angle == RIGHT and is_input:
+ if not has_left_input:
+ # Make sure the lower path extends
+ # at least as far as the upper one.
+ if llpath[-1][1][0] > ulpath[-1][1][0]:
+ llpath.append((Path.LINETO, [ulpath[-1][1][0],
+ llpath[-1][1][1]]))
+ has_left_input = True
+ tip, label_location = self._add_input(llpath, angle, *spec)
+ tips[n - i - 1, :] = tip
+ label_locations[n - i - 1, :] = label_location
+ # Add the right-side outputs from the top downwards.
+ has_right_output = False
+ for i, (angle, is_input, spec) in enumerate(zip(
+ angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
+ if angle == RIGHT and not is_input:
+ if not has_right_output:
+ # Make sure the upper path extends
+ # at least as far as the lower one.
+ if urpath[-1][1][0] < lrpath[-1][1][0]:
+ urpath.append((Path.LINETO, [lrpath[-1][1][0],
+ urpath[-1][1][1]]))
+ has_right_output = True
+ tips[i, :], label_locations[i, :] = self._add_output(
+ urpath, angle, *spec)
+ # Trim any hanging vertices.
+ if not has_left_input:
+ ulpath.pop()
+ llpath.pop()
+ if not has_right_output:
+ lrpath.pop()
+ urpath.pop()
+
+ # Concatenate the subpaths in the correct order (clockwise from top).
+ path = (urpath + self._revert(lrpath) + llpath + self._revert(ulpath) +
+ [(Path.CLOSEPOLY, urpath[0][1])])
+
+ # Create a patch with the Sankey outline.
+ codes, vertices = zip(*path)
+ vertices = np.array(vertices)
+
+ def _get_angle(a, r):
+ if a is None:
+ return None
+ else:
+ return a + r
+
+ if prior is None:
+ if rotation != 0: # By default, none of this is needed.
+ angles = [_get_angle(angle, rotation) for angle in angles]
+ rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
+ tips = rotate(tips)
+ label_locations = rotate(label_locations)
+ vertices = rotate(vertices)
+ text = self.ax.text(0, 0, s=patchlabel, ha='center', va='center')
+ else:
+ rotation = (self.diagrams[prior].angles[connect[0]] -
+ angles[connect[1]])
+ angles = [_get_angle(angle, rotation) for angle in angles]
+ rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
+ tips = rotate(tips)
+ offset = self.diagrams[prior].tips[connect[0]] - tips[connect[1]]
+ translate = Affine2D().translate(*offset).transform_affine
+ tips = translate(tips)
+ label_locations = translate(rotate(label_locations))
+ vertices = translate(rotate(vertices))
+ kwds = dict(s=patchlabel, ha='center', va='center')
+ text = self.ax.text(*offset, **kwds)
+ if rcParams['_internal.classic_mode']:
+ fc = kwargs.pop('fc', kwargs.pop('facecolor', '#bfd1d4'))
+ lw = kwargs.pop('lw', kwargs.pop('linewidth', 0.5))
+ else:
+ fc = kwargs.pop('fc', kwargs.pop('facecolor', None))
+ lw = kwargs.pop('lw', kwargs.pop('linewidth', None))
+ if fc is None:
+ fc = next(self.ax._get_patches_for_fill.prop_cycler)['color']
+ patch = PathPatch(Path(vertices, codes), fc=fc, lw=lw, **kwargs)
+ self.ax.add_patch(patch)
+
+ # Add the path labels.
+ texts = []
+ for number, angle, label, location in zip(flows, angles, labels,
+ label_locations):
+ if label is None or angle is None:
+ label = ''
+ elif self.unit is not None:
+ quantity = self.format % abs(number) + self.unit
+ if label != '':
+ label += "\n"
+ label += quantity
+ texts.append(self.ax.text(x=location[0], y=location[1],
+ s=label,
+ ha='center', va='center'))
+ # Text objects are placed even if they are empty (as long as the magnitude
+ # of the corresponding flow is larger than the tolerance) in case the
+ # user wants to provide labels later.
+
+ # Expand the size of the diagram if necessary.
+ self.extent = (min(np.min(vertices[:, 0]),
+ np.min(label_locations[:, 0]),
+ self.extent[0]),
+ max(np.max(vertices[:, 0]),
+ np.max(label_locations[:, 0]),
+ self.extent[1]),
+ min(np.min(vertices[:, 1]),
+ np.min(label_locations[:, 1]),
+ self.extent[2]),
+ max(np.max(vertices[:, 1]),
+ np.max(label_locations[:, 1]),
+ self.extent[3]))
+ # Include both vertices _and_ label locations in the extents; there are
+ # cases where either could determine the margins (e.g., arrow shoulders).
+
+ # Add this diagram as a subdiagram.
+ self.diagrams.append(Bunch(patch=patch, flows=flows, angles=angles,
+ tips=tips, text=text, texts=texts))
+
+ # Allow a daisy-chained call structure (see docstring for the class).
+ return self
+
+ def finish(self):
+ """
+ Adjust the axes and return a list of information about the Sankey
+ subdiagram(s).
+
+ Return value is a list of subdiagrams represented with the following
+ fields:
+
+ =============== ===================================================
+ Field Description
+ =============== ===================================================
+ *patch* Sankey outline (an instance of
+ :class:`~matplotlib.patches.PathPatch`)
+ *flows* values of the flows (positive for input, negative
+ for output)
+ *angles* list of angles of the arrows [deg/90]
+ For example, if the diagram has not been rotated,
+ an input to the top side will have an angle of 3
+ (DOWN), and an output from the top side will have
+ an angle of 1 (UP). If a flow has been skipped
+ (because its magnitude is less than *tolerance*),
+ then its angle will be *None*.
+ *tips* array in which each row is an [x, y] pair
+ indicating the positions of the tips (or "dips") of
+ the flow paths
+ If the magnitude of a flow is less than the *tolerance*
+ for the instance of :class:`Sankey`, the flow is
+ skipped and its tip will be at the center of the
+ diagram.
+ *text* :class:`~matplotlib.text.Text` instance for the
+ label of the diagram
+ *texts* list of :class:`~matplotlib.text.Text` instances
+ for the labels of flows
+ =============== ===================================================
+
+ .. seealso::
+
+ :meth:`add`
+ """
+ self.ax.axis([self.extent[0] - self.margin,
+ self.extent[1] + self.margin,
+ self.extent[2] - self.margin,
+ self.extent[3] + self.margin])
+ self.ax.set_aspect('equal', adjustable='datalim')
+ return self.diagrams
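+
+
+# Illustrative sketch (editor's note, not part of the upstream module): the
+# intended usage, following the class docstring and the sankey_basics gallery
+# example referenced there, is roughly:
+#
+#     import matplotlib.pyplot as plt
+#     from matplotlib.sankey import Sankey
+#
+#     Sankey(flows=[0.25, 0.15, 0.60, -0.20, -0.15, -0.05, -0.50, -0.10],
+#            labels=['', '', '', 'First', 'Second', 'Third', 'Fourth',
+#                    'Fifth'],
+#            orientations=[-1, 1, 0, 1, 1, 1, -1, -1]).finish()
+#     plt.show()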
diff --git a/contrib/python/matplotlib/py2/matplotlib/scale.py b/contrib/python/matplotlib/py2/matplotlib/scale.py
new file mode 100644
index 00000000000..357aff9fc21
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/scale.py
@@ -0,0 +1,607 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+from numpy import ma
+
+from matplotlib import cbook, docstring, rcParams
+from matplotlib.ticker import (
+ NullFormatter, ScalarFormatter, LogFormatterSciNotation, LogitFormatter,
+ NullLocator, LogLocator, AutoLocator, AutoMinorLocator,
+ SymmetricalLogLocator, LogitLocator)
+from matplotlib.transforms import Transform, IdentityTransform
+
+
+class ScaleBase(object):
+ """
+ The base class for all scales.
+
+ Scales are separable transformations, working on a single dimension.
+
+ Any subclasses will want to override:
+
+ - :attr:`name`
+ - :meth:`get_transform`
+ - :meth:`set_default_locators_and_formatters`
+
+ And optionally:
+ - :meth:`limit_range_for_scale`
+ """
+ def get_transform(self):
+ """
+ Return the :class:`~matplotlib.transforms.Transform` object
+ associated with this scale.
+ """
+ raise NotImplementedError()
+
+ def set_default_locators_and_formatters(self, axis):
+ """
+ Set the :class:`~matplotlib.ticker.Locator` and
+ :class:`~matplotlib.ticker.Formatter` objects on the given
+ axis to match this scale.
+ """
+ raise NotImplementedError()
+
+ def limit_range_for_scale(self, vmin, vmax, minpos):
+ """
+ Returns the range *vmin*, *vmax*, possibly limited to the
+ domain supported by this scale.
+
+ *minpos* should be the minimum positive value in the data.
+ This is used by log scales to determine a minimum value.
+ """
+ return vmin, vmax
+
+
+class LinearScale(ScaleBase):
+ """
+ The default linear scale.
+ """
+
+ name = 'linear'
+
+ def __init__(self, axis, **kwargs):
+ pass
+
+ def set_default_locators_and_formatters(self, axis):
+ """
+ Set the locators and formatters to reasonable defaults for
+ linear scaling.
+ """
+ axis.set_major_locator(AutoLocator())
+ axis.set_major_formatter(ScalarFormatter())
+ axis.set_minor_formatter(NullFormatter())
+ # update the minor locator for x and y axis based on rcParams
+ if rcParams['xtick.minor.visible']:
+ axis.set_minor_locator(AutoMinorLocator())
+ else:
+ axis.set_minor_locator(NullLocator())
+
+ def get_transform(self):
+ """
+ The transform for linear scaling is just the
+ :class:`~matplotlib.transforms.IdentityTransform`.
+ """
+ return IdentityTransform()
+
+
+class LogTransformBase(Transform):
+ input_dims = 1
+ output_dims = 1
+ is_separable = True
+ has_inverse = True
+
+ def __init__(self, nonpos='clip'):
+ Transform.__init__(self)
+ self._clip = {"clip": True, "mask": False}[nonpos]
+
+ def transform_non_affine(self, a):
+ # Ignore invalid values due to nans being passed to the transform
+ with np.errstate(divide="ignore", invalid="ignore"):
+ out = np.log(a)
+ out /= np.log(self.base)
+ if self._clip:
+ # SVG spec says that conforming viewers must support values up
+ # to 3.4e38 (C float); however experiments suggest that
+ # Inkscape (which uses cairo for rendering) runs into cairo's
+ # 24-bit limit (which is apparently shared by Agg).
+ # Ghostscript (used for pdf rendering) appears to overflow even
+ # earlier, with a max value of around 2 ** 15 needed for the tests
+ # to pass. On the other hand, in practice, we want to clip beyond
+ # np.log10(np.nextafter(0, 1)) ~ -323
+ # so 1000 seems safe.
+ out[a <= 0] = -1000
+ return out
+
+ def __str__(self):
+ return "{}({!r})".format(
+ type(self).__name__, "clip" if self._clip else "mask")
+
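+# Illustrative sketch (editor's note, not part of the upstream module): with
+# nonpos='clip', non-positive inputs are mapped to a large negative number
+# rather than NaN/-inf; e.g. one would expect
+#     Log10Transform('clip').transform_non_affine(np.array([10., 0.]))
+# to give approximately array([1., -1000.]).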
+
+class InvertedLogTransformBase(Transform):
+ input_dims = 1
+ output_dims = 1
+ is_separable = True
+ has_inverse = True
+
+ def transform_non_affine(self, a):
+ return ma.power(self.base, a)
+
+ def __str__(self):
+ return "{}()".format(type(self).__name__)
+
+
+class Log10Transform(LogTransformBase):
+ base = 10.0
+
+ def inverted(self):
+ return InvertedLog10Transform()
+
+
+class InvertedLog10Transform(InvertedLogTransformBase):
+ base = 10.0
+
+ def inverted(self):
+ return Log10Transform()
+
+
+class Log2Transform(LogTransformBase):
+ base = 2.0
+
+ def inverted(self):
+ return InvertedLog2Transform()
+
+
+class InvertedLog2Transform(InvertedLogTransformBase):
+ base = 2.0
+
+ def inverted(self):
+ return Log2Transform()
+
+
+class NaturalLogTransform(LogTransformBase):
+ base = np.e
+
+ def inverted(self):
+ return InvertedNaturalLogTransform()
+
+
+class InvertedNaturalLogTransform(InvertedLogTransformBase):
+ base = np.e
+
+ def inverted(self):
+ return NaturalLogTransform()
+
+
+class LogTransform(LogTransformBase):
+ def __init__(self, base, nonpos='clip'):
+ LogTransformBase.__init__(self, nonpos)
+ self.base = base
+
+ def inverted(self):
+ return InvertedLogTransform(self.base)
+
+
+class InvertedLogTransform(InvertedLogTransformBase):
+ def __init__(self, base):
+ InvertedLogTransformBase.__init__(self)
+ self.base = base
+
+ def inverted(self):
+ return LogTransform(self.base)
+
+
+class LogScale(ScaleBase):
+ """
+ A standard logarithmic scale. Care is taken so that non-positive
+ values are not plotted.
+
+ For computational efficiency (to push as much as possible to Numpy
+ C code in the common cases), this scale provides different
+ transforms depending on the base of the logarithm:
+
+ - base 10 (:class:`Log10Transform`)
+ - base 2 (:class:`Log2Transform`)
+ - base e (:class:`NaturalLogTransform`)
+ - arbitrary base (:class:`LogTransform`)
+ """
+ name = 'log'
+
+ # compatibility shim
+ LogTransformBase = LogTransformBase
+ Log10Transform = Log10Transform
+ InvertedLog10Transform = InvertedLog10Transform
+ Log2Transform = Log2Transform
+ InvertedLog2Transform = InvertedLog2Transform
+ NaturalLogTransform = NaturalLogTransform
+ InvertedNaturalLogTransform = InvertedNaturalLogTransform
+ LogTransform = LogTransform
+ InvertedLogTransform = InvertedLogTransform
+
+ def __init__(self, axis, **kwargs):
+ """
+ *basex*/*basey*:
+ The base of the logarithm
+
+ *nonposx*/*nonposy*: ['mask' | 'clip' ]
+ non-positive values in *x* or *y* can be masked as
+ invalid, or clipped to a very small positive number
+
+ *subsx*/*subsy*:
+ Where to place the subticks between each major tick.
+ Should be a sequence of integers. For example, in a log10
+ scale: ``[2, 3, 4, 5, 6, 7, 8, 9]``
+
+ will place 8 logarithmically spaced minor ticks between
+ each major tick.
+ """
+ if axis.axis_name == 'x':
+ base = kwargs.pop('basex', 10.0)
+ subs = kwargs.pop('subsx', None)
+ nonpos = kwargs.pop('nonposx', 'clip')
+ else:
+ base = kwargs.pop('basey', 10.0)
+ subs = kwargs.pop('subsy', None)
+ nonpos = kwargs.pop('nonposy', 'clip')
+
+ if len(kwargs):
+ raise ValueError(("provided too many kwargs, can only pass "
+ "{'basex', 'subsx', 'nonposx'} or "
+ "{'basey', 'subsy', 'nonposy'}. You passed ") +
+ "{!r}".format(kwargs))
+
+ if nonpos not in ['mask', 'clip']:
+ raise ValueError("nonposx, nonposy kwarg must be 'mask' or 'clip'")
+ if base <= 0 or base == 1:
+ raise ValueError('The log base cannot be <= 0 or == 1')
+
+ if base == 10.0:
+ self._transform = self.Log10Transform(nonpos)
+ elif base == 2.0:
+ self._transform = self.Log2Transform(nonpos)
+ elif base == np.e:
+ self._transform = self.NaturalLogTransform(nonpos)
+ else:
+ self._transform = self.LogTransform(base, nonpos)
+
+ self.base = base
+ self.subs = subs
+
+ def set_default_locators_and_formatters(self, axis):
+ """
+ Set the locators and formatters to specialized versions for
+ log scaling.
+ """
+ axis.set_major_locator(LogLocator(self.base))
+ axis.set_major_formatter(LogFormatterSciNotation(self.base))
+ axis.set_minor_locator(LogLocator(self.base, self.subs))
+ axis.set_minor_formatter(
+ LogFormatterSciNotation(self.base,
+ labelOnlyBase=(self.subs is not None)))
+
+ def get_transform(self):
+ """
+ Return a :class:`~matplotlib.transforms.Transform` instance
+ appropriate for the given logarithm base.
+ """
+ return self._transform
+
+ def limit_range_for_scale(self, vmin, vmax, minpos):
+ """
+ Limit the domain to positive values.
+ """
+ if not np.isfinite(minpos):
+ minpos = 1e-300 # This value should rarely if ever
+ # end up with a visible effect.
+
+ return (minpos if vmin <= 0 else vmin,
+ minpos if vmax <= 0 else vmax)
+
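+# A minimal usage sketch (illustrative, not part of this module): the scale is
+# normally selected through the Axes API, which forwards the keyword arguments
+# documented in LogScale.__init__ above, e.g.
+#
+#     import matplotlib.pyplot as plt
+#     fig, ax = plt.subplots()
+#     ax.plot([1, 2, 4, 8], [1, 10, 100, 1000])
+#     ax.set_xscale('log', basex=2, nonposx='mask')
+#
+# With basex=2 the dedicated Log2Transform above is used; any other base falls
+# back to the generic LogTransform.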
+
+class SymmetricalLogTransform(Transform):
+ input_dims = 1
+ output_dims = 1
+ is_separable = True
+ has_inverse = True
+
+ def __init__(self, base, linthresh, linscale):
+ Transform.__init__(self)
+ self.base = base
+ self.linthresh = linthresh
+ self.linscale = linscale
+ self._linscale_adj = (linscale / (1.0 - self.base ** -1))
+ self._log_base = np.log(base)
+
+ def transform_non_affine(self, a):
+ sign = np.sign(a)
+ masked = ma.masked_inside(a,
+ -self.linthresh,
+ self.linthresh,
+ copy=False)
+ log = sign * self.linthresh * (
+ self._linscale_adj +
+ ma.log(np.abs(masked) / self.linthresh) / self._log_base)
+ if masked.mask.any():
+ return ma.where(masked.mask, a * self._linscale_adj, log)
+ else:
+ return log
+
+ def inverted(self):
+ return InvertedSymmetricalLogTransform(self.base, self.linthresh,
+ self.linscale)
+
+
+class InvertedSymmetricalLogTransform(Transform):
+ input_dims = 1
+ output_dims = 1
+ is_separable = True
+ has_inverse = True
+
+ def __init__(self, base, linthresh, linscale):
+ Transform.__init__(self)
+ symlog = SymmetricalLogTransform(base, linthresh, linscale)
+ self.base = base
+ self.linthresh = linthresh
+ self.invlinthresh = symlog.transform(linthresh)
+ self.linscale = linscale
+ self._linscale_adj = (linscale / (1.0 - self.base ** -1))
+
+ def transform_non_affine(self, a):
+ sign = np.sign(a)
+ masked = ma.masked_inside(a, -self.invlinthresh,
+ self.invlinthresh, copy=False)
+ exp = sign * self.linthresh * (
+ ma.power(self.base, (sign * (masked / self.linthresh))
+ - self._linscale_adj))
+ if masked.mask.any():
+ return ma.where(masked.mask, a / self._linscale_adj, exp)
+ else:
+ return exp
+
+ def inverted(self):
+ return SymmetricalLogTransform(self.base,
+ self.linthresh, self.linscale)
+
+
+class SymmetricalLogScale(ScaleBase):
+ """
+ The symmetrical logarithmic scale is logarithmic in both the
+ positive and negative directions from the origin.
+
+    Since the logarithm of values close to zero tends toward negative
+    infinity, a range around zero is kept linear. The parameter
+    *linthresh* allows the user to specify the size of this range
+    (-*linthresh*, *linthresh*).
+ """
+ name = 'symlog'
+ # compatibility shim
+ SymmetricalLogTransform = SymmetricalLogTransform
+ InvertedSymmetricalLogTransform = InvertedSymmetricalLogTransform
+
+ def __init__(self, axis, **kwargs):
+ """
+ *basex*/*basey*:
+ The base of the logarithm
+
+ *linthreshx*/*linthreshy*:
+ A single float which defines the range (-*x*, *x*), within
+ which the plot is linear. This avoids having the plot go to
+ infinity around zero.
+
+ *subsx*/*subsy*:
+ Where to place the subticks between each major tick.
+ Should be a sequence of integers. For example, in a log10
+ scale: ``[2, 3, 4, 5, 6, 7, 8, 9]``
+
+ will place 8 logarithmically spaced minor ticks between
+ each major tick.
+
+ *linscalex*/*linscaley*:
+ This allows the linear range (-*linthresh* to *linthresh*)
+ to be stretched relative to the logarithmic range. Its
+ value is the number of decades to use for each half of the
+ linear range. For example, when *linscale* == 1.0 (the
+ default), the space used for the positive and negative
+ halves of the linear range will be equal to one decade in
+ the logarithmic range.
+ """
+ if axis.axis_name == 'x':
+ base = kwargs.pop('basex', 10.0)
+ linthresh = kwargs.pop('linthreshx', 2.0)
+ subs = kwargs.pop('subsx', None)
+ linscale = kwargs.pop('linscalex', 1.0)
+ else:
+ base = kwargs.pop('basey', 10.0)
+ linthresh = kwargs.pop('linthreshy', 2.0)
+ subs = kwargs.pop('subsy', None)
+ linscale = kwargs.pop('linscaley', 1.0)
+
+ if base <= 1.0:
+ raise ValueError("'basex/basey' must be larger than 1")
+ if linthresh <= 0.0:
+ raise ValueError("'linthreshx/linthreshy' must be positive")
+ if linscale <= 0.0:
+            raise ValueError("'linscalex/linscaley' must be positive")
+
+ self._transform = self.SymmetricalLogTransform(base,
+ linthresh,
+ linscale)
+
+ self.base = base
+ self.linthresh = linthresh
+ self.linscale = linscale
+ self.subs = subs
+
+ def set_default_locators_and_formatters(self, axis):
+ """
+ Set the locators and formatters to specialized versions for
+ symmetrical log scaling.
+ """
+ axis.set_major_locator(SymmetricalLogLocator(self.get_transform()))
+ axis.set_major_formatter(LogFormatterSciNotation(self.base))
+ axis.set_minor_locator(SymmetricalLogLocator(self.get_transform(),
+ self.subs))
+ axis.set_minor_formatter(NullFormatter())
+
+ def get_transform(self):
+ """
+ Return a :class:`SymmetricalLogTransform` instance.
+ """
+ return self._transform
+
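+# A minimal usage sketch (illustrative, not part of this module): as with the
+# log scale, the symlog scale is normally requested through the Axes API,
+# which forwards the keyword arguments documented above, e.g.
+#
+#     ax.set_yscale('symlog', linthreshy=0.01, linscaley=0.5, basey=10)
+#
+# Values in (-0.01, 0.01) are then drawn on a linear segment occupying half a
+# decade of axis space on each side of zero; the rest of the axis is
+# logarithmic.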
+
+class LogitTransform(Transform):
+ input_dims = 1
+ output_dims = 1
+ is_separable = True
+ has_inverse = True
+
+ def __init__(self, nonpos='mask'):
+ Transform.__init__(self)
+ self._nonpos = nonpos
+ self._clip = {"clip": True, "mask": False}[nonpos]
+
+ def transform_non_affine(self, a):
+ """logit transform (base 10), masked or clipped"""
+ with np.errstate(divide="ignore", invalid="ignore"):
+ out = np.log10(a / (1 - a))
+ if self._clip: # See LogTransform for choice of clip value.
+ out[a <= 0] = -1000
+ out[1 <= a] = 1000
+ return out
+
+ def inverted(self):
+ return LogisticTransform(self._nonpos)
+
+ def __str__(self):
+ return "{}({!r})".format(type(self).__name__,
+ "clip" if self._clip else "mask")
+
+
+class LogisticTransform(Transform):
+ input_dims = 1
+ output_dims = 1
+ is_separable = True
+ has_inverse = True
+
+ def __init__(self, nonpos='mask'):
+ Transform.__init__(self)
+ self._nonpos = nonpos
+
+ def transform_non_affine(self, a):
+ """logistic transform (base 10)"""
+ return 1.0 / (1 + 10**(-a))
+
+ def inverted(self):
+ return LogitTransform(self._nonpos)
+
+ def __str__(self):
+ return "{}({!r})".format(type(self).__name__, self._nonpos)
+
+
+class LogitScale(ScaleBase):
+ """
+ Logit scale for data between zero and one, both excluded.
+
+ This scale is similar to a log scale close to zero and to one, and almost
+ linear around 0.5. It maps the interval ]0, 1[ onto ]-infty, +infty[.
+ """
+ name = 'logit'
+
+ def __init__(self, axis, nonpos='mask'):
+ """
+        *nonpos*: ['mask' | 'clip']
+ values beyond ]0, 1[ can be masked as invalid, or clipped to a number
+ very close to 0 or 1
+ """
+ if nonpos not in ['mask', 'clip']:
+            raise ValueError("nonpos kwarg must be 'mask' or 'clip'")
+
+ self._transform = LogitTransform(nonpos)
+
+ def get_transform(self):
+ """
+ Return a :class:`LogitTransform` instance.
+ """
+ return self._transform
+
+ def set_default_locators_and_formatters(self, axis):
+ # ..., 0.01, 0.1, 0.5, 0.9, 0.99, ...
+ axis.set_major_locator(LogitLocator())
+ axis.set_major_formatter(LogitFormatter())
+ axis.set_minor_locator(LogitLocator(minor=True))
+ axis.set_minor_formatter(LogitFormatter())
+
+ def limit_range_for_scale(self, vmin, vmax, minpos):
+ """
+ Limit the domain to values between 0 and 1 (excluded).
+ """
+ if not np.isfinite(minpos):
+ minpos = 1e-7 # This value should rarely if ever
+ # end up with a visible effect.
+ return (minpos if vmin <= 0 else vmin,
+ 1 - minpos if vmax >= 1 else vmax)
+
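+# A minimal usage sketch (illustrative, not part of this module): useful for
+# probabilities or proportions strictly inside (0, 1), e.g.
+#
+#     ax.set_yscale('logit')              # or ax.set_yscale('logit', nonpos='clip')
+#     ax.plot([1, 2, 3], [0.01, 0.5, 0.99])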
+
+_scale_mapping = {
+ 'linear': LinearScale,
+ 'log': LogScale,
+ 'symlog': SymmetricalLogScale,
+ 'logit': LogitScale,
+ }
+
+
+def get_scale_names():
+ return sorted(_scale_mapping)
+
+
+def scale_factory(scale, axis, **kwargs):
+ """
+    Return a scale instance by name.
+
+ ACCEPTS: [ %(names)s ]
+ """
+    # Default to 'linear' before normalising case; calling .lower() on None
+    # would raise AttributeError before the check below could run.
+    if scale is None:
+        scale = 'linear'
+    scale = scale.lower()
+
+ if scale not in _scale_mapping:
+ raise ValueError("Unknown scale type '%s'" % scale)
+
+ return _scale_mapping[scale](axis, **kwargs)
+scale_factory.__doc__ = cbook.dedent(scale_factory.__doc__) % \
+ {'names': " | ".join(get_scale_names())}
+
+
+def register_scale(scale_class):
+ """
+ Register a new kind of scale.
+
+ *scale_class* must be a subclass of :class:`ScaleBase`.
+ """
+ _scale_mapping[scale_class.name] = scale_class
+
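+# A minimal registration sketch (illustrative; ``MyScale`` and ``'myscale'``
+# are hypothetical names): a custom scale needs a ``name`` attribute, an
+# ``__init__`` accepting the axis, ``get_transform`` and
+# ``set_default_locators_and_formatters``, after which it is available to
+# ``set_xscale``/``set_yscale`` under that name:
+#
+#     class MyScale(ScaleBase):
+#         name = 'myscale'
+#         def __init__(self, axis, **kwargs):
+#             pass
+#         def get_transform(self):
+#             return IdentityTransform()   # any Transform instance
+#         def set_default_locators_and_formatters(self, axis):
+#             pass
+#
+#     register_scale(MyScale)
+#     ax.set_xscale('myscale')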
+
+def get_scale_docs():
+ """
+ Helper function for generating docstrings related to scales.
+ """
+ docs = []
+ for name in get_scale_names():
+ scale_class = _scale_mapping[name]
+ docs.append(" '%s'" % name)
+ docs.append("")
+ class_docs = cbook.dedent(scale_class.__init__.__doc__)
+ class_docs = "".join([" %s\n" %
+ x for x in class_docs.split("\n")])
+ docs.append(class_docs)
+ docs.append("")
+ return "\n".join(docs)
+
+
+docstring.interpd.update(
+ scale=' | '.join([repr(x) for x in get_scale_names()]),
+ scale_docs=get_scale_docs().rstrip(),
+ )
diff --git a/contrib/python/matplotlib/py2/matplotlib/sphinxext/__init__.py b/contrib/python/matplotlib/py2/matplotlib/sphinxext/__init__.py
new file mode 100644
index 00000000000..800d82e7ee0
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/sphinxext/__init__.py
@@ -0,0 +1,2 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
diff --git a/contrib/python/matplotlib/py2/matplotlib/sphinxext/mathmpl.py b/contrib/python/matplotlib/py2/matplotlib/sphinxext/mathmpl.py
new file mode 100644
index 00000000000..4bfbc52a249
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/sphinxext/mathmpl.py
@@ -0,0 +1,126 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+import six
+import hashlib
+import os
+import sys
+import warnings
+
+from docutils import nodes
+from docutils.parsers.rst import directives
+import sphinx
+
+from matplotlib import rcParams
+from matplotlib.mathtext import MathTextParser
+rcParams['mathtext.fontset'] = 'cm'
+mathtext_parser = MathTextParser("Bitmap")
+
+# Define LaTeX math node:
+class latex_math(nodes.General, nodes.Element):
+ pass
+
+def fontset_choice(arg):
+ return directives.choice(arg, ['cm', 'stix', 'stixsans'])
+
+options_spec = {'fontset': fontset_choice}
+
+def math_role(role, rawtext, text, lineno, inliner,
+ options={}, content=[]):
+ i = rawtext.find('`')
+ latex = rawtext[i+1:-1]
+ node = latex_math(rawtext)
+ node['latex'] = latex
+ node['fontset'] = options.get('fontset', 'cm')
+ return [node], []
+math_role.options = options_spec
+
+def math_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ latex = ''.join(content)
+ node = latex_math(block_text)
+ node['latex'] = latex
+ node['fontset'] = options.get('fontset', 'cm')
+ return [node]
+
+# This uses mathtext to render the expression
+def latex2png(latex, filename, fontset='cm'):
+ latex = "$%s$" % latex
+ orig_fontset = rcParams['mathtext.fontset']
+ rcParams['mathtext.fontset'] = fontset
+ if os.path.exists(filename):
+ depth = mathtext_parser.get_depth(latex, dpi=100)
+ else:
+ try:
+ depth = mathtext_parser.to_png(filename, latex, dpi=100)
+        except Exception:
+ warnings.warn("Could not render math expression %s" % latex,
+ Warning)
+ depth = 0
+ rcParams['mathtext.fontset'] = orig_fontset
+ sys.stdout.write("#")
+ sys.stdout.flush()
+ return depth
+
+# LaTeX to HTML translation stuff:
+def latex2html(node, source):
+ inline = isinstance(node.parent, nodes.TextElement)
+ latex = node['latex']
+ name = 'math-%s' % hashlib.md5(latex.encode()).hexdigest()[-10:]
+
+ destdir = os.path.join(setup.app.builder.outdir, '_images', 'mathmpl')
+ if not os.path.exists(destdir):
+ os.makedirs(destdir)
+ dest = os.path.join(destdir, '%s.png' % name)
+ path = '/'.join((setup.app.builder.imgpath, 'mathmpl'))
+
+ depth = latex2png(latex, dest, node['fontset'])
+
+ if inline:
+ cls = ''
+ else:
+ cls = 'class="center" '
+ if inline and depth != 0:
+ style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
+ else:
+ style = ''
+
+ return '<img src="%s/%s.png" %s%s/>' % (path, name, cls, style)
+
+
+def setup(app):
+ setup.app = app
+
+ # Add visit/depart methods to HTML-Translator:
+ def visit_latex_math_html(self, node):
+ source = self.document.attributes['source']
+ self.body.append(latex2html(node, source))
+
+ def depart_latex_math_html(self, node):
+ pass
+
+ # Add visit/depart methods to LaTeX-Translator:
+ def visit_latex_math_latex(self, node):
+ inline = isinstance(node.parent, nodes.TextElement)
+ if inline:
+ self.body.append('$%s$' % node['latex'])
+ else:
+ self.body.extend(['\\begin{equation}',
+ node['latex'],
+ '\\end{equation}'])
+
+ def depart_latex_math_latex(self, node):
+ pass
+
+ app.add_node(latex_math,
+ html=(visit_latex_math_html, depart_latex_math_html),
+ latex=(visit_latex_math_latex, depart_latex_math_latex))
+ app.add_role('mathmpl', math_role)
+ app.add_directive('mathmpl', math_directive,
+ True, (0, 0, 0), **options_spec)
+ if sphinx.version_info < (1, 8):
+ app.add_role('math', math_role)
+ app.add_directive('math', math_directive,
+ True, (0, 0, 0), **options_spec)
+
+ metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
+ return metadata
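+# Usage sketch (illustrative, not executed here): enable the extension in a
+# Sphinx project's conf.py and use the role/directive registered above.
+#
+#     # conf.py
+#     extensions = ['matplotlib.sphinxext.mathmpl']
+#
+#     # some_page.rst
+#     Inline math with :mathmpl:`\alpha > \beta`, or as a block:
+#
+#     .. mathmpl::
+#        :fontset: stix
+#
+#        \int_0^\infty e^{-x^2}\,dx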
diff --git a/contrib/python/matplotlib/py2/matplotlib/sphinxext/only_directives.py b/contrib/python/matplotlib/py2/matplotlib/sphinxext/only_directives.py
new file mode 100644
index 00000000000..0a5ed70f800
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/sphinxext/only_directives.py
@@ -0,0 +1,75 @@
+#
+# A pair of directives for inserting content that will only appear in
+# either html or latex.
+#
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from docutils.nodes import Body, Element
+
+
+class only_base(Body, Element):
+ def dont_traverse(self, *args, **kwargs):
+ return []
+
+class html_only(only_base):
+ pass
+
+class latex_only(only_base):
+ pass
+
+def run(content, node_class, state, content_offset):
+ text = '\n'.join(content)
+ node = node_class(text)
+ state.nested_parse(content, content_offset, node)
+ return [node]
+
+def html_only_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ return run(content, html_only, state, content_offset)
+
+def latex_only_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ return run(content, latex_only, state, content_offset)
+
+def builder_inited(app):
+ if app.builder.name == 'html':
+ latex_only.traverse = only_base.dont_traverse
+ else:
+ html_only.traverse = only_base.dont_traverse
+
+
+def setup(app):
+ app.add_directive('htmlonly', html_only_directive, True, (0, 0, 0))
+ app.add_directive('latexonly', latex_only_directive, True, (0, 0, 0))
+
+    # This will *really* never see the light of day. As it turns out,
+ # this results in "broken" image nodes since they never get
+ # processed, so best not to do this.
+ # app.connect('builder-inited', builder_inited)
+
+ # Add visit/depart methods to HTML-Translator:
+ def visit_perform(self, node):
+ pass
+
+ def depart_perform(self, node):
+ pass
+
+ def visit_ignore(self, node):
+ node.children = []
+
+ def depart_ignore(self, node):
+ node.children = []
+
+ app.add_node(html_only,
+ html=(visit_perform, depart_perform),
+ latex=(visit_ignore, depart_ignore))
+ app.add_node(latex_only,
+ latex=(visit_perform, depart_perform),
+ html=(visit_ignore, depart_ignore))
+
+ metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
+ return metadata
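+# Usage sketch (illustrative, not executed here): with this extension enabled
+# (``extensions = ['matplotlib.sphinxext.only_directives']`` in conf.py), a
+# document can carry builder-specific content:
+#
+#     .. htmlonly::
+#
+#        This paragraph only appears in the HTML build.
+#
+#     .. latexonly::
+#
+#        This paragraph only appears in the LaTeX build.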
diff --git a/contrib/python/matplotlib/py2/matplotlib/sphinxext/plot_directive.py b/contrib/python/matplotlib/py2/matplotlib/sphinxext/plot_directive.py
new file mode 100644
index 00000000000..434bc50aee2
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/sphinxext/plot_directive.py
@@ -0,0 +1,868 @@
+"""
+A directive for including a matplotlib plot in a Sphinx document.
+
+By default, in HTML output, `plot` will include a .png file with a
+link to a high-res .png and .pdf. In LaTeX output, it will include a
+.pdf.
+
+The source code for the plot may be included in one of three ways:
+
+ 1. **A path to a source file** as the argument to the directive::
+
+ .. plot:: path/to/plot.py
+
+ When a path to a source file is given, the content of the
+ directive may optionally contain a caption for the plot::
+
+ .. plot:: path/to/plot.py
+
+ This is the caption for the plot
+
+ Additionally, one may specify the name of a function to call (with
+ no arguments) immediately after importing the module::
+
+ .. plot:: path/to/plot.py plot_function1
+
+ 2. Included as **inline content** to the directive::
+
+ .. plot::
+
+ import matplotlib.pyplot as plt
+ import matplotlib.image as mpimg
+ import numpy as np
+ img = mpimg.imread('_static/stinkbug.png')
+ imgplot = plt.imshow(img)
+
+ 3. Using **doctest** syntax::
+
+ .. plot::
+ A plotting example:
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot([1,2,3], [4,5,6])
+
+Options
+-------
+
+The ``plot`` directive supports the following options:
+
+ format : {'python', 'doctest'}
+ Specify the format of the input
+
+ include-source : bool
+ Whether to display the source code. The default can be changed
+ using the `plot_include_source` variable in conf.py
+
+ encoding : str
+ If this source file is in a non-UTF8 or non-ASCII encoding,
+ the encoding must be specified using the `:encoding:` option.
+ The encoding will not be inferred using the ``-*- coding -*-``
+ metacomment.
+
+ context : bool or str
+ If provided, the code will be run in the context of all
+ previous plot directives for which the `:context:` option was
+ specified. This only applies to inline code plot directives,
+ not those run from files. If the ``:context: reset`` option is
+ specified, the context is reset for this and future plots, and
+ previous figures are closed prior to running the code.
+        ``:context: close-figs`` keeps the context but closes previous figures
+ before running the code.
+
+ nofigs : bool
+ If specified, the code block will be run, but no figures will
+ be inserted. This is usually useful with the ``:context:``
+ option.
+
+Additionally, this directive supports all of the options of the
+`image` directive, except for `target` (since plot will add its own
+target). These include `alt`, `height`, `width`, `scale`, `align` and
+`class`.
+
+Configuration options
+---------------------
+
+The plot directive has the following configuration options:
+
+ plot_include_source
+ Default value for the include-source option
+
+ plot_html_show_source_link
+ Whether to show a link to the source in HTML.
+
+ plot_pre_code
+ Code that should be executed before each plot. If not specified or None
+ it will default to a string containing::
+
+ import numpy as np
+ from matplotlib import pyplot as plt
+
+ plot_basedir
+        Base directory to which ``plot::`` file names are relative.
+        (If None or empty, file names are relative to the directory of
+        the file containing the directive.)
+
+ plot_formats
+ File formats to generate. List of tuples or strings::
+
+ [(suffix, dpi), suffix, ...]
+
+ that determine the file format and the DPI. For entries whose
+        DPI was omitted, sensible defaults are chosen. When passed from
+        the command line through ``sphinx-build``, the list should be given
+        as ``suffix:dpi,suffix:dpi, ...``.
+
+ plot_html_show_formats
+ Whether to show links to the files in HTML.
+
+ plot_rcparams
+ A dictionary containing any non-standard rcParams that should
+ be applied before each plot.
+
+ plot_apply_rcparams
+ By default, rcParams are applied when `context` option is not used in
+ a plot directive. This configuration option overrides this behavior
+ and applies rcParams before each plot.
+
+ plot_working_directory
+ By default, the working directory will be changed to the directory of
+ the example, so the code can get at its data files, if any. Also its
+ path will be added to `sys.path` so it can import any helper modules
+ sitting beside it. This configuration option can be used to specify
+ a central directory (also added to `sys.path`) where data files and
+ helper modules for all code are located.
+
+ plot_template
+ Provide a customized template for preparing restructured text.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange
+
+import sys, os, shutil, io, re, textwrap
+from os.path import relpath
+import traceback
+import warnings
+
+if not six.PY3:
+ import cStringIO
+
+from docutils.parsers.rst import directives
+from docutils.parsers.rst.directives.images import Image
+align = Image.align
+import sphinx
+
+sphinx_version = sphinx.__version__.split(".")
+# The split is necessary for sphinx beta versions where the string is
+# '6b1'
+sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
+ for x in sphinx_version[:2]])
+
+import jinja2 # Sphinx dependency.
+
+import matplotlib
+import matplotlib.cbook as cbook
+try:
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter("error", UserWarning)
+ matplotlib.use('Agg')
+except UserWarning:
+ import matplotlib.pyplot as plt
+ plt.switch_backend("Agg")
+else:
+ import matplotlib.pyplot as plt
+from matplotlib import _pylab_helpers
+
+__version__ = 2
+
+#------------------------------------------------------------------------------
+# Registration hook
+#------------------------------------------------------------------------------
+
+def plot_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ """Implementation of the ``.. plot::`` directive.
+
+ See the module docstring for details.
+ """
+ return run(arguments, content, options, state_machine, state, lineno)
+
+
+def _option_boolean(arg):
+ if not arg or not arg.strip():
+ # no argument given, assume used as a flag
+ return True
+ elif arg.strip().lower() in ('no', '0', 'false'):
+ return False
+ elif arg.strip().lower() in ('yes', '1', 'true'):
+ return True
+ else:
+ raise ValueError('"%s" unknown boolean' % arg)
+
+
+def _option_context(arg):
+ if arg in [None, 'reset', 'close-figs']:
+ return arg
+ raise ValueError("argument should be None or 'reset' or 'close-figs'")
+
+
+def _option_format(arg):
+ return directives.choice(arg, ('python', 'doctest'))
+
+
+def _option_align(arg):
+ return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
+ "right"))
+
+
+def mark_plot_labels(app, document):
+ """
+ To make plots referenceable, we need to move the reference from
+ the "htmlonly" (or "latexonly") node to the actual figure node
+ itself.
+ """
+ for name, explicit in six.iteritems(document.nametypes):
+ if not explicit:
+ continue
+ labelid = document.nameids[name]
+ if labelid is None:
+ continue
+ node = document.ids[labelid]
+ if node.tagname in ('html_only', 'latex_only'):
+ for n in node:
+ if n.tagname == 'figure':
+ sectname = name
+ for c in n:
+ if c.tagname == 'caption':
+ sectname = c.astext()
+ break
+
+ node['ids'].remove(labelid)
+ node['names'].remove(name)
+ n['ids'].append(labelid)
+ n['names'].append(name)
+ document.settings.env.labels[name] = \
+ document.settings.env.docname, labelid, sectname
+ break
+
+
+def setup(app):
+ setup.app = app
+ setup.config = app.config
+ setup.confdir = app.confdir
+
+ options = {'alt': directives.unchanged,
+ 'height': directives.length_or_unitless,
+ 'width': directives.length_or_percentage_or_unitless,
+ 'scale': directives.nonnegative_int,
+ 'align': _option_align,
+ 'class': directives.class_option,
+ 'include-source': _option_boolean,
+ 'format': _option_format,
+ 'context': _option_context,
+ 'nofigs': directives.flag,
+ 'encoding': directives.encoding
+ }
+
+ app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
+ app.add_config_value('plot_pre_code', None, True)
+ app.add_config_value('plot_include_source', False, True)
+ app.add_config_value('plot_html_show_source_link', True, True)
+ app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
+ app.add_config_value('plot_basedir', None, True)
+ app.add_config_value('plot_html_show_formats', True, True)
+ app.add_config_value('plot_rcparams', {}, True)
+ app.add_config_value('plot_apply_rcparams', False, True)
+ app.add_config_value('plot_working_directory', None, True)
+ app.add_config_value('plot_template', None, True)
+
+ app.connect(str('doctree-read'), mark_plot_labels)
+
+ metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
+ return metadata
+
+#------------------------------------------------------------------------------
+# Doctest handling
+#------------------------------------------------------------------------------
+
+def contains_doctest(text):
+ try:
+ # check if it's valid Python as-is
+ compile(text, '<string>', 'exec')
+ return False
+ except SyntaxError:
+ pass
+ r = re.compile(r'^\s*>>>', re.M)
+ m = r.search(text)
+ return bool(m)
+
+
+def unescape_doctest(text):
+ """
+ Extract code from a piece of text, which contains either Python code
+ or doctests.
+
+ """
+ if not contains_doctest(text):
+ return text
+
+ code = ""
+ for line in text.split("\n"):
+ m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
+ if m:
+ code += m.group(2) + "\n"
+ elif line.strip():
+ code += "# " + line.strip() + "\n"
+ else:
+ code += "\n"
+ return code
+
+
+def split_code_at_show(text):
+ """
+ Split code at plt.show()
+
+ """
+
+ parts = []
+ is_doctest = contains_doctest(text)
+
+ part = []
+ for line in text.split("\n"):
+ if (not is_doctest and line.strip() == 'plt.show()') or \
+ (is_doctest and line.strip() == '>>> plt.show()'):
+ part.append(line)
+ parts.append("\n".join(part))
+ part = []
+ else:
+ part.append(line)
+ if "\n".join(part).strip():
+ parts.append("\n".join(part))
+ return parts
+
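+# For example (illustrative), split_code_at_show turns a code block such as
+#
+#     plt.plot(x)
+#     plt.show()
+#     plt.hist(y)
+#
+# into two pieces, ["plt.plot(x)\nplt.show()", "plt.hist(y)"], so that each
+# piece is rendered as its own set of figures.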
+
+def remove_coding(text):
+ r"""
+ Remove the coding comment, which six.exec\_ doesn't like.
+ """
+ sub_re = re.compile(r"^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
+ return sub_re.sub("", text)
+
+#------------------------------------------------------------------------------
+# Template
+#------------------------------------------------------------------------------
+
+
+TEMPLATE = """
+{{ source_code }}
+
+{{ only_html }}
+
+ {% if source_link or (html_show_formats and not multi_image) %}
+ (
+ {%- if source_link -%}
+ `Source code <{{ source_link }}>`__
+ {%- endif -%}
+ {%- if html_show_formats and not multi_image -%}
+ {%- for img in images -%}
+ {%- for fmt in img.formats -%}
+ {%- if source_link or not loop.first -%}, {% endif -%}
+ `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
+ {%- endfor -%}
+ {%- endfor -%}
+ {%- endif -%}
+ )
+ {% endif %}
+
+ {% for img in images %}
+ .. figure:: {{ build_dir }}/{{ img.basename }}.{{ default_fmt }}
+ {% for option in options -%}
+ {{ option }}
+ {% endfor %}
+
+ {% if html_show_formats and multi_image -%}
+ (
+ {%- for fmt in img.formats -%}
+ {%- if not loop.first -%}, {% endif -%}
+ `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
+ {%- endfor -%}
+ )
+ {%- endif -%}
+
+ {{ caption }}
+ {% endfor %}
+
+{{ only_latex }}
+
+ {% for img in images %}
+ {% if 'pdf' in img.formats -%}
+ .. figure:: {{ build_dir }}/{{ img.basename }}.pdf
+ {% for option in options -%}
+ {{ option }}
+ {% endfor %}
+
+ {{ caption }}
+ {% endif -%}
+ {% endfor %}
+
+{{ only_texinfo }}
+
+ {% for img in images %}
+ .. image:: {{ build_dir }}/{{ img.basename }}.png
+ {% for option in options -%}
+ {{ option }}
+ {% endfor %}
+
+ {% endfor %}
+
+"""
+
+exception_template = """
+.. htmlonly::
+
+ [`source code <%(linkdir)s/%(basename)s.py>`__]
+
+Exception occurred rendering plot.
+
+"""
+
+# the context of the plot for all directives specified with the
+# :context: option
+plot_context = dict()
+
+class ImageFile(object):
+ def __init__(self, basename, dirname):
+ self.basename = basename
+ self.dirname = dirname
+ self.formats = []
+
+ def filename(self, format):
+ return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
+
+ def filenames(self):
+ return [self.filename(fmt) for fmt in self.formats]
+
+
+def out_of_date(original, derived):
+ """
+ Returns True if derivative is out-of-date wrt original,
+ both of which are full file paths.
+ """
+ return (not os.path.exists(derived) or
+ (os.path.exists(original) and
+ os.stat(derived).st_mtime < os.stat(original).st_mtime))
+
+
+class PlotError(RuntimeError):
+ pass
+
+
+def run_code(code, code_path, ns=None, function_name=None):
+ """
+ Import a Python module from a path, and run the function given by
+ name, if function_name is not None.
+ """
+
+ # Change the working directory to the directory of the example, so
+ # it can get at its data files, if any. Add its path to sys.path
+ # so it can import any helper modules sitting beside it.
+ if six.PY2:
+ pwd = os.getcwdu()
+ else:
+ pwd = os.getcwd()
+ old_sys_path = list(sys.path)
+ if setup.config.plot_working_directory is not None:
+ try:
+ os.chdir(setup.config.plot_working_directory)
+ except OSError as err:
+ raise OSError(str(err) + '\n`plot_working_directory` option in'
+ 'Sphinx configuration file must be a valid '
+ 'directory path')
+ except TypeError as err:
+ raise TypeError(str(err) + '\n`plot_working_directory` option in '
+ 'Sphinx configuration file must be a string or '
+ 'None')
+ sys.path.insert(0, setup.config.plot_working_directory)
+ elif code_path is not None:
+ dirname = os.path.abspath(os.path.dirname(code_path))
+ os.chdir(dirname)
+ sys.path.insert(0, dirname)
+
+ # Reset sys.argv
+ old_sys_argv = sys.argv
+ sys.argv = [code_path]
+
+ # Redirect stdout
+ stdout = sys.stdout
+ if six.PY3:
+ sys.stdout = io.StringIO()
+ else:
+ sys.stdout = cStringIO.StringIO()
+
+ # Assign a do-nothing print function to the namespace. There
+ # doesn't seem to be any other way to provide a way to (not) print
+ # that works correctly across Python 2 and 3.
+ def _dummy_print(*arg, **kwarg):
+ pass
+
+ try:
+ try:
+ code = unescape_doctest(code)
+ if ns is None:
+ ns = {}
+ if not ns:
+ if setup.config.plot_pre_code is None:
+ six.exec_(six.text_type("import numpy as np\n" +
+ "from matplotlib import pyplot as plt\n"), ns)
+ else:
+ six.exec_(six.text_type(setup.config.plot_pre_code), ns)
+ ns['print'] = _dummy_print
+ if "__main__" in code:
+ six.exec_("__name__ = '__main__'", ns)
+ code = remove_coding(code)
+ six.exec_(code, ns)
+ if function_name is not None:
+ six.exec_(function_name + "()", ns)
+ except (Exception, SystemExit) as err:
+ raise PlotError(traceback.format_exc())
+ finally:
+ os.chdir(pwd)
+ sys.argv = old_sys_argv
+ sys.path[:] = old_sys_path
+ sys.stdout = stdout
+ return ns
+
+
+def clear_state(plot_rcparams, close=True):
+ if close:
+ plt.close('all')
+ matplotlib.rc_file_defaults()
+ matplotlib.rcParams.update(plot_rcparams)
+
+
+def get_plot_formats(config):
+ default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
+ formats = []
+ plot_formats = config.plot_formats
+ if isinstance(plot_formats, six.string_types):
+        # Sphinx < 1.3 returns the config value as a comma-separated string;
+        # split it to mimic Sphinx 1.3 and later, which always return a list.
+ plot_formats = plot_formats.split(',')
+ for fmt in plot_formats:
+ if isinstance(fmt, six.string_types):
+ if ':' in fmt:
+ suffix, dpi = fmt.split(':')
+ formats.append((str(suffix), int(dpi)))
+ else:
+ formats.append((fmt, default_dpi.get(fmt, 80)))
+ elif type(fmt) in (tuple, list) and len(fmt) == 2:
+ formats.append((str(fmt[0]), int(fmt[1])))
+ else:
+ raise PlotError('invalid image format "%r" in plot_formats' % fmt)
+ return formats
+
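+# For example (illustrative), all of the following config values are accepted
+# and normalised by get_plot_formats into (suffix, dpi) pairs:
+#
+#     plot_formats = "png:100,hires.png:350"    # Sphinx < 1.3 string form
+#     plot_formats = ['png', ('pdf', 150)]      # 'png' gets its default DPI (80)
+#     plot_formats = [('png', 100), ('hires.png', 200), ('pdf', 200)]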
+
+def render_figures(code, code_path, output_dir, output_base, context,
+ function_name, config, context_reset=False,
+ close_figs=False):
+ """
+ Run a pyplot script and save the images in *output_dir*.
+
+ Save the images under *output_dir* with file names derived from
+ *output_base*
+ """
+ formats = get_plot_formats(config)
+
+ # -- Try to determine if all images already exist
+
+ code_pieces = split_code_at_show(code)
+
+ # Look for single-figure output files first
+ all_exists = True
+ img = ImageFile(output_base, output_dir)
+ for format, dpi in formats:
+ if out_of_date(code_path, img.filename(format)):
+ all_exists = False
+ break
+ img.formats.append(format)
+
+ if all_exists:
+ return [(code, [img])]
+
+ # Then look for multi-figure output files
+ results = []
+ all_exists = True
+ for i, code_piece in enumerate(code_pieces):
+ images = []
+ for j in xrange(1000):
+ if len(code_pieces) > 1:
+ img = ImageFile('%s_%02d_%02d' % (output_base, i, j),
+ output_dir)
+ else:
+ img = ImageFile('%s_%02d' % (output_base, j), output_dir)
+ for format, dpi in formats:
+ if out_of_date(code_path, img.filename(format)):
+ all_exists = False
+ break
+ img.formats.append(format)
+
+ # assume that if we have one, we have them all
+ if not all_exists:
+ all_exists = (j > 0)
+ break
+ images.append(img)
+ if not all_exists:
+ break
+ results.append((code_piece, images))
+
+ if all_exists:
+ return results
+
+ # We didn't find the files, so build them
+
+ results = []
+ if context:
+ ns = plot_context
+ else:
+ ns = {}
+
+ if context_reset:
+ clear_state(config.plot_rcparams)
+ plot_context.clear()
+
+ close_figs = not context or close_figs
+
+ for i, code_piece in enumerate(code_pieces):
+
+ if not context or config.plot_apply_rcparams:
+ clear_state(config.plot_rcparams, close_figs)
+ elif close_figs:
+ plt.close('all')
+
+ run_code(code_piece, code_path, ns, function_name)
+
+ images = []
+ fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
+ for j, figman in enumerate(fig_managers):
+ if len(fig_managers) == 1 and len(code_pieces) == 1:
+ img = ImageFile(output_base, output_dir)
+ elif len(code_pieces) == 1:
+ img = ImageFile("%s_%02d" % (output_base, j), output_dir)
+ else:
+ img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
+ output_dir)
+ images.append(img)
+ for format, dpi in formats:
+ try:
+ figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
+ except Exception as err:
+ raise PlotError(traceback.format_exc())
+ img.formats.append(format)
+
+ results.append((code_piece, images))
+
+ if not context or config.plot_apply_rcparams:
+ clear_state(config.plot_rcparams, close=not context)
+
+ return results
+
+
+def run(arguments, content, options, state_machine, state, lineno):
+ document = state_machine.document
+ config = document.settings.env.config
+ nofigs = 'nofigs' in options
+
+ formats = get_plot_formats(config)
+ default_fmt = formats[0][0]
+
+ options.setdefault('include-source', config.plot_include_source)
+ keep_context = 'context' in options
+ context_opt = None if not keep_context else options['context']
+
+ rst_file = document.attributes['source']
+ rst_dir = os.path.dirname(rst_file)
+
+ if len(arguments):
+ if not config.plot_basedir:
+ source_file_name = os.path.join(setup.app.builder.srcdir,
+ directives.uri(arguments[0]))
+ else:
+ source_file_name = os.path.join(setup.confdir, config.plot_basedir,
+ directives.uri(arguments[0]))
+
+ # If there is content, it will be passed as a caption.
+ caption = '\n'.join(content)
+
+ # If the optional function name is provided, use it
+ if len(arguments) == 2:
+ function_name = arguments[1]
+ else:
+ function_name = None
+
+ with io.open(source_file_name, 'r', encoding='utf-8') as fd:
+ code = fd.read()
+ output_base = os.path.basename(source_file_name)
+ else:
+ source_file_name = rst_file
+ code = textwrap.dedent("\n".join(map(six.text_type, content)))
+ counter = document.attributes.get('_plot_counter', 0) + 1
+ document.attributes['_plot_counter'] = counter
+ base, ext = os.path.splitext(os.path.basename(source_file_name))
+ output_base = '%s-%d.py' % (base, counter)
+ function_name = None
+ caption = ''
+
+ base, source_ext = os.path.splitext(output_base)
+ if source_ext in ('.py', '.rst', '.txt'):
+ output_base = base
+ else:
+ source_ext = ''
+
+ # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
+ output_base = output_base.replace('.', '-')
+
+ # is it in doctest format?
+ is_doctest = contains_doctest(code)
+ if 'format' in options:
+ if options['format'] == 'python':
+ is_doctest = False
+ else:
+ is_doctest = True
+
+ # determine output directory name fragment
+ source_rel_name = relpath(source_file_name, setup.confdir)
+ source_rel_dir = os.path.dirname(source_rel_name)
+ while source_rel_dir.startswith(os.path.sep):
+ source_rel_dir = source_rel_dir[1:]
+
+ # build_dir: where to place output files (temporarily)
+ build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
+ 'plot_directive',
+ source_rel_dir)
+ # get rid of .. in paths, also changes pathsep
+ # see note in Python docs for warning about symbolic links on Windows.
+ # need to compare source and dest paths at end
+ build_dir = os.path.normpath(build_dir)
+
+ if not os.path.exists(build_dir):
+ os.makedirs(build_dir)
+
+ # output_dir: final location in the builder's directory
+ dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
+ source_rel_dir))
+ if not os.path.exists(dest_dir):
+        os.makedirs(dest_dir)  # create the output directory if needed
+
+ # how to link to files from the RST file
+ dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
+ source_rel_dir).replace(os.path.sep, '/')
+ try:
+ build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
+ except ValueError:
+ # on Windows, relpath raises ValueError when path and start are on
+ # different mounts/drives
+ build_dir_link = build_dir
+ source_link = dest_dir_link + '/' + output_base + source_ext
+
+ # make figures
+ try:
+ results = render_figures(code,
+ source_file_name,
+ build_dir,
+ output_base,
+ keep_context,
+ function_name,
+ config,
+ context_reset=context_opt == 'reset',
+ close_figs=context_opt == 'close-figs')
+ errors = []
+ except PlotError as err:
+ reporter = state.memo.reporter
+ sm = reporter.system_message(
+ 2, "Exception occurred in plotting {}\n from {}:\n{}".format(
+ output_base, source_file_name, err),
+ line=lineno)
+ results = [(code, [])]
+ errors = [sm]
+
+ # Properly indent the caption
+ caption = '\n'.join(' ' + line.strip()
+ for line in caption.split('\n'))
+
+ # generate output restructuredtext
+ total_lines = []
+ for j, (code_piece, images) in enumerate(results):
+ if options['include-source']:
+ if is_doctest:
+ lines = ['']
+ lines += [row.rstrip() for row in code_piece.split('\n')]
+ else:
+ lines = ['.. code-block:: python', '']
+ lines += [' %s' % row.rstrip()
+ for row in code_piece.split('\n')]
+ source_code = "\n".join(lines)
+ else:
+ source_code = ""
+
+ if nofigs:
+ images = []
+
+ opts = [
+ ':%s: %s' % (key, val) for key, val in six.iteritems(options)
+ if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
+
+ only_html = ".. only:: html"
+ only_latex = ".. only:: latex"
+ only_texinfo = ".. only:: texinfo"
+
+ # Not-None src_link signals the need for a source link in the generated
+ # html
+ if j == 0 and config.plot_html_show_source_link:
+ src_link = source_link
+ else:
+ src_link = None
+
+ result = jinja2.Template(config.plot_template or TEMPLATE).render(
+ default_fmt=default_fmt,
+ dest_dir=dest_dir_link,
+ build_dir=build_dir_link,
+ source_link=src_link,
+ multi_image=len(images) > 1,
+ only_html=only_html,
+ only_latex=only_latex,
+ only_texinfo=only_texinfo,
+ options=opts,
+ images=images,
+ source_code=source_code,
+ html_show_formats=config.plot_html_show_formats and len(images),
+ caption=caption)
+
+ total_lines.extend(result.split("\n"))
+ total_lines.extend("\n")
+
+ if total_lines:
+ state_machine.insert_input(total_lines, source=source_file_name)
+
+ # copy image files to builder's output directory, if necessary
+ if not os.path.exists(dest_dir):
+ cbook.mkdirs(dest_dir)
+
+ for code_piece, images in results:
+ for img in images:
+ for fn in img.filenames():
+ destimg = os.path.join(dest_dir, os.path.basename(fn))
+ if fn != destimg:
+ shutil.copyfile(fn, destimg)
+
+ # copy script (if necessary)
+ target_name = os.path.join(dest_dir, output_base + source_ext)
+ with io.open(target_name, 'w', encoding="utf-8") as f:
+ if source_file_name == rst_file:
+ code_escaped = unescape_doctest(code)
+ else:
+ code_escaped = code
+ f.write(code_escaped)
+
+ return errors
diff --git a/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/README.md b/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/README.md
new file mode 100644
index 00000000000..e53d0adc5bd
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/README.md
@@ -0,0 +1,3 @@
+# Test project for matplotlib sphinx extensions
+
+A tiny sphinx project from ``sphinx-quickstart`` with all default answers.
diff --git a/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/_static/README.txt b/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/_static/README.txt
new file mode 100644
index 00000000000..ebde2c4b4ab
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/_static/README.txt
@@ -0,0 +1,7 @@
+##############################
+Static directory for tinypages
+##############################
+
+We need this README file to make sure the ``_static`` directory gets created
+in the installation. The tests check for warnings in builds, and a missing
+``_static`` directory raises one.
diff --git a/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/index.rst b/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/index.rst
new file mode 100644
index 00000000000..3905483a8a5
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/index.rst
@@ -0,0 +1,21 @@
+.. tinypages documentation master file, created by
+ sphinx-quickstart on Tue Mar 18 11:58:34 2014.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to tinypages's documentation!
+=====================================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ some_plots
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/some_plots.rst b/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/some_plots.rst
new file mode 100644
index 00000000000..615908b0107
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/sphinxext/tests/tinypages/some_plots.rst
@@ -0,0 +1,129 @@
+##########
+Some plots
+##########
+
+Plot 1 does not use context:
+
+.. plot::
+
+ plt.plot(range(10))
+ a = 10
+
+Plot 2 doesn't use context either; has length 6:
+
+.. plot::
+
+ plt.plot(range(6))
+
+Plot 3 has length 4:
+
+.. plot::
+
+ plt.plot(range(4))
+
+Plot 4 shows that a new block with context does not see the variable defined
+in the no-context block:
+
+.. plot::
+ :context:
+
+ assert 'a' not in globals()
+
+Plot 5 defines ``a`` in a context block:
+
+.. plot::
+ :context:
+
+ plt.plot(range(6))
+ a = 10
+
+Plot 6 shows that a block with context sees the new variable. It also uses
+``:nofigs:``:
+
+.. plot::
+ :context:
+ :nofigs:
+
+ assert a == 10
+ b = 4
+
+Plot 7 uses a variable defined in the previous ``nofigs`` context block. It
+also closes any previous figures to create a fresh figure:
+
+.. plot::
+ :context: close-figs
+
+ assert b == 4
+ plt.plot(range(b))
+
+Plot 8 shows that a non-context block still doesn't have ``a``:
+
+.. plot::
+ :nofigs:
+
+ assert 'a' not in globals()
+
+Plot 9 has a context block, and does have ``a``:
+
+.. plot::
+ :context:
+ :nofigs:
+
+ assert a == 10
+
+Plot 10 resets context, and ``a`` has gone again:
+
+.. plot::
+ :context: reset
+ :nofigs:
+
+ assert 'a' not in globals()
+ c = 10
+
+Plot 11 continues the context, we have the new value, but not the old:
+
+.. plot::
+ :context:
+
+ assert c == 10
+ assert 'a' not in globals()
+ plt.plot(range(c))
+
+Plot 12 opens a new figure. By default the directive will plot both the first
+and the second figure:
+
+.. plot::
+ :context:
+
+ plt.figure()
+ plt.plot(range(6))
+
+Plot 13 shows ``close-figs`` in action. ``close-figs`` closes all figures
+from previous plot directives, so we always get only the figure created in
+this directive:
+
+.. plot::
+ :context: close-figs
+
+ plt.figure()
+ plt.plot(range(4))
+
+Plot 14 uses ``include-source``:
+
+.. plot::
+ :include-source:
+
+ # Only a comment
+
+Plot 15 uses an external file with the plot commands and a caption:
+
+.. plot:: range4.py
+
+ This is the caption for plot 15.
+
+
+Plot 16 uses a specific function in a file with plot commands:
+
+.. plot:: range6.py range6
+
+
diff --git a/contrib/python/matplotlib/py2/matplotlib/spines.py b/contrib/python/matplotlib/py2/matplotlib/spines.py
new file mode 100644
index 00000000000..1e75c6ed616
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/spines.py
@@ -0,0 +1,542 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import matplotlib
+
+from matplotlib.artist import allow_rasterization
+from matplotlib import docstring
+import matplotlib.transforms as mtransforms
+import matplotlib.patches as mpatches
+import matplotlib.path as mpath
+import numpy as np
+import warnings
+
+rcParams = matplotlib.rcParams
+
+
+class Spine(mpatches.Patch):
+ """an axis spine -- the line noting the data area boundaries
+
+ Spines are the lines connecting the axis tick marks and noting the
+ boundaries of the data area. They can be placed at arbitrary
+    positions. See :meth:`~matplotlib.spines.Spine.set_position`
+ for more information.
+
+ The default position is ``('outward',0)``.
+
+    Spines are subclasses of :class:`~matplotlib.patches.Patch`, and
+ inherit much of their behavior.
+
+    Spines draw a line, a circle, or an arc depending on whether
+    :meth:`~matplotlib.spines.Spine.set_patch_line`,
+    :meth:`~matplotlib.spines.Spine.set_patch_circle`, or
+    :meth:`~matplotlib.spines.Spine.set_patch_arc` has been called.
+ Line-like is the default.
+
+ """
+ def __str__(self):
+ return "Spine"
+
+ @docstring.dedent_interpd
+ def __init__(self, axes, spine_type, path, **kwargs):
+ """
+ - *axes* : the Axes instance containing the spine
+ - *spine_type* : a string specifying the spine type
+ - *path* : the path instance used to draw the spine
+
+ Valid kwargs are:
+ %(Patch)s
+ """
+ super(Spine, self).__init__(**kwargs)
+ self.axes = axes
+ self.set_figure(self.axes.figure)
+ self.spine_type = spine_type
+ self.set_facecolor('none')
+ self.set_edgecolor(rcParams['axes.edgecolor'])
+ self.set_linewidth(rcParams['axes.linewidth'])
+ self.set_capstyle('projecting')
+ self.axis = None
+
+ self.set_zorder(2.5)
+ self.set_transform(self.axes.transData) # default transform
+
+ self._bounds = None # default bounds
+ self._smart_bounds = False
+
+ # Defer initial position determination. (Not much support for
+ # non-rectangular axes is currently implemented, and this lets
+ # them pass through the spines machinery without errors.)
+ self._position = None
+ if not isinstance(path, matplotlib.path.Path):
+ raise ValueError(
+ "'path' must be an instance of 'matplotlib.path.Path'")
+ self._path = path
+
+ # To support drawing both linear and circular spines, this
+ # class implements Patch behavior three ways. If
+ # self._patch_type == 'line', behave like a mpatches.PathPatch
+ # instance. If self._patch_type == 'circle', behave like a
+ # mpatches.Ellipse instance. If self._patch_type == 'arc', behave like
+ # a mpatches.Arc instance.
+ self._patch_type = 'line'
+
+ # Behavior copied from mpatches.Ellipse:
+ # Note: This cannot be calculated until this is added to an Axes
+ self._patch_transform = mtransforms.IdentityTransform()
+
+ def set_smart_bounds(self, value):
+ """set the spine and associated axis to have smart bounds"""
+ self._smart_bounds = value
+
+ # also set the axis if possible
+ if self.spine_type in ('left', 'right'):
+ self.axes.yaxis.set_smart_bounds(value)
+ elif self.spine_type in ('top', 'bottom'):
+ self.axes.xaxis.set_smart_bounds(value)
+ self.stale = True
+
+ def get_smart_bounds(self):
+ """get whether the spine has smart bounds"""
+ return self._smart_bounds
+
+ def set_patch_arc(self, center, radius, theta1, theta2):
+ """set the spine to be arc-like"""
+ self._patch_type = 'arc'
+ self._center = center
+ self._width = radius * 2
+ self._height = radius * 2
+ self._theta1 = theta1
+ self._theta2 = theta2
+ self._path = mpath.Path.arc(theta1, theta2)
+ # arc drawn on axes transform
+ self.set_transform(self.axes.transAxes)
+ self.stale = True
+
+ def set_patch_circle(self, center, radius):
+ """set the spine to be circular"""
+ self._patch_type = 'circle'
+ self._center = center
+ self._width = radius * 2
+ self._height = radius * 2
+ # circle drawn on axes transform
+ self.set_transform(self.axes.transAxes)
+ self.stale = True
+
+ def set_patch_line(self):
+ """set the spine to be linear"""
+ self._patch_type = 'line'
+ self.stale = True
+
+ # Behavior copied from mpatches.Ellipse:
+ def _recompute_transform(self):
+ """NOTE: This cannot be called until after this has been added
+ to an Axes, otherwise unit conversion will fail. This
+ makes it very important to call the accessor method and
+ not directly access the transformation member variable.
+ """
+ assert self._patch_type in ('arc', 'circle')
+ center = (self.convert_xunits(self._center[0]),
+ self.convert_yunits(self._center[1]))
+ width = self.convert_xunits(self._width)
+ height = self.convert_yunits(self._height)
+ self._patch_transform = mtransforms.Affine2D() \
+ .scale(width * 0.5, height * 0.5) \
+ .translate(*center)
+
+ def get_patch_transform(self):
+ if self._patch_type in ('arc', 'circle'):
+ self._recompute_transform()
+ return self._patch_transform
+ else:
+ return super(Spine, self).get_patch_transform()
+
+ def get_path(self):
+ return self._path
+
+ def _ensure_position_is_set(self):
+ if self._position is None:
+ # default position
+ self._position = ('outward', 0.0) # in points
+ self.set_position(self._position)
+
+ def register_axis(self, axis):
+ """register an axis
+
+ An axis should be registered with its corresponding spine from
+ the Axes instance. This allows the spine to clear any axis
+ properties when needed.
+ """
+ self.axis = axis
+ if self.axis is not None:
+ self.axis.cla()
+ self.stale = True
+
+ def cla(self):
+ """Clear the current spine"""
+ self._position = None # clear position
+ if self.axis is not None:
+ self.axis.cla()
+
+ def is_frame_like(self):
+ """return True if directly on axes frame
+
+ This is useful for determining if a spine is the edge of an
+ old style MPL plot. If so, this function will return True.
+ """
+ self._ensure_position_is_set()
+ position = self._position
+ if isinstance(position, six.string_types):
+ if position == 'center':
+ position = ('axes', 0.5)
+ elif position == 'zero':
+ position = ('data', 0)
+ if len(position) != 2:
+ raise ValueError("position should be 2-tuple")
+ position_type, amount = position
+ if position_type == 'outward' and amount == 0:
+ return True
+ else:
+ return False
+
+ def _adjust_location(self):
+ """automatically set spine bounds to the view interval"""
+
+ if self.spine_type == 'circle':
+ return
+
+ if self._bounds is None:
+ if self.spine_type in ('left', 'right'):
+ low, high = self.axes.viewLim.intervaly
+ elif self.spine_type in ('top', 'bottom'):
+ low, high = self.axes.viewLim.intervalx
+ else:
+ raise ValueError('unknown spine spine_type: %s' %
+ self.spine_type)
+
+ if self._smart_bounds:
+ # attempt to set bounds in sophisticated way
+
+ # handle inverted limits
+ viewlim_low, viewlim_high = sorted([low, high])
+
+ if self.spine_type in ('left', 'right'):
+ datalim_low, datalim_high = self.axes.dataLim.intervaly
+ ticks = self.axes.get_yticks()
+ elif self.spine_type in ('top', 'bottom'):
+ datalim_low, datalim_high = self.axes.dataLim.intervalx
+ ticks = self.axes.get_xticks()
+ # handle inverted limits
+ ticks = np.sort(ticks)
+ datalim_low, datalim_high = sorted([datalim_low, datalim_high])
+
+ if datalim_low < viewlim_low:
+ # Data extends past view. Clip line to view.
+ low = viewlim_low
+ else:
+ # Data ends before view ends.
+ cond = (ticks <= datalim_low) & (ticks >= viewlim_low)
+ tickvals = ticks[cond]
+ if len(tickvals):
+ # A tick is less than or equal to lowest data point.
+ low = tickvals[-1]
+ else:
+ # No tick is available
+ low = datalim_low
+ low = max(low, viewlim_low)
+
+ if datalim_high > viewlim_high:
+ # Data extends past view. Clip line to view.
+ high = viewlim_high
+ else:
+ # Data ends before view ends.
+ cond = (ticks >= datalim_high) & (ticks <= viewlim_high)
+ tickvals = ticks[cond]
+ if len(tickvals):
+ # A tick is greater than or equal to highest data
+ # point.
+ high = tickvals[0]
+ else:
+ # No tick is available
+ high = datalim_high
+ high = min(high, viewlim_high)
+
+ else:
+ low, high = self._bounds
+
+ if self._patch_type == 'arc':
+ if self.spine_type in ('bottom', 'top'):
+ try:
+ direction = self.axes.get_theta_direction()
+ except AttributeError:
+ direction = 1
+ try:
+ offset = self.axes.get_theta_offset()
+ except AttributeError:
+ offset = 0
+ low = low * direction + offset
+ high = high * direction + offset
+ if low > high:
+ low, high = high, low
+
+ self._path = mpath.Path.arc(np.rad2deg(low), np.rad2deg(high))
+
+ if self.spine_type == 'bottom':
+ rmin, rmax = self.axes.viewLim.intervaly
+ try:
+ rorigin = self.axes.get_rorigin()
+ except AttributeError:
+ rorigin = rmin
+ scaled_diameter = (rmin - rorigin) / (rmax - rorigin)
+ self._height = scaled_diameter
+ self._width = scaled_diameter
+
+ else:
+ raise ValueError('unable to set bounds for spine "%s"' %
+ self.spine_type)
+ else:
+ v1 = self._path.vertices
+ assert v1.shape == (2, 2), 'unexpected vertices shape'
+ if self.spine_type in ['left', 'right']:
+ v1[0, 1] = low
+ v1[1, 1] = high
+ elif self.spine_type in ['bottom', 'top']:
+ v1[0, 0] = low
+ v1[1, 0] = high
+ else:
+ raise ValueError('unable to set bounds for spine "%s"' %
+ self.spine_type)
+
+ @allow_rasterization
+ def draw(self, renderer):
+ self._adjust_location()
+ ret = super(Spine, self).draw(renderer)
+ self.stale = False
+ return ret
+
+ def _calc_offset_transform(self):
+ """calculate the offset transform performed by the spine"""
+ self._ensure_position_is_set()
+ position = self._position
+ if isinstance(position, six.string_types):
+ if position == 'center':
+ position = ('axes', 0.5)
+ elif position == 'zero':
+ position = ('data', 0)
+ assert len(position) == 2, "position should be 2-tuple"
+ position_type, amount = position
+ assert position_type in ('axes', 'outward', 'data')
+ if position_type == 'outward':
+ if amount == 0:
+ # short circuit commonest case
+ self._spine_transform = ('identity',
+ mtransforms.IdentityTransform())
+ elif self.spine_type in ['left', 'right', 'top', 'bottom']:
+ offset_vec = {'left': (-1, 0),
+ 'right': (1, 0),
+ 'bottom': (0, -1),
+ 'top': (0, 1),
+ }[self.spine_type]
+ # calculate x and y offset in dots
+ offset_x = amount * offset_vec[0] / 72.0
+ offset_y = amount * offset_vec[1] / 72.0
+ self._spine_transform = ('post',
+ mtransforms.ScaledTranslation(
+ offset_x,
+ offset_y,
+ self.figure.dpi_scale_trans))
+ else:
+ warnings.warn('unknown spine type "%s": no spine '
+ 'offset performed' % self.spine_type)
+ self._spine_transform = ('identity',
+ mtransforms.IdentityTransform())
+ elif position_type == 'axes':
+ if self.spine_type in ('left', 'right'):
+ self._spine_transform = ('pre',
+ mtransforms.Affine2D.from_values(
+ # keep y unchanged, fix x at
+ # amount
+ 0, 0, 0, 1, amount, 0))
+ elif self.spine_type in ('bottom', 'top'):
+ self._spine_transform = ('pre',
+ mtransforms.Affine2D.from_values(
+ # keep x unchanged, fix y at
+ # amount
+ 1, 0, 0, 0, 0, amount))
+ else:
+ warnings.warn('unknown spine type "%s": no spine '
+ 'offset performed' % self.spine_type)
+ self._spine_transform = ('identity',
+ mtransforms.IdentityTransform())
+ elif position_type == 'data':
+ if self.spine_type in ('right', 'top'):
+ # The right and top spines have a default position of 1 in
+ # axes coordinates. When specifying the position in data
+ # coordinates, we need to calculate the position relative to 0.
+ amount -= 1
+ if self.spine_type in ('left', 'right'):
+ self._spine_transform = ('data',
+ mtransforms.Affine2D().translate(
+ amount, 0))
+ elif self.spine_type in ('bottom', 'top'):
+ self._spine_transform = ('data',
+ mtransforms.Affine2D().translate(
+ 0, amount))
+ else:
+ warnings.warn('unknown spine type "%s": no spine '
+ 'offset performed' % self.spine_type)
+ self._spine_transform = ('identity',
+ mtransforms.IdentityTransform())
+
+ def set_position(self, position):
+ """set the position of the spine
+
+ Spine position is specified by a 2-tuple of (position type,
+ amount). The position types are:
+
+ * 'outward' : place the spine out from the data area by the
+ specified number of points. (Negative values specify placing the
+ spine inward.)
+
+ * 'axes' : place the spine at the specified Axes coordinate (from
+ 0.0-1.0).
+
+ * 'data' : place the spine at the specified data coordinate.
+
+ Additionally, shorthand notations define special positions:
+
+ * 'center' -> ('axes',0.5)
+ * 'zero' -> ('data', 0.0)
+
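+ A minimal usage sketch (assuming a standard pyplot ``Axes`` named ``ax``;
+ illustrative only, not part of the formal description above)::
+
+ ax.spines['left'].set_position(('outward', 10))
+ ax.spines['bottom'].set_position('zero')
+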
+ """
+ if position in ('center', 'zero'):
+ # special positions
+ pass
+ else:
+ if len(position) != 2:
+ raise ValueError("position should be 'center' or 2-tuple")
+ if position[0] not in ['outward', 'axes', 'data']:
+ raise ValueError("position[0] should be one of 'outward', "
+ "'axes', or 'data' ")
+ self._position = position
+ self._calc_offset_transform()
+
+ self.set_transform(self.get_spine_transform())
+
+ if self.axis is not None:
+ self.axis.reset_ticks()
+ self.stale = True
+
+ def get_position(self):
+ """get the spine position"""
+ self._ensure_position_is_set()
+ return self._position
+
+ def get_spine_transform(self):
+ """get the spine transform"""
+ self._ensure_position_is_set()
+ what, how = self._spine_transform
+
+ if what == 'data':
+ # special case data based spine locations
+ data_xform = self.axes.transScale + \
+ (how + self.axes.transLimits + self.axes.transAxes)
+ if self.spine_type in ['left', 'right']:
+ result = mtransforms.blended_transform_factory(
+ data_xform, self.axes.transData)
+ elif self.spine_type in ['top', 'bottom']:
+ result = mtransforms.blended_transform_factory(
+ self.axes.transData, data_xform)
+ else:
+ raise ValueError('unknown spine spine_type: %s' %
+ self.spine_type)
+ return result
+
+ if self.spine_type in ['left', 'right']:
+ base_transform = self.axes.get_yaxis_transform(which='grid')
+ elif self.spine_type in ['top', 'bottom']:
+ base_transform = self.axes.get_xaxis_transform(which='grid')
+ else:
+ raise ValueError('unknown spine spine_type: %s' %
+ self.spine_type)
+
+ if what == 'identity':
+ return base_transform
+ elif what == 'post':
+ return base_transform + how
+ elif what == 'pre':
+ return how + base_transform
+ else:
+ raise ValueError("unknown spine_transform type: %s" % what)
+
+ def set_bounds(self, low, high):
+ """Set the bounds of the spine."""
+ if self.spine_type == 'circle':
+ raise ValueError(
+ 'set_bounds() method incompatible with circular spines')
+ self._bounds = (low, high)
+ self.stale = True
+
+ def get_bounds(self):
+ """Get the bounds of the spine."""
+ return self._bounds
+
+ @classmethod
+ def linear_spine(cls, axes, spine_type, **kwargs):
+ """
+ (classmethod) Returns a linear :class:`Spine`.
+ """
+ # all values of 13 get replaced upon call to set_bounds()
+ if spine_type == 'left':
+ path = mpath.Path([(0.0, 13), (0.0, 13)])
+ elif spine_type == 'right':
+ path = mpath.Path([(1.0, 13), (1.0, 13)])
+ elif spine_type == 'bottom':
+ path = mpath.Path([(13, 0.0), (13, 0.0)])
+ elif spine_type == 'top':
+ path = mpath.Path([(13, 1.0), (13, 1.0)])
+ else:
+ raise ValueError('unable to make path for spine "%s"' % spine_type)
+ result = cls(axes, spine_type, path, **kwargs)
+ result.set_visible(rcParams['axes.spines.{0}'.format(spine_type)])
+
+ return result
+
+ @classmethod
+ def arc_spine(cls, axes, spine_type, center, radius, theta1, theta2,
+ **kwargs):
+ """
+ (classmethod) Returns an arc :class:`Spine`.
+ """
+ path = mpath.Path.arc(theta1, theta2)
+ result = cls(axes, spine_type, path, **kwargs)
+ result.set_patch_arc(center, radius, theta1, theta2)
+ return result
+
+ @classmethod
+ def circular_spine(cls, axes, center, radius, **kwargs):
+ """
+ (classmethod) Returns a circular :class:`Spine`.
+ """
+ path = mpath.Path.unit_circle()
+ spine_type = 'circle'
+ result = cls(axes, spine_type, path, **kwargs)
+ result.set_patch_circle(center, radius)
+ return result
+
+ def set_color(self, c):
+ """
+ Set the edgecolor.
+
+ ACCEPTS: matplotlib color arg or sequence of rgba tuples
+
+ .. seealso::
+
+ :meth:`set_facecolor`, :meth:`set_edgecolor`
+ For setting the edge or face color individually.
+ """
+ # The facecolor of a spine is always 'none' by default -- let
+ # the user change it manually if desired.
+ self.set_edgecolor(c)
+ self.stale = True
diff --git a/contrib/python/matplotlib/py2/matplotlib/stackplot.py b/contrib/python/matplotlib/py2/matplotlib/stackplot.py
new file mode 100644
index 00000000000..2b57aeb2b96
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/stackplot.py
@@ -0,0 +1,126 @@
+"""
+Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
+answer:
+http://stackoverflow.com/questions/2225995/how-can-i-create-stacked-line-graph-with-matplotlib
+
+(http://stackoverflow.com/users/66549/doug)
+
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange
+import numpy as np
+
+__all__ = ['stackplot']
+
+
+def stackplot(axes, x, *args, **kwargs):
+ """
+ Draws a stacked area plot.
+
+ Parameters
+ ----------
+ x : 1d array of dimension N
+
+ y : 2d array (dimension MxN), or sequence of 1d arrays (each dimension 1xN)
+
+ The data is assumed to be unstacked. Each of the following
+ calls is legal::
+
+ stackplot(x, y) # where y is MxN
+ stackplot(x, y1, y2, y3, y4) # where y1, y2, y3, y4 are all 1xN
+
+ baseline : ['zero' | 'sym' | 'wiggle' | 'weighted_wiggle']
+ Method used to calculate the baseline:
+
+ - ``'zero'``: Constant zero baseline, i.e. a simple stacked plot.
+ - ``'sym'``: Symmetric around zero and is sometimes called
+ 'ThemeRiver'.
+ - ``'wiggle'``: Minimizes the sum of the squared slopes.
+ - ``'weighted_wiggle'``: Does the same but weights to account for
+ size of each layer. It is also called 'Streamgraph'-layout. More
+ details can be found at http://leebyron.com/streamgraph/.
+
+ labels : sequence of strings
+ Labels to assign to each data series (one label per stacked area).
+
+ colors : sequence of colors
+ A list or tuple of colors, one per data series. These will be
+ cycled through and used to colour the stacked areas.
+
+ **kwargs :
+ All other keyword arguments are passed to `Axes.fill_between()`.
+
+
+ Returns
+ -------
+ list : list of `.PolyCollection`
+ A list of `.PolyCollection` instances, one for each element in the
+ stacked area plot.
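+
+ A minimal usage sketch (via the pyplot wrapper ``Axes.stackplot``, which
+ forwards to this function)::
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ x = np.arange(4)
+ fig, ax = plt.subplots()
+ ax.stackplot(x, [1, 1, 2, 3], [2, 2, 2, 2],
+              labels=['a', 'b'], baseline='wiggle')
+ ax.legend(loc='upper left')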
+ """
+
+ y = np.row_stack(args)
+
+ labels = iter(kwargs.pop('labels', []))
+
+ colors = kwargs.pop('colors', None)
+ if colors is not None:
+ axes.set_prop_cycle(color=colors)
+
+ baseline = kwargs.pop('baseline', 'zero')
+ # Assume data passed has not been 'stacked', so stack it here.
+ # We'll need a float buffer for the upcoming calculations.
+ stack = np.cumsum(y, axis=0, dtype=np.promote_types(y.dtype, np.float32))
+
+ if baseline == 'zero':
+ first_line = 0.
+
+ elif baseline == 'sym':
+ first_line = -np.sum(y, 0) * 0.5
+ stack += first_line[None, :]
+
+ elif baseline == 'wiggle':
+ m = y.shape[0]
+ first_line = (y * (m - 0.5 - np.arange(m)[:, None])).sum(0)
+ first_line /= -m
+ stack += first_line
+
+ elif baseline == 'weighted_wiggle':
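+ # Streamgraph ('weighted wiggle') baseline: like 'wiggle', but the
+ # slope minimization is weighted by layer thickness (see the
+ # leebyron.com reference in the docstring above).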
+ m, n = y.shape
+ total = np.sum(y, 0)
+ # multiply by 1/total (or zero) to avoid infinities in the division:
+ inv_total = np.zeros_like(total)
+ mask = total > 0
+ inv_total[mask] = 1.0 / total[mask]
+ increase = np.hstack((y[:, 0:1], np.diff(y)))
+ below_size = total - stack
+ below_size += 0.5 * y
+ move_up = below_size * inv_total
+ move_up[:, 0] = 0.5
+ center = (move_up - 0.5) * increase
+ center = np.cumsum(center.sum(0))
+ first_line = center - 0.5 * total
+ stack += first_line
+
+ else:
+ errstr = "Baseline method %s not recognised. " % baseline
+ errstr += "Expected 'zero', 'sym', 'wiggle' or 'weighted_wiggle'"
+ raise ValueError(errstr)
+
+ # Color between x = 0 and the first array.
+ color = axes._get_lines.get_next_color()
+ coll = axes.fill_between(x, first_line, stack[0, :],
+ facecolor=color, label=next(labels, None),
+ **kwargs)
+ coll.sticky_edges.y[:] = [0]
+ r = [coll]
+
+ # Color between array i-1 and array i
+ for i in xrange(len(y) - 1):
+ color = axes._get_lines.get_next_color()
+ r.append(axes.fill_between(x, stack[i, :], stack[i + 1, :],
+ facecolor=color, label=next(labels, None),
+ **kwargs))
+ return r
diff --git a/contrib/python/matplotlib/py2/matplotlib/streamplot.py b/contrib/python/matplotlib/py2/matplotlib/streamplot.py
new file mode 100644
index 00000000000..752a11eb4aa
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/streamplot.py
@@ -0,0 +1,674 @@
+"""
+Streamline plotting for 2D vector fields.
+
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange
+
+import numpy as np
+import matplotlib
+import matplotlib.cm as cm
+import matplotlib.colors as mcolors
+import matplotlib.collections as mcollections
+import matplotlib.lines as mlines
+import matplotlib.patches as patches
+
+
+__all__ = ['streamplot']
+
+
+def streamplot(axes, x, y, u, v, density=1, linewidth=None, color=None,
+ cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
+ minlength=0.1, transform=None, zorder=None, start_points=None,
+ maxlength=4.0, integration_direction='both'):
+ """Draws streamlines of a vector flow.
+
+ *x*, *y* : 1d arrays
+ an *evenly spaced* grid.
+ *u*, *v* : 2d arrays
+ x and y-velocities. Number of rows should match length of y, and
+ the number of columns should match x.
+ *density* : float or 2-tuple
+ Controls the closeness of streamlines. When `density = 1`, the domain
+ is divided into a 30x30 grid---*density* linearly scales this grid.
+ Each cell in the grid can have, at most, one traversing streamline.
+ For different densities in each direction, use [density_x, density_y].
+ *linewidth* : numeric or 2d array
+ Width of the streamlines. When given a 2d array with the same shape as
+ the velocities, the linewidth varies along each streamline.
+ *color* : matplotlib color code, or 2d array
+ Streamline color. When given an array with the same shape as
+ velocities, *color* values are converted to colors using *cmap*.
+ *cmap* : :class:`~matplotlib.colors.Colormap`
+ Colormap used to plot streamlines and arrows. Only necessary when using
+ an array input for *color*.
+ *norm* : :class:`~matplotlib.colors.Normalize`
+ Normalize object used to scale luminance data to 0, 1. If None, stretch
+ (min, max) to (0, 1). Only necessary when *color* is an array.
+ *arrowsize* : float
+ Scaling factor for the arrow size.
+ *arrowstyle* : str
+ Arrow style specification.
+ See :class:`~matplotlib.patches.FancyArrowPatch`.
+ *minlength* : float
+ Minimum length of streamline in axes coordinates.
+ *start_points* : Nx2 array
+ Coordinates of starting points for the streamlines, given in data
+ coordinates (the same coordinate system as the ``x`` and ``y`` arrays).
+ *zorder* : int
+ Drawing order of the streamlines and arrows relative to other artists.
+ *maxlength* : float
+ Maximum length of streamline in axes coordinates.
+ *integration_direction* : ['forward', 'backward', 'both']
+ Integrate the streamline in forward, backward or both directions.
+
+ Returns:
+
+ *stream_container* : StreamplotSet
+ Container object with attributes
+
+ - lines: `matplotlib.collections.LineCollection` of streamlines
+
+ - arrows: collection of `matplotlib.patches.FancyArrowPatch`
+ objects representing arrows half-way along stream
+ lines.
+
+ This container will probably change in the future to allow changes
+ to the colormap, alpha, etc. for both lines and arrows, but these
+ changes should be backward compatible.
+
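+ A minimal usage sketch (via the pyplot wrapper ``Axes.streamplot``)::
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ Y, X = np.mgrid[-3:3:100j, -3:3:100j]
+ U, V = -Y, X
+ fig, ax = plt.subplots()
+ ax.streamplot(X, Y, U, V, density=1.5, linewidth=1)
+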
+ """
+ grid = Grid(x, y)
+ mask = StreamMask(density)
+ dmap = DomainMap(grid, mask)
+
+ if zorder is None:
+ zorder = mlines.Line2D.zorder
+
+ # default to data coordinates
+ if transform is None:
+ transform = axes.transData
+
+ if color is None:
+ color = axes._get_lines.get_next_color()
+
+ if linewidth is None:
+ linewidth = matplotlib.rcParams['lines.linewidth']
+
+ line_kw = {}
+ arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize)
+
+ if integration_direction not in ['both', 'forward', 'backward']:
+ errstr = ("Integration direction '%s' not recognised. "
+ "Expected 'both', 'forward' or 'backward'." %
+ integration_direction)
+ raise ValueError(errstr)
+
+ if integration_direction == 'both':
+ maxlength /= 2.
+
+ use_multicolor_lines = isinstance(color, np.ndarray)
+ if use_multicolor_lines:
+ if color.shape != grid.shape:
+ raise ValueError(
+ "If 'color' is given, must have the shape of 'Grid(x,y)'")
+ line_colors = []
+ color = np.ma.masked_invalid(color)
+ else:
+ line_kw['color'] = color
+ arrow_kw['color'] = color
+
+ if isinstance(linewidth, np.ndarray):
+ if linewidth.shape != grid.shape:
+ raise ValueError(
+ "If 'linewidth' is given, must have the shape of 'Grid(x,y)'")
+ line_kw['linewidth'] = []
+ else:
+ line_kw['linewidth'] = linewidth
+ arrow_kw['linewidth'] = linewidth
+
+ line_kw['zorder'] = zorder
+ arrow_kw['zorder'] = zorder
+
+ ## Sanity checks.
+ if u.shape != grid.shape or v.shape != grid.shape:
+ raise ValueError("'u' and 'v' must be of shape 'Grid(x,y)'")
+
+ u = np.ma.masked_invalid(u)
+ v = np.ma.masked_invalid(v)
+
+ integrate = get_integrator(u, v, dmap, minlength, maxlength,
+ integration_direction)
+
+ trajectories = []
+ if start_points is None:
+ for xm, ym in _gen_starting_points(mask.shape):
+ if mask[ym, xm] == 0:
+ xg, yg = dmap.mask2grid(xm, ym)
+ t = integrate(xg, yg)
+ if t is not None:
+ trajectories.append(t)
+ else:
+ sp2 = np.asanyarray(start_points, dtype=float).copy()
+
+ # Check if start_points are outside the data boundaries
+ for xs, ys in sp2:
+ if not (grid.x_origin <= xs <= grid.x_origin + grid.width
+ and grid.y_origin <= ys <= grid.y_origin + grid.height):
+ raise ValueError("Starting point ({}, {}) outside of data "
+ "boundaries".format(xs, ys))
+
+ # Convert start_points from data to array coords
+ # Shift the seed points from the bottom left of the data so that
+ # data2grid works properly.
+ sp2[:, 0] -= grid.x_origin
+ sp2[:, 1] -= grid.y_origin
+
+ for xs, ys in sp2:
+ xg, yg = dmap.data2grid(xs, ys)
+ t = integrate(xg, yg)
+ if t is not None:
+ trajectories.append(t)
+
+ if use_multicolor_lines:
+ if norm is None:
+ norm = mcolors.Normalize(color.min(), color.max())
+ if cmap is None:
+ cmap = cm.get_cmap(matplotlib.rcParams['image.cmap'])
+ else:
+ cmap = cm.get_cmap(cmap)
+
+ streamlines = []
+ arrows = []
+ for t in trajectories:
+ tgx = np.array(t[0])
+ tgy = np.array(t[1])
+ # Rescale from grid-coordinates to data-coordinates.
+ tx, ty = dmap.grid2data(*np.array(t))
+ tx += grid.x_origin
+ ty += grid.y_origin
+
+ points = np.transpose([tx, ty]).reshape(-1, 1, 2)
+ streamlines.extend(np.hstack([points[:-1], points[1:]]))
+
+ # Add arrows half way along each trajectory.
+ s = np.cumsum(np.sqrt(np.diff(tx) ** 2 + np.diff(ty) ** 2))
+ n = np.searchsorted(s, s[-1] / 2.)
+ arrow_tail = (tx[n], ty[n])
+ arrow_head = (np.mean(tx[n:n + 2]), np.mean(ty[n:n + 2]))
+
+ if isinstance(linewidth, np.ndarray):
+ line_widths = interpgrid(linewidth, tgx, tgy)[:-1]
+ line_kw['linewidth'].extend(line_widths)
+ arrow_kw['linewidth'] = line_widths[n]
+
+ if use_multicolor_lines:
+ color_values = interpgrid(color, tgx, tgy)[:-1]
+ line_colors.append(color_values)
+ arrow_kw['color'] = cmap(norm(color_values[n]))
+
+ p = patches.FancyArrowPatch(
+ arrow_tail, arrow_head, transform=transform, **arrow_kw)
+ axes.add_patch(p)
+ arrows.append(p)
+
+ lc = mcollections.LineCollection(
+ streamlines, transform=transform, **line_kw)
+ lc.sticky_edges.x[:] = [grid.x_origin, grid.x_origin + grid.width]
+ lc.sticky_edges.y[:] = [grid.y_origin, grid.y_origin + grid.height]
+ if use_multicolor_lines:
+ lc.set_array(np.ma.hstack(line_colors))
+ lc.set_cmap(cmap)
+ lc.set_norm(norm)
+ axes.add_collection(lc)
+ axes.autoscale_view()
+
+ ac = matplotlib.collections.PatchCollection(arrows)
+ stream_container = StreamplotSet(lc, ac)
+ return stream_container
+
+
+class StreamplotSet(object):
+
+ def __init__(self, lines, arrows, **kwargs):
+ self.lines = lines
+ self.arrows = arrows
+
+
+# Coordinate definitions
+# ========================
+
+class DomainMap(object):
+ """Map representing different coordinate systems.
+
+ Coordinate definitions:
+
+ * axes-coordinates go from 0 to 1 in the domain.
+ * data-coordinates are specified by the input x-y coordinates.
+ * grid-coordinates go from 0 to N and 0 to M for an N x M grid,
+ where N and M match the shape of the input data.
+ * mask-coordinates go from 0 to N and 0 to M for an N x M mask,
+ where N and M are user-specified to control the density of streamlines.
+
+ This class also has methods for adding trajectories to the StreamMask.
+ Before adding a trajectory, run `start_trajectory` to keep track of regions
+ crossed by a given trajectory. Later, if you decide the trajectory is bad
+ (e.g., if the trajectory is very short) just call `undo_trajectory`.
+ """
+
+ def __init__(self, grid, mask):
+ self.grid = grid
+ self.mask = mask
+ # Constants for conversion between grid- and mask-coordinates
+ self.x_grid2mask = (mask.nx - 1) / grid.nx
+ self.y_grid2mask = (mask.ny - 1) / grid.ny
+
+ self.x_mask2grid = 1. / self.x_grid2mask
+ self.y_mask2grid = 1. / self.y_grid2mask
+
+ self.x_data2grid = 1. / grid.dx
+ self.y_data2grid = 1. / grid.dy
+
+ def grid2mask(self, xi, yi):
+ """Return nearest space in mask-coords from given grid-coords."""
+ return (int((xi * self.x_grid2mask) + 0.5),
+ int((yi * self.y_grid2mask) + 0.5))
+
+ def mask2grid(self, xm, ym):
+ return xm * self.x_mask2grid, ym * self.y_mask2grid
+
+ def data2grid(self, xd, yd):
+ return xd * self.x_data2grid, yd * self.y_data2grid
+
+ def grid2data(self, xg, yg):
+ return xg / self.x_data2grid, yg / self.y_data2grid
+
+ def start_trajectory(self, xg, yg):
+ xm, ym = self.grid2mask(xg, yg)
+ self.mask._start_trajectory(xm, ym)
+
+ def reset_start_point(self, xg, yg):
+ xm, ym = self.grid2mask(xg, yg)
+ self.mask._current_xy = (xm, ym)
+
+ def update_trajectory(self, xg, yg):
+ if not self.grid.within_grid(xg, yg):
+ raise InvalidIndexError
+ xm, ym = self.grid2mask(xg, yg)
+ self.mask._update_trajectory(xm, ym)
+
+ def undo_trajectory(self):
+ self.mask._undo_trajectory()
+
+
+class Grid(object):
+ """Grid of data."""
+ def __init__(self, x, y):
+
+ if x.ndim == 1:
+ pass
+ elif x.ndim == 2:
+ x_row = x[0, :]
+ if not np.allclose(x_row, x):
+ raise ValueError("The rows of 'x' must be equal")
+ x = x_row
+ else:
+ raise ValueError("'x' can have at maximum 2 dimensions")
+
+ if y.ndim == 1:
+ pass
+ elif y.ndim == 2:
+ y_col = y[:, 0]
+ if not np.allclose(y_col, y.T):
+ raise ValueError("The columns of 'y' must be equal")
+ y = y_col
+ else:
+ raise ValueError("'y' can have at maximum 2 dimensions")
+
+ self.nx = len(x)
+ self.ny = len(y)
+
+ self.dx = x[1] - x[0]
+ self.dy = y[1] - y[0]
+
+ self.x_origin = x[0]
+ self.y_origin = y[0]
+
+ self.width = x[-1] - x[0]
+ self.height = y[-1] - y[0]
+
+ @property
+ def shape(self):
+ return self.ny, self.nx
+
+ def within_grid(self, xi, yi):
+ """Return True if point is a valid index of grid."""
+ # Note that xi/yi can be floats; so, for example, we can't simply check
+ # `xi < self.nx` since `xi` can be `self.nx - 1 < xi < self.nx`
+ return xi >= 0 and xi <= self.nx - 1 and yi >= 0 and yi <= self.ny - 1
+
+
+class StreamMask(object):
+ """Mask to keep track of discrete regions crossed by streamlines.
+
+ The resolution of this grid determines the approximate spacing between
+ trajectories. Streamlines are only allowed to pass through zeroed cells:
+ When a streamline enters a cell, that cell is set to 1, and no new
+ streamlines are allowed to enter.
+ """
+
+ def __init__(self, density):
+ if np.isscalar(density):
+ if density <= 0:
+ raise ValueError("If a scalar, 'density' must be positive")
+ self.nx = self.ny = int(30 * density)
+ else:
+ if len(density) != 2:
+ raise ValueError("'density' can have at maximum 2 dimensions")
+ self.nx = int(30 * density[0])
+ self.ny = int(30 * density[1])
+ self._mask = np.zeros((self.ny, self.nx))
+ self.shape = self._mask.shape
+
+ self._current_xy = None
+
+ def __getitem__(self, *args):
+ return self._mask.__getitem__(*args)
+
+ def _start_trajectory(self, xm, ym):
+ """Start recording streamline trajectory"""
+ self._traj = []
+ self._update_trajectory(xm, ym)
+
+ def _undo_trajectory(self):
+ """Remove current trajectory from mask"""
+ for t in self._traj:
+ self._mask.__setitem__(t, 0)
+
+ def _update_trajectory(self, xm, ym):
+ """Update current trajectory position in mask.
+
+ If the new position has already been filled, raise `InvalidIndexError`.
+ """
+ if self._current_xy != (xm, ym):
+ if self[ym, xm] == 0:
+ self._traj.append((ym, xm))
+ self._mask[ym, xm] = 1
+ self._current_xy = (xm, ym)
+ else:
+ raise InvalidIndexError
+
+
+class InvalidIndexError(Exception):
+ pass
+
+
+class TerminateTrajectory(Exception):
+ pass
+
+
+# Integrator definitions
+#========================
+
+def get_integrator(u, v, dmap, minlength, maxlength, integration_direction):
+
+ # rescale velocity onto grid-coordinates for integrations.
+ u, v = dmap.data2grid(u, v)
+
+ # speed (path length) will be in axes-coordinates
+ u_ax = u / dmap.grid.nx
+ v_ax = v / dmap.grid.ny
+ speed = np.ma.sqrt(u_ax ** 2 + v_ax ** 2)
+
+ def forward_time(xi, yi):
+ ds_dt = interpgrid(speed, xi, yi)
+ if ds_dt == 0:
+ raise TerminateTrajectory()
+ dt_ds = 1. / ds_dt
+ ui = interpgrid(u, xi, yi)
+ vi = interpgrid(v, xi, yi)
+ return ui * dt_ds, vi * dt_ds
+
+ def backward_time(xi, yi):
+ dxi, dyi = forward_time(xi, yi)
+ return -dxi, -dyi
+
+ def integrate(x0, y0):
+ """Return x, y grid-coordinates of trajectory based on starting point.
+
+ Integrate both forward and backward in time from starting point in
+ grid coordinates.
+
+ Integration is terminated when a trajectory reaches a domain boundary
+ or when it crosses into an already occupied cell in the StreamMask. The
+ resulting trajectory is None if it is shorter than `minlength`.
+ """
+
+ stotal, x_traj, y_traj = 0., [], []
+
+ try:
+ dmap.start_trajectory(x0, y0)
+ except InvalidIndexError:
+ return None
+ if integration_direction in ['both', 'backward']:
+ s, xt, yt = _integrate_rk12(x0, y0, dmap, backward_time, maxlength)
+ stotal += s
+ x_traj += xt[::-1]
+ y_traj += yt[::-1]
+
+ if integration_direction in ['both', 'forward']:
+ dmap.reset_start_point(x0, y0)
+ s, xt, yt = _integrate_rk12(x0, y0, dmap, forward_time, maxlength)
+ if len(x_traj) > 0:
+ xt = xt[1:]
+ yt = yt[1:]
+ stotal += s
+ x_traj += xt
+ y_traj += yt
+
+ if stotal > minlength:
+ return x_traj, y_traj
+ else: # reject short trajectories
+ dmap.undo_trajectory()
+ return None
+
+ return integrate
+
+
+def _integrate_rk12(x0, y0, dmap, f, maxlength):
+ """2nd-order Runge-Kutta algorithm with adaptive step size.
+
+ This method is also referred to as the improved Euler's method, or Heun's
+ method. This method is favored over higher-order methods because:
+
+ 1. To get decent looking trajectories and to sample every mask cell
+ on the trajectory we need a small timestep, so a lower order
+ solver doesn't hurt us unless the data is *very* high resolution.
+ In fact, for cases where the user inputs
+ data smaller or of similar grid size to the mask grid, the higher
+ order corrections are negligible because of the very fast linear
+ interpolation used in `interpgrid`.
+
+ 2. For high resolution input data (i.e. beyond the mask
+ resolution), we must reduce the timestep. Therefore, an adaptive
+ timestep is more suited to the problem as this would be very hard
+ to judge automatically otherwise.
+
+ This integrator is about 1.5 - 2x as fast as both the RK4 and RK45
+ solvers in most setups on my machine. I would recommend removing the
+ other two to keep things simple.
+ """
+ # This error is below that needed to match the RK4 integrator. It
+ # is set for visual reasons -- too low and corners start
+ # appearing ugly and jagged. Can be tuned.
+ maxerror = 0.003
+
+ # This limit is important (for all integrators) to avoid the
+ # trajectory skipping some mask cells. We could relax this
+ # condition if we use the code which is commented out below to
+ # increment the location gradually. However, due to the efficient
+ # nature of the interpolation, this doesn't boost speed by much
+ # for quite a bit of complexity.
+ maxds = min(1. / dmap.mask.nx, 1. / dmap.mask.ny, 0.1)
+
+ ds = maxds
+ stotal = 0
+ xi = x0
+ yi = y0
+ xf_traj = []
+ yf_traj = []
+
+ while dmap.grid.within_grid(xi, yi):
+ xf_traj.append(xi)
+ yf_traj.append(yi)
+ try:
+ k1x, k1y = f(xi, yi)
+ k2x, k2y = f(xi + ds * k1x,
+ yi + ds * k1y)
+ except IndexError:
+ # Out of the domain on one of the intermediate integration steps.
+ # Take an Euler step to the boundary to improve neatness.
+ ds, xf_traj, yf_traj = _euler_step(xf_traj, yf_traj, dmap, f)
+ stotal += ds
+ break
+ except TerminateTrajectory:
+ break
+
+ dx1 = ds * k1x
+ dy1 = ds * k1y
+ dx2 = ds * 0.5 * (k1x + k2x)
+ dy2 = ds * 0.5 * (k1y + k2y)
+
+ nx, ny = dmap.grid.shape
+ # Error is normalized to the axes coordinates
+ error = np.sqrt(((dx2 - dx1) / nx) ** 2 + ((dy2 - dy1) / ny) ** 2)
+
+ # Only save step if within error tolerance
+ if error < maxerror:
+ xi += dx2
+ yi += dy2
+ try:
+ dmap.update_trajectory(xi, yi)
+ except InvalidIndexError:
+ break
+ if (stotal + ds) > maxlength:
+ break
+ stotal += ds
+
+ # recalculate stepsize based on step error
+ if error == 0:
+ ds = maxds
+ else:
+ ds = min(maxds, 0.85 * ds * (maxerror / error) ** 0.5)
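+ # Standard controller for an embedded order-2/order-1 pair: the local
+ # error scales like ds**2, so the new step is ds * sqrt(maxerror/error),
+ # damped by a 0.85 safety factor and capped at maxds.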
+
+ return stotal, xf_traj, yf_traj
+
+
+def _euler_step(xf_traj, yf_traj, dmap, f):
+ """Simple Euler integration step that extends streamline to boundary."""
+ ny, nx = dmap.grid.shape
+ xi = xf_traj[-1]
+ yi = yf_traj[-1]
+ cx, cy = f(xi, yi)
+ if cx == 0:
+ dsx = np.inf
+ elif cx < 0:
+ dsx = xi / -cx
+ else:
+ dsx = (nx - 1 - xi) / cx
+ if cy == 0:
+ dsy = np.inf
+ elif cy < 0:
+ dsy = yi / -cy
+ else:
+ dsy = (ny - 1 - yi) / cy
+ ds = min(dsx, dsy)
+ xf_traj.append(xi + cx * ds)
+ yf_traj.append(yi + cy * ds)
+ return ds, xf_traj, yf_traj
+
+
+# Utility functions
+# ========================
+
+def interpgrid(a, xi, yi):
+ """Fast 2D, linear interpolation on an integer grid"""
+
+ Ny, Nx = np.shape(a)
+ if isinstance(xi, np.ndarray):
+ x = xi.astype(int)
+ y = yi.astype(int)
+ # Check that xn, yn don't exceed max index
+ xn = np.clip(x + 1, 0, Nx - 1)
+ yn = np.clip(y + 1, 0, Ny - 1)
+ else:
+ x = int(xi)
+ y = int(yi)
+ # conditional is faster than clipping for integers
+ if x == (Nx - 2):
+ xn = x
+ else:
+ xn = x + 1
+ if y == (Ny - 2):
+ yn = y
+ else:
+ yn = y + 1
+
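+ # Bilinear interpolation: blend the four surrounding grid values by the
+ # fractional offsets (xt, yt) within the cell.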
+ a00 = a[y, x]
+ a01 = a[y, xn]
+ a10 = a[yn, x]
+ a11 = a[yn, xn]
+ xt = xi - x
+ yt = yi - y
+ a0 = a00 * (1 - xt) + a01 * xt
+ a1 = a10 * (1 - xt) + a11 * xt
+ ai = a0 * (1 - yt) + a1 * yt
+
+ if not isinstance(xi, np.ndarray):
+ if np.ma.is_masked(ai):
+ raise TerminateTrajectory
+
+ return ai
+
+
+def _gen_starting_points(shape):
+ """Yield starting points for streamlines.
+
+ Trying points on the boundary first gives higher quality streamlines.
+ This algorithm starts with a point on the mask corner and spirals inward.
+ This algorithm is inefficient, but fast compared to rest of streamplot.
+ """
+ ny, nx = shape
+ xfirst = 0
+ yfirst = 1
+ xlast = nx - 1
+ ylast = ny - 1
+ x, y = 0, 0
+ i = 0
+ direction = 'right'
+ for i in xrange(nx * ny):
+
+ yield x, y
+
+ if direction == 'right':
+ x += 1
+ if x >= xlast:
+ xlast -= 1
+ direction = 'up'
+ elif direction == 'up':
+ y += 1
+ if y >= ylast:
+ ylast -= 1
+ direction = 'left'
+ elif direction == 'left':
+ x -= 1
+ if x <= xfirst:
+ xfirst += 1
+ direction = 'down'
+ elif direction == 'down':
+ y -= 1
+ if y <= yfirst:
+ yfirst += 1
+ direction = 'right'
diff --git a/contrib/python/matplotlib/py2/matplotlib/style/__init__.py b/contrib/python/matplotlib/py2/matplotlib/style/__init__.py
new file mode 100644
index 00000000000..cb0592f41e7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/style/__init__.py
@@ -0,0 +1,3 @@
+from __future__ import absolute_import
+
+from .core import use, context, available, library, reload_library
diff --git a/contrib/python/matplotlib/py2/matplotlib/style/core.py b/contrib/python/matplotlib/py2/matplotlib/style/core.py
new file mode 100644
index 00000000000..593dd9dcb1c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/style/core.py
@@ -0,0 +1,234 @@
+from __future__ import absolute_import, division, print_function
+
+import six
+
+"""
+Core functions and attributes for the matplotlib style library:
+
+``use``
+ Select style sheet to override the current matplotlib settings.
+``context``
+ Context manager to use a style sheet temporarily.
+``available``
+ List available style sheets.
+``library``
+ A dictionary of style names and matplotlib settings.
+"""
+import os
+import re
+import contextlib
+import warnings
+
+import matplotlib as mpl
+from matplotlib import rc_params_from_file, rcParamsDefault
+
+
+__all__ = ['use', 'context', 'available', 'library', 'reload_library']
+
+
+BASE_LIBRARY_PATH = os.path.join(mpl.get_data_path(), 'stylelib')
+# Users may want multiple library paths, so store a list of paths.
+USER_LIBRARY_PATHS = [os.path.join(mpl._get_configdir(), 'stylelib')]
+STYLE_EXTENSION = 'mplstyle'
+STYLE_FILE_PATTERN = re.compile(r'([\S]+).%s$' % STYLE_EXTENSION)
+
+
+# A list of rcParams that should not be applied from styles
+STYLE_BLACKLIST = {
+ 'interactive', 'backend', 'backend.qt4', 'webagg.port', 'webagg.address',
+ 'webagg.port_retries', 'webagg.open_in_browser', 'backend_fallback',
+ 'toolbar', 'timezone', 'datapath', 'figure.max_open_warning',
+ 'savefig.directory', 'tk.window_focus', 'docstring.hardcopy'}
+
+
+def _remove_blacklisted_style_params(d, warn=True):
+ o = {}
+ for key, val in d.items():
+ if key in STYLE_BLACKLIST:
+ if warn:
+ warnings.warn(
+ "Style includes a parameter, '{0}', that is not related "
+ "to style. Ignoring".format(key))
+ else:
+ o[key] = val
+ return o
+
+
+def is_style_file(filename):
+ """Return True if the filename looks like a style file."""
+ return STYLE_FILE_PATTERN.match(filename) is not None
+
+
+def _apply_style(d, warn=True):
+ mpl.rcParams.update(_remove_blacklisted_style_params(d, warn=warn))
+
+
+def use(style):
+ """Use matplotlib style settings from a style specification.
+
+ The style name of 'default' is reserved for reverting back to
+ the default style settings.
+
+ Parameters
+ ----------
+ style : str, dict, or list
+ A style specification. Valid options are:
+
+ +------+-------------------------------------------------------------+
+ | str | The name of a style or a path/URL to a style file. For a |
+ | | list of available style names, see `style.available`. |
+ +------+-------------------------------------------------------------+
+ | dict | Dictionary with valid key/value pairs for |
+ | | `matplotlib.rcParams`. |
+ +------+-------------------------------------------------------------+
+ | list | A list of style specifiers (str or dict) applied from first |
+ | | to last in the list. |
+ +------+-------------------------------------------------------------+
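+
+ A minimal usage sketch (``matplotlib.pyplot.style`` re-exports this
+ module)::
+
+ import matplotlib.pyplot as plt
+
+ plt.style.use('ggplot')                            # one built-in style
+ plt.style.use(['ggplot', {'lines.linewidth': 2}])  # later entries win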
+
+
+ """
+ style_alias = {'mpl20': 'default',
+ 'mpl15': 'classic'}
+ if isinstance(style, six.string_types) or hasattr(style, 'keys'):
+ # If name is a single str or dict, make it a single element list.
+ styles = [style]
+ else:
+ styles = style
+
+ styles = (style_alias.get(s, s)
+ if isinstance(s, six.string_types)
+ else s
+ for s in styles)
+ for style in styles:
+ if not isinstance(style, six.string_types):
+ _apply_style(style)
+ elif style == 'default':
+ _apply_style(rcParamsDefault, warn=False)
+ elif style in library:
+ _apply_style(library[style])
+ else:
+ try:
+ rc = rc_params_from_file(style, use_default_template=False)
+ _apply_style(rc)
+ except IOError:
+ raise IOError(
+ "{!r} not found in the style library and input is not a "
+ "valid URL or path; see `style.available` for list of "
+ "available styles".format(style))
+
+
+@contextlib.contextmanager
+def context(style, after_reset=False):
+ """Context manager for using style settings temporarily.
+
+ Parameters
+ ----------
+ style : str, dict, or list
+ A style specification. Valid options are:
+
+ +------+-------------------------------------------------------------+
+ | str | The name of a style or a path/URL to a style file. For a |
+ | | list of available style names, see `style.available`. |
+ +------+-------------------------------------------------------------+
+ | dict | Dictionary with valid key/value pairs for |
+ | | `matplotlib.rcParams`. |
+ +------+-------------------------------------------------------------+
+ | list | A list of style specifiers (str or dict) applied from first |
+ | | to last in the list. |
+ +------+-------------------------------------------------------------+
+
+ after_reset : bool
+ If True, apply style after resetting settings to their defaults;
+ otherwise, apply style on top of the current settings.
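+
+ A minimal usage sketch::
+
+ import matplotlib.pyplot as plt
+
+ with plt.style.context('ggplot'):
+     plt.plot([1, 2, 3])   # styled; previous settings restored on exit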
+ """
+ initial_settings = mpl.rcParams.copy()
+ if after_reset:
+ mpl.rcdefaults()
+ try:
+ use(style)
+ except:
+ # Restore original settings before raising errors during the update.
+ mpl.rcParams.update(initial_settings)
+ raise
+ else:
+ yield
+ finally:
+ mpl.rcParams.update(initial_settings)
+
+
+def load_base_library():
+ """Load style library defined in this package."""
+ library = dict()
+ library.update(read_style_directory(BASE_LIBRARY_PATH))
+ return library
+
+
+def iter_user_libraries():
+ for stylelib_path in USER_LIBRARY_PATHS:
+ stylelib_path = os.path.expanduser(stylelib_path)
+ if os.path.exists(stylelib_path) and os.path.isdir(stylelib_path):
+ yield stylelib_path
+
+
+def update_user_library(library):
+ """Update style library with user-defined rc files"""
+ for stylelib_path in iter_user_libraries():
+ styles = read_style_directory(stylelib_path)
+ update_nested_dict(library, styles)
+ return library
+
+
+def iter_style_files(style_dir):
+ """Yield file path and name of styles in the given directory."""
+ for path in os.listdir(style_dir):
+ filename = os.path.basename(path)
+ if is_style_file(filename):
+ match = STYLE_FILE_PATTERN.match(filename)
+ path = os.path.abspath(os.path.join(style_dir, path))
+ yield path, match.groups()[0]
+
+
+def read_style_directory(style_dir):
+ """Return dictionary of styles defined in `style_dir`."""
+ styles = dict()
+ for path, name in iter_style_files(style_dir):
+ with warnings.catch_warnings(record=True) as warns:
+ styles[name] = rc_params_from_file(path,
+ use_default_template=False)
+
+ for w in warns:
+ message = 'In %s: %s' % (path, w.message)
+ warnings.warn(message)
+
+ return styles
+
+
+def update_nested_dict(main_dict, new_dict):
+ """Update nested dict (only level of nesting) with new values.
+
+ Unlike dict.update, this assumes that the values of the parent dict are
+ dicts (or dict-like), so you shouldn't replace the nested dict if it
+ already exists. Instead you should update the sub-dict.
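+
+ For example (illustrative)::
+
+ update_nested_dict({'a': {'x': 1}}, {'a': {'y': 2}})
+ # -> {'a': {'x': 1, 'y': 2}}: the existing sub-dict is updated in place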
+ """
+ # update named styles specified by user
+ for name, rc_dict in six.iteritems(new_dict):
+ if name in main_dict:
+ main_dict[name].update(rc_dict)
+ else:
+ main_dict[name] = rc_dict
+ return main_dict
+
+
+# Load style library
+# ==================
+_base_library = load_base_library()
+
+library = None
+available = []
+
+
+def reload_library():
+ """Reload style library."""
+ global library
+ available[:] = library = update_user_library(_base_library)
+reload_library()
diff --git a/contrib/python/matplotlib/py2/matplotlib/table.py b/contrib/python/matplotlib/py2/matplotlib/table.py
new file mode 100644
index 00000000000..ee7908ca9d7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/table.py
@@ -0,0 +1,702 @@
+"""
+Place a table below the x-axis at location loc.
+
+The table consists of a grid of cells.
+
+The grid need not be rectangular and can have holes.
+
+Cells are added by specifying their row and column.
+
+For the purposes of positioning the cell at (0, 0) is
+assumed to be at the top left and the cell at (max_row, max_col)
+is assumed to be at bottom right.
+
+You can add additional cells outside this range to have convenient
+ways of positioning more interesting grids.
+
+Author : John Gill <jng@europe.renre.com>
+Copyright : 2004 John Gill and John Hunter
+License : matplotlib license
+
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange
+
+import warnings
+
+from . import artist
+from .artist import Artist, allow_rasterization
+from .patches import Rectangle
+from matplotlib import docstring
+from .text import Text
+from .transforms import Bbox
+from matplotlib.path import Path
+
+
+class Cell(Rectangle):
+ """
+ A cell is a Rectangle with some associated text.
+
+ """
+ PAD = 0.1 # padding between text and rectangle
+
+ def __init__(self, xy, width, height,
+ edgecolor='k', facecolor='w',
+ fill=True,
+ text='',
+ loc=None,
+ fontproperties=None
+ ):
+
+ # Call base
+ Rectangle.__init__(self, xy, width=width, height=height,
+ edgecolor=edgecolor, facecolor=facecolor)
+ self.set_clip_on(False)
+
+ # Create text object
+ if loc is None:
+ loc = 'right'
+ self._loc = loc
+ self._text = Text(x=xy[0], y=xy[1], text=text,
+ fontproperties=fontproperties)
+ self._text.set_clip_on(False)
+
+ def set_transform(self, trans):
+ Rectangle.set_transform(self, trans)
+ # the text does not get the transform!
+ self.stale = True
+
+ def set_figure(self, fig):
+ Rectangle.set_figure(self, fig)
+ self._text.set_figure(fig)
+
+ def get_text(self):
+ 'Return the cell Text instance'
+ return self._text
+
+ def set_fontsize(self, size):
+ self._text.set_fontsize(size)
+ self.stale = True
+
+ def get_fontsize(self):
+ 'Return the cell fontsize'
+ return self._text.get_fontsize()
+
+ def auto_set_font_size(self, renderer):
+ """ Shrink font size until text fits. """
+ fontsize = self.get_fontsize()
+ required = self.get_required_width(renderer)
+ while fontsize > 1 and required > self.get_width():
+ fontsize -= 1
+ self.set_fontsize(fontsize)
+ required = self.get_required_width(renderer)
+
+ return fontsize
+
+ @allow_rasterization
+ def draw(self, renderer):
+ if not self.get_visible():
+ return
+ # draw the rectangle
+ Rectangle.draw(self, renderer)
+
+ # position the text
+ self._set_text_position(renderer)
+ self._text.draw(renderer)
+ self.stale = False
+
+ def _set_text_position(self, renderer):
+ """ Set text up so it draws in the right place.
+
+ Currently supports 'left', 'center' and 'right'.
+ """
+ bbox = self.get_window_extent(renderer)
+ l, b, w, h = bbox.bounds
+
+ # draw in center vertically
+ self._text.set_verticalalignment('center')
+ y = b + (h / 2.0)
+
+ # now position horizontally
+ if self._loc == 'center':
+ self._text.set_horizontalalignment('center')
+ x = l + (w / 2.0)
+ elif self._loc == 'left':
+ self._text.set_horizontalalignment('left')
+ x = l + (w * self.PAD)
+ else:
+ self._text.set_horizontalalignment('right')
+ x = l + (w * (1.0 - self.PAD))
+
+ self._text.set_position((x, y))
+
+ def get_text_bounds(self, renderer):
+ """ Get text bounds in axes co-ordinates. """
+ bbox = self._text.get_window_extent(renderer)
+ bboxa = bbox.inverse_transformed(self.get_data_transform())
+ return bboxa.bounds
+
+ def get_required_width(self, renderer):
+ """ Get width required for this cell. """
+ l, b, w, h = self.get_text_bounds(renderer)
+ return w * (1.0 + (2.0 * self.PAD))
+
+ def set_text_props(self, **kwargs):
+ 'update the text properties with kwargs'
+ self._text.update(kwargs)
+ self.stale = True
+
+
+class CustomCell(Cell):
+ """
+ A subclass of Cell where the sides may be visibly toggled.
+
+ """
+
+ _edges = 'BRTL'
+ _edge_aliases = {'open': '',
+ 'closed': _edges, # default
+ 'horizontal': 'BT',
+ 'vertical': 'RL'
+ }
+
+ def __init__(self, *args, **kwargs):
+ visible_edges = kwargs.pop('visible_edges')
+ Cell.__init__(self, *args, **kwargs)
+ self.visible_edges = visible_edges
+
+ @property
+ def visible_edges(self):
+ return self._visible_edges
+
+ @visible_edges.setter
+ def visible_edges(self, value):
+ if value is None:
+ self._visible_edges = self._edges
+ elif value in self._edge_aliases:
+ self._visible_edges = self._edge_aliases[value]
+ else:
+ for edge in value:
+ if edge not in self._edges:
+ raise ValueError('Invalid edge param {}, must only be one '
+ 'of {} or string of {}'.format(
+ value,
+ ", ".join(self._edge_aliases),
+ ", ".join(self._edges)))
+ self._visible_edges = value
+ self.stale = True
+
+ def get_path(self):
+ 'Return a path where the edges specified by _visible_edges are drawn'
+
+ codes = [Path.MOVETO]
+
+ for edge in self._edges:
+ if edge in self._visible_edges:
+ codes.append(Path.LINETO)
+ else:
+ codes.append(Path.MOVETO)
+
+ if Path.MOVETO not in codes[1:]: # All sides are visible
+ codes[-1] = Path.CLOSEPOLY
+
+ return Path(
+ [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]],
+ codes,
+ readonly=True
+ )
+
+
+class Table(Artist):
+ """
+ Create a table of cells.
+
+ Table can have (optional) row and column headers.
+
+ Each entry in the table can be either text or patches.
+
+ Column widths and row heights for the table can be specified.
+
+ Return value is a sequence of text, line and patch instances that make
+ up the table
+ """
+ codes = {'best': 0,
+ 'upper right': 1, # default
+ 'upper left': 2,
+ 'lower left': 3,
+ 'lower right': 4,
+ 'center left': 5,
+ 'center right': 6,
+ 'lower center': 7,
+ 'upper center': 8,
+ 'center': 9,
+ 'top right': 10,
+ 'top left': 11,
+ 'bottom left': 12,
+ 'bottom right': 13,
+ 'right': 14,
+ 'left': 15,
+ 'top': 16,
+ 'bottom': 17,
+ }
+
+ FONTSIZE = 10
+ AXESPAD = 0.02 # the border between the axes and table edge
+
+ def __init__(self, ax, loc=None, bbox=None, **kwargs):
+
+ Artist.__init__(self)
+
+ if isinstance(loc, six.string_types) and loc not in self.codes:
+ warnings.warn('Unrecognized location %s. Falling back on '
+ 'bottom; valid locations are\n%s\t' %
+ (loc, '\n\t'.join(self.codes)))
+ loc = 'bottom'
+ if isinstance(loc, six.string_types):
+ loc = self.codes.get(loc, 1)
+ self.set_figure(ax.figure)
+ self._axes = ax
+ self._loc = loc
+ self._bbox = bbox
+
+ # use axes coords
+ self.set_transform(ax.transAxes)
+
+ self._texts = []
+ self._cells = {}
+ self._edges = None
+ self._autoRows = []
+ self._autoColumns = []
+ self._autoFontsize = True
+ self.update(kwargs)
+
+ self.set_clip_on(False)
+
+ def add_cell(self, row, col, *args, **kwargs):
+ """
+ Add a cell to the table.
+
+ Parameters
+ ----------
+ row : int
+ Row index
+ col : int
+ Column index
+
+ Returns
+ -------
+ `CustomCell`: Automatically created cell
+
+ """
+ xy = (0, 0)
+ cell = CustomCell(xy, visible_edges=self.edges, *args, **kwargs)
+ self[row, col] = cell
+ return cell
+
+ def __setitem__(self, position, cell):
+ """
+ Set a CustomCell at a given position
+ """
+ if not isinstance(cell, CustomCell):
+ raise TypeError('Table only accepts CustomCell')
+ try:
+ row, col = position[0], position[1]
+ except Exception:
+ raise KeyError('Only tuples length 2 are accepted as coordinates')
+ cell.set_figure(self.figure)
+ cell.set_transform(self.get_transform())
+ cell.set_clip_on(False)
+ self._cells[row, col] = cell
+ self.stale = True
+
+ def __getitem__(self, position):
+ """
+ Retrieve a custom cell from a given position
+ """
+ try:
+ row, col = position[0], position[1]
+ except Exception:
+ raise KeyError('Only tuples length 2 are accepted as coordinates')
+ return self._cells[row, col]
+
+ @property
+ def edges(self):
+ return self._edges
+
+ @edges.setter
+ def edges(self, value):
+ self._edges = value
+ self.stale = True
+
+ def _approx_text_height(self):
+ return (self.FONTSIZE / 72.0 * self.figure.dpi /
+ self._axes.bbox.height * 1.2)
+
+ @allow_rasterization
+ def draw(self, renderer):
+ # Need a renderer to do hit tests on mouseevent; assume the last one
+ # will do
+ if renderer is None:
+ renderer = self.figure._cachedRenderer
+ if renderer is None:
+ raise RuntimeError('No renderer defined')
+
+ if not self.get_visible():
+ return
+ renderer.open_group('table')
+ self._update_positions(renderer)
+
+ for key in sorted(self._cells):
+ self._cells[key].draw(renderer)
+
+ renderer.close_group('table')
+ self.stale = False
+
+ def _get_grid_bbox(self, renderer):
+ """Get a bbox, in axes co-ordinates for the cells.
+
+ Only include those in the range (0,0) to (maxRow, maxCol)"""
+ boxes = [cell.get_window_extent(renderer)
+ for (row, col), cell in six.iteritems(self._cells)
+ if row >= 0 and col >= 0]
+ bbox = Bbox.union(boxes)
+ return bbox.inverse_transformed(self.get_transform())
+
+ def contains(self, mouseevent):
+ """Test whether the mouse event occurred in the table.
+
+ Returns T/F, {}
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+
+ # TODO: Return index of the cell containing the cursor so that the user
+ # doesn't have to bind to each one individually.
+ renderer = self.figure._cachedRenderer
+ if renderer is not None:
+ boxes = [cell.get_window_extent(renderer)
+ for (row, col), cell in six.iteritems(self._cells)
+ if row >= 0 and col >= 0]
+ bbox = Bbox.union(boxes)
+ return bbox.contains(mouseevent.x, mouseevent.y), {}
+ else:
+ return False, {}
+
+ def get_children(self):
+ 'Return the Artists contained by the table'
+ return list(six.itervalues(self._cells))
+ get_child_artists = get_children # backward compatibility
+
+ def get_window_extent(self, renderer):
+ 'Return the bounding box of the table in window coords'
+ boxes = [cell.get_window_extent(renderer)
+ for cell in six.itervalues(self._cells)]
+ return Bbox.union(boxes)
+
+ def _do_cell_alignment(self):
+ """ Calculate row heights and column widths.
+
+ Position cells accordingly.
+ """
+ # Calculate row/column widths
+ widths = {}
+ heights = {}
+ for (row, col), cell in six.iteritems(self._cells):
+ height = heights.setdefault(row, 0.0)
+ heights[row] = max(height, cell.get_height())
+ width = widths.setdefault(col, 0.0)
+ widths[col] = max(width, cell.get_width())
+
+ # work out left position for each column
+ xpos = 0
+ lefts = {}
+ for col in sorted(widths):
+ lefts[col] = xpos
+ xpos += widths[col]
+
+ ypos = 0
+ bottoms = {}
+ for row in sorted(heights, reverse=True):
+ bottoms[row] = ypos
+ ypos += heights[row]
+
+ # set cell positions
+ for (row, col), cell in six.iteritems(self._cells):
+ cell.set_x(lefts[col])
+ cell.set_y(bottoms[row])
+
+ def auto_set_column_width(self, col):
+ """ Given column indexs in either List, Tuple or int. Will be able to
+ automatically set the columns into optimal sizes.
+
+ Here is the example of the input, which triger automatic adjustment on
+ columns to optimal size by given index numbers.
+ -1: the row labling
+ 0: the 1st column
+ 1: the 2nd column
+
+ Args:
+ col(List): list of indexs
+ >>>table.auto_set_column_width([-1,0,1])
+
+ col(Tuple): tuple of indexs
+ >>>table.auto_set_column_width((-1,0,1))
+
+ col(int): index integer
+ >>>table.auto_set_column_width(-1)
+ >>>table.auto_set_column_width(0)
+ >>>table.auto_set_column_width(1)
+ """
+ # check for col possibility on iteration
+ try:
+ iter(col)
+ except (TypeError, AttributeError):
+ self._autoColumns.append(col)
+ else:
+ for cell in col:
+ self._autoColumns.append(cell)
+
+ self.stale = True
+
+ def _auto_set_column_width(self, col, renderer):
+ """ Automagically set width for column.
+ """
+ cells = [key for key in self._cells if key[1] == col]
+
+ # find max width
+ width = 0
+ for cell in cells:
+ c = self._cells[cell]
+ width = max(c.get_required_width(renderer), width)
+
+ # Now set the widths
+ for cell in cells:
+ self._cells[cell].set_width(width)
+
+ def auto_set_font_size(self, value=True):
+ """ Automatically set font size. """
+ self._autoFontsize = value
+ self.stale = True
+
+ def _auto_set_font_size(self, renderer):
+
+ if len(self._cells) == 0:
+ return
+ fontsize = list(six.itervalues(self._cells))[0].get_fontsize()
+ cells = []
+ for key, cell in six.iteritems(self._cells):
+ # ignore auto-sized columns
+ if key[1] in self._autoColumns:
+ continue
+ size = cell.auto_set_font_size(renderer)
+ fontsize = min(fontsize, size)
+ cells.append(cell)
+
+ # now set all fontsizes equal
+ for cell in six.itervalues(self._cells):
+ cell.set_fontsize(fontsize)
+
+ def scale(self, xscale, yscale):
+ """ Scale column widths by xscale and row heights by yscale. """
+ for c in six.itervalues(self._cells):
+ c.set_width(c.get_width() * xscale)
+ c.set_height(c.get_height() * yscale)
+
+ def set_fontsize(self, size):
+ """
+ Set the fontsize of the cell text
+
+ ACCEPTS: a float in points
+ """
+
+ for cell in six.itervalues(self._cells):
+ cell.set_fontsize(size)
+ self.stale = True
+
+ def _offset(self, ox, oy):
+ 'Move all the artists by ox,oy (axes coords)'
+
+ for c in six.itervalues(self._cells):
+ x, y = c.get_x(), c.get_y()
+ c.set_x(x + ox)
+ c.set_y(y + oy)
+
+ def _update_positions(self, renderer):
+ # called from renderer to allow more precise estimates of
+ # widths and heights with get_window_extent
+
+ # Do any auto width setting
+ for col in self._autoColumns:
+ self._auto_set_column_width(col, renderer)
+
+ if self._autoFontsize:
+ self._auto_set_font_size(renderer)
+
+ # Align all the cells
+ self._do_cell_alignment()
+
+ bbox = self._get_grid_bbox(renderer)
+ l, b, w, h = bbox.bounds
+
+ if self._bbox is not None:
+ # Position according to bbox
+ rl, rb, rw, rh = self._bbox
+ self.scale(rw / w, rh / h)
+ ox = rl - l
+ oy = rb - b
+ self._do_cell_alignment()
+ else:
+ # Position using loc
+ (BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
+ TR, TL, BL, BR, R, L, T, B) = xrange(len(self.codes))
+ # defaults for center
+ ox = (0.5 - w / 2) - l
+ oy = (0.5 - h / 2) - b
+ if self._loc in (UL, LL, CL): # left
+ ox = self.AXESPAD - l
+ if self._loc in (BEST, UR, LR, R, CR): # right
+ ox = 1 - (l + w + self.AXESPAD)
+ if self._loc in (BEST, UR, UL, UC): # upper
+ oy = 1 - (b + h + self.AXESPAD)
+ if self._loc in (LL, LR, LC): # lower
+ oy = self.AXESPAD - b
+ if self._loc in (LC, UC, C): # center x
+ ox = (0.5 - w / 2) - l
+ if self._loc in (CL, CR, C): # center y
+ oy = (0.5 - h / 2) - b
+
+ if self._loc in (TL, BL, L): # out left
+ ox = - (l + w)
+ if self._loc in (TR, BR, R): # out right
+ ox = 1.0 - l
+ if self._loc in (TR, TL, T): # out top
+ oy = 1.0 - b
+ if self._loc in (BL, BR, B): # out bottom
+ oy = - (b + h)
+
+ self._offset(ox, oy)
+
+ def get_celld(self):
+ 'return a dict of cells in the table'
+ return self._cells
+
+
+def table(ax,
+ cellText=None, cellColours=None,
+ cellLoc='right', colWidths=None,
+ rowLabels=None, rowColours=None, rowLoc='left',
+ colLabels=None, colColours=None, colLoc='center',
+ loc='bottom', bbox=None, edges='closed',
+ **kwargs):
+ """
+ TABLE(cellText=None, cellColours=None,
+ cellLoc='right', colWidths=None,
+ rowLabels=None, rowColours=None, rowLoc='left',
+ colLabels=None, colColours=None, colLoc='center',
+ loc='bottom', bbox=None, edges='closed')
+
+ Factory function to generate a Table instance.
+
+ Thanks to John Gill for providing the class and table.
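+
+ A minimal usage sketch (via the pyplot wrapper ``Axes.table``)::
+
+ import matplotlib.pyplot as plt
+
+ fig, ax = plt.subplots()
+ the_table = ax.table(cellText=[['1', '2'], ['3', '4']],
+                      colLabels=['a', 'b'], loc='bottom')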
+ """
+
+ if cellColours is None and cellText is None:
+ raise ValueError('At least one argument from "cellColours" or '
+ '"cellText" must be provided to create a table.')
+
+ # Check we have some cellText
+ if cellText is None:
+ # assume just colours are needed
+ rows = len(cellColours)
+ cols = len(cellColours[0])
+ cellText = [[''] * cols] * rows
+
+ rows = len(cellText)
+ cols = len(cellText[0])
+ for row in cellText:
+ if len(row) != cols:
+ raise ValueError("Each row in 'cellText' must have {} columns"
+ .format(cols))
+
+ if cellColours is not None:
+ if len(cellColours) != rows:
+ raise ValueError("'cellColours' must have {} rows".format(rows))
+ for row in cellColours:
+ if len(row) != cols:
+ raise ValueError("Each row in 'cellColours' must have {} "
+ "columns".format(cols))
+ else:
+ cellColours = ['w' * cols] * rows
+
+ # Set colwidths if not given
+ if colWidths is None:
+ colWidths = [1.0 / cols] * cols
+
+ # Fill in missing information for column
+ # and row labels
+ rowLabelWidth = 0
+ if rowLabels is None:
+ if rowColours is not None:
+ rowLabels = [''] * rows
+ rowLabelWidth = colWidths[0]
+ elif rowColours is None:
+ rowColours = 'w' * rows
+
+ if rowLabels is not None:
+ if len(rowLabels) != rows:
+ raise ValueError("'rowLabels' must be of length {0}".format(rows))
+
+ # If we have column labels, need to shift
+ # the text and colour arrays down 1 row
+ offset = 1
+ if colLabels is None:
+ if colColours is not None:
+ colLabels = [''] * cols
+ else:
+ offset = 0
+ elif colColours is None:
+ colColours = 'w' * cols
+
+ # Set up cell colours if not given
+ if cellColours is None:
+ cellColours = ['w' * cols] * rows
+
+ # Now create the table
+ table = Table(ax, loc, bbox, **kwargs)
+ table.edges = edges
+ height = table._approx_text_height()
+
+ # Add the cells
+ for row in xrange(rows):
+ for col in xrange(cols):
+ table.add_cell(row + offset, col,
+ width=colWidths[col], height=height,
+ text=cellText[row][col],
+ facecolor=cellColours[row][col],
+ loc=cellLoc)
+ # Do column labels
+ if colLabels is not None:
+ for col in xrange(cols):
+ table.add_cell(0, col,
+ width=colWidths[col], height=height,
+ text=colLabels[col], facecolor=colColours[col],
+ loc=colLoc)
+
+ # Do row labels
+ if rowLabels is not None:
+ for row in xrange(rows):
+ table.add_cell(row + offset, -1,
+ width=rowLabelWidth or 1e-15, height=height,
+ text=rowLabels[row], facecolor=rowColours[row],
+ loc=rowLoc)
+ if rowLabelWidth == 0:
+ table.auto_set_column_width(-1)
+
+ ax.add_table(table)
+ return table
+
+
+docstring.interpd.update(Table=artist.kwdoc(Table))
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/__init__.py b/contrib/python/matplotlib/py2/matplotlib/testing/__init__.py
new file mode 100644
index 00000000000..2184be03ef8
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/__init__.py
@@ -0,0 +1,59 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import functools
+import warnings
+
+import matplotlib as mpl
+from matplotlib import cbook
+
+
+def is_called_from_pytest():
+ """Returns whether the call was done from pytest"""
+ return getattr(mpl, '_called_from_pytest', False)
+
+
+def _copy_metadata(src_func, tgt_func):
+ """Replicates metadata of the function. Returns target function."""
+ functools.update_wrapper(tgt_func, src_func)
+ tgt_func.__wrapped__ = src_func # Python2 compatibility.
+ return tgt_func
+
+
+def set_font_settings_for_testing():
+ mpl.rcParams['font.family'] = 'DejaVu Sans'
+ mpl.rcParams['text.hinting'] = False
+ mpl.rcParams['text.hinting_factor'] = 8
+
+
+def set_reproducibility_for_testing():
+ mpl.rcParams['svg.hashsalt'] = 'matplotlib'
+
+
+def setup():
+ # The baseline images are created in this locale, so we should use
+ # it during all of the tests.
+ import locale
+ from matplotlib.backends import backend_agg, backend_pdf, backend_svg
+
+ try:
+ locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
+ except locale.Error:
+ try:
+ locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
+ except locale.Error:
+ warnings.warn(
+ "Could not set locale to English/United States. "
+ "Some date-related tests may fail")
+
+ mpl.use('Agg', warn=False) # use Agg backend for these tests
+
+ # These settings *must* be hardcoded for running the comparison
+ # tests and are not necessarily the default values as specified in
+ # rcsetup.py
+ mpl.rcdefaults() # Start with all defaults
+
+ set_font_settings_for_testing()
+ set_reproducibility_for_testing()
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/_nose/__init__.py b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/__init__.py
new file mode 100644
index 00000000000..d513c7b14f4
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/__init__.py
@@ -0,0 +1,78 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import sys
+
+
+def get_extra_test_plugins():
+ from .plugins.performgc import PerformGC
+ from .plugins.knownfailure import KnownFailure
+ from nose.plugins import attrib
+
+ return [PerformGC, KnownFailure, attrib.Plugin]
+
+
+def get_env():
+ env = {'NOSE_COVER_PACKAGE': ['matplotlib', 'mpl_toolkits'],
+ 'NOSE_COVER_HTML': 1,
+ 'NOSE_COVER_NO_PRINT': 1}
+ return env
+
+
+def check_deps():
+ try:
+ import nose
+ try:
+ from unittest import mock
+ except ImportError:
+ import mock
+ except ImportError:
+ print("matplotlib.test requires nose and mock to run.")
+ raise
+
+
+def test(verbosity=None, coverage=False, switch_backend_warn=True,
+ recursionlimit=0, **kwargs):
+ from ... import default_test_modules, get_backend, use
+
+ old_backend = get_backend()
+ old_recursionlimit = sys.getrecursionlimit()
+ try:
+ use('agg')
+ if recursionlimit:
+ sys.setrecursionlimit(recursionlimit)
+ import nose
+ from nose.plugins import multiprocess
+
+ # Nose doesn't automatically instantiate all of the plugins in the
+ # child processes, so we have to provide the multiprocess plugin with
+ # a list.
+ extra_plugins = get_extra_test_plugins()
+ multiprocess._instantiate_plugins = extra_plugins
+
+ env = get_env()
+ if coverage:
+ env['NOSE_WITH_COVERAGE'] = 1
+
+ if verbosity is not None:
+ env['NOSE_VERBOSE'] = verbosity
+
+ success = nose.run(
+ addplugins=[plugin() for plugin in extra_plugins],
+ env=env,
+ defaultTest=default_test_modules,
+ **kwargs
+ )
+ finally:
+ if old_backend.lower() != 'agg':
+ use(old_backend, warn=switch_backend_warn)
+ if recursionlimit:
+ sys.setrecursionlimit(old_recursionlimit)
+
+ return success
+
+
+def knownfail(msg):
+ from .exceptions import KnownFailureTest
+ # Keep the next ultra-long comment so it shows in console.
+ raise KnownFailureTest(msg) # An error here when running nose means that you don't have the matplotlib.testing.nose.plugins:KnownFailure plugin in use. # noqa
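For context, this runner is what matplotlib.test() dispatches to when pytest is not driving the session. A hedged sketch of a typical invocation (it assumes nose, mock and the bundled test data are installed):

import matplotlib

# Runs the default test modules on the Agg backend, restores the previous
# backend afterwards, and returns True when every test passed.
ok = matplotlib.test(verbosity=1, coverage=False)
print('all tests passed' if ok else 'there were failures')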
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/_nose/decorators.py b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/decorators.py
new file mode 100644
index 00000000000..1f0807df200
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/decorators.py
@@ -0,0 +1,33 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from .. import _copy_metadata
+from . import knownfail
+from .exceptions import KnownFailureDidNotFailTest
+
+
+def knownfailureif(fail_condition, msg=None, known_exception_class=None):
+ # based on numpy.testing.dec.knownfailureif
+ if msg is None:
+ msg = 'Test known to fail'
+
+ def known_fail_decorator(f):
+ def failer(*args, **kwargs):
+ try:
+ # Always run the test (to generate images).
+ result = f(*args, **kwargs)
+ except Exception as err:
+ if fail_condition:
+ if known_exception_class is not None:
+ if not isinstance(err, known_exception_class):
+ # This is not the expected exception
+ raise
+ knownfail(msg)
+ else:
+ raise
+ if fail_condition and fail_condition != 'indeterminate':
+ raise KnownFailureDidNotFailTest(msg)
+ return result
+ return _copy_metadata(f, failer)
+ return known_fail_decorator
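A small sketch of how the decorator above is used in a nose-style test module; the platform condition and message are illustrative.

import sys
import math
from matplotlib.testing._nose.decorators import knownfailureif

@knownfailureif(sys.platform == 'win32', msg='known to fail on Windows')
def test_trig_identity():
    # The body always runs; if it raises on the flagged platform the result
    # is reported as KNOWNFAIL, and if it unexpectedly passes there,
    # KnownFailureDidNotFailTest is raised instead.
    assert abs(math.sin(0.3) ** 2 + math.cos(0.3) ** 2 - 1.0) < 1e-12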
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/_nose/exceptions.py b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/exceptions.py
new file mode 100644
index 00000000000..51fc6f782d7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/exceptions.py
@@ -0,0 +1,10 @@
+class KnownFailureTest(Exception):
+ """
+ Raise this exception to mark a test as a known failing test.
+ """
+
+
+class KnownFailureDidNotFailTest(Exception):
+ """
+    Raise this exception to mark a test that should have failed but did not.
+ """
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/__init__.py b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/__init__.py
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/knownfailure.py b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/knownfailure.py
new file mode 100644
index 00000000000..3a5c86c3504
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/knownfailure.py
@@ -0,0 +1,49 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import os
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+from ..exceptions import KnownFailureTest
+
+
+class KnownFailure(ErrorClassPlugin):
+ '''Plugin that installs a KNOWNFAIL error class for the
+    KnownFailureTest exception. When KnownFailureTest is raised,
+ the exception will be logged in the knownfail attribute of the
+ result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
+ exception will not be counted as an error or failure.
+
+ This is based on numpy.testing.noseclasses.KnownFailure.
+ '''
+ enabled = True
+ knownfail = ErrorClass(KnownFailureTest,
+ label='KNOWNFAIL',
+ isfailure=False)
+
+ def options(self, parser, env=os.environ):
+ env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
+ parser.add_option('--no-knownfail', action='store_true',
+ dest='noKnownFail', default=env.get(env_opt, False),
+ help='Disable special handling of KnownFailureTest '
+ 'exceptions')
+
+ def configure(self, options, conf):
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noKnownFail', False)
+ if disable:
+ self.enabled = False
+
+ def addError(self, test, err, *zero_nine_capt_args):
+ # Fixme (Really weird): if I don't leave empty method here,
+ # nose gets confused and KnownFails become testing errors when
+ # using the MplNosePlugin and MplTestCase.
+
+ # The *zero_nine_capt_args captures an extra argument. There
+ # seems to be a bug in
+ # nose.testing.manager.ZeroNinePlugin.addError() in which a
+ # 3rd positional argument ("capt") is passed to the plugin's
+ # addError() method, even if one is not explicitly using the
+ # ZeroNinePlugin.
+ pass
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/performgc.py b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/performgc.py
new file mode 100644
index 00000000000..818fbd96f44
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/_nose/plugins/performgc.py
@@ -0,0 +1,26 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import gc
+import os
+from nose.plugins import Plugin
+
+
+class PerformGC(Plugin):
+ """This plugin adds option to call ``gc.collect`` after each test"""
+ enabled = False
+
+ def options(self, parser, env=os.environ):
+ env_opt = 'PERFORM_GC'
+ parser.add_option('--perform-gc', action='store_true',
+ dest='performGC', default=env.get(env_opt, False),
+ help='Call gc.collect() after each test')
+
+ def configure(self, options, conf):
+ if not self.can_configure:
+ return
+
+ self.enabled = getattr(options, 'performGC', False)
+
+ def afterTest(self, test):
+ gc.collect()
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/compare.py b/contrib/python/matplotlib/py2/matplotlib/testing/compare.py
new file mode 100644
index 00000000000..dcda681d438
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/compare.py
@@ -0,0 +1,489 @@
+"""
+Provides a collection of utilities for comparing (image) results.
+
+"""
+from __future__ import absolute_import, division, print_function
+
+import six
+
+import atexit
+import functools
+import hashlib
+import itertools
+import os
+import re
+import shutil
+import sys
+from tempfile import TemporaryFile
+
+import numpy as np
+
+import matplotlib
+from matplotlib.compat import subprocess
+from matplotlib.testing.exceptions import ImageComparisonFailure
+from matplotlib import _png
+from matplotlib import _get_cachedir
+from matplotlib import cbook
+
+__all__ = ['compare_float', 'compare_images', 'comparable_formats']
+
+
+def make_test_filename(fname, purpose):
+ """
+ Make a new filename by inserting `purpose` before the file's
+ extension.
+ """
+ base, ext = os.path.splitext(fname)
+ return '%s-%s%s' % (base, purpose, ext)
+
+
+def compare_float(expected, actual, relTol=None, absTol=None):
+ """
+ Fail if the floating point values are not close enough, with
+ the given message.
+
+ You can specify a relative tolerance, absolute tolerance, or both.
+
+ """
+ if relTol is None and absTol is None:
+ raise ValueError("You haven't specified a 'relTol' relative "
+ "tolerance or a 'absTol' absolute tolerance "
+ "function argument. You must specify one.")
+ msg = ""
+
+ if absTol is not None:
+ absDiff = abs(expected - actual)
+ if absTol < absDiff:
+ template = ['',
+ 'Expected: {expected}',
+ 'Actual: {actual}',
+ 'Abs diff: {absDiff}',
+ 'Abs tol: {absTol}']
+ msg += '\n '.join([line.format(**locals()) for line in template])
+
+ if relTol is not None:
+ # The relative difference of the two values. If the expected value is
+ # zero, then return the absolute value of the difference.
+ relDiff = abs(expected - actual)
+ if expected:
+ relDiff = relDiff / abs(expected)
+
+ if relTol < relDiff:
+ # The relative difference is a ratio, so it's always unit-less.
+ template = ['',
+ 'Expected: {expected}',
+ 'Actual: {actual}',
+ 'Rel diff: {relDiff}',
+ 'Rel tol: {relTol}']
+ msg += '\n '.join([line.format(**locals()) for line in template])
+
+ return msg or None
+
+
+def get_cache_dir():
+ cachedir = _get_cachedir()
+ if cachedir is None:
+ raise RuntimeError('Could not find a suitable configuration directory')
+ cache_dir = os.path.join(cachedir, 'test_cache')
+ if not os.path.exists(cache_dir):
+ try:
+ cbook.mkdirs(cache_dir)
+ except IOError:
+ return None
+ if not os.access(cache_dir, os.W_OK):
+ return None
+ return cache_dir
+
+
+def get_file_hash(path, block_size=2 ** 20):
+ md5 = hashlib.md5()
+ with open(path, 'rb') as fd:
+ while True:
+ data = fd.read(block_size)
+ if not data:
+ break
+ md5.update(data)
+
+ if path.endswith('.pdf'):
+ from matplotlib import checkdep_ghostscript
+ md5.update(checkdep_ghostscript()[1].encode('utf-8'))
+ elif path.endswith('.svg'):
+ from matplotlib import checkdep_inkscape
+ md5.update(checkdep_inkscape().encode('utf-8'))
+
+ return md5.hexdigest()
+
+
+def make_external_conversion_command(cmd):
+ def convert(old, new):
+ cmdline = cmd(old, new)
+ pipe = subprocess.Popen(cmdline, universal_newlines=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = pipe.communicate()
+ errcode = pipe.wait()
+ if not os.path.exists(new) or errcode:
+ msg = "Conversion command failed:\n%s\n" % ' '.join(cmdline)
+ if stdout:
+ msg += "Standard output:\n%s\n" % stdout
+ if stderr:
+ msg += "Standard error:\n%s\n" % stderr
+ raise IOError(msg)
+
+ return convert
+
+
+# Modified from https://bugs.python.org/issue25567.
+_find_unsafe_bytes = re.compile(br'[^a-zA-Z0-9_@%+=:,./-]').search
+
+
+def _shlex_quote_bytes(b):
+ return (b if _find_unsafe_bytes(b) is None
+ else b"'" + b.replace(b"'", b"'\"'\"'") + b"'")
+
+
+class _SVGConverter(object):
+ def __init__(self):
+ self._proc = None
+ # We cannot rely on the GC to trigger `__del__` at exit because
+ # other modules (e.g. `subprocess`) may already have their globals
+        # set to `None`, which makes `proc.communicate` or `proc.terminate`
+ # fail. By relying on `atexit` we ensure the destructor runs before
+ # `None`-setting occurs.
+ atexit.register(self.__del__)
+
+ def _read_to_prompt(self):
+ """Did Inkscape reach the prompt without crashing?
+ """
+ stream = iter(functools.partial(self._proc.stdout.read, 1), b"")
+ prompt = (b"\n", b">")
+ n = len(prompt)
+ its = itertools.tee(stream, n)
+ for i, it in enumerate(its):
+ next(itertools.islice(it, i, i), None) # Advance `it` by `i`.
+ while True:
+ window = tuple(map(next, its))
+ if len(window) != n:
+ # Ran out of data -- one of the `next(it)` raised
+ # StopIteration, so the tuple is shorter.
+ return False
+ if self._proc.poll() is not None:
+ # Inkscape exited.
+ return False
+ if window == prompt:
+ # Successfully read until prompt.
+ return True
+
+ def __call__(self, orig, dest):
+ if (not self._proc # First run.
+ or self._proc.poll() is not None): # Inkscape terminated.
+ env = os.environ.copy()
+ # If one passes e.g. a png file to Inkscape, it will try to
+ # query the user for conversion options via a GUI (even with
+ # `--without-gui`). Unsetting `DISPLAY` prevents this (and causes
+ # GTK to crash and Inkscape to terminate, but that'll just be
+ # reported as a regular exception below).
+ env.pop("DISPLAY", None) # May already be unset.
+ # Do not load any user options.
+ # `os.environ` needs native strings on Py2+Windows.
+ env[str("INKSCAPE_PROFILE_DIR")] = os.devnull
+ # Old versions of Inkscape (0.48.3.1, used on Travis as of now)
+ # seem to sometimes deadlock when stderr is redirected to a pipe,
+ # so we redirect it to a temporary file instead. This is not
+ # necessary anymore as of Inkscape 0.92.1.
+ self._stderr = TemporaryFile()
+ self._proc = subprocess.Popen(
+ [str("inkscape"), "--without-gui", "--shell"],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=self._stderr, env=env)
+ if not self._read_to_prompt():
+ raise OSError("Failed to start Inkscape")
+
+ try:
+ fsencode = os.fsencode
+ except AttributeError: # Py2.
+ def fsencode(s):
+ return s.encode(sys.getfilesystemencoding())
+
+ # Inkscape uses glib's `g_shell_parse_argv`, which has a consistent
+ # behavior across platforms, so we can just use `shlex.quote`.
+ orig_b, dest_b = map(_shlex_quote_bytes, map(fsencode, [orig, dest]))
+ if b"\n" in orig_b or b"\n" in dest_b:
+ # Who knows whether the current folder name has a newline, or if
+ # our encoding is even ASCII compatible... Just fall back on the
+ # slow solution (Inkscape uses `fgets` so it will always stop at a
+ # newline).
+ return make_external_conversion_command(lambda old, new: [
+ str('inkscape'), '-z', old, '--export-png', new])(orig, dest)
+ self._proc.stdin.write(orig_b + b" --export-png=" + dest_b + b"\n")
+ self._proc.stdin.flush()
+ if not self._read_to_prompt():
+ # Inkscape's output is not localized but gtk's is, so the
+ # output stream probably has a mixed encoding. Using
+ # `getfilesystemencoding` should at least get the filenames
+ # right...
+ self._stderr.seek(0)
+ raise ImageComparisonFailure(
+ self._stderr.read().decode(
+ sys.getfilesystemencoding(), "replace"))
+
+ def __del__(self):
+ if self._proc:
+ if self._proc.poll() is None: # Not exited yet.
+ self._proc.communicate(b"quit\n")
+ self._proc.wait()
+ self._proc.stdin.close()
+ self._proc.stdout.close()
+ self._stderr.close()
+
+
+def _update_converter():
+ gs, gs_v = matplotlib.checkdep_ghostscript()
+ if gs_v is not None:
+ def cmd(old, new):
+ return [str(gs), '-q', '-sDEVICE=png16m', '-dNOPAUSE', '-dBATCH',
+ '-sOutputFile=' + new, old]
+ converter['pdf'] = make_external_conversion_command(cmd)
+ converter['eps'] = make_external_conversion_command(cmd)
+
+ if matplotlib.checkdep_inkscape() is not None:
+ converter['svg'] = _SVGConverter()
+
+
+#: A dictionary that maps filename extensions to functions which
+#: themselves map arguments `old` and `new` (filenames) to a list of strings.
+#: The list can then be passed to Popen to convert files with that
+#: extension to png format.
+converter = {}
+_update_converter()
+
+
+def comparable_formats():
+ """
+ Returns the list of file formats that compare_images can compare
+ on this system.
+
+ """
+ return ['png'] + list(converter)
+
+
+def convert(filename, cache):
+ """
+ Convert the named file into a png file. Returns the name of the
+ created file.
+
+ If *cache* is True, the result of the conversion is cached in
+ `matplotlib._get_cachedir() + '/test_cache/'`. The caching is based
+    on a hash of the exact contents of the input file. There is no limit
+ on the size of the cache, so it may need to be manually cleared
+ periodically.
+
+ """
+ base, extension = filename.rsplit('.', 1)
+ if extension not in converter:
+ reason = "Don't know how to convert %s files to png" % extension
+ from . import is_called_from_pytest
+ if is_called_from_pytest():
+ import pytest
+ pytest.skip(reason)
+ else:
+ from nose import SkipTest
+ raise SkipTest(reason)
+ newname = base + '_' + extension + '.png'
+ if not os.path.exists(filename):
+ raise IOError("'%s' does not exist" % filename)
+
+ # Only convert the file if the destination doesn't already exist or
+ # is out of date.
+ if (not os.path.exists(newname) or
+ os.stat(newname).st_mtime < os.stat(filename).st_mtime):
+ if cache:
+ cache_dir = get_cache_dir()
+ else:
+ cache_dir = None
+
+ if cache_dir is not None:
+ hash_value = get_file_hash(filename)
+ new_ext = os.path.splitext(newname)[1]
+ cached_file = os.path.join(cache_dir, hash_value + new_ext)
+ if os.path.exists(cached_file):
+ shutil.copyfile(cached_file, newname)
+ return newname
+
+ converter[extension](filename, newname)
+
+ if cache_dir is not None:
+ shutil.copyfile(newname, cached_file)
+
+ return newname
+
+#: Maps file extensions to a function which takes a filename as its
+#: only argument to return a list suitable for execution with Popen.
+#: The purpose of this is so that the result file (with the given
+#: extension) can be verified with tools such as xmllint for svg.
+verifiers = {}
+
+# Turning this off, because it seems to cause multiprocessing issues
+if False and matplotlib.checkdep_xmllint():
+ verifiers['svg'] = lambda filename: [
+ 'xmllint', '--valid', '--nowarning', '--noout', filename]
+
+
+@cbook.deprecated("2.1")
+def verify(filename):
+ """Verify the file through some sort of verification tool."""
+ if not os.path.exists(filename):
+ raise IOError("'%s' does not exist" % filename)
+ base, extension = filename.rsplit('.', 1)
+ verifier = verifiers.get(extension, None)
+ if verifier is not None:
+ cmd = verifier(filename)
+ pipe = subprocess.Popen(cmd, universal_newlines=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = pipe.communicate()
+ errcode = pipe.wait()
+ if errcode != 0:
+ msg = "File verification command failed:\n%s\n" % ' '.join(cmd)
+ if stdout:
+ msg += "Standard output:\n%s\n" % stdout
+ if stderr:
+ msg += "Standard error:\n%s\n" % stderr
+ raise IOError(msg)
+
+
+def crop_to_same(actual_path, actual_image, expected_path, expected_image):
+ # clip the images to the same size -- this is useful only when
+ # comparing eps to pdf
+ if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
+ aw, ah, ad = actual_image.shape
+ ew, eh, ed = expected_image.shape
+ actual_image = actual_image[int(aw / 2 - ew / 2):int(
+ aw / 2 + ew / 2), int(ah / 2 - eh / 2):int(ah / 2 + eh / 2)]
+ return actual_image, expected_image
+
+
+def calculate_rms(expectedImage, actualImage):
+ "Calculate the per-pixel errors, then compute the root mean square error."
+ if expectedImage.shape != actualImage.shape:
+        raise ImageComparisonFailure(
+            "Image sizes do not match: expected size {0}, "
+            "actual size {1}".format(expectedImage.shape, actualImage.shape))
+ # Convert to float to avoid overflowing finite integer types.
+ return np.sqrt(((expectedImage - actualImage).astype(float) ** 2).mean())
+
+
+def compare_images(expected, actual, tol, in_decorator=False):
+ """
+ Compare two "image" files checking differences within a tolerance.
+
+ The two given filenames may point to files which are convertible to
+ PNG via the `.converter` dictionary. The underlying RMS is calculated
+ with the `.calculate_rms` function.
+
+ Parameters
+ ----------
+ expected : str
+ The filename of the expected image.
+    actual : str
+ The filename of the actual image.
+ tol : float
+ The tolerance (a color value difference, where 255 is the
+ maximal difference). The test fails if the average pixel
+ difference is greater than this value.
+ in_decorator : bool
+ If called from image_comparison decorator, this should be
+ True. (default=False)
+
+ Examples
+ --------
+ img1 = "./baseline/plot.png"
+ img2 = "./output/plot.png"
+    compare_images(img1, img2, 0.001)
+
+ """
+ if not os.path.exists(actual):
+ raise Exception("Output image %s does not exist." % actual)
+
+ if os.stat(actual).st_size == 0:
+ raise Exception("Output image file %s is empty." % actual)
+
+ # Convert the image to png
+ extension = expected.split('.')[-1]
+
+ if not os.path.exists(expected):
+ raise IOError('Baseline image %r does not exist.' % expected)
+
+ if extension != 'png':
+ actual = convert(actual, False)
+ expected = convert(expected, True)
+
+ # open the image files and remove the alpha channel (if it exists)
+ expectedImage = _png.read_png_int(expected)
+ actualImage = _png.read_png_int(actual)
+ expectedImage = expectedImage[:, :, :3]
+ actualImage = actualImage[:, :, :3]
+
+ actualImage, expectedImage = crop_to_same(
+ actual, actualImage, expected, expectedImage)
+
+ diff_image = make_test_filename(actual, 'failed-diff')
+
+ if tol <= 0.0:
+ if np.array_equal(expectedImage, actualImage):
+ return None
+
+ # convert to signed integers, so that the images can be subtracted without
+ # overflow
+ expectedImage = expectedImage.astype(np.int16)
+ actualImage = actualImage.astype(np.int16)
+
+ rms = calculate_rms(expectedImage, actualImage)
+
+ if rms <= tol:
+ return None
+
+ save_diff_image(expected, actual, diff_image)
+
+ results = dict(rms=rms, expected=str(expected),
+ actual=str(actual), diff=str(diff_image), tol=tol)
+
+ if not in_decorator:
+ # Then the results should be a string suitable for stdout.
+ template = ['Error: Image files did not match.',
+ 'RMS Value: {rms}',
+ 'Expected: \n {expected}',
+ 'Actual: \n {actual}',
+ 'Difference:\n {diff}',
+ 'Tolerance: \n {tol}', ]
+ results = '\n '.join([line.format(**results) for line in template])
+ return results
+
+
+def save_diff_image(expected, actual, output):
+ expectedImage = _png.read_png(expected)
+ actualImage = _png.read_png(actual)
+ actualImage, expectedImage = crop_to_same(
+ actual, actualImage, expected, expectedImage)
+ expectedImage = np.array(expectedImage).astype(float)
+ actualImage = np.array(actualImage).astype(float)
+ if expectedImage.shape != actualImage.shape:
+        raise ImageComparisonFailure(
+            "Image sizes do not match: expected size {0}, "
+            "actual size {1}".format(expectedImage.shape, actualImage.shape))
+ absDiffImage = np.abs(expectedImage - actualImage)
+
+ # expand differences in luminance domain
+ absDiffImage *= 255 * 10
+ save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
+ height, width, depth = save_image_np.shape
+
+ # The PDF renderer doesn't produce an alpha channel, but the
+ # matplotlib PNG writer requires one, so expand the array
+ if depth == 3:
+ with_alpha = np.empty((height, width, 4), dtype=np.uint8)
+ with_alpha[:, :, 0:3] = save_image_np
+ save_image_np = with_alpha
+
+ # Hard-code the alpha channel to fully solid
+ save_image_np[:, :, 3] = 255
+
+ _png.write_png(save_image_np, output)
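A hedged sketch of driving the comparison helpers above directly; the baseline and result paths are placeholders for files that must already exist.

from matplotlib.testing.compare import comparable_formats, compare_images

print(comparable_formats())  # 'png' plus whatever converters were found (pdf/eps/svg)

err = compare_images('baseline/plot.png', 'result_images/plot.png',
                     tol=0.001, in_decorator=True)
if err is None:
    print('images match within tolerance')
else:
    # In decorator mode a dict is returned with rms, expected, actual, diff, tol.
    print('RMS %.3f exceeds tol; diff written to %s' % (err['rms'], err['diff']))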
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/conftest.py b/contrib/python/matplotlib/py2/matplotlib/testing/conftest.py
new file mode 100644
index 00000000000..fb306594780
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/conftest.py
@@ -0,0 +1,100 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import pytest
+
+import matplotlib
+
+
+def pytest_configure(config):
+ matplotlib.use('agg')
+ matplotlib._called_from_pytest = True
+ matplotlib._init_tests()
+
+
+def pytest_unconfigure(config):
+ matplotlib._called_from_pytest = False
+
+
+@pytest.fixture(autouse=True)
+def mpl_test_settings(request):
+ from matplotlib.testing.decorators import _do_cleanup
+
+ original_units_registry = matplotlib.units.registry.copy()
+ original_settings = matplotlib.rcParams.copy()
+
+ backend = None
+ backend_marker = request.node.get_closest_marker('backend')
+ if backend_marker is not None:
+ assert len(backend_marker.args) == 1, \
+ "Marker 'backend' must specify 1 backend."
+ backend, = backend_marker.args
+ prev_backend = matplotlib.get_backend()
+
+ style = '_classic_test' # Default of cleanup and image_comparison too.
+ style_marker = request.node.get_closest_marker('style')
+ if style_marker is not None:
+ assert len(style_marker.args) == 1, \
+ "Marker 'style' must specify 1 style."
+ style, = style_marker.args
+
+ matplotlib.testing.setup()
+ if backend is not None:
+ # This import must come after setup() so it doesn't load the default
+ # backend prematurely.
+ import matplotlib.pyplot as plt
+ plt.switch_backend(backend)
+ matplotlib.style.use(style)
+ try:
+ yield
+ finally:
+ if backend is not None:
+ plt.switch_backend(prev_backend)
+ _do_cleanup(original_units_registry,
+ original_settings)
+
+
+@pytest.fixture
+def mpl_image_comparison_parameters(request, extension):
+ # This fixture is applied automatically by the image_comparison decorator.
+ #
+ # The sole purpose of this fixture is to provide an indirect method of
+ # obtaining parameters *without* modifying the decorated function
+ # signature. In this way, the function signature can stay the same and
+ # pytest won't get confused.
+ # We annotate the decorated function with any parameters captured by this
+ # fixture so that they can be used by the wrapper in image_comparison.
+ baseline_images, = request.node.get_closest_marker('baseline_images').args
+ if baseline_images is None:
+ # Allow baseline image list to be produced on the fly based on current
+ # parametrization.
+ baseline_images = request.getfixturevalue('baseline_images')
+
+ func = request.function
+ func.__wrapped__.parameters = (baseline_images, extension)
+ try:
+ yield
+ finally:
+ delattr(func.__wrapped__, 'parameters')
+
+
+@pytest.fixture
+def pd():
+ """Fixture to import and configure pandas."""
+ pd = pytest.importorskip('pandas')
+ try:
+ from pandas.plotting import (
+ register_matplotlib_converters as register)
+ except ImportError:
+ from pandas.tseries.converter import register
+ register()
+ try:
+ yield pd
+ finally:
+ try:
+ from pandas.plotting import (
+ deregister_matplotlib_converters as deregister)
+ except ImportError:
+ pass
+ else:
+ deregister()
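An illustrative pytest test module relying on the fixtures above; it assumes the 'backend' and 'style' markers are registered in the project's pytest configuration, as matplotlib's own test setup does.

import pytest
import matplotlib.pyplot as plt


@pytest.mark.backend('pdf')
@pytest.mark.style('default')
def test_runs_on_pdf_backend():
    # mpl_test_settings (autouse) switches to the pdf backend and the given
    # style before this body runs, and restores everything afterwards.
    fig, ax = plt.subplots()
    ax.plot([1, 2, 3])


def test_plot_dates(pd):
    # The 'pd' fixture skips the test when pandas is missing and registers
    # the matplotlib unit converters around it.
    series = pd.Series([1, 2, 3], index=pd.date_range('2000-01-01', periods=3))
    fig, ax = plt.subplots()
    ax.plot(series.index, series.values)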
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/decorators.py b/contrib/python/matplotlib/py2/matplotlib/testing/decorators.py
new file mode 100644
index 00000000000..d008446dcbf
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/decorators.py
@@ -0,0 +1,589 @@
+from __future__ import absolute_import, division, print_function
+
+import six
+
+import functools
+import inspect
+import os
+import sys
+import shutil
+import warnings
+import unittest
+
+# Note - don't import nose up here - import it only as needed in functions.
+# This allows other functions here to be used by pytest-based testing suites
+# without requiring nose to be installed.
+
+
+import matplotlib as mpl
+import matplotlib.style
+import matplotlib.units
+import matplotlib.testing
+from matplotlib import cbook
+from matplotlib import ticker
+from matplotlib import pyplot as plt
+from matplotlib import ft2font
+from matplotlib.testing.compare import (
+ comparable_formats, compare_images, make_test_filename)
+from . import _copy_metadata, is_called_from_pytest
+from .exceptions import ImageComparisonFailure
+
+
+def _knownfailureif(fail_condition, msg=None, known_exception_class=None):
+ """
+
+    Assume a test will fail if *fail_condition* is True. *fail_condition*
+ may also be False or the string 'indeterminate'.
+
+ *msg* is the error message displayed for the test.
+
+ If *known_exception_class* is not None, the failure is only known
+ if the exception is an instance of this class. (Default = None)
+
+ """
+ if is_called_from_pytest():
+ import pytest
+ if fail_condition == 'indeterminate':
+ fail_condition, strict = True, False
+ else:
+ fail_condition, strict = bool(fail_condition), True
+ return pytest.mark.xfail(condition=fail_condition, reason=msg,
+ raises=known_exception_class, strict=strict)
+ else:
+ from ._nose.decorators import knownfailureif
+ return knownfailureif(fail_condition, msg, known_exception_class)
+
+
+@cbook.deprecated('2.1',
+ alternative='pytest.xfail or import the plugin')
+def knownfailureif(fail_condition, msg=None, known_exception_class=None):
+ _knownfailureif(fail_condition, msg, known_exception_class)
+
+
+def _do_cleanup(original_units_registry, original_settings):
+ plt.close('all')
+
+ mpl.rcParams.clear()
+ mpl.rcParams.update(original_settings)
+ matplotlib.units.registry.clear()
+ matplotlib.units.registry.update(original_units_registry)
+ warnings.resetwarnings() # reset any warning filters set in tests
+
+
+class CleanupTest(object):
+ @classmethod
+ def setup_class(cls):
+ cls.original_units_registry = matplotlib.units.registry.copy()
+ cls.original_settings = mpl.rcParams.copy()
+ matplotlib.testing.setup()
+
+ @classmethod
+ def teardown_class(cls):
+ _do_cleanup(cls.original_units_registry,
+ cls.original_settings)
+
+ def test(self):
+ self._func()
+
+
+class CleanupTestCase(unittest.TestCase):
+ '''A wrapper for unittest.TestCase that includes cleanup operations'''
+ @classmethod
+ def setUpClass(cls):
+ import matplotlib.units
+ cls.original_units_registry = matplotlib.units.registry.copy()
+ cls.original_settings = mpl.rcParams.copy()
+
+ @classmethod
+ def tearDownClass(cls):
+ _do_cleanup(cls.original_units_registry,
+ cls.original_settings)
+
+
+def cleanup(style=None):
+ """
+ A decorator to ensure that any global state is reset before
+ running a test.
+
+ Parameters
+ ----------
+ style : str, optional
+ The name of the style to apply.
+ """
+
+ # If cleanup is used without arguments, `style` will be a
+ # callable, and we pass it directly to the wrapper generator. If
+    # cleanup is called with an argument, it is a string naming a
+ # style, and the function will be passed as an argument to what we
+ # return. This is a confusing, but somewhat standard, pattern for
+ # writing a decorator with optional arguments.
+
+ def make_cleanup(func):
+ if inspect.isgeneratorfunction(func):
+ @functools.wraps(func)
+ def wrapped_callable(*args, **kwargs):
+ original_units_registry = matplotlib.units.registry.copy()
+ original_settings = mpl.rcParams.copy()
+ matplotlib.style.use(style)
+ try:
+ for yielded in func(*args, **kwargs):
+ yield yielded
+ finally:
+ _do_cleanup(original_units_registry,
+ original_settings)
+ else:
+ @functools.wraps(func)
+ def wrapped_callable(*args, **kwargs):
+ original_units_registry = matplotlib.units.registry.copy()
+ original_settings = mpl.rcParams.copy()
+ matplotlib.style.use(style)
+ try:
+ func(*args, **kwargs)
+ finally:
+ _do_cleanup(original_units_registry,
+ original_settings)
+
+ return wrapped_callable
+
+ if isinstance(style, six.string_types):
+ return make_cleanup
+ else:
+ result = make_cleanup(style)
+ # Default of mpl_test_settings fixture and image_comparison too.
+ style = '_classic_test'
+ return result
+
+
+def check_freetype_version(ver):
+ if ver is None:
+ return True
+
+ from distutils import version
+ if isinstance(ver, six.string_types):
+ ver = (ver, ver)
+ ver = [version.StrictVersion(x) for x in ver]
+ found = version.StrictVersion(ft2font.__freetype_version__)
+
+ return found >= ver[0] and found <= ver[1]
+
+
+def _checked_on_freetype_version(required_freetype_version):
+ if check_freetype_version(required_freetype_version):
+ return lambda f: f
+
+ reason = ("Mismatched version of freetype. "
+ "Test requires '%s', you have '%s'" %
+ (required_freetype_version, ft2font.__freetype_version__))
+ return _knownfailureif('indeterminate', msg=reason,
+ known_exception_class=ImageComparisonFailure)
+
+
+def remove_ticks_and_titles(figure):
+ figure.suptitle("")
+ null_formatter = ticker.NullFormatter()
+ for ax in figure.get_axes():
+ ax.set_title("")
+ ax.xaxis.set_major_formatter(null_formatter)
+ ax.xaxis.set_minor_formatter(null_formatter)
+ ax.yaxis.set_major_formatter(null_formatter)
+ ax.yaxis.set_minor_formatter(null_formatter)
+ try:
+ ax.zaxis.set_major_formatter(null_formatter)
+ ax.zaxis.set_minor_formatter(null_formatter)
+ except AttributeError:
+ pass
+
+
+def _raise_on_image_difference(expected, actual, tol):
+ __tracebackhide__ = True
+
+ err = compare_images(expected, actual, tol, in_decorator=True)
+
+ if not os.path.exists(expected):
+ raise ImageComparisonFailure('image does not exist: %s' % expected)
+
+ if err:
+ for key in ["actual", "expected"]:
+ err[key] = os.path.relpath(err[key])
+ raise ImageComparisonFailure(
+ 'images not close (RMS %(rms).3f):\n\t%(actual)s\n\t%(expected)s '
+ % err)
+
+
+def _xfail_if_format_is_uncomparable(extension):
+ will_fail = extension not in comparable_formats()
+ if will_fail:
+ fail_msg = 'Cannot compare %s files on this system' % extension
+ else:
+ fail_msg = 'No failure expected'
+
+ return _knownfailureif(will_fail, fail_msg,
+ known_exception_class=ImageComparisonFailure)
+
+
+def _mark_xfail_if_format_is_uncomparable(extension):
+ if isinstance(extension, six.string_types):
+ name = extension
+ marks = []
+ elif isinstance(extension, tuple):
+ # Extension might be a pytest ParameterSet instead of a plain string.
+ # Unfortunately, this type is not exposed, so since it's a namedtuple,
+ # check for a tuple instead.
+ name = extension.values[0]
+ marks = list(extension.marks)
+ else:
+ # Extension might be a pytest marker instead of a plain string.
+ name = extension.args[0]
+ marks = [extension.mark]
+
+ if name not in comparable_formats():
+ fail_msg = 'Cannot compare %s files on this system' % (name, )
+ import pytest
+ marks += [pytest.mark.xfail(reason=fail_msg, strict=False,
+ raises=ImageComparisonFailure)]
+ return pytest.param(name, marks=marks)
+ else:
+ return extension
+
+
+class _ImageComparisonBase(object):
+ """
+ Image comparison base class
+
+ This class provides *just* the comparison-related functionality and avoids
+ any code that would be specific to any testing framework.
+ """
+ def __init__(self, tol, remove_text, savefig_kwargs):
+ self.func = self.baseline_dir = self.result_dir = None
+ self.tol = tol
+ self.remove_text = remove_text
+ self.savefig_kwargs = savefig_kwargs
+
+ def delayed_init(self, func):
+        assert self.func is None, "it looks like the same decorator was used twice"
+ self.func = func
+ self.baseline_dir, self.result_dir = _image_directories(func)
+
+ def copy_baseline(self, baseline, extension):
+ baseline_path = os.path.join(self.baseline_dir, baseline)
+ orig_expected_fname = baseline_path + '.' + extension
+ if extension == 'eps' and not os.path.exists(orig_expected_fname):
+ orig_expected_fname = baseline_path + '.pdf'
+ expected_fname = make_test_filename(
+ os.path.join(self.result_dir,
+ os.path.basename(orig_expected_fname)),
+ 'expected')
+ if os.path.exists(orig_expected_fname):
+ shutil.copyfile(orig_expected_fname, expected_fname)
+ else:
+ reason = ("Do not have baseline image {0} because this "
+ "file does not exist: {1}".format(expected_fname,
+ orig_expected_fname))
+ raise ImageComparisonFailure(reason)
+ return expected_fname
+
+ def compare(self, idx, baseline, extension):
+ __tracebackhide__ = True
+ fignum = plt.get_fignums()[idx]
+ fig = plt.figure(fignum)
+
+ if self.remove_text:
+ remove_ticks_and_titles(fig)
+
+ actual_fname = (
+ os.path.join(self.result_dir, baseline) + '.' + extension)
+ kwargs = self.savefig_kwargs.copy()
+ if extension == 'pdf':
+ kwargs.setdefault('metadata',
+ {'Creator': None, 'Producer': None,
+ 'CreationDate': None})
+ fig.savefig(actual_fname, **kwargs)
+
+ expected_fname = self.copy_baseline(baseline, extension)
+ _raise_on_image_difference(expected_fname, actual_fname, self.tol)
+
+
+class ImageComparisonTest(CleanupTest, _ImageComparisonBase):
+ """
+ Nose-based image comparison class
+
+ This class generates tests for a nose-based testing framework. Ideally,
+ this class would not be public, and the only publicly visible API would
+ be the :func:`image_comparison` decorator. Unfortunately, there are
+ existing downstream users of this class (e.g., pytest-mpl) so it cannot yet
+ be removed.
+ """
+ def __init__(self, baseline_images, extensions, tol,
+ freetype_version, remove_text, savefig_kwargs, style):
+ _ImageComparisonBase.__init__(self, tol, remove_text, savefig_kwargs)
+ self.baseline_images = baseline_images
+ self.extensions = extensions
+ self.freetype_version = freetype_version
+ self.style = style
+
+ def setup(self):
+ func = self.func
+ plt.close('all')
+ self.setup_class()
+ try:
+ matplotlib.style.use(self.style)
+ matplotlib.testing.set_font_settings_for_testing()
+ func()
+ assert len(plt.get_fignums()) == len(self.baseline_images), (
+ "Test generated {} images but there are {} baseline images"
+ .format(len(plt.get_fignums()), len(self.baseline_images)))
+ except:
+ # Restore original settings before raising errors.
+ self.teardown_class()
+ raise
+
+ def teardown(self):
+ self.teardown_class()
+
+ @staticmethod
+ @cbook.deprecated('2.1',
+ alternative='remove_ticks_and_titles')
+ def remove_text(figure):
+ remove_ticks_and_titles(figure)
+
+ def nose_runner(self):
+ func = self.compare
+ func = _checked_on_freetype_version(self.freetype_version)(func)
+ funcs = {extension: _xfail_if_format_is_uncomparable(extension)(func)
+ for extension in self.extensions}
+ for idx, baseline in enumerate(self.baseline_images):
+ for extension in self.extensions:
+ yield funcs[extension], idx, baseline, extension
+
+ def __call__(self, func):
+ self.delayed_init(func)
+ import nose.tools
+
+ @nose.tools.with_setup(self.setup, self.teardown)
+ def runner_wrapper():
+ for case in self.nose_runner():
+ yield case
+
+ return _copy_metadata(func, runner_wrapper)
+
+
+def _pytest_image_comparison(baseline_images, extensions, tol,
+ freetype_version, remove_text, savefig_kwargs,
+ style):
+ """
+ Decorate function with image comparison for pytest.
+
+ This function creates a decorator that wraps a figure-generating function
+ with image comparison code. Pytest can become confused if we change the
+ signature of the function, so we indirectly pass anything we need via the
+ `mpl_image_comparison_parameters` fixture and extra markers.
+ """
+ import pytest
+
+ extensions = map(_mark_xfail_if_format_is_uncomparable, extensions)
+
+ def decorator(func):
+ # Parameter indirection; see docstring above and comment below.
+ @pytest.mark.usefixtures('mpl_image_comparison_parameters')
+ @pytest.mark.parametrize('extension', extensions)
+ @pytest.mark.baseline_images(baseline_images)
+ # END Parameter indirection.
+ @pytest.mark.style(style)
+ @_checked_on_freetype_version(freetype_version)
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ __tracebackhide__ = True
+ img = _ImageComparisonBase(tol=tol, remove_text=remove_text,
+ savefig_kwargs=savefig_kwargs)
+ img.delayed_init(func)
+ matplotlib.testing.set_font_settings_for_testing()
+ func(*args, **kwargs)
+
+ # Parameter indirection:
+ # This is hacked on via the mpl_image_comparison_parameters fixture
+ # so that we don't need to modify the function's real signature for
+ # any parametrization. Modifying the signature is very very tricky
+ # and likely to confuse pytest.
+ baseline_images, extension = func.parameters
+
+ assert len(plt.get_fignums()) == len(baseline_images), (
+ "Test generated {} images but there are {} baseline images"
+ .format(len(plt.get_fignums()), len(baseline_images)))
+ for idx, baseline in enumerate(baseline_images):
+ img.compare(idx, baseline, extension)
+
+ wrapper.__wrapped__ = func # For Python 2.7.
+ return _copy_metadata(func, wrapper)
+
+ return decorator
+
+
+def image_comparison(baseline_images, extensions=None, tol=0,
+ freetype_version=None, remove_text=False,
+ savefig_kwarg=None,
+ # Default of mpl_test_settings fixture and cleanup too.
+ style='_classic_test'):
+ """
+ Compare images generated by the test with those specified in
+    *baseline_images*, which must correspond, otherwise an
+ ImageComparisonFailure exception will be raised.
+
+    Parameters
+    ----------
+ baseline_images : list or None
+ A list of strings specifying the names of the images generated by
+ calls to :meth:`matplotlib.figure.savefig`.
+
+ If *None*, the test function must use the ``baseline_images`` fixture,
+ either as a parameter or with pytest.mark.usefixtures. This value is
+ only allowed when using pytest.
+
+ extensions : [ None | list ]
+
+ If None, defaults to all supported extensions.
+ Otherwise, a list of extensions to test. For example ['png','pdf'].
+
+ tol : float, optional, default: 0
+ The RMS threshold above which the test is considered failed.
+
+ freetype_version : str or tuple
+ The expected freetype version or range of versions for this test to
+ pass.
+
+ remove_text : bool
+ Remove the title and tick text from the figure before comparison.
+ This does not remove other, more deliberate, text, such as legends and
+ annotations.
+
+ savefig_kwarg : dict
+ Optional arguments that are passed to the savefig method.
+
+ style : string
+ Optional name for the base style to apply to the image test. The test
+ itself can also apply additional styles if desired. Defaults to the
+ '_classic_test' style.
+
+ """
+ if extensions is None:
+ # default extensions to test
+ extensions = ['png', 'pdf', 'svg']
+
+ if savefig_kwarg is None:
+ #default no kwargs to savefig
+ savefig_kwarg = dict()
+
+ if is_called_from_pytest():
+ return _pytest_image_comparison(
+ baseline_images=baseline_images, extensions=extensions, tol=tol,
+ freetype_version=freetype_version, remove_text=remove_text,
+ savefig_kwargs=savefig_kwarg, style=style)
+ else:
+ if baseline_images is None:
+ raise ValueError('baseline_images must be specified')
+
+ return ImageComparisonTest(
+ baseline_images=baseline_images, extensions=extensions, tol=tol,
+ freetype_version=freetype_version, remove_text=remove_text,
+ savefig_kwargs=savefig_kwarg, style=style)
+
+
+def _image_directories(func):
+ """
+ Compute the baseline and result image directories for testing *func*.
+ Create the result directory if it doesn't exist.
+ """
+ module_name = func.__module__
+ if module_name == '__main__':
+ # FIXME: this won't work for nested packages in matplotlib.tests
+ warnings.warn(
+ 'Test module run as script. Guessing baseline image locations.')
+ script_name = sys.argv[0]
+ basedir = os.path.abspath(os.path.dirname(script_name))
+ subdir = os.path.splitext(os.path.split(script_name)[1])[0]
+ else:
+ mods = module_name.split('.')
+ if len(mods) >= 3:
+ mods.pop(0)
+ # mods[0] will be the name of the package being tested (in
+ # most cases "matplotlib") However if this is a
+ # namespace package pip installed and run via the nose
+ # multiprocess plugin or as a specific test this may be
+ # missing. See https://github.com/matplotlib/matplotlib/issues/3314
+ if mods.pop(0) != 'tests':
+ warnings.warn(
+ "Module {!r} does not live in a parent module named 'tests'. "
+ "This is probably ok, but we may not be able to guess the "
+ "correct subdirectory containing the baseline images. If "
+ "things go wrong please make sure that there is a parent "
+ "directory named 'tests' and that it contains a __init__.py "
+ "file (can be empty).".format(module_name))
+ subdir = os.path.join(*mods)
+
+ import imp
+ def find_dotted_module(module_name, path=None):
+ """A version of imp which can handle dots in the module name.
+ As for imp.find_module(), the return value is a 3-element
+ tuple (file, pathname, description)."""
+ res = None
+ for sub_mod in module_name.split('.'):
+ try:
+ res = file, path, _ = imp.find_module(sub_mod, path)
+ path = [path]
+ if file is not None:
+ file.close()
+ except ImportError:
+ # assume namespace package
+ path = list(sys.modules[sub_mod].__path__)
+ res = None, path, None
+ return res
+
+ mod_file = find_dotted_module(func.__module__)[1]
+ basedir = os.path.dirname(mod_file)
+
+ baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
+ result_dir = os.path.abspath(os.path.join('result_images', subdir))
+
+ if not os.path.exists(result_dir):
+ cbook.mkdirs(result_dir)
+
+ return baseline_dir, result_dir
+
+
+def switch_backend(backend):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ def switch_backend_decorator(func):
+ @functools.wraps(func)
+ def backend_switcher(*args, **kwargs):
+ try:
+ prev_backend = mpl.get_backend()
+ matplotlib.testing.setup()
+ plt.switch_backend(backend)
+ result = func(*args, **kwargs)
+ finally:
+ plt.switch_backend(prev_backend)
+ return result
+
+ return _copy_metadata(func, backend_switcher)
+ return switch_backend_decorator
+
+
+def skip_if_command_unavailable(cmd):
+ """
+    Skip a test if a command is unavailable.
+
+ Parameters
+ ----------
+ cmd : list of str
+ must be a complete command which should not
+        return a non-zero exit code, something like
+ ["latex", "-version"]
+ """
+ from matplotlib.compat.subprocess import check_output
+ try:
+ check_output(cmd)
+ except:
+ import pytest
+ return pytest.mark.skip(reason='missing command: %s' % cmd[0])
+
+ return lambda f: f
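A minimal sketch of the image_comparison decorator defined above; the baseline name is hypothetical and must exist under the test module's baseline_images directory for the comparison to pass.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison


@image_comparison(baseline_images=['sine_wave'], extensions=['png'],
                  remove_text=True, tol=0.01)
def test_sine_wave():
    x = np.linspace(0, 2 * np.pi, 100)
    fig, ax = plt.subplots()
    ax.plot(x, np.sin(x))
    # No explicit savefig: the decorator saves every open figure and compares
    # it against baseline_images/<subdir>/sine_wave.png within the given tol.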
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/determinism.py b/contrib/python/matplotlib/py2/matplotlib/testing/determinism.py
new file mode 100644
index 00000000000..614544ce28e
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/determinism.py
@@ -0,0 +1,145 @@
+"""
+Provides utilities to test output reproducibility.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import io
+import os
+import re
+import sys
+from subprocess import check_output
+
+import pytest
+
+import matplotlib
+from matplotlib import pyplot as plt
+
+
+def _determinism_save(objects='mhi', format="pdf", usetex=False):
+ # save current value of SOURCE_DATE_EPOCH and set it
+ # to a constant value, so that time difference is not
+ # taken into account
+ sde = os.environ.pop('SOURCE_DATE_EPOCH', None)
+ os.environ['SOURCE_DATE_EPOCH'] = "946684800"
+
+ matplotlib.rcParams['text.usetex'] = usetex
+
+ fig = plt.figure()
+
+ if 'm' in objects:
+ # use different markers...
+ ax1 = fig.add_subplot(1, 6, 1)
+ x = range(10)
+ ax1.plot(x, [1] * 10, marker=u'D')
+ ax1.plot(x, [2] * 10, marker=u'x')
+ ax1.plot(x, [3] * 10, marker=u'^')
+ ax1.plot(x, [4] * 10, marker=u'H')
+ ax1.plot(x, [5] * 10, marker=u'v')
+
+ if 'h' in objects:
+ # also use different hatch patterns
+ ax2 = fig.add_subplot(1, 6, 2)
+ bars = (ax2.bar(range(1, 5), range(1, 5)) +
+ ax2.bar(range(1, 5), [6] * 4, bottom=range(1, 5)))
+ ax2.set_xticks([1.5, 2.5, 3.5, 4.5])
+
+ patterns = ('-', '+', 'x', '\\', '*', 'o', 'O', '.')
+ for bar, pattern in zip(bars, patterns):
+ bar.set_hatch(pattern)
+
+ if 'i' in objects:
+ # also use different images
+ A = [[1, 2, 3], [2, 3, 1], [3, 1, 2]]
+ fig.add_subplot(1, 6, 3).imshow(A, interpolation='nearest')
+ A = [[1, 3, 2], [1, 2, 3], [3, 1, 2]]
+ fig.add_subplot(1, 6, 4).imshow(A, interpolation='bilinear')
+ A = [[2, 3, 1], [1, 2, 3], [2, 1, 3]]
+ fig.add_subplot(1, 6, 5).imshow(A, interpolation='bicubic')
+
+ x = range(5)
+ fig.add_subplot(1, 6, 6).plot(x, x)
+
+ if six.PY2 and format == 'ps':
+ stdout = io.StringIO()
+ else:
+ stdout = getattr(sys.stdout, 'buffer', sys.stdout)
+ fig.savefig(stdout, format=format)
+ if six.PY2 and format == 'ps':
+ sys.stdout.write(stdout.getvalue())
+
+ # Restores SOURCE_DATE_EPOCH
+ if sde is None:
+ os.environ.pop('SOURCE_DATE_EPOCH', None)
+ else:
+ os.environ['SOURCE_DATE_EPOCH'] = sde
+
+
+def _determinism_check(objects='mhi', format="pdf", usetex=False):
+ """
+    Output the same graphs three times and check that the outputs are exactly
+ the same.
+
+ Parameters
+ ----------
+ objects : str
+ contains characters corresponding to objects to be included in the test
+ document: 'm' for markers, 'h' for hatch patterns, 'i' for images. The
+ default value is "mhi", so that the test includes all these objects.
+ format : str
+ format string. The default value is "pdf".
+ """
+ plots = []
+ for i in range(3):
+ result = check_output([sys.executable, '-R', '-c',
+ 'import matplotlib; '
+ 'matplotlib._called_from_pytest = True; '
+ 'matplotlib.use(%r); '
+ 'from matplotlib.testing.determinism '
+ 'import _determinism_save;'
+ '_determinism_save(%r,%r,%r)'
+ % (format, objects, format, usetex)])
+ plots.append(result)
+ for p in plots[1:]:
+ if usetex:
+ if p != plots[0]:
+ pytest.skip("failed, maybe due to ghostscript timestamps")
+ else:
+ assert p == plots[0]
+
+
+def _determinism_source_date_epoch(format, string, keyword=b"CreationDate"):
+ """
+ Test SOURCE_DATE_EPOCH support. Output a document with the environment
+ variable SOURCE_DATE_EPOCH set to 2000-01-01 00:00 UTC and check that the
+ document contains the timestamp that corresponds to this date (given as an
+ argument).
+
+ Parameters
+ ----------
+ format : str
+ format string, such as "pdf".
+ string : str
+ timestamp string for 2000-01-01 00:00 UTC.
+ keyword : bytes
+ a string to look at when searching for the timestamp in the document
+ (used in case the test fails).
+ """
+ buff = check_output([sys.executable, '-R', '-c',
+ 'import matplotlib; '
+ 'matplotlib._called_from_pytest = True; '
+ 'matplotlib.use(%r); '
+ 'from matplotlib.testing.determinism '
+ 'import _determinism_save;'
+ '_determinism_save(%r,%r)'
+ % (format, "", format)])
+ find_keyword = re.compile(b".*" + keyword + b".*")
+ key = find_keyword.search(buff)
+ if key:
+ print(key.group())
+ else:
+ print("Timestamp keyword (%s) not found!" % keyword)
+ assert string in buff
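A hedged sketch of how these helpers are wired into tests (matplotlib's own backend tests use similar wrappers); the PDF timestamp string below is an assumption about how the pdf backend renders 2000-01-01 00:00 UTC.

from matplotlib.testing.determinism import (_determinism_check,
                                            _determinism_source_date_epoch)


def test_determinism_pdf():
    # Renders markers, hatches and images three times in subprocesses and
    # asserts that the three PDF byte streams are identical.
    _determinism_check('mhi', format='pdf')


def test_source_date_epoch_pdf():
    # Assumed CreationDate representation of 2000-01-01 00:00 UTC in PDF output.
    _determinism_source_date_epoch('pdf', b'20000101000000Z')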
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/disable_internet.py b/contrib/python/matplotlib/py2/matplotlib/testing/disable_internet.py
new file mode 100644
index 00000000000..e70c6565276
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/disable_internet.py
@@ -0,0 +1,150 @@
+# Originally from astropy project (http://astropy.org), under BSD
+# 3-clause license.
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import contextlib
+import socket
+
+from six.moves import urllib
+
+# save original socket method for restoration
+# These are global so that re-calling the turn_off_internet function doesn't
+# overwrite them again
+socket_original = socket.socket
+socket_create_connection = socket.create_connection
+socket_bind = socket.socket.bind
+socket_connect = socket.socket.connect
+
+
+INTERNET_OFF = False
+
+# urllib2 uses a global variable to cache its default "opener" for opening
+# connections for various protocols; we store it off here so we can restore to
+# the default after re-enabling internet use
+_orig_opener = None
+
+
+# ::1 is apparently another valid name for localhost?
+# it is returned by getaddrinfo when that function is given localhost
+
+def check_internet_off(original_function):
+ """
+ Wraps ``original_function``, which in most cases is assumed
+ to be a `socket.socket` method, to raise an `IOError` for any operations
+ on non-local AF_INET sockets.
+ """
+
+ def new_function(*args, **kwargs):
+ if isinstance(args[0], socket.socket):
+ if not args[0].family in (socket.AF_INET, socket.AF_INET6):
+ # Should be fine in all but some very obscure cases
+ # More to the point, we don't want to affect AF_UNIX
+ # sockets.
+ return original_function(*args, **kwargs)
+ host = args[1][0]
+ addr_arg = 1
+ valid_hosts = ('localhost', '127.0.0.1', '::1')
+ else:
+ # The only other function this is used to wrap currently is
+ # socket.create_connection, which should be passed a 2-tuple, but
+ # we'll check just in case
+ if not (isinstance(args[0], tuple) and len(args[0]) == 2):
+ return original_function(*args, **kwargs)
+
+ host = args[0][0]
+ addr_arg = 0
+ valid_hosts = ('localhost', '127.0.0.1')
+
+ hostname = socket.gethostname()
+ fqdn = socket.getfqdn()
+
+ if host in (hostname, fqdn):
+ host = 'localhost'
+ new_addr = (host, args[addr_arg][1])
+ args = args[:addr_arg] + (new_addr,) + args[addr_arg + 1:]
+
+ if any([h in host for h in valid_hosts]):
+ return original_function(*args, **kwargs)
+ else:
+ raise IOError("An attempt was made to connect to the internet "
+ "by a test that was not marked `remote_data`.")
+ return new_function
+
+
+def turn_off_internet(verbose=False):
+ """
+ Disable internet access via python by preventing connections from being
+ created using the socket module. Presumably this could be worked around by
+ using some other means of accessing the internet, but all default python
+ modules (urllib, requests, etc.) use socket [citation needed].
+ """
+
+ global INTERNET_OFF
+ global _orig_opener
+
+ if INTERNET_OFF:
+ return
+
+ INTERNET_OFF = True
+
+ __tracebackhide__ = True
+ if verbose:
+ print("Internet access disabled")
+
+ # Update urllib2 to force it not to use any proxies
+ # Must use {} here (the default of None will kick off an automatic search
+ # for proxies)
+ _orig_opener = urllib.request.build_opener()
+ no_proxy_handler = urllib.request.ProxyHandler({})
+ opener = urllib.request.build_opener(no_proxy_handler)
+ urllib.request.install_opener(opener)
+
+ socket.create_connection = check_internet_off(socket_create_connection)
+ socket.socket.bind = check_internet_off(socket_bind)
+ socket.socket.connect = check_internet_off(socket_connect)
+
+ return socket
+
+
+def turn_on_internet(verbose=False):
+ """
+ Restore internet access. Not used, but kept in case it is needed.
+ """
+
+ global INTERNET_OFF
+ global _orig_opener
+
+ if not INTERNET_OFF:
+ return
+
+ INTERNET_OFF = False
+
+ if verbose:
+ print("Internet access enabled")
+
+ urllib.request.install_opener(_orig_opener)
+
+ socket.create_connection = socket_create_connection
+ socket.socket.bind = socket_bind
+ socket.socket.connect = socket_connect
+ return socket
+
+
+@contextlib.contextmanager
+def no_internet(verbose=False):
+ """Context manager to temporarily disable internet access (if not already
+ disabled). If it was already disabled before entering the context manager
+ (i.e. `turn_off_internet` was called previously) then this is a no-op and
+ leaves internet access disabled until a manual call to `turn_on_internet`.
+ """
+
+ already_disabled = INTERNET_OFF
+
+ turn_off_internet(verbose=verbose)
+ try:
+ yield
+ finally:
+ if not already_disabled:
+ turn_on_internet(verbose=verbose)
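An illustrative use of the context manager above; the URL is a placeholder, and any attempt to reach it from inside the block is expected to raise IOError.

from six.moves import urllib
from matplotlib.testing.disable_internet import no_internet

with no_internet(verbose=True):
    try:
        urllib.request.urlopen('http://example.com', timeout=1)
    except IOError as exc:
        print('connection blocked as expected: %s' % exc)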
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/exceptions.py b/contrib/python/matplotlib/py2/matplotlib/testing/exceptions.py
new file mode 100644
index 00000000000..c39a3920774
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/exceptions.py
@@ -0,0 +1,4 @@
+class ImageComparisonFailure(AssertionError):
+ """
+    Raise this exception when a comparison between two images fails.
+ """
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/Duration.py b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/Duration.py
new file mode 100644
index 00000000000..99b2f987298
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/Duration.py
@@ -0,0 +1,211 @@
+#===========================================================================
+#
+# Duration
+#
+#===========================================================================
+
+
+"""Duration module."""
+
+#===========================================================================
+# Place all imports after here.
+#
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+#
+# Place all imports before here.
+#===========================================================================
+
+#===========================================================================
+class Duration(object):
+ """Class Duration in development.
+ """
+ allowed = [ "ET", "UTC" ]
+
+ #-----------------------------------------------------------------------
+ def __init__( self, frame, seconds ):
+ """Create a new Duration object.
+
+ = ERROR CONDITIONS
+ - If the input frame is not in the allowed list, an error is thrown.
+
+ = INPUT VARIABLES
+ - frame The frame of the duration. Must be 'ET' or 'UTC'
+ - seconds The number of seconds in the Duration.
+ """
+ if frame not in self.allowed:
+ msg = "Input frame '%s' is not one of the supported frames of %s" \
+ % ( frame, str( self.allowed ) )
+ raise ValueError( msg )
+
+ self._frame = frame
+ self._seconds = seconds
+
+ #-----------------------------------------------------------------------
+ def frame( self ):
+ """Return the frame the duration is in."""
+ return self._frame
+
+ #-----------------------------------------------------------------------
+ def __abs__( self ):
+ """Return the absolute value of the duration."""
+ return Duration( self._frame, abs( self._seconds ) )
+
+ #-----------------------------------------------------------------------
+ def __neg__( self ):
+ """Return the negative value of this Duration."""
+ return Duration( self._frame, -self._seconds )
+
+ #-----------------------------------------------------------------------
+ def seconds( self ):
+ """Return the number of seconds in the Duration."""
+ return self._seconds
+
+ #-----------------------------------------------------------------------
+ def __nonzero__( self ):
+ """Compare two Durations.
+
+ = INPUT VARIABLES
+ - rhs The Duration to compare against.
+
+ = RETURN VALUE
+ - Returns -1 if self < rhs, 0 if self == rhs, +1 if self > rhs.
+ """
+ return self._seconds != 0
+
+ if six.PY3:
+ __bool__ = __nonzero__
+
+ #-----------------------------------------------------------------------
+ def __cmp__( self, rhs ):
+ """Compare two Durations.
+
+ = ERROR CONDITIONS
+ - If the input rhs is not in the same frame, an error is thrown.
+
+ = INPUT VARIABLES
+ - rhs The Duration to compare against.
+
+ = RETURN VALUE
+ - Returns -1 if self < rhs, 0 if self == rhs, +1 if self > rhs.
+ """
+ self.checkSameFrame( rhs, "compare" )
+ return cmp( self._seconds, rhs._seconds )
+
+ #-----------------------------------------------------------------------
+ def __add__( self, rhs ):
+ """Add two Durations.
+
+ = ERROR CONDITIONS
+ - If the input rhs is not in the same frame, an error is thrown.
+
+ = INPUT VARIABLES
+ - rhs The Duration to add.
+
+ = RETURN VALUE
+ - Returns the sum of ourselves and the input Duration.
+ """
+ # Delay-load due to circular dependencies.
+ import matplotlib.testing.jpl_units as U
+
+ if isinstance( rhs, U.Epoch ):
+ return rhs + self
+
+ self.checkSameFrame( rhs, "add" )
+ return Duration( self._frame, self._seconds + rhs._seconds )
+
+ #-----------------------------------------------------------------------
+ def __sub__( self, rhs ):
+ """Subtract two Durations.
+
+ = ERROR CONDITIONS
+ - If the input rhs is not in the same frame, an error is thrown.
+
+ = INPUT VARIABLES
+ - rhs The Duration to subtract.
+
+ = RETURN VALUE
+ - Returns the difference of ourselves and the input Duration.
+ """
+ self.checkSameFrame( rhs, "sub" )
+ return Duration( self._frame, self._seconds - rhs._seconds )
+
+ #-----------------------------------------------------------------------
+ def __mul__( self, rhs ):
+ """Scale a UnitDbl by a value.
+
+ = INPUT VARIABLES
+ - rhs The scalar to multiply by.
+
+ = RETURN VALUE
+ - Returns the scaled Duration.
+ """
+ return Duration( self._frame, self._seconds * float( rhs ) )
+
+ #-----------------------------------------------------------------------
+ def __rmul__( self, lhs ):
+ """Scale a Duration by a value.
+
+ = INPUT VARIABLES
+ - lhs The scalar to multiply by.
+
+ = RETURN VALUE
+ - Returns the scaled Duration.
+ """
+ return Duration( self._frame, self._seconds * float( lhs ) )
+
+ #-----------------------------------------------------------------------
+ def __div__( self, rhs ):
+ """Divide a Duration by a value.
+
+ = INPUT VARIABLES
+ - rhs The scalar to divide by.
+
+ = RETURN VALUE
+ - Returns the scaled Duration.
+ """
+ return Duration( self._frame, self._seconds / rhs )
+
+ #-----------------------------------------------------------------------
+ def __rdiv__( self, rhs ):
+ """Divide a Duration by a value.
+
+ = INPUT VARIABLES
+ - rhs The scalar to divide by.
+
+ = RETURN VALUE
+ - Returns the scaled Duration.
+ """
+ return Duration( self._frame, rhs / self._seconds )
+
+ #-----------------------------------------------------------------------
+ def __str__( self ):
+ """Print the Duration."""
+ return "%g %s" % ( self._seconds, self._frame )
+
+ #-----------------------------------------------------------------------
+ def __repr__( self ):
+ """Print the Duration."""
+ return "Duration( '%s', %g )" % ( self._frame, self._seconds )
+
+ #-----------------------------------------------------------------------
+ def checkSameFrame( self, rhs, func ):
+ """Check to see if frames are the same.
+
+ = ERROR CONDITIONS
+ - If the frame of the rhs Duration is not the same as our frame,
+ an error is thrown.
+
+ = INPUT VARIABLES
+ - rhs The Duration to check for the same frame
+ - func The name of the function doing the check.
+ """
+ if self._frame != rhs._frame:
+ msg = "Cannot %s Duration's with different frames.\n" \
+ "LHS: %s\n" \
+ "RHS: %s" % ( func, self._frame, rhs._frame )
+ raise ValueError( msg )
+
+#===========================================================================
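A short usage sketch of the Duration class defined above, using only the frames and operators implemented in this file:

    import matplotlib.testing.jpl_units as U

    a = U.Duration("ET", 3600.0)
    b = U.Duration("ET", 1800.0)

    print(a + b)       # "5400 ET"
    print(a - b)       # "1800 ET"
    print(2 * a)       # "7200 ET" via __rmul__
    print(abs(-b))     # "1800 ET" via __neg__ / __abs__
    # U.Duration("ET", 1) + U.Duration("UTC", 1)  # would raise ValueError: frames differ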
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/Epoch.py b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/Epoch.py
new file mode 100644
index 00000000000..91b4c127eb5
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/Epoch.py
@@ -0,0 +1,238 @@
+#===========================================================================
+#
+# Epoch
+#
+#===========================================================================
+
+
+"""Epoch module."""
+
+#===========================================================================
+# Place all imports after here.
+#
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import math
+import datetime as DT
+from matplotlib.dates import date2num
+#
+# Place all imports before here.
+#===========================================================================
+
+#===========================================================================
+class Epoch(object):
+ # Frame conversion offsets in seconds
+ # t(TO) = t(FROM) + allowed[ FROM ][ TO ]
+ allowed = {
+ "ET" : {
+ "UTC" : +64.1839,
+ },
+ "UTC" : {
+ "ET" : -64.1839,
+ },
+ }
+
+ #-----------------------------------------------------------------------
+ def __init__( self, frame, sec=None, jd=None, daynum=None, dt=None ):
+ """Create a new Epoch object.
+
+ Build an epoch in one of three ways:
+
+ Using seconds past a Julian date:
+ # Epoch( 'ET', sec=1e8, jd=2451545 )
+
+ using a matplotlib day number:
+ # Epoch( 'ET', daynum=730119.5 )
+
+ or using a python datetime instance:
+ # Epoch( 'ET', dt=datetime.datetime( 2000, 1, 1 ) )
+
+
+ = ERROR CONDITIONS
+ - If the input units are not in the allowed list, an error is thrown.
+
+ = INPUT VARIABLES
+ - frame The frame of the epoch. Must be 'ET' or 'UTC'
+ - sec The number of seconds past the input JD.
+ - jd The Julian date of the epoch.
+ - daynum The matplotlib day number of the epoch.
+ - dt A python datetime instance.
+ """
+ if ( ( sec is None and jd is not None ) or
+ ( sec is not None and jd is None ) or
+ ( daynum is not None and ( sec is not None or jd is not None ) ) or
+ ( daynum is None and dt is None and ( sec is None or jd is None ) ) or
+ ( daynum is not None and dt is not None ) or
+ ( dt is not None and ( sec is not None or jd is not None ) ) or
+ ( (dt is not None) and not isinstance(dt, DT.datetime) ) ):
+ msg = "Invalid inputs. Must enter sec and jd together, " \
+ "daynum by itself, or dt (must be a python datetime).\n" \
+ "Sec = %s\nJD = %s\ndnum= %s\ndt = %s" \
+ % ( str( sec ), str( jd ), str( daynum ), str( dt ) )
+ raise ValueError( msg )
+
+ if frame not in self.allowed:
+ msg = "Input frame '%s' is not one of the supported frames of %s" \
+ % ( frame, str( list(six.iterkeys(self.allowed) ) ) )
+ raise ValueError(msg)
+
+ self._frame = frame
+
+ if dt is not None:
+ daynum = date2num( dt )
+
+ if daynum is not None:
+ # 1-JAN-0001 in JD = 1721425.5
+ jd = float( daynum ) + 1721425.5
+ self._jd = math.floor( jd )
+ self._seconds = ( jd - self._jd ) * 86400.0
+
+ else:
+ self._seconds = float( sec )
+ self._jd = float( jd )
+
+ # Resolve seconds down to [ 0, 86400 )
+ deltaDays = int( math.floor( self._seconds / 86400.0 ) )
+ self._jd += deltaDays
+ self._seconds -= deltaDays * 86400.0
+
+ #-----------------------------------------------------------------------
+ def convert( self, frame ):
+ if self._frame == frame:
+ return self
+
+ offset = self.allowed[ self._frame ][ frame ]
+
+ return Epoch( frame, self._seconds + offset, self._jd )
+
+ #-----------------------------------------------------------------------
+ def frame( self ):
+ return self._frame
+
+ #-----------------------------------------------------------------------
+ def julianDate( self, frame ):
+ t = self
+ if frame != self._frame:
+ t = self.convert( frame )
+
+ return t._jd + t._seconds / 86400.0
+
+ #-----------------------------------------------------------------------
+ def secondsPast( self, frame, jd ):
+ t = self
+ if frame != self._frame:
+ t = self.convert( frame )
+
+ delta = t._jd - jd
+ return t._seconds + delta * 86400
+
+ #-----------------------------------------------------------------------
+ def __cmp__( self, rhs ):
+ """Compare two Epoch's.
+
+ = INPUT VARIABLES
+ - rhs The Epoch to compare against.
+
+ = RETURN VALUE
+ - Returns -1 if self < rhs, 0 if self == rhs, +1 if self > rhs.
+ """
+ t = self
+ if self._frame != rhs._frame:
+ t = self.convert( rhs._frame )
+
+ if t._jd != rhs._jd:
+ return cmp( t._jd, rhs._jd )
+
+ return cmp( t._seconds, rhs._seconds )
+
+ #-----------------------------------------------------------------------
+ def __add__( self, rhs ):
+ """Add a duration to an Epoch.
+
+ = INPUT VARIABLES
+ - rhs The Duration to add.
+
+ = RETURN VALUE
+ - Returns a new Epoch that is the sum of ourselves and the input Duration.
+ """
+ t = self
+ if self._frame != rhs.frame():
+ t = self.convert( rhs._frame )
+
+ sec = t._seconds + rhs.seconds()
+
+ return Epoch( t._frame, sec, t._jd )
+
+ #-----------------------------------------------------------------------
+ def __sub__( self, rhs ):
+ """Subtract two Epoch's or a Duration from an Epoch.
+
+ Valid:
+ Duration = Epoch - Epoch
+ Epoch = Epoch - Duration
+
+ = INPUT VARIABLES
+ - rhs The Epoch to subtract.
+
+ = RETURN VALUE
+ - Returns either the Duration between two Epochs, or a new Epoch
+ that is the result of subtracting a Duration from an Epoch.
+ """
+ # Delay-load due to circular dependencies.
+ import matplotlib.testing.jpl_units as U
+
+ # Handle Epoch - Duration
+ if isinstance( rhs, U.Duration ):
+ return self + -rhs
+
+ t = self
+ if self._frame != rhs._frame:
+ t = self.convert( rhs._frame )
+
+ days = t._jd - rhs._jd
+ sec = t._seconds - rhs._seconds
+
+ return U.Duration( rhs._frame, days*86400 + sec )
+
+ #-----------------------------------------------------------------------
+ def __str__( self ):
+ """Print the Epoch."""
+ return "%22.15e %s" % ( self.julianDate( self._frame ), self._frame )
+
+ #-----------------------------------------------------------------------
+ def __repr__( self ):
+ """Print the Epoch."""
+ return str( self )
+
+ #-----------------------------------------------------------------------
+ def range( start, stop, step ):
+ """Generate a range of Epoch objects.
+
+ Similar to the Python range() function. Returns the range
+ [ start, stop ) at the requested step. Each element will be
+ an Epoch object.
+
+ = INPUT VARIABLES
+ - start The starting value of the range.
+ - stop The stop value of the range.
+ - step Step to use.
+
+ = RETURN VALUE
+ - Returns a list containing the requested Epoch values.
+ """
+ elems = []
+
+ i = 0
+ while True:
+ d = start + i * step
+ if d >= stop:
+ break
+
+ elems.append( d )
+ i += 1
+
+ return elems
+
+ range = staticmethod( range )
+
+#===========================================================================
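A usage sketch of the Epoch class, covering the three construction paths its constructor accepts:

    import datetime
    import matplotlib.testing.jpl_units as U

    e1 = U.Epoch("ET", sec=0.0, jd=2451545.0)              # seconds past a Julian date
    e2 = U.Epoch("ET", daynum=730120.5)                    # matplotlib day number
    e3 = U.Epoch("UTC", dt=datetime.datetime(2000, 1, 1))  # python datetime

    print(e2 - e1)                         # Duration between two Epochs
    print(e1 + U.Duration("ET", 86400.0))  # Epoch shifted by a Duration
    print(e1.julianDate("UTC"))            # float Julian date in the UTC frame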
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/EpochConverter.py b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/EpochConverter.py
new file mode 100644
index 00000000000..eecf3321135
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/EpochConverter.py
@@ -0,0 +1,165 @@
+#===========================================================================
+#
+# EpochConverter
+#
+#===========================================================================
+
+
+"""EpochConverter module containing class EpochConverter."""
+
+#===========================================================================
+# Place all imports after here.
+#
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import matplotlib.units as units
+import matplotlib.dates as date_ticker
+from matplotlib.cbook import iterable
+#
+# Place all imports before here.
+#===========================================================================
+
+__all__ = [ 'EpochConverter' ]
+
+#===========================================================================
+class EpochConverter( units.ConversionInterface ):
+ """: A matplotlib converter class. Provides matplotlib conversion
+ functionality for Monte Epoch and Duration classes.
+ """
+
+ # julian date reference for "Jan 1, 0001" minus 1 day because
+ # matplotlib really wants "Jan 0, 0001"
+ jdRef = 1721425.5 - 1
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def axisinfo( unit, axis ):
+ """: Returns information on how to handle an axis that has Epoch data.
+
+ = INPUT VARIABLES
+ - unit The units to use for an axis with Epoch data.
+
+ = RETURN VALUE
+ - Returns a matplotlib AxisInfo data structure that contains
+ minor/major formatters, major/minor locators, and default
+ label information.
+ """
+
+ majloc = date_ticker.AutoDateLocator()
+ majfmt = date_ticker.AutoDateFormatter( majloc )
+
+ return units.AxisInfo( majloc = majloc,
+ majfmt = majfmt,
+ label = unit )
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def float2epoch( value, unit ):
+ """: Convert a matplotlib floating-point date into an Epoch of the
+ specified units.
+
+ = INPUT VARIABLES
+ - value The matplotlib floating-point date.
+ - unit The unit system to use for the Epoch.
+
+ = RETURN VALUE
+ - Returns the value converted to an Epoch in the specified time system.
+ """
+ # Delay-load due to circular dependencies.
+ import matplotlib.testing.jpl_units as U
+
+ secPastRef = value * 86400.0 * U.UnitDbl( 1.0, 'sec' )
+ return U.Epoch( unit, secPastRef, EpochConverter.jdRef )
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def epoch2float( value, unit ):
+ """: Convert an Epoch value to a float suitible for plotting as a
+ python datetime object.
+
+ = INPUT VARIABLES
+ - value An Epoch or list of Epochs that need to be converted.
+ - unit The units to use for an axis with Epoch data.
+
+ = RETURN VALUE
+ - Returns the value parameter converted to floats.
+ """
+ return value.julianDate( unit ) - EpochConverter.jdRef
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def duration2float( value ):
+ """: Convert a Duration value to a float suitible for plotting as a
+ python datetime object.
+
+ = INPUT VARIABLES
+ - value A Duration or list of Durations that need to be converted.
+
+ = RETURN VALUE
+ - Returns the value parameter converted to floats.
+ """
+ return value.seconds() / 86400.0
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def convert( value, unit, axis ):
+ """: Convert value using unit to a float. If value is a sequence, return
+ the converted sequence.
+
+ = INPUT VARIABLES
+ - value The value or list of values that need to be converted.
+ - unit The units to use for an axis with Epoch data.
+
+ = RETURN VALUE
+ - Returns the value parameter converted to floats.
+ """
+ # Delay-load due to circular dependencies.
+ import matplotlib.testing.jpl_units as U
+
+ isNotEpoch = True
+ isDuration = False
+
+ if ( iterable(value) and not isinstance(value, six.string_types) ):
+ if ( len(value) == 0 ):
+ return []
+ else:
+ return [ EpochConverter.convert( x, unit, axis ) for x in value ]
+
+ if ( isinstance(value, U.Epoch) ):
+ isNotEpoch = False
+ elif ( isinstance(value, U.Duration) ):
+ isDuration = True
+
+ if ( isNotEpoch and not isDuration and
+ units.ConversionInterface.is_numlike( value ) ):
+ return value
+
+ if ( unit is None ):
+ unit = EpochConverter.default_units( value, axis )
+
+ if ( isDuration ):
+ return EpochConverter.duration2float( value )
+ else:
+ return EpochConverter.epoch2float( value, unit )
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def default_units( value, axis ):
+ """: Return the default unit for value, or None.
+
+ = INPUT VARIABLES
+ - value The value or list of values that need units.
+
+ = RETURN VALUE
+ - Returns the default units to use for value.
+ """
+ frame = None
+ if ( iterable(value) and not isinstance(value, six.string_types) ):
+ return EpochConverter.default_units( value[0], axis )
+ else:
+ frame = value.frame()
+
+ return frame
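The converter's static methods can also be exercised directly; a small sketch using the classes above:

    import matplotlib.testing.jpl_units as U
    from matplotlib.testing.jpl_units import EpochConverter

    e = U.Epoch("ET", daynum=730120.0)
    d = U.Duration("ET", 43200.0)

    EpochConverter.convert(e, "ET", None)       # float matplotlib date for one Epoch
    EpochConverter.convert([e, e], "ET", None)  # sequences are converted element-wise
    EpochConverter.duration2float(d)            # 0.5 (days)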
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/StrConverter.py b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/StrConverter.py
new file mode 100644
index 00000000000..b5b8814f7c7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/StrConverter.py
@@ -0,0 +1,164 @@
+#===========================================================================
+#
+# StrConverter
+#
+#===========================================================================
+
+
+"""StrConverter module containing class StrConverter."""
+
+#===========================================================================
+# Place all imports after here.
+#
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange
+
+import matplotlib.units as units
+from matplotlib.cbook import iterable
+
+# Place all imports before here.
+#===========================================================================
+
+__all__ = [ 'StrConverter' ]
+
+#===========================================================================
+class StrConverter( units.ConversionInterface ):
+ """: A matplotlib converter class. Provides matplotlib conversion
+ functionality for string data values.
+
+ Valid units for string are:
+ - 'indexed' : Values are indexed as they are specified for plotting.
+ - 'sorted' : Values are sorted alphanumerically.
+ - 'inverted' : Values are inverted so that the first value is on top.
+ - 'sorted-inverted' : A combination of 'sorted' and 'inverted'
+ """
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def axisinfo( unit, axis ):
+ """: Returns information on how to handle an axis that has string data.
+
+ = INPUT VARIABLES
+ - axis The axis using this converter.
+ - unit The units to use for an axis with string data.
+
+ = RETURN VALUE
+ - Returns a matplotlib AxisInfo data structure that contains
+ minor/major formatters, major/minor locators, and default
+ label information.
+ """
+
+ return None
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def convert( value, unit, axis ):
+ """: Convert value using unit to a float. If value is a sequence, return
+ the converted sequence.
+
+ = INPUT VARIABLES
+ - axis The axis using this converter.
+ - value The value or list of values that need to be converted.
+ - unit The units to use for an axis with string data.
+
+ = RETURN VALUE
+ - Returns the value parameter converted to floats.
+ """
+
+ if ( units.ConversionInterface.is_numlike( value ) ):
+ return value
+
+ if ( value == [] ):
+ return []
+
+ # we delay loading to make matplotlib happy
+ ax = axis.axes
+ if axis is ax.get_xaxis():
+ isXAxis = True
+ else:
+ isXAxis = False
+
+ axis.get_major_ticks()
+ ticks = axis.get_ticklocs()
+ labels = axis.get_ticklabels()
+
+ labels = [ l.get_text() for l in labels if l.get_text() ]
+
+ if ( not labels ):
+ ticks = []
+ labels = []
+
+
+ if ( not iterable( value ) ):
+ value = [ value ]
+
+ newValues = []
+ for v in value:
+ if ( (v not in labels) and (v not in newValues) ):
+ newValues.append( v )
+
+ for v in newValues:
+ if ( labels ):
+ labels.append( v )
+ else:
+ labels = [ v ]
+
+ #DISABLED: This is disabled because matplotlib bar plots do not
+ #DISABLED: recalculate the unit conversion of the data values
+ #DISABLED: this is due to design and is not really a bug.
+ #DISABLED: If this gets changed, then we can activate the following
+ #DISABLED: block of code. Note that this works for line plots.
+ #DISABLED if ( unit ):
+ #DISABLED if ( unit.find( "sorted" ) > -1 ):
+ #DISABLED labels.sort()
+ #DISABLED if ( unit.find( "inverted" ) > -1 ):
+ #DISABLED labels = labels[ ::-1 ]
+
+ # add padding (so they do not appear on the axes themselves)
+ labels = [ '' ] + labels + [ '' ]
+ ticks = list(xrange( len(labels) ))
+ ticks[0] = 0.5
+ ticks[-1] = ticks[-1] - 0.5
+
+ axis.set_ticks( ticks )
+ axis.set_ticklabels( labels )
+ # we have to do the following lines to make ax.autoscale_view work
+ loc = axis.get_major_locator()
+ loc.set_bounds( ticks[0], ticks[-1] )
+
+ if ( isXAxis ):
+ ax.set_xlim( ticks[0], ticks[-1] )
+ else:
+ ax.set_ylim( ticks[0], ticks[-1] )
+
+ result = []
+ for v in value:
+ # If v is not in labels then something went wrong with adding new
+ # labels to the list of old labels.
+ errmsg = "This is due to a logic error in the StrConverter class. "
+ errmsg += "Please report this error and its message in bugzilla."
+ assert ( v in labels ), errmsg
+ result.append( ticks[ labels.index(v) ] )
+
+ ax.viewLim.ignore(-1)
+ return result
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def default_units( value, axis ):
+ """: Return the default unit for value, or None.
+
+ = INPUT VARIABLES
+ - axis The axis using this converter.
+ - value The value or list of values that need units.
+
+ = RETURN VALUE
+ - Returns the default units to use for value.
+ """
+
+ # The default behavior for string indexing.
+ return "indexed"
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDbl.py b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDbl.py
new file mode 100644
index 00000000000..20c89308dfd
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDbl.py
@@ -0,0 +1,297 @@
+#===========================================================================
+#
+# UnitDbl
+#
+#===========================================================================
+
+
+"""UnitDbl module."""
+
+#===========================================================================
+# Place all imports after here.
+#
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+#
+# Place all imports before here.
+#===========================================================================
+
+
+#===========================================================================
+class UnitDbl(object):
+ """Class UnitDbl in development.
+ """
+ #-----------------------------------------------------------------------
+ # Unit conversion table. Small subset of the full one but enough
+ # to test the required functions. First field is a scale factor to
+ # convert the input units to the units of the second field. Only
+ # units in this table are allowed.
+ allowed = {
+ "m" : ( 0.001, "km" ),
+ "km" : ( 1, "km" ),
+ "mile" : ( 1.609344, "km" ),
+
+ "rad" : ( 1, "rad" ),
+ "deg" : ( 1.745329251994330e-02, "rad" ),
+
+ "sec" : ( 1, "sec" ),
+ "min" : ( 60.0, "sec" ),
+ "hour" : ( 3600, "sec" ),
+ }
+
+ _types = {
+ "km" : "distance",
+ "rad" : "angle",
+ "sec" : "time",
+ }
+
+ #-----------------------------------------------------------------------
+ def __init__( self, value, units ):
+ """Create a new UnitDbl object.
+
+ Units are internally converted to km, rad, and sec. The only
+ valid inputs for units are [ m, km, mile, rad, deg, sec, min, hour ].
+
+ The field UnitDbl.value will contain the converted value. Use
+ the convert() method to get a specific type of units back.
+
+ = ERROR CONDITIONS
+ - If the input units are not in the allowed list, an error is thrown.
+
+ = INPUT VARIABLES
+ - value The numeric value of the UnitDbl.
+ - units The string name of the units the value is in.
+ """
+ self.checkUnits( units )
+
+ data = self.allowed[ units ]
+ self._value = float( value * data[0] )
+ self._units = data[1]
+
+ #-----------------------------------------------------------------------
+ def convert( self, units ):
+ """Convert the UnitDbl to a specific set of units.
+
+ = ERROR CONDITIONS
+ - If the input units are not in the allowed list, an error is thrown.
+
+ = INPUT VARIABLES
+ - units The string name of the units to convert to.
+
+ = RETURN VALUE
+ - Returns the value of the UnitDbl in the requested units as a floating
+ point number.
+ """
+ if self._units == units:
+ return self._value
+
+ self.checkUnits( units )
+
+ data = self.allowed[ units ]
+ if self._units != data[1]:
+ msg = "Error trying to convert to different units.\n" \
+ " Invalid conversion requested.\n" \
+ " UnitDbl: %s\n" \
+ " Units: %s\n" % ( str( self ), units )
+ raise ValueError( msg )
+
+ return self._value / data[0]
+
+ #-----------------------------------------------------------------------
+ def __abs__( self ):
+ """Return the absolute value of this UnitDbl."""
+ return UnitDbl( abs( self._value ), self._units )
+
+ #-----------------------------------------------------------------------
+ def __neg__( self ):
+ """Return the negative value of this UnitDbl."""
+ return UnitDbl( -self._value, self._units )
+
+ #-----------------------------------------------------------------------
+ def __nonzero__( self ):
+ """Test a UnitDbl for a non-zero value.
+
+ = RETURN VALUE
+ - Returns true if the value is non-zero.
+ """
+ if six.PY3:
+ return self._value.__bool__()
+ else:
+ return self._value.__nonzero__()
+
+ if six.PY3:
+ __bool__ = __nonzero__
+
+ #-----------------------------------------------------------------------
+ def __cmp__( self, rhs ):
+ """Compare two UnitDbl's.
+
+ = ERROR CONDITIONS
+ - If the input rhs units are not the same as our units,
+ an error is thrown.
+
+ = INPUT VARIABLES
+ - rhs The UnitDbl to compare against.
+
+ = RETURN VALUE
+ - Returns -1 if self < rhs, 0 if self == rhs, +1 if self > rhs.
+ """
+ self.checkSameUnits( rhs, "compare" )
+ return cmp( self._value, rhs._value )
+
+ #-----------------------------------------------------------------------
+ def __add__( self, rhs ):
+ """Add two UnitDbl's.
+
+ = ERROR CONDITIONS
+ - If the input rhs units are not the same as our units,
+ an error is thrown.
+
+ = INPUT VARIABLES
+ - rhs The UnitDbl to add.
+
+ = RETURN VALUE
+ - Returns the sum of ourselves and the input UnitDbl.
+ """
+ self.checkSameUnits( rhs, "add" )
+ return UnitDbl( self._value + rhs._value, self._units )
+
+ #-----------------------------------------------------------------------
+ def __sub__( self, rhs ):
+ """Subtract two UnitDbl's.
+
+ = ERROR CONDITIONS
+ - If the input rhs units are not the same as our units,
+ an error is thrown.
+
+ = INPUT VARIABLES
+ - rhs The UnitDbl to subtract.
+
+ = RETURN VALUE
+ - Returns the difference of ourselves and the input UnitDbl.
+ """
+ self.checkSameUnits( rhs, "subtract" )
+ return UnitDbl( self._value - rhs._value, self._units )
+
+ #-----------------------------------------------------------------------
+ def __mul__( self, rhs ):
+ """Scale a UnitDbl by a value.
+
+ = INPUT VARIABLES
+ - rhs The scalar to multiply by.
+
+ = RETURN VALUE
+ - Returns the scaled UnitDbl.
+ """
+ return UnitDbl( self._value * rhs, self._units )
+
+ #-----------------------------------------------------------------------
+ def __rmul__( self, lhs ):
+ """Scale a UnitDbl by a value.
+
+ = INPUT VARIABLES
+ - lhs The scalar to multiply by.
+
+ = RETURN VALUE
+ - Returns the scaled UnitDbl.
+ """
+ return UnitDbl( self._value * lhs, self._units )
+
+ #-----------------------------------------------------------------------
+ def __div__( self, rhs ):
+ """Divide a UnitDbl by a value.
+
+ = INPUT VARIABLES
+ - rhs The scalar to divide by.
+
+ = RETURN VALUE
+ - Returns the scaled UnitDbl.
+ """
+ return UnitDbl( self._value / rhs, self._units )
+
+ #-----------------------------------------------------------------------
+ def __str__( self ):
+ """Print the UnitDbl."""
+ return "%g *%s" % ( self._value, self._units )
+
+ #-----------------------------------------------------------------------
+ def __repr__( self ):
+ """Print the UnitDbl."""
+ return "UnitDbl( %g, '%s' )" % ( self._value, self._units )
+
+ #-----------------------------------------------------------------------
+ def type( self ):
+ """Return the type of UnitDbl data."""
+ return self._types[ self._units ]
+
+ #-----------------------------------------------------------------------
+ def range( start, stop, step=None ):
+ """Generate a range of UnitDbl objects.
+
+ Similar to the Python range() function. Returns the range [
+ start, stop ) at the requested step. Each element will be a
+ UnitDbl object.
+
+ = INPUT VARIABLES
+ - start The starting value of the range.
+ - stop The stop value of the range.
+ - step Optional step to use. If set to None, then a UnitDbl of
+ value 1 w/ the units of the start is used.
+
+ = RETURN VALUE
+ - Returns a list containing the requested UnitDbl values.
+ """
+ if step is None:
+ step = UnitDbl( 1, start._units )
+
+ elems = []
+
+ i = 0
+ while True:
+ d = start + i * step
+ if d >= stop:
+ break
+
+ elems.append( d )
+ i += 1
+
+ return elems
+
+ range = staticmethod( range )
+
+ #-----------------------------------------------------------------------
+ def checkUnits( self, units ):
+ """Check to see if some units are valid.
+
+ = ERROR CONDITIONS
+ - If the input units are not in the allowed list, an error is thrown.
+
+ = INPUT VARIABLES
+ - units The string name of the units to check.
+ """
+ if units not in self.allowed:
+ msg = "Input units '%s' are not one of the supported types of %s" \
+ % ( units, str( list(six.iterkeys(self.allowed)) ) )
+ raise ValueError( msg )
+
+ #-----------------------------------------------------------------------
+ def checkSameUnits( self, rhs, func ):
+ """Check to see if units are the same.
+
+ = ERROR CONDITIONS
+ - If the units of the rhs UnitDbl are not the same as our units,
+ an error is thrown.
+
+ = INPUT VARIABLES
+ - rhs The UnitDbl to check for the same units
+ - func The name of the function doing the check.
+ """
+ if self._units != rhs._units:
+ msg = "Cannot %s units of different types.\n" \
+ "LHS: %s\n" \
+ "RHS: %s" % ( func, self._units, rhs._units )
+ raise ValueError( msg )
+
+#===========================================================================
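A usage sketch of UnitDbl; values follow the conversion table above, and the comparisons in range() rely on __cmp__, i.e. Python 2 semantics:

    import matplotlib.testing.jpl_units as U

    d = U.UnitDbl(1.0, "mile")
    print(d.convert("km"))        # 1.609344, stored internally in km

    t = U.UnitDbl(90, "min") + U.UnitDbl(0.5, "hour")
    print(t.convert("hour"))      # 2.0, stored internally in seconds

    ticks = U.UnitDbl.range(U.UnitDbl(0, "deg"), U.UnitDbl(3, "deg"),
                            U.UnitDbl(1, "deg"))
    print(len(ticks))             # 3 elements: 0, 1 and 2 degrees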
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDblConverter.py b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDblConverter.py
new file mode 100644
index 00000000000..41fe8e19a9b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDblConverter.py
@@ -0,0 +1,159 @@
+#===========================================================================
+#
+# UnitDblConverter
+#
+#===========================================================================
+
+
+"""UnitDblConverter module containing class UnitDblConverter."""
+
+#===========================================================================
+# Place all imports after here.
+#
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+import matplotlib.units as units
+import matplotlib.projections.polar as polar
+from matplotlib.cbook import iterable
+#
+# Place all imports before here.
+#===========================================================================
+
+__all__ = [ 'UnitDblConverter' ]
+
+#===========================================================================
+
+# A special function for use with the matplotlib FuncFormatter class
+# for formatting axes with radian units.
+# This was copied from matplotlib example code.
+def rad_fn(x, pos = None ):
+ """Radian function formatter."""
+ n = int((x / np.pi) * 2.0 + 0.25)
+ if n == 0:
+ return str(x)
+ elif n == 1:
+ return r'$\pi/2$'
+ elif n == 2:
+ return r'$\pi$'
+ elif n % 2 == 0:
+ return r'$%s\pi$' % (n/2,)
+ else:
+ return r'$%s\pi/2$' % (n,)
+
+#===========================================================================
+class UnitDblConverter( units.ConversionInterface ):
+ """: A matplotlib converter class. Provides matplotlib conversion
+ functionality for the Monte UnitDbl class.
+ """
+
+ # default for plotting
+ defaults = {
+ "distance" : 'km',
+ "angle" : 'deg',
+ "time" : 'sec',
+ }
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def axisinfo( unit, axis ):
+ """: Returns information on how to handle an axis that has Epoch data.
+
+ = INPUT VARIABLES
+ - unit The units to use for a axis with Epoch data.
+
+ = RETURN VALUE
+ - Returns a matplotlib AxisInfo data structure that contains
+ minor/major formatters, major/minor locators, and default
+ label information.
+ """
+ # Delay-load due to circular dependencies.
+ import matplotlib.testing.jpl_units as U
+
+ # Check to see if the value used for units is a string unit value
+ # or an actual instance of a UnitDbl so that we can use the unit
+ # value for the default axis label value.
+ if ( unit ):
+ if ( isinstance( unit, six.string_types ) ):
+ label = unit
+ else:
+ label = unit.label()
+ else:
+ label = None
+
+ if ( label == "deg" ) and isinstance( axis.axes, polar.PolarAxes ):
+ # If we want degrees for a polar plot, use the PolarPlotFormatter
+ majfmt = polar.PolarAxes.ThetaFormatter()
+ else:
+ majfmt = U.UnitDblFormatter( useOffset = False )
+
+ return units.AxisInfo( majfmt = majfmt, label = label )
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def convert( value, unit, axis ):
+ """: Convert value using unit to a float. If value is a sequence, return
+ the converted sequence.
+
+ = INPUT VARIABLES
+ - value The value or list of values that need to be converted.
+ - unit The units to use for an axis with UnitDbl data.
+
+ = RETURN VALUE
+ - Returns the value parameter converted to floats.
+ """
+ # Delay-load due to circular dependencies.
+ import matplotlib.testing.jpl_units as U
+
+ isNotUnitDbl = True
+
+ if ( iterable(value) and not isinstance(value, six.string_types) ):
+ if ( len(value) == 0 ):
+ return []
+ else:
+ return [ UnitDblConverter.convert( x, unit, axis ) for x in value ]
+
+ # We need to check to see if the incoming value is actually a UnitDbl and
+ # set a flag. If we get an empty list, then just return an empty list.
+ if ( isinstance(value, U.UnitDbl) ):
+ isNotUnitDbl = False
+
+ # If the incoming value behaves like a number, but is not a UnitDbl,
+ # then just return it because we don't know how to convert it
+ # (or it is already converted)
+ if ( isNotUnitDbl and units.ConversionInterface.is_numlike( value ) ):
+ return value
+
+ # If no units were specified, then get the default units to use.
+ if ( unit is None ):
+ unit = UnitDblConverter.default_units( value, axis )
+
+ # Convert the incoming UnitDbl value/values to float/floats
+ if isinstance( axis.axes, polar.PolarAxes ) and value.type() == "angle":
+ # Guarantee that units are radians for polar plots.
+ return value.convert( "rad" )
+
+ return value.convert( unit )
+
+ #------------------------------------------------------------------------
+ @staticmethod
+ def default_units( value, axis ):
+ """: Return the default unit for value, or None.
+
+ = INPUT VARIABLES
+ - value The value or list of values that need units.
+
+ = RETURN VALUE
+ - Returns the default units to use for value.
+ """
+
+ # Determine the default units based on the user preferences set for
+ # default units when printing a UnitDbl.
+ if ( iterable(value) and not isinstance(value, six.string_types) ):
+ return UnitDblConverter.default_units( value[0], axis )
+ else:
+ return UnitDblConverter.defaults[ value.type() ]
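The module-level rad_fn helper above can also be used on its own as a FuncFormatter callback; a small sketch:

    import numpy as np
    from matplotlib.testing.jpl_units.UnitDblConverter import rad_fn

    rad_fn(np.pi / 2)      # '$\pi/2$'
    rad_fn(np.pi)          # '$\pi$'
    rad_fn(1.5 * np.pi)    # '$3\pi/2$'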
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDblFormatter.py b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDblFormatter.py
new file mode 100644
index 00000000000..269044748c5
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/UnitDblFormatter.py
@@ -0,0 +1,47 @@
+#===========================================================================
+#
+# UnitDblFormatter
+#
+#===========================================================================
+
+
+"""UnitDblFormatter module containing class UnitDblFormatter."""
+
+#===========================================================================
+# Place all imports after here.
+#
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import matplotlib.ticker as ticker
+#
+# Place all imports before here.
+#===========================================================================
+
+__all__ = [ 'UnitDblFormatter' ]
+
+#===========================================================================
+class UnitDblFormatter( ticker.ScalarFormatter ):
+ """The formatter for UnitDbl data types. This allows for formatting
+ with the unit string.
+ """
+ def __init__( self, *args, **kwargs ):
+ 'The arguments are identical to matplotlib.ticker.ScalarFormatter.'
+ ticker.ScalarFormatter.__init__( self, *args, **kwargs )
+
+ def __call__( self, x, pos = None ):
+ 'Return the format for tick val x at position pos'
+ if len(self.locs) == 0:
+ return ''
+ else:
+ return '{:.12}'.format(x)
+
+ def format_data_short( self, value ):
+ "Return the value formatted in 'short' format."
+ return '{:.12}'.format(value)
+
+ def format_data( self, value ):
+ "Return the value formatted into a string."
+ return '{:.12}'.format(value)
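A small sketch of the formatter in isolation (ScalarFormatter keyword arguments pass through unchanged):

    from matplotlib.testing.jpl_units import UnitDblFormatter

    fmt = UnitDblFormatter(useOffset=False)
    fmt.format_data(1.23456789)     # '1.23456789'  ('{:.12}' general format)
    fmt.format_data_short(42.0)     # '42.0'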
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/__init__.py b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/__init__.py
new file mode 100644
index 00000000000..074af4e8358
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/jpl_units/__init__.py
@@ -0,0 +1,88 @@
+#=======================================================================
+
+"""
+This is a sample set of units for use with testing unit conversion
+of matplotlib routines. These are used because they use very strict
+enforcement of unitized data which will test the entire spectrum of how
+unitized data might be used (it is not always meaningful to convert to
+a float without specific units given).
+
+UnitDbl is essentially a unitized floating point number. It has a
+minimal set of supported units (enough for testing purposes). All
+of the mathematical operation are provided to fully test any behaviour
+that might occur with unitized data. Remember that unitized data has
+rules as to how it can be applied to one another (a value of distance
+cannot be added to a value of time). Thus we need to guard against any
+accidental "default" conversion that will strip away the meaning of the
+data and render it neutered.
+
+ Epoch is different from a UnitDbl of time. Time is something that can be
+ measured, whereas an Epoch is a specific moment in time. Epochs are typically
+referenced as an offset from some predetermined epoch.
+
+A difference of two epochs is a Duration. The distinction between a Duration
+and a UnitDbl of time is made because an Epoch can have different frames (or
+units). In the case of our test Epoch class the two allowed frames are 'UTC'
+and 'ET' (Note that these are rough estimates provided for testing purposes
+and should not be used in production code where accuracy of time frames is
+desired). As such a Duration also has a frame of reference and therefore needs
+ to be called out as different from a simple measurement of time, since a delta-t
+in one frame may not be the same in another.
+"""
+
+#=======================================================================
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from .Duration import Duration
+from .Epoch import Epoch
+from .UnitDbl import UnitDbl
+
+from .StrConverter import StrConverter
+from .EpochConverter import EpochConverter
+from .UnitDblConverter import UnitDblConverter
+
+from .UnitDblFormatter import UnitDblFormatter
+
+#=======================================================================
+
+__version__ = "1.0"
+
+__all__ = [
+ 'register',
+ 'Duration',
+ 'Epoch',
+ 'UnitDbl',
+ 'UnitDblFormatter',
+ ]
+
+#=======================================================================
+def register():
+ """Register the unit conversion classes with matplotlib."""
+ import matplotlib.units as mplU
+
+ mplU.registry[ str ] = StrConverter()
+ mplU.registry[ Epoch ] = EpochConverter()
+ mplU.registry[ Duration ] = EpochConverter()
+ mplU.registry[ UnitDbl ] = UnitDblConverter()
+
+#=======================================================================
+# Some default unit instances
+
+# Distances
+m = UnitDbl( 1.0, "m" )
+km = UnitDbl( 1.0, "km" )
+mile = UnitDbl( 1.0, "mile" )
+
+# Angles
+deg = UnitDbl( 1.0, "deg" )
+rad = UnitDbl( 1.0, "rad" )
+
+# Time
+sec = UnitDbl( 1.0, "sec" )
+min = UnitDbl( 1.0, "min" )
+hr = UnitDbl( 1.0, "hour" )
+day = UnitDbl( 24.0, "hour" )
+sec = UnitDbl( 1.0, "sec" )
diff --git a/contrib/python/matplotlib/py2/matplotlib/testing/noseclasses.py b/contrib/python/matplotlib/py2/matplotlib/testing/noseclasses.py
new file mode 100644
index 00000000000..2983b93d7fa
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/testing/noseclasses.py
@@ -0,0 +1,26 @@
+"""
+The module testing.noseclasses is deprecated as of 2.1
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+try:
+ from ._nose.plugins.knownfailure import KnownFailure as _KnownFailure
+ has_nose = True
+except ImportError:
+ has_nose = False
+ _KnownFailure = object
+
+from .. import cbook
+
+cbook.warn_deprecated(
+ since="2.1",
+ message="The noseclass module has been deprecated in 2.1 and will "
+ "be removed in matplotlib 2.3.")
+
+
+@cbook.deprecated("2.1")
+class KnownFailure(_KnownFailure):
+ def __init__(self):
+ if not has_nose:
+ raise ImportError("Need nose for this plugin.")
diff --git a/contrib/python/matplotlib/py2/matplotlib/texmanager.py b/contrib/python/matplotlib/py2/matplotlib/texmanager.py
new file mode 100644
index 00000000000..c9001151cde
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/texmanager.py
@@ -0,0 +1,505 @@
+r"""
+This module supports embedded TeX expressions in matplotlib via dvipng
+and dvips for the raster and postscript backends. The tex and
+dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
+ sessions.
+
+Requirements:
+
+* latex
+* \*Agg backends: dvipng>=1.6
+* PS backend: psfrag, dvips, and Ghostscript>=8.60
+
+Backends:
+
+* \*Agg
+* PS
+* PDF
+
+For raster output, you can get RGBA numpy arrays from TeX expressions
+as follows::
+
+ texmanager = TexManager()
+ s = ('\TeX\ is Number '
+ '$\displaystyle\sum_{n=1}^\infty\frac{-e^{i\pi}}{2^n}$!')
+ Z = texmanager.get_rgba(s, fontsize=12, dpi=80, rgb=(1,0,0))
+
+To enable tex rendering of all text in your matplotlib figure, set
+text.usetex in your matplotlibrc file or include these two lines in
+your script::
+
+ from matplotlib import rc
+ rc('text', usetex=True)
+
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import six
+
+import copy
+import glob
+import os
+import shutil
+import sys
+import warnings
+import logging
+
+from hashlib import md5
+
+import distutils.version
+import numpy as np
+import matplotlib as mpl
+from matplotlib import rcParams
+from matplotlib._png import read_png
+from matplotlib.cbook import mkdirs, Locked
+from matplotlib.compat.subprocess import subprocess, Popen, PIPE, STDOUT
+import matplotlib.dviread as dviread
+import re
+
+_log = logging.getLogger(__name__)
+
+
+@mpl.cbook.deprecated("2.1")
+def dvipng_hack_alpha():
+ try:
+ p = Popen([str('dvipng'), '-version'], stdin=PIPE, stdout=PIPE,
+ stderr=STDOUT, close_fds=(sys.platform != 'win32'))
+ stdout, stderr = p.communicate()
+ except OSError:
+ _log.info('No dvipng was found')
+ return False
+ lines = stdout.decode(sys.getdefaultencoding()).split('\n')
+ for line in lines:
+ if line.startswith('dvipng '):
+ version = line.split()[-1]
+ _log.info('Found dvipng version %s', version)
+ version = distutils.version.LooseVersion(version)
+ return version < distutils.version.LooseVersion('1.6')
+ _log.info('Unexpected response from dvipng -version')
+ return False
+
+
+class TexManager(object):
+ """
+ Convert strings to dvi files using TeX, caching the results to a directory.
+ """
+
+ cachedir = mpl.get_cachedir()
+ if cachedir is not None:
+ texcache = os.path.join(cachedir, 'tex.cache')
+ mkdirs(texcache)
+ else:
+ # Should only happen in a restricted environment (such as Google App
+ # Engine). Deal with this gracefully by not creating a cache directory.
+ texcache = None
+
+ # Caches.
+ rgba_arrayd = {}
+ grey_arrayd = {}
+ postscriptd = property(mpl.cbook.deprecated("2.2")(lambda self: {}))
+ pscnt = property(mpl.cbook.deprecated("2.2")(lambda self: 0))
+
+ serif = ('cmr', '')
+ sans_serif = ('cmss', '')
+ monospace = ('cmtt', '')
+ cursive = ('pzc', r'\usepackage{chancery}')
+ font_family = 'serif'
+ font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
+
+ font_info = {
+ 'new century schoolbook': ('pnc', r'\renewcommand{\rmdefault}{pnc}'),
+ 'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
+ 'times': ('ptm', r'\usepackage{mathptmx}'),
+ 'palatino': ('ppl', r'\usepackage{mathpazo}'),
+ 'zapf chancery': ('pzc', r'\usepackage{chancery}'),
+ 'cursive': ('pzc', r'\usepackage{chancery}'),
+ 'charter': ('pch', r'\usepackage{charter}'),
+ 'serif': ('cmr', ''),
+ 'sans-serif': ('cmss', ''),
+ 'helvetica': ('phv', r'\usepackage{helvet}'),
+ 'avant garde': ('pag', r'\usepackage{avant}'),
+ 'courier': ('pcr', r'\usepackage{courier}'),
+ 'monospace': ('cmtt', ''),
+ 'computer modern roman': ('cmr', ''),
+ 'computer modern sans serif': ('cmss', ''),
+ 'computer modern typewriter': ('cmtt', '')}
+
+ _rc_cache = None
+ _rc_cache_keys = (('text.latex.preamble', ) +
+ tuple(['font.' + n for n in ('family', ) +
+ font_families]))
+
+ def __init__(self):
+
+ if self.texcache is None:
+ raise RuntimeError('Cannot create TexManager, as there is no '
+ 'cache directory available')
+
+ mkdirs(self.texcache)
+ ff = rcParams['font.family']
+ if len(ff) == 1 and ff[0].lower() in self.font_families:
+ self.font_family = ff[0].lower()
+ elif (isinstance(ff, six.string_types)
+ and ff.lower() in self.font_families):
+ self.font_family = ff.lower()
+ else:
+ _log.info('font.family must be one of (%s) when text.usetex is '
+ 'True. serif will be used by default.',
+ ', '.join(self.font_families))
+ self.font_family = 'serif'
+
+ fontconfig = [self.font_family]
+ for font_family in self.font_families:
+ font_family_attr = font_family.replace('-', '_')
+ for font in rcParams['font.' + font_family]:
+ if font.lower() in self.font_info:
+ setattr(self, font_family_attr,
+ self.font_info[font.lower()])
+ _log.debug('family: %s, font: %s, info: %s',
+ font_family, font, self.font_info[font.lower()])
+ break
+ else:
+ _log.debug('%s font is not compatible with usetex.',
+ font_family)
+ else:
+ _log.info('No LaTeX-compatible font found for the %s font '
+ 'family in rcParams. Using default.', font_family)
+ setattr(self, font_family_attr, self.font_info[font_family])
+ fontconfig.append(getattr(self, font_family_attr)[0])
+ # Add a hash of the latex preamble to self._fontconfig so that the
+ # correct png is selected for strings rendered with same font and dpi
+ # even if the latex preamble changes within the session
+ preamble_bytes = self.get_custom_preamble().encode('utf-8')
+ fontconfig.append(md5(preamble_bytes).hexdigest())
+ self._fontconfig = ''.join(fontconfig)
+
+ # The following packages and commands need to be included in the latex
+ # file's preamble:
+ cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
+ if self.font_family == 'cursive':
+ cmd.append(self.cursive[1])
+ self._font_preamble = '\n'.join(
+ [r'\usepackage{type1cm}'] + cmd + [r'\usepackage{textcomp}'])
+
+ def get_basefile(self, tex, fontsize, dpi=None):
+ """
+ Return a filename based on a hash of the string, fontsize, and dpi.
+ """
+ s = ''.join([tex, self.get_font_config(), '%f' % fontsize,
+ self.get_custom_preamble(), str(dpi or '')])
+ return os.path.join(self.texcache, md5(s.encode('utf-8')).hexdigest())
+
+ def get_font_config(self):
+ """Reinitializes self if relevant rcParams on have changed."""
+ if self._rc_cache is None:
+ self._rc_cache = dict.fromkeys(self._rc_cache_keys)
+ changed = [par for par in self._rc_cache_keys
+ if rcParams[par] != self._rc_cache[par]]
+ if changed:
+ _log.debug('following keys changed: %s', changed)
+ for k in changed:
+ _log.debug('%-20s: %-10s -> %-10s',
+ k, self._rc_cache[k], rcParams[k])
+ # deepcopy may not be necessary, but feels more future-proof
+ self._rc_cache[k] = copy.deepcopy(rcParams[k])
+ _log.debug('RE-INIT\nold fontconfig: %s', self._fontconfig)
+ self.__init__()
+ _log.debug('fontconfig: %s', self._fontconfig)
+ return self._fontconfig
+
+ def get_font_preamble(self):
+ """
+ Return a string containing font configuration for the tex preamble.
+ """
+ return self._font_preamble
+
+ def get_custom_preamble(self):
+ """Return a string containing user additions to the tex preamble."""
+ return '\n'.join(rcParams['text.latex.preamble'])
+
+ def make_tex(self, tex, fontsize):
+ """
+ Generate a tex file to render the tex string at a specific font size.
+
+ Return the file name.
+ """
+ basefile = self.get_basefile(tex, fontsize)
+ texfile = '%s.tex' % basefile
+ custom_preamble = self.get_custom_preamble()
+ fontcmd = {'sans-serif': r'{\sffamily %s}',
+ 'monospace': r'{\ttfamily %s}'}.get(self.font_family,
+ r'{\rmfamily %s}')
+ tex = fontcmd % tex
+
+ if rcParams['text.latex.unicode']:
+ unicode_preamble = r"""
+\usepackage{ucs}
+\usepackage[utf8x]{inputenc}"""
+ else:
+ unicode_preamble = ''
+
+ s = r"""
+\documentclass{article}
+%s
+%s
+%s
+\usepackage[papersize={72in,72in},body={70in,70in},margin={1in,1in}]{geometry}
+\pagestyle{empty}
+\begin{document}
+\fontsize{%f}{%f}%s
+\end{document}
+""" % (self._font_preamble, unicode_preamble, custom_preamble,
+ fontsize, fontsize * 1.25, tex)
+ with open(texfile, 'wb') as fh:
+ if rcParams['text.latex.unicode']:
+ fh.write(s.encode('utf8'))
+ else:
+ try:
+ fh.write(s.encode('ascii'))
+ except UnicodeEncodeError as err:
+ _log.info("You are using unicode and latex, but have not "
+ "enabled the 'text.latex.unicode' rcParam.")
+ raise
+
+ return texfile
+
+ _re_vbox = re.compile(
+ r"MatplotlibBox:\(([\d.]+)pt\+([\d.]+)pt\)x([\d.]+)pt")
+
+ def make_tex_preview(self, tex, fontsize):
+ """
+ Generate a tex file to render the tex string at a specific font size.
+
+ It uses the preview.sty to determine the dimension (width, height,
+ descent) of the output.
+
+ Return the file name.
+ """
+ basefile = self.get_basefile(tex, fontsize)
+ texfile = '%s.tex' % basefile
+ custom_preamble = self.get_custom_preamble()
+ fontcmd = {'sans-serif': r'{\sffamily %s}',
+ 'monospace': r'{\ttfamily %s}'}.get(self.font_family,
+ r'{\rmfamily %s}')
+ tex = fontcmd % tex
+
+ if rcParams['text.latex.unicode']:
+ unicode_preamble = r"""
+\usepackage{ucs}
+\usepackage[utf8x]{inputenc}"""
+ else:
+ unicode_preamble = ''
+
+ # newbox, setbox, immediate, etc. are used to find the box
+ # extent of the rendered text.
+
+ s = r"""
+\documentclass{article}
+%s
+%s
+%s
+\usepackage[active,showbox,tightpage]{preview}
+\usepackage[papersize={72in,72in},body={70in,70in},margin={1in,1in}]{geometry}
+
+%% we override the default showbox as it is treated as an error and makes
+%% the exit status not zero
+\def\showbox#1%%
+{\immediate\write16{MatplotlibBox:(\the\ht#1+\the\dp#1)x\the\wd#1}}
+
+\begin{document}
+\begin{preview}
+{\fontsize{%f}{%f}%s}
+\end{preview}
+\end{document}
+""" % (self._font_preamble, unicode_preamble, custom_preamble,
+ fontsize, fontsize * 1.25, tex)
+ with open(texfile, 'wb') as fh:
+ if rcParams['text.latex.unicode']:
+ fh.write(s.encode('utf8'))
+ else:
+ try:
+ fh.write(s.encode('ascii'))
+ except UnicodeEncodeError as err:
+ _log.info("You are using unicode and latex, but have not "
+ "enabled the 'text.latex.unicode' rcParam.")
+ raise
+
+ return texfile
+
+ def _run_checked_subprocess(self, command, tex):
+ _log.debug(command)
+ try:
+ report = subprocess.check_output(command,
+ cwd=self.texcache,
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ raise RuntimeError(
+ '{prog} was not able to process the following string:\n'
+ '{tex!r}\n\n'
+ 'Here is the full report generated by {prog}:\n'
+ '{exc}\n\n'.format(
+ prog=command[0],
+ tex=tex.encode('unicode_escape'),
+ exc=exc.output.decode('utf-8')))
+ _log.debug(report)
+ return report
+
+ def make_dvi(self, tex, fontsize):
+ """
+ Generate a dvi file containing latex's layout of tex string.
+
+ Return the file name.
+ """
+
+ if rcParams['text.latex.preview']:
+ return self.make_dvi_preview(tex, fontsize)
+
+ basefile = self.get_basefile(tex, fontsize)
+ dvifile = '%s.dvi' % basefile
+ if not os.path.exists(dvifile):
+ texfile = self.make_tex(tex, fontsize)
+ with Locked(self.texcache):
+ self._run_checked_subprocess(
+ ["latex", "-interaction=nonstopmode", "--halt-on-error",
+ texfile], tex)
+ for fname in glob.glob(basefile + '*'):
+ if not fname.endswith(('dvi', 'tex')):
+ try:
+ os.remove(fname)
+ except OSError:
+ pass
+
+ return dvifile
+
+ def make_dvi_preview(self, tex, fontsize):
+ """
+ Generate a dvi file containing latex's layout of tex string.
+
+ It calls the make_tex_preview() method and stores the size information
+ (width, height, descent) in a separate file.
+
+ Return the file name.
+ """
+ basefile = self.get_basefile(tex, fontsize)
+ dvifile = '%s.dvi' % basefile
+ baselinefile = '%s.baseline' % basefile
+
+ if not os.path.exists(dvifile) or not os.path.exists(baselinefile):
+ texfile = self.make_tex_preview(tex, fontsize)
+ report = self._run_checked_subprocess(
+ ["latex", "-interaction=nonstopmode", "--halt-on-error",
+ texfile], tex)
+
+ # find the box extent information in the latex output
+ # file and store them in ".baseline" file
+ m = TexManager._re_vbox.search(report.decode("utf-8"))
+ with open(basefile + '.baseline', "w") as fh:
+ fh.write(" ".join(m.groups()))
+
+ for fname in glob.glob(basefile + '*'):
+ if not fname.endswith(('dvi', 'tex', 'baseline')):
+ try:
+ os.remove(fname)
+ except OSError:
+ pass
+
+ return dvifile
+
+ def make_png(self, tex, fontsize, dpi):
+ """
+ Generate a png file containing latex's rendering of tex string.
+
+ Return the file name.
+ """
+ basefile = self.get_basefile(tex, fontsize, dpi)
+ pngfile = '%s.png' % basefile
+ # see get_rgba for a discussion of the background
+ if not os.path.exists(pngfile):
+ dvifile = self.make_dvi(tex, fontsize)
+ self._run_checked_subprocess(
+ ["dvipng", "-bg", "Transparent", "-D", str(dpi),
+ "-T", "tight", "-o", pngfile, dvifile], tex)
+ return pngfile
+
+ @mpl.cbook.deprecated("2.2")
+ def make_ps(self, tex, fontsize):
+ """
+ Generate a postscript file containing latex's rendering of tex string.
+
+ Return the file name.
+ """
+ basefile = self.get_basefile(tex, fontsize)
+ psfile = '%s.epsf' % basefile
+ if not os.path.exists(psfile):
+ dvifile = self.make_dvi(tex, fontsize)
+ self._run_checked_subprocess(
+ ["dvips", "-q", "-E", "-o", psfile, dvifile], tex)
+ return psfile
+
+ @mpl.cbook.deprecated("2.2")
+ def get_ps_bbox(self, tex, fontsize):
+ """
+ Return a list of PS bboxes for latex's rendering of the tex string.
+ """
+ psfile = self.make_ps(tex, fontsize)
+ with open(psfile) as ps:
+ for line in ps:
+ if line.startswith('%%BoundingBox:'):
+ return [int(val) for val in line.split()[1:]]
+ raise RuntimeError('Could not parse %s' % psfile)
+
+ def get_grey(self, tex, fontsize=None, dpi=None):
+ """Return the alpha channel."""
+ key = tex, self.get_font_config(), fontsize, dpi
+ alpha = self.grey_arrayd.get(key)
+ if alpha is None:
+ pngfile = self.make_png(tex, fontsize, dpi)
+ X = read_png(os.path.join(self.texcache, pngfile))
+ self.grey_arrayd[key] = alpha = X[:, :, -1]
+ return alpha
+
+ def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0, 0, 0)):
+ """Return latex's rendering of the tex string as an rgba array."""
+ if not fontsize:
+ fontsize = rcParams['font.size']
+ if not dpi:
+ dpi = rcParams['savefig.dpi']
+ r, g, b = rgb
+ key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
+ Z = self.rgba_arrayd.get(key)
+
+ if Z is None:
+ alpha = self.get_grey(tex, fontsize, dpi)
+ Z = np.dstack([r, g, b, alpha])
+ self.rgba_arrayd[key] = Z
+
+ return Z
+
+ def get_text_width_height_descent(self, tex, fontsize, renderer=None):
+ """Return width, height and descent of the text."""
+ if tex.strip() == '':
+ return 0, 0, 0
+
+ dpi_fraction = renderer.points_to_pixels(1.) if renderer else 1
+
+ if rcParams['text.latex.preview']:
+ # use preview.sty
+ basefile = self.get_basefile(tex, fontsize)
+ baselinefile = '%s.baseline' % basefile
+
+ if not os.path.exists(baselinefile):
+ dvifile = self.make_dvi_preview(tex, fontsize)
+
+ with open(baselinefile) as fh:
+ l = fh.read().split()
+ height, depth, width = [float(l1) * dpi_fraction for l1 in l]
+ return width, height + depth, depth
+
+ else:
+ # use dviread. It sometimes returns a wrong descent.
+ dvifile = self.make_dvi(tex, fontsize)
+ with dviread.Dvi(dvifile, 72 * dpi_fraction) as dvi:
+ page = next(iter(dvi))
+ # A total height (including the descent) needs to be returned.
+ return page.width, page.height + page.descent, page.descent
diff --git a/contrib/python/matplotlib/py2/matplotlib/text.py b/contrib/python/matplotlib/py2/matplotlib/text.py
new file mode 100644
index 00000000000..6838ba5678a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/text.py
@@ -0,0 +1,2336 @@
+"""
+Classes for including text in a figure.
+"""
+from __future__ import absolute_import, division, print_function
+
+import six
+from six.moves import zip
+
+import contextlib
+import logging
+import math
+import warnings
+import weakref
+
+import numpy as np
+
+from . import artist, cbook, docstring, rcParams
+from .artist import Artist
+from .font_manager import FontProperties
+from .lines import Line2D
+from .patches import FancyArrowPatch, FancyBboxPatch, Rectangle
+from .textpath import TextPath # Unused, but imported by others.
+from .transforms import (
+ Affine2D, Bbox, BboxBase, BboxTransformTo, IdentityTransform, Transform)
+
+
+_log = logging.getLogger(__name__)
+
+
+def _process_text_args(override, fontdict=None, **kwargs):
+ "Return an override dict. See :func:`~pyplot.text' docstring for info"
+
+ if fontdict is not None:
+ override.update(fontdict)
+
+ override.update(kwargs)
+ return override
+
+
+@contextlib.contextmanager
+def _wrap_text(textobj):
+ """Temporarily inserts newlines to the text if the wrap option is enabled.
+ """
+ if textobj.get_wrap():
+ old_text = textobj.get_text()
+ try:
+ textobj.set_text(textobj._get_wrapped_text())
+ yield textobj
+ finally:
+ textobj.set_text(old_text)
+ else:
+ yield textobj
+
+
+# Extracted from Text's method to serve as a function
+def get_rotation(rotation):
+ """
+    Return the text angle as a float. The returned
+    angle is between 0 and 360 degrees.
+
+ *rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
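+
+    For example, given the normalization below::
+
+        get_rotation(450)         # 90.0
+        get_rotation('vertical')  # 90.0
+        get_rotation(None)        # 0.0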
+ """
+ try:
+ angle = float(rotation)
+ except (ValueError, TypeError):
+ isString = isinstance(rotation, six.string_types)
+ if ((isString and rotation == 'horizontal') or rotation is None):
+ angle = 0.
+ elif (isString and rotation == 'vertical'):
+ angle = 90.
+ else:
+ raise ValueError("rotation is {0} expected either 'horizontal'"
+ " 'vertical', numeric value or"
+ "None".format(rotation))
+
+ return angle % 360
+
+
+def _get_textbox(text, renderer):
+ """
+    Calculate the bounding box of the text. Unlike the
+    :meth:`matplotlib.text.Text.get_extents` method, the bbox size of
+    the text before rotation is calculated.
+ """
+    # TODO : This function may move into the Text class as a method. In
+    #        fact, the information from the _get_textbox function should be
+    #        available during the Text._get_layout() call, which is called
+    #        within _get_textbox. So it would be better to move this function
+    #        into a method, with some refactoring of the _get_layout method.
+
+ projected_xs = []
+ projected_ys = []
+
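+    # Undo the text rotation, take the axis-aligned extent of the laid-out
+    # lines in that unrotated frame, then rotate the box origin forward again
+    # so the returned origin is in display orientation.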
+ theta = np.deg2rad(text.get_rotation())
+ tr = Affine2D().rotate(-theta)
+
+ _, parts, d = text._get_layout(renderer)
+
+ for t, wh, x, y in parts:
+ w, h = wh
+
+ xt1, yt1 = tr.transform_point((x, y))
+ yt1 -= d
+ xt2, yt2 = xt1 + w, yt1 + h
+
+ projected_xs.extend([xt1, xt2])
+ projected_ys.extend([yt1, yt2])
+
+ xt_box, yt_box = min(projected_xs), min(projected_ys)
+ w_box, h_box = max(projected_xs) - xt_box, max(projected_ys) - yt_box
+
+ tr = Affine2D().rotate(theta)
+
+ x_box, y_box = tr.transform_point((xt_box, yt_box))
+
+ return x_box, y_box, w_box, h_box
+
+
+class Text(Artist):
+ """
+ Handle storing and drawing of text in window or data coordinates.
+ """
+ zorder = 3
+ _cached = cbook.maxdict(50)
+
+ def __repr__(self):
+ return "Text(%g,%g,%s)" % (self._x, self._y, repr(self._text))
+
+ def __init__(self,
+ x=0, y=0, text='',
+ color=None, # defaults to rc params
+ verticalalignment='baseline',
+ horizontalalignment='left',
+ multialignment=None,
+ fontproperties=None, # defaults to FontProperties()
+ rotation=None,
+ linespacing=None,
+ rotation_mode=None,
+ usetex=None, # defaults to rcParams['text.usetex']
+ wrap=False,
+ **kwargs
+ ):
+ """
+ Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
+ with string *text*.
+
+ Valid kwargs are
+ %(Text)s
+ """
+
+ Artist.__init__(self)
+ self._x, self._y = x, y
+
+ if color is None:
+ color = rcParams['text.color']
+ if fontproperties is None:
+ fontproperties = FontProperties()
+ elif isinstance(fontproperties, six.string_types):
+ fontproperties = FontProperties(fontproperties)
+
+ self.set_text(text)
+ self.set_color(color)
+ self.set_usetex(usetex)
+ self.set_wrap(wrap)
+ self._verticalalignment = verticalalignment
+ self._horizontalalignment = horizontalalignment
+ self._multialignment = multialignment
+ self._rotation = rotation
+ self._fontproperties = fontproperties
+ self._bbox_patch = None # a FancyBboxPatch instance
+ self._renderer = None
+ if linespacing is None:
+ linespacing = 1.2 # Maybe use rcParam later.
+ self._linespacing = linespacing
+ self.set_rotation_mode(rotation_mode)
+ self.update(kwargs)
+
+ def update(self, kwargs):
+ """
+ Update properties from a dictionary.
+ """
+ # Update bbox last, as it depends on font properties.
+ sentinel = object() # bbox can be None, so use another sentinel.
+ bbox = kwargs.pop("bbox", sentinel)
+ super(Text, self).update(kwargs)
+ if bbox is not sentinel:
+ self.set_bbox(bbox)
+
+ def __getstate__(self):
+ d = super(Text, self).__getstate__()
+ # remove the cached _renderer (if it exists)
+ d['_renderer'] = None
+ return d
+
+ def contains(self, mouseevent):
+ """Test whether the mouse event occurred in the patch.
+
+ In the case of text, a hit is true anywhere in the
+ axis-aligned bounding-box containing the text.
+
+ Returns True or False.
+ """
+ if callable(self._contains):
+ return self._contains(self, mouseevent)
+
+ if not self.get_visible() or self._renderer is None:
+ return False, {}
+
+ l, b, w, h = self.get_window_extent().bounds
+ r, t = l + w, b + h
+
+ x, y = mouseevent.x, mouseevent.y
+ inside = (l <= x <= r and b <= y <= t)
+ cattr = {}
+
+ # if the text has a surrounding patch, also check containment for it,
+ # and merge the results with the results for the text.
+ if self._bbox_patch:
+ patch_inside, patch_cattr = self._bbox_patch.contains(mouseevent)
+ inside = inside or patch_inside
+ cattr["bbox_patch"] = patch_cattr
+
+ return inside, cattr
+
+ def _get_xy_display(self):
+ 'get the (possibly unit converted) transformed x, y in display coords'
+ x, y = self.get_unitless_position()
+ return self.get_transform().transform_point((x, y))
+
+ def _get_multialignment(self):
+ if self._multialignment is not None:
+ return self._multialignment
+ else:
+ return self._horizontalalignment
+
+ def get_rotation(self):
+ 'return the text angle as float in degrees'
+ return get_rotation(self._rotation) # string_or_number -> number
+
+ def set_rotation_mode(self, m):
+ """
+ Set text rotation mode.
+
+ .. ACCEPTS: [ None | "default" | "anchor" ]
+
+ Parameters
+ ----------
+ m : ``None`` or ``"default"`` or ``"anchor"``
+ If ``None`` or ``"default"``, the text will be first rotated, then
+            aligned according to its horizontal and vertical alignments. If
+ ``"anchor"``, then alignment occurs before rotation.
+ """
+ if m is None or m in ["anchor", "default"]:
+ self._rotation_mode = m
+ else:
+ raise ValueError("Unknown rotation_mode : %s" % repr(m))
+ self.stale = True
+
+ def get_rotation_mode(self):
+ "get text rotation mode"
+ return self._rotation_mode
+
+ def update_from(self, other):
+ 'Copy properties from other to self'
+ Artist.update_from(self, other)
+ self._color = other._color
+ self._multialignment = other._multialignment
+ self._verticalalignment = other._verticalalignment
+ self._horizontalalignment = other._horizontalalignment
+ self._fontproperties = other._fontproperties.copy()
+ self._rotation = other._rotation
+ self._picker = other._picker
+ self._linespacing = other._linespacing
+ self.stale = True
+
+ def _get_layout(self, renderer):
+ """
+        Return the extent (bbox) of the text together with
+        multiple-alignment information. Note that it returns the extent
+        of the rotated text when necessary.
+ """
+ key = self.get_prop_tup(renderer=renderer)
+ if key in self._cached:
+ return self._cached[key]
+
+ horizLayout = []
+
+ thisx, thisy = 0.0, 0.0
+ xmin, ymin = 0.0, 0.0
+ width, height = 0.0, 0.0
+ lines = self.get_text().split('\n')
+
+ whs = np.zeros((len(lines), 2))
+ horizLayout = np.zeros((len(lines), 4))
+
+ # Find full vertical extent of font,
+ # including ascenders and descenders:
+ tmp, lp_h, lp_bl = renderer.get_text_width_height_descent('lp',
+ self._fontproperties,
+ ismath=False)
+ offsety = (lp_h - lp_bl) * self._linespacing
+
+ baseline = 0
+ for i, line in enumerate(lines):
+ clean_line, ismath = self.is_math_text(line, self.get_usetex())
+ if clean_line:
+ w, h, d = renderer.get_text_width_height_descent(clean_line,
+ self._fontproperties,
+ ismath=ismath)
+ else:
+ w, h, d = 0, 0, 0
+
+            # For multiline text, increase the line spacing when the
+            # text net-height (excluding baseline) is larger than that
+            # of an "l" (e.g., use of superscripts), which seems to be
+            # what TeX does.
+ h = max(h, lp_h)
+ d = max(d, lp_bl)
+
+ whs[i] = w, h
+
+ baseline = (h - d) - thisy
+ thisy -= max(offsety, (h - d) * self._linespacing)
+ horizLayout[i] = thisx, thisy, w, h
+ thisy -= d
+ width = max(width, w)
+ descent = d
+
+ ymin = horizLayout[-1][1]
+ ymax = horizLayout[0][1] + horizLayout[0][3]
+ height = ymax - ymin
+ xmax = xmin + width
+
+ # get the rotation matrix
+ M = Affine2D().rotate_deg(self.get_rotation())
+
+ offsetLayout = np.zeros((len(lines), 2))
+ offsetLayout[:] = horizLayout[:, 0:2]
+ # now offset the individual text lines within the box
+ if len(lines) > 1: # do the multiline aligment
+ malign = self._get_multialignment()
+ if malign == 'center':
+ offsetLayout[:, 0] += width / 2.0 - horizLayout[:, 2] / 2.0
+ elif malign == 'right':
+ offsetLayout[:, 0] += width - horizLayout[:, 2]
+
+ # the corners of the unrotated bounding box
+ cornersHoriz = np.array(
+ [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)], float)
+ cornersHoriz[:, 1] -= descent
+
+ # now rotate the bbox
+ cornersRotated = M.transform(cornersHoriz)
+
+ txs = cornersRotated[:, 0]
+ tys = cornersRotated[:, 1]
+
+ # compute the bounds of the rotated box
+ xmin, xmax = txs.min(), txs.max()
+ ymin, ymax = tys.min(), tys.max()
+ width = xmax - xmin
+ height = ymax - ymin
+
+ # Now move the box to the target position offset the display
+ # bbox by alignment
+ halign = self._horizontalalignment
+ valign = self._verticalalignment
+
+ rotation_mode = self.get_rotation_mode()
+ if rotation_mode != "anchor":
+ # compute the text location in display coords and the offsets
+ # necessary to align the bbox with that location
+ if halign == 'center':
+ offsetx = (xmin + width / 2.0)
+ elif halign == 'right':
+ offsetx = (xmin + width)
+ else:
+ offsetx = xmin
+
+ if valign == 'center':
+ offsety = (ymin + height / 2.0)
+ elif valign == 'top':
+ offsety = (ymin + height)
+ elif valign == 'baseline':
+ offsety = (ymin + height) - baseline
+ elif valign == 'center_baseline':
+ offsety = ymin + height - baseline / 2.0
+ else:
+ offsety = ymin
+ else:
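+            # "anchor" mode: the alignment point is picked on the unrotated
+            # corners and is rotated together with the text further below.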
+ xmin1, ymin1 = cornersHoriz[0]
+ xmax1, ymax1 = cornersHoriz[2]
+
+ if halign == 'center':
+ offsetx = (xmin1 + xmax1) / 2.0
+ elif halign == 'right':
+ offsetx = xmax1
+ else:
+ offsetx = xmin1
+
+ if valign == 'center':
+ offsety = (ymin1 + ymax1) / 2.0
+ elif valign == 'top':
+ offsety = ymax1
+ elif valign == 'baseline':
+ offsety = ymax1 - baseline
+ elif valign == 'center_baseline':
+ offsety = (ymin1 + ymax1 - baseline) / 2.0
+ else:
+ offsety = ymin1
+
+ offsetx, offsety = M.transform_point((offsetx, offsety))
+
+ xmin -= offsetx
+ ymin -= offsety
+
+ bbox = Bbox.from_bounds(xmin, ymin, width, height)
+
+ # now rotate the positions around the first x,y position
+ xys = M.transform(offsetLayout)
+ xys -= (offsetx, offsety)
+
+ xs, ys = xys[:, 0], xys[:, 1]
+
+ ret = bbox, list(zip(lines, whs, xs, ys)), descent
+ self._cached[key] = ret
+ return ret
+
+ def set_bbox(self, rectprops):
+ """
+ Draw a bounding box around self. rectprops are any settable
+ properties for a FancyBboxPatch, e.g., facecolor='red', alpha=0.5.
+
+ t.set_bbox(dict(facecolor='red', alpha=0.5))
+
+ The default boxstyle is 'square'. The mutation
+ scale of the FancyBboxPatch is set to the fontsize.
+
+ ACCEPTS: FancyBboxPatch prop dict
+ """
+
+ if rectprops is not None:
+ props = rectprops.copy()
+ boxstyle = props.pop("boxstyle", None)
+ pad = props.pop("pad", None)
+ if boxstyle is None:
+ boxstyle = "square"
+ if pad is None:
+ pad = 4 # points
+ pad /= self.get_size() # to fraction of font size
+ else:
+ if pad is None:
+ pad = 0.3
+
+ # boxstyle could be a callable or a string
+ if (isinstance(boxstyle, six.string_types)
+ and "pad" not in boxstyle):
+ boxstyle += ",pad=%0.2f" % pad
+
+ bbox_transmuter = props.pop("bbox_transmuter", None)
+
+ self._bbox_patch = FancyBboxPatch(
+ (0., 0.),
+ 1., 1.,
+ boxstyle=boxstyle,
+ bbox_transmuter=bbox_transmuter,
+ transform=IdentityTransform(),
+ **props)
+ else:
+ self._bbox_patch = None
+
+ self._update_clip_properties()
+
+ def get_bbox_patch(self):
+ """
+        Return the bbox Patch object, or None if the
+        FancyBboxPatch has not been made.
+ """
+ return self._bbox_patch
+
+ def update_bbox_position_size(self, renderer):
+ """
+        Update the location and the size of the bbox. This method
+        should be used when the position and size of the bbox need to
+        be updated before actually drawing the bbox.
+ """
+
+ if self._bbox_patch:
+
+ trans = self.get_transform()
+
+ # don't use self.get_unitless_position here, which refers to text
+ # position in Text, and dash position in TextWithDash:
+ posx = float(self.convert_xunits(self._x))
+ posy = float(self.convert_yunits(self._y))
+
+ posx, posy = trans.transform_point((posx, posy))
+
+ x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
+ self._bbox_patch.set_bounds(0., 0., w_box, h_box)
+ theta = np.deg2rad(self.get_rotation())
+ tr = Affine2D().rotate(theta)
+ tr = tr.translate(posx + x_box, posy + y_box)
+ self._bbox_patch.set_transform(tr)
+ fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
+ self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
+
+ def _draw_bbox(self, renderer, posx, posy):
+
+ """ Update the location and the size of the bbox
+ (FancyBboxPatch), and draw
+ """
+
+ x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
+ self._bbox_patch.set_bounds(0., 0., w_box, h_box)
+ theta = np.deg2rad(self.get_rotation())
+ tr = Affine2D().rotate(theta)
+ tr = tr.translate(posx + x_box, posy + y_box)
+ self._bbox_patch.set_transform(tr)
+ fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
+ self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
+ self._bbox_patch.draw(renderer)
+
+ def _update_clip_properties(self):
+ clipprops = dict(clip_box=self.clipbox,
+ clip_path=self._clippath,
+ clip_on=self._clipon)
+
+ if self._bbox_patch:
+            self._bbox_patch.update(clipprops)
+
+ def set_clip_box(self, clipbox):
+ """
+ Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
+
+ ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
+ """
+ super(Text, self).set_clip_box(clipbox)
+ self._update_clip_properties()
+
+ def set_clip_path(self, path, transform=None):
+ """
+ Set the artist's clip path, which may be:
+
+ * a :class:`~matplotlib.patches.Patch` (or subclass) instance
+
+ * a :class:`~matplotlib.path.Path` instance, in which case
+ an optional :class:`~matplotlib.transforms.Transform`
+ instance may be provided, which will be applied to the
+ path before using it for clipping.
+
+ * *None*, to remove the clipping path
+
+ For efficiency, if the path happens to be an axis-aligned
+ rectangle, this method will set the clipping box to the
+ corresponding rectangle and set the clipping path to *None*.
+
+ ACCEPTS: [ (:class:`~matplotlib.path.Path`,
+ :class:`~matplotlib.transforms.Transform`) |
+ :class:`~matplotlib.patches.Patch` | None ]
+ """
+ super(Text, self).set_clip_path(path, transform)
+ self._update_clip_properties()
+
+ def set_clip_on(self, b):
+ """
+ Set whether artist uses clipping.
+
+ When False, artists will be visible outside of the axes, which can lead
+ to unexpected results.
+
+ Parameters
+ ----------
+ b : bool
+ .. ACCEPTS: bool
+ """
+ super(Text, self).set_clip_on(b)
+ self._update_clip_properties()
+
+ def get_wrap(self):
+ """Returns the wrapping state for the text."""
+ return self._wrap
+
+ def set_wrap(self, wrap):
+ """Sets the wrapping state for the text.
+
+ Parameters
+ ----------
+ wrap : bool
+ .. ACCEPTS: bool
+ """
+ self._wrap = wrap
+
+ def _get_wrap_line_width(self):
+ """
+ Returns the maximum line width for wrapping text based on the
+ current orientation.
+ """
+ x0, y0 = self.get_transform().transform(self.get_position())
+ figure_box = self.get_figure().get_window_extent()
+
+ # Calculate available width based on text alignment
+ alignment = self.get_horizontalalignment()
+ self.set_rotation_mode('anchor')
+ rotation = self.get_rotation()
+
+ left = self._get_dist_to_box(rotation, x0, y0, figure_box)
+ right = self._get_dist_to_box(
+ (180 + rotation) % 360,
+ x0,
+ y0,
+ figure_box)
+
+ if alignment == 'left':
+ line_width = left
+ elif alignment == 'right':
+ line_width = right
+ else:
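+            # Centered text can grow in both directions, so it is limited by
+            # twice the smaller of the two distances.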
+ line_width = 2 * min(left, right)
+
+ return line_width
+
+ def _get_dist_to_box(self, rotation, x0, y0, figure_box):
+ """
+        Return the distance, in pixels, from the given point to the
+        figure-box boundary along the given rotation direction.
+ """
+ if rotation > 270:
+ quad = rotation - 270
+ h1 = y0 / math.cos(math.radians(quad))
+ h2 = (figure_box.x1 - x0) / math.cos(math.radians(90 - quad))
+ elif rotation > 180:
+ quad = rotation - 180
+ h1 = x0 / math.cos(math.radians(quad))
+ h2 = y0 / math.cos(math.radians(90 - quad))
+ elif rotation > 90:
+ quad = rotation - 90
+ h1 = (figure_box.y1 - y0) / math.cos(math.radians(quad))
+ h2 = x0 / math.cos(math.radians(90 - quad))
+ else:
+ h1 = (figure_box.x1 - x0) / math.cos(math.radians(rotation))
+ h2 = (figure_box.y1 - y0) / math.cos(math.radians(90 - rotation))
+
+ return min(h1, h2)
+
+ def _get_rendered_text_width(self, text):
+ """
+ Returns the width of a given text string, in pixels.
+ """
+ w, h, d = self._renderer.get_text_width_height_descent(
+ text,
+ self.get_fontproperties(),
+ False)
+ return math.ceil(w)
+
+ def _get_wrapped_text(self):
+ """
+ Return a copy of the text with new lines added, so that
+ the text is wrapped relative to the parent figure.
+ """
+ # Not fit to handle breaking up latex syntax correctly, so
+ # ignore latex for now.
+ if self.get_usetex():
+ return self.get_text()
+
+ # Build the line incrementally, for a more accurate measure of length
+ line_width = self._get_wrap_line_width()
+ wrapped_str = ""
+ line = ""
+
+ for word in self.get_text().split(' '):
+            # New lines in the user's text need to force a split, so that it's
+ # not using the longest current line width in the line being built
+ sub_words = word.split('\n')
+ for i in range(len(sub_words)):
+ current_width = self._get_rendered_text_width(
+ line + ' ' + sub_words[i])
+
+ # Split long lines, and each newline found in the current word
+ if current_width > line_width or i > 0:
+ wrapped_str += line + '\n'
+ line = ""
+
+ if line == "":
+ line = sub_words[i]
+ else:
+ line += ' ' + sub_words[i]
+
+ return wrapped_str + line
+
+ @artist.allow_rasterization
+ def draw(self, renderer):
+ """
+ Draws the :class:`Text` object to the given *renderer*.
+ """
+ if renderer is not None:
+ self._renderer = renderer
+ if not self.get_visible():
+ return
+ if self.get_text() == '':
+ return
+
+ renderer.open_group('text', self.get_gid())
+
+ with _wrap_text(self) as textobj:
+ bbox, info, descent = textobj._get_layout(renderer)
+ trans = textobj.get_transform()
+
+ # don't use textobj.get_position here, which refers to text
+ # position in Text, and dash position in TextWithDash:
+ posx = float(textobj.convert_xunits(textobj._x))
+ posy = float(textobj.convert_yunits(textobj._y))
+ posx, posy = trans.transform_point((posx, posy))
+ if not np.isfinite(posx) or not np.isfinite(posy):
+ _log.warning("posx and posy should be finite values")
+ return
+ canvasw, canvash = renderer.get_canvas_width_height()
+
+ # draw the FancyBboxPatch
+ if textobj._bbox_patch:
+ textobj._draw_bbox(renderer, posx, posy)
+
+ gc = renderer.new_gc()
+ gc.set_foreground(textobj.get_color())
+ gc.set_alpha(textobj.get_alpha())
+ gc.set_url(textobj._url)
+ textobj._set_gc_clip(gc)
+
+ angle = textobj.get_rotation()
+
+ for line, wh, x, y in info:
+
+ mtext = textobj if len(info) == 1 else None
+ x = x + posx
+ y = y + posy
+ if renderer.flipy():
+ y = canvash - y
+ clean_line, ismath = textobj.is_math_text(line,
+ self.get_usetex())
+
+ if textobj.get_path_effects():
+ from matplotlib.patheffects import PathEffectRenderer
+ textrenderer = PathEffectRenderer(
+ textobj.get_path_effects(), renderer)
+ else:
+ textrenderer = renderer
+
+ if textobj.get_usetex():
+ textrenderer.draw_tex(gc, x, y, clean_line,
+ textobj._fontproperties, angle,
+ mtext=mtext)
+ else:
+ textrenderer.draw_text(gc, x, y, clean_line,
+ textobj._fontproperties, angle,
+ ismath=ismath, mtext=mtext)
+
+ gc.restore()
+ renderer.close_group('text')
+ self.stale = False
+
+ def get_color(self):
+ "Return the color of the text"
+ return self._color
+
+ def get_fontproperties(self):
+ "Return the :class:`~font_manager.FontProperties` object"
+ return self._fontproperties
+
+ def get_font_properties(self):
+ 'alias for get_fontproperties'
+ return self.get_fontproperties()
+
+ def get_family(self):
+ "Return the list of font families used for font lookup"
+ return self._fontproperties.get_family()
+
+ def get_fontfamily(self):
+ 'alias for get_family'
+ return self.get_family()
+
+ def get_name(self):
+ "Return the font name as string"
+ return self._fontproperties.get_name()
+
+ def get_style(self):
+ "Return the font style as string"
+ return self._fontproperties.get_style()
+
+ def get_size(self):
+ "Return the font size as integer"
+ return self._fontproperties.get_size_in_points()
+
+ def get_variant(self):
+ "Return the font variant as a string"
+ return self._fontproperties.get_variant()
+
+ def get_fontvariant(self):
+ 'alias for get_variant'
+ return self.get_variant()
+
+ def get_weight(self):
+ "Get the font weight as string or number"
+ return self._fontproperties.get_weight()
+
+ def get_fontname(self):
+ 'alias for get_name'
+ return self.get_name()
+
+ def get_fontstyle(self):
+ 'alias for get_style'
+ return self.get_style()
+
+ def get_fontsize(self):
+ 'alias for get_size'
+ return self.get_size()
+
+ def get_fontweight(self):
+ 'alias for get_weight'
+ return self.get_weight()
+
+ def get_stretch(self):
+ 'Get the font stretch as a string or number'
+ return self._fontproperties.get_stretch()
+
+ def get_fontstretch(self):
+ 'alias for get_stretch'
+ return self.get_stretch()
+
+ def get_ha(self):
+ 'alias for get_horizontalalignment'
+ return self.get_horizontalalignment()
+
+ def get_horizontalalignment(self):
+ """
+ Return the horizontal alignment as string. Will be one of
+ 'left', 'center' or 'right'.
+ """
+ return self._horizontalalignment
+
+ def get_unitless_position(self):
+ "Return the unitless position of the text as a tuple (*x*, *y*)"
+ # This will get the position with all unit information stripped away.
+ # This is here for convenience since it is done in several locations.
+ x = float(self.convert_xunits(self._x))
+ y = float(self.convert_yunits(self._y))
+ return x, y
+
+ def get_position(self):
+ "Return the position of the text as a tuple (*x*, *y*)"
+        # This should return the same data (possibly unitized) as was
+ # specified with 'set_x' and 'set_y'.
+ return self._x, self._y
+
+ def get_prop_tup(self, renderer=None):
+ """
+ Return a hashable tuple of properties.
+
+        Not intended to be human readable, but useful for backends that
+ want to cache derived information about text (e.g., layouts) and
+ need to know if the text has changed.
+ """
+ x, y = self.get_unitless_position()
+ renderer = renderer or self._renderer
+ return (x, y, self.get_text(), self._color,
+ self._verticalalignment, self._horizontalalignment,
+ hash(self._fontproperties),
+ self._rotation, self._rotation_mode,
+ self.figure.dpi, weakref.ref(renderer),
+ self._linespacing
+ )
+
+ def get_text(self):
+ "Get the text as string"
+ return self._text
+
+ def get_va(self):
+        'alias for :meth:`get_verticalalignment`'
+ return self.get_verticalalignment()
+
+ def get_verticalalignment(self):
+ """
+ Return the vertical alignment as string. Will be one of
+ 'top', 'center', 'bottom' or 'baseline'.
+ """
+ return self._verticalalignment
+
+ def get_window_extent(self, renderer=None, dpi=None):
+ '''
+ Return a :class:`~matplotlib.transforms.Bbox` object bounding
+ the text, in display units.
+
+ In addition to being used internally, this is useful for
+ specifying clickable regions in a png file on a web page.
+
+ *renderer* defaults to the _renderer attribute of the text
+ object. This is not assigned until the first execution of
+ :meth:`draw`, so you must use this kwarg if you want
+ to call :meth:`get_window_extent` prior to the first
+ :meth:`draw`. For getting web page regions, it is
+ simpler to call the method after saving the figure.
+
+ *dpi* defaults to self.figure.dpi; the renderer dpi is
+ irrelevant. For the web application, if figure.dpi is not
+ the value used when saving the figure, then the value that
+ was used must be specified as the *dpi* argument.
+ '''
+ #return _unit_box
+ if not self.get_visible():
+ return Bbox.unit()
+ if dpi is not None:
+ dpi_orig = self.figure.dpi
+ self.figure.dpi = dpi
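+            # The figure dpi is temporarily overridden here and restored
+            # just before returning, below.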
+ if self.get_text() == '':
+ tx, ty = self._get_xy_display()
+ return Bbox.from_bounds(tx, ty, 0, 0)
+
+ if renderer is not None:
+ self._renderer = renderer
+ if self._renderer is None:
+ self._renderer = self.figure._cachedRenderer
+ if self._renderer is None:
+ raise RuntimeError('Cannot get window extent w/o renderer')
+
+ bbox, info, descent = self._get_layout(self._renderer)
+ x, y = self.get_unitless_position()
+ x, y = self.get_transform().transform_point((x, y))
+ bbox = bbox.translated(x, y)
+ if dpi is not None:
+ self.figure.dpi = dpi_orig
+ return bbox
+
+ def set_backgroundcolor(self, color):
+ """
+ Set the background color of the text by updating the bbox.
+
+ .. seealso::
+
+ :meth:`set_bbox`
+ To change the position of the bounding box.
+
+ ACCEPTS: any matplotlib color
+ """
+ if self._bbox_patch is None:
+ self.set_bbox(dict(facecolor=color, edgecolor=color))
+ else:
+ self._bbox_patch.update(dict(facecolor=color))
+
+ self._update_clip_properties()
+ self.stale = True
+
+ def set_color(self, color):
+ """
+ Set the foreground color of the text
+
+ ACCEPTS: any matplotlib color
+ """
+ # Make sure it is hashable, or get_prop_tup will fail.
+ try:
+ hash(color)
+ except TypeError:
+ color = tuple(color)
+ self._color = color
+ self.stale = True
+
+ def set_ha(self, align):
+ 'alias for set_horizontalalignment'
+ self.set_horizontalalignment(align)
+
+ def set_horizontalalignment(self, align):
+ """
+        Set the horizontal alignment to one of 'center', 'right', or 'left'.
+
+ ACCEPTS: [ 'center' | 'right' | 'left' ]
+ """
+ legal = ('center', 'right', 'left')
+ if align not in legal:
+ raise ValueError('Horizontal alignment must be one of %s' %
+ str(legal))
+ self._horizontalalignment = align
+ self.stale = True
+
+ def set_ma(self, align):
+ 'alias for set_multialignment'
+ self.set_multialignment(align)
+
+ def set_multialignment(self, align):
+ """
+        Set the alignment for multi-line layout. The layout of the
+        bounding box of all the lines is determined by the horizontalalignment
+        and verticalalignment properties, but the multiline text within that
+        box can be aligned independently of the box itself.
+
+ ACCEPTS: ['left' | 'right' | 'center' ]
+ """
+ legal = ('center', 'right', 'left')
+ if align not in legal:
+ raise ValueError('Horizontal alignment must be one of %s' %
+ str(legal))
+ self._multialignment = align
+ self.stale = True
+
+ def set_linespacing(self, spacing):
+ """
+ Set the line spacing as a multiple of the font size.
+ Default is 1.2.
+
+ ACCEPTS: float (multiple of font size)
+ """
+ self._linespacing = spacing
+ self.stale = True
+
+ def set_family(self, fontname):
+ """
+ Set the font family. May be either a single string, or a list
+ of strings in decreasing priority. Each string may be either
+ a real font name or a generic font class name. If the latter,
+ the specific font names will be looked up in the
+ :file:`matplotlibrc` file.
+
+ ACCEPTS: [FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' |
+ 'monospace' ]
+ """
+ self._fontproperties.set_family(fontname)
+ self.stale = True
+
+ def set_variant(self, variant):
+ """
+ Set the font variant, either 'normal' or 'small-caps'.
+
+ ACCEPTS: [ 'normal' | 'small-caps' ]
+ """
+ self._fontproperties.set_variant(variant)
+ self.stale = True
+
+ def set_fontvariant(self, variant):
+ 'alias for set_variant'
+ return self.set_variant(variant)
+
+ def set_name(self, fontname):
+ """alias for set_family"""
+ return self.set_family(fontname)
+
+ def set_fontname(self, fontname):
+ """alias for set_family"""
+ self.set_family(fontname)
+
+ def set_style(self, fontstyle):
+ """
+ Set the font style.
+
+ ACCEPTS: [ 'normal' | 'italic' | 'oblique']
+ """
+ self._fontproperties.set_style(fontstyle)
+ self.stale = True
+
+ def set_fontstyle(self, fontstyle):
+ 'alias for set_style'
+ return self.set_style(fontstyle)
+
+ def set_size(self, fontsize):
+ """
+ Set the font size. May be either a size string, relative to
+ the default font size, or an absolute font size in points.
+
+ ACCEPTS: [size in points | 'xx-small' | 'x-small' | 'small' |
+ 'medium' | 'large' | 'x-large' | 'xx-large' ]
+ """
+ self._fontproperties.set_size(fontsize)
+ self.stale = True
+
+ def set_fontsize(self, fontsize):
+ 'alias for set_size'
+ return self.set_size(fontsize)
+
+ def set_weight(self, weight):
+ """
+ Set the font weight.
+
+ ACCEPTS: [a numeric value in range 0-1000 | 'ultralight' | 'light' |
+ 'normal' | 'regular' | 'book' | 'medium' | 'roman' |
+ 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' |
+ 'extra bold' | 'black' ]
+ """
+ self._fontproperties.set_weight(weight)
+ self.stale = True
+
+ def set_fontweight(self, weight):
+ 'alias for set_weight'
+ return self.set_weight(weight)
+
+ def set_stretch(self, stretch):
+ """
+ Set the font stretch (horizontal condensation or expansion).
+
+ ACCEPTS: [a numeric value in range 0-1000 | 'ultra-condensed' |
+ 'extra-condensed' | 'condensed' | 'semi-condensed' |
+ 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' |
+ 'ultra-expanded' ]
+ """
+ self._fontproperties.set_stretch(stretch)
+ self.stale = True
+
+ def set_fontstretch(self, stretch):
+ 'alias for set_stretch'
+ return self.set_stretch(stretch)
+
+ def set_position(self, xy):
+ """
+ Set the (*x*, *y*) position of the text
+
+ ACCEPTS: (x,y)
+ """
+ self.set_x(xy[0])
+ self.set_y(xy[1])
+
+ def set_x(self, x):
+ """
+ Set the *x* position of the text
+
+ ACCEPTS: float
+ """
+ self._x = x
+ self.stale = True
+
+ def set_y(self, y):
+ """
+ Set the *y* position of the text
+
+ ACCEPTS: float
+ """
+ self._y = y
+ self.stale = True
+
+ def set_rotation(self, s):
+ """
+ Set the rotation of the text
+
+ ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
+ """
+ self._rotation = s
+ self.stale = True
+
+ def set_va(self, align):
+ 'alias for set_verticalalignment'
+ self.set_verticalalignment(align)
+
+ def set_verticalalignment(self, align):
+ """
+ Set the vertical alignment
+
+ ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
+ """
+ legal = ('top', 'bottom', 'center', 'baseline')
+ if align not in legal:
+ raise ValueError('Vertical alignment must be one of %s' %
+ str(legal))
+
+ self._verticalalignment = align
+ self.stale = True
+
+ def set_text(self, s):
+ """
+ Set the text string *s*
+
+ It may contain newlines (``\\n``) or math in LaTeX syntax.
+
+ ACCEPTS: string or anything printable with '%s' conversion.
+ """
+ self._text = '%s' % (s,)
+ self.stale = True
+
+ @staticmethod
+ def is_math_text(s, usetex=None):
+ """
+ Returns a cleaned string and a boolean flag.
+ The flag indicates if the given string *s* contains any mathtext,
+ determined by counting unescaped dollar signs. If no mathtext
+ is present, the cleaned string has its dollar signs unescaped.
+ If usetex is on, the flag always has the value "TeX".
+ """
+ # Did we find an even number of non-escaped dollar signs?
+        # If so, treat it as math text.
+ if usetex is None:
+ usetex = rcParams['text.usetex']
+ if usetex:
+ if s == ' ':
+ s = r'\ '
+ return s, 'TeX'
+
+ if cbook.is_math_text(s):
+ return s, True
+ else:
+ return s.replace(r'\$', '$'), False
+
+ def set_fontproperties(self, fp):
+ """
+ Set the font properties that control the text. *fp* must be a
+ :class:`matplotlib.font_manager.FontProperties` object.
+
+ ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
+ """
+ if isinstance(fp, six.string_types):
+ fp = FontProperties(fp)
+ self._fontproperties = fp.copy()
+ self.stale = True
+
+ def set_font_properties(self, fp):
+ 'alias for set_fontproperties'
+ self.set_fontproperties(fp)
+
+ def set_usetex(self, usetex):
+ """
+ Parameters
+ ----------
+ usetex : bool or None
+ Whether to render using TeX, ``None`` means to use
+ :rc:`text.usetex`.
+
+ .. ACCEPTS: bool or None
+ """
+ if usetex is None:
+ self._usetex = rcParams['text.usetex']
+ else:
+ self._usetex = bool(usetex)
+ self.stale = True
+
+ def get_usetex(self):
+ """
+ Return whether this `Text` object uses TeX for rendering.
+
+ If the user has not manually set this value, it defaults to
+ :rc:`text.usetex`.
+ """
+ if self._usetex is None:
+ return rcParams['text.usetex']
+ else:
+ return self._usetex
+
+docstring.interpd.update(Text=artist.kwdoc(Text))
+docstring.dedent_interpd(Text.__init__)
+
+
+class TextWithDash(Text):
+ """
+ This is basically a :class:`~matplotlib.text.Text` with a dash
+ (drawn with a :class:`~matplotlib.lines.Line2D`) before/after
+ it. It is intended to be a drop-in replacement for
+ :class:`~matplotlib.text.Text`, and should behave identically to
+ it when *dashlength* = 0.0.
+
+ The dash always comes between the point specified by
+ :meth:`~matplotlib.text.Text.set_position` and the text. When a
+ dash exists, the text alignment arguments (*horizontalalignment*,
+ *verticalalignment*) are ignored.
+
+ *dashlength* is the length of the dash in canvas units.
+ (default = 0.0).
+
+ *dashdirection* is one of 0 or 1, where 0 draws the dash after the
+ text and 1 before. (default = 0).
+
+ *dashrotation* specifies the rotation of the dash, and should
+ generally stay *None*. In this case
+ :meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
+ :meth:`~matplotlib.text.Text.get_rotation`. (i.e., the dash takes
+ its rotation from the text's rotation). Because the text center is
+ projected onto the dash, major deviations in the rotation cause
+ what may be considered visually unappealing results.
+ (default = *None*)
+
+ *dashpad* is a padding length to add (or subtract) space
+ between the text and the dash, in canvas units.
+ (default = 3)
+
+ *dashpush* "pushes" the dash and text away from the point
+ specified by :meth:`~matplotlib.text.Text.set_position` by the
+ amount in canvas units. (default = 0)
+
+ .. note::
+
+ The alignment of the two objects is based on the bounding box
+ of the :class:`~matplotlib.text.Text`, as obtained by
+ :meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
+ turn, appears to depend on the font metrics as given by the
+ rendering backend. Hence the quality of the "centering" of the
+ label text with respect to the dash varies depending on the
+ backend used.
+
+ .. note::
+
+ I'm not sure that I got the
+ :meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
+ or whether that's sufficient for providing the object bounding
+ box.
+
+ """
+ __name__ = 'textwithdash'
+
+ def __str__(self):
+ return "TextWithDash(%g,%g,%s)" % (self._x, self._y, repr(self._text))
+
+ def __init__(self,
+ x=0, y=0, text='',
+ color=None, # defaults to rc params
+ verticalalignment='center',
+ horizontalalignment='center',
+ multialignment=None,
+ fontproperties=None, # defaults to FontProperties()
+ rotation=None,
+ linespacing=None,
+ dashlength=0.0,
+ dashdirection=0,
+ dashrotation=None,
+ dashpad=3,
+ dashpush=0,
+ ):
+
+ Text.__init__(self, x=x, y=y, text=text, color=color,
+ verticalalignment=verticalalignment,
+ horizontalalignment=horizontalalignment,
+ multialignment=multialignment,
+ fontproperties=fontproperties,
+ rotation=rotation,
+ linespacing=linespacing)
+
+ # The position (x,y) values for text and dashline
+ # are bogus as given in the instantiation; they will
+ # be set correctly by update_coords() in draw()
+
+ self.dashline = Line2D(xdata=(x, x),
+ ydata=(y, y),
+ color='k',
+ linestyle='-')
+
+ self._dashx = float(x)
+ self._dashy = float(y)
+ self._dashlength = dashlength
+ self._dashdirection = dashdirection
+ self._dashrotation = dashrotation
+ self._dashpad = dashpad
+ self._dashpush = dashpush
+
+ #self.set_bbox(dict(pad=0))
+
+ def get_unitless_position(self):
+ "Return the unitless position of the text as a tuple (*x*, *y*)"
+ # This will get the position with all unit information stripped away.
+ # This is here for convenience since it is done in several locations.
+ x = float(self.convert_xunits(self._dashx))
+ y = float(self.convert_yunits(self._dashy))
+ return x, y
+
+ def get_position(self):
+ "Return the position of the text as a tuple (*x*, *y*)"
+ # This should return the same data (possibly unitized) as was
+ # specified with set_x and set_y
+ return self._dashx, self._dashy
+
+ def get_prop_tup(self, renderer=None):
+ """
+ Return a hashable tuple of properties.
+
+        Not intended to be human readable, but useful for backends that
+ want to cache derived information about text (e.g., layouts) and
+ need to know if the text has changed.
+ """
+ props = [p for p in Text.get_prop_tup(self, renderer=renderer)]
+ props.extend([self._x, self._y, self._dashlength,
+ self._dashdirection, self._dashrotation, self._dashpad,
+ self._dashpush])
+ return tuple(props)
+
+ def draw(self, renderer):
+ """
+ Draw the :class:`TextWithDash` object to the given *renderer*.
+ """
+ self.update_coords(renderer)
+ Text.draw(self, renderer)
+ if self.get_dashlength() > 0.0:
+ self.dashline.draw(renderer)
+ self.stale = False
+
+ def update_coords(self, renderer):
+ """
+ Computes the actual *x*, *y* coordinates for text based on the
+ input *x*, *y* and the *dashlength*. Since the rotation is
+ with respect to the actual canvas's coordinates we need to map
+ back and forth.
+ """
+ dashx, dashy = self.get_unitless_position()
+ dashlength = self.get_dashlength()
+ # Shortcircuit this process if we don't have a dash
+ if dashlength == 0.0:
+ self._x, self._y = dashx, dashy
+ return
+
+ dashrotation = self.get_dashrotation()
+ dashdirection = self.get_dashdirection()
+ dashpad = self.get_dashpad()
+ dashpush = self.get_dashpush()
+
+ angle = get_rotation(dashrotation)
+ theta = np.pi * (angle / 180.0 + dashdirection - 1)
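+        # theta is the dash angle in radians; when dashdirection is 0 the
+        # direction is flipped by half a turn (the "- 1" term contributes -pi).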
+ cos_theta, sin_theta = np.cos(theta), np.sin(theta)
+
+ transform = self.get_transform()
+
+ # Compute the dash end points
+ # The 'c' prefix is for canvas coordinates
+ cxy = transform.transform_point((dashx, dashy))
+ cd = np.array([cos_theta, sin_theta])
+ c1 = cxy + dashpush * cd
+ c2 = cxy + (dashpush + dashlength) * cd
+
+ inverse = transform.inverted()
+ (x1, y1) = inverse.transform_point(tuple(c1))
+ (x2, y2) = inverse.transform_point(tuple(c2))
+ self.dashline.set_data((x1, x2), (y1, y2))
+
+ # We now need to extend this vector out to
+ # the center of the text area.
+ # The basic problem here is that we're "rotating"
+ # two separate objects but want it to appear as
+ # if they're rotated together.
+ # This is made non-trivial because of the
+ # interaction between text rotation and alignment -
+ # text alignment is based on the bbox after rotation.
+ # We reset/force both alignments to 'center'
+ # so we can do something relatively reasonable.
+ # There's probably a better way to do this by
+ # embedding all this in the object's transformations,
+ # but I don't grok the transformation stuff
+ # well enough yet.
+ we = Text.get_window_extent(self, renderer=renderer)
+ w, h = we.width, we.height
+ # Watch for zeros
+ if sin_theta == 0.0:
+ dx = w
+ dy = 0.0
+ elif cos_theta == 0.0:
+ dx = 0.0
+ dy = h
+ else:
+ tan_theta = sin_theta / cos_theta
+ dx = w
+ dy = w * tan_theta
+ if dy > h or dy < -h:
+ dy = h
+ dx = h / tan_theta
+ cwd = np.array([dx, dy]) / 2
+ cwd *= 1 + dashpad / np.sqrt(np.dot(cwd, cwd))
+ cw = c2 + (dashdirection * 2 - 1) * cwd
+
+ newx, newy = inverse.transform_point(tuple(cw))
+ self._x, self._y = newx, newy
+
+ # Now set the window extent
+ # I'm not at all sure this is the right way to do this.
+ we = Text.get_window_extent(self, renderer=renderer)
+ self._twd_window_extent = we.frozen()
+ self._twd_window_extent.update_from_data_xy(np.array([c1]), False)
+
+ # Finally, make text align center
+ Text.set_horizontalalignment(self, 'center')
+ Text.set_verticalalignment(self, 'center')
+
+ def get_window_extent(self, renderer=None):
+ '''
+ Return a :class:`~matplotlib.transforms.Bbox` object bounding
+ the text, in display units.
+
+ In addition to being used internally, this is useful for
+ specifying clickable regions in a png file on a web page.
+
+ *renderer* defaults to the _renderer attribute of the text
+ object. This is not assigned until the first execution of
+ :meth:`draw`, so you must use this kwarg if you want
+ to call :meth:`get_window_extent` prior to the first
+ :meth:`draw`. For getting web page regions, it is
+ simpler to call the method after saving the figure.
+ '''
+ self.update_coords(renderer)
+ if self.get_dashlength() == 0.0:
+ return Text.get_window_extent(self, renderer=renderer)
+ else:
+ return self._twd_window_extent
+
+ def get_dashlength(self):
+ """
+ Get the length of the dash.
+ """
+ return self._dashlength
+
+ def set_dashlength(self, dl):
+ """
+ Set the length of the dash.
+
+ ACCEPTS: float (canvas units)
+ """
+ self._dashlength = dl
+ self.stale = True
+
+ def get_dashdirection(self):
+ """
+        Get the direction of the dash: 1 is before the text and 0 is after.
+ """
+ return self._dashdirection
+
+ def set_dashdirection(self, dd):
+ """
+ Set the direction of the dash following the text.
+ 1 is before the text and 0 is after. The default
+ is 0, which is what you'd want for the typical
+ case of ticks below and on the left of the figure.
+
+ ACCEPTS: int (1 is before, 0 is after)
+ """
+ self._dashdirection = dd
+ self.stale = True
+
+ def get_dashrotation(self):
+ """
+ Get the rotation of the dash in degrees.
+ """
+ if self._dashrotation is None:
+ return self.get_rotation()
+ else:
+ return self._dashrotation
+
+ def set_dashrotation(self, dr):
+ """
+ Set the rotation of the dash, in degrees
+
+ ACCEPTS: float (degrees)
+ """
+ self._dashrotation = dr
+ self.stale = True
+
+ def get_dashpad(self):
+ """
+ Get the extra spacing between the dash and the text, in canvas units.
+ """
+ return self._dashpad
+
+ def set_dashpad(self, dp):
+ """
+ Set the "pad" of the TextWithDash, which is the extra spacing
+ between the dash and the text, in canvas units.
+
+ ACCEPTS: float (canvas units)
+ """
+ self._dashpad = dp
+ self.stale = True
+
+ def get_dashpush(self):
+ """
+ Get the extra spacing between the dash and the specified text
+ position, in canvas units.
+ """
+ return self._dashpush
+
+ def set_dashpush(self, dp):
+ """
+ Set the "push" of the TextWithDash, which
+ is the extra spacing between the beginning
+ of the dash and the specified position.
+
+ ACCEPTS: float (canvas units)
+ """
+ self._dashpush = dp
+ self.stale = True
+
+ def set_position(self, xy):
+ """
+ Set the (*x*, *y*) position of the :class:`TextWithDash`.
+
+ ACCEPTS: (x, y)
+ """
+ self.set_x(xy[0])
+ self.set_y(xy[1])
+
+ def set_x(self, x):
+ """
+ Set the *x* position of the :class:`TextWithDash`.
+
+ ACCEPTS: float
+ """
+ self._dashx = float(x)
+ self.stale = True
+
+ def set_y(self, y):
+ """
+ Set the *y* position of the :class:`TextWithDash`.
+
+ ACCEPTS: float
+ """
+ self._dashy = float(y)
+ self.stale = True
+
+ def set_transform(self, t):
+ """
+ Set the :class:`matplotlib.transforms.Transform` instance used
+ by this artist.
+
+ ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
+ """
+ Text.set_transform(self, t)
+ self.dashline.set_transform(t)
+ self.stale = True
+
+ def get_figure(self):
+ 'return the figure instance the artist belongs to'
+ return self.figure
+
+ def set_figure(self, fig):
+ """
+        Set the figure instance the artist belongs to.
+
+ ACCEPTS: a :class:`matplotlib.figure.Figure` instance
+ """
+ Text.set_figure(self, fig)
+ self.dashline.set_figure(fig)
+
+docstring.interpd.update(TextWithDash=artist.kwdoc(TextWithDash))
+
+
+class OffsetFrom(object):
+ 'Callable helper class for working with `Annotation`'
+ def __init__(self, artist, ref_coord, unit="points"):
+ '''
+ Parameters
+ ----------
+ artist : `Artist`, `BboxBase`, or `Transform`
+ The object to compute the offset from.
+
+ ref_coord : length 2 sequence
+            If `artist` is an `Artist` or `BboxBase`, this value is
+            the location of the offset origin in fractions of the
+            `artist` bounding box.
+
+ If `artist` is a transform, the offset origin is the
+ transform applied to this value.
+
+        unit : {'points', 'pixels'}
+ The screen units to use (pixels or points) for the offset
+ input.
+
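+        A minimal sketch of the intent (``ann`` is assumed to be an existing
+        `Annotation`): ``OffsetFrom(ann, (0.5, 1.0), unit="points")`` places
+        the offset origin at the top center of ``ann``'s bounding box.
+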
+ '''
+ self._artist = artist
+ self._ref_coord = ref_coord
+ self.set_unit(unit)
+
+ def set_unit(self, unit):
+ '''
+ The unit for input to the transform used by ``__call__``
+
+ Parameters
+ ----------
+ unit : {'points', 'pixels'}
+ '''
+ if unit not in ["points", "pixels"]:
+ raise ValueError("'unit' must be one of [ 'points' | 'pixels' ]")
+ self._unit = unit
+
+ def get_unit(self):
+ 'The unit for input to the transform used by ``__call__``'
+ return self._unit
+
+ def _get_scale(self, renderer):
+ unit = self.get_unit()
+ if unit == "pixels":
+ return 1.
+ else:
+ return renderer.points_to_pixels(1.)
+
+ def __call__(self, renderer):
+ '''
+ Return the offset transform.
+
+ Parameters
+ ----------
+ renderer : `RendererBase`
+ The renderer to use to compute the offset
+
+ Returns
+ -------
+ transform : `Transform`
+ Maps (x, y) in pixel or point units to screen units
+ relative to the given artist.
+ '''
+ if isinstance(self._artist, Artist):
+ bbox = self._artist.get_window_extent(renderer)
+ l, b, w, h = bbox.bounds
+ xf, yf = self._ref_coord
+ x, y = l + w * xf, b + h * yf
+ elif isinstance(self._artist, BboxBase):
+ l, b, w, h = self._artist.bounds
+ xf, yf = self._ref_coord
+ x, y = l + w * xf, b + h * yf
+ elif isinstance(self._artist, Transform):
+ x, y = self._artist.transform_point(self._ref_coord)
+ else:
+ raise RuntimeError("unknown type")
+
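+        # The scale converts the caller's offset units (points or pixels)
+        # into pixels; the translation then moves the origin to the
+        # reference point computed above.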
+ sc = self._get_scale(renderer)
+ tr = Affine2D().scale(sc, sc).translate(x, y)
+
+ return tr
+
+
+class _AnnotationBase(object):
+ def __init__(self,
+ xy,
+ xycoords='data',
+ annotation_clip=None):
+
+ self.xy = xy
+ self.xycoords = xycoords
+ self.set_annotation_clip(annotation_clip)
+
+ self._draggable = None
+
+ def _get_xy(self, renderer, x, y, s):
+ if isinstance(s, tuple):
+ s1, s2 = s
+ else:
+ s1, s2 = s, s
+
+ if s1 == 'data':
+ x = float(self.convert_xunits(x))
+ if s2 == 'data':
+ y = float(self.convert_yunits(y))
+
+ tr = self._get_xy_transform(renderer, s)
+ x1, y1 = tr.transform_point((x, y))
+ return x1, y1
+
+ def _get_xy_transform(self, renderer, s):
+
+ if isinstance(s, tuple):
+ s1, s2 = s
+ from matplotlib.transforms import blended_transform_factory
+ tr1 = self._get_xy_transform(renderer, s1)
+ tr2 = self._get_xy_transform(renderer, s2)
+ tr = blended_transform_factory(tr1, tr2)
+ return tr
+ elif callable(s):
+ tr = s(renderer)
+ if isinstance(tr, BboxBase):
+ return BboxTransformTo(tr)
+ elif isinstance(tr, Transform):
+ return tr
+ else:
+ raise RuntimeError("unknown return type ...")
+ elif isinstance(s, Artist):
+ bbox = s.get_window_extent(renderer)
+ return BboxTransformTo(bbox)
+ elif isinstance(s, BboxBase):
+ return BboxTransformTo(s)
+ elif isinstance(s, Transform):
+ return s
+ elif not isinstance(s, six.string_types):
+ raise RuntimeError("unknown coordinate type : %s" % (s,))
+
+ if s == 'data':
+ return self.axes.transData
+ elif s == 'polar':
+ from matplotlib.projections import PolarAxes
+ tr = PolarAxes.PolarTransform()
+ trans = tr + self.axes.transData
+ return trans
+
+ s_ = s.split()
+ if len(s_) != 2:
+ raise ValueError("%s is not a recognized coordinate" % s)
+
+ bbox0, xy0 = None, None
+
+ bbox_name, unit = s_
+ # if unit is offset-like
+ if bbox_name == "figure":
+ bbox0 = self.figure.bbox
+ elif bbox_name == "axes":
+ bbox0 = self.axes.bbox
+ # elif bbox_name == "bbox":
+ # if bbox is None:
+ # raise RuntimeError("bbox is specified as a coordinate but "
+ # "never set")
+ # bbox0 = self._get_bbox(renderer, bbox)
+
+ if bbox0 is not None:
+ xy0 = bbox0.bounds[:2]
+ elif bbox_name == "offset":
+ xy0 = self._get_ref_xy(renderer)
+
+ if xy0 is not None:
+ # reference x, y in display coordinate
+ ref_x, ref_y = xy0
+ from matplotlib.transforms import Affine2D
+ if unit == "points":
+ # dots per points
+ dpp = self.figure.get_dpi() / 72.
+ tr = Affine2D().scale(dpp, dpp)
+ elif unit == "pixels":
+ tr = Affine2D()
+ elif unit == "fontsize":
+ fontsize = self.get_size()
+ dpp = fontsize * self.figure.get_dpi() / 72.
+ tr = Affine2D().scale(dpp, dpp)
+ elif unit == "fraction":
+ w, h = bbox0.bounds[2:]
+ tr = Affine2D().scale(w, h)
+ else:
+ raise ValueError("%s is not a recognized coordinate" % s)
+
+ return tr.translate(ref_x, ref_y)
+
+ else:
+ raise ValueError("%s is not a recognized coordinate" % s)
+
+ def _get_ref_xy(self, renderer):
+ """
+        Return the x, y (in display coordinates) to be used as the reference
+        for any offset coordinate.
+ """
+
+ if isinstance(self.xycoords, tuple):
+ s1, s2 = self.xycoords
+ if ((isinstance(s1, six.string_types)
+ and s1.split()[0] == "offset")
+ or (isinstance(s2, six.string_types)
+ and s2.split()[0] == "offset")):
+ raise ValueError("xycoords should not be an offset coordinate")
+ x, y = self.xy
+ x1, y1 = self._get_xy(renderer, x, y, s1)
+ x2, y2 = self._get_xy(renderer, x, y, s2)
+ return x1, y2
+ elif (isinstance(self.xycoords, six.string_types) and
+ self.xycoords.split()[0] == "offset"):
+ raise ValueError("xycoords should not be an offset coordinate")
+ else:
+ x, y = self.xy
+ return self._get_xy(renderer, x, y, self.xycoords)
+ #raise RuntimeError("must be defined by the derived class")
+
+ # def _get_bbox(self, renderer):
+ # if hasattr(bbox, "bounds"):
+ # return bbox
+ # elif hasattr(bbox, "get_window_extent"):
+ # bbox = bbox.get_window_extent()
+ # return bbox
+ # else:
+ # raise ValueError("A bbox instance is expected but got %s" %
+ # str(bbox))
+
+ def set_annotation_clip(self, b):
+ """
+        Set the *annotation_clip* attribute.
+
+ * True: the annotation will only be drawn when self.xy is inside
+ the axes.
+ * False: the annotation will always be drawn regardless of its
+ position.
+        * None: self.xy will be checked only if *xycoords* is "data".
+ """
+ self._annotation_clip = b
+
+ def get_annotation_clip(self):
+ """
+ Return *annotation_clip* attribute.
+ See :meth:`set_annotation_clip` for the meaning of return values.
+ """
+ return self._annotation_clip
+
+ def _get_position_xy(self, renderer):
+ "Return the pixel position of the annotated point."
+ x, y = self.xy
+ return self._get_xy(renderer, x, y, self.xycoords)
+
+ def _check_xy(self, renderer, xy_pixel):
+ """
+        Given the xy pixel coordinate, check whether the annotation needs
+        to be drawn.
+ """
+
+ b = self.get_annotation_clip()
+
+ if b or (b is None and self.xycoords == "data"):
+ # check if self.xy is inside the axes.
+ if not self.axes.contains_point(xy_pixel):
+ return False
+
+ return True
+
+ def draggable(self, state=None, use_blit=False):
+ """
+ Set the draggable state -- if state is
+
+ * None : toggle the current state
+
+ * True : turn draggable on
+
+ * False : turn draggable off
+
+ If draggable is on, you can drag the annotation on the canvas with
+ the mouse. The DraggableAnnotation helper instance is returned if
+ draggable is on.
+ """
+ from matplotlib.offsetbox import DraggableAnnotation
+ is_draggable = self._draggable is not None
+
+ # if state is None we'll toggle
+ if state is None:
+ state = not is_draggable
+
+ if state:
+ if self._draggable is None:
+ self._draggable = DraggableAnnotation(self, use_blit)
+ else:
+ if self._draggable is not None:
+ self._draggable.disconnect()
+ self._draggable = None
+
+ return self._draggable
+
+
+class Annotation(Text, _AnnotationBase):
+ def __str__(self):
+ return "Annotation(%g,%g,%s)" % (self.xy[0],
+ self.xy[1],
+ repr(self._text))
+
+ @docstring.dedent_interpd
+ def __init__(self, s, xy,
+ xytext=None,
+ xycoords='data',
+ textcoords=None,
+ arrowprops=None,
+ annotation_clip=None,
+ **kwargs):
+ '''
+ Annotate the point ``xy`` with text ``s``.
+
+ Additional kwargs are passed to `~matplotlib.text.Text`.
+
+ Parameters
+ ----------
+
+ s : str
+ The text of the annotation
+
+ xy : iterable
+ Length 2 sequence specifying the *(x,y)* point to annotate
+
+ xytext : iterable, optional
+ Length 2 sequence specifying the *(x,y)* to place the text
+ at. If None, defaults to ``xy``.
+
+ xycoords : str, Artist, Transform, callable or tuple, optional
+
+ The coordinate system that ``xy`` is given in.
+
+ For a `str` the allowed values are:
+
+ ================= ===============================================
+ Property Description
+ ================= ===============================================
+ 'figure points' points from the lower left of the figure
+ 'figure pixels' pixels from the lower left of the figure
+ 'figure fraction' fraction of figure from lower left
+ 'axes points' points from lower left corner of axes
+ 'axes pixels' pixels from lower left corner of axes
+ 'axes fraction' fraction of axes from lower left
+ 'data' use the coordinate system of the object being
+ annotated (default)
+ 'polar' *(theta,r)* if not native 'data' coordinates
+ ================= ===============================================
+
+            If an `~matplotlib.artist.Artist` object is passed in, the units
+            are fractions of its bounding box.
+
+ If a `~matplotlib.transforms.Transform` object is passed
+            in, use that to transform ``xy`` to screen coordinates.
+
+            If a callable, it must take a
+ `~matplotlib.backend_bases.RendererBase` object as input
+ and return a `~matplotlib.transforms.Transform` or
+ `~matplotlib.transforms.Bbox` object
+
+            If a `tuple`, it must be a length-2 tuple of str, `Artist`,
+ `Transform` or callable objects. The first transform is
+ used for the *x* coordinate and the second for *y*.
+
+ See :ref:`plotting-guide-annotation` for more details.
+
+ Defaults to ``'data'``
+
+ textcoords : str, `Artist`, `Transform`, callable or tuple, optional
+            The coordinate system that ``xytext`` is given in, which
+            may be different from the coordinate system used for
+ ``xy``.
+
+ All ``xycoords`` values are valid as well as the following
+ strings:
+
+ ================= =========================================
+ Property Description
+ ================= =========================================
+ 'offset points' offset (in points) from the *xy* value
+ 'offset pixels' offset (in pixels) from the *xy* value
+ ================= =========================================
+
+            Defaults to the value of ``xycoords``.
+
+ arrowprops : dict, optional
+ If not None, properties used to draw a
+ `~matplotlib.patches.FancyArrowPatch` arrow between ``xy`` and
+ ``xytext``.
+
+ If `arrowprops` does not contain the key ``'arrowstyle'`` the
+ allowed keys are:
+
+ ========== ======================================================
+ Key Description
+ ========== ======================================================
+ width the width of the arrow in points
+ headwidth the width of the base of the arrow head in points
+ headlength the length of the arrow head in points
+ shrink fraction of total length to 'shrink' from both ends
+ ? any key to :class:`matplotlib.patches.FancyArrowPatch`
+ ========== ======================================================
+
+ If `arrowprops` contains the key ``'arrowstyle'``, the
+ above keys are forbidden. The allowed values of
+ ``'arrowstyle'`` are:
+
+ ============ =============================================
+ Name Attrs
+ ============ =============================================
+ ``'-'`` None
+ ``'->'`` head_length=0.4,head_width=0.2
+ ``'-['`` widthB=1.0,lengthB=0.2,angleB=None
+ ``'|-|'`` widthA=1.0,widthB=1.0
+ ``'-|>'`` head_length=0.4,head_width=0.2
+ ``'<-'`` head_length=0.4,head_width=0.2
+ ``'<->'`` head_length=0.4,head_width=0.2
+ ``'<|-'`` head_length=0.4,head_width=0.2
+ ``'<|-|>'`` head_length=0.4,head_width=0.2
+ ``'fancy'`` head_length=0.4,head_width=0.4,tail_width=0.4
+ ``'simple'`` head_length=0.5,head_width=0.5,tail_width=0.2
+ ``'wedge'`` tail_width=0.3,shrink_factor=0.5
+ ============ =============================================
+
+ Valid keys for `~matplotlib.patches.FancyArrowPatch` are:
+
+ =============== ==================================================
+ Key Description
+ =============== ==================================================
+ arrowstyle the arrow style
+ connectionstyle the connection style
+ relpos default is (0.5, 0.5)
+ patchA default is bounding box of the text
+ patchB default is None
+ shrinkA default is 2 points
+ shrinkB default is 2 points
+ mutation_scale default is text size (in points)
+ mutation_aspect default is 1.
+ ? any key for :class:`matplotlib.patches.PathPatch`
+ =============== ==================================================
+
+ Defaults to None
+
+ annotation_clip : bool, optional
+ Controls the visibility of the annotation when it goes
+ outside the axes area.
+
+ If `True`, the annotation will only be drawn when ``xy`` is
+ inside the axes. If `False`, the annotation will always be
+ drawn regardless of its position.
+
+ The default is `None`, which behaves as `True` only if
+ *xycoords* is "data".
+
+ Returns
+ -------
+ Annotation
+
+ '''
+
+ _AnnotationBase.__init__(self,
+ xy,
+ xycoords=xycoords,
+ annotation_clip=annotation_clip)
+ # warn about wonky input data
+ if (xytext is None and
+ textcoords is not None and
+ textcoords != xycoords):
+ warnings.warn("You have used the `textcoords` kwarg, but not "
+ "the `xytext` kwarg. This can lead to surprising "
+ "results.")
+
+ # clean up textcoords and assign default
+ if textcoords is None:
+ textcoords = self.xycoords
+ self._textcoords = textcoords
+
+ # cleanup xytext defaults
+ if xytext is None:
+ xytext = self.xy
+ x, y = xytext
+
+ Text.__init__(self, x, y, s, **kwargs)
+
+ self.arrowprops = arrowprops
+
+ self.arrow = None
+
+ if arrowprops is not None:
+ if "arrowstyle" in arrowprops:
+ arrowprops = self.arrowprops.copy()
+ self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
+ else:
+ # modified YAArrow API to be used with FancyArrowPatch
+ shapekeys = ('width', 'headwidth', 'headlength',
+ 'shrink', 'frac')
+ arrowprops = dict()
+ for key, val in self.arrowprops.items():
+ if key not in shapekeys:
+ arrowprops[key] = val # basic Patch properties
+ self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
+ **arrowprops)
+ else:
+ self.arrow_patch = None
+
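+ # Usage sketch (not part of the upstream module): annotations are normally
+ # created via ``Axes.annotate``, which forwards its arguments to this class.
+ # The data points and coordinates below are made up:
+ #
+ #     import matplotlib.pyplot as plt
+ #     fig, ax = plt.subplots()
+ #     ax.plot([1, 2, 3], [1, 4, 9])
+ #     ax.annotate('local max', xy=(3, 9), xytext=(1.5, 7),
+ #                 xycoords='data',
+ #                 arrowprops=dict(arrowstyle='->'))
+ #     fig.savefig('annotated.png')
+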
+ def contains(self, event):
+ contains, tinfo = Text.contains(self, event)
+ if self.arrow is not None:
+ in_arrow, _ = self.arrow.contains(event)
+ contains = contains or in_arrow
+ if self.arrow_patch is not None:
+ in_patch, _ = self.arrow_patch.contains(event)
+ contains = contains or in_patch
+
+ return contains, tinfo
+
+ @property
+ def xyann(self):
+ return self.get_position()
+
+ @xyann.setter
+ def xyann(self, xytext):
+ self.set_position(xytext)
+
+ @property
+ def anncoords(self):
+ return self._textcoords
+
+ @anncoords.setter
+ def anncoords(self, coords):
+ self._textcoords = coords
+
+ def set_figure(self, fig):
+
+ if self.arrow is not None:
+ self.arrow.set_figure(fig)
+ if self.arrow_patch is not None:
+ self.arrow_patch.set_figure(fig)
+ Artist.set_figure(self, fig)
+
+ def update_positions(self, renderer):
+ """"Update the pixel positions of the annotated point and the
+ text.
+ """
+ xy_pixel = self._get_position_xy(renderer)
+ self._update_position_xytext(renderer, xy_pixel)
+
+ def _update_position_xytext(self, renderer, xy_pixel):
+ """Update the pixel positions of the annotation text and the arrow
+ patch.
+ """
+ # generate transformation,
+ self.set_transform(self._get_xy_transform(renderer, self.anncoords))
+
+ ox0, oy0 = self._get_xy_display()
+ ox1, oy1 = xy_pixel
+
+ if self.arrowprops is not None:
+ x0, y0 = xy_pixel
+ l, b, w, h = Text.get_window_extent(self, renderer).bounds
+ r = l + w
+ t = b + h
+ xc = 0.5 * (l + r)
+ yc = 0.5 * (b + t)
+
+ d = self.arrowprops.copy()
+ ms = d.pop("mutation_scale", self.get_size())
+ self.arrow_patch.set_mutation_scale(ms)
+
+ if "arrowstyle" not in d:
+ # Approximately simulate the YAArrow.
+ # Pop its kwargs:
+ shrink = d.pop('shrink', 0.0)
+ width = d.pop('width', 4)
+ headwidth = d.pop('headwidth', 12)
+ # Ignore frac--it is useless.
+ frac = d.pop('frac', None)
+ if frac is not None:
+ warnings.warn(
+ "'frac' option in 'arrowprops' is no longer supported;"
+ " use 'headlength' to set the head length in points.")
+ headlength = d.pop('headlength', 12)
+
+ # NB: ms is in pts
+ stylekw = dict(head_length=headlength / ms,
+ head_width=headwidth / ms,
+ tail_width=width / ms)
+
+ self.arrow_patch.set_arrowstyle('simple', **stylekw)
+
+ # using YAArrow style:
+ # pick the x,y corner of the text bbox closest to point
+ # annotated
+ xpos = ((l, 0), (xc, 0.5), (r, 1))
+ ypos = ((b, 0), (yc, 0.5), (t, 1))
+
+ _, (x, relposx) = min((abs(val[0] - x0), val) for val in xpos)
+ _, (y, relposy) = min((abs(val[0] - y0), val) for val in ypos)
+
+ self._arrow_relpos = (relposx, relposy)
+
+ r = np.hypot((y - y0), (x - x0))
+ shrink_pts = shrink * r / renderer.points_to_pixels(1)
+ self.arrow_patch.shrinkA = shrink_pts
+ self.arrow_patch.shrinkB = shrink_pts
+
+ # adjust the starting point of the arrow relative to
+ # the textbox.
+ # TODO: Rotation needs to be accounted for.
+ relpos = self._arrow_relpos
+ bbox = Text.get_window_extent(self, renderer)
+ ox0 = bbox.x0 + bbox.width * relpos[0]
+ oy0 = bbox.y0 + bbox.height * relpos[1]
+
+ # The arrow will be drawn from (ox0, oy0) to (ox1,
+ # oy1). It will be first clipped by patchA and patchB.
+ # Then it will be shrunk by shrinkA and shrinkB
+ # (in points). If patch A is not set, self.bbox_patch
+ # is used.
+
+ self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
+
+ if "patchA" in d:
+ self.arrow_patch.set_patchA(d.pop("patchA"))
+ else:
+ if self._bbox_patch:
+ self.arrow_patch.set_patchA(self._bbox_patch)
+ else:
+ pad = renderer.points_to_pixels(4)
+ if self.get_text() == "":
+ self.arrow_patch.set_patchA(None)
+ return
+
+ bbox = Text.get_window_extent(self, renderer)
+ l, b, w, h = bbox.bounds
+ l -= pad / 2.
+ b -= pad / 2.
+ w += pad
+ h += pad
+ r = Rectangle(xy=(l, b),
+ width=w,
+ height=h,
+ )
+ r.set_transform(IdentityTransform())
+ r.set_clip_on(False)
+
+ self.arrow_patch.set_patchA(r)
+
+ @artist.allow_rasterization
+ def draw(self, renderer):
+ """
+ Draw the :class:`Annotation` object to the given *renderer*.
+ """
+
+ if renderer is not None:
+ self._renderer = renderer
+ if not self.get_visible():
+ return
+
+ xy_pixel = self._get_position_xy(renderer)
+ if not self._check_xy(renderer, xy_pixel):
+ return
+
+ self._update_position_xytext(renderer, xy_pixel)
+ self.update_bbox_position_size(renderer)
+
+ if self.arrow_patch is not None: # FancyArrowPatch
+ if self.arrow_patch.figure is None and self.figure is not None:
+ self.arrow_patch.figure = self.figure
+ self.arrow_patch.draw(renderer)
+
+ # Draw text, including FancyBboxPatch, after FancyArrowPatch.
+ # Otherwise, a wedge arrowstyle can land partly on top of the Bbox.
+ Text.draw(self, renderer)
+
+ def get_window_extent(self, renderer=None):
+ '''
+ Return a :class:`~matplotlib.transforms.Bbox` object bounding
+ the text and arrow annotation, in display units.
+
+ *renderer* defaults to the _renderer attribute of the text
+ object. This is not assigned until the first execution of
+ :meth:`draw`, so you must use this kwarg if you want
+ to call :meth:`get_window_extent` prior to the first
+ :meth:`draw`. For getting web page regions, it is
+ simpler to call the method after saving the figure. The
+ *dpi* used defaults to self.figure.dpi; the renderer dpi is
+ irrelevant.
+
+ '''
+ if not self.get_visible():
+ return Bbox.unit()
+ arrow = self.arrow
+ arrow_patch = self.arrow_patch
+
+ text_bbox = Text.get_window_extent(self, renderer=renderer)
+ bboxes = [text_bbox]
+
+ if self.arrow is not None:
+ bboxes.append(arrow.get_window_extent(renderer=renderer))
+ elif self.arrow_patch is not None:
+ bboxes.append(arrow_patch.get_window_extent(renderer=renderer))
+
+ return Bbox.union(bboxes)
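+
+ # Usage sketch (not part of the upstream module): the cached renderer is
+ # only set by the first draw, so draw before querying the extent. ``fig``
+ # and ``ann`` are assumed to exist already:
+ #
+ #     fig.canvas.draw()
+ #     bbox = ann.get_window_extent()   # Bbox in display (pixel) units
+ #     print(bbox.width, bbox.height)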
+
+
+docstring.interpd.update(Annotation=Annotation.__init__.__doc__)
diff --git a/contrib/python/matplotlib/py2/matplotlib/textpath.py b/contrib/python/matplotlib/py2/matplotlib/textpath.py
new file mode 100644
index 00000000000..5ee3567742d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/textpath.py
@@ -0,0 +1,536 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from collections import OrderedDict
+
+import six
+from six.moves import zip
+
+import warnings
+
+import numpy as np
+
+from matplotlib.path import Path
+from matplotlib import rcParams
+import matplotlib.font_manager as font_manager
+from matplotlib.ft2font import KERNING_DEFAULT, LOAD_NO_HINTING
+from matplotlib.ft2font import LOAD_TARGET_LIGHT
+from matplotlib.mathtext import MathTextParser
+import matplotlib.dviread as dviread
+from matplotlib.font_manager import FontProperties, get_font
+from matplotlib.transforms import Affine2D
+from six.moves.urllib.parse import quote as urllib_quote
+
+
+class TextToPath(object):
+ """
+ A class that converts a given text to a path using ttf fonts.
+ """
+
+ FONT_SCALE = 100.
+ DPI = 72
+
+ def __init__(self):
+ """
+ Initialization
+ """
+ self.mathtext_parser = MathTextParser('path')
+ self.tex_font_map = None
+
+ from matplotlib.cbook import maxdict
+ self._ps_fontd = maxdict(50)
+
+ self._texmanager = None
+
+ self._adobe_standard_encoding = None
+
+ def _get_adobe_standard_encoding(self):
+ enc_name = dviread.find_tex_file('8a.enc')
+ enc = dviread.Encoding(enc_name)
+ return {c: i for i, c in enumerate(enc.encoding)}
+
+ def _get_font(self, prop):
+ """
+ find a ttf font.
+ """
+ fname = font_manager.findfont(prop)
+ font = get_font(fname)
+ font.set_size(self.FONT_SCALE, self.DPI)
+
+ return font
+
+ def _get_hinting_flag(self):
+ return LOAD_NO_HINTING
+
+ def _get_char_id(self, font, ccode):
+ """
+ Return a unique id for the given font and character-code set.
+ """
+ sfnt = font.get_sfnt()
+ try:
+ ps_name = sfnt[1, 0, 0, 6].decode('mac_roman')
+ except KeyError:
+ ps_name = sfnt[3, 1, 0x0409, 6].decode('utf-16be')
+ char_id = urllib_quote('%s-%x' % (ps_name, ccode))
+ return char_id
+
+ def _get_char_id_ps(self, font, ccode):
+ """
+ Return a unique id for the given font and character-code set (for tex).
+ """
+ ps_name = font.get_ps_font_info()[2]
+ char_id = urllib_quote('%s-%d' % (ps_name, ccode))
+ return char_id
+
+ def glyph_to_path(self, font, currx=0.):
+ """
+ convert the ft2font glyph to vertices and codes.
+ """
+ verts, codes = font.get_path()
+ if currx != 0.0:
+ verts[:, 0] += currx
+ return verts, codes
+
+ def get_text_width_height_descent(self, s, prop, ismath):
+ if rcParams['text.usetex']:
+ texmanager = self.get_texmanager()
+ fontsize = prop.get_size_in_points()
+ w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
+ renderer=None)
+ return w, h, d
+
+ fontsize = prop.get_size_in_points()
+ scale = fontsize / self.FONT_SCALE
+
+ if ismath:
+ prop = prop.copy()
+ prop.set_size(self.FONT_SCALE)
+
+ width, height, descent, trash, used_characters = \
+ self.mathtext_parser.parse(s, 72, prop)
+ return width * scale, height * scale, descent * scale
+
+ font = self._get_font(prop)
+ font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
+ w, h = font.get_width_height()
+ w /= 64.0 # convert from subpixels
+ h /= 64.0
+ d = font.get_descent()
+ d /= 64.0
+ return w * scale, h * scale, d * scale
+
+ def get_text_path(self, prop, s, ismath=False, usetex=False):
+ """
+ convert text *s* to path (a tuple of vertices and codes for
+ matplotlib.path.Path).
+
+ *prop*
+ font property
+
+ *s*
+ text to be converted
+
+ *usetex*
+ If True, use matplotlib usetex mode.
+
+ *ismath*
+ If True, use mathtext parser. Effective only if usetex == False.
+
+
+ """
+ if not usetex:
+ if not ismath:
+ font = self._get_font(prop)
+ glyph_info, glyph_map, rects = self.get_glyphs_with_font(
+ font, s)
+ else:
+ glyph_info, glyph_map, rects = self.get_glyphs_mathtext(
+ prop, s)
+ else:
+ glyph_info, glyph_map, rects = self.get_glyphs_tex(prop, s)
+
+ verts, codes = [], []
+
+ for glyph_id, xposition, yposition, scale in glyph_info:
+ verts1, codes1 = glyph_map[glyph_id]
+ if len(verts1):
+ verts1 = np.array(verts1) * scale + [xposition, yposition]
+ verts.extend(verts1)
+ codes.extend(codes1)
+
+ for verts1, codes1 in rects:
+ verts.extend(verts1)
+ codes.extend(codes1)
+
+ return verts, codes
+
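+ # Usage sketch (not part of the upstream module): the module-level
+ # ``text_to_path`` instance defined near the end of this file can be used
+ # directly to turn a string into Path data with a default font:
+ #
+ #     from matplotlib.font_manager import FontProperties
+ #     from matplotlib.path import Path
+ #     verts, codes = text_to_path.get_text_path(FontProperties(), "abc")
+ #     path = Path(verts, codes)
+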
+ def get_glyphs_with_font(self, font, s, glyph_map=None,
+ return_new_glyphs_only=False):
+ """
+ convert the string *s* to vertices and codes using the
+ provided ttf font.
+ """
+
+ # Mostly copied from backend_svg.py.
+
+ lastgind = None
+
+ currx = 0
+ xpositions = []
+ glyph_ids = []
+
+ if glyph_map is None:
+ glyph_map = OrderedDict()
+
+ if return_new_glyphs_only:
+ glyph_map_new = OrderedDict()
+ else:
+ glyph_map_new = glyph_map
+
+ # I'm not sure if I get kernings right. Needs to be verified. -JJL
+
+ for c in s:
+ ccode = ord(c)
+ gind = font.get_char_index(ccode)
+ if gind is None:
+ ccode = ord('?')
+ gind = 0
+
+ if lastgind is not None:
+ kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
+ else:
+ kern = 0
+
+ glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
+ horiz_advance = (glyph.linearHoriAdvance / 65536.0)
+
+ char_id = self._get_char_id(font, ccode)
+ if char_id not in glyph_map:
+ glyph_map_new[char_id] = self.glyph_to_path(font)
+
+ currx += (kern / 64.0)
+
+ xpositions.append(currx)
+ glyph_ids.append(char_id)
+
+ currx += horiz_advance
+
+ lastgind = gind
+
+ ypositions = [0] * len(xpositions)
+ sizes = [1.] * len(xpositions)
+
+ rects = []
+
+ return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
+ glyph_map_new, rects)
+
+ def get_glyphs_mathtext(self, prop, s, glyph_map=None,
+ return_new_glyphs_only=False):
+ """
+ convert the string *s* to vertices and codes by parsing it with
+ mathtext.
+ """
+
+ prop = prop.copy()
+ prop.set_size(self.FONT_SCALE)
+
+ width, height, descent, glyphs, rects = self.mathtext_parser.parse(
+ s, self.DPI, prop)
+
+ if not glyph_map:
+ glyph_map = OrderedDict()
+
+ if return_new_glyphs_only:
+ glyph_map_new = OrderedDict()
+ else:
+ glyph_map_new = glyph_map
+
+ xpositions = []
+ ypositions = []
+ glyph_ids = []
+ sizes = []
+
+ currx, curry = 0, 0
+ for font, fontsize, ccode, ox, oy in glyphs:
+ char_id = self._get_char_id(font, ccode)
+ if char_id not in glyph_map:
+ font.clear()
+ font.set_size(self.FONT_SCALE, self.DPI)
+ glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
+ glyph_map_new[char_id] = self.glyph_to_path(font)
+
+ xpositions.append(ox)
+ ypositions.append(oy)
+ glyph_ids.append(char_id)
+ size = fontsize / self.FONT_SCALE
+ sizes.append(size)
+
+ myrects = []
+ for ox, oy, w, h in rects:
+ vert1 = [(ox, oy), (ox, oy + h), (ox + w, oy + h),
+ (ox + w, oy), (ox, oy), (0, 0)]
+ code1 = [Path.MOVETO,
+ Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.CLOSEPOLY]
+ myrects.append((vert1, code1))
+
+ return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
+ glyph_map_new, myrects)
+
+ def get_texmanager(self):
+ """
+ return the :class:`matplotlib.texmanager.TexManager` instance
+ """
+ if self._texmanager is None:
+ from matplotlib.texmanager import TexManager
+ self._texmanager = TexManager()
+ return self._texmanager
+
+ def get_glyphs_tex(self, prop, s, glyph_map=None,
+ return_new_glyphs_only=False):
+ """
+ convert the string *s* to vertices and codes using matplotlib's usetex
+ mode.
+ """
+
+ # The code here is mostly borrowed from the pdf backend.
+
+ texmanager = self.get_texmanager()
+
+ if self.tex_font_map is None:
+ self.tex_font_map = dviread.PsfontsMap(
+ dviread.find_tex_file('pdftex.map'))
+
+ if self._adobe_standard_encoding is None:
+ self._adobe_standard_encoding = self._get_adobe_standard_encoding()
+
+ fontsize = prop.get_size_in_points()
+ if hasattr(texmanager, "get_dvi"):
+ dvifilelike = texmanager.get_dvi(s, self.FONT_SCALE)
+ dvi = dviread.DviFromFileLike(dvifilelike, self.DPI)
+ else:
+ dvifile = texmanager.make_dvi(s, self.FONT_SCALE)
+ dvi = dviread.Dvi(dvifile, self.DPI)
+ with dvi:
+ page = next(iter(dvi))
+
+ if glyph_map is None:
+ glyph_map = OrderedDict()
+
+ if return_new_glyphs_only:
+ glyph_map_new = OrderedDict()
+ else:
+ glyph_map_new = glyph_map
+
+ glyph_ids, xpositions, ypositions, sizes = [], [], [], []
+
+ # Gather font information and do some setup for combining
+ # characters into strings.
+ # oldfont, seq = None, []
+ for x1, y1, dvifont, glyph, width in page.text:
+ font_and_encoding = self._ps_fontd.get(dvifont.texname)
+ font_bunch = self.tex_font_map[dvifont.texname]
+
+ if font_and_encoding is None:
+ if font_bunch.filename is None:
+ raise ValueError(
+ ("No usable font file found for %s (%s). "
+ "The font may lack a Type-1 version.")
+ % (font_bunch.psname, dvifont.texname))
+
+ font = get_font(font_bunch.filename)
+
+ for charmap_name, charmap_code in [("ADOBE_CUSTOM",
+ 1094992451),
+ ("ADOBE_STANDARD",
+ 1094995778)]:
+ try:
+ font.select_charmap(charmap_code)
+ except (ValueError, RuntimeError):
+ pass
+ else:
+ break
+ else:
+ charmap_name = ""
+ warnings.warn("No supported encoding in font (%s)." %
+ font_bunch.filename)
+
+ if charmap_name == "ADOBE_STANDARD" and font_bunch.encoding:
+ enc0 = dviread.Encoding(font_bunch.encoding)
+ enc = {i: self._adobe_standard_encoding.get(c, None)
+ for i, c in enumerate(enc0.encoding)}
+ else:
+ enc = {}
+ self._ps_fontd[dvifont.texname] = font, enc
+
+ else:
+ font, enc = font_and_encoding
+
+ ft2font_flag = LOAD_TARGET_LIGHT
+
+ char_id = self._get_char_id_ps(font, glyph)
+
+ if char_id not in glyph_map:
+ font.clear()
+ font.set_size(self.FONT_SCALE, self.DPI)
+ if enc:
+ charcode = enc.get(glyph, None)
+ else:
+ charcode = glyph
+
+ if charcode is not None:
+ glyph0 = font.load_char(charcode, flags=ft2font_flag)
+ else:
+ warnings.warn("The glyph (%d) of font (%s) cannot be "
+ "converted with the encoding. Glyph may "
+ "be wrong" % (glyph, font_bunch.filename))
+
+ glyph0 = font.load_char(glyph, flags=ft2font_flag)
+
+ glyph_map_new[char_id] = self.glyph_to_path(font)
+
+ glyph_ids.append(char_id)
+ xpositions.append(x1)
+ ypositions.append(y1)
+ sizes.append(dvifont.size / self.FONT_SCALE)
+
+ myrects = []
+
+ for ox, oy, h, w in page.boxes:
+ vert1 = [(ox, oy), (ox + w, oy), (ox + w, oy + h),
+ (ox, oy + h), (ox, oy), (0, 0)]
+ code1 = [Path.MOVETO,
+ Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
+ Path.CLOSEPOLY]
+ myrects.append((vert1, code1))
+
+ return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
+ glyph_map_new, myrects)
+
+
+text_to_path = TextToPath()
+
+
+class TextPath(Path):
+ """
+ Create a path from the text.
+ """
+
+ def __init__(self, xy, s, size=None, prop=None,
+ _interpolation_steps=1, usetex=False,
+ *kl, **kwargs):
+ """
+ Create a path from the text. No support for TeX yet. Note that
+ it simply is a path, not an artist. You need to use the
+ PathPatch (or other artists) to draw this path onto the
+ canvas.
+
+ xy : position of the text.
+ s : text
+ size : font size
+ prop : font property
+ """
+
+ if prop is None:
+ prop = FontProperties()
+
+ if size is None:
+ size = prop.get_size_in_points()
+
+ self._xy = xy
+ self.set_size(size)
+
+ self._cached_vertices = None
+
+ self._vertices, self._codes = self.text_get_vertices_codes(
+ prop, s,
+ usetex=usetex)
+
+ self._should_simplify = False
+ self._simplify_threshold = rcParams['path.simplify_threshold']
+ self._has_nonfinite = False
+ self._interpolation_steps = _interpolation_steps
+
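+ # Usage sketch (not part of the upstream module): since a TextPath is just
+ # a Path, wrap it in a PathPatch to draw it. Position and size are
+ # arbitrary values:
+ #
+ #     import matplotlib.pyplot as plt
+ #     from matplotlib.patches import PathPatch
+ #     from matplotlib.textpath import TextPath
+ #     fig, ax = plt.subplots()
+ #     tp = TextPath((0, 0), "Hello", size=10)
+ #     ax.add_patch(PathPatch(tp, facecolor='black'))
+ #     ax.autoscale_view()
+ #     fig.savefig('textpath.png')
+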
+ def set_size(self, size):
+ """
+ set the size of the text
+ """
+ self._size = size
+ self._invalid = True
+
+ def get_size(self):
+ """
+ get the size of the text
+ """
+ return self._size
+
+ def _get_vertices(self):
+ """
+ Return the cached path after updating it if necessary.
+ """
+ self._revalidate_path()
+ return self._cached_vertices
+
+ def _get_codes(self):
+ """
+ Return the codes
+ """
+ return self._codes
+
+ vertices = property(_get_vertices)
+ codes = property(_get_codes)
+
+ def _revalidate_path(self):
+ """
+ update the path if necessary.
+
+ The path for the text is initially created with the font size
+ of FONT_SCALE, and this path is rescaled to other sizes when
+ necessary.
+
+ """
+ if (self._invalid or
+ (self._cached_vertices is None)):
+ tr = Affine2D().scale(
+ self._size / text_to_path.FONT_SCALE,
+ self._size / text_to_path.FONT_SCALE).translate(*self._xy)
+ self._cached_vertices = tr.transform(self._vertices)
+ self._invalid = False
+
+ def is_math_text(self, s):
+ """
+ Return the (possibly cleaned) string *s* and a flag indicating
+ whether it contains any mathtext.
+ """
+ # copied from Text.is_math_text -JJL
+
+ # Did we find an even number of non-escaped dollar signs?
+ # If so, treat it as math text.
+ dollar_count = s.count(r'$') - s.count(r'\$')
+ even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
+
+ if rcParams['text.usetex']:
+ return s, 'TeX'
+
+ if even_dollars:
+ return s, True
+ else:
+ return s.replace(r'\$', '$'), False
+
+ def text_get_vertices_codes(self, prop, s, usetex):
+ """
+ convert the string *s* to vertices and codes using the
+ provided font property *prop*. Mostly copied from
+ backend_svg.py.
+ """
+
+ if usetex:
+ verts, codes = text_to_path.get_text_path(prop, s, usetex=True)
+ else:
+ clean_line, ismath = self.is_math_text(s)
+ verts, codes = text_to_path.get_text_path(prop, clean_line,
+ ismath=ismath)
+
+ return verts, codes
diff --git a/contrib/python/matplotlib/py2/matplotlib/ticker.py b/contrib/python/matplotlib/py2/matplotlib/ticker.py
new file mode 100644
index 00000000000..c6946c78ec1
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/ticker.py
@@ -0,0 +1,2628 @@
+"""
+Tick locating and formatting
+============================
+
+This module contains classes to support completely configurable tick
+locating and formatting. Although the locators know nothing about major
+or minor ticks, they are used by the Axis class to support major and
+minor tick locating and formatting. Generic tick locators and
+formatters are provided, as well as domain specific custom ones.
+
+Default Formatter
+-----------------
+
+The default formatter identifies when the x-data being plotted is a
+small range on top of a large offset. To reduce the chances that the
+ticklabels overlap, the ticks are labeled as deltas from a fixed offset.
+For example::
+
+ ax.plot(np.arange(2000, 2010), range(10))
+
+will have ticks of 0-9 with an offset of +2e3. If this is not desired,
+turn off the use of the offset on the default formatter::
+
+ ax.get_xaxis().get_major_formatter().set_useOffset(False)
+
+Alternatively, set the rcParam ``axes.formatter.useoffset=False`` to turn it
+off globally, or set a different formatter.
+
+Tick locating
+-------------
+
+The Locator class is the base class for all tick locators. The locators
+handle autoscaling of the view limits based on the data limits, and the
+choosing of tick locations. A useful semi-automatic tick locator is
+`MultipleLocator`. It is initialized with a base, e.g., 10, and it picks
+axis limits and ticks that are multiples of that base.
+
+The Locator subclasses defined here are
+
+:class:`AutoLocator`
+ `MaxNLocator` with simple defaults. This is the default tick locator for
+ most plotting.
+
+:class:`MaxNLocator`
+ Finds up to a max number of intervals with ticks at nice locations.
+
+:class:`LinearLocator`
+ Space ticks evenly from min to max.
+
+:class:`LogLocator`
+ Space ticks logarithmically from min to max.
+
+:class:`MultipleLocator`
+ Ticks and range are a multiple of base; either integer or float.
+
+:class:`FixedLocator`
+ Tick locations are fixed.
+
+:class:`IndexLocator`
+ Locator for index plots (e.g., where ``x = range(len(y))``).
+
+:class:`NullLocator`
+ No ticks.
+
+:class:`SymmetricalLogLocator`
+ Locator for use with the symlog norm; works like `LogLocator` for the
+ part outside of the threshold and adds 0 if inside the limits.
+
+:class:`LogitLocator`
+ Locator for logit scaling.
+
+:class:`OldAutoLocator`
+ Choose a `MultipleLocator` and dynamically reassign it for intelligent
+ ticking during navigation.
+
+:class:`AutoMinorLocator`
+ Locator for minor ticks when the axis is linear and the
+ major ticks are uniformly spaced. Subdivides the major
+ tick interval into a specified number of minor intervals,
+ defaulting to 4 or 5 depending on the major interval.
+
+
+There are a number of locators specialized for date locations - see
+the `dates` module.
+
+You can define your own locator by deriving from Locator. You must
+override the ``__call__`` method, which returns a sequence of locations,
+and you will probably want to override the autoscale method to set the
+view limits from the data limits.
+
+If you want to override the default locator, use one of the above or a custom
+locator and pass it to the x or y axis instance. The relevant methods are::
+
+ ax.xaxis.set_major_locator(xmajor_locator)
+ ax.xaxis.set_minor_locator(xminor_locator)
+ ax.yaxis.set_major_locator(ymajor_locator)
+ ax.yaxis.set_minor_locator(yminor_locator)
+
+The default minor locator is `NullLocator`, i.e., no minor ticks on by default.
+
+Tick formatting
+---------------
+
+Tick formatting is controlled by classes derived from Formatter. The formatter
+operates on a single tick value and returns a string to the axis.
+
+:class:`NullFormatter`
+ No labels on the ticks.
+
+:class:`IndexFormatter`
+ Set the strings from a list of labels.
+
+:class:`FixedFormatter`
+ Set the strings manually for the labels.
+
+:class:`FuncFormatter`
+ User defined function sets the labels.
+
+:class:`StrMethodFormatter`
+ Use string `format` method.
+
+:class:`FormatStrFormatter`
+ Use an old-style sprintf format string.
+
+:class:`ScalarFormatter`
+ Default formatter for scalars: autopick the format string.
+
+:class:`LogFormatter`
+ Formatter for log axes.
+
+:class:`LogFormatterExponent`
+ Format values for log axis using ``exponent = log_base(value)``.
+
+:class:`LogFormatterMathtext`
+ Format values for log axis using ``exponent = log_base(value)``
+ using Math text.
+
+:class:`LogFormatterSciNotation`
+ Format values for log axis using scientific notation.
+
+:class:`LogitFormatter`
+ Probability formatter.
+
+:class:`EngFormatter`
+ Format labels in engineering notation.
+
+:class:`PercentFormatter`
+ Format labels as a percentage.
+
+You can derive your own formatter from the Formatter base class by
+simply overriding the ``__call__`` method. The formatter class has
+access to the axis view and data limits.
+
+To control the major and minor tick label formats, use one of the
+following methods::
+
+ ax.xaxis.set_major_formatter(xmajor_formatter)
+ ax.xaxis.set_minor_formatter(xminor_formatter)
+ ax.yaxis.set_major_formatter(ymajor_formatter)
+ ax.yaxis.set_minor_formatter(yminor_formatter)
+
+See :doc:`/gallery/ticks_and_spines/major_minor_demo` for an
+example of setting major and minor ticks. See the :mod:`matplotlib.dates`
+module for more information and examples of using date locators and formatters.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import itertools
+import locale
+import math
+import numpy as np
+from matplotlib import rcParams
+from matplotlib import cbook
+from matplotlib import transforms as mtransforms
+from matplotlib.cbook import mplDeprecation
+
+import warnings
+
+
+__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
+ 'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
+ 'StrMethodFormatter', 'ScalarFormatter', 'LogFormatter',
+ 'LogFormatterExponent', 'LogFormatterMathtext',
+ 'IndexFormatter', 'LogFormatterSciNotation',
+ 'LogitFormatter', 'EngFormatter', 'PercentFormatter',
+ 'Locator', 'IndexLocator', 'FixedLocator', 'NullLocator',
+ 'LinearLocator', 'LogLocator', 'AutoLocator',
+ 'MultipleLocator', 'MaxNLocator', 'AutoMinorLocator',
+ 'SymmetricalLogLocator', 'LogitLocator')
+
+
+if six.PY3:
+ long = int
+
+
+# Work around numpy/numpy#6127.
+def _divmod(x, y):
+ if isinstance(x, np.generic):
+ x = x.item()
+ if isinstance(y, np.generic):
+ y = y.item()
+ return six.moves.builtins.divmod(x, y)
+
+
+def _mathdefault(s):
+ return '\\mathdefault{%s}' % s
+
+
+class _DummyAxis(object):
+ def __init__(self, minpos=0):
+ self.dataLim = mtransforms.Bbox.unit()
+ self.viewLim = mtransforms.Bbox.unit()
+ self._minpos = minpos
+
+ def get_view_interval(self):
+ return self.viewLim.intervalx
+
+ def set_view_interval(self, vmin, vmax):
+ self.viewLim.intervalx = vmin, vmax
+
+ def get_minpos(self):
+ return self._minpos
+
+ def get_data_interval(self):
+ return self.dataLim.intervalx
+
+ def set_data_interval(self, vmin, vmax):
+ self.dataLim.intervalx = vmin, vmax
+
+ def get_tick_space(self):
+ # Just use the long-standing default of nbins==9
+ return 9
+
+
+class TickHelper(object):
+ axis = None
+
+ def set_axis(self, axis):
+ self.axis = axis
+
+ def create_dummy_axis(self, **kwargs):
+ if self.axis is None:
+ self.axis = _DummyAxis(**kwargs)
+
+ def set_view_interval(self, vmin, vmax):
+ self.axis.set_view_interval(vmin, vmax)
+
+ def set_data_interval(self, vmin, vmax):
+ self.axis.set_data_interval(vmin, vmax)
+
+ def set_bounds(self, vmin, vmax):
+ self.set_view_interval(vmin, vmax)
+ self.set_data_interval(vmin, vmax)
+
+
+class Formatter(TickHelper):
+ """
+ Create a string based on a tick value and location.
+ """
+ # some classes want to see all the locs to help format
+ # individual ones
+ locs = []
+
+ def __call__(self, x, pos=None):
+ """
+ Return the format for tick value *x* at position pos.
+ ``pos=None`` indicates an unspecified location.
+ """
+ raise NotImplementedError('Derived must override')
+
+ def format_data(self, value):
+ """
+ Returns the full string representation of the value with the
+ position unspecified.
+ """
+ return self.__call__(value)
+
+ def format_data_short(self, value):
+ """
+ Return a short string version of the tick value.
+
+ Defaults to the position-independent long value.
+ """
+ return self.format_data(value)
+
+ def get_offset(self):
+ return ''
+
+ def set_locs(self, locs):
+ self.locs = locs
+
+ def fix_minus(self, s):
+ """
+ Some classes may want to replace a hyphen for minus with the
+ proper unicode symbol (U+2212) for typographical correctness.
+ The default is to not replace it.
+
+ Note: if you use this method, e.g., in :meth:`format_data` or
+ :meth:`__call__`, you probably don't want to use it for
+ :meth:`format_data_short`, since the toolbar uses the latter for
+ interactive coordinate reporting and GUIs across platforms cannot
+ be expected to handle the unicode correctly. So for now, classes
+ that override :meth:`fix_minus` should provide an explicit
+ :meth:`format_data_short` method.
+ return s
+
+
+class IndexFormatter(Formatter):
+ """
+ Format the position x to the nearest i-th label where i=int(x+0.5)
+ """
+ def __init__(self, labels):
+ self.labels = labels
+ self.n = len(labels)
+
+ def __call__(self, x, pos=None):
+ """
+ Return the format for tick value `x` at position pos.
+
+ The position is ignored and the value is rounded to the nearest
+ integer, which is used to look up the label.
+ """
+ i = int(x + 0.5)
+ if i < 0 or i >= self.n:
+ return ''
+ else:
+ return self.labels[i]
+
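+ # Usage sketch for IndexFormatter (not part of the upstream module); handy
+ # when x values are just indices into a list of labels. ``ax`` is an
+ # assumed existing Axes:
+ #
+ #     from matplotlib.ticker import IndexFormatter
+ #     ax.xaxis.set_major_formatter(IndexFormatter(['one', 'two', 'three']))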
+
+class NullFormatter(Formatter):
+ """
+ Always return the empty string.
+ """
+ def __call__(self, x, pos=None):
+ """
+ Returns an empty string for all inputs.
+ """
+ return ''
+
+
+class FixedFormatter(Formatter):
+ """
+ Return fixed strings for tick labels based only on position, not
+ value.
+ """
+ def __init__(self, seq):
+ """
+ Set the sequence of strings that will be used for labels.
+ """
+ self.seq = seq
+ self.offset_string = ''
+
+ def __call__(self, x, pos=None):
+ """
+ Returns the label that matches the position regardless of the
+ value.
+
+ For positions ``pos < len(seq)``, return ``seq[pos]`` regardless of
+ `x`. Otherwise return the empty string. `seq` is the sequence of
+ strings that this object was initialized with.
+ """
+ if pos is None or pos >= len(self.seq):
+ return ''
+ else:
+ return self.seq[pos]
+
+ def get_offset(self):
+ return self.offset_string
+
+ def set_offset_string(self, ofs):
+ self.offset_string = ofs
+
+
+class FuncFormatter(Formatter):
+ """
+ Use a user-defined function for formatting.
+
+ The function should take in two inputs (a tick value ``x`` and a
+ position ``pos``), and return a string containing the corresponding
+ tick label.
+ """
+ def __init__(self, func):
+ self.func = func
+
+ def __call__(self, x, pos=None):
+ """
+ Return the value of the user defined function.
+
+ `x` and `pos` are passed through as-is.
+ """
+ return self.func(x, pos)
+
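+ # Usage sketch for FuncFormatter (not part of the upstream module); ``ax``
+ # is an assumed existing Axes. The callable receives the tick value and
+ # the tick position:
+ #
+ #     from matplotlib.ticker import FuncFormatter
+ #     ax.yaxis.set_major_formatter(
+ #         FuncFormatter(lambda x, pos: '%.1fk' % (x / 1000.0)))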
+
+class FormatStrFormatter(Formatter):
+ """
+ Use an old-style ('%' operator) format string to format the tick.
+
+ The format string should have a single variable format (%) in it.
+ It will be applied to the value (not the position) of the tick.
+ """
+ def __init__(self, fmt):
+ self.fmt = fmt
+
+ def __call__(self, x, pos=None):
+ """
+ Return the formatted label string.
+
+ Only the value `x` is formatted. The position is ignored.
+ """
+ return self.fmt % x
+
+
+class StrMethodFormatter(Formatter):
+ """
+ Use a new-style format string (as used by `str.format()`)
+ to format the tick.
+
+ The field used for the value must be labeled `x` and the field used
+ for the position must be labeled `pos`.
+ """
+ def __init__(self, fmt):
+ self.fmt = fmt
+
+ def __call__(self, x, pos=None):
+ """
+ Return the formatted label string.
+
+ `x` and `pos` are passed to `str.format` as keyword arguments
+ with those exact names.
+ """
+ return self.fmt.format(x=x, pos=pos)
+
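+ # Usage sketch for StrMethodFormatter (not part of the upstream module);
+ # ``ax`` is an assumed existing Axes. The value field must be named ``x``:
+ #
+ #     from matplotlib.ticker import StrMethodFormatter
+ #     ax.xaxis.set_major_formatter(StrMethodFormatter('{x:.2f}'))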
+
+class OldScalarFormatter(Formatter):
+ """
+ Tick location is a plain old number.
+ """
+
+ def __call__(self, x, pos=None):
+ """
+ Return the format for tick val `x` based on the width of the
+ axis.
+
+ The position `pos` is ignored.
+ """
+ xmin, xmax = self.axis.get_view_interval()
+ d = abs(xmax - xmin)
+
+ return self.pprint_val(x, d)
+
+ def pprint_val(self, x, d):
+ """
+ Formats the value `x` based on the size of the axis range `d`.
+ """
+ # If the number is not too big and it's an int, format it as an int.
+ if abs(x) < 1e4 and x == int(x):
+ return '%d' % x
+
+ if d < 1e-2:
+ fmt = '%1.3e'
+ elif d < 1e-1:
+ fmt = '%1.3f'
+ elif d > 1e5:
+ fmt = '%1.1e'
+ elif d > 10:
+ fmt = '%1.1f'
+ elif d > 1:
+ fmt = '%1.2f'
+ else:
+ fmt = '%1.3f'
+ s = fmt % x
+ tup = s.split('e')
+ if len(tup) == 2:
+ mantissa = tup[0].rstrip('0').rstrip('.')
+ sign = tup[1][0].replace('+', '')
+ exponent = tup[1][1:].lstrip('0')
+ s = '%se%s%s' % (mantissa, sign, exponent)
+ else:
+ s = s.rstrip('0').rstrip('.')
+ return s
+
+
+class ScalarFormatter(Formatter):
+ """
+ Format tick values as a number.
+
+ Tick value is interpreted as a plain old number. If
+ ``useOffset==True`` and the data range is much smaller than the data
+ average, then an offset will be determined such that the tick labels
+ are meaningful. Scientific notation is used for ``data < 10^-n`` or
+ ``data >= 10^m``, where ``n`` and ``m`` are the power limits set
+ using ``set_powerlimits((n,m))``. The defaults for these are
+ controlled by the ``axes.formatter.limits`` rc parameter.
+ """
+ def __init__(self, useOffset=None, useMathText=None, useLocale=None):
+ # useOffset allows plotting small data ranges with large offsets: for
+ # example, [1 + 1e-9, 1 + 2e-9, 1 + 3e-9].
+ # useMathText will render the offset and scientific notation in mathtext.
+
+ if useOffset is None:
+ useOffset = rcParams['axes.formatter.useoffset']
+ self._offset_threshold = rcParams['axes.formatter.offset_threshold']
+ self.set_useOffset(useOffset)
+ self._usetex = rcParams['text.usetex']
+ if useMathText is None:
+ useMathText = rcParams['axes.formatter.use_mathtext']
+ self.set_useMathText(useMathText)
+ self.orderOfMagnitude = 0
+ self.format = ''
+ self._scientific = True
+ self._powerlimits = rcParams['axes.formatter.limits']
+ if useLocale is None:
+ useLocale = rcParams['axes.formatter.use_locale']
+ self._useLocale = useLocale
+
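+ # Usage sketch (not part of the upstream module): the default major
+ # formatter on a linear axis is already a ScalarFormatter, so its offset
+ # and scientific-notation behaviour can be tuned in place. ``ax`` is an
+ # assumed existing Axes:
+ #
+ #     fmt = ax.yaxis.get_major_formatter()   # typically a ScalarFormatter
+ #     fmt.set_useOffset(False)
+ #     fmt.set_powerlimits((-2, 3))   # sci. notation below 1e-2, at/above 1e3
+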
+ def get_useOffset(self):
+ return self._useOffset
+
+ def set_useOffset(self, val):
+ if val in [True, False]:
+ self.offset = 0
+ self._useOffset = val
+ else:
+ self._useOffset = False
+ self.offset = val
+
+ useOffset = property(fget=get_useOffset, fset=set_useOffset)
+
+ def get_useLocale(self):
+ return self._useLocale
+
+ def set_useLocale(self, val):
+ if val is None:
+ self._useLocale = rcParams['axes.formatter.use_locale']
+ else:
+ self._useLocale = val
+
+ useLocale = property(fget=get_useLocale, fset=set_useLocale)
+
+ def get_useMathText(self):
+ return self._useMathText
+
+ def set_useMathText(self, val):
+ if val is None:
+ self._useMathText = rcParams['axes.formatter.use_mathtext']
+ else:
+ self._useMathText = val
+
+ useMathText = property(fget=get_useMathText, fset=set_useMathText)
+
+ def fix_minus(self, s):
+ """
+ Replace hyphens with a unicode minus.
+ """
+ if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']:
+ return s
+ else:
+ return s.replace('-', '\N{MINUS SIGN}')
+
+ def __call__(self, x, pos=None):
+ """
+ Return the format for tick value `x` at position `pos`.
+ """
+ if len(self.locs) == 0:
+ return ''
+ else:
+ s = self.pprint_val(x)
+ return self.fix_minus(s)
+
+ def set_scientific(self, b):
+ """
+ Turn scientific notation on or off.
+
+ .. seealso:: Method :meth:`set_powerlimits`
+ """
+ self._scientific = bool(b)
+
+ def set_powerlimits(self, lims):
+ """
+ Sets size thresholds for scientific notation.
+
+ Parameters
+ ----------
+ lims : (min_exp, max_exp)
+ A tuple containing the powers of 10 that determine the switchover
+ threshold. Numbers below ``10**min_exp`` and above ``10**max_exp``
+ will be displayed in scientific notation.
+
+ For example, ``formatter.set_powerlimits((-3, 4))`` sets the
+ pre-2007 default in which scientific notation is used for
+ numbers less than 1e-3 or greater than 1e4.
+
+ .. seealso:: Method :meth:`set_scientific`
+ """
+ if len(lims) != 2:
+ raise ValueError("'lims' must be a sequence of length 2")
+ self._powerlimits = lims
+
+ def format_data_short(self, value):
+ """
+ Return a short formatted string representation of a number.
+ """
+ if self._useLocale:
+ return locale.format_string('%-12g', (value,))
+ else:
+ return '%-12g' % value
+
+ def format_data(self, value):
+ """
+ Return a formatted string representation of a number.
+ """
+ if self._useLocale:
+ s = locale.format_string('%1.10e', (value,))
+ else:
+ s = '%1.10e' % value
+ s = self._formatSciNotation(s)
+ return self.fix_minus(s)
+
+ def get_offset(self):
+ """
+ Return scientific notation, plus offset.
+ """
+ if len(self.locs) == 0:
+ return ''
+ s = ''
+ if self.orderOfMagnitude or self.offset:
+ offsetStr = ''
+ sciNotStr = ''
+ if self.offset:
+ offsetStr = self.format_data(self.offset)
+ if self.offset > 0:
+ offsetStr = '+' + offsetStr
+ if self.orderOfMagnitude:
+ if self._usetex or self._useMathText:
+ sciNotStr = self.format_data(10 ** self.orderOfMagnitude)
+ else:
+ sciNotStr = '1e%d' % self.orderOfMagnitude
+ if self._useMathText:
+ if sciNotStr != '':
+ sciNotStr = r'\times%s' % _mathdefault(sciNotStr)
+ s = ''.join(('$', sciNotStr, _mathdefault(offsetStr), '$'))
+ elif self._usetex:
+ if sciNotStr != '':
+ sciNotStr = r'\times%s' % sciNotStr
+ s = ''.join(('$', sciNotStr, offsetStr, '$'))
+ else:
+ s = ''.join((sciNotStr, offsetStr))
+
+ return self.fix_minus(s)
+
+ def set_locs(self, locs):
+ """
+ Set the locations of the ticks.
+ """
+ self.locs = locs
+ if len(self.locs) > 0:
+ vmin, vmax = self.axis.get_view_interval()
+ d = abs(vmax - vmin)
+ if self._useOffset:
+ self._compute_offset()
+ self._set_orderOfMagnitude(d)
+ self._set_format(vmin, vmax)
+
+ def _compute_offset(self):
+ locs = self.locs
+ if locs is None or not len(locs):
+ self.offset = 0
+ return
+ # Restrict to visible ticks.
+ vmin, vmax = sorted(self.axis.get_view_interval())
+ locs = np.asarray(locs)
+ locs = locs[(vmin <= locs) & (locs <= vmax)]
+ if not len(locs):
+ self.offset = 0
+ return
+ lmin, lmax = locs.min(), locs.max()
+ # Only use offset if there are at least two ticks and every tick has
+ # the same sign.
+ if lmin == lmax or lmin <= 0 <= lmax:
+ self.offset = 0
+ return
+ # min, max comparing absolute values (we want division to round towards
+ # zero so we work on absolute values).
+ abs_min, abs_max = sorted([abs(float(lmin)), abs(float(lmax))])
+ sign = math.copysign(1, lmin)
+ # What is the smallest power of ten such that abs_min and abs_max are
+ # equal up to that precision?
+ # Note: Internally using oom instead of 10 ** oom avoids some numerical
+ # accuracy issues.
+ oom_max = np.ceil(math.log10(abs_max))
+ oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
+ if abs_min // 10 ** oom != abs_max // 10 ** oom)
+ if (abs_max - abs_min) / 10 ** oom <= 1e-2:
+ # Handle the case of straddling a multiple of a large power of ten
+ # (relative to the span).
+ # What is the smallest power of ten such that abs_min and abs_max
+ # are no more than 1 apart at that precision?
+ oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
+ if abs_max // 10 ** oom - abs_min // 10 ** oom > 1)
+ # Only use offset if it saves at least _offset_threshold digits.
+ n = self._offset_threshold - 1
+ self.offset = (sign * (abs_max // 10 ** oom) * 10 ** oom
+ if abs_max // 10 ** oom >= 10**n
+ else 0)
+
+ def _set_orderOfMagnitude(self, range):
+ # if scientific notation is to be used, find the appropriate exponent
+ # if using a numerical offset, find the exponent after applying the
+ # offset
+ if not self._scientific:
+ self.orderOfMagnitude = 0
+ return
+ locs = np.abs(self.locs)
+ if self.offset:
+ oom = math.floor(math.log10(range))
+ else:
+ if locs[0] > locs[-1]:
+ val = locs[0]
+ else:
+ val = locs[-1]
+ if val == 0:
+ oom = 0
+ else:
+ oom = math.floor(math.log10(val))
+ if oom <= self._powerlimits[0]:
+ self.orderOfMagnitude = oom
+ elif oom >= self._powerlimits[1]:
+ self.orderOfMagnitude = oom
+ else:
+ self.orderOfMagnitude = 0
+
+ def _set_format(self, vmin, vmax):
+ # set the format string to format all the ticklabels
+ if len(self.locs) < 2:
+ # Temporarily augment the locations with the axis end points.
+ _locs = list(self.locs) + [vmin, vmax]
+ else:
+ _locs = self.locs
+ locs = (np.asarray(_locs) - self.offset) / 10. ** self.orderOfMagnitude
+ loc_range = np.ptp(locs)
+ # Curvilinear coordinates can yield two identical points.
+ if loc_range == 0:
+ loc_range = np.max(np.abs(locs))
+ # Both points might be zero.
+ if loc_range == 0:
+ loc_range = 1
+ if len(self.locs) < 2:
+ # We needed the end points only for the loc_range calculation.
+ locs = locs[:-2]
+ loc_range_oom = int(math.floor(math.log10(loc_range)))
+ # first estimate:
+ sigfigs = max(0, 3 - loc_range_oom)
+ # refined estimate:
+ thresh = 1e-3 * 10 ** loc_range_oom
+ while sigfigs >= 0:
+ if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh:
+ sigfigs -= 1
+ else:
+ break
+ sigfigs += 1
+ self.format = '%1.' + str(sigfigs) + 'f'
+ if self._usetex:
+ self.format = '$%s$' % self.format
+ elif self._useMathText:
+ self.format = '$%s$' % _mathdefault(self.format)
+
+ def pprint_val(self, x):
+ xp = (x - self.offset) / (10. ** self.orderOfMagnitude)
+ if np.abs(xp) < 1e-8:
+ xp = 0
+ if self._useLocale:
+ return locale.format_string(self.format, (xp,))
+ else:
+ return self.format % xp
+
+ def _formatSciNotation(self, s):
+ # transform 1e+004 into 1e4, for example
+ if self._useLocale:
+ decimal_point = locale.localeconv()['decimal_point']
+ positive_sign = locale.localeconv()['positive_sign']
+ else:
+ decimal_point = '.'
+ positive_sign = '+'
+ tup = s.split('e')
+ try:
+ significand = tup[0].rstrip('0').rstrip(decimal_point)
+ sign = tup[1][0].replace(positive_sign, '')
+ exponent = tup[1][1:].lstrip('0')
+ if self._useMathText or self._usetex:
+ if significand == '1' and exponent != '':
+ # reformat 1x10^y as 10^y
+ significand = ''
+ if exponent:
+ exponent = '10^{%s%s}' % (sign, exponent)
+ if significand and exponent:
+ return r'%s{\times}%s' % (significand, exponent)
+ else:
+ return r'%s%s' % (significand, exponent)
+ else:
+ s = ('%se%s%s' % (significand, sign, exponent)).rstrip('e')
+ return s
+ except IndexError:
+ return s
+
+
+class LogFormatter(Formatter):
+ """
+ Base class for formatting ticks on a log or symlog scale.
+
+ It may be instantiated directly, or subclassed.
+
+ Parameters
+ ----------
+ base : float, optional, default: 10.
+ Base of the logarithm used in all calculations.
+
+ labelOnlyBase : bool, optional, default: False
+ If True, label ticks only at integer powers of base.
+ This is normally True for major ticks and False for
+ minor ticks.
+
+ minor_thresholds : (subset, all), optional, default: (1, 0.4)
+ If labelOnlyBase is False, these two numbers control
+ the labeling of ticks that are not at integer powers of
+ base; normally these are the minor ticks. The controlling
+ parameter is the log of the axis data range. In the typical
+ case where base is 10 it is the number of decades spanned
+ by the axis, so we can call it 'numdec'. If ``numdec <= all``,
+ all minor ticks will be labeled. If ``all < numdec <= subset``,
+ then only a subset of minor ticks will be labeled, so as to
+ avoid crowding. If ``numdec > subset`` then no minor ticks will
+ be labeled.
+
+ linthresh : None or float, optional, default: None
+ If a symmetric log scale is in use, its ``linthresh``
+ parameter must be supplied here.
+
+ Notes
+ -----
+ The `set_locs` method must be called to enable the subsetting
+ logic controlled by the ``minor_thresholds`` parameter.
+
+ In some cases such as the colorbar, there is no distinction between
+ major and minor ticks; the tick locations might be set manually,
+ or by a locator that puts ticks at integer powers of base and
+ at intermediate locations. For this situation, disable the
+ minor_thresholds logic by using ``minor_thresholds=(np.inf, np.inf)``,
+ so that all ticks will be labeled.
+
+ To disable labeling of minor ticks when 'labelOnlyBase' is False,
+ use ``minor_thresholds=(0, 0)``. This is the default for the
+ "classic" style.
+
+ Examples
+ --------
+ To label a subset of minor ticks when the view limits span up
+ to 2 decades, and all of the ticks when zoomed in to 0.5 decades
+ or less, use ``minor_thresholds=(2, 0.5)``.
+
+ To label all minor ticks when the view limits span up to 1.5
+ decades, use ``minor_thresholds=(1.5, 1.5)``.
+
+ """
+ def __init__(self, base=10.0, labelOnlyBase=False,
+ minor_thresholds=None,
+ linthresh=None):
+
+ self._base = float(base)
+ self.labelOnlyBase = labelOnlyBase
+ if minor_thresholds is None:
+ if rcParams['_internal.classic_mode']:
+ minor_thresholds = (0, 0)
+ else:
+ minor_thresholds = (1, 0.4)
+ self.minor_thresholds = minor_thresholds
+ self._sublabels = None
+ self._linthresh = linthresh
+
+ def base(self, base):
+ """
+ Change the *base* for labeling.
+
+ .. warning::
+ Should always match the base used for :class:`LogLocator`
+
+ """
+ self._base = base
+
+ def label_minor(self, labelOnlyBase):
+ """
+ Switch minor tick labeling on or off.
+
+ Parameters
+ ----------
+ labelOnlyBase : bool
+ If True, label ticks only at integer powers of base.
+
+ """
+ self.labelOnlyBase = labelOnlyBase
+
+ def set_locs(self, locs=None):
+ """
+ Use axis view limits to control which ticks are labeled.
+
+ The *locs* parameter is ignored in the present algorithm.
+
+ """
+ if np.isinf(self.minor_thresholds[0]):
+ self._sublabels = None
+ return
+
+ # Handle symlog case:
+ linthresh = self._linthresh
+ if linthresh is None:
+ try:
+ linthresh = self.axis.get_transform().linthresh
+ except AttributeError:
+ pass
+
+ vmin, vmax = self.axis.get_view_interval()
+ if vmin > vmax:
+ vmin, vmax = vmax, vmin
+
+ if linthresh is None and vmin <= 0:
+ # It's probably a colorbar with
+ # a format kwarg setting a LogFormatter in the manner
+ # that worked with 1.5.x, but that doesn't work now.
+ self._sublabels = set((1,)) # label powers of base
+ return
+
+ b = self._base
+ if linthresh is not None: # symlog
+ # Only compute the number of decades in the logarithmic part of the
+ # axis
+ numdec = 0
+ if vmin < -linthresh:
+ rhs = min(vmax, -linthresh)
+ numdec += math.log(vmin / rhs) / math.log(b)
+ if vmax > linthresh:
+ lhs = max(vmin, linthresh)
+ numdec += math.log(vmax / lhs) / math.log(b)
+ else:
+ vmin = math.log(vmin) / math.log(b)
+ vmax = math.log(vmax) / math.log(b)
+ numdec = abs(vmax - vmin)
+
+ if numdec > self.minor_thresholds[0]:
+ # Label only bases
+ self._sublabels = {1}
+ elif numdec > self.minor_thresholds[1]:
+ # Add labels between bases at log-spaced coefficients;
+ # include base powers in case the locations include
+ # "major" and "minor" points, as in colorbar.
+ c = np.logspace(0, 1, int(b)//2 + 1, base=b)
+ self._sublabels = set(np.round(c))
+ # For base 10, this yields (1, 2, 3, 4, 6, 10).
+ else:
+ # Label all integer multiples of base**n.
+ self._sublabels = set(np.arange(1, b + 1))
+
+ def _num_to_string(self, x, vmin, vmax):
+ if x > 10000:
+ s = '%1.0e' % x
+ elif x < 1:
+ s = '%1.0e' % x
+ else:
+ s = self.pprint_val(x, vmax - vmin)
+ return s
+
+ def __call__(self, x, pos=None):
+ """
+ Return the format for tick val *x*.
+ """
+ if x == 0.0: # Symlog
+ return '0'
+
+ x = abs(x)
+ b = self._base
+ # only label the decades
+ fx = math.log(x) / math.log(b)
+ is_x_decade = is_close_to_int(fx)
+ exponent = np.round(fx) if is_x_decade else np.floor(fx)
+ coeff = np.round(x / b ** exponent)
+
+ if self.labelOnlyBase and not is_x_decade:
+ return ''
+ if self._sublabels is not None and coeff not in self._sublabels:
+ return ''
+
+ vmin, vmax = self.axis.get_view_interval()
+ vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
+ s = self._num_to_string(x, vmin, vmax)
+ return self.fix_minus(s)
+
+ def format_data(self, value):
+ b = self.labelOnlyBase
+ self.labelOnlyBase = False
+ value = cbook.strip_math(self.__call__(value))
+ self.labelOnlyBase = b
+ return value
+
+ def format_data_short(self, value):
+ """
+ Return a short formatted string representation of a number.
+ """
+ return '%-12g' % value
+
+ def pprint_val(self, x, d):
+ # If the number is not too big and it's an int, format it as an int.
+ if abs(x) < 1e4 and x == int(x):
+ return '%d' % x
+
+ if d < 1e-2:
+ fmt = '%1.3e'
+ elif d < 1e-1:
+ fmt = '%1.3f'
+ elif d > 1e5:
+ fmt = '%1.1e'
+ elif d > 10:
+ fmt = '%1.1f'
+ elif d > 1:
+ fmt = '%1.2f'
+ else:
+ fmt = '%1.3f'
+ s = fmt % x
+
+ tup = s.split('e')
+ if len(tup) == 2:
+ mantissa = tup[0].rstrip('0').rstrip('.')
+ exponent = int(tup[1])
+ if exponent:
+ s = '%se%d' % (mantissa, exponent)
+ else:
+ s = mantissa
+ else:
+ s = s.rstrip('0').rstrip('.')
+ return s
+
+
+class LogFormatterExponent(LogFormatter):
+ """
+ Format values for log axis using ``exponent = log_base(value)``.
+ """
+ def _num_to_string(self, x, vmin, vmax):
+ fx = math.log(x) / math.log(self._base)
+ if abs(fx) > 10000:
+ s = '%1.0g' % fx
+ elif abs(fx) < 1:
+ s = '%1.0g' % fx
+ else:
+ fd = math.log(vmax - vmin) / math.log(self._base)
+ s = self.pprint_val(fx, fd)
+ return s
+
+
+class LogFormatterMathtext(LogFormatter):
+ """
+ Format values for log axis using ``exponent = log_base(value)``.
+ """
+
+ def _non_decade_format(self, sign_string, base, fx, usetex):
+ 'Return string for non-decade locations'
+ if usetex:
+ return (r'$%s%s^{%.2f}$') % (sign_string, base, fx)
+ else:
+ return ('$%s$' % _mathdefault('%s%s^{%.2f}' %
+ (sign_string, base, fx)))
+
+ def __call__(self, x, pos=None):
+ """
+ Return the format for tick value *x*.
+
+ The position *pos* is ignored.
+ """
+ usetex = rcParams['text.usetex']
+ min_exp = rcParams['axes.formatter.min_exponent']
+
+ if x == 0: # Symlog
+ if usetex:
+ return '$0$'
+ else:
+ return '$%s$' % _mathdefault('0')
+
+ sign_string = '-' if x < 0 else ''
+ x = abs(x)
+ b = self._base
+
+ # only label the decades
+ fx = math.log(x) / math.log(b)
+ is_x_decade = is_close_to_int(fx)
+ exponent = np.round(fx) if is_x_decade else np.floor(fx)
+ coeff = np.round(x / b ** exponent)
+ if is_x_decade:
+ fx = nearest_long(fx)
+
+ if self.labelOnlyBase and not is_x_decade:
+ return ''
+ if self._sublabels is not None and coeff not in self._sublabels:
+ return ''
+
+ # use string formatting of the base if it is not an integer
+ if b % 1 == 0.0:
+ base = '%d' % b
+ else:
+ base = '%s' % b
+
+ if np.abs(fx) < min_exp:
+ if usetex:
+ return r'${0}{1:g}$'.format(sign_string, x)
+ else:
+ return '${0}$'.format(_mathdefault(
+ '{0}{1:g}'.format(sign_string, x)))
+ elif not is_x_decade:
+ return self._non_decade_format(sign_string, base, fx, usetex)
+ else:
+ if usetex:
+ return (r'$%s%s^{%d}$') % (sign_string,
+ base,
+ nearest_long(fx))
+ else:
+ return ('$%s$' % _mathdefault(
+ '%s%s^{%d}' %
+ (sign_string, base, nearest_long(fx))))
+
+
+class LogFormatterSciNotation(LogFormatterMathtext):
+ """
+ Format values following scientific notation in a logarithmic axis.
+ """
+
+ def _non_decade_format(self, sign_string, base, fx, usetex):
+ 'Return string for non-decade locations'
+ b = float(base)
+ exponent = math.floor(fx)
+ coeff = b ** fx / b ** exponent
+ if is_close_to_int(coeff):
+ coeff = nearest_long(coeff)
+ if usetex:
+ return (r'$%s%g\times%s^{%d}$') % \
+ (sign_string, coeff, base, exponent)
+ else:
+ return ('$%s$' % _mathdefault(r'%s%g\times%s^{%d}' %
+ (sign_string, coeff, base, exponent)))
+
+
+class LogitFormatter(Formatter):
+ """
+ Probability formatter (using Math text).
+ """
+ def __call__(self, x, pos=None):
+ s = ''
+ if 0.01 <= x <= 0.99:
+ s = '{:.2f}'.format(x)
+ elif x < 0.01:
+ if is_decade(x):
+ s = '$10^{{{:.0f}}}$'.format(np.log10(x))
+ else:
+ s = '${:.5f}$'.format(x)
+ else: # x > 0.99
+ if is_decade(1-x):
+ s = '$1-10^{{{:.0f}}}$'.format(np.log10(1-x))
+ else:
+ s = '$1-{:.5f}$'.format(1-x)
+ return s
+
+ def format_data_short(self, value):
+ 'return a short formatted string representation of a number'
+ return '%-12g' % value
+
+
+class EngFormatter(Formatter):
+ """
+ Formats axis values using engineering prefixes to represent powers
+ of 1000, plus a specified unit, e.g., 10 MHz instead of 1e7.
+ """
+
+ # The SI engineering prefixes
+ ENG_PREFIXES = {
+ -24: "y",
+ -21: "z",
+ -18: "a",
+ -15: "f",
+ -12: "p",
+ -9: "n",
+ -6: "\N{GREEK SMALL LETTER MU}",
+ -3: "m",
+ 0: "",
+ 3: "k",
+ 6: "M",
+ 9: "G",
+ 12: "T",
+ 15: "P",
+ 18: "E",
+ 21: "Z",
+ 24: "Y"
+ }
+
+ def __init__(self, unit="", places=None, sep=" "):
+ """
+ Parameters
+ ----------
+ unit : str (default: "")
+ Unit symbol to use, suitable for use with single-letter
+ representations of powers of 1000. For example, 'Hz' or 'm'.
+
+ places : int (default: None)
+ Precision with which to display the number, specified in
+ digits after the decimal point (there will be between one
+ and three digits before the decimal point). If it is None,
+ the formatting falls back to the floating point format '%g',
+ which displays up to 6 *significant* digits, i.e. the equivalent
+ value for *places* varies between 0 and 5 (inclusive).
+
+ sep : str (default: " ")
+ Separator used between the value and the prefix/unit. For
+ example, one gets '3.14 mV' if ``sep`` is " " (default) and
+ '3.14mV' if ``sep`` is "". Besides the default behavior, some
+ other useful options may be:
+
+ * ``sep=""`` to append directly the prefix/unit to the value;
+ * ``sep="\\N{THIN SPACE}"`` (``U+2009``);
+ * ``sep="\\N{NARROW NO-BREAK SPACE}"`` (``U+202F``);
+ * ``sep="\\N{NO-BREAK SPACE}"`` (``U+00A0``).
+ """
+ self.unit = unit
+ self.places = places
+ self.sep = sep
+
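+ # Usage sketch for EngFormatter (not part of the upstream module); e.g. a
+ # tick at 2.5e7 would be labeled "25.0 MHz" with the settings below. ``ax``
+ # is an assumed existing Axes:
+ #
+ #     from matplotlib.ticker import EngFormatter
+ #     ax.xaxis.set_major_formatter(EngFormatter(unit='Hz', places=1))
+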
+ def __call__(self, x, pos=None):
+ s = "%s%s" % (self.format_eng(x), self.unit)
+ # Remove the trailing separator when there is neither prefix nor unit
+ if len(self.sep) > 0 and s.endswith(self.sep):
+ s = s[:-len(self.sep)]
+ return self.fix_minus(s)
+
+ def format_eng(self, num):
+ """
+ Formats a number in engineering notation, appending a letter
+ representing the power of 1000 of the original number.
+ Some examples:
+
+ >>> format_eng(0) # for self.places = 0
+ '0'
+
+ >>> format_eng(1000000) # for self.places = 1
+ '1.0 M'
+
+ >>> format_eng("-1e-6") # for self.places = 2
+ u'-1.00 \N{GREEK SMALL LETTER MU}'
+
+ `num` may be a numeric value or a string that can be converted
+ to a numeric value with ``float(num)``.
+ """
+ if isinstance(num, six.string_types):
+ warnings.warn(
+                "Passing a string as *num* argument is deprecated since "
+ "Matplotlib 2.1, and is expected to be removed in 2.3.",
+ mplDeprecation)
+
+ dnum = float(num)
+ sign = 1
+ fmt = "g" if self.places is None else ".{:d}f".format(self.places)
+
+ if dnum < 0:
+ sign = -1
+ dnum = -dnum
+
+ if dnum != 0:
+ pow10 = int(math.floor(math.log10(dnum) / 3) * 3)
+ else:
+ pow10 = 0
+ # Force dnum to zero, to avoid inconsistencies like
+ # format_eng(-0) = "0" and format_eng(0.0) = "0"
+ # but format_eng(-0.0) = "-0.0"
+ dnum = 0.0
+
+ pow10 = np.clip(pow10, min(self.ENG_PREFIXES), max(self.ENG_PREFIXES))
+
+ mant = sign * dnum / (10.0 ** pow10)
+ # Taking care of the cases like 999.9..., which
+ # may be rounded to 1000 instead of 1 k. Beware
+ # of the corner case of values that are beyond
+ # the range of SI prefixes (i.e. > 'Y').
+ _fmant = float("{mant:{fmt}}".format(mant=mant, fmt=fmt))
+ if _fmant >= 1000 and pow10 != max(self.ENG_PREFIXES):
+ mant /= 1000
+ pow10 += 3
+
+ prefix = self.ENG_PREFIXES[int(pow10)]
+
+ formatted = "{mant:{fmt}}{sep}{prefix}".format(
+ mant=mant, sep=self.sep, prefix=prefix, fmt=fmt)
+
+ return formatted
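+
+# Illustrative usage sketch (not part of the upstream module): attaching an
+# EngFormatter to an axis renders ticks such as "1.0 k" or "2.5 M". The
+# figure and data below are hypothetical.
+#
+#     import matplotlib.pyplot as plt
+#     fig, ax = plt.subplots()
+#     ax.plot([0, 1e6, 2e6], [1, 2, 3])
+#     ax.xaxis.set_major_formatter(EngFormatter(unit="Hz", places=1))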
+
+
+class PercentFormatter(Formatter):
+ """
+ Format numbers as a percentage.
+
+ Parameters
+ ----------
+ xmax : float
+ Determines how the number is converted into a percentage.
+ *xmax* is the data value that corresponds to 100%.
+ Percentages are computed as ``x / xmax * 100``. So if the data is
+ already scaled to be percentages, *xmax* will be 100. Another common
+ situation is where `xmax` is 1.0.
+
+ decimals : None or int
+ The number of decimal places to place after the point.
+ If *None* (the default), the number will be computed automatically.
+
+ symbol : string or None
+ A string that will be appended to the label. It may be
+ *None* or empty to indicate that no symbol should be used. LaTeX
+ special characters are escaped in *symbol* whenever latex mode is
+ enabled, unless *is_latex* is *True*.
+
+ is_latex : bool
+ If *False*, reserved LaTeX characters in *symbol* will be escaped.
+ """
+ def __init__(self, xmax=100, decimals=None, symbol='%', is_latex=False):
+ self.xmax = xmax + 0.0
+ self.decimals = decimals
+ self._symbol = symbol
+ self._is_latex = is_latex
+
+ def __call__(self, x, pos=None):
+ """
+ Formats the tick as a percentage with the appropriate scaling.
+ """
+ ax_min, ax_max = self.axis.get_view_interval()
+ display_range = abs(ax_max - ax_min)
+
+ return self.fix_minus(self.format_pct(x, display_range))
+
+ def format_pct(self, x, display_range):
+ """
+ Formats the number as a percentage number with the correct
+ number of decimals and adds the percent symbol, if any.
+
+ If `self.decimals` is `None`, the number of digits after the
+ decimal point is set based on the `display_range` of the axis
+ as follows:
+
+ +---------------+----------+------------------------+
+ | display_range | decimals | sample |
+ +---------------+----------+------------------------+
+ | >50 | 0 | ``x = 34.5`` => 35% |
+ +---------------+----------+------------------------+
+ | >5 | 1 | ``x = 34.5`` => 34.5% |
+ +---------------+----------+------------------------+
+ | >0.5 | 2 | ``x = 34.5`` => 34.50% |
+ +---------------+----------+------------------------+
+ | ... | ... | ... |
+ +---------------+----------+------------------------+
+
+ This method will not be very good for tiny axis ranges or
+ extremely large ones. It assumes that the values on the chart
+ are percentages displayed on a reasonable scale.
+ """
+ x = self.convert_to_pct(x)
+ if self.decimals is None:
+ # conversion works because display_range is a difference
+ scaled_range = self.convert_to_pct(display_range)
+ if scaled_range <= 0:
+ decimals = 0
+ else:
+ # Luckily Python's built-in ceil rounds to +inf, not away from
+ # zero. This is very important since the equation for decimals
+ # starts out as `scaled_range > 0.5 * 10**(2 - decimals)`
+ # and ends up with `decimals > 2 - log10(2 * scaled_range)`.
+ decimals = math.ceil(2.0 - math.log10(2.0 * scaled_range))
+ if decimals > 5:
+ decimals = 5
+ elif decimals < 0:
+ decimals = 0
+ else:
+ decimals = self.decimals
+ s = '{x:0.{decimals}f}'.format(x=x, decimals=int(decimals))
+
+ return s + self.symbol
+
+ def convert_to_pct(self, x):
+ return 100.0 * (x / self.xmax)
+
+ @property
+ def symbol(self):
+ """
+ The configured percent symbol as a string.
+
+ If LaTeX is enabled via :rc:`text.usetex`, the special characters
+ ``{'#', '$', '%', '&', '~', '_', '^', '\\', '{', '}'}`` are
+ automatically escaped in the string.
+ """
+ symbol = self._symbol
+ if not symbol:
+ symbol = ''
+ elif rcParams['text.usetex'] and not self._is_latex:
+ # Source: http://www.personal.ceu.hu/tex/specchar.htm
+ # Backslash must be first for this to work correctly since
+ # it keeps getting added in
+ for spec in r'\#$%&~_^{}':
+ symbol = symbol.replace(spec, '\\' + spec)
+ return symbol
+
+ @symbol.setter
+ def symbol(self, symbol):
+ self._symbol = symbol
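+
+# Illustrative usage sketch (not part of the upstream module): with data
+# already scaled to [0, 1], PercentFormatter(xmax=1.0) labels 0.25 as "25%";
+# for raw counts, pass the total as *xmax*. The axis below is hypothetical.
+#
+#     ax.yaxis.set_major_formatter(PercentFormatter(xmax=1.0))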
+
+
+class Locator(TickHelper):
+ """
+    Determine the tick locations.
+
+    Note that the same locator should not be shared between different
+    :class:`~matplotlib.axis.Axis` instances, because the locator stores
+    references to the Axis data and view limits.
+ """
+
+ # Some automatic tick locators can generate so many ticks they
+ # kill the machine when you try and render them.
+ # This parameter is set to cause locators to raise an error if too
+ # many ticks are generated.
+ MAXTICKS = 1000
+
+ def tick_values(self, vmin, vmax):
+ """
+ Return the values of the located ticks given **vmin** and **vmax**.
+
+ .. note::
+ To get tick locations with the vmin and vmax values defined
+ automatically for the associated :attr:`axis` simply call
+ the Locator instance::
+
+ >>> print((type(loc)))
+ <type 'Locator'>
+ >>> print((loc()))
+ [1, 2, 3, 4]
+
+ """
+ raise NotImplementedError('Derived must override')
+
+ def set_params(self, **kwargs):
+ """
+        Do nothing, and raise a warning. Any locator class not supporting the
+ set_params() function will call this.
+ """
+ warnings.warn("'set_params()' not defined for locator of type " +
+ str(type(self)))
+
+ def __call__(self):
+ """Return the locations of the ticks"""
+ # note: some locators return data limits, other return view limits,
+ # hence there is no *one* interface to call self.tick_values.
+ raise NotImplementedError('Derived must override')
+
+ def raise_if_exceeds(self, locs):
+ """raise a RuntimeError if Locator attempts to create more than
+ MAXTICKS locs"""
+ if len(locs) >= self.MAXTICKS:
+ raise RuntimeError("Locator attempting to generate {} ticks from "
+ "{} to {}: exceeds Locator.MAXTICKS".format(
+ len(locs), locs[0], locs[-1]))
+ return locs
+
+ def view_limits(self, vmin, vmax):
+ """
+ select a scale for the range from vmin to vmax
+
+ Normally this method is overridden by subclasses to
+ change locator behaviour.
+ """
+ return mtransforms.nonsingular(vmin, vmax)
+
+ def autoscale(self):
+ """autoscale the view limits"""
+ return self.view_limits(*self.axis.get_view_interval())
+
+ def pan(self, numsteps):
+        """Pan by *numsteps* ticks (can be positive or negative)"""
+ ticks = self()
+ numticks = len(ticks)
+
+ vmin, vmax = self.axis.get_view_interval()
+ vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
+ if numticks > 2:
+ step = numsteps * abs(ticks[0] - ticks[1])
+ else:
+ d = abs(vmax - vmin)
+ step = numsteps * d / 6.
+
+ vmin += step
+ vmax += step
+ self.axis.set_view_interval(vmin, vmax, ignore=True)
+
+ def zoom(self, direction):
+ "Zoom in/out on axis; if direction is >0 zoom in, else zoom out"
+
+ vmin, vmax = self.axis.get_view_interval()
+ vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
+ interval = abs(vmax - vmin)
+ step = 0.1 * interval * direction
+ self.axis.set_view_interval(vmin + step, vmax - step, ignore=True)
+
+ def refresh(self):
+ """refresh internal information based on current lim"""
+ pass
+
+
+class IndexLocator(Locator):
+ """
+ Place a tick on every multiple of some base number of points
+ plotted, e.g., on every 5th point. It is assumed that you are doing
+ index plotting; i.e., the axis is 0, len(data). This is mainly
+ useful for x ticks.
+ """
+ def __init__(self, base, offset):
+ 'place ticks on the i-th data points where (i-offset)%base==0'
+ self._base = base
+ self.offset = offset
+
+ def set_params(self, base=None, offset=None):
+ """Set parameters within this locator"""
+ if base is not None:
+ self._base = base
+ if offset is not None:
+ self.offset = offset
+
+ def __call__(self):
+ """Return the locations of the ticks"""
+ dmin, dmax = self.axis.get_data_interval()
+ return self.tick_values(dmin, dmax)
+
+ def tick_values(self, vmin, vmax):
+ return self.raise_if_exceeds(
+ np.arange(vmin + self.offset, vmax + 1, self._base))
+
+
+class FixedLocator(Locator):
+ """
+ Tick locations are fixed. If nbins is not None,
+ the array of possible positions will be subsampled to
+    keep the number of ticks <= nbins + 1.
+ The subsampling will be done so as to include the smallest
+ absolute value; for example, if zero is included in the
+ array of possibilities, then it is guaranteed to be one of
+ the chosen ticks.
+ """
+
+ def __init__(self, locs, nbins=None):
+ self.locs = np.asarray(locs)
+ self.nbins = nbins
+ if self.nbins is not None:
+ self.nbins = max(self.nbins, 2)
+
+ def set_params(self, nbins=None):
+ """Set parameters within this locator."""
+ if nbins is not None:
+ self.nbins = nbins
+
+ def __call__(self):
+ return self.tick_values(None, None)
+
+ def tick_values(self, vmin, vmax):
+        """
+ Return the locations of the ticks.
+
+ .. note::
+
+ Because the values are fixed, vmin and vmax are not used in this
+ method.
+
+ """
+ if self.nbins is None:
+ return self.locs
+ step = max(int(np.ceil(len(self.locs) / self.nbins)), 1)
+ ticks = self.locs[::step]
+ for i in range(1, step):
+ ticks1 = self.locs[i::step]
+ if np.abs(ticks1).min() < np.abs(ticks).min():
+ ticks = ticks1
+ return self.raise_if_exceeds(ticks)
+
+
+class NullLocator(Locator):
+ """
+ No ticks
+ """
+
+ def __call__(self):
+ return self.tick_values(None, None)
+
+ def tick_values(self, vmin, vmax):
+        """
+ Return the locations of the ticks.
+
+ .. note::
+
+            Because no ticks are returned, vmin and vmax are not used in
+            this method.
+ """
+ return []
+
+
+class LinearLocator(Locator):
+ """
+ Determine the tick locations
+
+ The first time this function is called it will try to set the
+ number of ticks to make a nice tick partitioning. Thereafter the
+ number of ticks will be fixed so that interactive navigation will
+ be nice
+
+ """
+ def __init__(self, numticks=None, presets=None):
+ """
+        Use presets to set locs based on lim. A dict mapping (vmin, vmax)->locs.
+ """
+ self.numticks = numticks
+ if presets is None:
+ self.presets = {}
+ else:
+ self.presets = presets
+
+ def set_params(self, numticks=None, presets=None):
+ """Set parameters within this locator."""
+ if presets is not None:
+ self.presets = presets
+ if numticks is not None:
+ self.numticks = numticks
+
+ def __call__(self):
+ 'Return the locations of the ticks'
+ vmin, vmax = self.axis.get_view_interval()
+ return self.tick_values(vmin, vmax)
+
+ def tick_values(self, vmin, vmax):
+ vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
+ if vmax < vmin:
+ vmin, vmax = vmax, vmin
+
+ if (vmin, vmax) in self.presets:
+ return self.presets[(vmin, vmax)]
+
+ if self.numticks is None:
+ self._set_numticks()
+
+ if self.numticks == 0:
+ return []
+ ticklocs = np.linspace(vmin, vmax, self.numticks)
+
+ return self.raise_if_exceeds(ticklocs)
+
+ def _set_numticks(self):
+ self.numticks = 11 # todo; be smart here; this is just for dev
+
+ def view_limits(self, vmin, vmax):
+ 'Try to choose the view limits intelligently'
+
+ if vmax < vmin:
+ vmin, vmax = vmax, vmin
+
+ if vmin == vmax:
+ vmin -= 1
+ vmax += 1
+
+ if rcParams['axes.autolimit_mode'] == 'round_numbers':
+ exponent, remainder = _divmod(
+ math.log10(vmax - vmin), math.log10(max(self.numticks - 1, 1)))
+ exponent -= (remainder < .5)
+ scale = max(self.numticks - 1, 1) ** (-exponent)
+ vmin = math.floor(scale * vmin) / scale
+ vmax = math.ceil(scale * vmax) / scale
+
+ return mtransforms.nonsingular(vmin, vmax)
+
+
+def closeto(x, y):
+ if abs(x - y) < 1e-10:
+ return True
+ else:
+ return False
+
+
+class Base(object):
+ 'this solution has some hacks to deal with floating point inaccuracies'
+ def __init__(self, base):
+ if base <= 0:
+ raise ValueError("'base' must be positive")
+ self._base = base
+
+ def lt(self, x):
+ 'return the largest multiple of base < x'
+ d, m = _divmod(x, self._base)
+ if closeto(m, 0) and not closeto(m / self._base, 1):
+ return (d - 1) * self._base
+ return d * self._base
+
+ def le(self, x):
+ 'return the largest multiple of base <= x'
+ d, m = _divmod(x, self._base)
+ if closeto(m / self._base, 1): # was closeto(m, self._base)
+ #looks like floating point error
+ return (d + 1) * self._base
+ return d * self._base
+
+ def gt(self, x):
+ 'return the smallest multiple of base > x'
+ d, m = _divmod(x, self._base)
+ if closeto(m / self._base, 1):
+ #looks like floating point error
+ return (d + 2) * self._base
+ return (d + 1) * self._base
+
+ def ge(self, x):
+ 'return the smallest multiple of base >= x'
+ d, m = _divmod(x, self._base)
+ if closeto(m, 0) and not closeto(m / self._base, 1):
+ return d * self._base
+ return (d + 1) * self._base
+
+ def get_base(self):
+ return self._base
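+
+# Worked example (illustrative, not part of the upstream module): with a base
+# of 0.5, the helpers snap a value to the nearest multiples on either side.
+#
+#     b = Base(0.5)
+#     b.le(1.3)   # -> 1.0
+#     b.ge(1.3)   # -> 1.5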
+
+
+class MultipleLocator(Locator):
+ """
+    Set a tick on every integer multiple of *base* within the
+    view interval.
+ """
+
+ def __init__(self, base=1.0):
+ self._base = Base(base)
+
+ def set_params(self, base):
+ """Set parameters within this locator."""
+ if base is not None:
+            # Re-wrap in Base so the lt/le/gt/ge helpers keep working.
+            self._base = Base(base)
+
+ def __call__(self):
+ 'Return the locations of the ticks'
+ vmin, vmax = self.axis.get_view_interval()
+ return self.tick_values(vmin, vmax)
+
+ def tick_values(self, vmin, vmax):
+ if vmax < vmin:
+ vmin, vmax = vmax, vmin
+ vmin = self._base.ge(vmin)
+ base = self._base.get_base()
+ n = (vmax - vmin + 0.001 * base) // base
+ locs = vmin - base + np.arange(n + 3) * base
+ return self.raise_if_exceeds(locs)
+
+ def view_limits(self, dmin, dmax):
+ """
+ Set the view limits to the nearest multiples of base that
+ contain the data
+ """
+ if rcParams['axes.autolimit_mode'] == 'round_numbers':
+ vmin = self._base.le(dmin)
+ vmax = self._base.ge(dmax)
+ if vmin == vmax:
+ vmin -= 1
+ vmax += 1
+ else:
+ vmin = dmin
+ vmax = dmax
+
+ return mtransforms.nonsingular(vmin, vmax)
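+
+# Illustrative usage sketch (not part of the upstream module): placing major
+# ticks at every multiple of 0.5 on a hypothetical axis.
+#
+#     ax.xaxis.set_major_locator(MultipleLocator(0.5))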
+
+
+def scale_range(vmin, vmax, n=1, threshold=100):
+ dv = abs(vmax - vmin) # > 0 as nonsingular is called before.
+ meanv = (vmax + vmin) / 2
+ if abs(meanv) / dv < threshold:
+ offset = 0
+ else:
+ offset = math.copysign(10 ** (math.log10(abs(meanv)) // 1), meanv)
+ scale = 10 ** (math.log10(dv / n) // 1)
+ return scale, offset
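+
+# Worked example (illustrative, not part of the upstream module): an offset is
+# only factored out when the mean is large relative to the span.
+#
+#     scale_range(0, 123456)       # -> (100000.0, 0)
+#     scale_range(100000, 100001)  # -> (1.0, 100000.0)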
+
+
+class MaxNLocator(Locator):
+ """
+ Select no more than N intervals at nice locations.
+ """
+ default_params = dict(nbins=10,
+ steps=None,
+ integer=False,
+ symmetric=False,
+ prune=None,
+ min_n_ticks=2)
+
+ def __init__(self, *args, **kwargs):
+ """
+ Keyword args:
+
+ *nbins*
+ Maximum number of intervals; one less than max number of
+ ticks. If the string `'auto'`, the number of bins will be
+ automatically determined based on the length of the axis.
+
+ *steps*
+ Sequence of nice numbers starting with 1 and ending with 10;
+ e.g., [1, 2, 4, 5, 10], where the values are acceptable
+            tick multiples. For the example above, 20, 40, 60 would be
+ an acceptable set of ticks, as would 0.4, 0.6, 0.8, because
+ they are multiples of 2. However, 30, 60, 90 would not
+ be allowed because 3 does not appear in the list of steps.
+
+ *integer*
+ If True, ticks will take only integer values, provided
+ at least `min_n_ticks` integers are found within the
+ view limits.
+
+ *symmetric*
+ If True, autoscaling will result in a range symmetric
+ about zero.
+
+ *prune*
+ ['lower' | 'upper' | 'both' | None]
+ Remove edge ticks -- useful for stacked or ganged plots where
+ the upper tick of one axes overlaps with the lower tick of the
+ axes above it, primarily when :rc:`axes.autolimit_mode` is
+ ``'round_numbers'``. If ``prune=='lower'``, the smallest tick will
+ be removed. If ``prune == 'upper'``, the largest tick will be
+ removed. If ``prune == 'both'``, the largest and smallest ticks
+ will be removed. If ``prune == None``, no ticks will be removed.
+
+ *min_n_ticks*
+ Relax `nbins` and `integer` constraints if necessary to
+ obtain this minimum number of ticks.
+
+ """
+ if args:
+ kwargs['nbins'] = args[0]
+ if len(args) > 1:
+ raise ValueError(
+ "Keywords are required for all arguments except 'nbins'")
+ self.set_params(**self.default_params)
+ self.set_params(**kwargs)
+
+ @staticmethod
+ def _validate_steps(steps):
+ if not np.iterable(steps):
+ raise ValueError('steps argument must be a sequence of numbers '
+ 'from 1 to 10')
+ steps = np.asarray(steps)
+ if np.any(np.diff(steps) <= 0):
+ raise ValueError('steps argument must be uniformly increasing')
+ if steps[-1] > 10 or steps[0] < 1:
+ warnings.warn('Steps argument should be a sequence of numbers\n'
+ 'increasing from 1 to 10, inclusive. Behavior with\n'
+ 'values outside this range is undefined, and will\n'
+ 'raise a ValueError in future versions of mpl.')
+ if steps[0] != 1:
+ steps = np.hstack((1, steps))
+ if steps[-1] != 10:
+ steps = np.hstack((steps, 10))
+ return steps
+
+ @staticmethod
+ def _staircase(steps):
+ # Make an extended staircase within which the needed
+ # step will be found. This is probably much larger
+ # than necessary.
+ flights = (0.1 * steps[:-1], steps, 10 * steps[1])
+ return np.hstack(flights)
+
+ def set_params(self, **kwargs):
+ """Set parameters within this locator."""
+ if 'nbins' in kwargs:
+ self._nbins = kwargs['nbins']
+ if self._nbins != 'auto':
+ self._nbins = int(self._nbins)
+ if 'symmetric' in kwargs:
+ self._symmetric = kwargs['symmetric']
+ if 'prune' in kwargs:
+ prune = kwargs['prune']
+ if prune is not None and prune not in ['upper', 'lower', 'both']:
+ raise ValueError(
+ "prune must be 'upper', 'lower', 'both', or None")
+ self._prune = prune
+ if 'min_n_ticks' in kwargs:
+ self._min_n_ticks = max(1, kwargs['min_n_ticks'])
+ if 'steps' in kwargs:
+ steps = kwargs['steps']
+ if steps is None:
+ self._steps = np.array([1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10])
+ else:
+ self._steps = self._validate_steps(steps)
+ self._extended_steps = self._staircase(self._steps)
+ if 'integer' in kwargs:
+ self._integer = kwargs['integer']
+
+ def _raw_ticks(self, vmin, vmax):
+ if self._nbins == 'auto':
+ if self.axis is not None:
+ nbins = np.clip(self.axis.get_tick_space(),
+ max(1, self._min_n_ticks - 1), 9)
+ else:
+ nbins = 9
+ else:
+ nbins = self._nbins
+
+ scale, offset = scale_range(vmin, vmax, nbins)
+ _vmin = vmin - offset
+ _vmax = vmax - offset
+ raw_step = (vmax - vmin) / nbins
+ steps = self._extended_steps * scale
+ if self._integer:
+ # For steps > 1, keep only integer values.
+ igood = (steps < 1) | (np.abs(steps - np.round(steps)) < 0.001)
+ steps = steps[igood]
+
+ istep = np.nonzero(steps >= raw_step)[0][0]
+
+ # Classic round_numbers mode may require a larger step.
+ if rcParams['axes.autolimit_mode'] == 'round_numbers':
+ for istep in range(istep, len(steps)):
+ step = steps[istep]
+ best_vmin = (_vmin // step) * step
+ best_vmax = best_vmin + step * nbins
+ if (best_vmax >= _vmax):
+ break
+
+ # This is an upper limit; move to smaller steps if necessary.
+ for i in range(istep):
+ step = steps[istep - i]
+ if (self._integer and
+ np.floor(_vmax) - np.ceil(_vmin) >= self._min_n_ticks - 1):
+ step = max(1, step)
+ best_vmin = (_vmin // step) * step
+
+ low = np.round(Base(step).le(_vmin - best_vmin) / step)
+ high = np.round(Base(step).ge(_vmax - best_vmin) / step)
+ ticks = np.arange(low, high + 1) * step + best_vmin + offset
+ nticks = ((ticks <= vmax) & (ticks >= vmin)).sum()
+ if nticks >= self._min_n_ticks:
+ break
+ return ticks
+
+ def __call__(self):
+ vmin, vmax = self.axis.get_view_interval()
+ return self.tick_values(vmin, vmax)
+
+ def tick_values(self, vmin, vmax):
+ if self._symmetric:
+ vmax = max(abs(vmin), abs(vmax))
+ vmin = -vmax
+ vmin, vmax = mtransforms.nonsingular(
+ vmin, vmax, expander=1e-13, tiny=1e-14)
+ locs = self._raw_ticks(vmin, vmax)
+
+ prune = self._prune
+ if prune == 'lower':
+ locs = locs[1:]
+ elif prune == 'upper':
+ locs = locs[:-1]
+ elif prune == 'both':
+ locs = locs[1:-1]
+ return self.raise_if_exceeds(locs)
+
+ def view_limits(self, dmin, dmax):
+ if self._symmetric:
+ dmax = max(abs(dmin), abs(dmax))
+ dmin = -dmax
+
+ dmin, dmax = mtransforms.nonsingular(
+ dmin, dmax, expander=1e-12, tiny=1e-13)
+
+ if rcParams['axes.autolimit_mode'] == 'round_numbers':
+ return self._raw_ticks(dmin, dmax)[[0, -1]]
+ else:
+ return dmin, dmax
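+
+# Illustrative usage sketch (not part of the upstream module): limiting an
+# axis to at most four intervals chosen from "nice" step sizes.
+#
+#     ax.yaxis.set_major_locator(MaxNLocator(nbins=4, steps=[1, 2, 5, 10]))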
+
+
+def decade_down(x, base=10):
+ 'floor x to the nearest lower decade'
+ if x == 0.0:
+ return -base
+ lx = np.floor(np.log(x) / np.log(base))
+ return base ** lx
+
+
+def decade_up(x, base=10):
+ 'ceil x to the nearest higher decade'
+ if x == 0.0:
+ return base
+ lx = np.ceil(np.log(x) / np.log(base))
+ return base ** lx
+
+
+def nearest_long(x):
+ if x == 0:
+ return long(0)
+ elif x > 0:
+ return long(x + 0.5)
+ else:
+ return long(x - 0.5)
+
+
+def is_decade(x, base=10):
+ if not np.isfinite(x):
+ return False
+ if x == 0.0:
+ return True
+ lx = np.log(np.abs(x)) / np.log(base)
+ return is_close_to_int(lx)
+
+
+def is_close_to_int(x):
+ if not np.isfinite(x):
+ return False
+ return abs(x - nearest_long(x)) < 1e-10
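+
+# Worked examples (illustrative, not part of the upstream module):
+#
+#     is_decade(100.0)   # -> True   (10**2)
+#     is_decade(30.0)    # -> False
+#     decade_down(30.0)  # -> 10.0
+#     decade_up(30.0)    # -> 100.0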
+
+
+class LogLocator(Locator):
+ """
+ Determine the tick locations for log axes
+ """
+
+ def __init__(self, base=10.0, subs=(1.0,), numdecs=4, numticks=None):
+ """
+ Place ticks on the locations : subs[j] * base**i
+
+ Parameters
+ ----------
+ subs : None, string, or sequence of float, optional, default (1.0,)
+ Gives the multiples of integer powers of the base at which
+ to place ticks. The default places ticks only at
+ integer powers of the base.
+ The permitted string values are ``'auto'`` and ``'all'``,
+ both of which use an algorithm based on the axis view
+ limits to determine whether and how to put ticks between
+ integer powers of the base. With ``'auto'``, ticks are
+ placed only between integer powers; with ``'all'``, the
+ integer powers are included. A value of None is
+ equivalent to ``'auto'``.
+
+ """
+ if numticks is None:
+ if rcParams['_internal.classic_mode']:
+ numticks = 15
+ else:
+ numticks = 'auto'
+ self.base(base)
+ self.subs(subs)
+ self.numdecs = numdecs
+ self.numticks = numticks
+
+ def set_params(self, base=None, subs=None, numdecs=None, numticks=None):
+ """Set parameters within this locator."""
+ if base is not None:
+ self.base(base)
+ if subs is not None:
+ self.subs(subs)
+ if numdecs is not None:
+ self.numdecs = numdecs
+ if numticks is not None:
+ self.numticks = numticks
+
+ # FIXME: these base and subs functions are contrary to our
+ # usual and desired API.
+
+ def base(self, base):
+ """
+ set the base of the log scaling (major tick every base**i, i integer)
+ """
+ self._base = float(base)
+
+ def subs(self, subs):
+ """
+ set the minor ticks for the log scaling every base**i*subs[j]
+ """
+ if subs is None: # consistency with previous bad API
+ self._subs = 'auto'
+ elif isinstance(subs, six.string_types):
+ if subs not in ('all', 'auto'):
+ raise ValueError("A subs string must be 'all' or 'auto'; "
+ "found '%s'." % subs)
+ self._subs = subs
+ else:
+ self._subs = np.asarray(subs, dtype=float)
+
+ def __call__(self):
+ 'Return the locations of the ticks'
+ vmin, vmax = self.axis.get_view_interval()
+ return self.tick_values(vmin, vmax)
+
+ def tick_values(self, vmin, vmax):
+ if self.numticks == 'auto':
+ if self.axis is not None:
+ numticks = np.clip(self.axis.get_tick_space(), 2, 9)
+ else:
+ numticks = 9
+ else:
+ numticks = self.numticks
+
+ b = self._base
+ # dummy axis has no axes attribute
+ if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
+ vmax = math.ceil(math.log(vmax) / math.log(b))
+ decades = np.arange(vmax - self.numdecs, vmax)
+ ticklocs = b ** decades
+
+ return ticklocs
+
+ if vmin <= 0.0:
+ if self.axis is not None:
+ vmin = self.axis.get_minpos()
+
+ if vmin <= 0.0 or not np.isfinite(vmin):
+ raise ValueError(
+ "Data has no positive values, and therefore can not be "
+ "log-scaled.")
+
+ vmin = math.log(vmin) / math.log(b)
+ vmax = math.log(vmax) / math.log(b)
+
+ if vmax < vmin:
+ vmin, vmax = vmax, vmin
+
+ numdec = math.floor(vmax) - math.ceil(vmin)
+
+ if isinstance(self._subs, six.string_types):
+ _first = 2.0 if self._subs == 'auto' else 1.0
+ if numdec > 10 or b < 3:
+ if self._subs == 'auto':
+ return np.array([]) # no minor or major ticks
+ else:
+ subs = np.array([1.0]) # major ticks
+ else:
+ subs = np.arange(_first, b)
+ else:
+ subs = self._subs
+
+ stride = 1
+
+ if rcParams['_internal.classic_mode']:
+ # Leave the bug left over from the PY2-PY3 transition.
+ while numdec / stride + 1 > numticks:
+ stride += 1
+ else:
+ while numdec // stride + 1 > numticks:
+ stride += 1
+
+ # Does subs include anything other than 1?
+ have_subs = len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0)
+
+ decades = np.arange(math.floor(vmin) - stride,
+ math.ceil(vmax) + 2 * stride, stride)
+
+ if hasattr(self, '_transform'):
+ ticklocs = self._transform.inverted().transform(decades)
+ if have_subs:
+ if stride == 1:
+ ticklocs = np.ravel(np.outer(subs, ticklocs))
+ else:
+ ticklocs = []
+ else:
+ if have_subs:
+ ticklocs = []
+ if stride == 1:
+ for decadeStart in b ** decades:
+ ticklocs.extend(subs * decadeStart)
+ else:
+ ticklocs = b ** decades
+
+ return self.raise_if_exceeds(np.asarray(ticklocs))
+
+ def view_limits(self, vmin, vmax):
+ 'Try to choose the view limits intelligently'
+ b = self._base
+
+ vmin, vmax = self.nonsingular(vmin, vmax)
+
+ if self.axis.axes.name == 'polar':
+ vmax = math.ceil(math.log(vmax) / math.log(b))
+ vmin = b ** (vmax - self.numdecs)
+
+ if rcParams['axes.autolimit_mode'] == 'round_numbers':
+ if not is_decade(vmin, self._base):
+ vmin = decade_down(vmin, self._base)
+ if not is_decade(vmax, self._base):
+ vmax = decade_up(vmax, self._base)
+
+ return vmin, vmax
+
+ def nonsingular(self, vmin, vmax):
+ if not np.isfinite(vmin) or not np.isfinite(vmax):
+ return 1, 10 # initial range, no data plotted yet
+
+ if vmin > vmax:
+ vmin, vmax = vmax, vmin
+ if vmax <= 0:
+ warnings.warn(
+ "Data has no positive values, and therefore cannot be "
+ "log-scaled.")
+ return 1, 10
+
+ minpos = self.axis.get_minpos()
+ if not np.isfinite(minpos):
+ minpos = 1e-300 # This should never take effect.
+ if vmin <= 0:
+ vmin = minpos
+ if vmin == vmax:
+ vmin = decade_down(vmin, self._base)
+ vmax = decade_up(vmax, self._base)
+ return vmin, vmax
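+
+# Illustrative usage sketch (not part of the upstream module): decades as
+# major ticks, with minor ticks at 2x and 5x within each decade, on a
+# hypothetical log-scaled axis.
+#
+#     ax.yaxis.set_major_locator(LogLocator(base=10.0))
+#     ax.yaxis.set_minor_locator(LogLocator(base=10.0, subs=(2.0, 5.0)))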
+
+
+class SymmetricalLogLocator(Locator):
+ """
+ Determine the tick locations for symmetric log axes
+ """
+
+ def __init__(self, transform=None, subs=None, linthresh=None, base=None):
+ """
+        Place ticks at the locations base**i * subs[j].
+ """
+ if transform is not None:
+ self._base = transform.base
+ self._linthresh = transform.linthresh
+ elif linthresh is not None and base is not None:
+ self._base = base
+ self._linthresh = linthresh
+ else:
+ raise ValueError("Either transform, or both linthresh "
+ "and base, must be provided.")
+ if subs is None:
+ self._subs = [1.0]
+ else:
+ self._subs = subs
+ self.numticks = 15
+
+ def set_params(self, subs=None, numticks=None):
+ """Set parameters within this locator."""
+ if numticks is not None:
+ self.numticks = numticks
+ if subs is not None:
+ self._subs = subs
+
+ def __call__(self):
+ 'Return the locations of the ticks'
+ # Note, these are untransformed coordinates
+ vmin, vmax = self.axis.get_view_interval()
+ return self.tick_values(vmin, vmax)
+
+ def tick_values(self, vmin, vmax):
+ b = self._base
+ t = self._linthresh
+
+ if vmax < vmin:
+ vmin, vmax = vmax, vmin
+
+ # The domain is divided into three sections, only some of
+ # which may actually be present.
+ #
+ # <======== -t ==0== t ========>
+ # aaaaaaaaa bbbbb ccccccccc
+ #
+ # a) and c) will have ticks at integral log positions. The
+ # number of ticks needs to be reduced if there are more
+ # than self.numticks of them.
+ #
+ # b) has a tick at 0 and only 0 (we assume t is a small
+ # number, and the linear segment is just an implementation
+ # detail and not interesting.)
+ #
+ # We could also add ticks at t, but that seems to usually be
+ # uninteresting.
+ #
+ # "simple" mode is when the range falls entirely within (-t,
+ # t) -- it should just display (vmin, 0, vmax)
+
+ has_a = has_b = has_c = False
+ if vmin < -t:
+ has_a = True
+ if vmax > -t:
+ has_b = True
+ if vmax > t:
+ has_c = True
+ elif vmin < 0:
+ if vmax > 0:
+ has_b = True
+ if vmax > t:
+ has_c = True
+ else:
+ return [vmin, vmax]
+ elif vmin < t:
+ if vmax > t:
+ has_b = True
+ has_c = True
+ else:
+ return [vmin, vmax]
+ else:
+ has_c = True
+
+ def get_log_range(lo, hi):
+ lo = np.floor(np.log(lo) / np.log(b))
+ hi = np.ceil(np.log(hi) / np.log(b))
+ return lo, hi
+
+ # First, calculate all the ranges, so we can determine striding
+ if has_a:
+ if has_b:
+ a_range = get_log_range(t, -vmin + 1)
+ else:
+ a_range = get_log_range(-vmax, -vmin + 1)
+ else:
+ a_range = (0, 0)
+
+ if has_c:
+ if has_b:
+ c_range = get_log_range(t, vmax + 1)
+ else:
+ c_range = get_log_range(vmin, vmax + 1)
+ else:
+ c_range = (0, 0)
+
+ total_ticks = (a_range[1] - a_range[0]) + (c_range[1] - c_range[0])
+ if has_b:
+ total_ticks += 1
+ stride = max(total_ticks // (self.numticks - 1), 1)
+
+ decades = []
+ if has_a:
+ decades.extend(-1 * (b ** (np.arange(a_range[0], a_range[1],
+ stride)[::-1])))
+
+ if has_b:
+ decades.append(0.0)
+
+ if has_c:
+ decades.extend(b ** (np.arange(c_range[0], c_range[1], stride)))
+
+ # Add the subticks if requested
+ if self._subs is None:
+ subs = np.arange(2.0, b)
+ else:
+ subs = np.asarray(self._subs)
+
+ if len(subs) > 1 or subs[0] != 1.0:
+ ticklocs = []
+ for decade in decades:
+ if decade == 0:
+ ticklocs.append(decade)
+ else:
+ ticklocs.extend(subs * decade)
+ else:
+ ticklocs = decades
+
+ return self.raise_if_exceeds(np.array(ticklocs))
+
+ def view_limits(self, vmin, vmax):
+ 'Try to choose the view limits intelligently'
+ b = self._base
+ if vmax < vmin:
+ vmin, vmax = vmax, vmin
+
+ if rcParams['axes.autolimit_mode'] == 'round_numbers':
+ if not is_decade(abs(vmin), b):
+ if vmin < 0:
+ vmin = -decade_up(-vmin, b)
+ else:
+ vmin = decade_down(vmin, b)
+ if not is_decade(abs(vmax), b):
+ if vmax < 0:
+ vmax = -decade_down(-vmax, b)
+ else:
+ vmax = decade_up(vmax, b)
+
+ if vmin == vmax:
+ if vmin < 0:
+ vmin = -decade_up(-vmin, b)
+ vmax = -decade_down(-vmax, b)
+ else:
+ vmin = decade_down(vmin, b)
+ vmax = decade_up(vmax, b)
+
+ result = mtransforms.nonsingular(vmin, vmax)
+ return result
+
+
+class LogitLocator(Locator):
+ """
+ Determine the tick locations for logit axes
+ """
+
+ def __init__(self, minor=False):
+ """
+ place ticks on the logit locations
+ """
+ self.minor = minor
+
+ def set_params(self, minor=None):
+ """Set parameters within this locator."""
+ if minor is not None:
+ self.minor = minor
+
+ def __call__(self):
+ 'Return the locations of the ticks'
+ vmin, vmax = self.axis.get_view_interval()
+ return self.tick_values(vmin, vmax)
+
+ def tick_values(self, vmin, vmax):
+ # dummy axis has no axes attribute
+ if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
+ raise NotImplementedError('Polar axis cannot be logit scaled yet')
+
+ vmin, vmax = self.nonsingular(vmin, vmax)
+ vmin = np.log10(vmin / (1 - vmin))
+ vmax = np.log10(vmax / (1 - vmax))
+
+ decade_min = np.floor(vmin)
+ decade_max = np.ceil(vmax)
+
+ # major ticks
+ if not self.minor:
+ ticklocs = []
+ if (decade_min <= -1):
+ expo = np.arange(decade_min, min(0, decade_max + 1))
+ ticklocs.extend(list(10**expo))
+ if (decade_min <= 0) and (decade_max >= 0):
+ ticklocs.append(0.5)
+ if (decade_max >= 1):
+ expo = -np.arange(max(1, decade_min), decade_max + 1)
+ ticklocs.extend(list(1 - 10**expo))
+
+ # minor ticks
+ else:
+ ticklocs = []
+ if (decade_min <= -2):
+ expo = np.arange(decade_min, min(-1, decade_max))
+ newticks = np.outer(np.arange(2, 10), 10**expo).ravel()
+ ticklocs.extend(list(newticks))
+ if (decade_min <= 0) and (decade_max >= 0):
+ ticklocs.extend([0.2, 0.3, 0.4, 0.6, 0.7, 0.8])
+ if (decade_max >= 2):
+ expo = -np.arange(max(2, decade_min), decade_max + 1)
+ newticks = 1 - np.outer(np.arange(2, 10), 10**expo).ravel()
+ ticklocs.extend(list(newticks))
+
+ return self.raise_if_exceeds(np.array(ticklocs))
+
+ def nonsingular(self, vmin, vmax):
+ initial_range = (1e-7, 1 - 1e-7)
+ if not np.isfinite(vmin) or not np.isfinite(vmax):
+ return initial_range # no data plotted yet
+
+ if vmin > vmax:
+ vmin, vmax = vmax, vmin
+
+ # what to do if a window beyond ]0, 1[ is chosen
+ if self.axis is not None:
+ minpos = self.axis.get_minpos()
+ if not np.isfinite(minpos):
+ return initial_range # again, no data plotted
+ else:
+ minpos = 1e-7 # should not occur in normal use
+
+ # NOTE: for vmax, we should query a property similar to get_minpos, but
+ # related to the maximal, less-than-one data point. Unfortunately,
+ # Bbox._minpos is defined very deep in the BBox and updated with data,
+ # so for now we use 1 - minpos as a substitute.
+
+ if vmin <= 0:
+ vmin = minpos
+ if vmax >= 1:
+ vmax = 1 - minpos
+ if vmin == vmax:
+ return 0.1 * vmin, 1 - 0.1 * vmin
+
+ return vmin, vmax
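+
+# Illustrative usage sketch (not part of the upstream module): these classes
+# are normally installed by the "logit" scale, but can also be set manually
+# on a hypothetical axis.
+#
+#     ax.yaxis.set_major_locator(LogitLocator())
+#     ax.yaxis.set_major_formatter(LogitFormatter())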
+
+
+class AutoLocator(MaxNLocator):
+ """
+ Dynamically find major tick positions. This is actually a subclass
+ of `~matplotlib.ticker.MaxNLocator`, with parameters *nbins = 'auto'*
+ and *steps = [1, 2, 2.5, 5, 10]*.
+ """
+ def __init__(self):
+ """
+ To know the values of the non-public parameters, please have a
+        look at the defaults of `~matplotlib.ticker.MaxNLocator`.
+ """
+ if rcParams['_internal.classic_mode']:
+ nbins = 9
+ steps = [1, 2, 5, 10]
+ else:
+ nbins = 'auto'
+ steps = [1, 2, 2.5, 5, 10]
+ MaxNLocator.__init__(self, nbins=nbins, steps=steps)
+
+
+class AutoMinorLocator(Locator):
+ """
+ Dynamically find minor tick positions based on the positions of
+ major ticks. The scale must be linear with major ticks evenly spaced.
+ """
+ def __init__(self, n=None):
+ """
+ *n* is the number of subdivisions of the interval between
+ major ticks; e.g., n=2 will place a single minor tick midway
+ between major ticks.
+
+ If *n* is omitted or None, it will be set to 5 or 4.
+ """
+ self.ndivs = n
+
+ def __call__(self):
+ 'Return the locations of the ticks'
+ if self.axis.get_scale() == 'log':
+ warnings.warn('AutoMinorLocator does not work with logarithmic '
+ 'scale')
+ return []
+
+ majorlocs = self.axis.get_majorticklocs()
+ try:
+ majorstep = majorlocs[1] - majorlocs[0]
+ except IndexError:
+ # Need at least two major ticks to find minor tick locations
+ # TODO: Figure out a way to still be able to display minor
+ # ticks without two major ticks visible. For now, just display
+ # no ticks at all.
+ return []
+
+ if self.ndivs is None:
+ x = int(np.round(10 ** (np.log10(majorstep) % 1)))
+ if x in [1, 5, 10]:
+ ndivs = 5
+ else:
+ ndivs = 4
+ else:
+ ndivs = self.ndivs
+
+ minorstep = majorstep / ndivs
+
+ vmin, vmax = self.axis.get_view_interval()
+ if vmin > vmax:
+ vmin, vmax = vmax, vmin
+
+ t0 = majorlocs[0]
+ tmin = ((vmin - t0) // minorstep + 1) * minorstep
+ tmax = ((vmax - t0) // minorstep + 1) * minorstep
+ locs = np.arange(tmin, tmax, minorstep) + t0
+ mod = np.abs((locs - t0) % majorstep)
+ cond1 = mod > minorstep / 10.0
+ cond2 = ~np.isclose(mod, majorstep, atol=0)
+ locs = locs.compress(cond1 & cond2)
+
+ return self.raise_if_exceeds(np.array(locs))
+
+ def tick_values(self, vmin, vmax):
+ raise NotImplementedError('Cannot get tick locations for a '
+ '%s type.' % type(self))
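+
+# Illustrative usage sketch (not part of the upstream module): one minor tick
+# halfway between each pair of major ticks on a hypothetical linear axis.
+#
+#     ax.xaxis.set_minor_locator(AutoMinorLocator(2))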
+
+
+class OldAutoLocator(Locator):
+ """
+ On autoscale this class picks the best MultipleLocator to set the
+ view limits and the tick locs.
+
+ """
+ def __init__(self):
+ self._locator = LinearLocator()
+
+ def __call__(self):
+ 'Return the locations of the ticks'
+ self.refresh()
+ return self.raise_if_exceeds(self._locator())
+
+ def tick_values(self, vmin, vmax):
+ raise NotImplementedError('Cannot get tick locations for a '
+ '%s type.' % type(self))
+
+ def refresh(self):
+ 'refresh internal information based on current lim'
+ vmin, vmax = self.axis.get_view_interval()
+ vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
+ d = abs(vmax - vmin)
+ self._locator = self.get_locator(d)
+
+ def view_limits(self, vmin, vmax):
+ 'Try to choose the view limits intelligently'
+
+ d = abs(vmax - vmin)
+ self._locator = self.get_locator(d)
+ return self._locator.view_limits(vmin, vmax)
+
+ def get_locator(self, d):
+ 'pick the best locator based on a distance'
+ d = abs(d)
+ if d <= 0:
+ locator = MultipleLocator(0.2)
+ else:
+
+ try:
+ ld = math.log10(d)
+ except OverflowError:
+ raise RuntimeError('AutoLocator illegal data interval range')
+
+ fld = math.floor(ld)
+ base = 10 ** fld
+
+ #if ld==fld: base = 10**(fld-1)
+ #else: base = 10**fld
+
+ if d >= 5 * base:
+ ticksize = base
+ elif d >= 2 * base:
+ ticksize = base / 2.0
+ else:
+ ticksize = base / 5.0
+ locator = MultipleLocator(ticksize)
+
+ return locator
diff --git a/contrib/python/matplotlib/py2/matplotlib/tight_bbox.py b/contrib/python/matplotlib/py2/matplotlib/tight_bbox.py
new file mode 100644
index 00000000000..7bffb353129
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tight_bbox.py
@@ -0,0 +1,87 @@
+"""
+This module supports the *bbox_inches* option of the savefig command.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
+
+
+def adjust_bbox(fig, bbox_inches, fixed_dpi=None):
+ """
+ Temporarily adjust the figure so that only the specified area
+ (bbox_inches) is saved.
+
+ It modifies fig.bbox, fig.bbox_inches,
+ fig.transFigure._boxout, and fig.patch. While the figure size
+ changes, the scale of the original figure is conserved. A
+    function which restores the original values is returned.
+ """
+
+ origBbox = fig.bbox
+ origBboxInches = fig.bbox_inches
+ _boxout = fig.transFigure._boxout
+
+ asp_list = []
+ locator_list = []
+ for ax in fig.axes:
+ pos = ax.get_position(original=False).frozen()
+ locator_list.append(ax.get_axes_locator())
+ asp_list.append(ax.get_aspect())
+
+ def _l(a, r, pos=pos):
+ return pos
+ ax.set_axes_locator(_l)
+ ax.set_aspect("auto")
+
+ def restore_bbox():
+
+ for ax, asp, loc in zip(fig.axes, asp_list, locator_list):
+ ax.set_aspect(asp)
+ ax.set_axes_locator(loc)
+
+ fig.bbox = origBbox
+ fig.bbox_inches = origBboxInches
+ fig.transFigure._boxout = _boxout
+ fig.transFigure.invalidate()
+ fig.patch.set_bounds(0, 0, 1, 1)
+
+ if fixed_dpi is not None:
+ tr = Affine2D().scale(fixed_dpi)
+ dpi_scale = fixed_dpi / fig.dpi
+ else:
+ tr = Affine2D().scale(fig.dpi)
+ dpi_scale = 1.
+
+ _bbox = TransformedBbox(bbox_inches, tr)
+
+ fig.bbox_inches = Bbox.from_bounds(0, 0,
+ bbox_inches.width, bbox_inches.height)
+ x0, y0 = _bbox.x0, _bbox.y0
+ w1, h1 = fig.bbox.width * dpi_scale, fig.bbox.height * dpi_scale
+ fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0, w1, h1)
+ fig.transFigure.invalidate()
+
+ fig.bbox = TransformedBbox(fig.bbox_inches, tr)
+
+ fig.patch.set_bounds(x0 / w1, y0 / h1,
+ fig.bbox.width / w1, fig.bbox.height / h1)
+
+ return restore_bbox
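+
+# Illustrative sketch (not part of the upstream module): a saving code path
+# would typically bracket the draw between adjust_bbox() and the returned
+# restore function. The *canvas* object and file name below are hypothetical.
+#
+#     restore_bbox = adjust_bbox(fig, bbox_inches, fixed_dpi=72)
+#     try:
+#         canvas.print_png("out.png")  # draw/save with the adjusted bbox
+#     finally:
+#         restore_bbox()               # put the figure back as it was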
+
+
+def process_figure_for_rasterizing(fig, bbox_inches_restore, fixed_dpi=None):
+ """
+    This needs to be called when the figure dpi changes during drawing
+    (e.g., rasterizing). It recovers the bbox and re-adjusts it with
+ the new dpi.
+ """
+
+ bbox_inches, restore_bbox = bbox_inches_restore
+ restore_bbox()
+ r = adjust_bbox(fig, bbox_inches, fixed_dpi)
+
+ return bbox_inches, r
diff --git a/contrib/python/matplotlib/py2/matplotlib/tight_layout.py b/contrib/python/matplotlib/py2/matplotlib/tight_layout.py
new file mode 100644
index 00000000000..1c18dc63c30
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tight_layout.py
@@ -0,0 +1,381 @@
+"""
+This module provides routines to adjust subplot params so that subplots are
+nicely fit in the figure. In doing so, only axis labels, tick labels, axes
+titles and offsetboxes that are anchored to axes are currently considered.
+
+Internally, it assumes that the margins (left_margin, etc.), which are the
+differences between ax.get_tightbbox and ax.bbox, are independent of axes
+position. This may fail if Axes.adjustable is datalim. It will also fail in
+some cases (for example, when the left or right margin is affected by the
+xlabel).
+"""
+
+import warnings
+
+import matplotlib
+from matplotlib.transforms import TransformedBbox, Bbox
+
+from matplotlib.font_manager import FontProperties
+rcParams = matplotlib.rcParams
+
+
+def _get_left(tight_bbox, axes_bbox):
+ return axes_bbox.xmin - tight_bbox.xmin
+
+
+def _get_right(tight_bbox, axes_bbox):
+ return tight_bbox.xmax - axes_bbox.xmax
+
+
+def _get_bottom(tight_bbox, axes_bbox):
+ return axes_bbox.ymin - tight_bbox.ymin
+
+
+def _get_top(tight_bbox, axes_bbox):
+ return tight_bbox.ymax - axes_bbox.ymax
+
+
+def auto_adjust_subplotpars(
+ fig, renderer, nrows_ncols, num1num2_list, subplot_list,
+ ax_bbox_list=None, pad=1.08, h_pad=None, w_pad=None, rect=None):
+ """
+ Return a dict of subplot parameters to adjust spacing between subplots.
+
+    Note that this function ignores the geometry information of the subplots
+    themselves, but uses what is given by the *nrows_ncols* and *num1num2_list*
+ parameters. Also, the results could be incorrect if some subplots have
+ ``adjustable=datalim``.
+
+ Parameters
+ ----------
+ nrows_ncols : Tuple[int, int]
+ Number of rows and number of columns of the grid.
+ num1num2_list : List[int]
+ List of numbers specifying the area occupied by the subplot
+ subplot_list : list of subplots
+ List of subplots that will be used to calculate optimal subplot_params.
+ pad : float
+ Padding between the figure edge and the edges of subplots, as a
+ fraction of the font size.
+ h_pad, w_pad : float
+ Padding (height/width) between edges of adjacent subplots, as a
+ fraction of the font size. Defaults to *pad*.
+ rect : Tuple[float, float, float, float]
+ [left, bottom, right, top] in normalized (0, 1) figure coordinates.
+ """
+ rows, cols = nrows_ncols
+
+ font_size_inches = (
+ FontProperties(size=rcParams["font.size"]).get_size_in_points() / 72)
+ pad_inches = pad * font_size_inches
+ if h_pad is not None:
+ vpad_inches = h_pad * font_size_inches
+ else:
+ vpad_inches = pad_inches
+
+ if w_pad is not None:
+ hpad_inches = w_pad * font_size_inches
+ else:
+ hpad_inches = pad_inches
+
+ if len(num1num2_list) != len(subplot_list) or len(subplot_list) == 0:
+ raise ValueError
+
+ if rect is None:
+ margin_left = margin_bottom = margin_right = margin_top = None
+ else:
+ margin_left, margin_bottom, _right, _top = rect
+ if _right:
+ margin_right = 1 - _right
+ else:
+ margin_right = None
+ if _top:
+ margin_top = 1 - _top
+ else:
+ margin_top = None
+
+ vspaces = [[] for i in range((rows + 1) * cols)]
+ hspaces = [[] for i in range(rows * (cols + 1))]
+
+ union = Bbox.union
+
+ if ax_bbox_list is None:
+ ax_bbox_list = []
+ for subplots in subplot_list:
+ ax_bbox = union([ax.get_position(original=True)
+ for ax in subplots])
+ ax_bbox_list.append(ax_bbox)
+
+ for subplots, ax_bbox, (num1, num2) in zip(subplot_list,
+ ax_bbox_list,
+ num1num2_list):
+ if all([not ax.get_visible() for ax in subplots]):
+ continue
+
+ tight_bbox_raw = union([ax.get_tightbbox(renderer) for ax in subplots
+ if ax.get_visible()])
+ tight_bbox = TransformedBbox(tight_bbox_raw,
+ fig.transFigure.inverted())
+
+ row1, col1 = divmod(num1, cols)
+
+ if num2 is None:
+ # left
+ hspaces[row1 * (cols + 1) + col1].append(
+ _get_left(tight_bbox, ax_bbox))
+ # right
+ hspaces[row1 * (cols + 1) + (col1 + 1)].append(
+ _get_right(tight_bbox, ax_bbox))
+ # top
+ vspaces[row1 * cols + col1].append(
+ _get_top(tight_bbox, ax_bbox))
+ # bottom
+ vspaces[(row1 + 1) * cols + col1].append(
+ _get_bottom(tight_bbox, ax_bbox))
+
+ else:
+ row2, col2 = divmod(num2, cols)
+
+ for row_i in range(row1, row2 + 1):
+ # left
+ hspaces[row_i * (cols + 1) + col1].append(
+ _get_left(tight_bbox, ax_bbox))
+ # right
+ hspaces[row_i * (cols + 1) + (col2 + 1)].append(
+ _get_right(tight_bbox, ax_bbox))
+ for col_i in range(col1, col2 + 1):
+ # top
+ vspaces[row1 * cols + col_i].append(
+ _get_top(tight_bbox, ax_bbox))
+ # bottom
+ vspaces[(row2 + 1) * cols + col_i].append(
+ _get_bottom(tight_bbox, ax_bbox))
+
+ fig_width_inch, fig_height_inch = fig.get_size_inches()
+
+    # Margins can be negative for axes with aspect applied, so we append
+    # [0] below so that the minimum margins are 0.
+
+ if not margin_left:
+ margin_left = max([sum(s) for s in hspaces[::cols + 1]] + [0])
+ margin_left += pad_inches / fig_width_inch
+
+ if not margin_right:
+ margin_right = max([sum(s) for s in hspaces[cols::cols + 1]] + [0])
+ margin_right += pad_inches / fig_width_inch
+
+ if not margin_top:
+ margin_top = max([sum(s) for s in vspaces[:cols]] + [0])
+ margin_top += pad_inches / fig_height_inch
+
+ if not margin_bottom:
+ margin_bottom = max([sum(s) for s in vspaces[-cols:]] + [0])
+ margin_bottom += pad_inches / fig_height_inch
+
+ if margin_left + margin_right >= 1:
+ margin_left = 0.4999
+ margin_right = 0.4999
+ warnings.warn('The left and right margins cannot be made large '
+ 'enough to accommodate all axes decorations. ')
+ if margin_bottom + margin_top >= 1:
+ margin_bottom = 0.4999
+ margin_top = 0.4999
+ warnings.warn('The bottom and top margins cannot be made large '
+ 'enough to accommodate all axes decorations. ')
+
+ kwargs = dict(left=margin_left,
+ right=1 - margin_right,
+ bottom=margin_bottom,
+ top=1 - margin_top)
+ if cols > 1:
+ hspace = (
+ max(sum(s)
+ for i in range(rows)
+ for s in hspaces[i * (cols + 1) + 1:(i + 1) * (cols + 1) - 1])
+ + hpad_inches / fig_width_inch)
+ # axes widths:
+ h_axes = (1 - margin_right - margin_left - hspace * (cols - 1)) / cols
+ if h_axes < 0:
+ warnings.warn('tight_layout cannot make axes width small enough '
+ 'to accommodate all axes decorations')
+ kwargs["wspace"] = 0.5
+ else:
+ kwargs["wspace"] = hspace / h_axes
+
+ if rows > 1:
+ vspace = (max(sum(s) for s in vspaces[cols:-cols])
+ + vpad_inches / fig_height_inch)
+ v_axes = (1 - margin_top - margin_bottom - vspace * (rows - 1)) / rows
+ if v_axes < 0:
+ warnings.warn('tight_layout cannot make axes height small enough '
+ 'to accommodate all axes decorations')
+ kwargs["hspace"] = 0.5
+ else:
+ kwargs["hspace"] = vspace / v_axes
+
+ return kwargs
+
+
+def get_renderer(fig):
+ if fig._cachedRenderer:
+ renderer = fig._cachedRenderer
+ else:
+ canvas = fig.canvas
+
+ if canvas and hasattr(canvas, "get_renderer"):
+ renderer = canvas.get_renderer()
+ else:
+ # not sure if this can happen
+ warnings.warn("tight_layout : falling back to Agg renderer")
+ from matplotlib.backends.backend_agg import FigureCanvasAgg
+ canvas = FigureCanvasAgg(fig)
+ renderer = canvas.get_renderer()
+
+ return renderer
+
+
+def get_subplotspec_list(axes_list, grid_spec=None):
+ """Return a list of subplotspec from the given list of axes.
+
+ For an instance of axes that does not support subplotspec, None is inserted
+ in the list.
+
+ If grid_spec is given, None is inserted for those not from the given
+ grid_spec.
+ """
+ subplotspec_list = []
+ for ax in axes_list:
+ axes_or_locator = ax.get_axes_locator()
+ if axes_or_locator is None:
+ axes_or_locator = ax
+
+ if hasattr(axes_or_locator, "get_subplotspec"):
+ subplotspec = axes_or_locator.get_subplotspec()
+ subplotspec = subplotspec.get_topmost_subplotspec()
+ gs = subplotspec.get_gridspec()
+ if grid_spec is not None:
+ if gs != grid_spec:
+ subplotspec = None
+ elif gs.locally_modified_subplot_params():
+ subplotspec = None
+ else:
+ subplotspec = None
+
+ subplotspec_list.append(subplotspec)
+
+ return subplotspec_list
+
+
+def get_tight_layout_figure(fig, axes_list, subplotspec_list, renderer,
+ pad=1.08, h_pad=None, w_pad=None, rect=None):
+ """
+    Return subplot parameters for a tight-layouted figure with specified padding.
+
+ Parameters
+ ----------
+ fig : Figure
+ axes_list : list of Axes
+ subplotspec_list : list of `.SubplotSpec`
+ The subplotspecs of each axes.
+ renderer : renderer
+ pad : float
+ Padding between the figure edge and the edges of subplots, as a
+ fraction of the font size.
+ h_pad, w_pad : float
+ Padding (height/width) between edges of adjacent subplots. Defaults to
+ *pad_inches*.
+ rect : Tuple[float, float, float, float], optional
+ (left, bottom, right, top) rectangle in normalized figure coordinates
+ that the whole subplots area (including labels) will fit into.
+ Defaults to using the entire figure.
+ """
+
+ subplot_list = []
+ nrows_list = []
+ ncols_list = []
+ ax_bbox_list = []
+
+ subplot_dict = {} # Multiple axes can share same subplot_interface (e.g.,
+ # axes_grid1); thus we need to join them together.
+
+ subplotspec_list2 = []
+
+ for ax, subplotspec in zip(axes_list,
+ subplotspec_list):
+ if subplotspec is None:
+ continue
+
+ subplots = subplot_dict.setdefault(subplotspec, [])
+
+ if not subplots:
+ myrows, mycols, _, _ = subplotspec.get_geometry()
+ nrows_list.append(myrows)
+ ncols_list.append(mycols)
+ subplotspec_list2.append(subplotspec)
+ subplot_list.append(subplots)
+ ax_bbox_list.append(subplotspec.get_position(fig))
+
+ subplots.append(ax)
+
+ if (len(nrows_list) == 0) or (len(ncols_list) == 0):
+ return {}
+
+ max_nrows = max(nrows_list)
+ max_ncols = max(ncols_list)
+
+ num1num2_list = []
+ for subplotspec in subplotspec_list2:
+ rows, cols, num1, num2 = subplotspec.get_geometry()
+ div_row, mod_row = divmod(max_nrows, rows)
+ div_col, mod_col = divmod(max_ncols, cols)
+ if (mod_row != 0) or (mod_col != 0):
+ raise RuntimeError("")
+
+ rowNum1, colNum1 = divmod(num1, cols)
+ if num2 is None:
+ rowNum2, colNum2 = rowNum1, colNum1
+ else:
+ rowNum2, colNum2 = divmod(num2, cols)
+
+ num1num2_list.append((rowNum1 * div_row * max_ncols +
+ colNum1 * div_col,
+ ((rowNum2 + 1) * div_row - 1) * max_ncols +
+ (colNum2 + 1) * div_col - 1))
+
+ kwargs = auto_adjust_subplotpars(fig, renderer,
+ nrows_ncols=(max_nrows, max_ncols),
+ num1num2_list=num1num2_list,
+ subplot_list=subplot_list,
+ ax_bbox_list=ax_bbox_list,
+ pad=pad, h_pad=h_pad, w_pad=w_pad)
+
+ if rect is not None:
+ # if rect is given, the whole subplots area (including
+ # labels) will fit into the rect instead of the
+ # figure. Note that the rect argument of
+        # *auto_adjust_subplotpars* specifies the area that will be
+        # covered by the total area of axes.bbox. Thus we call
+        # auto_adjust_subplotpars twice, with the second run using
+        # adjusted rect parameters.
+
+ left, bottom, right, top = rect
+ if left is not None:
+ left += kwargs["left"]
+ if bottom is not None:
+ bottom += kwargs["bottom"]
+ if right is not None:
+ right -= (1 - kwargs["right"])
+ if top is not None:
+ top -= (1 - kwargs["top"])
+
+ #if h_pad is None: h_pad = pad
+ #if w_pad is None: w_pad = pad
+
+ kwargs = auto_adjust_subplotpars(fig, renderer,
+ nrows_ncols=(max_nrows, max_ncols),
+ num1num2_list=num1num2_list,
+ subplot_list=subplot_list,
+ ax_bbox_list=ax_bbox_list,
+ pad=pad, h_pad=h_pad, w_pad=w_pad,
+ rect=(left, bottom, right, top))
+
+ return kwargs
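+
+# Illustrative sketch (not part of the upstream module): Figure.tight_layout
+# drives this module roughly as follows; the exact call site may differ.
+#
+#     subplotspec_list = get_subplotspec_list(fig.axes)
+#     renderer = get_renderer(fig)
+#     kwargs = get_tight_layout_figure(fig, fig.axes, subplotspec_list,
+#                                      renderer, pad=1.08)
+#     fig.subplots_adjust(**kwargs)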
diff --git a/contrib/python/matplotlib/py2/matplotlib/transforms.py b/contrib/python/matplotlib/py2/matplotlib/transforms.py
new file mode 100644
index 00000000000..65d741ed758
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/transforms.py
@@ -0,0 +1,3025 @@
+"""
+matplotlib includes a framework for arbitrary geometric
+transformations that is used to determine the final position of all
+elements drawn on the canvas.
+
+Transforms are composed into trees of :class:`TransformNode` objects
+whose actual value depends on their children. When the contents of
+children change, their parents are automatically invalidated. The
+next time an invalidated transform is accessed, it is recomputed to
+reflect those changes. This invalidation/caching approach prevents
+unnecessary recomputations of transforms, and contributes to better
+interactive performance.
+
+For example, here is a graph of the transform tree used to plot data
+to the graph:
+
+.. image:: ../_static/transforms.png
+
+The framework can be used for both affine and non-affine
+transformations. However, for speed, we want to use the backend
+renderers to perform affine transformations whenever possible.
+Therefore, it is possible to perform just the affine or non-affine
+part of a transformation on a set of data. The affine is always
+assumed to occur after the non-affine. For any transform::
+
+ full transform == non-affine part + affine part
+
+The backends are not expected to handle non-affine transformations
+themselves.
+"""
+
+# Note: There are a number of places in the code where we use `np.min` or
+# `np.minimum` instead of the builtin `min`, and likewise for `max`. This is
+# done so that `nan`s are propagated, instead of being silently dropped.
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+from matplotlib._path import (affine_transform, count_bboxes_overlapping_bbox,
+ update_path_extents)
+from numpy.linalg import inv
+
+import re
+import weakref
+import warnings
+
+from . import cbook
+from .path import Path
+
+DEBUG = False
+
+
+def _indent_str(obj): # textwrap.indent(str(obj), 4) on Py3.
+ return re.sub("(^|\n)", r"\1 ", str(obj))
+
+
+class TransformNode(object):
+ """
+ :class:`TransformNode` is the base class for anything that
+ participates in the transform tree and needs to invalidate its
+ parents or be invalidated. This includes classes that are not
+ really transforms, such as bounding boxes, since some transforms
+ depend on bounding boxes to compute their values.
+ """
+ _gid = 0
+
+ # Invalidation may affect only the affine part. If the
+ # invalidation was "affine-only", the _invalid member is set to
+ # INVALID_AFFINE_ONLY
+ INVALID_NON_AFFINE = 1
+ INVALID_AFFINE = 2
+ INVALID = INVALID_NON_AFFINE | INVALID_AFFINE
+
+ # Some metadata about the transform, used to determine whether an
+ # invalidation is affine-only
+ is_affine = False
+ is_bbox = False
+
+ pass_through = False
+ """
+ If pass_through is True, all ancestors will always be
+ invalidated, even if 'self' is already invalid.
+ """
+
+ def __init__(self, shorthand_name=None):
+ """
+ Creates a new :class:`TransformNode`.
+
+ Parameters
+ ----------
+ shorthand_name : str
+ A string representing the "name" of the transform. The name carries
+ no significance other than to improve the readability of
+ ``str(transform)`` when DEBUG=True.
+ """
+ self._parents = {}
+
+ # TransformNodes start out as invalid until their values are
+ # computed for the first time.
+ self._invalid = 1
+ self._shorthand_name = shorthand_name or ''
+
+ if DEBUG:
+ def __str__(self):
+            # either just return the name of this TransformNode, or its repr
+ return self._shorthand_name or repr(self)
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+ # turn the dictionary with weak values into a normal dictionary
+ d['_parents'] = dict((k, v()) for (k, v) in
+ six.iteritems(self._parents))
+ return d
+
+ def __setstate__(self, data_dict):
+ self.__dict__ = data_dict
+ # turn the normal dictionary back into a dictionary with weak
+ # values
+ self._parents = dict((k, weakref.ref(v)) for (k, v) in
+ six.iteritems(self._parents) if v is not None)
+
+ def __copy__(self, *args):
+ raise NotImplementedError(
+ "TransformNode instances can not be copied. "
+ "Consider using frozen() instead.")
+ __deepcopy__ = __copy__
+
+ def invalidate(self):
+ """
+        Invalidate this :class:`TransformNode` and trigger an
+        invalidation of its ancestors. Should be called any
+ time the transform changes.
+ """
+ value = self.INVALID
+ if self.is_affine:
+ value = self.INVALID_AFFINE
+ return self._invalidate_internal(value, invalidating_node=self)
+
+ def _invalidate_internal(self, value, invalidating_node):
+ """
+ Called by :meth:`invalidate` and subsequently ascends the transform
+ stack calling each TransformNode's _invalidate_internal method.
+ """
+ # determine if this call will be an extension to the invalidation
+ # status. If not, then a shortcut means that we needn't invoke an
+ # invalidation up the transform stack as it will already have been
+ # invalidated.
+
+        # N.B. This makes the invalidation sticky: once a transform has been
+        # invalidated as NON_AFFINE, it will always be invalidated as
+        # NON_AFFINE, even when triggered with an AFFINE_ONLY invalidation.
+ # In most cases this is not a problem (i.e. for interactive panning and
+ # zooming) and the only side effect will be on performance.
+ status_changed = self._invalid < value
+
+ if self.pass_through or status_changed:
+ self._invalid = value
+
+ for parent in list(six.itervalues(self._parents)):
+ # Dereference the weak reference
+ parent = parent()
+ if parent is not None:
+ parent._invalidate_internal(
+ value=value, invalidating_node=self)
+
+ def set_children(self, *children):
+ """
+ Set the children of the transform, to let the invalidation
+ system know which transforms can invalidate this transform.
+ Should be called from the constructor of any transforms that
+ depend on other transforms.
+ """
+ # Parents are stored as weak references, so that if the
+ # parents are destroyed, references from the children won't
+ # keep them alive.
+ for child in children:
+ child._parents[id(self)] = weakref.ref(self)
+
+ if DEBUG:
+ _set_children = set_children
+
+ def set_children(self, *children):
+ self._set_children(*children)
+ self._children = children
+ set_children.__doc__ = _set_children.__doc__
+
+ def frozen(self):
+ """
+ Returns a frozen copy of this transform node. The frozen copy
+ will not update when its children change. Useful for storing
+ a previously known state of a transform where
+ ``copy.deepcopy()`` might normally be used.
+ """
+ return self
+
+ if DEBUG:
+ def write_graphviz(self, fobj, highlight=[]):
+ """
+ For debugging purposes.
+
+ Writes the transform tree rooted at 'self' to a graphviz "dot"
+ format file. This file can be run through the "dot" utility
+ to produce a graph of the transform tree.
+
+ Affine transforms are marked in blue. Bounding boxes are
+ marked in yellow.
+
+ *fobj*: A Python file-like object
+
+ Once the "dot" file has been created, it can be turned into a
+ png easily with::
+
+ $> dot -Tpng -o $OUTPUT_FILE $DOT_FILE
+
+ """
+ seen = set()
+
+ def recurse(root):
+ if root in seen:
+ return
+ seen.add(root)
+ props = {}
+ label = root.__class__.__name__
+ if root._invalid:
+ label = '[%s]' % label
+ if root in highlight:
+ props['style'] = 'bold'
+ props['shape'] = 'box'
+ props['label'] = '"%s"' % label
+ props = ' '.join(['%s=%s' % (key, val)
+ for key, val
+ in six.iteritems(props)])
+
+ fobj.write('%s [%s];\n' %
+ (hash(root), props))
+
+ if hasattr(root, '_children'):
+ for child in root._children:
+ name = '?'
+ for key, val in six.iteritems(root.__dict__):
+ if val is child:
+ name = key
+ break
+ fobj.write('"%s" -> "%s" [label="%s", fontsize=10];\n'
+ % (hash(root),
+ hash(child),
+ name))
+ recurse(child)
+
+ fobj.write("digraph G {\n")
+ recurse(self)
+ fobj.write("}\n")
+
+
+class BboxBase(TransformNode):
+ """
+ This is the base class of all bounding boxes, and provides
+ read-only access to its data. A mutable bounding box is provided
+ by the :class:`Bbox` class.
+
+ The canonical representation is as two points, with no
+ restrictions on their ordering. Convenience properties are
+ provided to get the left, bottom, right and top edges and width
+ and height, but these are not stored explicitly.
+ """
+ is_bbox = True
+ is_affine = True
+
+ if DEBUG:
+ def _check(points):
+ if isinstance(points, np.ma.MaskedArray):
+ warnings.warn("Bbox bounds are a masked array.")
+ points = np.asarray(points)
+ if (points[1, 0] - points[0, 0] == 0 or
+ points[1, 1] - points[0, 1] == 0):
+ warnings.warn("Singular Bbox.")
+ _check = staticmethod(_check)
+
+ def frozen(self):
+ return Bbox(self.get_points().copy())
+    frozen.__doc__ = TransformNode.frozen.__doc__
+
+ def __array__(self, *args, **kwargs):
+ return self.get_points()
+
+ def is_unit(self):
+ """
+ Returns True if the :class:`Bbox` is the unit bounding box
+ from (0, 0) to (1, 1).
+ """
+ return list(self.get_points().flatten()) == [0., 0., 1., 1.]
+
+ @property
+ def x0(self):
+ """
+ :attr:`x0` is the first of the pair of *x* coordinates that
+ define the bounding box. :attr:`x0` is not guaranteed to be less than
+ :attr:`x1`. If you require that, use :attr:`xmin`.
+ """
+ return self.get_points()[0, 0]
+
+ @property
+ def y0(self):
+ """
+ :attr:`y0` is the first of the pair of *y* coordinates that
+ define the bounding box. :attr:`y0` is not guaranteed to be less than
+ :attr:`y1`. If you require that, use :attr:`ymin`.
+ """
+ return self.get_points()[0, 1]
+
+ @property
+ def x1(self):
+ """
+ :attr:`x1` is the second of the pair of *x* coordinates that
+ define the bounding box. :attr:`x1` is not guaranteed to be greater
+ than :attr:`x0`. If you require that, use :attr:`xmax`.
+ """
+ return self.get_points()[1, 0]
+
+ @property
+ def y1(self):
+ """
+ :attr:`y1` is the second of the pair of *y* coordinates that
+ define the bounding box. :attr:`y1` is not guaranteed to be greater
+ than :attr:`y0`. If you require that, use :attr:`ymax`.
+ """
+ return self.get_points()[1, 1]
+
+ @property
+ def p0(self):
+ """
+ :attr:`p0` is the first pair of (*x*, *y*) coordinates that
+ define the bounding box. It is not guaranteed to be the bottom-left
+ corner. For that, use :attr:`min`.
+ """
+ return self.get_points()[0]
+
+ @property
+ def p1(self):
+ """
+ :attr:`p1` is the second pair of (*x*, *y*) coordinates that
+ define the bounding box. It is not guaranteed to be the top-right
+ corner. For that, use :attr:`max`.
+ """
+ return self.get_points()[1]
+
+ @property
+ def xmin(self):
+ """
+ :attr:`xmin` is the left edge of the bounding box.
+ """
+ return np.min(self.get_points()[:, 0])
+
+ @property
+ def ymin(self):
+ """
+ :attr:`ymin` is the bottom edge of the bounding box.
+ """
+ return np.min(self.get_points()[:, 1])
+
+ @property
+ def xmax(self):
+ """
+ :attr:`xmax` is the right edge of the bounding box.
+ """
+ return np.max(self.get_points()[:, 0])
+
+ @property
+ def ymax(self):
+ """
+ :attr:`ymax` is the top edge of the bounding box.
+ """
+ return np.max(self.get_points()[:, 1])
+
+ @property
+ def min(self):
+ """
+ :attr:`min` is the bottom-left corner of the bounding box.
+ """
+ return np.min(self.get_points(), axis=0)
+
+ @property
+ def max(self):
+ """
+ :attr:`max` is the top-right corner of the bounding box.
+ """
+ return np.max(self.get_points(), axis=0)
+
+ @property
+ def intervalx(self):
+ """
+ :attr:`intervalx` is the pair of *x* coordinates that define
+ the bounding box. It is not guaranteed to be sorted from left to right.
+ """
+ return self.get_points()[:, 0]
+
+ @property
+ def intervaly(self):
+ """
+ :attr:`intervaly` is the pair of *y* coordinates that define
+ the bounding box. It is not guaranteed to be sorted from bottom to
+ top.
+ """
+ return self.get_points()[:, 1]
+
+ @property
+ def width(self):
+ """
+ The width of the bounding box. It may be negative if
+ :attr:`x1` < :attr:`x0`.
+ """
+ points = self.get_points()
+ return points[1, 0] - points[0, 0]
+
+ @property
+ def height(self):
+ """
+ The height of the bounding box. It may be negative if
+ :attr:`y1` < :attr:`y0`.
+ """
+ points = self.get_points()
+ return points[1, 1] - points[0, 1]
+
+ @property
+ def size(self):
+ """
+ The width and height of the bounding box. May be negative,
+ in the same way as :attr:`width` and :attr:`height`.
+ """
+ points = self.get_points()
+ return points[1] - points[0]
+
+ @property
+ def bounds(self):
+ """
+ Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,
+ :attr:`height`).
+ """
+ x0, y0, x1, y1 = self.get_points().flatten()
+ return (x0, y0, x1 - x0, y1 - y0)
+
+ @property
+ def extents(self):
+ """
+ Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`,
+ :attr:`y1`).
+ """
+ return self.get_points().flatten().copy()
+
+ def get_points(self):
+ raise NotImplementedError
+
+ def containsx(self, x):
+ """
+ Returns whether *x* is in the closed (:attr:`x0`, :attr:`x1`) interval.
+ """
+ x0, x1 = self.intervalx
+ return x0 <= x <= x1 or x0 >= x >= x1
+
+ def containsy(self, y):
+ """
+ Returns whether *y* is in the closed (:attr:`y0`, :attr:`y1`) interval.
+ """
+ y0, y1 = self.intervaly
+ return y0 <= y <= y1 or y0 >= y >= y1
+
+ def contains(self, x, y):
+ """
+ Returns whether ``(x, y)`` is in the bounding box or on its edge.
+ """
+ return self.containsx(x) and self.containsy(y)
+
+ def overlaps(self, other):
+ """
+ Returns whether this bounding box overlaps with the other bounding box.
+
+ Parameters
+ ----------
+ other : BboxBase
+ """
+ ax1, ay1, ax2, ay2 = self.extents
+ bx1, by1, bx2, by2 = other.extents
+ if ax2 < ax1:
+ ax2, ax1 = ax1, ax2
+ if ay2 < ay1:
+ ay2, ay1 = ay1, ay2
+ if bx2 < bx1:
+ bx2, bx1 = bx1, bx2
+ if by2 < by1:
+ by2, by1 = by1, by2
+ return ax1 <= bx2 and bx1 <= ax2 and ay1 <= by2 and by1 <= ay2
+
+ def fully_containsx(self, x):
+ """
+ Returns whether *x* is in the open (:attr:`x0`, :attr:`x1`) interval.
+ """
+ x0, x1 = self.intervalx
+ return x0 < x < x1 or x0 > x > x1
+
+ def fully_containsy(self, y):
+ """
+ Returns whether *y* is in the open (:attr:`y0`, :attr:`y1`) interval.
+ """
+ y0, y1 = self.intervaly
+ return y0 < y < y1 or y0 > y > y1
+
+ def fully_contains(self, x, y):
+ """
+        Returns whether ``(x, y)`` is in the bounding box, but not on its edge.
+ """
+ return self.fully_containsx(x) and self.fully_containsy(y)
+
+ def fully_overlaps(self, other):
+ """
+ Returns whether this bounding box overlaps with the other bounding box,
+ not including the edges.
+
+ Parameters
+ ----------
+ other : BboxBase
+ """
+ ax1, ay1, ax2, ay2 = self.extents
+ bx1, by1, bx2, by2 = other.extents
+ if ax2 < ax1:
+ ax2, ax1 = ax1, ax2
+ if ay2 < ay1:
+ ay2, ay1 = ay1, ay2
+ if bx2 < bx1:
+ bx2, bx1 = bx1, bx2
+ if by2 < by1:
+ by2, by1 = by1, by2
+ return ax1 < bx2 and bx1 < ax2 and ay1 < by2 and by1 < ay2
+
+ def transformed(self, transform):
+ """
+ Return a new :class:`Bbox` object, statically transformed by
+ the given transform.
+ """
+ pts = self.get_points()
+ ll, ul, lr = transform.transform(np.array([pts[0],
+ [pts[0, 0], pts[1, 1]], [pts[1, 0], pts[0, 1]]]))
+ return Bbox([ll, [lr[0], ul[1]]])
+
+ def inverse_transformed(self, transform):
+ """
+ Return a new :class:`Bbox` object, statically transformed by
+ the inverse of the given transform.
+ """
+ return self.transformed(transform.inverted())
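+
+    # Illustrative usage (a sketch, not part of the original source)::
+    #
+    #     >>> from matplotlib.transforms import Bbox, Affine2D
+    #     >>> bbox = Bbox.from_extents(0, 0, 1, 2)
+    #     >>> scaled = bbox.transformed(Affine2D().scale(10))
+    #     >>> scaled.bounds                     # (x0, y0, width, height)
+    #     (0.0, 0.0, 10.0, 20.0)
+    #     >>> scaled.inverse_transformed(Affine2D().scale(10)).bounds
+    #     (0.0, 0.0, 1.0, 2.0)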
+
+ coefs = {'C': (0.5, 0.5),
+ 'SW': (0, 0),
+ 'S': (0.5, 0),
+ 'SE': (1.0, 0),
+ 'E': (1.0, 0.5),
+ 'NE': (1.0, 1.0),
+ 'N': (0.5, 1.0),
+ 'NW': (0, 1.0),
+ 'W': (0, 0.5)}
+
+ def anchored(self, c, container=None):
+ """
+ Return a copy of the :class:`Bbox`, shifted to position *c*
+ within a container.
+
+ Parameters
+ ----------
+ c :
+ May be either:
+
+ * A sequence (*cx*, *cy*) where *cx* and *cy* range from 0
+ to 1, where 0 is left or bottom and 1 is right or top
+
+ * a string:
+ - 'C' for centered
+ - 'S' for bottom-center
+              - 'SE' for bottom-right
+              - 'E' for right
+ - etc.
+
+ container : Bbox, optional
+ The box within which the :class:`Bbox` is positioned; it defaults
+ to the initial :class:`Bbox`.
+ """
+ if container is None:
+ container = self
+ l, b, w, h = container.bounds
+ if isinstance(c, six.string_types):
+ cx, cy = self.coefs[c]
+ else:
+ cx, cy = c
+ L, B, W, H = self.bounds
+ return Bbox(self._points +
+ [(l + cx * (w - W)) - L,
+ (b + cy * (h - H)) - B])
+
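+    # Illustrative usage of ``anchored`` (a sketch, not part of the original
+    # source): anchor a unit box at the top-right ('NE') corner of a larger
+    # container::
+    #
+    #     >>> from matplotlib.transforms import Bbox
+    #     >>> small = Bbox.from_bounds(0, 0, 1, 1)
+    #     >>> container = Bbox.from_bounds(0, 0, 10, 10)
+    #     >>> small.anchored('NE', container=container).bounds
+    #     (9.0, 9.0, 1.0, 1.0)
+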
+ def shrunk(self, mx, my):
+ """
+ Return a copy of the :class:`Bbox`, shrunk by the factor *mx*
+ in the *x* direction and the factor *my* in the *y* direction.
+ The lower left corner of the box remains unchanged. Normally
+ *mx* and *my* will be less than 1, but this is not enforced.
+ """
+ w, h = self.size
+ return Bbox([self._points[0],
+ self._points[0] + [mx * w, my * h]])
+
+ def shrunk_to_aspect(self, box_aspect, container=None, fig_aspect=1.0):
+ """
+ Return a copy of the :class:`Bbox`, shrunk so that it is as
+ large as it can be while having the desired aspect ratio,
+ *box_aspect*. If the box coordinates are relative---that
+ is, fractions of a larger box such as a figure---then the
+ physical aspect ratio of that figure is specified with
+ *fig_aspect*, so that *box_aspect* can also be given as a
+ ratio of the absolute dimensions, not the relative dimensions.
+ """
+ if box_aspect <= 0 or fig_aspect <= 0:
+ raise ValueError("'box_aspect' and 'fig_aspect' must be positive")
+ if container is None:
+ container = self
+ w, h = container.size
+ H = w * box_aspect / fig_aspect
+ if H <= h:
+ W = w
+ else:
+ W = h * fig_aspect / box_aspect
+ H = h
+ return Bbox([self._points[0],
+ self._points[0] + (W, H)])
+
+ def splitx(self, *args):
+ """
+ e.g., ``bbox.splitx(f1, f2, ...)``
+
+ Returns a list of new :class:`Bbox` objects formed by
+ splitting the original one with vertical lines at fractional
+ positions *f1*, *f2*, ...
+ """
+ xf = [0] + list(args) + [1]
+ x0, y0, x1, y1 = self.extents
+ w = x1 - x0
+ return [Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]])
+ for xf0, xf1 in zip(xf[:-1], xf[1:])]
+
+ def splity(self, *args):
+ """
+        e.g., ``bbox.splity(f1, f2, ...)``
+
+ Returns a list of new :class:`Bbox` objects formed by
+ splitting the original one with horizontal lines at fractional
+ positions *f1*, *f2*, ...
+ """
+ yf = [0] + list(args) + [1]
+ x0, y0, x1, y1 = self.extents
+ h = y1 - y0
+ return [Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]])
+ for yf0, yf1 in zip(yf[:-1], yf[1:])]
+
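+    # Illustrative usage of ``splitx``/``splity`` (a sketch, not part of the
+    # original source)::
+    #
+    #     >>> from matplotlib.transforms import Bbox
+    #     >>> left, right = Bbox.from_extents(0, 0, 4, 1).splitx(0.25)
+    #     >>> left.bounds, right.bounds
+    #     ((0.0, 0.0, 1.0, 1.0), (1.0, 0.0, 3.0, 1.0))
+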
+ def count_contains(self, vertices):
+ """
+ Count the number of vertices contained in the :class:`Bbox`.
+ Any vertices with a non-finite x or y value are ignored.
+
+ Parameters
+ ----------
+ vertices : Nx2 Numpy array.
+ """
+ if len(vertices) == 0:
+ return 0
+ vertices = np.asarray(vertices)
+ with np.errstate(invalid='ignore'):
+ return (((self.min < vertices) &
+ (vertices < self.max)).all(axis=1).sum())
+
+ def count_overlaps(self, bboxes):
+ """
+ Count the number of bounding boxes that overlap this one.
+
+ Parameters
+ ----------
+ bboxes : sequence of :class:`BboxBase` objects
+ """
+ return count_bboxes_overlapping_bbox(
+ self, np.atleast_3d([np.array(x) for x in bboxes]))
+
+ def expanded(self, sw, sh):
+ """
+ Return a new :class:`Bbox` which is this :class:`Bbox`
+ expanded around its center by the given factors *sw* and
+ *sh*.
+ """
+ width = self.width
+ height = self.height
+ deltaw = (sw * width - width) / 2.0
+ deltah = (sh * height - height) / 2.0
+ a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
+ return Bbox(self._points + a)
+
+ def padded(self, p):
+ """
+ Return a new :class:`Bbox` that is padded on all four sides by
+ the given value.
+ """
+ points = self.get_points()
+ return Bbox(points + [[-p, -p], [p, p]])
+
+ def translated(self, tx, ty):
+ """
+ Return a copy of the :class:`Bbox`, statically translated by
+ *tx* and *ty*.
+ """
+ return Bbox(self._points + (tx, ty))
+
+ def corners(self):
+ """
+ Return an array of points which are the four corners of this
+ rectangle. For example, if this :class:`Bbox` is defined by
+ the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns
+ (*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*).
+ """
+ l, b, r, t = self.get_points().flatten()
+ return np.array([[l, b], [l, t], [r, b], [r, t]])
+
+ def rotated(self, radians):
+ """
+ Return a new bounding box that bounds a rotated version of
+ this bounding box by the given radians. The new bounding box
+ is still aligned with the axes, of course.
+ """
+ corners = self.corners()
+ corners_rotated = Affine2D().rotate(radians).transform(corners)
+ bbox = Bbox.unit()
+ bbox.update_from_data_xy(corners_rotated, ignore=True)
+ return bbox
+
+ @staticmethod
+ def union(bboxes):
+ """
+ Return a :class:`Bbox` that contains all of the given bboxes.
+ """
+ if not len(bboxes):
+ raise ValueError("'bboxes' cannot be empty")
+ x0 = np.min([bbox.xmin for bbox in bboxes])
+ x1 = np.max([bbox.xmax for bbox in bboxes])
+ y0 = np.min([bbox.ymin for bbox in bboxes])
+ y1 = np.max([bbox.ymax for bbox in bboxes])
+ return Bbox([[x0, y0], [x1, y1]])
+
+ @staticmethod
+ def intersection(bbox1, bbox2):
+ """
+ Return the intersection of the two bboxes or None
+ if they do not intersect.
+ """
+ x0 = np.maximum(bbox1.xmin, bbox2.xmin)
+ x1 = np.minimum(bbox1.xmax, bbox2.xmax)
+ y0 = np.maximum(bbox1.ymin, bbox2.ymin)
+ y1 = np.minimum(bbox1.ymax, bbox2.ymax)
+ return Bbox([[x0, y0], [x1, y1]]) if x0 <= x1 and y0 <= y1 else None
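+
+    # Illustrative usage (a sketch, not part of the original source)::
+    #
+    #     >>> from matplotlib.transforms import Bbox
+    #     >>> a = Bbox.from_extents(0, 0, 2, 2)
+    #     >>> b = Bbox.from_extents(1, 1, 3, 3)
+    #     >>> list(Bbox.union([a, b]).extents)
+    #     [0.0, 0.0, 3.0, 3.0]
+    #     >>> list(Bbox.intersection(a, b).extents)
+    #     [1.0, 1.0, 2.0, 2.0]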
+
+
+class Bbox(BboxBase):
+ """
+ A mutable bounding box.
+ """
+
+ def __init__(self, points, **kwargs):
+ """
+ Parameters
+ ----------
+ points : ndarray
+ A 2x2 numpy array of the form ``[[x0, y0], [x1, y1]]``.
+
+ Notes
+ -----
+ If you need to create a :class:`Bbox` object from another form
+ of data, consider the static methods :meth:`unit`,
+ :meth:`from_bounds` and :meth:`from_extents`.
+ """
+ BboxBase.__init__(self, **kwargs)
+ points = np.asarray(points, float)
+ if points.shape != (2, 2):
+ raise ValueError('Bbox points must be of the form '
+ '"[[x0, y0], [x1, y1]]".')
+ self._points = points
+ self._minpos = np.array([np.inf, np.inf])
+ self._ignore = True
+ # it is helpful in some contexts to know if the bbox is a
+ # default or has been mutated; we store the orig points to
+ # support the mutated methods
+ self._points_orig = self._points.copy()
+ if DEBUG:
+ ___init__ = __init__
+
+ def __init__(self, points, **kwargs):
+ self._check(points)
+ self.___init__(points, **kwargs)
+
+ def invalidate(self):
+ self._check(self._points)
+ TransformNode.invalidate(self)
+
+ @staticmethod
+ def unit():
+ """
+ (staticmethod) Create a new unit :class:`Bbox` from (0, 0) to
+ (1, 1).
+ """
+ return Bbox(np.array([[0.0, 0.0], [1.0, 1.0]], float))
+
+ @staticmethod
+ def null():
+ """
+ (staticmethod) Create a new null :class:`Bbox` from (inf, inf) to
+ (-inf, -inf).
+ """
+ return Bbox(np.array([[np.inf, np.inf], [-np.inf, -np.inf]], float))
+
+ @staticmethod
+ def from_bounds(x0, y0, width, height):
+ """
+ (staticmethod) Create a new :class:`Bbox` from *x0*, *y0*,
+ *width* and *height*.
+
+ *width* and *height* may be negative.
+ """
+ return Bbox.from_extents(x0, y0, x0 + width, y0 + height)
+
+ @staticmethod
+ def from_extents(*args):
+ """
+ (staticmethod) Create a new Bbox from *left*, *bottom*,
+ *right* and *top*.
+
+ The *y*-axis increases upwards.
+ """
+ points = np.array(args, dtype=float).reshape(2, 2)
+ return Bbox(points)
+
+ def __format__(self, fmt):
+ return (
+ 'Bbox(x0={0.x0:{1}}, y0={0.y0:{1}}, x1={0.x1:{1}}, y1={0.y1:{1}})'.
+ format(self, fmt))
+
+ def __str__(self):
+ return format(self, '')
+
+ def __repr__(self):
+ return 'Bbox([[{0.x0}, {0.y0}], [{0.x1}, {0.y1}]])'.format(self)
+
+ def ignore(self, value):
+ """
+ Set whether the existing bounds of the box should be ignored
+ by subsequent calls to :meth:`update_from_data_xy`.
+
+ value : bool
+ - When ``True``, subsequent calls to :meth:`update_from_data_xy`
+ will ignore the existing bounds of the :class:`Bbox`.
+
+ - When ``False``, subsequent calls to :meth:`update_from_data_xy`
+ will include the existing bounds of the :class:`Bbox`.
+ """
+ self._ignore = value
+
+ def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
+ """
+ Update the bounds of the :class:`Bbox` based on the passed in
+ data. After updating, the bounds will have positive *width*
+ and *height*; *x0* and *y0* will be the minimal values.
+
+ Parameters
+ ----------
+ path : :class:`~matplotlib.path.Path`
+
+ ignore : bool, optional
+ - when ``True``, ignore the existing bounds of the :class:`Bbox`.
+ - when ``False``, include the existing bounds of the :class:`Bbox`.
+ - when ``None``, use the last value passed to :meth:`ignore`.
+
+ updatex, updatey : bool, optional
+ When ``True``, update the x/y values.
+ """
+ if ignore is None:
+ ignore = self._ignore
+
+ if path.vertices.size == 0:
+ return
+
+ points, minpos, changed = update_path_extents(
+ path, None, self._points, self._minpos, ignore)
+
+ if changed:
+ self.invalidate()
+ if updatex:
+ self._points[:, 0] = points[:, 0]
+ self._minpos[0] = minpos[0]
+ if updatey:
+ self._points[:, 1] = points[:, 1]
+ self._minpos[1] = minpos[1]
+
+ def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
+ """
+ Update the bounds of the :class:`Bbox` based on the passed in
+ data. After updating, the bounds will have positive *width*
+ and *height*; *x0* and *y0* will be the minimal values.
+
+ Parameters
+ ----------
+ xy : ndarray
+ A numpy array of 2D points.
+
+ ignore : bool, optional
+ - When ``True``, ignore the existing bounds of the :class:`Bbox`.
+ - When ``False``, include the existing bounds of the :class:`Bbox`.
+ - When ``None``, use the last value passed to :meth:`ignore`.
+
+ updatex, updatey : bool, optional
+ When ``True``, update the x/y values.
+ """
+ if len(xy) == 0:
+ return
+
+ path = Path(xy)
+ self.update_from_path(path, ignore=ignore,
+ updatex=updatex, updatey=updatey)
+
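+    # Illustrative usage (a sketch, not part of the original source): grow a
+    # null box to fit some data points::
+    #
+    #     >>> import numpy as np
+    #     >>> from matplotlib.transforms import Bbox
+    #     >>> box = Bbox.null()
+    #     >>> box.update_from_data_xy(np.array([[1.0, 2.0], [3.0, -1.0]]))
+    #     >>> box.bounds
+    #     (1.0, -1.0, 2.0, 3.0)
+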
+ @BboxBase.x0.setter
+ def x0(self, val):
+ self._points[0, 0] = val
+ self.invalidate()
+
+ @BboxBase.y0.setter
+ def y0(self, val):
+ self._points[0, 1] = val
+ self.invalidate()
+
+ @BboxBase.x1.setter
+ def x1(self, val):
+ self._points[1, 0] = val
+ self.invalidate()
+
+ @BboxBase.y1.setter
+ def y1(self, val):
+ self._points[1, 1] = val
+ self.invalidate()
+
+ @BboxBase.p0.setter
+ def p0(self, val):
+ self._points[0] = val
+ self.invalidate()
+
+ @BboxBase.p1.setter
+ def p1(self, val):
+ self._points[1] = val
+ self.invalidate()
+
+ @BboxBase.intervalx.setter
+ def intervalx(self, interval):
+ self._points[:, 0] = interval
+ self.invalidate()
+
+ @BboxBase.intervaly.setter
+ def intervaly(self, interval):
+ self._points[:, 1] = interval
+ self.invalidate()
+
+ @BboxBase.bounds.setter
+ def bounds(self, bounds):
+ l, b, w, h = bounds
+ points = np.array([[l, b], [l + w, b + h]], float)
+ if np.any(self._points != points):
+ self._points = points
+ self.invalidate()
+
+ @property
+ def minpos(self):
+ return self._minpos
+
+ @property
+ def minposx(self):
+ return self._minpos[0]
+
+ @property
+ def minposy(self):
+ return self._minpos[1]
+
+ def get_points(self):
+ """
+ Get the points of the bounding box directly as a numpy array
+ of the form: ``[[x0, y0], [x1, y1]]``.
+ """
+ self._invalid = 0
+ return self._points
+
+ def set_points(self, points):
+ """
+ Set the points of the bounding box directly from a numpy array
+ of the form: ``[[x0, y0], [x1, y1]]``. No error checking is
+ performed, as this method is mainly for internal use.
+ """
+ if np.any(self._points != points):
+ self._points = points
+ self.invalidate()
+
+ def set(self, other):
+ """
+ Set this bounding box from the "frozen" bounds of another
+ :class:`Bbox`.
+ """
+ if np.any(self._points != other.get_points()):
+ self._points = other.get_points()
+ self.invalidate()
+
+ def mutated(self):
+ 'Return whether the bbox has changed since init.'
+ return self.mutatedx() or self.mutatedy()
+
+ def mutatedx(self):
+ 'Return whether the x-limits have changed since init.'
+ return (self._points[0, 0] != self._points_orig[0, 0] or
+ self._points[1, 0] != self._points_orig[1, 0])
+
+ def mutatedy(self):
+ 'Return whether the y-limits have changed since init.'
+ return (self._points[0, 1] != self._points_orig[0, 1] or
+ self._points[1, 1] != self._points_orig[1, 1])
+
+
+class TransformedBbox(BboxBase):
+ """
+ A :class:`Bbox` that is automatically transformed by a given
+ transform. When either the child bounding box or transform
+ changes, the bounds of this bbox will update accordingly.
+ """
+ def __init__(self, bbox, transform, **kwargs):
+ """
+ Parameters
+ ----------
+ bbox : :class:`Bbox`
+
+ transform : :class:`Transform`
+ """
+ if not bbox.is_bbox:
+ raise ValueError("'bbox' is not a bbox")
+ if not isinstance(transform, Transform):
+ raise ValueError("'transform' must be an instance of "
+ "'matplotlib.transform.Transform'")
+ if transform.input_dims != 2 or transform.output_dims != 2:
+ raise ValueError(
+ "The input and output dimensions of 'transform' must be 2")
+
+ BboxBase.__init__(self, **kwargs)
+ self._bbox = bbox
+ self._transform = transform
+ self.set_children(bbox, transform)
+ self._points = None
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._bbox),
+ _indent_str(self._transform)))
+
+ def get_points(self):
+ if self._invalid:
+ p = self._bbox.get_points()
+ # Transform all four points, then make a new bounding box
+ # from the result, taking care to make the orientation the
+ # same.
+ points = self._transform.transform(
+ [[p[0, 0], p[0, 1]],
+ [p[1, 0], p[0, 1]],
+ [p[0, 0], p[1, 1]],
+ [p[1, 0], p[1, 1]]])
+ points = np.ma.filled(points, 0.0)
+
+ xs = min(points[:, 0]), max(points[:, 0])
+ if p[0, 0] > p[1, 0]:
+ xs = xs[::-1]
+
+ ys = min(points[:, 1]), max(points[:, 1])
+ if p[0, 1] > p[1, 1]:
+ ys = ys[::-1]
+
+ self._points = np.array([
+ [xs[0], ys[0]],
+ [xs[1], ys[1]]
+ ])
+
+ self._invalid = 0
+ return self._points
+ get_points.__doc__ = Bbox.get_points.__doc__
+
+ if DEBUG:
+ _get_points = get_points
+
+ def get_points(self):
+ points = self._get_points()
+ self._check(points)
+ return points
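+
+    # Illustrative usage (a sketch, not part of the original source): the
+    # bounds are re-derived from the child bbox and the transform on demand::
+    #
+    #     >>> from matplotlib.transforms import Affine2D, Bbox, TransformedBbox
+    #     >>> child = Bbox.from_extents(0, 0, 1, 1)
+    #     >>> tbox = TransformedBbox(child, Affine2D().scale(2))
+    #     >>> tbox.bounds
+    #     (0.0, 0.0, 2.0, 2.0)
+    #     >>> child.x1 = 3     # invalidates tbox through the parent links
+    #     >>> tbox.bounds
+    #     (0.0, 0.0, 6.0, 2.0)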
+
+
+class LockableBbox(BboxBase):
+ """
+ A :class:`Bbox` where some elements may be locked at certain values.
+
+ When the child bounding box changes, the bounds of this bbox will update
+ accordingly with the exception of the locked elements.
+ """
+ def __init__(self, bbox, x0=None, y0=None, x1=None, y1=None, **kwargs):
+ """
+ Parameters
+ ----------
+ bbox : Bbox
+ The child bounding box to wrap.
+
+ x0 : float or None
+ The locked value for x0, or None to leave unlocked.
+
+ y0 : float or None
+ The locked value for y0, or None to leave unlocked.
+
+ x1 : float or None
+ The locked value for x1, or None to leave unlocked.
+
+ y1 : float or None
+ The locked value for y1, or None to leave unlocked.
+
+ """
+ if not bbox.is_bbox:
+ raise ValueError("'bbox' is not a bbox")
+
+ BboxBase.__init__(self, **kwargs)
+ self._bbox = bbox
+ self.set_children(bbox)
+ self._points = None
+ fp = [x0, y0, x1, y1]
+ mask = [val is None for val in fp]
+ self._locked_points = np.ma.array(fp, float, mask=mask).reshape((2, 2))
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._bbox),
+ _indent_str(self._locked_points)))
+
+ def get_points(self):
+ if self._invalid:
+ points = self._bbox.get_points()
+ self._points = np.where(self._locked_points.mask,
+ points,
+ self._locked_points)
+ self._invalid = 0
+ return self._points
+ get_points.__doc__ = Bbox.get_points.__doc__
+
+ if DEBUG:
+ _get_points = get_points
+
+ def get_points(self):
+ points = self._get_points()
+ self._check(points)
+ return points
+
+ @property
+ def locked_x0(self):
+ """
+ float or None: The value used for the locked x0.
+ """
+ if self._locked_points.mask[0, 0]:
+ return None
+ else:
+ return self._locked_points[0, 0]
+
+ @locked_x0.setter
+ def locked_x0(self, x0):
+ self._locked_points.mask[0, 0] = x0 is None
+ self._locked_points.data[0, 0] = x0
+ self.invalidate()
+
+ @property
+ def locked_y0(self):
+ """
+ float or None: The value used for the locked y0.
+ """
+ if self._locked_points.mask[0, 1]:
+ return None
+ else:
+ return self._locked_points[0, 1]
+
+ @locked_y0.setter
+ def locked_y0(self, y0):
+ self._locked_points.mask[0, 1] = y0 is None
+ self._locked_points.data[0, 1] = y0
+ self.invalidate()
+
+ @property
+ def locked_x1(self):
+ """
+ float or None: The value used for the locked x1.
+ """
+ if self._locked_points.mask[1, 0]:
+ return None
+ else:
+ return self._locked_points[1, 0]
+
+ @locked_x1.setter
+ def locked_x1(self, x1):
+ self._locked_points.mask[1, 0] = x1 is None
+ self._locked_points.data[1, 0] = x1
+ self.invalidate()
+
+ @property
+ def locked_y1(self):
+ """
+ float or None: The value used for the locked y1.
+ """
+ if self._locked_points.mask[1, 1]:
+ return None
+ else:
+ return self._locked_points[1, 1]
+
+ @locked_y1.setter
+ def locked_y1(self, y1):
+ self._locked_points.mask[1, 1] = y1 is None
+ self._locked_points.data[1, 1] = y1
+ self.invalidate()
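+
+    # Illustrative usage (a sketch, not part of the original source): lock y1
+    # while the other edges keep tracking the child bbox::
+    #
+    #     >>> from matplotlib.transforms import Bbox, LockableBbox
+    #     >>> child = Bbox.from_extents(0, 0, 1, 1)
+    #     >>> locked = LockableBbox(child, y1=5)
+    #     >>> locked.bounds
+    #     (0.0, 0.0, 1.0, 5.0)
+    #     >>> child.x1 = 2
+    #     >>> locked.bounds
+    #     (0.0, 0.0, 2.0, 5.0)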
+
+
+class Transform(TransformNode):
+ """
+ The base class of all :class:`TransformNode` instances that
+ actually perform a transformation.
+
+ All non-affine transformations should be subclasses of this class.
+ New affine transformations should be subclasses of
+ :class:`Affine2D`.
+
+ Subclasses of this class should override the following members (at
+ minimum):
+
+ - :attr:`input_dims`
+ - :attr:`output_dims`
+ - :meth:`transform`
+ - :attr:`is_separable`
+ - :attr:`has_inverse`
+ - :meth:`inverted` (if :attr:`has_inverse` is True)
+
+ If the transform needs to do something non-standard with
+ :class:`matplotlib.path.Path` objects, such as adding curves
+ where there were once line segments, it should override:
+
+ - :meth:`transform_path`
+ """
+ input_dims = None
+ """
+ The number of input dimensions of this transform.
+ Must be overridden (with integers) in the subclass.
+ """
+
+ output_dims = None
+ """
+ The number of output dimensions of this transform.
+ Must be overridden (with integers) in the subclass.
+ """
+
+ has_inverse = False
+ """True if this transform has a corresponding inverse transform."""
+
+ is_separable = False
+ """True if this transform is separable in the x- and y- dimensions."""
+
+ def __add__(self, other):
+ """
+ Composes two transforms together such that *self* is followed
+ by *other*.
+ """
+ if isinstance(other, Transform):
+ return composite_transform_factory(self, other)
+ raise TypeError(
+ "Can not add Transform to object of type '%s'" % type(other))
+
+ def __radd__(self, other):
+ """
+ Composes two transforms together such that *self* is followed
+ by *other*.
+ """
+ if isinstance(other, Transform):
+ return composite_transform_factory(other, self)
+ raise TypeError(
+ "Can not add Transform to object of type '%s'" % type(other))
+
+ # Equality is based on object identity for `Transform`s (so we don't
+ # override `__eq__`), but some subclasses, such as TransformWrapper &
+ # AffineBase, override this behavior.
+
+ if six.PY2:
+ def __ne__(self, other):
+ return not (self == other)
+
+ def _iter_break_from_left_to_right(self):
+ """
+ Returns an iterator breaking down this transform stack from left to
+ right recursively. If self == ((A, N), A) then the result will be an
+ iterator which yields I : ((A, N), A), followed by A : (N, A),
+ followed by (A, N) : (A), but not ((A, N), A) : I.
+
+ This is equivalent to flattening the stack then yielding
+ ``flat_stack[:i], flat_stack[i:]`` where i=0..(n-1).
+
+ """
+ yield IdentityTransform(), self
+
+ @property
+ def depth(self):
+ """
+ Returns the number of transforms which have been chained
+ together to form this Transform instance.
+
+ .. note::
+
+ For the special case of a Composite transform, the maximum depth
+ of the two is returned.
+
+ """
+ return 1
+
+ def contains_branch(self, other):
+ """
+ Return whether the given transform is a sub-tree of this transform.
+
+ This routine uses transform equality to identify sub-trees, therefore
+ in many situations it is object id which will be used.
+
+ For the case where the given transform represents the whole
+ of this transform, returns True.
+
+ """
+ if self.depth < other.depth:
+ return False
+
+ # check that a subtree is equal to other (starting from self)
+ for _, sub_tree in self._iter_break_from_left_to_right():
+ if sub_tree == other:
+ return True
+ return False
+
+ def contains_branch_seperately(self, other_transform):
+ """
+ Returns whether the given branch is a sub-tree of this transform on
+ each separate dimension.
+
+ A common use for this method is to identify if a transform is a blended
+ transform containing an axes' data transform. e.g.::
+
+ x_isdata, y_isdata = trans.contains_branch_seperately(ax.transData)
+
+ """
+ if self.output_dims != 2:
+ raise ValueError('contains_branch_seperately only supports '
+ 'transforms with 2 output dimensions')
+ # for a non-blended transform each separate dimension is the same, so
+ # just return the appropriate shape.
+ return [self.contains_branch(other_transform)] * 2
+
+ def __sub__(self, other):
+ """
+ Returns a transform stack which goes all the way down self's transform
+ stack, and then ascends back up other's stack. If it can, this is
+ optimised::
+
+ # normally
+            A - B == A + B.inverted()
+
+ # sometimes, when A contains the tree B there is no need to
+ # descend all the way down to the base of A (via B), instead we
+ # can just stop at B.
+
+ (A + B) - (B)^-1 == A
+
+            # similarly, when B contains tree A, we can avoid descending A at
+ # all, basically:
+ A - (A + B) == ((B + A) - A).inverted() or B^-1
+
+ For clarity, the result of ``(A + B) - B + B == (A + B)``.
+
+ """
+ # we only know how to do this operation if other is a Transform.
+ if not isinstance(other, Transform):
+ return NotImplemented
+
+ for remainder, sub_tree in self._iter_break_from_left_to_right():
+ if sub_tree == other:
+ return remainder
+
+ for remainder, sub_tree in other._iter_break_from_left_to_right():
+ if sub_tree == self:
+ if not remainder.has_inverse:
+ raise ValueError("The shortcut cannot be computed since "
+ "other's transform includes a non-invertable component.")
+ return remainder.inverted()
+
+ # if we have got this far, then there was no shortcut possible
+ if other.has_inverse:
+ return self + other.inverted()
+ else:
+ raise ValueError('It is not possible to compute transA - transB '
+ 'since transB cannot be inverted and there is no '
+ 'shortcut possible.')
+
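+    # Illustrative sketch (not part of the original source): ``+`` composes so
+    # that *self* is applied first, and ``-`` walks back up the other stack::
+    #
+    #     >>> from matplotlib.transforms import Affine2D
+    #     >>> a = Affine2D().scale(2)
+    #     >>> b = Affine2D().translate(1, 0)
+    #     >>> tuple((a + b).transform_point((1, 1)))   # scale, then translate
+    #     (3.0, 2.0)
+    #     >>> tuple(((a + b) - b).transform_point((1, 1)))
+    #     (2.0, 2.0)
+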
+ def __array__(self, *args, **kwargs):
+ """
+ Array interface to get at this Transform's affine matrix.
+ """
+ return self.get_affine().get_matrix()
+
+ def transform(self, values):
+ """
+ Performs the transformation on the given array of values.
+
+ Accepts a numpy array of shape (N x :attr:`input_dims`) and
+ returns a numpy array of shape (N x :attr:`output_dims`).
+
+ Alternatively, accepts a numpy array of length :attr:`input_dims`
+ and returns a numpy array of length :attr:`output_dims`.
+ """
+ # Ensure that values is a 2d array (but remember whether
+ # we started with a 1d or 2d array).
+ values = np.asanyarray(values)
+ ndim = values.ndim
+ values = values.reshape((-1, self.input_dims))
+
+ # Transform the values
+ res = self.transform_affine(self.transform_non_affine(values))
+
+ # Convert the result back to the shape of the input values.
+ if ndim == 0:
+ assert not np.ma.is_masked(res) # just to be on the safe side
+ return res[0, 0]
+ if ndim == 1:
+ return res.reshape(-1)
+ elif ndim == 2:
+ return res
+ raise ValueError(
+ "Input values must have shape (N x {dims}) "
+ "or ({dims}).".format(dims=self.input_dims))
+
+ def transform_affine(self, values):
+ """
+ Performs only the affine part of this transformation on the
+ given array of values.
+
+ ``transform(values)`` is always equivalent to
+ ``transform_affine(transform_non_affine(values))``.
+
+ In non-affine transformations, this is generally a no-op. In
+ affine transformations, this is equivalent to
+ ``transform(values)``.
+
+ Accepts a numpy array of shape (N x :attr:`input_dims`) and
+ returns a numpy array of shape (N x :attr:`output_dims`).
+
+ Alternatively, accepts a numpy array of length :attr:`input_dims`
+ and returns a numpy array of length :attr:`output_dims`.
+ """
+ return self.get_affine().transform(values)
+
+ def transform_non_affine(self, values):
+ """
+ Performs only the non-affine part of the transformation.
+
+ ``transform(values)`` is always equivalent to
+ ``transform_affine(transform_non_affine(values))``.
+
+ In non-affine transformations, this is generally equivalent to
+ ``transform(values)``. In affine transformations, this is
+ always a no-op.
+
+ Accepts a numpy array of shape (N x :attr:`input_dims`) and
+ returns a numpy array of shape (N x :attr:`output_dims`).
+
+ Alternatively, accepts a numpy array of length :attr:`input_dims`
+ and returns a numpy array of length :attr:`output_dims`.
+ """
+ return values
+
+ def transform_bbox(self, bbox):
+ """
+ Transform the given bounding box.
+
+ Note, for smarter transforms including caching (a common
+ requirement for matplotlib figures), see :class:`TransformedBbox`.
+ """
+ return Bbox(self.transform(bbox.get_points()))
+
+ def get_affine(self):
+ """
+ Get the affine part of this transform.
+ """
+ return IdentityTransform()
+
+ def get_matrix(self):
+ """
+ Get the Affine transformation array for the affine part
+ of this transform.
+
+ """
+ return self.get_affine().get_matrix()
+
+ def transform_point(self, point):
+ """
+ A convenience function that returns the transformed copy of a
+ single point.
+
+ The point is given as a sequence of length :attr:`input_dims`.
+ The transformed point is returned as a sequence of length
+ :attr:`output_dims`.
+ """
+ if len(point) != self.input_dims:
+ raise ValueError("The length of 'point' must be 'self.input_dims'")
+ return self.transform(np.asarray([point]))[0]
+
+ def transform_path(self, path):
+ """
+ Returns a transformed path.
+
+ *path*: a :class:`~matplotlib.path.Path` instance.
+
+ In some cases, this transform may insert curves into the path
+ that began as line segments.
+ """
+ return self.transform_path_affine(self.transform_path_non_affine(path))
+
+ def transform_path_affine(self, path):
+ """
+ Returns a path, transformed only by the affine part of
+ this transform.
+
+ *path*: a :class:`~matplotlib.path.Path` instance.
+
+ ``transform_path(path)`` is equivalent to
+        ``transform_path_affine(transform_path_non_affine(path))``.
+ """
+ return self.get_affine().transform_path_affine(path)
+
+ def transform_path_non_affine(self, path):
+ """
+ Returns a path, transformed only by the non-affine
+ part of this transform.
+
+ *path*: a :class:`~matplotlib.path.Path` instance.
+
+ ``transform_path(path)`` is equivalent to
+        ``transform_path_affine(transform_path_non_affine(path))``.
+ """
+ x = self.transform_non_affine(path.vertices)
+ return Path._fast_from_codes_and_verts(x, path.codes,
+ {'interpolation_steps': path._interpolation_steps,
+ 'should_simplify': path.should_simplify})
+
+ def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):
+ """
+ Performs transformation on a set of angles anchored at
+ specific locations.
+
+ The *angles* must be a column vector (i.e., numpy array).
+
+ The *pts* must be a two-column numpy array of x,y positions
+ (angle transforms currently only work in 2D). This array must
+ have the same number of rows as *angles*.
+
+ *radians* indicates whether or not input angles are given in
+ radians (True) or degrees (False; the default).
+
+ *pushoff* is the distance to move away from *pts* for
+ determining transformed angles (see discussion of method
+ below).
+
+ The transformed angles are returned in an array with the same
+ size as *angles*.
+
+ The generic version of this method uses a very generic
+ algorithm that transforms *pts*, as well as locations very
+ close to *pts*, to find the angle in the transformed system.
+ """
+ # Must be 2D
+ if self.input_dims != 2 or self.output_dims != 2:
+ raise NotImplementedError('Only defined in 2D')
+
+ if pts.shape[1] != 2:
+ raise ValueError("'pts' must be array with 2 columns for x,y")
+
+ if angles.ndim != 1 or angles.shape[0] != pts.shape[0]:
+ raise ValueError("'angles' must be a column vector and have same "
+ "number of rows as 'pts'")
+
+ # Convert to radians if desired
+ if not radians:
+ angles = angles / 180.0 * np.pi
+
+ # Move a short distance away
+ pts2 = pts + pushoff * np.c_[np.cos(angles), np.sin(angles)]
+
+ # Transform both sets of points
+ tpts = self.transform(pts)
+ tpts2 = self.transform(pts2)
+
+ # Calculate transformed angles
+ d = tpts2 - tpts
+ a = np.arctan2(d[:, 1], d[:, 0])
+
+ # Convert back to degrees if desired
+ if not radians:
+ a = np.rad2deg(a)
+
+ return a
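+
+    # Illustrative usage (a sketch, not part of the original source)::
+    #
+    #     >>> import numpy as np
+    #     >>> from matplotlib.transforms import Affine2D
+    #     >>> flip = Affine2D().scale(1, -1)    # mirror about the x-axis
+    #     >>> angles = flip.transform_angles(np.array([30.0]),
+    #     ...                                np.array([[0.0, 0.0]]))
+    #     >>> np.round(angles, 6)
+    #     array([-30.])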
+
+ def inverted(self):
+ """
+ Return the corresponding inverse transformation.
+
+ The return value of this method should be treated as
+ temporary. An update to *self* does not cause a corresponding
+ update to its inverted copy.
+
+ ``x === self.inverted().transform(self.transform(x))``
+ """
+ raise NotImplementedError()
+
+
+class TransformWrapper(Transform):
+ """
+ A helper class that holds a single child transform and acts
+ equivalently to it.
+
+ This is useful if a node of the transform tree must be replaced at
+ run time with a transform of a different type. This class allows
+ that replacement to correctly trigger invalidation.
+
+ Note that :class:`TransformWrapper` instances must have the same
+ input and output dimensions during their entire lifetime, so the
+ child transform may only be replaced with another child transform
+ of the same dimensions.
+ """
+ pass_through = True
+
+ def __init__(self, child):
+ """
+        *child*: A :class:`Transform` instance. This child may later
+ be replaced with :meth:`set`.
+ """
+ if not isinstance(child, Transform):
+ raise ValueError("'child' must be an instance of "
+ "'matplotlib.transform.Transform'")
+ self._init(child)
+ self.set_children(child)
+
+ def _init(self, child):
+ Transform.__init__(self)
+ self.input_dims = child.input_dims
+ self.output_dims = child.output_dims
+ self._set(child)
+ self._invalid = 0
+
+ def __eq__(self, other):
+ return self._child.__eq__(other)
+
+ # NOTE: Transform.__[gs]etstate__ should be sufficient when using only
+ # Python 3.4+.
+ def __getstate__(self):
+ # only store the child information and parents
+ return {
+ 'child': self._child,
+ 'input_dims': self.input_dims,
+ 'output_dims': self.output_dims,
+ # turn the weak-values dictionary into a normal dictionary
+ 'parents': dict((k, v()) for (k, v) in
+ six.iteritems(self._parents))
+ }
+
+ def __setstate__(self, state):
+ # re-initialise the TransformWrapper with the state's child
+ self._init(state['child'])
+ # The child may not be unpickled yet, so restore its information.
+ self.input_dims = state['input_dims']
+ self.output_dims = state['output_dims']
+ # turn the normal dictionary back into a dictionary with weak
+ # values
+ self._parents = dict((k, weakref.ref(v)) for (k, v) in
+ six.iteritems(state['parents']) if v is not None)
+
+ def __str__(self):
+ return ("{}(\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._child)))
+
+ def frozen(self):
+ return self._child.frozen()
+ frozen.__doc__ = Transform.frozen.__doc__
+
+ def _set(self, child):
+ self._child = child
+
+ self.transform = child.transform
+ self.transform_affine = child.transform_affine
+ self.transform_non_affine = child.transform_non_affine
+ self.transform_path = child.transform_path
+ self.transform_path_affine = child.transform_path_affine
+ self.transform_path_non_affine = child.transform_path_non_affine
+ self.get_affine = child.get_affine
+ self.inverted = child.inverted
+ self.get_matrix = child.get_matrix
+
+ # note we do not wrap other properties here since the transform's
+        # child can be changed with TransformWrapper.set and so checking
+ # is_affine and other such properties may be dangerous.
+
+ def set(self, child):
+ """
+ Replace the current child of this transform with another one.
+
+ The new child must have the same number of input and output
+ dimensions as the current child.
+ """
+ if (child.input_dims != self.input_dims or
+ child.output_dims != self.output_dims):
+ raise ValueError(
+ "The new child must have the same number of input and output "
+ "dimensions as the current child")
+
+ self.set_children(child)
+ self._set(child)
+
+ self._invalid = 0
+ self.invalidate()
+ self._invalid = 0
+
+ def _get_is_affine(self):
+ return self._child.is_affine
+ is_affine = property(_get_is_affine)
+
+ def _get_is_separable(self):
+ return self._child.is_separable
+ is_separable = property(_get_is_separable)
+
+ def _get_has_inverse(self):
+ return self._child.has_inverse
+ has_inverse = property(_get_has_inverse)
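+
+    # Illustrative usage (a sketch, not part of the original source): swap the
+    # wrapped child at run time::
+    #
+    #     >>> from matplotlib.transforms import Affine2D, TransformWrapper
+    #     >>> wrapper = TransformWrapper(Affine2D().scale(2))
+    #     >>> tuple(wrapper.transform_point((1, 1)))
+    #     (2.0, 2.0)
+    #     >>> wrapper.set(Affine2D().translate(5, 0))
+    #     >>> tuple(wrapper.transform_point((1, 1)))
+    #     (6.0, 1.0)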
+
+
+class AffineBase(Transform):
+ """
+ The base class of all affine transformations of any number of
+ dimensions.
+ """
+ is_affine = True
+
+ def __init__(self, *args, **kwargs):
+ Transform.__init__(self, *args, **kwargs)
+ self._inverted = None
+
+ def __array__(self, *args, **kwargs):
+ # optimises the access of the transform matrix vs the superclass
+ return self.get_matrix()
+
+ @staticmethod
+ def _concat(a, b):
+ """
+ Concatenates two transformation matrices (represented as numpy
+ arrays) together.
+ """
+ return np.dot(b, a)
+
+ def __eq__(self, other):
+ if getattr(other, "is_affine", False):
+ return np.all(self.get_matrix() == other.get_matrix())
+ return NotImplemented
+
+ def transform(self, values):
+ return self.transform_affine(values)
+ transform.__doc__ = Transform.transform.__doc__
+
+ def transform_affine(self, values):
+ raise NotImplementedError('Affine subclasses should override this '
+ 'method.')
+ transform_affine.__doc__ = Transform.transform_affine.__doc__
+
+ def transform_non_affine(self, points):
+ return points
+ transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
+
+ def transform_path(self, path):
+ return self.transform_path_affine(path)
+ transform_path.__doc__ = Transform.transform_path.__doc__
+
+ def transform_path_affine(self, path):
+ return Path(self.transform_affine(path.vertices),
+ path.codes, path._interpolation_steps)
+ transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
+
+ def transform_path_non_affine(self, path):
+ return path
+ transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
+
+ def get_affine(self):
+ return self
+ get_affine.__doc__ = Transform.get_affine.__doc__
+
+
+class Affine2DBase(AffineBase):
+ """
+ The base class of all 2D affine transformations.
+
+ 2D affine transformations are performed using a 3x3 numpy array::
+
+ a c e
+ b d f
+ 0 0 1
+
+ This class provides the read-only interface. For a mutable 2D
+ affine transformation, use :class:`Affine2D`.
+
+ Subclasses of this class will generally only need to override a
+ constructor and :meth:`get_matrix` that generates a custom 3x3 matrix.
+ """
+ has_inverse = True
+
+ input_dims = 2
+ output_dims = 2
+
+ def frozen(self):
+ return Affine2D(self.get_matrix().copy())
+ frozen.__doc__ = AffineBase.frozen.__doc__
+
+ def _get_is_separable(self):
+ mtx = self.get_matrix()
+ return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
+ is_separable = property(_get_is_separable)
+
+ def to_values(self):
+ """
+ Return the values of the matrix as a sequence (a,b,c,d,e,f)
+ """
+ mtx = self.get_matrix()
+ return tuple(mtx[:2].swapaxes(0, 1).flatten())
+
+ @staticmethod
+ def matrix_from_values(a, b, c, d, e, f):
+ """
+ (staticmethod) Create a new transformation matrix as a 3x3
+ numpy array of the form::
+
+ a c e
+ b d f
+ 0 0 1
+ """
+ return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], float)
+
+ def transform_affine(self, points):
+ mtx = self.get_matrix()
+ if isinstance(points, np.ma.MaskedArray):
+ tpoints = affine_transform(points.data, mtx)
+ return np.ma.MaskedArray(tpoints, mask=np.ma.getmask(points))
+ return affine_transform(points, mtx)
+
+ def transform_point(self, point):
+ mtx = self.get_matrix()
+ return affine_transform([point], mtx)[0]
+ transform_point.__doc__ = AffineBase.transform_point.__doc__
+
+ if DEBUG:
+ _transform_affine = transform_affine
+
+ def transform_affine(self, points):
+            # The major speed trap here is just converting the points
+            # to an array in the first place. If we can use
+ # more arrays upstream, that should help here.
+ if not isinstance(points, (np.ma.MaskedArray, np.ndarray)):
+ warnings.warn(
+ ('A non-numpy array of type %s was passed in for ' +
+ 'transformation. Please correct this.')
+ % type(points))
+ return self._transform_affine(points)
+ transform_affine.__doc__ = AffineBase.transform_affine.__doc__
+
+ def inverted(self):
+ if self._inverted is None or self._invalid:
+ mtx = self.get_matrix()
+ shorthand_name = None
+ if self._shorthand_name:
+ shorthand_name = '(%s)-1' % self._shorthand_name
+ self._inverted = Affine2D(inv(mtx), shorthand_name=shorthand_name)
+ self._invalid = 0
+ return self._inverted
+ inverted.__doc__ = AffineBase.inverted.__doc__
+
+
+class Affine2D(Affine2DBase):
+ """
+ A mutable 2D affine transformation.
+ """
+
+ def __init__(self, matrix=None, **kwargs):
+ """
+ Initialize an Affine transform from a 3x3 numpy float array::
+
+ a c e
+ b d f
+ 0 0 1
+
+ If *matrix* is None, initialize with the identity transform.
+ """
+ Affine2DBase.__init__(self, **kwargs)
+ if matrix is None:
+ # A bit faster than np.identity(3).
+ matrix = IdentityTransform._mtx.copy()
+ self._mtx = matrix
+ self._invalid = 0
+
+ def __str__(self):
+ return ("{}(\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._mtx)))
+
+ @staticmethod
+ def from_values(a, b, c, d, e, f):
+ """
+ (staticmethod) Create a new Affine2D instance from the given
+ values::
+
+ a c e
+ b d f
+ 0 0 1
+
+ .
+ """
+ return Affine2D(
+ np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], float).reshape((3, 3)))
+
+ def get_matrix(self):
+ """
+ Get the underlying transformation matrix as a 3x3 numpy array::
+
+ a c e
+ b d f
+ 0 0 1
+
+ .
+ """
+ self._invalid = 0
+ return self._mtx
+
+ def set_matrix(self, mtx):
+ """
+ Set the underlying transformation matrix from a 3x3 numpy array::
+
+ a c e
+ b d f
+ 0 0 1
+
+ .
+ """
+ self._mtx = mtx
+ self.invalidate()
+
+ def set(self, other):
+ """
+ Set this transformation from the frozen copy of another
+ :class:`Affine2DBase` object.
+ """
+ if not isinstance(other, Affine2DBase):
+ raise ValueError("'other' must be an instance of "
+ "'matplotlib.transform.Affine2DBase'")
+ self._mtx = other.get_matrix()
+ self.invalidate()
+
+ @staticmethod
+ def identity():
+ """
+ (staticmethod) Return a new :class:`Affine2D` object that is
+ the identity transform.
+
+ Unless this transform will be mutated later on, consider using
+ the faster :class:`IdentityTransform` class instead.
+ """
+ return Affine2D()
+
+ def clear(self):
+ """
+ Reset the underlying matrix to the identity transform.
+ """
+ # A bit faster than np.identity(3).
+ self._mtx = IdentityTransform._mtx.copy()
+ self.invalidate()
+ return self
+
+ def rotate(self, theta):
+ """
+ Add a rotation (in radians) to this transform in place.
+
+ Returns *self*, so this method can easily be chained with more
+ calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
+ and :meth:`scale`.
+ """
+ a = np.cos(theta)
+ b = np.sin(theta)
+ rotate_mtx = np.array([[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],
+ float)
+ self._mtx = np.dot(rotate_mtx, self._mtx)
+ self.invalidate()
+ return self
+
+ def rotate_deg(self, degrees):
+ """
+ Add a rotation (in degrees) to this transform in place.
+
+ Returns *self*, so this method can easily be chained with more
+ calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
+ and :meth:`scale`.
+ """
+ return self.rotate(np.deg2rad(degrees))
+
+ def rotate_around(self, x, y, theta):
+ """
+ Add a rotation (in radians) around the point (x, y) in place.
+
+ Returns *self*, so this method can easily be chained with more
+ calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
+ and :meth:`scale`.
+ """
+ return self.translate(-x, -y).rotate(theta).translate(x, y)
+
+ def rotate_deg_around(self, x, y, degrees):
+ """
+ Add a rotation (in degrees) around the point (x, y) in place.
+
+ Returns *self*, so this method can easily be chained with more
+ calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
+ and :meth:`scale`.
+ """
+ # Cast to float to avoid wraparound issues with uint8's
+ x, y = float(x), float(y)
+ return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)
+
+ def translate(self, tx, ty):
+ """
+ Adds a translation in place.
+
+ Returns *self*, so this method can easily be chained with more
+ calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
+ and :meth:`scale`.
+ """
+ translate_mtx = np.array(
+ [[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]], float)
+ self._mtx = np.dot(translate_mtx, self._mtx)
+ self.invalidate()
+ return self
+
+ def scale(self, sx, sy=None):
+ """
+ Adds a scale in place.
+
+ If *sy* is None, the same scale is applied in both the *x*- and
+ *y*-directions.
+
+ Returns *self*, so this method can easily be chained with more
+ calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
+ and :meth:`scale`.
+ """
+ if sy is None:
+ sy = sx
+ scale_mtx = np.array(
+ [[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]], float)
+ self._mtx = np.dot(scale_mtx, self._mtx)
+ self.invalidate()
+ return self
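+
+    # Illustrative usage (a sketch, not part of the original source): the
+    # mutating helpers return *self* and can be chained; each call is applied
+    # after the operations already recorded::
+    #
+    #     >>> from matplotlib.transforms import Affine2D
+    #     >>> t = Affine2D().translate(1, 0).scale(2)
+    #     >>> tuple(t.transform_point((1, 0)))   # translate first, then scale
+    #     (4.0, 0.0)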
+
+ def skew(self, xShear, yShear):
+ """
+ Adds a skew in place.
+
+ *xShear* and *yShear* are the shear angles along the *x*- and
+ *y*-axes, respectively, in radians.
+
+ Returns *self*, so this method can easily be chained with more
+ calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
+ and :meth:`scale`.
+ """
+ rotX = np.tan(xShear)
+ rotY = np.tan(yShear)
+ skew_mtx = np.array(
+ [[1.0, rotX, 0.0], [rotY, 1.0, 0.0], [0.0, 0.0, 1.0]], float)
+ self._mtx = np.dot(skew_mtx, self._mtx)
+ self.invalidate()
+ return self
+
+ def skew_deg(self, xShear, yShear):
+ """
+ Adds a skew in place.
+
+ *xShear* and *yShear* are the shear angles along the *x*- and
+ *y*-axes, respectively, in degrees.
+
+ Returns *self*, so this method can easily be chained with more
+ calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
+ and :meth:`scale`.
+ """
+ return self.skew(np.deg2rad(xShear), np.deg2rad(yShear))
+
+ def _get_is_separable(self):
+ mtx = self.get_matrix()
+ return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
+ is_separable = property(_get_is_separable)
+
+
+class IdentityTransform(Affine2DBase):
+ """
+ A special class that does one thing, the identity transform, in a
+ fast way.
+ """
+ _mtx = np.identity(3)
+
+ def frozen(self):
+ return self
+ frozen.__doc__ = Affine2DBase.frozen.__doc__
+
+ def __str__(self):
+ return ("{}()"
+ .format(type(self).__name__))
+
+ def get_matrix(self):
+ return self._mtx
+ get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
+
+ def transform(self, points):
+ return np.asanyarray(points)
+ transform.__doc__ = Affine2DBase.transform.__doc__
+
+ transform_affine = transform
+ transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__
+
+ transform_non_affine = transform
+ transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__
+
+ def transform_path(self, path):
+ return path
+ transform_path.__doc__ = Affine2DBase.transform_path.__doc__
+
+ transform_path_affine = transform_path
+ transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__
+
+ transform_path_non_affine = transform_path
+ transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__
+
+ def get_affine(self):
+ return self
+ get_affine.__doc__ = Affine2DBase.get_affine.__doc__
+
+ inverted = get_affine
+ inverted.__doc__ = Affine2DBase.inverted.__doc__
+
+
+class BlendedGenericTransform(Transform):
+ """
+ A "blended" transform uses one transform for the *x*-direction, and
+ another transform for the *y*-direction.
+
+ This "generic" version can handle any given child transform in the
+ *x*- and *y*-directions.
+ """
+ input_dims = 2
+ output_dims = 2
+ is_separable = True
+ pass_through = True
+
+ def __init__(self, x_transform, y_transform, **kwargs):
+ """
+ Create a new "blended" transform using *x_transform* to
+ transform the *x*-axis and *y_transform* to transform the
+ *y*-axis.
+
+ You will generally not call this constructor directly but use
+ the :func:`blended_transform_factory` function instead, which
+ can determine automatically which kind of blended transform to
+ create.
+ """
+ # Here we ask: "Does it blend?"
+
+ Transform.__init__(self, **kwargs)
+ self._x = x_transform
+ self._y = y_transform
+ self.set_children(x_transform, y_transform)
+ self._affine = None
+
+ def __eq__(self, other):
+ # Note, this is an exact copy of BlendedAffine2D.__eq__
+ if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
+ return (self._x == other._x) and (self._y == other._y)
+ elif self._x == self._y:
+ return self._x == other
+ else:
+ return NotImplemented
+
+ def contains_branch_seperately(self, transform):
+ # Note, this is an exact copy of BlendedAffine2D.contains_branch_seperately
+ return self._x.contains_branch(transform), self._y.contains_branch(transform)
+
+ @property
+ def depth(self):
+ return max(self._x.depth, self._y.depth)
+
+ def contains_branch(self, other):
+ # a blended transform cannot possibly contain a branch from two different transforms.
+ return False
+
+ def _get_is_affine(self):
+ return self._x.is_affine and self._y.is_affine
+ is_affine = property(_get_is_affine)
+
+ def _get_has_inverse(self):
+ return self._x.has_inverse and self._y.has_inverse
+ has_inverse = property(_get_has_inverse)
+
+ def frozen(self):
+ return blended_transform_factory(self._x.frozen(), self._y.frozen())
+ frozen.__doc__ = Transform.frozen.__doc__
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._x),
+ _indent_str(self._y)))
+
+ def transform_non_affine(self, points):
+ if self._x.is_affine and self._y.is_affine:
+ return points
+ x = self._x
+ y = self._y
+
+ if x == y and x.input_dims == 2:
+ return x.transform_non_affine(points)
+
+ if x.input_dims == 2:
+ x_points = x.transform_non_affine(points)[:, 0:1]
+ else:
+ x_points = x.transform_non_affine(points[:, 0])
+ x_points = x_points.reshape((len(x_points), 1))
+
+ if y.input_dims == 2:
+ y_points = y.transform_non_affine(points)[:, 1:]
+ else:
+ y_points = y.transform_non_affine(points[:, 1])
+ y_points = y_points.reshape((len(y_points), 1))
+
+ if (isinstance(x_points, np.ma.MaskedArray) or
+ isinstance(y_points, np.ma.MaskedArray)):
+ return np.ma.concatenate((x_points, y_points), 1)
+ else:
+ return np.concatenate((x_points, y_points), 1)
+ transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
+
+ def inverted(self):
+ return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
+ inverted.__doc__ = Transform.inverted.__doc__
+
+ def get_affine(self):
+ if self._invalid or self._affine is None:
+ if self._x == self._y:
+ self._affine = self._x.get_affine()
+ else:
+ x_mtx = self._x.get_affine().get_matrix()
+ y_mtx = self._y.get_affine().get_matrix()
+ # This works because we already know the transforms are
+ # separable, though normally one would want to set b and
+ # c to zero.
+ mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
+ self._affine = Affine2D(mtx)
+ self._invalid = 0
+ return self._affine
+ get_affine.__doc__ = Transform.get_affine.__doc__
+
+
+class BlendedAffine2D(Affine2DBase):
+ """
+ A "blended" transform uses one transform for the *x*-direction, and
+ another transform for the *y*-direction.
+
+ This version is an optimization for the case where both child
+ transforms are of type :class:`Affine2DBase`.
+ """
+ is_separable = True
+
+ def __init__(self, x_transform, y_transform, **kwargs):
+ """
+ Create a new "blended" transform using *x_transform* to
+ transform the *x*-axis and *y_transform* to transform the
+ *y*-axis.
+
+ Both *x_transform* and *y_transform* must be 2D affine
+ transforms.
+
+ You will generally not call this constructor directly but use
+ the :func:`blended_transform_factory` function instead, which
+ can determine automatically which kind of blended transform to
+ create.
+ """
+ is_affine = x_transform.is_affine and y_transform.is_affine
+ is_separable = x_transform.is_separable and y_transform.is_separable
+ is_correct = is_affine and is_separable
+ if not is_correct:
+ raise ValueError("Both *x_transform* and *y_transform* must be 2D "
+ "affine transforms")
+
+ Transform.__init__(self, **kwargs)
+ self._x = x_transform
+ self._y = y_transform
+ self.set_children(x_transform, y_transform)
+
+ Affine2DBase.__init__(self)
+ self._mtx = None
+
+ def __eq__(self, other):
+ # Note, this is an exact copy of BlendedGenericTransform.__eq__
+ if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
+ return (self._x == other._x) and (self._y == other._y)
+ elif self._x == self._y:
+ return self._x == other
+ else:
+ return NotImplemented
+
+ def contains_branch_seperately(self, transform):
+ # Note, this is an exact copy of BlendedGenericTransform.contains_branch_seperately
+ return self._x.contains_branch(transform), self._y.contains_branch(transform)
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._x),
+ _indent_str(self._y)))
+
+ def get_matrix(self):
+ if self._invalid:
+ if self._x == self._y:
+ self._mtx = self._x.get_matrix()
+ else:
+ x_mtx = self._x.get_matrix()
+ y_mtx = self._y.get_matrix()
+ # This works because we already know the transforms are
+ # separable, though normally one would want to set b and
+ # c to zero.
+ self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
+ self._inverted = None
+ self._invalid = 0
+ return self._mtx
+ get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
+
+
+def blended_transform_factory(x_transform, y_transform):
+ """
+ Create a new "blended" transform using *x_transform* to transform
+ the *x*-axis and *y_transform* to transform the *y*-axis.
+
+ A faster version of the blended transform is returned for the case
+ where both child transforms are affine.
+ """
+ if (isinstance(x_transform, Affine2DBase)
+ and isinstance(y_transform, Affine2DBase)):
+ return BlendedAffine2D(x_transform, y_transform)
+ return BlendedGenericTransform(x_transform, y_transform)
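+
+# Rough usage sketch (``ax`` is an assumed Axes instance): blend data
+# coordinates in x with axes-fraction coordinates in y, the usual idiom for
+# annotations pinned to a fixed height of the axes:
+#   trans = blended_transform_factory(ax.transData, ax.transAxes)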
+
+
+class CompositeGenericTransform(Transform):
+ """
+ A composite transform formed by applying transform *a* then
+ transform *b*.
+
+ This "generic" version can handle any two arbitrary
+ transformations.
+ """
+ pass_through = True
+
+ def __init__(self, a, b, **kwargs):
+ """
+ Create a new composite transform that is the result of
+ applying transform *a* then transform *b*.
+
+ You will generally not call this constructor directly but use
+ the :func:`composite_transform_factory` function instead,
+ which can automatically choose the best kind of composite
+ transform instance to create.
+ """
+ if a.output_dims != b.input_dims:
+ raise ValueError("The output dimension of 'a' must be equal to "
+ "the input dimensions of 'b'")
+ self.input_dims = a.input_dims
+ self.output_dims = b.output_dims
+
+ Transform.__init__(self, **kwargs)
+ self._a = a
+ self._b = b
+ self.set_children(a, b)
+
+ is_affine = property(lambda self: self._a.is_affine and self._b.is_affine)
+
+ def frozen(self):
+ self._invalid = 0
+ frozen = composite_transform_factory(self._a.frozen(), self._b.frozen())
+ if not isinstance(frozen, CompositeGenericTransform):
+ return frozen.frozen()
+ return frozen
+ frozen.__doc__ = Transform.frozen.__doc__
+
+ def _invalidate_internal(self, value, invalidating_node):
+ # In some cases for a composite transform, an invalidating call to AFFINE_ONLY needs
+ # to be extended to invalidate the NON_AFFINE part too. These cases are when the right
+ # hand transform is non-affine and either:
+ # (a) the left hand transform is non-affine
+ # (b) it is the left hand node which has triggered the invalidation
+ if value == Transform.INVALID_AFFINE \
+ and not self._b.is_affine \
+ and (not self._a.is_affine or invalidating_node is self._a):
+
+ value = Transform.INVALID
+
+ Transform._invalidate_internal(self, value=value,
+ invalidating_node=invalidating_node)
+
+ def __eq__(self, other):
+ if isinstance(other, (CompositeGenericTransform, CompositeAffine2D)):
+ return self is other or (self._a == other._a and self._b == other._b)
+ else:
+ return False
+
+ def _iter_break_from_left_to_right(self):
+ for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():
+ yield lh_compliment, rh_compliment + self._b
+ for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():
+ yield self._a + lh_compliment, rh_compliment
+
+ @property
+ def depth(self):
+ return self._a.depth + self._b.depth
+
+ def _get_is_affine(self):
+ return self._a.is_affine and self._b.is_affine
+ is_affine = property(_get_is_affine)
+
+ def _get_is_separable(self):
+ return self._a.is_separable and self._b.is_separable
+ is_separable = property(_get_is_separable)
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._a),
+ _indent_str(self._b)))
+
+ def transform_affine(self, points):
+ return self.get_affine().transform(points)
+ transform_affine.__doc__ = Transform.transform_affine.__doc__
+
+ def transform_non_affine(self, points):
+ if self._a.is_affine and self._b.is_affine:
+ return points
+ elif not self._a.is_affine and self._b.is_affine:
+ return self._a.transform_non_affine(points)
+ else:
+ return self._b.transform_non_affine(
+ self._a.transform(points))
+ transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
+
+ def transform_path_non_affine(self, path):
+ if self._a.is_affine and self._b.is_affine:
+ return path
+ elif not self._a.is_affine and self._b.is_affine:
+ return self._a.transform_path_non_affine(path)
+ else:
+ return self._b.transform_path_non_affine(
+ self._a.transform_path(path))
+ transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
+
+ def get_affine(self):
+ if not self._b.is_affine:
+ return self._b.get_affine()
+ else:
+ return Affine2D(np.dot(self._b.get_affine().get_matrix(),
+ self._a.get_affine().get_matrix()))
+ get_affine.__doc__ = Transform.get_affine.__doc__
+
+ def inverted(self):
+ return CompositeGenericTransform(self._b.inverted(), self._a.inverted())
+ inverted.__doc__ = Transform.inverted.__doc__
+
+ def _get_has_inverse(self):
+ return self._a.has_inverse and self._b.has_inverse
+ has_inverse = property(_get_has_inverse)
+
+
+class CompositeAffine2D(Affine2DBase):
+ """
+ A composite transform formed by applying transform *a* then transform *b*.
+
+ This version is an optimization that handles the case where both *a*
+ and *b* are 2D affines.
+ """
+ def __init__(self, a, b, **kwargs):
+ """
+ Create a new composite transform that is the result of
+ applying transform *a* then transform *b*.
+
+ Both *a* and *b* must be instances of :class:`Affine2DBase`.
+
+ You will generally not call this constructor directly but use
+ the :func:`composite_transform_factory` function instead,
+ which can automatically choose the best kind of composite
+ transform instance to create.
+ """
+ if not a.is_affine or not b.is_affine:
+ raise ValueError("'a' and 'b' must be affine transforms")
+ if a.output_dims != b.input_dims:
+ raise ValueError("The output dimension of 'a' must be equal to "
+ "the input dimensions of 'b'")
+ self.input_dims = a.input_dims
+ self.output_dims = b.output_dims
+
+ Affine2DBase.__init__(self, **kwargs)
+ self._a = a
+ self._b = b
+ self.set_children(a, b)
+ self._mtx = None
+
+ @property
+ def depth(self):
+ return self._a.depth + self._b.depth
+
+ def _iter_break_from_left_to_right(self):
+ for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():
+ yield lh_compliment, rh_compliment + self._b
+ for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():
+ yield self._a + lh_compliment, rh_compliment
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._a),
+ _indent_str(self._b)))
+
+ def get_matrix(self):
+ if self._invalid:
+ self._mtx = np.dot(
+ self._b.get_matrix(),
+ self._a.get_matrix())
+ self._inverted = None
+ self._invalid = 0
+ return self._mtx
+ get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
+
+
+def composite_transform_factory(a, b):
+ """
+ Create a new composite transform that is the result of applying
+ transform a then transform b.
+
+ Shortcut versions of the blended transform are provided for the
+ case where both child transforms are affine, or one or the other
+ is the identity transform.
+
+ Composite transforms may also be created using the '+' operator,
+ e.g.::
+
+ c = a + b
+ """
+ # check to see if any of a or b are IdentityTransforms. We use
+ # isinstance here to guarantee that the transforms will *always*
+ # be IdentityTransforms. Since TransformWrappers are mutable,
+ # use of equality here would be wrong.
+ if isinstance(a, IdentityTransform):
+ return b
+ elif isinstance(b, IdentityTransform):
+ return a
+ elif isinstance(a, Affine2D) and isinstance(b, Affine2D):
+ return CompositeAffine2D(a, b)
+ return CompositeGenericTransform(a, b)
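+
+# Rough usage sketch: with two Affine2D inputs the factory returns the
+# optimized CompositeAffine2D; this is equivalent to the '+' form shown in
+# the docstring above:
+#   c = composite_transform_factory(Affine2D().scale(2.0),
+#                                   Affine2D().translate(1.0, 0.0))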
+
+
+class BboxTransform(Affine2DBase):
+ """
+ :class:`BboxTransform` linearly transforms points from one
+ :class:`Bbox` to another :class:`Bbox`.
+ """
+ is_separable = True
+
+ def __init__(self, boxin, boxout, **kwargs):
+ """
+ Create a new :class:`BboxTransform` that linearly transforms
+ points from *boxin* to *boxout*.
+ """
+ if not boxin.is_bbox or not boxout.is_bbox:
+ raise ValueError("'boxin' and 'boxout' must be bbox")
+
+ Affine2DBase.__init__(self, **kwargs)
+ self._boxin = boxin
+ self._boxout = boxout
+ self.set_children(boxin, boxout)
+ self._mtx = None
+ self._inverted = None
+
+ def __str__(self):
+ return ("{}(\n"
+ "{},\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._boxin),
+ _indent_str(self._boxout)))
+
+ def get_matrix(self):
+ if self._invalid:
+ inl, inb, inw, inh = self._boxin.bounds
+ outl, outb, outw, outh = self._boxout.bounds
+ x_scale = outw / inw
+ y_scale = outh / inh
+ if DEBUG and (x_scale == 0 or y_scale == 0):
+ raise ValueError("Transforming from or to a singular bounding box.")
+ self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale+outl)],
+ [0.0 , y_scale, (-inb*y_scale+outb)],
+ [0.0 , 0.0 , 1.0 ]],
+ float)
+ self._inverted = None
+ self._invalid = 0
+ return self._mtx
+ get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
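+
+# Worked example (a sketch, not taken from the library docs): mapping
+# Bbox.from_extents(0, 0, 1, 1) onto Bbox.from_extents(10, 20, 30, 60) gives
+# x_scale = 20, y_scale = 40 and offsets (10, 20), i.e. the matrix
+# [[20, 0, 10], [0, 40, 20], [0, 0, 1]].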
+
+
+class BboxTransformTo(Affine2DBase):
+ """
+ :class:`BboxTransformTo` is a transformation that linearly
+ transforms points from the unit bounding box to a given
+ :class:`Bbox`.
+ """
+ is_separable = True
+
+ def __init__(self, boxout, **kwargs):
+ """
+ Create a new :class:`BboxTransformTo` that linearly transforms
+ points from the unit bounding box to *boxout*.
+ """
+ if not boxout.is_bbox:
+ raise ValueError("'boxout' must be bbox")
+
+ Affine2DBase.__init__(self, **kwargs)
+ self._boxout = boxout
+ self.set_children(boxout)
+ self._mtx = None
+ self._inverted = None
+
+ def __str__(self):
+ return ("{}(\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._boxout)))
+
+ def get_matrix(self):
+ if self._invalid:
+ outl, outb, outw, outh = self._boxout.bounds
+ if DEBUG and (outw == 0 or outh == 0):
+ raise ValueError("Transforming to a singular bounding box.")
+ self._mtx = np.array([[outw, 0.0, outl],
+ [ 0.0, outh, outb],
+ [ 0.0, 0.0, 1.0]],
+ float)
+ self._inverted = None
+ self._invalid = 0
+ return self._mtx
+ get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
+
+
+class BboxTransformToMaxOnly(BboxTransformTo):
+ """
+ :class:`BboxTransformToMaxOnly` is a transformation that linearly
+ transforms points from the unit bounding box to a given
+ :class:`Bbox` with a fixed lower left of (0, 0).
+ """
+ def get_matrix(self):
+ if self._invalid:
+ xmax, ymax = self._boxout.max
+ if DEBUG and (xmax == 0 or ymax == 0):
+ raise ValueError("Transforming to a singular bounding box.")
+ self._mtx = np.array([[xmax, 0.0, 0.0],
+ [ 0.0, ymax, 0.0],
+ [ 0.0, 0.0, 1.0]],
+ float)
+ self._inverted = None
+ self._invalid = 0
+ return self._mtx
+ get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
+
+
+class BboxTransformFrom(Affine2DBase):
+ """
+ :class:`BboxTransformFrom` linearly transforms points from a given
+ :class:`Bbox` to the unit bounding box.
+ """
+ is_separable = True
+
+ def __init__(self, boxin, **kwargs):
+ if not boxin.is_bbox:
+ raise ValueError("'boxin' must be bbox")
+
+ Affine2DBase.__init__(self, **kwargs)
+ self._boxin = boxin
+ self.set_children(boxin)
+ self._mtx = None
+ self._inverted = None
+
+ def __str__(self):
+ return ("{}(\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._boxin)))
+
+ def get_matrix(self):
+ if self._invalid:
+ inl, inb, inw, inh = self._boxin.bounds
+ if DEBUG and (inw == 0 or inh == 0):
+ raise ValueError("Transforming from a singular bounding box.")
+ x_scale = 1.0 / inw
+ y_scale = 1.0 / inh
+ self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale)],
+ [0.0 , y_scale, (-inb*y_scale)],
+ [0.0 , 0.0 , 1.0 ]],
+ float)
+ self._inverted = None
+ self._invalid = 0
+ return self._mtx
+ get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
+
+
+class ScaledTranslation(Affine2DBase):
+ """
+ A transformation that translates by *xt* and *yt*, after *xt* and *yt*
+ have been transformed by the given transform *scale_trans*.
+ """
+ def __init__(self, xt, yt, scale_trans, **kwargs):
+ Affine2DBase.__init__(self, **kwargs)
+ self._t = (xt, yt)
+ self._scale_trans = scale_trans
+ self.set_children(scale_trans)
+ self._mtx = None
+ self._inverted = None
+
+ def __str__(self):
+ return ("{}(\n"
+ "{})"
+ .format(type(self).__name__,
+ _indent_str(self._t)))
+
+ def get_matrix(self):
+ if self._invalid:
+ xt, yt = self._scale_trans.transform_point(self._t)
+ self._mtx = np.array([[1.0, 0.0, xt],
+ [0.0, 1.0, yt],
+ [0.0, 0.0, 1.0]],
+ float)
+ self._invalid = 0
+ self._inverted = None
+ return self._mtx
+ get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
+
+
+class TransformedPath(TransformNode):
+ """
+ A :class:`TransformedPath` caches a non-affine transformed copy of
+ the :class:`~matplotlib.path.Path`. This cached copy is
+ automatically updated when the non-affine part of the transform
+ changes.
+
+ .. note::
+
+ Paths are considered immutable by this class. Any update to the
+ path's vertices/codes will not trigger a transform recomputation.
+
+ """
+ def __init__(self, path, transform):
+ """
+ Create a new :class:`TransformedPath` from the given
+ :class:`~matplotlib.path.Path` and :class:`Transform`.
+ """
+ if not isinstance(transform, Transform):
+ raise ValueError("'transform' must be an instance of "
+ "'matplotlib.transform.Transform'")
+ TransformNode.__init__(self)
+
+ self._path = path
+ self._transform = transform
+ self.set_children(transform)
+ self._transformed_path = None
+ self._transformed_points = None
+
+ def _revalidate(self):
+ # only recompute if the invalidation includes the non_affine part of the transform
+ if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
+ or self._transformed_path is None):
+ self._transformed_path = \
+ self._transform.transform_path_non_affine(self._path)
+ self._transformed_points = \
+ Path._fast_from_codes_and_verts(
+ self._transform.transform_non_affine(self._path.vertices),
+ None,
+ {'interpolation_steps': self._path._interpolation_steps,
+ 'should_simplify': self._path.should_simplify})
+ self._invalid = 0
+
+ def get_transformed_points_and_affine(self):
+ """
+ Return a copy of the child path, with the non-affine part of
+ the transform already applied, along with the affine part of
+ the path necessary to complete the transformation. Unlike
+ :meth:`get_transformed_path_and_affine`, no interpolation will
+ be performed.
+ """
+ self._revalidate()
+ return self._transformed_points, self.get_affine()
+
+ def get_transformed_path_and_affine(self):
+ """
+ Return a copy of the child path, with the non-affine part of
+ the transform already applied, along with the affine part of
+ the path necessary to complete the transformation.
+ """
+ self._revalidate()
+ return self._transformed_path, self.get_affine()
+
+ def get_fully_transformed_path(self):
+ """
+ Return a fully-transformed copy of the child path.
+ """
+ self._revalidate()
+ return self._transform.transform_path_affine(self._transformed_path)
+
+ def get_affine(self):
+ return self._transform.get_affine()
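+
+ # Rough usage sketch (``path`` and ``ax`` are assumed to exist): a drawing
+ # routine would typically do
+ #   tpath = TransformedPath(path, ax.transData)
+ #   verts, affine = tpath.get_transformed_path_and_affine()
+ # so the expensive non-affine work is cached across pans and zooms that
+ # only change the affine part of the transform.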
+
+
+class TransformedPatchPath(TransformedPath):
+ """
+ A :class:`TransformedPatchPath` caches a non-affine transformed copy of
+ the :class:`~matplotlib.path.Patch`. This cached copy is automatically
+ updated when the non-affine part of the transform or the patch changes.
+ """
+ def __init__(self, patch):
+ """
+ Create a new :class:`TransformedPatchPath` from the given
+ :class:`~matplotlib.path.Patch`.
+ """
+ TransformNode.__init__(self)
+
+ transform = patch.get_transform()
+ self._patch = patch
+ self._transform = transform
+ self.set_children(transform)
+ self._path = patch.get_path()
+ self._transformed_path = None
+ self._transformed_points = None
+
+ def _revalidate(self):
+ patch_path = self._patch.get_path()
+ # Only recompute if the invalidation includes the non_affine part of
+ # the transform, or the Patch's Path has changed.
+ if (self._transformed_path is None or self._path != patch_path or
+ (self._invalid & self.INVALID_NON_AFFINE ==
+ self.INVALID_NON_AFFINE)):
+ self._path = patch_path
+ self._transformed_path = \
+ self._transform.transform_path_non_affine(patch_path)
+ self._transformed_points = \
+ Path._fast_from_codes_and_verts(
+ self._transform.transform_non_affine(patch_path.vertices),
+ None,
+ {'interpolation_steps': patch_path._interpolation_steps,
+ 'should_simplify': patch_path.should_simplify})
+ self._invalid = 0
+
+
+def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
+ """
+ Modify the endpoints of a range as needed to avoid singularities.
+
+ Parameters
+ ----------
+ vmin, vmax : float
+ The initial endpoints.
+ expander : float, optional, default: 0.001
+ Fractional amount by which *vmin* and *vmax* are expanded if
+ the original interval is too small, based on *tiny*.
+ tiny : float, optional, default: 1e-15
+ Threshold for the ratio of the interval to the maximum absolute
+ value of its endpoints. If the interval is smaller than
+ this, it will be expanded. This value should be around
+ 1e-15 or larger; otherwise the interval will be approaching
+ the double precision resolution limit.
+ increasing : bool, optional, default: True
+ If True, swap *vmin*, *vmax* if *vmin* > *vmax*.
+
+ Returns
+ -------
+ vmin, vmax : float
+ Endpoints, expanded and/or swapped if necessary.
+ If either input is inf or NaN, or if both inputs are 0 or very
+ close to zero, it returns -*expander*, *expander*.
+ """
+
+ if (not np.isfinite(vmin)) or (not np.isfinite(vmax)):
+ return -expander, expander
+
+ swapped = False
+ if vmax < vmin:
+ vmin, vmax = vmax, vmin
+ swapped = True
+
+ maxabsvalue = max(abs(vmin), abs(vmax))
+ if maxabsvalue < (1e6 / tiny) * np.finfo(float).tiny:
+ vmin = -expander
+ vmax = expander
+
+ elif vmax - vmin <= maxabsvalue * tiny:
+ if vmax == 0 and vmin == 0:
+ vmin = -expander
+ vmax = expander
+ else:
+ vmin -= expander*abs(vmin)
+ vmax += expander*abs(vmax)
+
+ if swapped and not increasing:
+ vmin, vmax = vmax, vmin
+ return vmin, vmax
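+
+# Rough usage sketch: nonsingular(0.0, 0.0) returns (-0.001, 0.001), and
+# nonsingular(5.0, 5.0) expands the degenerate interval to (4.995, 5.005).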
+
+
+def interval_contains(interval, val):
+ """
+ Check, inclusively, whether an interval includes a given value.
+
+ Parameters
+ ----------
+ interval : sequence of scalar
+ A sequence of length 2 giving the endpoints that define the interval.
+ val : scalar
+ Value to check for membership in the interval.
+
+ Returns
+ -------
+ bool
+ True if *val* lies within the interval, endpoints included.
+ """
+ a, b = interval
+ return a <= val <= b or a >= val >= b
+
+
+def interval_contains_open(interval, val):
+ """
+ Check, excluding endpoints, whether an interval includes a given value.
+
+ Parameters
+ ----------
+ interval : sequence of scalar
+ A sequence of length 2 giving the endpoints that define the interval.
+ val : scalar
+ Value to check for membership in the interval.
+
+ Returns
+ -------
+ bool
+ True if *val* lies strictly within the interval, endpoints excluded.
+ """
+ a, b = interval
+ return a < val < b or a > val > b
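+
+# For example, interval_contains((0, 1), 1) is True, while
+# interval_contains_open((0, 1), 1) is False.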
+
+
+def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
+ """
+ Return a new transform with an added offset.
+
+ Parameters
+ ----------
+ trans : :class:`Transform` instance
+ Any transform to which the offset will be applied.
+ fig : :class:`~matplotlib.figure.Figure`, optional, default: None
+ Current figure. May be None if *units* is 'dots'.
+ x, y : float, optional, default: 0.0
+ Specifies the offset to apply.
+ units : {'inches', 'points', 'dots'}, optional
+ Units of the offset.
+
+ Returns
+ -------
+ trans : :class:`Transform` instance
+ Transform with the offset applied.
+ """
+ if units == 'dots':
+ return trans + Affine2D().translate(x, y)
+ if fig is None:
+ raise ValueError('For units of inches or points a fig kwarg is needed')
+ if units == 'points':
+ x /= 72.0
+ y /= 72.0
+ elif not units == 'inches':
+ raise ValueError('units must be dots, points, or inches')
+ return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
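+
+# Rough usage sketch (``fig`` and ``ax`` are assumed to exist): place text
+# 5 points to the right of each data point:
+#   trans = offset_copy(ax.transData, fig=fig, x=5, y=0, units='points')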
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/__init__.py b/contrib/python/matplotlib/py2/matplotlib/tri/__init__.py
new file mode 100644
index 00000000000..7ea09f8b3f1
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/__init__.py
@@ -0,0 +1,16 @@
+"""
+Unstructured triangular grid functions.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from .triangulation import *
+from .tricontour import *
+from .tritools import *
+from .trifinder import *
+from .triinterpolate import *
+from .trirefine import *
+from .tripcolor import *
+from .triplot import *
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/_tri.cpp b/contrib/python/matplotlib/py2/matplotlib/tri/_tri.cpp
new file mode 100644
index 00000000000..a27beff7f99
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/_tri.cpp
@@ -0,0 +1,1999 @@
+/* This file contains liberal use of asserts to assist code development and
+ * debugging. Standard matplotlib builds disable asserts so they cause no
+ * performance reduction. To enable the asserts, you need to undefine the
+ * NDEBUG macro, which is achieved by adding the following
+ * undef_macros=['NDEBUG']
+ * to the appropriate make_extension call in setupext.py, and then rebuilding.
+ */
+#define NO_IMPORT_ARRAY
+
+#include "_tri.h"
+
+#include <algorithm>
+#include <set>
+
+#define MOVETO 1
+#define LINETO 2
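+
+// Note (an assumption based on the values used by matplotlib.path.Path):
+// MOVETO and LINETO match the Path.MOVETO and Path.LINETO vertex codes
+// emitted in the 'kinds' array returned to Python.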
+
+
+
+TriEdge::TriEdge()
+ : tri(-1), edge(-1)
+{}
+
+TriEdge::TriEdge(int tri_, int edge_)
+ : tri(tri_), edge(edge_)
+{}
+
+bool TriEdge::operator<(const TriEdge& other) const
+{
+ if (tri != other.tri)
+ return tri < other.tri;
+ else
+ return edge < other.edge;
+}
+
+bool TriEdge::operator==(const TriEdge& other) const
+{
+ return tri == other.tri && edge == other.edge;
+}
+
+bool TriEdge::operator!=(const TriEdge& other) const
+{
+ return !operator==(other);
+}
+
+std::ostream& operator<<(std::ostream& os, const TriEdge& tri_edge)
+{
+ return os << tri_edge.tri << ' ' << tri_edge.edge;
+}
+
+
+
+XY::XY()
+{}
+
+XY::XY(const double& x_, const double& y_)
+ : x(x_), y(y_)
+{}
+
+double XY::angle() const
+{
+ return atan2(y, x);
+}
+
+double XY::cross_z(const XY& other) const
+{
+ return x*other.y - y*other.x;
+}
+
+bool XY::is_right_of(const XY& other) const
+{
+ if (x == other.x)
+ return y > other.y;
+ else
+ return x > other.x;
+}
+
+bool XY::operator==(const XY& other) const
+{
+ return x == other.x && y == other.y;
+}
+
+bool XY::operator!=(const XY& other) const
+{
+ return x != other.x || y != other.y;
+}
+
+XY XY::operator*(const double& multiplier) const
+{
+ return XY(x*multiplier, y*multiplier);
+}
+
+const XY& XY::operator+=(const XY& other)
+{
+ x += other.x;
+ y += other.y;
+ return *this;
+}
+
+const XY& XY::operator-=(const XY& other)
+{
+ x -= other.x;
+ y -= other.y;
+ return *this;
+}
+
+XY XY::operator+(const XY& other) const
+{
+ return XY(x + other.x, y + other.y);
+}
+
+XY XY::operator-(const XY& other) const
+{
+ return XY(x - other.x, y - other.y);
+}
+
+std::ostream& operator<<(std::ostream& os, const XY& xy)
+{
+ return os << '(' << xy.x << ' ' << xy.y << ')';
+}
+
+
+
+XYZ::XYZ(const double& x_, const double& y_, const double& z_)
+ : x(x_), y(y_), z(z_)
+{}
+
+XYZ XYZ::cross(const XYZ& other) const
+{
+ return XYZ(y*other.z - z*other.y,
+ z*other.x - x*other.z,
+ x*other.y - y*other.x);
+}
+
+double XYZ::dot(const XYZ& other) const
+{
+ return x*other.x + y*other.y + z*other.z;
+}
+
+double XYZ::length_squared() const
+{
+ return x*x + y*y + z*z;
+}
+
+XYZ XYZ::operator-(const XYZ& other) const
+{
+ return XYZ(x - other.x, y - other.y, z - other.z);
+}
+
+std::ostream& operator<<(std::ostream& os, const XYZ& xyz)
+{
+ return os << '(' << xyz.x << ' ' << xyz.y << ' ' << xyz.z << ')';
+}
+
+
+
+BoundingBox::BoundingBox()
+ : empty(true)
+{}
+
+void BoundingBox::add(const XY& point)
+{
+ if (empty) {
+ empty = false;
+ lower = upper = point;
+ } else {
+ if (point.x < lower.x) lower.x = point.x;
+ else if (point.x > upper.x) upper.x = point.x;
+
+ if (point.y < lower.y) lower.y = point.y;
+ else if (point.y > upper.y) upper.y = point.y;
+ }
+}
+
+void BoundingBox::expand(const XY& delta)
+{
+ if (!empty) {
+ lower -= delta;
+ upper += delta;
+ }
+}
+
+
+
+ContourLine::ContourLine()
+ : std::vector<XY>()
+{}
+
+void ContourLine::insert_unique(iterator pos, const XY& point)
+{
+ if (empty() || pos == end() || point != *pos)
+ std::vector<XY>::insert(pos, point);
+}
+
+void ContourLine::push_back(const XY& point)
+{
+ if (empty() || point != back())
+ std::vector<XY>::push_back(point);
+}
+
+void ContourLine::write() const
+{
+ std::cout << "ContourLine of " << size() << " points:";
+ for (const_iterator it = begin(); it != end(); ++it)
+ std::cout << ' ' << *it;
+ std::cout << std::endl;
+}
+
+
+
+void write_contour(const Contour& contour)
+{
+ std::cout << "Contour of " << contour.size() << " lines." << std::endl;
+ for (Contour::const_iterator it = contour.begin(); it != contour.end(); ++it)
+ it->write();
+}
+
+
+
+Triangulation::Triangulation(const CoordinateArray& x,
+ const CoordinateArray& y,
+ const TriangleArray& triangles,
+ const MaskArray& mask,
+ const EdgeArray& edges,
+ const NeighborArray& neighbors,
+ int correct_triangle_orientations)
+ : _x(x),
+ _y(y),
+ _triangles(triangles),
+ _mask(mask),
+ _edges(edges),
+ _neighbors(neighbors)
+{
+ if (correct_triangle_orientations)
+ correct_triangles();
+}
+
+void Triangulation::calculate_boundaries()
+{
+ get_neighbors(); // Ensure _neighbors has been created.
+
+ // Create set of all boundary TriEdges, i.e. those that do not
+ // have a neighbor triangle.
+ typedef std::set<TriEdge> BoundaryEdges;
+ BoundaryEdges boundary_edges;
+ for (int tri = 0; tri < get_ntri(); ++tri) {
+ if (!is_masked(tri)) {
+ for (int edge = 0; edge < 3; ++edge) {
+ if (get_neighbor(tri, edge) == -1) {
+ boundary_edges.insert(TriEdge(tri, edge));
+ }
+ }
+ }
+ }
+
+ // Take any boundary edge and follow the boundary until return to start
+ // point, removing edges from boundary_edges as they are used. At the same
+ // time, initialise the _tri_edge_to_boundary_map.
+ while (!boundary_edges.empty()) {
+ // Start of new boundary.
+ BoundaryEdges::iterator it = boundary_edges.begin();
+ int tri = it->tri;
+ int edge = it->edge;
+ _boundaries.push_back(Boundary());
+ Boundary& boundary = _boundaries.back();
+
+ while (true) {
+ boundary.push_back(TriEdge(tri, edge));
+ boundary_edges.erase(it);
+ _tri_edge_to_boundary_map[TriEdge(tri, edge)] =
+ BoundaryEdge(_boundaries.size()-1, boundary.size()-1);
+
+ // Move to next edge of current triangle.
+ edge = (edge+1) % 3;
+
+ // Find start point index of boundary edge.
+ int point = get_triangle_point(tri, edge);
+
+ // Find next TriEdge by traversing neighbors until we find one
+ // without a neighbor.
+ while (get_neighbor(tri, edge) != -1) {
+ tri = get_neighbor(tri, edge);
+ edge = get_edge_in_triangle(tri, point);
+ }
+
+ if (TriEdge(tri,edge) == boundary.front())
+ break; // Reached beginning of this boundary, so finished it.
+ else
+ it = boundary_edges.find(TriEdge(tri, edge));
+ }
+ }
+}
+
+void Triangulation::calculate_edges()
+{
+ assert(_edges.empty() && "Expected empty edges array");
+
+ // Create set of all edges, storing them with start point index less than
+ // end point index.
+ typedef std::set<Edge> EdgeSet;
+ EdgeSet edge_set;
+ for (int tri = 0; tri < get_ntri(); ++tri) {
+ if (!is_masked(tri)) {
+ for (int edge = 0; edge < 3; edge++) {
+ int start = get_triangle_point(tri, edge);
+ int end = get_triangle_point(tri, (edge+1)%3);
+ edge_set.insert(start > end ? Edge(start,end) : Edge(end,start));
+ }
+ }
+ }
+
+ // Convert to python _edges array.
+ npy_intp dims[2] = {static_cast<npy_intp>(edge_set.size()), 2};
+ _edges = EdgeArray(dims);
+
+ int i = 0;
+ for (EdgeSet::const_iterator it = edge_set.begin(); it != edge_set.end(); ++it) {
+ _edges(i, 0) = it->start;
+ _edges(i++, 1) = it->end;
+ }
+}
+
+void Triangulation::calculate_neighbors()
+{
+ assert(_neighbors.empty() && "Expected empty neighbors array");
+
+ // Create _neighbors array with shape (ntri,3) and initialise all to -1.
+ npy_intp dims[2] = {get_ntri(), 3};
+ _neighbors = NeighborArray(dims);
+
+ int tri, edge;
+ for (tri = 0; tri < get_ntri(); ++tri) {
+ for (edge = 0; edge < 3; ++edge)
+ _neighbors(tri, edge) = -1;
+ }
+
+ // For each triangle edge (start to end point), find corresponding neighbor
+ // edge from end to start point. Do this by traversing all edges and
+ // storing them in a map from edge to TriEdge. If the corresponding
+ // neighbor edge is already in the map, there is no need to store the new
+ // edge because the neighbor has already been found.
+ typedef std::map<Edge, TriEdge> EdgeToTriEdgeMap;
+ EdgeToTriEdgeMap edge_to_tri_edge_map;
+ for (tri = 0; tri < get_ntri(); ++tri) {
+ if (!is_masked(tri)) {
+ for (edge = 0; edge < 3; ++edge) {
+ int start = get_triangle_point(tri, edge);
+ int end = get_triangle_point(tri, (edge+1)%3);
+ EdgeToTriEdgeMap::iterator it =
+ edge_to_tri_edge_map.find(Edge(end,start));
+ if (it == edge_to_tri_edge_map.end()) {
+ // No neighbor edge exists in the edge_to_tri_edge_map, so
+ // add this edge to it.
+ edge_to_tri_edge_map[Edge(start,end)] = TriEdge(tri,edge);
+ } else {
+ // Neighbor edge found, set the two elements of _neighbors
+ // and remove edge from edge_to_tri_edge_map.
+ _neighbors(tri, edge)= it->second.tri;
+ _neighbors(it->second.tri, it->second.edge) = tri;
+ edge_to_tri_edge_map.erase(it);
+ }
+ }
+ }
+ }
+
+ // Note that remaining edges in the edge_to_tri_edge_map correspond to
+ // boundary edges, but the boundaries are calculated separately elsewhere.
+}
+
+Triangulation::TwoCoordinateArray Triangulation::calculate_plane_coefficients(
+ const CoordinateArray& z)
+{
+ npy_intp dims[2] = {get_ntri(), 3};
+ Triangulation::TwoCoordinateArray planes(dims);
+
+ int point;
+ for (int tri = 0; tri < get_ntri(); ++tri) {
+ if (is_masked(tri)) {
+ planes(tri, 0) = 0.0;
+ planes(tri, 1) = 0.0;
+ planes(tri, 2) = 0.0;
+ }
+ else {
+ // Equation of plane for all points r on plane is r.normal = p
+ // where normal is vector normal to the plane, and p is a
+ // constant. Rewrite as
+ // r_x*normal_x + r_y*normal_y + r_z*normal_z = p
+ // and rearrange to give
+ // r_z = (-normal_x/normal_z)*r_x + (-normal_y/normal_z)*r_y +
+ // p/normal_z
+ point = _triangles(tri, 0);
+ XYZ point0(_x(point), _y(point), z(point));
+ point = _triangles(tri, 1);
+ XYZ side01 = XYZ(_x(point), _y(point), z(point)) - point0;
+ point = _triangles(tri, 2);
+ XYZ side02 = XYZ(_x(point), _y(point), z(point)) - point0;
+
+ XYZ normal = side01.cross(side02);
+
+ if (normal.z == 0.0) {
+ // Normal is in x-y plane which means triangle consists of
+ // collinear points. To avoid dividing by zero, we use the
+ // Moore-Penrose pseudo-inverse.
+ double sum2 = (side01.x*side01.x + side01.y*side01.y +
+ side02.x*side02.x + side02.y*side02.y);
+ double a = (side01.x*side01.z + side02.x*side02.z) / sum2;
+ double b = (side01.y*side01.z + side02.y*side02.z) / sum2;
+ planes(tri, 0) = a;
+ planes(tri, 1) = b;
+ planes(tri, 2) = point0.z - a*point0.x - b*point0.y;
+ }
+ else {
+ planes(tri, 0) = -normal.x / normal.z; // x
+ planes(tri, 1) = -normal.y / normal.z; // y
+ planes(tri, 2) = normal.dot(point0) / normal.z; // constant
+ }
+ }
+ }
+
+ return planes;
+}
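+
+// Reading of the result (a descriptive note): for each unmasked triangle,
+// planes(tri, 0..2) holds (a, b, c) such that z = a*x + b*y + c over that
+// triangle; masked triangles get all-zero coefficients.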
+
+void Triangulation::correct_triangles()
+{
+ for (int tri = 0; tri < get_ntri(); ++tri) {
+ XY point0 = get_point_coords(_triangles(tri, 0));
+ XY point1 = get_point_coords(_triangles(tri, 1));
+ XY point2 = get_point_coords(_triangles(tri, 2));
+ if ( (point1 - point0).cross_z(point2 - point0) < 0.0) {
+ // Triangle points are clockwise, so change them to anticlockwise.
+ std::swap(_triangles(tri, 1), _triangles(tri, 2));
+ if (!_neighbors.empty())
+ std::swap(_neighbors(tri, 1), _neighbors(tri, 2));
+ }
+ }
+}
+
+const Triangulation::Boundaries& Triangulation::get_boundaries() const
+{
+ if (_boundaries.empty())
+ const_cast<Triangulation*>(this)->calculate_boundaries();
+ return _boundaries;
+}
+
+void Triangulation::get_boundary_edge(const TriEdge& triEdge,
+ int& boundary,
+ int& edge) const
+{
+ get_boundaries(); // Ensure _tri_edge_to_boundary_map has been created.
+ TriEdgeToBoundaryMap::const_iterator it =
+ _tri_edge_to_boundary_map.find(triEdge);
+ assert(it != _tri_edge_to_boundary_map.end() &&
+ "TriEdge is not on a boundary");
+ boundary = it->second.boundary;
+ edge = it->second.edge;
+}
+
+int Triangulation::get_edge_in_triangle(int tri, int point) const
+{
+ assert(tri >= 0 && tri < get_ntri() && "Triangle index out of bounds");
+ assert(point >= 0 && point < get_npoints() && "Point index out of bounds.");
+ for (int edge = 0; edge < 3; ++edge) {
+ if (_triangles(tri, edge) == point)
+ return edge;
+ }
+ return -1; // point is not in triangle.
+}
+
+Triangulation::EdgeArray& Triangulation::get_edges()
+{
+ if (_edges.empty())
+ calculate_edges();
+ return _edges;
+}
+
+int Triangulation::get_neighbor(int tri, int edge) const
+{
+ assert(tri >= 0 && tri < get_ntri() && "Triangle index out of bounds");
+ assert(edge >= 0 && edge < 3 && "Edge index out of bounds");
+ if (_neighbors.empty())
+ const_cast<Triangulation&>(*this).calculate_neighbors();
+ return _neighbors(tri, edge);
+}
+
+TriEdge Triangulation::get_neighbor_edge(int tri, int edge) const
+{
+ int neighbor_tri = get_neighbor(tri, edge);
+ if (neighbor_tri == -1)
+ return TriEdge(-1,-1);
+ else
+ return TriEdge(neighbor_tri,
+ get_edge_in_triangle(neighbor_tri,
+ get_triangle_point(tri,
+ (edge+1)%3)));
+}
+
+Triangulation::NeighborArray& Triangulation::get_neighbors()
+{
+ if (_neighbors.empty())
+ calculate_neighbors();
+ return _neighbors;
+}
+
+int Triangulation::get_npoints() const
+{
+ return _x.size();
+}
+
+int Triangulation::get_ntri() const
+{
+ return _triangles.size();
+}
+
+XY Triangulation::get_point_coords(int point) const
+{
+ assert(point >= 0 && point < get_npoints() && "Point index out of bounds.");
+ return XY(_x(point), _y(point));
+}
+
+int Triangulation::get_triangle_point(int tri, int edge) const
+{
+ assert(tri >= 0 && tri < get_ntri() && "Triangle index out of bounds");
+ assert(edge >= 0 && edge < 3 && "Edge index out of bounds");
+ return _triangles(tri, edge);
+}
+
+int Triangulation::get_triangle_point(const TriEdge& tri_edge) const
+{
+ return get_triangle_point(tri_edge.tri, tri_edge.edge);
+}
+
+bool Triangulation::is_masked(int tri) const
+{
+ assert(tri >= 0 && tri < get_ntri() && "Triangle index out of bounds.");
+ const npy_bool* mask_ptr = reinterpret_cast<const npy_bool*>(_mask.data());
+ return !_mask.empty() && mask_ptr[tri];
+}
+
+void Triangulation::set_mask(const MaskArray& mask)
+{
+ _mask = mask;
+
+ // Clear derived fields so they are recalculated when needed.
+ _edges = EdgeArray();
+ _neighbors = NeighborArray();
+ _boundaries.clear();
+}
+
+void Triangulation::write_boundaries() const
+{
+ const Boundaries& bs = get_boundaries();
+ std::cout << "Number of boundaries: " << bs.size() << std::endl;
+ for (Boundaries::const_iterator it = bs.begin(); it != bs.end(); ++it) {
+ const Boundary& b = *it;
+ std::cout << " Boundary of " << b.size() << " points: ";
+ for (Boundary::const_iterator itb = b.begin(); itb != b.end(); ++itb) {
+ std::cout << *itb << ", ";
+ }
+ std::cout << std::endl;
+ }
+}
+
+
+
+TriContourGenerator::TriContourGenerator(Triangulation& triangulation,
+ const CoordinateArray& z)
+ : _triangulation(triangulation),
+ _z(z),
+ _interior_visited(2*_triangulation.get_ntri()),
+ _boundaries_visited(0),
+ _boundaries_used(0)
+{}
+
+void TriContourGenerator::clear_visited_flags(bool include_boundaries)
+{
+ // Clear _interior_visited.
+ std::fill(_interior_visited.begin(), _interior_visited.end(), false);
+
+ if (include_boundaries) {
+ if (_boundaries_visited.empty()) {
+ const Boundaries& boundaries = get_boundaries();
+
+ // Initialise _boundaries_visited.
+ _boundaries_visited.reserve(boundaries.size());
+ for (Boundaries::const_iterator it = boundaries.begin();
+ it != boundaries.end(); ++it)
+ _boundaries_visited.push_back(BoundaryVisited(it->size()));
+
+ // Initialise _boundaries_used.
+ _boundaries_used = BoundariesUsed(boundaries.size());
+ }
+
+ // Clear _boundaries_visited.
+ for (BoundariesVisited::iterator it = _boundaries_visited.begin();
+ it != _boundaries_visited.end(); ++it)
+ std::fill(it->begin(), it->end(), false);
+
+ // Clear _boundaries_used.
+ std::fill(_boundaries_used.begin(), _boundaries_used.end(), false);
+ }
+}
+
+PyObject* TriContourGenerator::contour_to_segs(const Contour& contour)
+{
+ PyObject* segs = PyList_New(contour.size());
+ for (Contour::size_type i = 0; i < contour.size(); ++i) {
+ const ContourLine& line = contour[i];
+ npy_intp dims[2] = {static_cast<npy_intp>(line.size()),2};
+ PyArrayObject* py_line = (PyArrayObject*)PyArray_SimpleNew(
+ 2, dims, NPY_DOUBLE);
+ double* p = (double*)PyArray_DATA(py_line);
+ for (ContourLine::const_iterator it = line.begin(); it != line.end(); ++it) {
+ *p++ = it->x;
+ *p++ = it->y;
+ }
+ if (PyList_SetItem(segs, i, (PyObject*)py_line)) {
+ Py_XDECREF(segs);
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unable to set contour segments");
+ return NULL;
+ }
+ }
+ return segs;
+}
+
+PyObject* TriContourGenerator::contour_to_segs_and_kinds(const Contour& contour)
+{
+ Contour::const_iterator line;
+ ContourLine::const_iterator point;
+
+ // Find total number of points in all contour lines.
+ int n_points = 0;
+ for (line = contour.begin(); line != contour.end(); ++line)
+ n_points += line->size();
+
+ // Create segs array for point coordinates.
+ npy_intp segs_dims[2] = {n_points, 2};
+ PyArrayObject* segs = (PyArrayObject*)PyArray_SimpleNew(
+ 2, segs_dims, NPY_DOUBLE);
+ double* segs_ptr = (double*)PyArray_DATA(segs);
+
+ // Create kinds array for code types.
+ npy_intp kinds_dims[1] = {n_points};
+ PyArrayObject* kinds = (PyArrayObject*)PyArray_SimpleNew(
+ 1, kinds_dims, NPY_UBYTE);
+ unsigned char* kinds_ptr = (unsigned char*)PyArray_DATA(kinds);
+
+ for (line = contour.begin(); line != contour.end(); ++line) {
+ for (point = line->begin(); point != line->end(); point++) {
+ *segs_ptr++ = point->x;
+ *segs_ptr++ = point->y;
+ *kinds_ptr++ = (point == line->begin() ? MOVETO : LINETO);
+ }
+ }
+
+ PyObject* result = PyTuple_New(2);
+ if (PyTuple_SetItem(result, 0, (PyObject*)segs) ||
+ PyTuple_SetItem(result, 1, (PyObject*)kinds)) {
+ Py_XDECREF(result);
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unable to set contour segments and kinds");
+ return NULL;
+ }
+ return result;
+}
+
+PyObject* TriContourGenerator::create_contour(const double& level)
+{
+ clear_visited_flags(false);
+ Contour contour;
+
+ find_boundary_lines(contour, level);
+ find_interior_lines(contour, level, false, false);
+
+ return contour_to_segs(contour);
+}
+
+PyObject* TriContourGenerator::create_filled_contour(const double& lower_level,
+ const double& upper_level)
+{
+ clear_visited_flags(true);
+ Contour contour;
+
+ find_boundary_lines_filled(contour, lower_level, upper_level);
+ find_interior_lines(contour, lower_level, false, true);
+ find_interior_lines(contour, upper_level, true, true);
+
+ return contour_to_segs_and_kinds(contour);
+}
+
+XY TriContourGenerator::edge_interp(int tri, int edge, const double& level)
+{
+ return interp(_triangulation.get_triangle_point(tri, edge),
+ _triangulation.get_triangle_point(tri, (edge+1)%3),
+ level);
+}
+
+void TriContourGenerator::find_boundary_lines(Contour& contour,
+ const double& level)
+{
+ // Traverse boundaries to find starting points for all contour lines that
+ // intersect the boundaries. For each starting point found, follow the
+ // line to its end before continuing.
+ const Triangulation& triang = _triangulation;
+ const Boundaries& boundaries = get_boundaries();
+ for (Boundaries::const_iterator it = boundaries.begin();
+ it != boundaries.end(); ++it) {
+ const Boundary& boundary = *it;
+ bool startAbove, endAbove = false;
+ for (Boundary::const_iterator itb = boundary.begin();
+ itb != boundary.end(); ++itb) {
+ if (itb == boundary.begin())
+ startAbove = get_z(triang.get_triangle_point(*itb)) >= level;
+ else
+ startAbove = endAbove;
+ endAbove = get_z(triang.get_triangle_point(itb->tri,
+ (itb->edge+1)%3)) >= level;
+ if (startAbove && !endAbove) {
+ // This boundary edge is the start point for a contour line,
+ // so follow the line.
+ contour.push_back(ContourLine());
+ ContourLine& contour_line = contour.back();
+ TriEdge tri_edge = *itb;
+ follow_interior(contour_line, tri_edge, true, level, false);
+ }
+ }
+ }
+}
+
+void TriContourGenerator::find_boundary_lines_filled(Contour& contour,
+ const double& lower_level,
+ const double& upper_level)
+{
+ // Traverse boundaries to find starting points for all contour lines that
+ // intersect the boundaries. For each starting point found, follow the
+ // line to its end before continuing.
+ const Triangulation& triang = _triangulation;
+ const Boundaries& boundaries = get_boundaries();
+ for (Boundaries::size_type i = 0; i < boundaries.size(); ++i) {
+ const Boundary& boundary = boundaries[i];
+ for (Boundary::size_type j = 0; j < boundary.size(); ++j) {
+ if (!_boundaries_visited[i][j]) {
+ // z values of start and end of this boundary edge.
+ double z_start = get_z(triang.get_triangle_point(boundary[j]));
+ double z_end = get_z(triang.get_triangle_point(
+ boundary[j].tri, (boundary[j].edge+1)%3));
+
+ // Does this boundary edge's z increase through upper level
+ // and/or decrease through lower level?
+ bool incr_upper = (z_start < upper_level && z_end >= upper_level);
+ bool decr_lower = (z_start >= lower_level && z_end < lower_level);
+
+ if (decr_lower || incr_upper) {
+ // Start point for contour line, so follow it.
+ contour.push_back(ContourLine());
+ ContourLine& contour_line = contour.back();
+ TriEdge start_tri_edge = boundary[j];
+ TriEdge tri_edge = start_tri_edge;
+
+ // Traverse interior and boundaries until return to start.
+ bool on_upper = incr_upper;
+ do {
+ follow_interior(contour_line, tri_edge, true,
+ on_upper ? upper_level : lower_level, on_upper);
+ on_upper = follow_boundary(contour_line, tri_edge,
+ lower_level, upper_level, on_upper);
+ } while (tri_edge != start_tri_edge);
+
+ // Filled contour lines must not have same first and last
+ // points.
+ if (contour_line.size() > 1 &&
+ contour_line.front() == contour_line.back())
+ contour_line.pop_back();
+ }
+ }
+ }
+ }
+
+ // Add full boundaries that lie between the lower and upper levels. These
+ // are boundaries that have not been touched by an interior contour line,
+ // as recorded in _boundaries_used.
+ for (Boundaries::size_type i = 0; i < boundaries.size(); ++i) {
+ if (!_boundaries_used[i]) {
+ const Boundary& boundary = boundaries[i];
+ double z = get_z(triang.get_triangle_point(boundary[0]));
+ if (z >= lower_level && z < upper_level) {
+ contour.push_back(ContourLine());
+ ContourLine& contour_line = contour.back();
+ for (Boundary::size_type j = 0; j < boundary.size(); ++j)
+ contour_line.push_back(triang.get_point_coords(
+ triang.get_triangle_point(boundary[j])));
+ }
+ }
+ }
+}
+
+void TriContourGenerator::find_interior_lines(Contour& contour,
+ const double& level,
+ bool on_upper,
+ bool filled)
+{
+ const Triangulation& triang = _triangulation;
+ int ntri = triang.get_ntri();
+ for (int tri = 0; tri < ntri; ++tri) {
+ int visited_index = (on_upper ? tri+ntri : tri);
+
+ if (_interior_visited[visited_index] || triang.is_masked(tri))
+ continue; // Triangle has already been visited or is masked.
+
+ _interior_visited[visited_index] = true;
+
+ // Determine edge via which to leave this triangle.
+ int edge = get_exit_edge(tri, level, on_upper);
+ assert(edge >= -1 && edge < 3 && "Invalid exit edge");
+ if (edge == -1)
+ continue; // Contour does not pass through this triangle.
+
+ // Found start of new contour line loop.
+ contour.push_back(ContourLine());
+ ContourLine& contour_line = contour.back();
+ TriEdge tri_edge = triang.get_neighbor_edge(tri, edge);
+ follow_interior(contour_line, tri_edge, false, level, on_upper);
+
+ if (!filled)
+ // Non-filled contour lines must be closed.
+ contour_line.push_back(contour_line.front());
+ else if (contour_line.size() > 1 &&
+ contour_line.front() == contour_line.back())
+ // Filled contour lines must not have same first and last points.
+ contour_line.pop_back();
+ }
+}
+
+bool TriContourGenerator::follow_boundary(ContourLine& contour_line,
+ TriEdge& tri_edge,
+ const double& lower_level,
+ const double& upper_level,
+ bool on_upper)
+{
+ const Triangulation& triang = _triangulation;
+ const Boundaries& boundaries = get_boundaries();
+
+ // Have TriEdge to start at, need equivalent boundary edge.
+ int boundary, edge;
+ triang.get_boundary_edge(tri_edge, boundary, edge);
+ _boundaries_used[boundary] = true;
+
+ bool stop = false;
+ bool first_edge = true;
+ double z_start, z_end = 0;
+ while (!stop)
+ {
+ assert(!_boundaries_visited[boundary][edge] && "Boundary already visited");
+ _boundaries_visited[boundary][edge] = true;
+
+ // z values of start and end points of boundary edge.
+ if (first_edge)
+ z_start = get_z(triang.get_triangle_point(tri_edge));
+ else
+ z_start = z_end;
+ z_end = get_z(triang.get_triangle_point(tri_edge.tri,
+ (tri_edge.edge+1)%3));
+
+ if (z_end > z_start) { // z increasing.
+ if (!(!on_upper && first_edge) &&
+ z_end >= lower_level && z_start < lower_level) {
+ stop = true;
+ on_upper = false;
+ } else if (z_end >= upper_level && z_start < upper_level) {
+ stop = true;
+ on_upper = true;
+ }
+ } else { // z decreasing.
+ if (!(on_upper && first_edge) &&
+ z_start >= upper_level && z_end < upper_level) {
+ stop = true;
+ on_upper = true;
+ } else if (z_start >= lower_level && z_end < lower_level) {
+ stop = true;
+ on_upper = false;
+ }
+ }
+
+ first_edge = false;
+
+ if (!stop) {
+ // Move to next boundary edge, adding point to contour line.
+ edge = (edge+1) % (int)boundaries[boundary].size();
+ tri_edge = boundaries[boundary][edge];
+ contour_line.push_back(triang.get_point_coords(
+ triang.get_triangle_point(tri_edge)));
+ }
+ }
+
+ return on_upper;
+}
+
+void TriContourGenerator::follow_interior(ContourLine& contour_line,
+ TriEdge& tri_edge,
+ bool end_on_boundary,
+ const double& level,
+ bool on_upper)
+{
+ int& tri = tri_edge.tri;
+ int& edge = tri_edge.edge;
+
+ // Initial point.
+ contour_line.push_back(edge_interp(tri, edge, level));
+
+ while (true) {
+ int visited_index = tri;
+ if (on_upper)
+ visited_index += _triangulation.get_ntri();
+
+ // Check for end not on boundary.
+ if (!end_on_boundary && _interior_visited[visited_index])
+ break; // Reached start point, so return.
+
+ // Determine edge by which to leave this triangle.
+ edge = get_exit_edge(tri, level, on_upper);
+ assert(edge >= 0 && edge < 3 && "Invalid exit edge");
+
+ _interior_visited[visited_index] = true;
+
+ // Append new point to point set.
+ assert(edge >= 0 && edge < 3 && "Invalid triangle edge");
+ contour_line.push_back(edge_interp(tri, edge, level));
+
+ // Move to next triangle.
+ TriEdge next_tri_edge = _triangulation.get_neighbor_edge(tri,edge);
+
+ // Check if ending on a boundary.
+ if (end_on_boundary && next_tri_edge.tri == -1)
+ break;
+
+ tri_edge = next_tri_edge;
+ assert(tri_edge.tri != -1 && "Invalid triangle for internal loop");
+ }
+}
+
+const TriContourGenerator::Boundaries& TriContourGenerator::get_boundaries() const
+{
+ return _triangulation.get_boundaries();
+}
+
+int TriContourGenerator::get_exit_edge(int tri,
+ const double& level,
+ bool on_upper) const
+{
+ assert(tri >= 0 && tri < _triangulation.get_ntri() &&
+ "Triangle index out of bounds.");
+
+ unsigned int config =
+ (get_z(_triangulation.get_triangle_point(tri, 0)) >= level) |
+ (get_z(_triangulation.get_triangle_point(tri, 1)) >= level) << 1 |
+ (get_z(_triangulation.get_triangle_point(tri, 2)) >= level) << 2;
+
+ if (on_upper) config = 7-config;
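+ // Descriptive note: each bit of config records whether the corresponding
+ // triangle point is at or above the level, so 0 and 7 mean the contour
+ // does not cross this triangle; the other cases map to the edge index
+ // through which the contour leaves it.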
+
+ switch (config) {
+ case 0: return -1;
+ case 1: return 2;
+ case 2: return 0;
+ case 3: return 2;
+ case 4: return 1;
+ case 5: return 1;
+ case 6: return 0;
+ case 7: return -1;
+ default: assert(0 && "Invalid config value"); return -1;
+ }
+}
+
+const double& TriContourGenerator::get_z(int point) const
+{
+ assert(point >= 0 && point < _triangulation.get_npoints() &&
+ "Point index out of bounds.");
+ return _z(point);
+}
+
+XY TriContourGenerator::interp(int point1,
+ int point2,
+ const double& level) const
+{
+ assert(point1 >= 0 && point1 < _triangulation.get_npoints() &&
+ "Point index 1 out of bounds.");
+ assert(point2 >= 0 && point2 < _triangulation.get_npoints() &&
+ "Point index 2 out of bounds.");
+ assert(point1 != point2 && "Identical points");
+ double fraction = (get_z(point2) - level) / (get_z(point2) - get_z(point1));
+ return _triangulation.get_point_coords(point1)*fraction +
+ _triangulation.get_point_coords(point2)*(1.0 - fraction);
+}
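+
+// Worked example (a sketch): with z(point1) = 0, z(point2) = 2 and
+// level = 0.5, fraction = 0.75, so the interpolated point lies three
+// quarters of the way from point2 towards point1, i.e. nearer the
+// lower-valued end.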
+
+
+
+TrapezoidMapTriFinder::TrapezoidMapTriFinder(Triangulation& triangulation)
+ : _triangulation(triangulation),
+ _points(0),
+ _tree(0)
+{}
+
+TrapezoidMapTriFinder::~TrapezoidMapTriFinder()
+{
+ clear();
+}
+
+bool
+TrapezoidMapTriFinder::add_edge_to_tree(const Edge& edge)
+{
+ std::vector<Trapezoid*> trapezoids;
+ if (!find_trapezoids_intersecting_edge(edge, trapezoids))
+ return false;
+ assert(!trapezoids.empty() && "No trapezoids intersect edge");
+
+ const Point* p = edge.left;
+ const Point* q = edge.right;
+ Trapezoid* left_old = 0; // old trapezoid to the left.
+ Trapezoid* left_below = 0; // below trapezoid to the left.
+ Trapezoid* left_above = 0; // above trapezoid to the left.
+
+ // Iterate through trapezoids intersecting edge from left to right.
+ // Replace each old trapezoid with 2+ new trapezoids, and replace its
+ // corresponding nodes in the search tree with new nodes.
+ unsigned int ntraps = trapezoids.size();
+ for (unsigned int i = 0; i < ntraps; ++i) {
+ Trapezoid* old = trapezoids[i]; // old trapezoid to replace.
+ bool start_trap = (i == 0);
+ bool end_trap = (i == ntraps-1);
+ bool have_left = (start_trap && edge.left != old->left);
+ bool have_right = (end_trap && edge.right != old->right);
+
+ // Old trapezoid is replaced by up to 4 new trapezoids: left is to the
+ // left of the start point p, below/above are below/above the edge
+ // inserted, and right is to the right of the end point q.
+ Trapezoid* left = 0;
+ Trapezoid* below = 0;
+ Trapezoid* above = 0;
+ Trapezoid* right = 0;
+
+ // There are 4 different cases here depending on whether the old
+ // trapezoid in question is the start and/or end trapezoid of those
+ // that intersect the edge inserted. There is some code duplication
+ // here but it is much easier to understand this way rather than
+ // interleave the 4 different cases with many more if-statements.
+ if (start_trap && end_trap) {
+ // Edge intersects a single trapezoid.
+ if (have_left)
+ left = new Trapezoid(old->left, p, old->below, old->above);
+ below = new Trapezoid(p, q, old->below, edge);
+ above = new Trapezoid(p, q, edge, old->above);
+ if (have_right)
+ right = new Trapezoid(q, old->right, old->below, old->above);
+
+ // Set pairs of trapezoid neighbours.
+ if (have_left) {
+ left->set_lower_left(old->lower_left);
+ left->set_upper_left(old->upper_left);
+ left->set_lower_right(below);
+ left->set_upper_right(above);
+ }
+ else {
+ below->set_lower_left(old->lower_left);
+ above->set_upper_left(old->upper_left);
+ }
+
+ if (have_right) {
+ right->set_lower_right(old->lower_right);
+ right->set_upper_right(old->upper_right);
+ below->set_lower_right(right);
+ above->set_upper_right(right);
+ }
+ else {
+ below->set_lower_right(old->lower_right);
+ above->set_upper_right(old->upper_right);
+ }
+ }
+ else if (start_trap) {
+ // Old trapezoid is the first of 2+ trapezoids that the edge
+ // intersects.
+ if (have_left)
+ left = new Trapezoid(old->left, p, old->below, old->above);
+ below = new Trapezoid(p, old->right, old->below, edge);
+ above = new Trapezoid(p, old->right, edge, old->above);
+
+ // Set pairs of trapezoid neighbours.
+ if (have_left) {
+ left->set_lower_left(old->lower_left);
+ left->set_upper_left(old->upper_left);
+ left->set_lower_right(below);
+ left->set_upper_right(above);
+ }
+ else {
+ below->set_lower_left(old->lower_left);
+ above->set_upper_left(old->upper_left);
+ }
+
+ below->set_lower_right(old->lower_right);
+ above->set_upper_right(old->upper_right);
+ }
+ else if (end_trap) {
+ // Old trapezoid is the last of 2+ trapezoids that the edge
+ // intersects.
+ if (left_below->below == old->below) {
+ below = left_below;
+ below->right = q;
+ }
+ else
+ below = new Trapezoid(old->left, q, old->below, edge);
+
+ if (left_above->above == old->above) {
+ above = left_above;
+ above->right = q;
+ }
+ else
+ above = new Trapezoid(old->left, q, edge, old->above);
+
+ if (have_right)
+ right = new Trapezoid(q, old->right, old->below, old->above);
+
+ // Set pairs of trapezoid neighbours.
+ if (have_right) {
+ right->set_lower_right(old->lower_right);
+ right->set_upper_right(old->upper_right);
+ below->set_lower_right(right);
+ above->set_upper_right(right);
+ }
+ else {
+ below->set_lower_right(old->lower_right);
+ above->set_upper_right(old->upper_right);
+ }
+
+ // Connect to the new trapezoids that replaced the previous old trapezoid (left_old).
+ if (below != left_below) {
+ below->set_upper_left(left_below);
+ if (old->lower_left == left_old)
+ below->set_lower_left(left_below);
+ else
+ below->set_lower_left(old->lower_left);
+ }
+
+ if (above != left_above) {
+ above->set_lower_left(left_above);
+ if (old->upper_left == left_old)
+ above->set_upper_left(left_above);
+ else
+ above->set_upper_left(old->upper_left);
+ }
+ }
+ else { // Middle trapezoid.
+ // Old trapezoid is neither the first nor last of the 3+ trapezoids
+ // that the edge intersects.
+ if (left_below->below == old->below) {
+ below = left_below;
+ below->right = old->right;
+ }
+ else
+ below = new Trapezoid(old->left, old->right, old->below, edge);
+
+ if (left_above->above == old->above) {
+ above = left_above;
+ above->right = old->right;
+ }
+ else
+ above = new Trapezoid(old->left, old->right, edge, old->above);
+
+ // Connect to the new trapezoids that replaced the previous old trapezoid (left_old).
+ if (below != left_below) { // below is new.
+ below->set_upper_left(left_below);
+ if (old->lower_left == left_old)
+ below->set_lower_left(left_below);
+ else
+ below->set_lower_left(old->lower_left);
+ }
+
+ if (above != left_above) { // above is new.
+ above->set_lower_left(left_above);
+ if (old->upper_left == left_old)
+ above->set_upper_left(left_above);
+ else
+ above->set_upper_left(old->upper_left);
+ }
+
+ below->set_lower_right(old->lower_right);
+ above->set_upper_right(old->upper_right);
+ }
+
+ // Create new nodes to add to search tree. Below and above trapezoids
+ // may already have owning trapezoid nodes, in which case reuse them.
+ Node* new_top_node = new Node(
+ &edge,
+ below == left_below ? below->trapezoid_node : new Node(below),
+ above == left_above ? above->trapezoid_node : new Node(above));
+ if (have_right)
+ new_top_node = new Node(q, new_top_node, new Node(right));
+ if (have_left)
+ new_top_node = new Node(p, new Node(left), new_top_node);
+
+ // Insert new_top_node in correct position or positions in search tree.
+ Node* old_node = old->trapezoid_node;
+ if (old_node == _tree)
+ _tree = new_top_node;
+ else
+ old_node->replace_with(new_top_node);
+
+ // old_node has been removed from all of its parents and is no longer
+ // needed.
+ assert(old_node->has_no_parents() && "Node should have no parents");
+ delete old_node;
+
+ // Clearing up.
+ if (!end_trap) {
+ // Prepare for next loop.
+ left_old = old;
+ left_above = above;
+ left_below = below;
+ }
+ }
+
+ return true;
+}
+
+void
+TrapezoidMapTriFinder::clear()
+{
+ delete [] _points;
+ _points = 0;
+
+ _edges.clear();
+
+ delete _tree;
+ _tree = 0;
+}
+
+TrapezoidMapTriFinder::TriIndexArray
+TrapezoidMapTriFinder::find_many(const CoordinateArray& x,
+ const CoordinateArray& y)
+{
+ // Create integer array to return.
+ npy_intp n = x.dim(0);
+ npy_intp dims[1] = {n};
+ TriIndexArray tri_indices(dims);
+
+ // Fill returned array.
+ for (npy_intp i = 0; i < n; ++i)
+ tri_indices(i) = find_one(XY(x(i), y(i)));
+
+ return tri_indices;
+}
+
+int
+TrapezoidMapTriFinder::find_one(const XY& xy)
+{
+ const Node* node = _tree->search(xy);
+ assert(node != 0 && "Search tree for point returned null node");
+ return node->get_tri();
+}
+
+bool
+TrapezoidMapTriFinder::find_trapezoids_intersecting_edge(
+ const Edge& edge,
+ std::vector<Trapezoid*>& trapezoids)
+{
+ // This is the FollowSegment algorithm of de Berg et al., with some extra
+ // checks to deal with simple colinear (i.e. invalid) triangles.
+ trapezoids.clear();
+ Trapezoid* trapezoid = _tree->search(edge);
+ if (trapezoid == 0) {
+ assert(trapezoid != 0 && "search(edge) returns null trapezoid");
+ return false;
+ }
+
+ trapezoids.push_back(trapezoid);
+ while (edge.right->is_right_of(*trapezoid->right)) {
+ int orient = edge.get_point_orientation(*trapezoid->right);
+ if (orient == 0) {
+ if (edge.point_below == trapezoid->right)
+ orient = +1;
+ else if (edge.point_above == trapezoid->right)
+ orient = -1;
+ else {
+ assert(0 && "Unable to deal with point on edge");
+ return false;
+ }
+ }
+
+ if (orient == -1)
+ trapezoid = trapezoid->lower_right;
+ else if (orient == +1)
+ trapezoid = trapezoid->upper_right;
+
+ if (trapezoid == 0) {
+ assert(0 && "Expected trapezoid neighbor");
+ return false;
+ }
+ trapezoids.push_back(trapezoid);
+ }
+
+ return true;
+}
+
+PyObject*
+TrapezoidMapTriFinder::get_tree_stats()
+{
+ NodeStats stats;
+ _tree->get_stats(0, stats);
+
+ // std::set::size() returns size_t; cast to long to match the 'l' format.
+ return Py_BuildValue("[l,l,l,l,l,l,d]",
+ stats.node_count,
+ static_cast<long>(stats.unique_nodes.size()),
+ stats.trapezoid_count,
+ static_cast<long>(stats.unique_trapezoid_nodes.size()),
+ stats.max_parent_count,
+ stats.max_depth,
+ stats.sum_trapezoid_depth / stats.trapezoid_count);
+}
+
+void
+TrapezoidMapTriFinder::initialize()
+{
+ clear();
+ const Triangulation& triang = _triangulation;
+
+ // Set up points array, which contains all of the points in the
+ // triangulation plus the 4 corners of the enclosing rectangle.
+ int npoints = triang.get_npoints();
+ _points = new Point[npoints + 4];
+ BoundingBox bbox;
+ for (int i = 0; i < npoints; ++i) {
+ XY xy = triang.get_point_coords(i);
+ // Avoid problems with -0.0 values different from 0.0
+ if (xy.x == -0.0)
+ xy.x = 0.0;
+ if (xy.y == -0.0)
+ xy.y = 0.0;
+ _points[i] = Point(xy);
+ bbox.add(xy);
+ }
+
+ // Last 4 points are corner points of enclosing rectangle. Enclosing
+ // rectangle made slightly larger in case corner points are already in the
+ // triangulation.
+ if (bbox.empty) {
+ bbox.add(XY(0.0, 0.0));
+ bbox.add(XY(1.0, 1.0));
+ }
+ else {
+ const double small = 0.1; // Any value > 0.0
+ bbox.expand( (bbox.upper - bbox.lower)*small );
+ }
+ _points[npoints ] = Point(bbox.lower); // SW point.
+ _points[npoints+1] = Point(bbox.upper.x, bbox.lower.y); // SE point.
+ _points[npoints+2] = Point(bbox.lower.x, bbox.upper.y); // NW point.
+ _points[npoints+3] = Point(bbox.upper); // NE point.
+
+ // Set up edges array.
+ // First the bottom and top edges of the enclosing rectangle.
+ _edges.push_back(Edge(&_points[npoints], &_points[npoints+1],-1,-1,0,0));
+ _edges.push_back(Edge(&_points[npoints+2],&_points[npoints+3],-1,-1,0,0));
+
+ // Add all edges in the triangulation that point to the right. Do not
+ // explicitly include edges that point to the left as the neighboring
+ // triangle will supply that, unless there is no such neighbor.
+ int ntri = triang.get_ntri();
+ for (int tri = 0; tri < ntri; ++tri) {
+ if (!triang.is_masked(tri)) {
+ for (int edge = 0; edge < 3; ++edge) {
+ Point* start = _points + triang.get_triangle_point(tri,edge);
+ Point* end = _points +
+ triang.get_triangle_point(tri,(edge+1)%3);
+ Point* other = _points +
+ triang.get_triangle_point(tri,(edge+2)%3);
+ TriEdge neighbor = triang.get_neighbor_edge(tri,edge);
+ if (end->is_right_of(*start)) {
+ const Point* neighbor_point_below = (neighbor.tri == -1) ?
+ 0 : _points + triang.get_triangle_point(
+ neighbor.tri, (neighbor.edge+2)%3);
+ _edges.push_back(Edge(start, end, neighbor.tri, tri,
+ neighbor_point_below, other));
+ }
+ else if (neighbor.tri == -1)
+ _edges.push_back(Edge(end, start, tri, -1, other, 0));
+
+ // Set triangle associated with start point if not already set.
+ if (start->tri == -1)
+ start->tri = tri;
+ }
+ }
+ }
+
+ // Initial trapezoid is enclosing rectangle.
+ _tree = new Node(new Trapezoid(&_points[npoints], &_points[npoints+1],
+ _edges[0], _edges[1]));
+ _tree->assert_valid(false);
+
+ // Randomly shuffle all edges other than first 2.
+ RandomNumberGenerator rng(1234);
+ std::random_shuffle(_edges.begin()+2, _edges.end(), rng);
+
+ // Add edges, one at a time, to tree.
+ unsigned int nedges = _edges.size();
+ for (unsigned int index = 2; index < nedges; ++index) {
+ if (!add_edge_to_tree(_edges[index]))
+ throw std::runtime_error("Triangulation is invalid");
+ _tree->assert_valid(index == nedges-1);
+ }
+}
+
+void
+TrapezoidMapTriFinder::print_tree()
+{
+ assert(_tree != 0 && "Null Node tree");
+ _tree->print();
+}
+
+TrapezoidMapTriFinder::Edge::Edge(const Point* left_,
+ const Point* right_,
+ int triangle_below_,
+ int triangle_above_,
+ const Point* point_below_,
+ const Point* point_above_)
+ : left(left_),
+ right(right_),
+ triangle_below(triangle_below_),
+ triangle_above(triangle_above_),
+ point_below(point_below_),
+ point_above(point_above_)
+{
+ assert(left != 0 && "Null left point");
+ assert(right != 0 && "Null right point");
+ assert(right->is_right_of(*left) && "Incorrect point order");
+ assert(triangle_below >= -1 && "Invalid triangle below index");
+ assert(triangle_above >= -1 && "Invalid triangle above index");
+}
+
+int
+TrapezoidMapTriFinder::Edge::get_point_orientation(const XY& xy) const
+{
+ double cross_z = (xy - *left).cross_z(*right - *left);
+ return (cross_z > 0.0) ? +1 : ((cross_z < 0.0) ? -1 : 0);
+}
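+// Worked example for get_point_orientation (values chosen for illustration
+// only): with left=(0,0) and right=(2,0), the point (1,1) gives
+// cross_z = 1*0 - 1*2 = -2 < 0, i.e. orientation -1 (above, to the left of,
+// the left->right edge), while (1,-1) gives cross_z = +2 and orientation +1.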
+
+double
+TrapezoidMapTriFinder::Edge::get_slope() const
+{
+ // Divide by zero is acceptable here.
+ XY diff = *right - *left;
+ return diff.y / diff.x;
+}
+
+double
+TrapezoidMapTriFinder::Edge::get_y_at_x(const double& x) const
+{
+ if (left->x == right->x) {
+ // If edge is vertical, return lowest y from left point.
+ assert(x == left->x && "x outside of edge");
+ return left->y;
+ }
+ else {
+ // Equation of line: left + lambda*(right - left) = xy.
+ // i.e. left.x + lambda(right.x - left.x) = x and similar for y.
+ double lambda = (x - left->x) / (right->x - left->x);
+ assert(lambda >= 0 && lambda <= 1.0 && "Lambda out of bounds");
+ return left->y + lambda*(right->y - left->y);
+ }
+}
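+// Worked example for get_y_at_x (values chosen for illustration only): with
+// left=(0,0) and right=(4,2), querying x=3 gives lambda = (3-0)/(4-0) = 0.75
+// and y = 0 + 0.75*(2-0) = 1.5, i.e. the point (3,1.5) on the edge.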
+
+bool
+TrapezoidMapTriFinder::Edge::has_point(const Point* point) const
+{
+ assert(point != 0 && "Null point");
+ return (left == point || right == point);
+}
+
+bool
+TrapezoidMapTriFinder::Edge::operator==(const Edge& other) const
+{
+ return this == &other;
+}
+
+void
+TrapezoidMapTriFinder::Edge::print_debug() const
+{
+ std::cout << "Edge " << *this << " tri_below=" << triangle_below
+ << " tri_above=" << triangle_above << std::endl;
+}
+
+TrapezoidMapTriFinder::Node::Node(const Point* point, Node* left, Node* right)
+ : _type(Type_XNode)
+{
+ assert(point != 0 && "Invalid point");
+ assert(left != 0 && "Invalid left node");
+ assert(right != 0 && "Invalid right node");
+ _union.xnode.point = point;
+ _union.xnode.left = left;
+ _union.xnode.right = right;
+ left->add_parent(this);
+ right->add_parent(this);
+}
+
+TrapezoidMapTriFinder::Node::Node(const Edge* edge, Node* below, Node* above)
+ : _type(Type_YNode)
+{
+ assert(edge != 0 && "Invalid edge");
+ assert(below != 0 && "Invalid below node");
+ assert(above != 0 && "Invalid above node");
+ _union.ynode.edge = edge;
+ _union.ynode.below = below;
+ _union.ynode.above = above;
+ below->add_parent(this);
+ above->add_parent(this);
+}
+
+TrapezoidMapTriFinder::Node::Node(Trapezoid* trapezoid)
+ : _type(Type_TrapezoidNode)
+{
+ assert(trapezoid != 0 && "Null Trapezoid");
+ _union.trapezoid = trapezoid;
+ trapezoid->trapezoid_node = this;
+}
+
+TrapezoidMapTriFinder::Node::~Node()
+{
+ switch (_type) {
+ case Type_XNode:
+ if (_union.xnode.left->remove_parent(this))
+ delete _union.xnode.left;
+ if (_union.xnode.right->remove_parent(this))
+ delete _union.xnode.right;
+ break;
+ case Type_YNode:
+ if (_union.ynode.below->remove_parent(this))
+ delete _union.ynode.below;
+ if (_union.ynode.above->remove_parent(this))
+ delete _union.ynode.above;
+ break;
+ case Type_TrapezoidNode:
+ delete _union.trapezoid;
+ break;
+ }
+}
+
+void
+TrapezoidMapTriFinder::Node::add_parent(Node* parent)
+{
+ assert(parent != 0 && "Null parent");
+ assert(parent != this && "Cannot be parent of self");
+ assert(!has_parent(parent) && "Parent already in collection");
+ _parents.push_back(parent);
+}
+
+void
+TrapezoidMapTriFinder::Node::assert_valid(bool tree_complete) const
+{
+#ifndef NDEBUG
+ // Check parents.
+ for (Parents::const_iterator it = _parents.begin();
+ it != _parents.end(); ++it) {
+ Node* parent = *it;
+ assert(parent != this && "Cannot be parent of self");
+ assert(parent->has_child(this) && "Parent missing child");
+ }
+
+ // Check children, and recurse.
+ switch (_type) {
+ case Type_XNode:
+ assert(_union.xnode.left != 0 && "Null left child");
+ assert(_union.xnode.left->has_parent(this) && "Incorrect parent");
+ assert(_union.xnode.right != 0 && "Null right child");
+ assert(_union.xnode.right->has_parent(this) && "Incorrect parent");
+ _union.xnode.left->assert_valid(tree_complete);
+ _union.xnode.right->assert_valid(tree_complete);
+ break;
+ case Type_YNode:
+ assert(_union.ynode.below != 0 && "Null below child");
+ assert(_union.ynode.below->has_parent(this) && "Incorrect parent");
+ assert(_union.ynode.above != 0 && "Null above child");
+ assert(_union.ynode.above->has_parent(this) && "Incorrect parent");
+ _union.ynode.below->assert_valid(tree_complete);
+ _union.ynode.above->assert_valid(tree_complete);
+ break;
+ case Type_TrapezoidNode:
+ assert(_union.trapezoid != 0 && "Null trapezoid");
+ assert(_union.trapezoid->trapezoid_node == this &&
+ "Incorrect trapezoid node");
+ _union.trapezoid->assert_valid(tree_complete);
+ break;
+ }
+#endif
+}
+
+void
+TrapezoidMapTriFinder::Node::get_stats(int depth,
+ NodeStats& stats) const
+{
+ stats.node_count++;
+ if (depth > stats.max_depth)
+ stats.max_depth = depth;
+ bool new_node = stats.unique_nodes.insert(this).second;
+ if (new_node)
+ stats.max_parent_count = std::max(stats.max_parent_count,
+ static_cast<long>(_parents.size()));
+
+ switch (_type) {
+ case Type_XNode:
+ _union.xnode.left->get_stats(depth+1, stats);
+ _union.xnode.right->get_stats(depth+1, stats);
+ break;
+ case Type_YNode:
+ _union.ynode.below->get_stats(depth+1, stats);
+ _union.ynode.above->get_stats(depth+1, stats);
+ break;
+ default: // Type_TrapezoidNode:
+ stats.unique_trapezoid_nodes.insert(this);
+ stats.trapezoid_count++;
+ stats.sum_trapezoid_depth += depth;
+ break;
+ }
+}
+
+int
+TrapezoidMapTriFinder::Node::get_tri() const
+{
+ switch (_type) {
+ case Type_XNode:
+ return _union.xnode.point->tri;
+ case Type_YNode:
+ if (_union.ynode.edge->triangle_above != -1)
+ return _union.ynode.edge->triangle_above;
+ else
+ return _union.ynode.edge->triangle_below;
+ default: // Type_TrapezoidNode:
+ assert(_union.trapezoid->below.triangle_above ==
+ _union.trapezoid->above.triangle_below &&
+ "Inconsistent triangle indices from trapezoid edges");
+ return _union.trapezoid->below.triangle_above;
+ }
+}
+
+bool
+TrapezoidMapTriFinder::Node::has_child(const Node* child) const
+{
+ assert(child != 0 && "Null child node");
+ switch (_type) {
+ case Type_XNode:
+ return (_union.xnode.left == child || _union.xnode.right == child);
+ case Type_YNode:
+ return (_union.ynode.below == child ||
+ _union.ynode.above == child);
+ default: // Type_TrapezoidNode:
+ return false;
+ }
+}
+
+bool
+TrapezoidMapTriFinder::Node::has_no_parents() const
+{
+ return _parents.empty();
+}
+
+bool
+TrapezoidMapTriFinder::Node::has_parent(const Node* parent) const
+{
+ return (std::find(_parents.begin(), _parents.end(), parent) !=
+ _parents.end());
+}
+
+void
+TrapezoidMapTriFinder::Node::print(int depth /* = 0 */) const
+{
+ for (int i = 0; i < depth; ++i) std::cout << " ";
+ switch (_type) {
+ case Type_XNode:
+ std::cout << "XNode " << *_union.xnode.point << std::endl;
+ _union.xnode.left->print(depth + 1);
+ _union.xnode.right->print(depth + 1);
+ break;
+ case Type_YNode:
+ std::cout << "YNode " << *_union.ynode.edge << std::endl;
+ _union.ynode.below->print(depth + 1);
+ _union.ynode.above->print(depth + 1);
+ break;
+ case Type_TrapezoidNode:
+ std::cout << "Trapezoid ll="
+ << _union.trapezoid->get_lower_left_point() << " lr="
+ << _union.trapezoid->get_lower_right_point() << " ul="
+ << _union.trapezoid->get_upper_left_point() << " ur="
+ << _union.trapezoid->get_upper_right_point() << std::endl;
+ break;
+ }
+}
+
+bool
+TrapezoidMapTriFinder::Node::remove_parent(Node* parent)
+{
+ assert(parent != 0 && "Null parent");
+ assert(parent != this && "Cannot be parent of self");
+ Parents::iterator it = std::find(_parents.begin(), _parents.end(), parent);
+ assert(it != _parents.end() && "Parent not in collection");
+ _parents.erase(it);
+ return _parents.empty();
+}
+
+void
+TrapezoidMapTriFinder::Node::replace_child(Node* old_child, Node* new_child)
+{
+ switch (_type) {
+ case Type_XNode:
+ assert((_union.xnode.left == old_child ||
+ _union.xnode.right == old_child) && "Not a child Node");
+ assert(new_child != 0 && "Null child node");
+ if (_union.xnode.left == old_child)
+ _union.xnode.left = new_child;
+ else
+ _union.xnode.right = new_child;
+ break;
+ case Type_YNode:
+ assert((_union.ynode.below == old_child ||
+ _union.ynode.above == old_child) && "Not a child node");
+ assert(new_child != 0 && "Null child node");
+ if (_union.ynode.below == old_child)
+ _union.ynode.below = new_child;
+ else
+ _union.ynode.above = new_child;
+ break;
+ case Type_TrapezoidNode:
+ assert(0 && "Invalid type for this operation");
+ break;
+ }
+ old_child->remove_parent(this);
+ new_child->add_parent(this);
+}
+
+void
+TrapezoidMapTriFinder::Node::replace_with(Node* new_node)
+{
+ assert(new_node != 0 && "Null replacement node");
+ // Replace the child of each parent with new_node. As each parent has its
+ // child replaced it is removed from the _parents collection.
+ while (!_parents.empty())
+ _parents.front()->replace_child(this, new_node);
+}
+
+const TrapezoidMapTriFinder::Node*
+TrapezoidMapTriFinder::Node::search(const XY& xy)
+{
+ switch (_type) {
+ case Type_XNode:
+ if (xy == *_union.xnode.point)
+ return this;
+ else if (xy.is_right_of(*_union.xnode.point))
+ return _union.xnode.right->search(xy);
+ else
+ return _union.xnode.left->search(xy);
+ case Type_YNode: {
+ int orient = _union.ynode.edge->get_point_orientation(xy);
+ if (orient == 0)
+ return this;
+ else if (orient < 0)
+ return _union.ynode.above->search(xy);
+ else
+ return _union.ynode.below->search(xy);
+ }
+ default: // Type_TrapezoidNode:
+ return this;
+ }
+}
+
+TrapezoidMapTriFinder::Trapezoid*
+TrapezoidMapTriFinder::Node::search(const Edge& edge)
+{
+ switch (_type) {
+ case Type_XNode:
+ if (edge.left == _union.xnode.point)
+ return _union.xnode.right->search(edge);
+ else {
+ if (edge.left->is_right_of(*_union.xnode.point))
+ return _union.xnode.right->search(edge);
+ else
+ return _union.xnode.left->search(edge);
+ }
+ case Type_YNode:
+ if (edge.left == _union.ynode.edge->left) {
+ // Coinciding left edge points.
+ if (edge.get_slope() == _union.ynode.edge->get_slope()) {
+ if (_union.ynode.edge->triangle_above ==
+ edge.triangle_below)
+ return _union.ynode.above->search(edge);
+ else if (_union.ynode.edge->triangle_below ==
+ edge.triangle_above)
+ return _union.ynode.below->search(edge);
+ else {
+ assert(0 &&
+ "Invalid triangulation, common left points");
+ return 0;
+ }
+ }
+ if (edge.get_slope() > _union.ynode.edge->get_slope())
+ return _union.ynode.above->search(edge);
+ else
+ return _union.ynode.below->search(edge);
+ }
+ else if (edge.right == _union.ynode.edge->right) {
+ // Coinciding right edge points.
+ if (edge.get_slope() == _union.ynode.edge->get_slope()) {
+ if (_union.ynode.edge->triangle_above ==
+ edge.triangle_below)
+ return _union.ynode.above->search(edge);
+ else if (_union.ynode.edge->triangle_below ==
+ edge.triangle_above)
+ return _union.ynode.below->search(edge);
+ else {
+ assert(0 &&
+ "Invalid triangulation, common right points");
+ return 0;
+ }
+ }
+ if (edge.get_slope() > _union.ynode.edge->get_slope())
+ return _union.ynode.below->search(edge);
+ else
+ return _union.ynode.above->search(edge);
+ }
+ else {
+ int orient =
+ _union.ynode.edge->get_point_orientation(*edge.left);
+ if (orient == 0) {
+ // edge.left lies on _union.ynode.edge
+ if (_union.ynode.edge->point_above != 0 &&
+ edge.has_point(_union.ynode.edge->point_above))
+ orient = -1;
+ else if (_union.ynode.edge->point_below != 0 &&
+ edge.has_point(_union.ynode.edge->point_below))
+ orient = +1;
+ else {
+ assert(0 && "Invalid triangulation, point on edge");
+ return 0;
+ }
+ }
+ if (orient < 0)
+ return _union.ynode.above->search(edge);
+ else
+ return _union.ynode.below->search(edge);
+ }
+ default: // Type_TrapezoidNode:
+ return _union.trapezoid;
+ }
+}
+
+TrapezoidMapTriFinder::Trapezoid::Trapezoid(const Point* left_,
+ const Point* right_,
+ const Edge& below_,
+ const Edge& above_)
+ : left(left_), right(right_), below(below_), above(above_),
+ lower_left(0), lower_right(0), upper_left(0), upper_right(0),
+ trapezoid_node(0)
+{
+ assert(left != 0 && "Null left point");
+ assert(right != 0 && "Null right point");
+ assert(right->is_right_of(*left) && "Incorrect point order");
+}
+
+void
+TrapezoidMapTriFinder::Trapezoid::assert_valid(bool tree_complete) const
+{
+#ifndef NDEBUG
+ assert(left != 0 && "Null left point");
+ assert(right != 0 && "Null right point");
+
+ if (lower_left != 0) {
+ assert(lower_left->below == below &&
+ lower_left->lower_right == this &&
+ "Incorrect lower_left trapezoid");
+ assert(get_lower_left_point() == lower_left->get_lower_right_point() &&
+ "Incorrect lower left point");
+ }
+
+ if (lower_right != 0) {
+ assert(lower_right->below == below &&
+ lower_right->lower_left == this &&
+ "Incorrect lower_right trapezoid");
+ assert(get_lower_right_point() == lower_right->get_lower_left_point() &&
+ "Incorrect lower right point");
+ }
+
+ if (upper_left != 0) {
+ assert(upper_left->above == above &&
+ upper_left->upper_right == this &&
+ "Incorrect upper_left trapezoid");
+ assert(get_upper_left_point() == upper_left->get_upper_right_point() &&
+ "Incorrect upper left point");
+ }
+
+ if (upper_right != 0) {
+ assert(upper_right->above == above &&
+ upper_right->upper_left == this &&
+ "Incorrect upper_right trapezoid");
+ assert(get_upper_right_point() == upper_right->get_upper_left_point() &&
+ "Incorrect upper right point");
+ }
+
+ assert(trapezoid_node != 0 && "Null trapezoid_node");
+
+ if (tree_complete) {
+ assert(below.triangle_above == above.triangle_below &&
+ "Inconsistent triangle indices from trapezoid edges");
+ }
+#endif
+}
+
+XY
+TrapezoidMapTriFinder::Trapezoid::get_lower_left_point() const
+{
+ double x = left->x;
+ return XY(x, below.get_y_at_x(x));
+}
+
+XY
+TrapezoidMapTriFinder::Trapezoid::get_lower_right_point() const
+{
+ double x = right->x;
+ return XY(x, below.get_y_at_x(x));
+}
+
+XY
+TrapezoidMapTriFinder::Trapezoid::get_upper_left_point() const
+{
+ double x = left->x;
+ return XY(x, above.get_y_at_x(x));
+}
+
+XY
+TrapezoidMapTriFinder::Trapezoid::get_upper_right_point() const
+{
+ double x = right->x;
+ return XY(x, above.get_y_at_x(x));
+}
+
+void
+TrapezoidMapTriFinder::Trapezoid::print_debug() const
+{
+ std::cout << "Trapezoid " << this
+ << " left=" << *left
+ << " right=" << *right
+ << " below=" << below
+ << " above=" << above
+ << " ll=" << lower_left
+ << " lr=" << lower_right
+ << " ul=" << upper_left
+ << " ur=" << upper_right
+ << " node=" << trapezoid_node
+ << " llp=" << get_lower_left_point()
+ << " lrp=" << get_lower_right_point()
+ << " ulp=" << get_upper_left_point()
+ << " urp=" << get_upper_right_point() << std::endl;
+}
+
+void
+TrapezoidMapTriFinder::Trapezoid::set_lower_left(Trapezoid* lower_left_)
+{
+ lower_left = lower_left_;
+ if (lower_left != 0)
+ lower_left->lower_right = this;
+}
+
+void
+TrapezoidMapTriFinder::Trapezoid::set_lower_right(Trapezoid* lower_right_)
+{
+ lower_right = lower_right_;
+ if (lower_right != 0)
+ lower_right->lower_left = this;
+}
+
+void
+TrapezoidMapTriFinder::Trapezoid::set_upper_left(Trapezoid* upper_left_)
+{
+ upper_left = upper_left_;
+ if (upper_left != 0)
+ upper_left->upper_right = this;
+}
+
+void
+TrapezoidMapTriFinder::Trapezoid::set_upper_right(Trapezoid* upper_right_)
+{
+ upper_right = upper_right_;
+ if (upper_right != 0)
+ upper_right->upper_left = this;
+}
+
+
+
+RandomNumberGenerator::RandomNumberGenerator(unsigned long seed)
+ : _m(21870), _a(1291), _c(4621), _seed(seed % _m)
+{}
+
+unsigned long
+RandomNumberGenerator::operator()(unsigned long max_value)
+{
+ _seed = (_seed*_a + _c) % _m;
+ return (_seed*max_value) / _m;
+}
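+// Worked example of the update above (illustration only): starting from the
+// seed 1234 used in TrapezoidMapTriFinder::initialize(), the first call sets
+// _seed = (1234*1291 + 4621) % 21870 = 1205, so operator()(100) returns
+// (1205*100) / 21870 = 5. The sequence is fully determined by the seed, so
+// the edge shuffle is identical on every platform.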
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/_tri.h b/contrib/python/matplotlib/py2/matplotlib/tri/_tri.h
new file mode 100644
index 00000000000..fc24af50f00
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/_tri.h
@@ -0,0 +1,815 @@
+/*
+ * Unstructured triangular grid functions, particularly contouring.
+ *
+ * There are two main classes: Triangulation and TriContourGenerator.
+ *
+ * Triangulation
+ * -------------
+ * Triangulation is an unstructured triangular grid with npoints and ntri
+ * triangles. It consists of point x and y coordinates, and information about
+ * the triangulation stored in an integer array of shape (ntri,3) called
+ * triangles. Each triangle is represented by three point indices (in the
+ * range 0 to npoints-1) that comprise the triangle, ordered anticlockwise.
+ * There is an optional mask of length ntri which can be used to mask out
+ * triangles and has the same result as removing those triangles from the
+ * 'triangles' array.
+ *
+ * A particular edge of a triangulation is termed a TriEdge, which is a
+ * triangle index and an edge index in the range 0 to 2. TriEdge(tri,edge)
+ * refers to the edge that starts at point index triangles(tri,edge) and ends
+ * at point index triangles(tri,(edge+1)%3).
+ *
+ * Various derived fields are calculated when they are first needed. The
+ * triangle connectivity is stored in a neighbors array of shape (ntri,3) such
+ * that neighbors(tri,edge) is the index of the triangle that adjoins the
+ * TriEdge(tri,edge), or -1 if there is no such neighbor.
+ *
+ * A triangulation has one or more boundaries, each of which is a 1D array of
+ * the TriEdges that comprise the boundary, in order following the boundary
+ * with non-masked triangles on the left.
+ *
+ * TriContourGenerator
+ * -------------------
+ * A TriContourGenerator generates contours for a particular Triangulation.
+ * The process followed is different for non-filled and filled contours, with
+ * one and two contour levels respectively. In both cases boundary contour
+ * lines are found first, then interior lines.
+ *
+ * Boundary lines start and end on a boundary. They are found by traversing
+ * the triangulation boundary edges until a suitable start point is found, and
+ * then the contour line is followed across the interior of the triangulation
+ * until it ends on another boundary edge. For a non-filled contour this
+ * completes a line, whereas a filled contour continues by following the
+ * boundary around until either another boundary start point is found or the
+ * start of the contour line is reached. Filled contour generation stores
+ * boolean flags to indicate which boundary edges have already been traversed
+ * so that they are not dealt with twice. Similar flags are used to indicate
+ * which triangles have been used when following interior lines.
+ *
+ * Interior lines do not intersect any boundaries. They are found by
+ * traversing all triangles that have not yet been visited until a suitable
+ * starting point is found, and then the contour line is followed across the
+ * interior of the triangulation until it returns to the start point. For
+ * filled contours this process is repeated for both lower and upper contour
+ * levels, and the direction of traversal is reversed for upper contours.
+ *
+ * Working out in which direction a contour line leaves a triangle uses a
+ * lookup table. A triangle has three points, each of which has a z-value
+ * which is either less than the contour level or not. Hence there are 8
+ * configurations to deal with, 2 of which do not have a contour line (all
+ * points below or above (including the same as) the contour level) and 6 that
+ * do. See the function get_exit_edge for details.
+ */
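+/* As a concrete example of the layout described above (values chosen purely
+ * for illustration): a unit square with points (0,0), (1,0), (1,1), (0,1)
+ * split along its diagonal has
+ *
+ *     triangles = [[0,1,2],
+ *                  [0,2,3]]    (both ordered anticlockwise)
+ *
+ * and the derived neighbors array is
+ *
+ *     neighbors = [[-1,-1, 1],
+ *                  [ 0,-1,-1]]
+ *
+ * since only the diagonal edge 0-2 is shared: it is TriEdge(0,2) in triangle
+ * 0 and TriEdge(1,0) in triangle 1, and every other TriEdge lies on the
+ * boundary and so has neighbor -1. */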
+#ifndef _TRI_H
+#define _TRI_H
+
+#include "src/numpy_cpp.h"
+
+#include <iostream>
+#include <list>
+#include <map>
+#include <set>
+#include <vector>
+
+
+
+/* An edge of a triangle consisting of a triangle index in the range 0 to
+ * ntri-1 and an edge index in the range 0 to 2. Edge i goes from the
+ * triangle's point i to point (i+1)%3. */
+struct TriEdge
+{
+ TriEdge();
+ TriEdge(int tri_, int edge_);
+ bool operator<(const TriEdge& other) const;
+ bool operator==(const TriEdge& other) const;
+ bool operator!=(const TriEdge& other) const;
+ friend std::ostream& operator<<(std::ostream& os, const TriEdge& tri_edge);
+
+ int tri, edge;
+};
+
+// 2D point with x,y coordinates.
+struct XY
+{
+ XY();
+ XY(const double& x_, const double& y_);
+ double angle() const; // Angle in radians with respect to x-axis.
+ double cross_z(const XY& other) const; // z-component of cross product.
+ bool is_right_of(const XY& other) const; // Compares x then y.
+ bool operator==(const XY& other) const;
+ bool operator!=(const XY& other) const;
+ XY operator*(const double& multiplier) const;
+ const XY& operator+=(const XY& other);
+ const XY& operator-=(const XY& other);
+ XY operator+(const XY& other) const;
+ XY operator-(const XY& other) const;
+ friend std::ostream& operator<<(std::ostream& os, const XY& xy);
+
+ double x, y;
+};
+
+// 3D point with x,y,z coordinates.
+struct XYZ
+{
+ XYZ(const double& x_, const double& y_, const double& z_);
+ XYZ cross(const XYZ& other) const;
+ double dot(const XYZ& other) const;
+ double length_squared() const;
+ XYZ operator-(const XYZ& other) const;
+ friend std::ostream& operator<<(std::ostream& os, const XYZ& xyz);
+
+ double x, y, z;
+};
+
+// 2D bounding box, which may be empty.
+class BoundingBox
+{
+public:
+ BoundingBox();
+ void add(const XY& point);
+ void expand(const XY& delta);
+
+ // Consider these member variables read-only.
+ bool empty;
+ XY lower, upper;
+};
+
+/* A single line of a contour, which may be a closed line loop or an open line
+ * strip. Identical adjacent points are avoided using insert_unique() and
+ * push_back(), and a closed line loop should also not have identical first and
+ * last points. */
+class ContourLine : public std::vector<XY>
+{
+public:
+ ContourLine();
+ void insert_unique(iterator pos, const XY& point);
+ void push_back(const XY& point);
+ void write() const;
+};
+
+// A Contour is a collection of zero or more ContourLines.
+typedef std::vector<ContourLine> Contour;
+
+// Debug contour writing function.
+void write_contour(const Contour& contour);
+
+
+
+
+/* Triangulation with npoints points and ntri triangles. Derived fields are
+ * calculated when they are first needed. */
+class Triangulation
+{
+public:
+ typedef numpy::array_view<const double, 1> CoordinateArray;
+ typedef numpy::array_view<double, 2> TwoCoordinateArray;
+ typedef numpy::array_view<int, 2> TriangleArray;
+ typedef numpy::array_view<const bool, 1> MaskArray;
+ typedef numpy::array_view<int, 2> EdgeArray;
+ typedef numpy::array_view<int, 2> NeighborArray;
+
+ /* A single boundary is a vector of the TriEdges that make up that boundary
+ * following it around with unmasked triangles on the left. */
+ typedef std::vector<TriEdge> Boundary;
+ typedef std::vector<Boundary> Boundaries;
+
+ /* Constructor with optional mask, edges and neighbors. The latter two
+ * are calculated when first needed.
+ * x: double array of shape (npoints) of points' x-coordinates.
+ * y: double array of shape (npoints) of points' y-coordinates.
+ * triangles: int array of shape (ntri,3) of triangle point indices.
+ * Those ordered clockwise are changed to be anticlockwise.
+ * mask: Optional bool array of shape (ntri) indicating which triangles
+ * are masked.
+ * edges: Optional int array of shape (?,2) of start and end point
+ * indices, each edge (start,end and end,start) appearing only
+ * once.
+ * neighbors: Optional int array of shape (ntri,3) indicating which
+ * triangles are the neighbors of which TriEdges, or -1 if
+ * there is no such neighbor.
+ * correct_triangle_orientations: Whether or not to correct triangle
+ * orientations so that vertices are
+ * ordered anticlockwise. */
+ Triangulation(const CoordinateArray& x,
+ const CoordinateArray& y,
+ const TriangleArray& triangles,
+ const MaskArray& mask,
+ const EdgeArray& edges,
+ const NeighborArray& neighbors,
+ int correct_triangle_orientations);
+
+ /* Calculate plane equation coefficients for all unmasked triangles from
+ * the point (x,y) coordinates and point z-array of shape (npoints) passed
+ * in via the args. Returned array has shape (ntri,3) and allows
+ * z-value at (x,y) coordinates in triangle tri to be calculated using
+ * z = array[tri,0]*x + array[tri,1]*y + array[tri,2]. */
+ TwoCoordinateArray calculate_plane_coefficients(const CoordinateArray& z);
+
+ // Return the boundaries collection, creating it if necessary.
+ const Boundaries& get_boundaries() const;
+
+ // Return which boundary and boundary edge the specified TriEdge is.
+ void get_boundary_edge(const TriEdge& triEdge,
+ int& boundary,
+ int& edge) const;
+
+ /* Return the edges array, creating it if necessary. */
+ EdgeArray& get_edges();
+
+ /* Return the triangle index of the neighbor of the specified triangle
+ * edge. */
+ int get_neighbor(int tri, int edge) const;
+
+ /* Return the TriEdge that is the neighbor of the specified triangle edge,
+ * or TriEdge(-1,-1) if there is no such neighbor. */
+ TriEdge get_neighbor_edge(int tri, int edge) const;
+
+ /* Return the neighbors array, creating it if necessary. */
+ NeighborArray& get_neighbors();
+
+ // Return the number of points in this triangulation.
+ int get_npoints() const;
+
+ // Return the number of triangles in this triangulation.
+ int get_ntri() const;
+
+ /* Return the index of the point that is at the start of the specified
+ * triangle edge. */
+ int get_triangle_point(int tri, int edge) const;
+ int get_triangle_point(const TriEdge& tri_edge) const;
+
+ // Return the coordinates of the specified point index.
+ XY get_point_coords(int point) const;
+
+ // Indicates if the specified triangle is masked or not.
+ bool is_masked(int tri) const;
+
+ /* Set or clear the mask array. Clears various derived fields so they are
+ * recalculated when next needed.
+ * mask: bool array of shape (ntri) indicating which triangles are
+ * masked, or an empty array to clear mask. */
+ void set_mask(const MaskArray& mask);
+
+ // Debug function to write boundaries.
+ void write_boundaries() const;
+
+private:
+ // An edge of a triangulation, composed of start and end point indices.
+ struct Edge
+ {
+ Edge() : start(-1), end(-1) {}
+ Edge(int start_, int end_) : start(start_), end(end_) {}
+ bool operator<(const Edge& other) const {
+ return start != other.start ? start < other.start : end < other.end;
+ }
+ int start, end;
+ };
+
+ /* An edge of a boundary of a triangulation, composed of a boundary index
+ * and an edge index within that boundary. Used to index into the
+ * boundaries collection to obtain the corresponding TriEdge. */
+ struct BoundaryEdge
+ {
+ BoundaryEdge() : boundary(-1), edge(-1) {}
+ BoundaryEdge(int boundary_, int edge_)
+ : boundary(boundary_), edge(edge_) {}
+ int boundary, edge;
+ };
+
+ /* Calculate the boundaries collection. Should normally be accessed via
+ * get_boundaries(), which will call this function if necessary. */
+ void calculate_boundaries();
+
+ /* Calculate the edges array. Should normally be accessed via
+ * get_edges(), which will call this function if necessary. */
+ void calculate_edges();
+
+ /* Calculate the neighbors array. Should normally be accessed via
+ * get_neighbors(), which will call this function if necessary. */
+ void calculate_neighbors();
+
+ /* Correct each triangle so that the vertices are ordered in an
+ * anticlockwise manner. */
+ void correct_triangles();
+
+ /* Determine which edge index (0,1 or 2) the specified point index is in
+ * the specified triangle, or -1 if the point is not in the triangle. */
+ int get_edge_in_triangle(int tri, int point) const;
+
+
+
+
+ // Variables shared with python, always set.
+ CoordinateArray _x, _y; // double array (npoints).
+ TriangleArray _triangles; // int array (ntri,3) of triangle point indices,
+ // ordered anticlockwise.
+
+ // Variables shared with python, may be zero.
+ MaskArray _mask; // bool array (ntri).
+
+ // Derived variables shared with python, may be zero. If zero, are
+ // recalculated when needed.
+ EdgeArray _edges; // int array (?,2) of start & end point indices.
+ NeighborArray _neighbors; // int array (ntri,3), neighbor triangle indices
+ // or -1 if no neighbor.
+
+ // Variables internal to C++ only.
+ Boundaries _boundaries;
+
+ // Map used to look up BoundaryEdges from TriEdges. Normally accessed via
+ // get_boundary_edge().
+ typedef std::map<TriEdge, BoundaryEdge> TriEdgeToBoundaryMap;
+ TriEdgeToBoundaryMap _tri_edge_to_boundary_map;
+};
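+/* Minimal C++ usage sketch for Triangulation (illustrative; assumes that x,
+ * y, triangles, mask, edges and neighbors array views have already been
+ * built from numpy arrays, as the Python wrapper does):
+ *
+ *     Triangulation triang(x, y, triangles, mask, edges, neighbors, 1);
+ *     int ntri = triang.get_ntri();
+ *     TriEdge nbr = triang.get_neighbor_edge(0, 2); // neighbor of TriEdge(0,2)
+ *
+ * In the library the object is normally constructed via the Python class
+ * matplotlib.tri.Triangulation rather than directly. */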
+
+
+
+// Contour generator for a triangulation.
+class TriContourGenerator
+{
+public:
+ typedef Triangulation::CoordinateArray CoordinateArray;
+
+ /* Constructor.
+ * triangulation: Triangulation to generate contours for.
+ * z: Double array of shape (npoints) of z-values at triangulation
+ * points. */
+ TriContourGenerator(Triangulation& triangulation,
+ const CoordinateArray& z);
+
+ /* Create and return a non-filled contour.
+ * level: Contour level.
+ * Returns new python list [segs0, segs1, ...] where
+ * segs0: double array of shape (?,2) of point coordinates of first
+ * contour line, etc. */
+ PyObject* create_contour(const double& level);
+
+ /* Create and return a filled contour.
+ * lower_level: Lower contour level.
+ * upper_level: Upper contour level.
+ * Returns new python tuple (segs, kinds) where
+ * segs: double array of shape (n_points,2) of all point coordinates,
+ * kinds: ubyte array of shape (n_points) of all point code types. */
+ PyObject* create_filled_contour(const double& lower_level,
+ const double& upper_level);
+
+private:
+ typedef Triangulation::Boundary Boundary;
+ typedef Triangulation::Boundaries Boundaries;
+
+ /* Clear visited flags.
+ * include_boundaries: Whether to clear boundary flags or not, which are
+ * only used for filled contours. */
+ void clear_visited_flags(bool include_boundaries);
+
+ /* Convert a non-filled Contour from C++ to Python.
+ * Returns new python list [segs0, segs1, ...] where
+ * segs0: double array of shape (?,2) of point coordinates of first
+ * contour line, etc. */
+ PyObject* contour_to_segs(const Contour& contour);
+
+ /* Convert a filled Contour from C++ to Python.
+ * Returns new python tuple (segs, kinds) where
+ * segs: double array of shape (n_points,2) of all point coordinates,
+ * kinds: ubyte array of shape (n_points) of all point code types. */
+ PyObject* contour_to_segs_and_kinds(const Contour& contour);
+
+ /* Return the point on the specified TriEdge that intersects the specified
+ * level. */
+ XY edge_interp(int tri, int edge, const double& level);
+
+ /* Find and follow non-filled contour lines that start and end on a
+ * boundary of the Triangulation.
+ * contour: Contour to add new lines to.
+ * level: Contour level. */
+ void find_boundary_lines(Contour& contour,
+ const double& level);
+
+ /* Find and follow filled contour lines at either of the specified contour
+ * levels that start and end on a boundary of the Triangulation.
+ * contour: Contour to add new lines to.
+ * lower_level: Lower contour level.
+ * upper_level: Upper contour level. */
+ void find_boundary_lines_filled(Contour& contour,
+ const double& lower_level,
+ const double& upper_level);
+
+ /* Find and follow lines at the specified contour level that are
+ * completely in the interior of the Triangulation and hence do not
+ * intersect any boundary.
+ * contour: Contour to add new lines to.
+ * level: Contour level.
+ * on_upper: Whether on upper or lower contour level.
+ * filled: Whether contours are filled or not. */
+ void find_interior_lines(Contour& contour,
+ const double& level,
+ bool on_upper,
+ bool filled);
+
+ /* Follow contour line around boundary of the Triangulation from the
+ * specified TriEdge to its end which can be on either the lower or upper
+ * levels. Only used for filled contours.
+ * contour_line: Contour line to append new points to.
+ * tri_edge: On entry, TriEdge to start from. On exit, TriEdge that is
+ * finished on.
+ * lower_level: Lower contour level.
+ * upper_level: Upper contour level.
+ * on_upper: Whether starts on upper level or not.
+ * Return true if finishes on upper level, false if lower. */
+ bool follow_boundary(ContourLine& contour_line,
+ TriEdge& tri_edge,
+ const double& lower_level,
+ const double& upper_level,
+ bool on_upper);
+
+ /* Follow contour line across interior of Triangulation.
+ * contour_line: Contour line to append new points to.
+ * tri_edge: On entry, TriEdge to start from. On exit, TriEdge that is
+ * finished on.
+ * end_on_boundary: Whether this line ends on a boundary, or loops back
+ * upon itself.
+ * level: Contour level to follow.
+ * on_upper: Whether following upper or lower contour level. */
+ void follow_interior(ContourLine& contour_line,
+ TriEdge& tri_edge,
+ bool end_on_boundary,
+ const double& level,
+ bool on_upper);
+
+ // Return the Triangulation boundaries.
+ const Boundaries& get_boundaries() const;
+
+ /* Return the edge by which the contour line leaves a particular triangle,
+ * which is 0, 1 or 2 if the contour passes through the triangle or -1
+ * otherwise.
+ * tri: Triangle index.
+ * level: Contour level to follow.
+ * on_upper: Whether following upper or lower contour level. */
+ int get_exit_edge(int tri, const double& level, bool on_upper) const;
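+ /* Sketch of the lookup described in the file header (illustrative only;
+ * the exact encoding and table live in get_exit_edge in _tri.cpp): with
+ * p0, p1, p2 the triangle's three point indices, the configuration can be
+ * formed as e.g.
+ *
+ * int config = (get_z(p0) >= level ? 4 : 0) |
+ * (get_z(p1) >= level ? 2 : 0) |
+ * (get_z(p2) >= level ? 1 : 0);
+ *
+ * Configurations 0 and 7 (all points on one side) have no contour line in
+ * the triangle; the other six determine the exit edge, with the direction
+ * of traversal reversed for upper contour levels. */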
+
+ // Return the z-value at the specified point index.
+ const double& get_z(int point) const;
+
+ /* Return the point at which the contour level intersects the line connecting the
+ * two specified point indices. */
+ XY interp(int point1, int point2, const double& level) const;
+
+
+
+ // Variables shared with python, always set.
+ Triangulation& _triangulation;
+ CoordinateArray _z; // double array (npoints).
+
+ // Variables internal to C++ only.
+ typedef std::vector<bool> InteriorVisited; // Size 2*ntri
+ typedef std::vector<bool> BoundaryVisited;
+ typedef std::vector<BoundaryVisited> BoundariesVisited;
+ typedef std::vector<bool> BoundariesUsed;
+
+ InteriorVisited _interior_visited;
+ BoundariesVisited _boundaries_visited; // Only used for filled contours.
+ BoundariesUsed _boundaries_used; // Only used for filled contours.
+};
+
+
+
+/* TriFinder class implemented using the trapezoid map algorithm from the book
+ * "Computational Geometry, Algorithms and Applications", second edition, by
+ * M. de Berg, M. van Kreveld, M. Overmars and O. Schwarzkopf.
+ *
+ * The domain of interest is composed of vertical-sided trapezoids that are
+ * bounded to the left and right by points of the triangulation, and below and
+ * above by edges of the triangulation. Each triangle is represented by 1 or
+ * more of these trapezoids. Edges are inserted one at a time in a random order.
+ *
+ * As the trapezoid map is created, a search tree is also created which allows
+ * fast lookup O(log N) of the trapezoid containing the point of interest.
+ * There are 3 types of node in the search tree: all leaf nodes represent
+ * trapezoids and all branch nodes have 2 child nodes and are either x-nodes or
+ * y-nodes. X-nodes represent points in the triangulation, and their 2 children
+ * refer to those parts of the search tree to the left and right of the point.
+ * Y-nodes represent edges in the triangulation, and their 2 children refer to
+ * those parts of the search tree below and above the edge.
+ *
+ * Nodes can be repeated throughout the search tree, and each is reference
+ * counted through the multiple parent nodes it is a child of.
+ *
+ * The algorithm is only intended to work with valid triangulations, i.e. it
+ * must not contain duplicate points, triangles formed from colinear points, or
+ * overlapping triangles. It does have some tolerance to triangles formed from
+ * colinear points but only in the simplest of cases. No explicit testing of
+ * the validity of the triangulation is performed as this is a computationally
+ * more complex task than the trifinding itself. */
+class TrapezoidMapTriFinder
+{
+public:
+ typedef Triangulation::CoordinateArray CoordinateArray;
+ typedef numpy::array_view<int, 1> TriIndexArray;
+
+ /* Constructor. A separate call to initialize() is required to initialize
+ * the object before use.
+ * triangulation: Triangulation to find triangles in. */
+ TrapezoidMapTriFinder(Triangulation& triangulation);
+
+ ~TrapezoidMapTriFinder();
+
+ /* Return an array of triangle indices. Takes 1D arrays x and y of
+ * point coordinates, and returns an array of the same size containing the
+ * indices of the triangles at those points. */
+ TriIndexArray find_many(const CoordinateArray& x, const CoordinateArray& y);
+
+ /* Return a reference to a new python list containing the following
+ * statistics about the tree:
+ * 0: number of nodes (tree size)
+ * 1: number of unique nodes (number of unique Node objects in tree)
+ * 2: number of trapezoids (tree leaf nodes)
+ * 3: number of unique trapezoids
+ * 4: maximum parent count (max number of times a node is repeated in
+ * tree)
+ * 5: maximum depth of tree (one more than the maximum number of
+ * comparisons needed to search through the tree)
+ * 6: mean of all trapezoid depths (one more than the average number of
+ * comparisons needed to search through the tree) */
+ PyObject* get_tree_stats();
+
+ /* Initialize this object before use. May be called multiple times, if,
+ * for example, the triangulation is changed by setting the mask. */
+ void initialize();
+
+ // Print the search tree as text to stdout; useful for debug purposes.
+ void print_tree();
+
+private:
+ /* A Point consists of x,y coordinates as well as the index of a triangle
+ * associated with the point, so that a search at this point's coordinates
+ * can return a valid triangle index. */
+ struct Point : XY
+ {
+ Point() : XY(), tri(-1) {}
+ Point(const double& x, const double& y) : XY(x,y), tri(-1) {}
+ explicit Point(const XY& xy) : XY(xy), tri(-1) {}
+
+ int tri;
+ };
+
+ /* An Edge connects two Points, left and right. It is always true that
+ * right->is_right_of(*left). Stores indices of triangles below and above
+ * the Edge which are used to map from trapezoid to triangle index. Also
+ * stores pointers to the 3rd points of the below and above triangles,
+ * which are only used to disambiguate triangles with colinear points. */
+ struct Edge
+ {
+ Edge(const Point* left_,
+ const Point* right_,
+ int triangle_below_,
+ int triangle_above_,
+ const Point* point_below_,
+ const Point* point_above_);
+
+ // Return -1 if point to left of edge, 0 if on edge, +1 if to right.
+ int get_point_orientation(const XY& xy) const;
+
+ // Return slope of edge, even if vertical (divide by zero is OK here).
+ double get_slope() const;
+
+ /* Return y-coordinate of point on edge with specified x-coordinate.
+ * x must be within the x-limits of this edge. */
+ double get_y_at_x(const double& x) const;
+
+ // Return true if the specified point is either of the edge end points.
+ bool has_point(const Point* point) const;
+
+ bool operator==(const Edge& other) const;
+
+ friend std::ostream& operator<<(std::ostream& os, const Edge& edge)
+ {
+ return os << *edge.left << "->" << *edge.right;
+ }
+
+ void print_debug() const;
+
+
+ const Point* left; // Not owned.
+ const Point* right; // Not owned.
+ int triangle_below; // Index of triangle below (to right of) Edge.
+ int triangle_above; // Index of triangle above (to left of) Edge.
+ const Point* point_below; // Used only for resolving ambiguous cases;
+ const Point* point_above; // is 0 if corresponding triangle is -1
+ };
+
+ class Node; // Forward declaration.
+
+ // Helper structure used by TrapezoidMapTriFinder::get_tree_stats.
+ struct NodeStats
+ {
+ NodeStats()
+ : node_count(0), trapezoid_count(0), max_parent_count(0),
+ max_depth(0), sum_trapezoid_depth(0.0)
+ {}
+
+ long node_count, trapezoid_count, max_parent_count, max_depth;
+ double sum_trapezoid_depth;
+ std::set<const Node*> unique_nodes, unique_trapezoid_nodes;
+ };
+
+ struct Trapezoid; // Forward declaration.
+
+ /* Node of the trapezoid map search tree. There are 3 possible types:
+ * Type_XNode, Type_YNode and Type_TrapezoidNode. Data members are
+ * represented using a union: an XNode has a Point and 2 child nodes
+ * (left and right of the point), a YNode has an Edge and 2 child nodes
+ * (below and above the edge), and a TrapezoidNode has a Trapezoid.
+ * Each Node has multiple parents so it can appear in the search tree
+ * multiple times without having to create duplicate identical Nodes.
+ * The parent collection acts as a reference count to the number of times
+ * a Node occurs in the search tree. When the parent count is reduced to
+ * zero a Node can be safely deleted. */
+ class Node
+ {
+ public:
+ Node(const Point* point, Node* left, Node* right);// Type_XNode.
+ Node(const Edge* edge, Node* below, Node* above); // Type_YNode.
+ Node(Trapezoid* trapezoid); // Type_TrapezoidNode.
+
+ ~Node();
+
+ void add_parent(Node* parent);
+
+ /* Recurse through the search tree and assert that everything is valid.
+ * Reduces to a no-op if NDEBUG is defined. */
+ void assert_valid(bool tree_complete) const;
+
+ // Recurse through the tree to return statistics about it.
+ void get_stats(int depth, NodeStats& stats) const;
+
+ // Return the index of the triangle corresponding to this node.
+ int get_tri() const;
+
+ bool has_child(const Node* child) const;
+ bool has_no_parents() const;
+ bool has_parent(const Node* parent) const;
+
+ /* Recurse through the tree and print a textual representation to
+ * stdout. Argument depth used to indent for readability. */
+ void print(int depth = 0) const;
+
+ /* Remove a parent from this Node. Return true if no parents remain
+ * so that this Node can be deleted. */
+ bool remove_parent(Node* parent);
+
+ void replace_child(Node* old_child, Node* new_child);
+
+ // Replace this node with the specified new_node in all parents.
+ void replace_with(Node* new_node);
+
+ /* Recursive search through the tree to find the Node containing the
+ * specified XY point. */
+ const Node* search(const XY& xy);
+
+ /* Recursive search through the tree to find the Trapezoid containing
+ * the left endpoint of the specified Edge. Return 0 if fails, which
+ * can only happen if the triangulation is invalid. */
+ Trapezoid* search(const Edge& edge);
+
+ /* Copy constructor and assignment operator defined but not implemented
+ * to prevent objects being copied. */
+ Node(const Node& other);
+ Node& operator=(const Node& other);
+
+ private:
+ typedef enum {
+ Type_XNode,
+ Type_YNode,
+ Type_TrapezoidNode
+ } Type;
+ Type _type;
+
+ union {
+ struct {
+ const Point* point; // Not owned.
+ Node* left; // Owned.
+ Node* right; // Owned.
+ } xnode;
+ struct {
+ const Edge* edge; // Not owned.
+ Node* below; // Owned.
+ Node* above; // Owned.
+ } ynode;
+ Trapezoid* trapezoid; // Owned.
+ } _union;
+
+ typedef std::list<Node*> Parents;
+ Parents _parents; // Not owned.
+ };
+
+ /* A Trapezoid is bounded by Points to left and right, and Edges below and
+ * above. Has up to 4 neighboring Trapezoids to lower/upper left/right.
+ * Lower left neighbor is Trapezoid to left that shares the below Edge, or
+ * is 0 if there is no such Trapezoid (and similar for other neighbors).
+ * To obtain the index of the triangle corresponding to a particular
+ * Trapezoid, use the Edge member variables below.triangle_above or
+ * above.triangle_below. */
+ struct Trapezoid
+ {
+ Trapezoid(const Point* left_,
+ const Point* right_,
+ const Edge& below_,
+ const Edge& above_);
+
+ /* Assert that this Trapezoid is valid. Reduces to a no-op if NDEBUG
+ * is defined. */
+ void assert_valid(bool tree_complete) const;
+
+ /* Return one of the 4 corner points of this Trapezoid. Only used for
+ * debugging purposes. */
+ XY get_lower_left_point() const;
+ XY get_lower_right_point() const;
+ XY get_upper_left_point() const;
+ XY get_upper_right_point() const;
+
+ void print_debug() const;
+
+ /* Set one of the 4 neighbor trapezoids and the corresponding reverse
+ * Trapezoid of the new neighbor (if it is not 0), so that they are
+ * consistent. */
+ void set_lower_left(Trapezoid* lower_left_);
+ void set_lower_right(Trapezoid* lower_right_);
+ void set_upper_left(Trapezoid* upper_left_);
+ void set_upper_right(Trapezoid* upper_right_);
+
+ /* Copy constructor and assignment operator defined but not implemented
+ * to prevent objects being copied. */
+ Trapezoid(const Trapezoid& other);
+ Trapezoid& operator=(const Trapezoid& other);
+
+
+ const Point* left; // Not owned.
+ const Point* right; // Not owned.
+ const Edge& below;
+ const Edge& above;
+
+ // 4 neighboring trapezoids, can be 0, not owned.
+ Trapezoid* lower_left; // Trapezoid to left that shares below
+ Trapezoid* lower_right; // Trapezoid to right that shares below
+ Trapezoid* upper_left; // Trapezoid to left that shares above
+ Trapezoid* upper_right; // Trapezoid to right that shares above
+
+ Node* trapezoid_node; // Node that owns this Trapezoid.
+ };
+
+
+ // Add the specified Edge to the search tree, returning true if successful.
+ bool add_edge_to_tree(const Edge& edge);
+
+ // Clear all memory allocated by this object.
+ void clear();
+
+ // Return the triangle index at the specified point, or -1 if no triangle.
+ int find_one(const XY& xy);
+
+ /* Determine the trapezoids that the specified Edge intersects, returning
+ * true if successful. */
+ bool find_trapezoids_intersecting_edge(const Edge& edge,
+ std::vector<Trapezoid*>& trapezoids);
+
+
+
+ // Variables shared with python, always set.
+ Triangulation& _triangulation;
+
+ // Variables internal to C++ only.
+ Point* _points; // Array of all points in triangulation plus corners of
+ // enclosing rectangle. Owned.
+
+ typedef std::vector<Edge> Edges;
+ Edges _edges; // All Edges in triangulation plus bottom and top Edges of
+ // enclosing rectangle.
+
+ Node* _tree; // Root node of the trapezoid map search tree. Owned.
+};
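+/* Minimal usage sketch of the trifinder from C++ (illustrative; assumes x
+ * and y are CoordinateArray views of the query coordinates):
+ *
+ *     TrapezoidMapTriFinder finder(triangulation);
+ *     finder.initialize();  // build trapezoid map and search tree
+ *     TriIndexArray tris = finder.find_many(x, y);  // -1 where no triangle
+ *
+ * In the library this is normally driven through the Python class
+ * matplotlib.tri.TrapezoidMapTriFinder. */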
+
+
+
+/* Linear congruential random number generator. Edges in the triangulation are
+ * randomly shuffled before being added to the trapezoid map. Want the
+ * shuffling to be identical across different operating systems and the same
+ * regardless of previous random number use. Would prefer to use a STL or
+ * Boost random number generator, but support is not consistent across
+ * different operating systems so implementing own here.
+ *
+ * This is not particularly random, but is perfectly adequate for the use here.
+ * Coefficients taken from Numerical Recipes in C. */
+class RandomNumberGenerator
+{
+public:
+ RandomNumberGenerator(unsigned long seed);
+
+ // Return random integer in the range 0 to max_value-1.
+ unsigned long operator()(unsigned long max_value);
+
+private:
+ const unsigned long _m, _a, _c;
+ unsigned long _seed;
+};
+
+#endif
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/_tri_wrapper.cpp b/contrib/python/matplotlib/py2/matplotlib/tri/_tri_wrapper.cpp
new file mode 100644
index 00000000000..8ad269b3538
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/_tri_wrapper.cpp
@@ -0,0 +1,550 @@
+#include "_tri.h"
+#include "src/mplutils.h"
+#include "src/py_exceptions.h"
+
+
+/* Triangulation */
+
+typedef struct
+{
+ PyObject_HEAD
+ Triangulation* ptr;
+} PyTriangulation;
+
+static PyTypeObject PyTriangulationType;
+
+static PyObject* PyTriangulation_new(PyTypeObject* type, PyObject* args, PyObject* kwds)
+{
+ PyTriangulation* self;
+ self = (PyTriangulation*)type->tp_alloc(type, 0);
+ self->ptr = NULL;
+ return (PyObject*)self;
+}
+
+const char* PyTriangulation_init__doc__ =
+ "Triangulation(x, y, triangles, mask, edges, neighbors)\n"
+ "\n"
+ "Create a new C++ Triangulation object\n"
+ "This should not be called directly, instead use the python class\n"
+ "matplotlib.tri.Triangulation instead.\n";
+
+static int PyTriangulation_init(PyTriangulation* self, PyObject* args, PyObject* kwds)
+{
+ Triangulation::CoordinateArray x, y;
+ Triangulation::TriangleArray triangles;
+ Triangulation::MaskArray mask;
+ Triangulation::EdgeArray edges;
+ Triangulation::NeighborArray neighbors;
+ int correct_triangle_orientations;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&O&O&O&O&i",
+ &x.converter, &x,
+ &y.converter, &y,
+ &triangles.converter, &triangles,
+ &mask.converter, &mask,
+ &edges.converter, &edges,
+ &neighbors.converter, &neighbors,
+ &correct_triangle_orientations)) {
+ return -1;
+ }
+
+ // x and y.
+ if (x.empty() || y.empty() || x.dim(0) != y.dim(0)) {
+ PyErr_SetString(PyExc_ValueError,
+ "x and y must be 1D arrays of the same length");
+ return -1;
+ }
+
+ // triangles.
+ if (triangles.empty() || triangles.dim(1) != 3) {
+ PyErr_SetString(PyExc_ValueError,
+ "triangles must be a 2D array of shape (?,3)");
+ return -1;
+ }
+
+ // Optional mask.
+ if (!mask.empty() && mask.dim(0) != triangles.dim(0)) {
+ PyErr_SetString(PyExc_ValueError,
+ "mask must be a 1D array with the same length as the triangles array");
+ return -1;
+ }
+
+ // Optional edges.
+ if (!edges.empty() && edges.dim(1) != 2) {
+ PyErr_SetString(PyExc_ValueError,
+ "edges must be a 2D array with shape (?,2)");
+ return -1;
+ }
+
+ // Optional neighbors.
+ if (!neighbors.empty() && (neighbors.dim(0) != triangles.dim(0) ||
+ neighbors.dim(1) != triangles.dim(1))) {
+ PyErr_SetString(PyExc_ValueError,
+ "neighbors must be a 2D array with the same shape as the triangles array");
+ return -1;
+ }
+
+ CALL_CPP_INIT("Triangulation",
+ (self->ptr = new Triangulation(x, y, triangles, mask,
+ edges, neighbors,
+ correct_triangle_orientations)));
+ return 0;
+}
+
+static void PyTriangulation_dealloc(PyTriangulation* self)
+{
+ delete self->ptr;
+ Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+const char* PyTriangulation_calculate_plane_coefficients__doc__ =
+ "calculate_plane_coefficients(z, plane_coefficients)\n"
+ "\n"
+ "Calculate plane equation coefficients for all unmasked triangles";
+
+static PyObject* PyTriangulation_calculate_plane_coefficients(PyTriangulation* self, PyObject* args, PyObject* kwds)
+{
+ Triangulation::CoordinateArray z;
+ if (!PyArg_ParseTuple(args, "O&:calculate_plane_coefficients",
+ &z.converter, &z)) {
+ return NULL;
+ }
+
+ if (z.empty() || z.dim(0) != self->ptr->get_npoints()) {
+ PyErr_SetString(PyExc_ValueError,
+ "z array must have same length as triangulation x and y arrays");
+ return NULL;
+ }
+
+ Triangulation::TwoCoordinateArray result;
+ CALL_CPP("calculate_plane_coefficients",
+ (result = self->ptr->calculate_plane_coefficients(z)));
+ return result.pyobj();
+}
+
+const char* PyTriangulation_get_edges__doc__ =
+ "get_edges()\n"
+ "\n"
+ "Return edges array";
+
+static PyObject* PyTriangulation_get_edges(PyTriangulation* self, PyObject* args, PyObject* kwds)
+{
+ Triangulation::EdgeArray* result;
+ CALL_CPP("get_edges", (result = &self->ptr->get_edges()));
+
+ if (result->empty()) {
+ Py_RETURN_NONE;
+ }
+    else {
+        return result->pyobj();
+    }
+}
+
+const char* PyTriangulation_get_neighbors__doc__ =
+ "get_neighbors()\n"
+ "\n"
+ "Return neighbors array";
+
+static PyObject* PyTriangulation_get_neighbors(PyTriangulation* self, PyObject* args, PyObject* kwds)
+{
+ Triangulation::NeighborArray* result;
+ CALL_CPP("get_neighbors", (result = &self->ptr->get_neighbors()));
+
+ if (result->empty()) {
+ Py_RETURN_NONE;
+ }
+    else {
+        return result->pyobj();
+    }
+}
+
+const char* PyTriangulation_set_mask__doc__ =
+ "set_mask(mask)\n"
+ "\n"
+ "Set or clear the mask array.";
+
+static PyObject* PyTriangulation_set_mask(PyTriangulation* self, PyObject* args, PyObject* kwds)
+{
+ Triangulation::MaskArray mask;
+
+ if (!PyArg_ParseTuple(args, "O&:set_mask", &mask.converter, &mask)) {
+ return NULL;
+ }
+
+ if (!mask.empty() && mask.dim(0) != self->ptr->get_ntri()) {
+ PyErr_SetString(PyExc_ValueError,
+ "mask must be a 1D array with the same length as the triangles array");
+ return NULL;
+ }
+
+ CALL_CPP("set_mask", (self->ptr->set_mask(mask)));
+ Py_RETURN_NONE;
+}
+
+static PyTypeObject* PyTriangulation_init_type(PyObject* m, PyTypeObject* type)
+{
+ static PyMethodDef methods[] = {
+ {"calculate_plane_coefficients", (PyCFunction)PyTriangulation_calculate_plane_coefficients, METH_VARARGS, PyTriangulation_calculate_plane_coefficients__doc__},
+ {"get_edges", (PyCFunction)PyTriangulation_get_edges, METH_NOARGS, PyTriangulation_get_edges__doc__},
+ {"get_neighbors", (PyCFunction)PyTriangulation_get_neighbors, METH_NOARGS, PyTriangulation_get_neighbors__doc__},
+ {"set_mask", (PyCFunction)PyTriangulation_set_mask, METH_VARARGS, PyTriangulation_set_mask__doc__},
+ {NULL}
+ };
+
+ memset(type, 0, sizeof(PyTypeObject));
+ type->tp_name = "matplotlib._tri.Triangulation";
+ type->tp_doc = PyTriangulation_init__doc__;
+ type->tp_basicsize = sizeof(PyTriangulation);
+ type->tp_dealloc = (destructor)PyTriangulation_dealloc;
+ type->tp_flags = Py_TPFLAGS_DEFAULT;
+ type->tp_methods = methods;
+ type->tp_new = PyTriangulation_new;
+ type->tp_init = (initproc)PyTriangulation_init;
+
+ if (PyType_Ready(type) < 0) {
+ return NULL;
+ }
+
+ if (PyModule_AddObject(m, "Triangulation", (PyObject*)type)) {
+ return NULL;
+ }
+
+ return type;
+}
+
+
+/* TriContourGenerator */
+
+typedef struct
+{
+ PyObject_HEAD
+ TriContourGenerator* ptr;
+ PyTriangulation* py_triangulation;
+} PyTriContourGenerator;
+
+static PyTypeObject PyTriContourGeneratorType;
+
+static PyObject* PyTriContourGenerator_new(PyTypeObject* type, PyObject* args, PyObject* kwds)
+{
+ PyTriContourGenerator* self;
+ self = (PyTriContourGenerator*)type->tp_alloc(type, 0);
+ self->ptr = NULL;
+ self->py_triangulation = NULL;
+ return (PyObject*)self;
+}
+
+const char* PyTriContourGenerator_init__doc__ =
+ "TriContourGenerator(triangulation, z)\n"
+ "\n"
+ "Create a new C++ TriContourGenerator object\n"
+ "This should not be called directly, instead use the functions\n"
+ "matplotlib.axes.tricontour and tricontourf instead.\n";
+
+static int PyTriContourGenerator_init(PyTriContourGenerator* self, PyObject* args, PyObject* kwds)
+{
+ PyObject* triangulation_arg;
+ TriContourGenerator::CoordinateArray z;
+
+ if (!PyArg_ParseTuple(args, "O!O&",
+ &PyTriangulationType, &triangulation_arg,
+ &z.converter, &z)) {
+ return -1;
+ }
+
+ PyTriangulation* py_triangulation = (PyTriangulation*)triangulation_arg;
+ Py_INCREF(py_triangulation);
+ self->py_triangulation = py_triangulation;
+ Triangulation& triangulation = *(py_triangulation->ptr);
+
+ if (z.empty() || z.dim(0) != triangulation.get_npoints()) {
+ PyErr_SetString(PyExc_ValueError,
+ "z must be a 1D array with the same length as the x and y arrays");
+ return -1;
+ }
+
+ CALL_CPP_INIT("TriContourGenerator",
+ (self->ptr = new TriContourGenerator(triangulation, z)));
+ return 0;
+}
+
+static void PyTriContourGenerator_dealloc(PyTriContourGenerator* self)
+{
+ delete self->ptr;
+ Py_XDECREF(self->py_triangulation);
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+const char* PyTriContourGenerator_create_contour__doc__ =
+ "create_contour(level)\n"
+ "\n"
+ "Create and return a non-filled contour.";
+
+static PyObject* PyTriContourGenerator_create_contour(PyTriContourGenerator* self, PyObject* args, PyObject* kwds)
+{
+ double level;
+ if (!PyArg_ParseTuple(args, "d:create_contour", &level)) {
+ return NULL;
+ }
+
+ PyObject* result;
+ CALL_CPP("create_contour", (result = self->ptr->create_contour(level)));
+ return result;
+}
+
+const char* PyTriContourGenerator_create_filled_contour__doc__ =
+ "create_filled_contour(lower_level, upper_level)\n"
+ "\n"
+ "Create and return a filled contour";
+
+static PyObject* PyTriContourGenerator_create_filled_contour(PyTriContourGenerator* self, PyObject* args, PyObject* kwds)
+{
+ double lower_level, upper_level;
+ if (!PyArg_ParseTuple(args, "dd:create_filled_contour",
+ &lower_level, &upper_level)) {
+ return NULL;
+ }
+
+    if (lower_level >= upper_level) {
+ PyErr_SetString(PyExc_ValueError,
+ "filled contour levels must be increasing");
+ return NULL;
+ }
+
+ PyObject* result;
+ CALL_CPP("create_filled_contour",
+ (result = self->ptr->create_filled_contour(lower_level,
+ upper_level)));
+ return result;
+}
+
+static PyTypeObject* PyTriContourGenerator_init_type(PyObject* m, PyTypeObject* type)
+{
+ static PyMethodDef methods[] = {
+ {"create_contour", (PyCFunction)PyTriContourGenerator_create_contour, METH_VARARGS, PyTriContourGenerator_create_contour__doc__},
+ {"create_filled_contour", (PyCFunction)PyTriContourGenerator_create_filled_contour, METH_VARARGS, PyTriContourGenerator_create_filled_contour__doc__},
+ {NULL}
+ };
+
+ memset(type, 0, sizeof(PyTypeObject));
+ type->tp_name = "matplotlib._tri.TriContourGenerator";
+ type->tp_doc = PyTriContourGenerator_init__doc__;
+ type->tp_basicsize = sizeof(PyTriContourGenerator);
+ type->tp_dealloc = (destructor)PyTriContourGenerator_dealloc;
+ type->tp_flags = Py_TPFLAGS_DEFAULT;
+ type->tp_methods = methods;
+ type->tp_new = PyTriContourGenerator_new;
+ type->tp_init = (initproc)PyTriContourGenerator_init;
+
+ if (PyType_Ready(type) < 0) {
+ return NULL;
+ }
+
+ if (PyModule_AddObject(m, "TriContourGenerator", (PyObject*)type)) {
+ return NULL;
+ }
+
+ return type;
+}
+
+
+/* TrapezoidMapTriFinder */
+
+typedef struct
+{
+ PyObject_HEAD
+ TrapezoidMapTriFinder* ptr;
+ PyTriangulation* py_triangulation;
+} PyTrapezoidMapTriFinder;
+
+static PyTypeObject PyTrapezoidMapTriFinderType;
+
+static PyObject* PyTrapezoidMapTriFinder_new(PyTypeObject* type, PyObject* args, PyObject* kwds)
+{
+ PyTrapezoidMapTriFinder* self;
+ self = (PyTrapezoidMapTriFinder*)type->tp_alloc(type, 0);
+ self->ptr = NULL;
+ self->py_triangulation = NULL;
+ return (PyObject*)self;
+}
+
+const char* PyTrapezoidMapTriFinder_init__doc__ =
+ "TrapezoidMapTriFinder(triangulation)\n"
+ "\n"
+ "Create a new C++ TrapezoidMapTriFinder object\n"
+ "This should not be called directly, instead use the python class\n"
+ "matplotlib.tri.TrapezoidMapTriFinder instead.\n";
+
+static int PyTrapezoidMapTriFinder_init(PyTrapezoidMapTriFinder* self, PyObject* args, PyObject* kwds)
+{
+ PyObject* triangulation_arg;
+ if (!PyArg_ParseTuple(args, "O!",
+ &PyTriangulationType, &triangulation_arg)) {
+ return -1;
+ }
+
+ PyTriangulation* py_triangulation = (PyTriangulation*)triangulation_arg;
+ Py_INCREF(py_triangulation);
+ self->py_triangulation = py_triangulation;
+ Triangulation& triangulation = *(py_triangulation->ptr);
+
+ CALL_CPP_INIT("TrapezoidMapTriFinder",
+ (self->ptr = new TrapezoidMapTriFinder(triangulation)));
+ return 0;
+}
+
+static void PyTrapezoidMapTriFinder_dealloc(PyTrapezoidMapTriFinder* self)
+{
+ delete self->ptr;
+ Py_XDECREF(self->py_triangulation);
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+const char* PyTrapezoidMapTriFinder_find_many__doc__ =
+ "find_many(x, y)\n"
+ "\n"
+ "Find indices of triangles containing the point coordinates (x, y)";
+
+static PyObject* PyTrapezoidMapTriFinder_find_many(PyTrapezoidMapTriFinder* self, PyObject* args, PyObject* kwds)
+{
+ TrapezoidMapTriFinder::CoordinateArray x, y;
+ if (!PyArg_ParseTuple(args, "O&O&:find_many",
+ &x.converter, &x,
+ &y.converter, &y)) {
+ return NULL;
+ }
+
+ if (x.empty() || y.empty() || x.dim(0) != y.dim(0)) {
+ PyErr_SetString(PyExc_ValueError,
+ "x and y must be array_like with same shape");
+ return NULL;
+ }
+
+ TrapezoidMapTriFinder::TriIndexArray result;
+ CALL_CPP("find_many", (result = self->ptr->find_many(x, y)));
+ return result.pyobj();
+}
+
+const char* PyTrapezoidMapTriFinder_get_tree_stats__doc__ =
+ "get_tree_stats()\n"
+ "\n"
+ "Return statistics about the tree used by the trapezoid map";
+
+static PyObject* PyTrapezoidMapTriFinder_get_tree_stats(PyTrapezoidMapTriFinder* self, PyObject* args, PyObject* kwds)
+{
+ PyObject* result;
+ CALL_CPP("get_tree_stats", (result = self->ptr->get_tree_stats()));
+ return result;
+}
+
+const char* PyTrapezoidMapTriFinder_initialize__doc__ =
+ "initialize()\n"
+ "\n"
+ "Initialize this object, creating the trapezoid map from the triangulation";
+
+static PyObject* PyTrapezoidMapTriFinder_initialize(PyTrapezoidMapTriFinder* self, PyObject* args, PyObject* kwds)
+{
+ CALL_CPP("initialize", (self->ptr->initialize()));
+ Py_RETURN_NONE;
+}
+
+const char* PyTrapezoidMapTriFinder_print_tree__doc__ =
+ "print_tree()\n"
+ "\n"
+ "Print the search tree as text to stdout; useful for debug purposes";
+
+static PyObject* PyTrapezoidMapTriFinder_print_tree(PyTrapezoidMapTriFinder* self, PyObject* args, PyObject* kwds)
+{
+ CALL_CPP("print_tree", (self->ptr->print_tree()));
+ Py_RETURN_NONE;
+}
+
+static PyTypeObject* PyTrapezoidMapTriFinder_init_type(PyObject* m, PyTypeObject* type)
+{
+ static PyMethodDef methods[] = {
+ {"find_many", (PyCFunction)PyTrapezoidMapTriFinder_find_many, METH_VARARGS, PyTrapezoidMapTriFinder_find_many__doc__},
+ {"get_tree_stats", (PyCFunction)PyTrapezoidMapTriFinder_get_tree_stats, METH_NOARGS, PyTrapezoidMapTriFinder_get_tree_stats__doc__},
+ {"initialize", (PyCFunction)PyTrapezoidMapTriFinder_initialize, METH_NOARGS, PyTrapezoidMapTriFinder_initialize__doc__},
+ {"print_tree", (PyCFunction)PyTrapezoidMapTriFinder_print_tree, METH_NOARGS, PyTrapezoidMapTriFinder_print_tree__doc__},
+ {NULL}
+ };
+
+ memset(type, 0, sizeof(PyTypeObject));
+ type->tp_name = "matplotlib._tri.TrapezoidMapTriFinder";
+ type->tp_doc = PyTrapezoidMapTriFinder_init__doc__;
+ type->tp_basicsize = sizeof(PyTrapezoidMapTriFinder);
+ type->tp_dealloc = (destructor)PyTrapezoidMapTriFinder_dealloc;
+ type->tp_flags = Py_TPFLAGS_DEFAULT;
+ type->tp_methods = methods;
+ type->tp_new = PyTrapezoidMapTriFinder_new;
+ type->tp_init = (initproc)PyTrapezoidMapTriFinder_init;
+
+ if (PyType_Ready(type) < 0) {
+ return NULL;
+ }
+
+ if (PyModule_AddObject(m, "TrapezoidMapTriFinder", (PyObject*)type)) {
+ return NULL;
+ }
+
+ return type;
+}
+
+
+/* Module */
+
+extern "C" {
+
+#if PY3K
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_tri",
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__tri(void)
+
+#else
+#define INITERROR return
+
+PyMODINIT_FUNC init_tri(void)
+#endif
+
+{
+ PyObject *m;
+
+#if PY3K
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule3("_tri", NULL, NULL);
+#endif
+
+ if (m == NULL) {
+ INITERROR;
+ }
+
+ if (!PyTriangulation_init_type(m, &PyTriangulationType)) {
+ INITERROR;
+ }
+ if (!PyTriContourGenerator_init_type(m, &PyTriContourGeneratorType)) {
+ INITERROR;
+ }
+ if (!PyTrapezoidMapTriFinder_init_type(m, &PyTrapezoidMapTriFinderType)) {
+ INITERROR;
+ }
+
+ import_array();
+
+#if PY3K
+ return m;
+#endif
+}
+
+} // extern "C"
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/triangulation.py b/contrib/python/matplotlib/py2/matplotlib/tri/triangulation.py
new file mode 100644
index 00000000000..b80aaf87b98
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/triangulation.py
@@ -0,0 +1,218 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import matplotlib._tri as _tri
+import matplotlib._qhull as _qhull
+import numpy as np
+
+
+class Triangulation(object):
+ """
+ An unstructured triangular grid consisting of npoints points and
+ ntri triangles. The triangles can either be specified by the user
+ or automatically generated using a Delaunay triangulation.
+
+ Parameters
+ ----------
+ x, y : array_like of shape (npoints)
+ Coordinates of grid points.
+ triangles : integer array_like of shape (ntri, 3), optional
+ For each triangle, the indices of the three points that make
+ up the triangle, ordered in an anticlockwise manner. If not
+ specified, the Delaunay triangulation is calculated.
+ mask : boolean array_like of shape (ntri), optional
+ Which triangles are masked out.
+
+ Attributes
+ ----------
+ `edges`
+ `neighbors`
+ is_delaunay : bool
+ Whether the Triangulation is a calculated Delaunay
+ triangulation (where `triangles` was not specified) or not.
+
+ Notes
+ -----
+ For a Triangulation to be valid it must not have duplicate points,
+ triangles formed from colinear points, or overlapping triangles.
+ """
+ def __init__(self, x, y, triangles=None, mask=None):
+ self.x = np.asarray(x, dtype=np.float64)
+ self.y = np.asarray(y, dtype=np.float64)
+ if self.x.shape != self.y.shape or self.x.ndim != 1:
+ raise ValueError("x and y must be equal-length 1-D arrays")
+
+ self.mask = None
+ self._edges = None
+ self._neighbors = None
+ self.is_delaunay = False
+
+ if triangles is None:
+ # No triangulation specified, so use matplotlib._qhull to obtain
+ # Delaunay triangulation.
+ self.triangles, self._neighbors = _qhull.delaunay(x, y)
+ self.is_delaunay = True
+ else:
+ # Triangulation specified. Copy, since we may correct triangle
+ # orientation.
+ self.triangles = np.array(triangles, dtype=np.int32, order='C')
+ if self.triangles.ndim != 2 or self.triangles.shape[1] != 3:
+ raise ValueError('triangles must be a (?,3) array')
+ if self.triangles.max() >= len(self.x):
+ raise ValueError('triangles max element is out of bounds')
+ if self.triangles.min() < 0:
+ raise ValueError('triangles min element is out of bounds')
+
+ if mask is not None:
+ self.mask = np.asarray(mask, dtype=bool)
+ if self.mask.shape != (self.triangles.shape[0],):
+ raise ValueError('mask array must have same length as '
+ 'triangles array')
+
+ # Underlying C++ object is not created until first needed.
+ self._cpp_triangulation = None
+
+ # Default TriFinder not created until needed.
+ self._trifinder = None
+
+ def calculate_plane_coefficients(self, z):
+ """
+ Calculate plane equation coefficients for all unmasked triangles from
+ the point (x,y) coordinates and specified z-array of shape (npoints).
+        The returned array has shape (ntri,3) and allows the z-value at (x,y)
+ position in triangle tri to be calculated using
+ z = array[tri,0]*x + array[tri,1]*y + array[tri,2].
+ """
+ return self.get_cpp_triangulation().calculate_plane_coefficients(z)
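+
+    # Minimal usage sketch for the method above (illustrative only; the
+    # coordinates and z values are arbitrary):
+    #
+    #     tri = Triangulation([0.0, 1.0, 0.0], [0.0, 0.0, 1.0])
+    #     coeffs = tri.calculate_plane_coefficients(np.array([1.0, 2.0, 3.0]))
+    #     # z at (x, y) inside triangle t is
+    #     #     coeffs[t, 0]*x + coeffs[t, 1]*y + coeffs[t, 2]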
+
+ @property
+ def edges(self):
+ """
+ Return integer array of shape (nedges,2) containing all edges of
+ non-masked triangles.
+
+        Each edge is given as a (start point index, end point index) pair.
+        Each edge appears only once, i.e. only one of (start, end) and
+        (end, start) is listed.
+ """
+ if self._edges is None:
+ self._edges = self.get_cpp_triangulation().get_edges()
+ return self._edges
+
+ def get_cpp_triangulation(self):
+ # Return the underlying C++ Triangulation object, creating it
+ # if necessary.
+ if self._cpp_triangulation is None:
+ self._cpp_triangulation = _tri.Triangulation(
+ self.x, self.y, self.triangles, self.mask, self._edges,
+ self._neighbors, not self.is_delaunay)
+ return self._cpp_triangulation
+
+ def get_masked_triangles(self):
+ """
+ Return an array of triangles that are not masked.
+ """
+ if self.mask is not None:
+ return self.triangles.compress(1 - self.mask, axis=0)
+ else:
+ return self.triangles
+
+ @staticmethod
+ def get_from_args_and_kwargs(*args, **kwargs):
+ """
+ Return a Triangulation object from the args and kwargs, and
+ the remaining args and kwargs with the consumed values removed.
+
+ There are two alternatives: either the first argument is a
+ Triangulation object, in which case it is returned, or the args
+ and kwargs are sufficient to create a new Triangulation to
+ return. In the latter case, see Triangulation.__init__ for
+ the possible args and kwargs.
+ """
+ if isinstance(args[0], Triangulation):
+ triangulation = args[0]
+ args = args[1:]
+ else:
+ x = args[0]
+ y = args[1]
+ args = args[2:] # Consumed first two args.
+
+ # Check triangles in kwargs then args.
+ triangles = kwargs.pop('triangles', None)
+ from_args = False
+ if triangles is None and len(args) > 0:
+ triangles = args[0]
+ from_args = True
+
+ if triangles is not None:
+ try:
+ triangles = np.asarray(triangles, dtype=np.int32)
+ except ValueError:
+ triangles = None
+
+ if triangles is not None and (triangles.ndim != 2 or
+ triangles.shape[1] != 3):
+ triangles = None
+
+ if triangles is not None and from_args:
+ args = args[1:] # Consumed first item in args.
+
+ # Check for mask in kwargs.
+ mask = kwargs.pop('mask', None)
+
+ triangulation = Triangulation(x, y, triangles, mask)
+ return triangulation, args, kwargs
+
+ def get_trifinder(self):
+ """
+ Return the default :class:`matplotlib.tri.TriFinder` of this
+ triangulation, creating it if necessary. This allows the same
+ TriFinder object to be easily shared.
+ """
+ if self._trifinder is None:
+ # Default TriFinder class.
+ from matplotlib.tri.trifinder import TrapezoidMapTriFinder
+ self._trifinder = TrapezoidMapTriFinder(self)
+ return self._trifinder
+
+ @property
+ def neighbors(self):
+ """
+ Return integer array of shape (ntri,3) containing neighbor
+ triangles.
+
+ For each triangle, the indices of the three triangles that
+ share the same edges, or -1 if there is no such neighboring
+ triangle. neighbors[i,j] is the triangle that is the neighbor
+ to the edge from point index triangles[i,j] to point index
+ triangles[i,(j+1)%3].
+ """
+ if self._neighbors is None:
+ self._neighbors = self.get_cpp_triangulation().get_neighbors()
+ return self._neighbors
+
+ def set_mask(self, mask):
+ """
+ Set or clear the mask array. This is either None, or a boolean
+ array of shape (ntri).
+ """
+ if mask is None:
+ self.mask = None
+ else:
+ self.mask = np.asarray(mask, dtype=bool)
+ if self.mask.shape != (self.triangles.shape[0],):
+ raise ValueError('mask array must have same length as '
+ 'triangles array')
+
+ # Set mask in C++ Triangulation.
+ if self._cpp_triangulation is not None:
+ self._cpp_triangulation.set_mask(self.mask)
+
+ # Clear derived fields so they are recalculated when needed.
+ self._edges = None
+ self._neighbors = None
+
+ # Recalculate TriFinder if it exists.
+ if self._trifinder is not None:
+ self._trifinder._initialize()
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/tricontour.py b/contrib/python/matplotlib/py2/matplotlib/tri/tricontour.py
new file mode 100644
index 00000000000..3087409b729
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/tricontour.py
@@ -0,0 +1,283 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from matplotlib.contour import ContourSet
+from matplotlib.tri.triangulation import Triangulation
+import matplotlib._tri as _tri
+import numpy as np
+
+
+class TriContourSet(ContourSet):
+ """
+ Create and store a set of contour lines or filled regions for
+ a triangular grid.
+
+ User-callable method: clabel
+
+ Useful attributes:
+ ax:
+ the axes object in which the contours are drawn
+ collections:
+ a silent_list of LineCollections or PolyCollections
+ levels:
+ contour levels
+ layers:
+ same as levels for line contours; half-way between
+ levels for filled contours. See _process_colors method.
+ """
+ def __init__(self, ax, *args, **kwargs):
+ """
+ Draw triangular grid contour lines or filled regions,
+ depending on whether keyword arg 'filled' is False
+ (default) or True.
+
+ The first argument of the initializer must be an axes
+ object. The remaining arguments and keyword arguments
+ are described in the docstring of `tricontour`.
+ """
+ ContourSet.__init__(self, ax, *args, **kwargs)
+
+ def _process_args(self, *args, **kwargs):
+ """
+ Process args and kwargs.
+ """
+ if isinstance(args[0], TriContourSet):
+ C = args[0].cppContourGenerator
+ if self.levels is None:
+ self.levels = args[0].levels
+ else:
+ tri, z = self._contour_args(args, kwargs)
+ C = _tri.TriContourGenerator(tri.get_cpp_triangulation(), z)
+ self._mins = [tri.x.min(), tri.y.min()]
+ self._maxs = [tri.x.max(), tri.y.max()]
+
+ self.cppContourGenerator = C
+ return kwargs
+
+ def _get_allsegs_and_allkinds(self):
+ """
+ Create and return allsegs and allkinds by calling underlying C code.
+ """
+ allsegs = []
+ if self.filled:
+ lowers, uppers = self._get_lowers_and_uppers()
+ allkinds = []
+ for lower, upper in zip(lowers, uppers):
+ segs, kinds = self.cppContourGenerator.create_filled_contour(
+ lower, upper)
+ allsegs.append([segs])
+ allkinds.append([kinds])
+ else:
+ allkinds = None
+ for level in self.levels:
+ segs = self.cppContourGenerator.create_contour(level)
+ allsegs.append(segs)
+ return allsegs, allkinds
+
+ def _contour_args(self, args, kwargs):
+ if self.filled:
+ fn = 'contourf'
+ else:
+ fn = 'contour'
+ tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args,
+ **kwargs)
+ z = np.asarray(args[0])
+ if z.shape != tri.x.shape:
+ raise ValueError('z array must have same length as triangulation x'
+ ' and y arrays')
+ self.zmax = z.max()
+ self.zmin = z.min()
+ if self.logscale and self.zmin <= 0:
+ raise ValueError('Cannot %s log of negative values.' % fn)
+ self._contour_level_args(z, args[1:])
+ return (tri, z)
+
+
+def tricontour(ax, *args, **kwargs):
+ """
+ Draw contours on an unstructured triangular grid.
+ :func:`~matplotlib.pyplot.tricontour` and
+ :func:`~matplotlib.pyplot.tricontourf` draw contour lines and
+ filled contours, respectively. Except as noted, function
+ signatures and return values are the same for both versions.
+
+ The triangulation can be specified in one of two ways; either::
+
+ tricontour(triangulation, ...)
+
+ where triangulation is a :class:`matplotlib.tri.Triangulation`
+ object, or
+
+ ::
+
+ tricontour(x, y, ...)
+ tricontour(x, y, triangles, ...)
+ tricontour(x, y, triangles=triangles, ...)
+ tricontour(x, y, mask=mask, ...)
+ tricontour(x, y, triangles, mask=mask, ...)
+
+ in which case a Triangulation object will be created. See
+    :class:`~matplotlib.tri.Triangulation` for an explanation of
+ these possibilities.
+
+ The remaining arguments may be::
+
+ tricontour(..., Z)
+
+ where *Z* is the array of values to contour, one per point
+ in the triangulation. The level values are chosen
+ automatically.
+
+ ::
+
+ tricontour(..., Z, N)
+
+ contour up to *N+1* automatically chosen contour levels
+ (*N* intervals).
+
+ ::
+
+ tricontour(..., Z, V)
+
+ draw contour lines at the values specified in sequence *V*,
+ which must be in increasing order.
+
+ ::
+
+ tricontourf(..., Z, V)
+
+ fill the (len(*V*)-1) regions between the values in *V*,
+ which must be in increasing order.
+
+ ::
+
+ tricontour(Z, **kwargs)
+
+ Use keyword args to control colors, linewidth, origin, cmap ... see
+ below for more details.
+
+ ``C = tricontour(...)`` returns a
+ :class:`~matplotlib.contour.TriContourSet` object.
+
+ Optional keyword arguments:
+
+ *colors*: [ *None* | string | (mpl_colors) ]
+ If *None*, the colormap specified by cmap will be used.
+
+ If a string, like 'r' or 'red', all levels will be plotted in this
+ color.
+
+ If a tuple of matplotlib color args (string, float, rgb, etc),
+ different levels will be plotted in different colors in the order
+ specified.
+
+ *alpha*: float
+ The alpha blending value
+
+ *cmap*: [ *None* | Colormap ]
+ A cm :class:`~matplotlib.colors.Colormap` instance or
+ *None*. If *cmap* is *None* and *colors* is *None*, a
+ default Colormap is used.
+
+ *norm*: [ *None* | Normalize ]
+ A :class:`matplotlib.colors.Normalize` instance for
+ scaling data values to colors. If *norm* is *None* and
+ *colors* is *None*, the default linear scaling is used.
+
+    *levels*: [level0, level1, ..., leveln]
+        A list of floating point numbers indicating the level
+        curves to draw, in increasing order; e.g., to draw just
+        the zero contour pass ``levels=[0]``.
+
+ *origin*: [ *None* | 'upper' | 'lower' | 'image' ]
+ If *None*, the first value of *Z* will correspond to the
+ lower left corner, location (0,0). If 'image', the rc
+ value for ``image.origin`` will be used.
+
+ This keyword is not active if *X* and *Y* are specified in
+ the call to contour.
+
+ *extent*: [ *None* | (x0,x1,y0,y1) ]
+
+ If *origin* is not *None*, then *extent* is interpreted as
+ in :func:`matplotlib.pyplot.imshow`: it gives the outer
+ pixel boundaries. In this case, the position of Z[0,0]
+ is the center of the pixel, not a corner. If *origin* is
+ *None*, then (*x0*, *y0*) is the position of Z[0,0], and
+ (*x1*, *y1*) is the position of Z[-1,-1].
+
+ This keyword is not active if *X* and *Y* are specified in
+ the call to contour.
+
+ *locator*: [ *None* | ticker.Locator subclass ]
+ If *locator* is None, the default
+ :class:`~matplotlib.ticker.MaxNLocator` is used. The
+ locator is used to determine the contour levels if they
+ are not given explicitly via the *V* argument.
+
+ *extend*: [ 'neither' | 'both' | 'min' | 'max' ]
+ Unless this is 'neither', contour levels are automatically
+ added to one or both ends of the range so that all data
+ are included. These added ranges are then mapped to the
+ special colormap values which default to the ends of the
+ colormap range, but can be set via
+ :meth:`matplotlib.colors.Colormap.set_under` and
+ :meth:`matplotlib.colors.Colormap.set_over` methods.
+
+ *xunits*, *yunits*: [ *None* | registered units ]
+ Override axis units by specifying an instance of a
+ :class:`matplotlib.units.ConversionInterface`.
+
+
+ tricontour-only keyword arguments:
+
+ *linewidths*: [ *None* | number | tuple of numbers ]
+ If *linewidths* is *None*, the default width in
+ ``lines.linewidth`` in ``matplotlibrc`` is used.
+
+ If a number, all levels will be plotted with this linewidth.
+
+ If a tuple, different levels will be plotted with different
+ linewidths in the order specified
+
+ *linestyles*: [ *None* | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
+        If *linestyles* is *None*, 'solid' is used.
+
+ *linestyles* can also be an iterable of the above strings
+ specifying a set of linestyles to be used. If this
+ iterable is shorter than the number of contour levels
+ it will be repeated as necessary.
+
+ If contour is using a monochrome colormap and the contour
+ level is less than 0, then the linestyle specified
+ in ``contour.negative_linestyle`` in ``matplotlibrc``
+ will be used.
+
+ tricontourf-only keyword arguments:
+
+ *antialiased*: bool
+ enable antialiasing
+
+ Note: tricontourf fills intervals that are closed at the top; that
+ is, for boundaries *z1* and *z2*, the filled region is::
+
+ z1 < z <= z2
+
+ There is one exception: if the lowest boundary coincides with
+ the minimum value of the *z* array, then that minimum value
+ will be included in the lowest interval.
+ """
+ if not ax._hold:
+ ax.cla()
+ kwargs['filled'] = False
+ return TriContourSet(ax, *args, **kwargs)
+
+
+def tricontourf(ax, *args, **kwargs):
+ if not ax._hold:
+ ax.cla()
+ kwargs['filled'] = True
+ return TriContourSet(ax, *args, **kwargs)
+tricontourf.__doc__ = tricontour.__doc__
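+
+
+# Minimal usage sketch (illustrative only; these module-level functions take
+# the Axes as their first argument and are normally reached through the
+# corresponding Axes/pyplot wrappers):
+#
+#     cs = tricontour(ax, triangulation, z, levels)
+#     csf = tricontourf(ax, x, y, triangles, z)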
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/trifinder.py b/contrib/python/matplotlib/py2/matplotlib/tri/trifinder.py
new file mode 100644
index 00000000000..08a07f854f7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/trifinder.py
@@ -0,0 +1,96 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from matplotlib.tri import Triangulation
+import matplotlib._tri as _tri
+import numpy as np
+
+
+class TriFinder(object):
+ """
+ Abstract base class for classes used to find the triangles of a
+ Triangulation in which (x,y) points lie.
+
+ Rather than instantiate an object of a class derived from TriFinder, it is
+ usually better to use the function
+ :func:`matplotlib.tri.Triangulation.get_trifinder`.
+
+ Derived classes implement __call__(x,y) where x,y are array_like point
+ coordinates of the same shape.
+ """
+ def __init__(self, triangulation):
+ if not isinstance(triangulation, Triangulation):
+ raise ValueError('Expected a Triangulation object')
+ self._triangulation = triangulation
+
+
+class TrapezoidMapTriFinder(TriFinder):
+ """
+ :class:`~matplotlib.tri.TriFinder` class implemented using the trapezoid
+ map algorithm from the book "Computational Geometry, Algorithms and
+ Applications", second edition, by M. de Berg, M. van Kreveld, M. Overmars
+ and O. Schwarzkopf.
+
+ The triangulation must be valid, i.e. it must not have duplicate points,
+ triangles formed from colinear points, or overlapping triangles. The
+ algorithm has some tolerance to triangles formed from colinear points, but
+ this should not be relied upon.
+ """
+ def __init__(self, triangulation):
+ TriFinder.__init__(self, triangulation)
+ self._cpp_trifinder = _tri.TrapezoidMapTriFinder(
+ triangulation.get_cpp_triangulation())
+ self._initialize()
+
+ def __call__(self, x, y):
+ """
+ Return an array containing the indices of the triangles in which the
+ specified x,y points lie, or -1 for points that do not lie within a
+ triangle.
+
+ *x*, *y* are array_like x and y coordinates of the same shape and any
+ number of dimensions.
+
+        Returns an integer array with the same shape as *x* and *y*.
+ """
+ x = np.asarray(x, dtype=np.float64)
+ y = np.asarray(y, dtype=np.float64)
+ if x.shape != y.shape:
+ raise ValueError("x and y must be array-like with the same shape")
+
+ # C++ does the heavy lifting, and expects 1D arrays.
+ indices = (self._cpp_trifinder.find_many(x.ravel(), y.ravel())
+ .reshape(x.shape))
+ return indices
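+
+    # Minimal usage sketch (illustrative only; the coordinate values are
+    # arbitrary and `triangulation` is a matplotlib.tri.Triangulation):
+    #
+    #     finder = triangulation.get_trifinder()  # default TrapezoidMapTriFinder
+    #     tri_indices = finder(np.array([0.2, 5.0]), np.array([0.2, 5.0]))
+    #     # entries are -1 for points that fall outside the triangulation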
+
+ def _get_tree_stats(self):
+ """
+ Return a python list containing the statistics about the node tree:
+ 0: number of nodes (tree size)
+ 1: number of unique nodes
+ 2: number of trapezoids (tree leaf nodes)
+ 3: number of unique trapezoids
+ 4: maximum parent count (max number of times a node is repeated in
+ tree)
+ 5: maximum depth of tree (one more than the maximum number of
+ comparisons needed to search through the tree)
+ 6: mean of all trapezoid depths (one more than the average number
+ of comparisons needed to search through the tree)
+ """
+ return self._cpp_trifinder.get_tree_stats()
+
+ def _initialize(self):
+ """
+ Initialize the underlying C++ object. Can be called multiple times if,
+ for example, the triangulation is modified.
+ """
+ self._cpp_trifinder.initialize()
+
+ def _print_tree(self):
+ """
+ Print a text representation of the node tree, which is useful for
+ debugging purposes.
+ """
+ self._cpp_trifinder.print_tree()
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/triinterpolate.py b/contrib/python/matplotlib/py2/matplotlib/tri/triinterpolate.py
new file mode 100644
index 00000000000..f3c6deb0c97
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/triinterpolate.py
@@ -0,0 +1,1637 @@
+"""
+Interpolation inside triangular grids.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange
+
+from matplotlib.tri import Triangulation
+from matplotlib.tri.trifinder import TriFinder
+from matplotlib.tri.tritools import TriAnalyzer
+import numpy as np
+import warnings
+
+__all__ = ('TriInterpolator', 'LinearTriInterpolator', 'CubicTriInterpolator')
+
+
+class TriInterpolator(object):
+ """
+ Abstract base class for classes used to perform interpolation on
+ triangular grids.
+
+ Derived classes implement the following methods:
+
+ - ``__call__(x, y)`` ,
+ where x, y are array_like point coordinates of the same shape, and
+ that returns a masked array of the same shape containing the
+ interpolated z-values.
+
+ - ``gradient(x, y)`` ,
+ where x, y are array_like point coordinates of the same
+ shape, and that returns a list of 2 masked arrays of the same shape
+ containing the 2 derivatives of the interpolator (derivatives of
+ interpolated z values with respect to x and y).
+
+ """
+ def __init__(self, triangulation, z, trifinder=None):
+ if not isinstance(triangulation, Triangulation):
+ raise ValueError("Expected a Triangulation object")
+ self._triangulation = triangulation
+
+ self._z = np.asarray(z)
+ if self._z.shape != self._triangulation.x.shape:
+ raise ValueError("z array must have same length as triangulation x"
+ " and y arrays")
+
+ if trifinder is not None and not isinstance(trifinder, TriFinder):
+ raise ValueError("Expected a TriFinder object")
+ self._trifinder = trifinder or self._triangulation.get_trifinder()
+
+ # Default scaling factors : 1.0 (= no scaling)
+ # Scaling may be used for interpolations for which the order of
+ # magnitude of x, y has an impact on the interpolant definition.
+ # Please refer to :meth:`_interpolate_multikeys` for details.
+ self._unit_x = 1.0
+ self._unit_y = 1.0
+
+ # Default triangle renumbering: None (= no renumbering)
+ # Renumbering may be used to avoid unnecessary computations
+ # if complex calculations are done inside the Interpolator.
+ # Please refer to :meth:`_interpolate_multikeys` for details.
+ self._tri_renum = None
+
+ # __call__ and gradient docstrings are shared by all subclasses
+ # (except, if needed, relevant additions).
+ # However these methods are only implemented in subclasses to avoid
+ # confusion in the documentation.
+ _docstring__call__ = """
+ Returns a masked array containing interpolated values at the specified
+ x,y points.
+
+ Parameters
+ ----------
+ x, y : array-like
+ x and y coordinates of the same shape and any number of
+ dimensions.
+
+ Returns
+ -------
+ z : np.ma.array
+            Masked array of the same shape as *x* and *y*; values
+ corresponding to (*x*, *y*) points outside of the triangulation
+ are masked out.
+
+ """
+
+ _docstringgradient = """
+ Returns a list of 2 masked arrays containing interpolated derivatives
+ at the specified x,y points.
+
+ Parameters
+ ----------
+ x, y : array-like
+ x and y coordinates of the same shape and any number of
+ dimensions.
+
+ Returns
+ -------
+ dzdx, dzdy : np.ma.array
+            2 masked arrays of the same shape as *x* and *y*; values
+ corresponding to (x,y) points outside of the triangulation
+ are masked out.
+ The first returned array contains the values of
+ :math:`\\frac{\\partial z}{\\partial x}` and the second those of
+ :math:`\\frac{\\partial z}{\\partial y}`.
+
+ """
+
+ def _interpolate_multikeys(self, x, y, tri_index=None,
+ return_keys=('z',)):
+ """
+ Versatile (private) method defined for all TriInterpolators.
+
+ :meth:`_interpolate_multikeys` is a wrapper around method
+ :meth:`_interpolate_single_key` (to be defined in the child
+ subclasses).
+        :meth:`_interpolate_single_key` actually performs the interpolation,
+ but only for 1-dimensional inputs and at valid locations (inside
+ unmasked triangles of the triangulation).
+
+ The purpose of :meth:`_interpolate_multikeys` is to implement the
+ following common tasks needed in all subclasses implementations:
+
+ - calculation of containing triangles
+ - dealing with more than one interpolation request at the same
+ location (e.g., if the 2 derivatives are requested, it is
+ unnecessary to compute the containing triangles twice)
+ - scaling according to self._unit_x, self._unit_y
+ - dealing with points outside of the grid (with fill value np.nan)
+          - dealing with multi-dimensional *x*, *y* arrays: flattening for
+            the :meth:`_interpolate_single_key` call and final reshaping.
+
+ (Note that np.vectorize could do most of those things very well for
+ you, but it does it by function evaluations over successive tuples of
+ the input arrays. Therefore, this tends to be more time consuming than
+ using optimized numpy functions - e.g., np.dot - which can be used
+ easily on the flattened inputs, in the child-subclass methods
+ :meth:`_interpolate_single_key`.)
+
+ It is guaranteed that the calls to :meth:`_interpolate_single_key`
+ will be done with flattened (1-d) array_like input parameters `x`, `y`
+ and with flattened, valid `tri_index` arrays (no -1 index allowed).
+
+ Parameters
+ ----------
+ x, y : array_like
+ x and y coordinates indicating where interpolated values are
+ requested.
+ tri_index : integer array_like, optional
+ Array of the containing triangle indices, same shape as
+ *x* and *y*. Defaults to None. If None, these indices
+ will be computed by a TriFinder instance.
+            (Note: for points outside the grid, tri_index[ipt] shall be -1).
+ return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'}
+ Defines the interpolation arrays to return, and in which order.
+
+ Returns
+ -------
+ ret : list of arrays
+ Each array-like contains the expected interpolated values in the
+ order defined by *return_keys* parameter.
+ """
+ # Flattening and rescaling inputs arrays x, y
+ # (initial shape is stored for output)
+ x = np.asarray(x, dtype=np.float64)
+ y = np.asarray(y, dtype=np.float64)
+ sh_ret = x.shape
+ if x.shape != y.shape:
+ raise ValueError("x and y shall have same shapes."
+ " Given: {0} and {1}".format(x.shape, y.shape))
+ x = np.ravel(x)
+ y = np.ravel(y)
+ x_scaled = x/self._unit_x
+ y_scaled = y/self._unit_y
+ size_ret = np.size(x_scaled)
+
+ # Computes & ravels the element indexes, extract the valid ones.
+ if tri_index is None:
+ tri_index = self._trifinder(x, y)
+ else:
+ if (tri_index.shape != sh_ret):
+ raise ValueError(
+ "tri_index array is provided and shall"
+ " have same shape as x and y. Given: "
+ "{0} and {1}".format(tri_index.shape, sh_ret))
+ tri_index = np.ravel(tri_index)
+
+ mask_in = (tri_index != -1)
+ if self._tri_renum is None:
+ valid_tri_index = tri_index[mask_in]
+ else:
+ valid_tri_index = self._tri_renum[tri_index[mask_in]]
+ valid_x = x_scaled[mask_in]
+ valid_y = y_scaled[mask_in]
+
+ ret = []
+ for return_key in return_keys:
+ # Find the return index associated with the key.
+ try:
+ return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key]
+ except KeyError:
+ raise ValueError("return_keys items shall take values in"
+ " {'z', 'dzdx', 'dzdy'}")
+
+ # Sets the scale factor for f & df components
+ scale = [1., 1./self._unit_x, 1./self._unit_y][return_index]
+
+ # Computes the interpolation
+ ret_loc = np.empty(size_ret, dtype=np.float64)
+ ret_loc[~mask_in] = np.nan
+ ret_loc[mask_in] = self._interpolate_single_key(
+ return_key, valid_tri_index, valid_x, valid_y) * scale
+ ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)]
+
+ return ret
+
+ def _interpolate_single_key(self, return_key, tri_index, x, y):
+ """
+ Performs the interpolation at points belonging to the triangulation
+        (inside an unmasked triangle).
+
+ Parameters
+ ----------
+        return_key : string key from {'z', 'dzdx', 'dzdy'}
+ Identifies the requested values (z or its derivatives)
+ tri_index : 1d integer array
+ Valid triangle index (-1 prohibited)
+ x, y : 1d arrays, same shape as `tri_index`
+ Valid locations where interpolation is requested.
+
+ Returns
+ -------
+ ret : 1-d array
+ Returned array of the same size as *tri_index*
+ """
+ raise NotImplementedError("TriInterpolator subclasses" +
+ "should implement _interpolate_single_key!")
+
+
+class LinearTriInterpolator(TriInterpolator):
+ """
+ A LinearTriInterpolator performs linear interpolation on a triangular grid.
+
+ Each triangle is represented by a plane so that an interpolated value at
+ point (x,y) lies on the plane of the triangle containing (x,y).
+ Interpolated values are therefore continuous across the triangulation, but
+ their first derivatives are discontinuous at edges between triangles.
+
+ Parameters
+ ----------
+ triangulation : :class:`~matplotlib.tri.Triangulation` object
+ The triangulation to interpolate over.
+ z : array_like of shape (npoints,)
+ Array of values, defined at grid points, to interpolate between.
+ trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
+ If this is not specified, the Triangulation's default TriFinder will
+ be used by calling
+ :func:`matplotlib.tri.Triangulation.get_trifinder`.
+
+ Methods
+ -------
+ `__call__` (x, y) : Returns interpolated values at x,y points
+ `gradient` (x, y) : Returns interpolated derivatives at x,y points
+
+ """
+ def __init__(self, triangulation, z, trifinder=None):
+ TriInterpolator.__init__(self, triangulation, z, trifinder)
+
+ # Store plane coefficients for fast interpolation calculations.
+ self._plane_coefficients = \
+ self._triangulation.calculate_plane_coefficients(self._z)
+
+ def __call__(self, x, y):
+ return self._interpolate_multikeys(x, y, tri_index=None,
+ return_keys=('z',))[0]
+ __call__.__doc__ = TriInterpolator._docstring__call__
+
+ def gradient(self, x, y):
+ return self._interpolate_multikeys(x, y, tri_index=None,
+ return_keys=('dzdx', 'dzdy'))
+ gradient.__doc__ = TriInterpolator._docstringgradient
+
+ def _interpolate_single_key(self, return_key, tri_index, x, y):
+ if return_key == 'z':
+ return (self._plane_coefficients[tri_index, 0]*x +
+ self._plane_coefficients[tri_index, 1]*y +
+ self._plane_coefficients[tri_index, 2])
+ elif return_key == 'dzdx':
+ return self._plane_coefficients[tri_index, 0]
+ elif return_key == 'dzdy':
+ return self._plane_coefficients[tri_index, 1]
+ else:
+ raise ValueError("Invalid return_key: " + return_key)
+
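+# Minimal usage sketch for LinearTriInterpolator (illustrative only; `xi`,
+# `yi` and `z` are placeholder arrays):
+#
+#     interp = LinearTriInterpolator(triangulation, z)
+#     zi = interp(xi, yi)                   # masked where (xi, yi) is outside
+#     dzdx, dzdy = interp.gradient(xi, yi)
+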
+
+class CubicTriInterpolator(TriInterpolator):
+ """
+ A CubicTriInterpolator performs cubic interpolation on triangular grids.
+
+ In one-dimension - on a segment - a cubic interpolating function is
+ defined by the values of the function and its derivative at both ends.
+ This is almost the same in 2-d inside a triangle, except that the values
+ of the function and its 2 derivatives have to be defined at each triangle
+ node.
+
+ The CubicTriInterpolator takes the value of the function at each node -
+ provided by the user - and internally computes the value of the
+ derivatives, resulting in a smooth interpolation.
+ (As a special feature, the user can also impose the value of the
+ derivatives at each node, but this is not supposed to be the common
+ usage.)
+
+ Parameters
+ ----------
+ triangulation : :class:`~matplotlib.tri.Triangulation` object
+ The triangulation to interpolate over.
+ z : array_like of shape (npoints,)
+ Array of values, defined at grid points, to interpolate between.
+ kind : {'min_E', 'geom', 'user'}, optional
+ Choice of the smoothing algorithm, in order to compute
+ the interpolant derivatives (defaults to 'min_E'):
+
+        - if 'min_E': (default) The derivatives at each node are computed
+          to minimize a bending energy.
+        - if 'geom': The derivatives at each node are computed as a
+          weighted average of relevant triangle normals. To be used for
+          speed optimization (large grids).
+        - if 'user': The user provides the argument `dz`; no computation
+          is hence needed.
+
+ trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
+ If not specified, the Triangulation's default TriFinder will
+ be used by calling
+ :func:`matplotlib.tri.Triangulation.get_trifinder`.
+ dz : tuple of array_likes (dzdx, dzdy), optional
+ Used only if *kind* ='user'. In this case *dz* must be provided as
+ (dzdx, dzdy) where dzdx, dzdy are arrays of the same shape as *z* and
+ are the interpolant first derivatives at the *triangulation* points.
+
+ Methods
+ -------
+ `__call__` (x, y) : Returns interpolated values at x,y points
+ `gradient` (x, y) : Returns interpolated derivatives at x,y points
+
+ Notes
+ -----
+ This note is a bit technical and details the way a
+ :class:`~matplotlib.tri.CubicTriInterpolator` computes a cubic
+ interpolation.
+
+ The interpolation is based on a Clough-Tocher subdivision scheme of
+ the *triangulation* mesh (to make it clearer, each triangle of the
+ grid will be divided in 3 child-triangles, and on each child triangle
+ the interpolated function is a cubic polynomial of the 2 coordinates).
+ This technique originates from FEM (Finite Element Method) analysis;
+ the element used is a reduced Hsieh-Clough-Tocher (HCT)
+ element. Its shape functions are described in [1]_.
+ The assembled function is guaranteed to be C1-smooth, i.e. it is
+ continuous and its first derivatives are also continuous (this
+ is easy to show inside the triangles but is also true when crossing the
+ edges).
+
+ In the default case (*kind* ='min_E'), the interpolant minimizes a
+ curvature energy on the functional space generated by the HCT element
+ shape functions - with imposed values but arbitrary derivatives at each
+ node. The minimized functional is the integral of the so-called total
+ curvature (implementation based on an algorithm from [2]_ - PCG sparse
+ solver):
+
+ .. math::
+
+ E(z) = \\ \\frac{1}{2} \\int_{\\Omega} \\left(
+ \\left( \\frac{\\partial^2{z}}{\\partial{x}^2} \\right)^2 +
+ \\left( \\frac{\\partial^2{z}}{\\partial{y}^2} \\right)^2 +
+ 2\\left( \\frac{\\partial^2{z}}{\\partial{y}\\partial{x}}
+ \\right)^2 \\right) dx\\,dy
+
+ If the case *kind* ='geom' is chosen by the user, a simple geometric
+ approximation is used (weighted average of the triangle normal
+ vectors), which could improve speed on very large grids.
+
+ References
+ ----------
+ .. [1] Michel Bernadou, Kamal Hassan, "Basis functions for general
+ Hsieh-Clough-Tocher triangles, complete or reduced.",
+ International Journal for Numerical Methods in Engineering,
+ 17(5):784 - 789. 2.01.
+ .. [2] C.T. Kelley, "Iterative Methods for Optimization".
+
+ """
+ def __init__(self, triangulation, z, kind='min_E', trifinder=None,
+ dz=None):
+ TriInterpolator.__init__(self, triangulation, z, trifinder)
+
+        # Loads the underlying C++ _triangulation.
+ # (During loading, reordering of triangulation._triangles may occur so
+ # that all final triangles are now anti-clockwise)
+ self._triangulation.get_cpp_triangulation()
+
+ # To build the stiffness matrix and avoid zero-energy spurious modes
+ # we will only store internally the valid (unmasked) triangles and
+ # the necessary (used) points coordinates.
+ # 2 renumbering tables need to be computed and stored:
+ # - a triangle renum table in order to translate the result from a
+ # TriFinder instance into the internal stored triangle number.
+ # - a node renum table to overwrite the self._z values into the new
+ # (used) node numbering.
+ tri_analyzer = TriAnalyzer(self._triangulation)
+ (compressed_triangles, compressed_x, compressed_y, tri_renum,
+ node_renum) = tri_analyzer._get_compressed_triangulation(True, True)
+ self._triangles = compressed_triangles
+ self._tri_renum = tri_renum
+ # Taking into account the node renumbering in self._z:
+ node_mask = (node_renum == -1)
+ self._z[node_renum[~node_mask]] = self._z
+ self._z = self._z[~node_mask]
+
+ # Computing scale factors
+ self._unit_x = np.ptp(compressed_x)
+ self._unit_y = np.ptp(compressed_y)
+ self._pts = np.column_stack([compressed_x / self._unit_x,
+ compressed_y / self._unit_y])
+ # Computing triangle points
+ self._tris_pts = self._pts[self._triangles]
+ # Computing eccentricities
+ self._eccs = self._compute_tri_eccentricities(self._tris_pts)
+ # Computing dof estimations for HCT triangle shape function
+ self._dof = self._compute_dof(kind, dz=dz)
+ # Loading HCT element
+ self._ReferenceElement = _ReducedHCT_Element()
+
+ def __call__(self, x, y):
+ return self._interpolate_multikeys(x, y, tri_index=None,
+ return_keys=('z',))[0]
+ __call__.__doc__ = TriInterpolator._docstring__call__
+
+ def gradient(self, x, y):
+ return self._interpolate_multikeys(x, y, tri_index=None,
+ return_keys=('dzdx', 'dzdy'))
+ gradient.__doc__ = TriInterpolator._docstringgradient
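+
+    # Minimal usage sketch (illustrative only; `z` holds one value per
+    # triangulation point and `xi`, `yi` are placeholder query coordinates):
+    #
+    #     interp = CubicTriInterpolator(triangulation, z, kind='geom')
+    #     zi = interp(xi, yi)
+    #     dzdx, dzdy = interp.gradient(xi, yi)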
+
+ def _interpolate_single_key(self, return_key, tri_index, x, y):
+ tris_pts = self._tris_pts[tri_index]
+ alpha = self._get_alpha_vec(x, y, tris_pts)
+ ecc = self._eccs[tri_index]
+ dof = np.expand_dims(self._dof[tri_index], axis=1)
+ if return_key == 'z':
+ return self._ReferenceElement.get_function_values(
+ alpha, ecc, dof)
+ elif return_key in ['dzdx', 'dzdy']:
+ J = self._get_jacobian(tris_pts)
+ dzdx = self._ReferenceElement.get_function_derivatives(
+ alpha, J, ecc, dof)
+ if return_key == 'dzdx':
+ return dzdx[:, 0, 0]
+ else:
+ return dzdx[:, 1, 0]
+ else:
+ raise ValueError("Invalid return_key: " + return_key)
+
+ def _compute_dof(self, kind, dz=None):
+ """
+ Computes and returns nodal dofs according to kind
+
+ Parameters
+ ----------
+ kind: {'min_E', 'geom', 'user'}
+ Choice of the _DOF_estimator subclass to perform the gradient
+ estimation.
+ dz: tuple of array_likes (dzdx, dzdy), optional
+            Used only if *kind* ='user'; in this case it is passed to the
+ :class:`_DOF_estimator_user`.
+
+ Returns
+ -------
+ dof : array_like, shape (npts,2)
+ Estimation of the gradient at triangulation nodes (stored as
+ degree of freedoms of reduced-HCT triangle elements).
+ """
+ if kind == 'user':
+ if dz is None:
+ raise ValueError("For a CubicTriInterpolator with "
+ "*kind*='user', a valid *dz* "
+ "argument is expected.")
+ TE = _DOF_estimator_user(self, dz=dz)
+ elif kind == 'geom':
+ TE = _DOF_estimator_geom(self)
+ elif kind == 'min_E':
+ TE = _DOF_estimator_min_E(self)
+ else:
+ raise ValueError("CubicTriInterpolator *kind* proposed: {0} ; "
+ "should be one of: "
+ "'user', 'geom', 'min_E'".format(kind))
+ return TE.compute_dof_from_df()
+
+ @staticmethod
+ def _get_alpha_vec(x, y, tris_pts):
+ """
+ Fast (vectorized) function to compute barycentric coordinates alpha.
+
+ Parameters
+ ----------
+ x, y : array-like of dim 1 (shape (nx,))
+            Coordinates of the points whose barycentric
+            coordinates are requested.
+ tris_pts : array like of dim 3 (shape: (nx,3,2))
+ Coordinates of the containing triangles apexes.
+
+ Returns
+ -------
+ alpha : array of dim 2 (shape (nx,3))
+ Barycentric coordinates of the points inside the containing
+ triangles.
+ """
+ ndim = tris_pts.ndim-2
+
+ a = tris_pts[:, 1, :] - tris_pts[:, 0, :]
+ b = tris_pts[:, 2, :] - tris_pts[:, 0, :]
+ abT = np.concatenate([np.expand_dims(a, ndim+1),
+ np.expand_dims(b, ndim+1)], ndim+1)
+ ab = _transpose_vectorized(abT)
+ x = np.expand_dims(x, ndim)
+ y = np.expand_dims(y, ndim)
+ OM = np.concatenate([x, y], ndim) - tris_pts[:, 0, :]
+
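+        # Solve the 2x2 normal equations (ab . abT) ksi = ab . OM for the
+        # coordinates ksi of OM in the (a, b) edge basis; the barycentric
+        # coordinates are then (1 - ksi0 - ksi1, ksi0, ksi1).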
+ metric = _prod_vectorized(ab, abT)
+ # Here we try to deal with the colinear cases.
+ # metric_inv is in this case set to the Moore-Penrose pseudo-inverse
+ # meaning that we will still return a set of valid barycentric
+ # coordinates.
+ metric_inv = _pseudo_inv22sym_vectorized(metric)
+ Covar = _prod_vectorized(ab, _transpose_vectorized(
+ np.expand_dims(OM, ndim)))
+ ksi = _prod_vectorized(metric_inv, Covar)
+ alpha = _to_matrix_vectorized([
+ [1-ksi[:, 0, 0]-ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]])
+ return alpha
+
+ @staticmethod
+ def _get_jacobian(tris_pts):
+ """
+ Fast (vectorized) function to compute triangle jacobian matrix.
+
+ Parameters
+ ----------
+ tris_pts : array like of dim 3 (shape: (nx,3,2))
+ Coordinates of the containing triangles apexes.
+
+ Returns
+ -------
+ J : array of dim 3 (shape (nx,2,2))
+            Jacobian matrices of the containing triangles.
+ J[itri,:,:] is the jacobian matrix at apex 0 of the triangle
+ itri, so that the following (matrix) relationship holds:
+ [dz/dksi] = [J] x [dz/dx]
+ with x: global coordinates
+ ksi: element parametric coordinates in triangle first apex
+ local basis.
+ """
+ a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])
+ b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])
+ J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],
+ [b[:, 0], b[:, 1]]])
+ return J
+
+ @staticmethod
+ def _compute_tri_eccentricities(tris_pts):
+ """
+ Computes triangle eccentricities
+
+ Parameters
+ ----------
+ tris_pts : array like of dim 3 (shape: (nx,3,2))
+ Coordinates of the triangles apexes.
+
+ Returns
+ -------
+ ecc : array like of dim 2 (shape: (nx,3))
+ The so-called eccentricity parameters [1] needed for
+ HCT triangular element.
+ """
+ a = np.expand_dims(tris_pts[:, 2, :] - tris_pts[:, 1, :], axis=2)
+ b = np.expand_dims(tris_pts[:, 0, :] - tris_pts[:, 2, :], axis=2)
+ c = np.expand_dims(tris_pts[:, 1, :] - tris_pts[:, 0, :], axis=2)
+ # Do not use np.squeeze, this is dangerous if only one triangle
+ # in the triangulation...
+ dot_a = _prod_vectorized(_transpose_vectorized(a), a)[:, 0, 0]
+ dot_b = _prod_vectorized(_transpose_vectorized(b), b)[:, 0, 0]
+ dot_c = _prod_vectorized(_transpose_vectorized(c), c)[:, 0, 0]
+ # Note that this line will raise a warning for dot_a, dot_b or dot_c
+ # zeros, but we choose not to support triangles with duplicate points.
+ return _to_matrix_vectorized([[(dot_c-dot_b) / dot_a],
+ [(dot_a-dot_c) / dot_b],
+ [(dot_b-dot_a) / dot_c]])
+
+
+# FEM element used for interpolation and for solving minimisation
+# problem (Reduced HCT element)
+class _ReducedHCT_Element():
+ """
+ Implementation of reduced HCT triangular element with explicit shape
+ functions.
+
+ Computes z, dz, d2z and the element stiffness matrix for bending energy:
+ E(f) = integral( (d2z/dx2 + d2z/dy2)**2 dA)
+
+ *** Reference for the shape functions: ***
+    [1] Basis functions for general Hsieh-Clough-Tocher triangles, complete or
+ reduced.
+ Michel Bernadou, Kamal Hassan
+ International Journal for Numerical Methods in Engineering.
+ 17(5):784 - 789. 2.01
+
+ *** Element description: ***
+ 9 dofs: z and dz given at 3 apex
+ C1 (conform)
+
+ """
+ # 1) Loads matrices to generate shape functions as a function of
+    # triangle eccentricities - based on [1] p.11
+ M = np.array([
+ [ 0.00, 0.00, 0.00, 4.50, 4.50, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [-0.25, 0.00, 0.00, 0.50, 1.25, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [-0.25, 0.00, 0.00, 1.25, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.50, 1.00, 0.00, -1.50, 0.00, 3.00, 3.00, 0.00, 0.00, 3.00],
+ [ 0.00, 0.00, 0.00, -0.25, 0.25, 0.00, 1.00, 0.00, 0.00, 0.50],
+ [ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00],
+ [ 0.50, 0.00, 1.00, 0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00],
+ [ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00],
+ [ 0.00, 0.00, 0.00, 0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]])
+ M0 = np.array([
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [-1.00, 0.00, 0.00, 1.50, 1.50, 0.00, 0.00, 0.00, 0.00, -3.00],
+ [-0.50, 0.00, 0.00, 0.75, 0.75, 0.00, 0.00, 0.00, 0.00, -1.50],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00, 3.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00, 1.50]])
+ M1 = np.array([
+ [-0.50, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [-0.25, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
+ M2 = np.array([
+ [ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [-0.50, 0.00, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [-0.25, 0.00, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+ [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
+
+ # 2) Loads matrices to rotate components of gradient & Hessian
+ # vectors in the reference basis of triangle first apex (a0)
+ rotate_dV = np.array([[ 1., 0.], [ 0., 1.],
+ [ 0., 1.], [-1., -1.],
+ [-1., -1.], [ 1., 0.]])
+
+ rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0., 0., 1.],
+ [0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.],
+ [1., 1., 1.], [1., 0., 0.], [-2., 0., -1.]])
+
+ # 3) Loads Gauss points & weights on the 3 sub-triangles for P2
+ # exact integral - 3 points on each sub-triangle.
+ # NOTE: as the 2nd derivative is discontinuous, we really need those 9
+ # points!
+ n_gauss = 9
+ gauss_pts = np.array([[13./18., 4./18., 1./18.],
+ [ 4./18., 13./18., 1./18.],
+ [ 7./18., 7./18., 4./18.],
+ [ 1./18., 13./18., 4./18.],
+ [ 1./18., 4./18., 13./18.],
+ [ 4./18., 7./18., 7./18.],
+ [ 4./18., 1./18., 13./18.],
+ [13./18., 1./18., 4./18.],
+ [ 7./18., 4./18., 7./18.]], dtype=np.float64)
+ gauss_w = np.ones([9], dtype=np.float64) / 9.
+
+ # 4) Stiffness matrix for curvature energy
+ E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]])
+
+ # 5) Loads the matrix to compute DOF_rot from tri_J at apex 0
+ J0_to_J1 = np.array([[-1., 1.], [-1., 0.]])
+ J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]])
+
+ def get_function_values(self, alpha, ecc, dofs):
+ """
+ Parameters
+ ----------
+ alpha : is a (N x 3 x 1) array (array of column-matrices) of
+ barycentric coordinates,
+ ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle
+ eccentricities,
+ dofs : is a (N x 1 x 9) array (array of row-matrices) of computed
+ degrees of freedom.
+
+ Returns
+ -------
+ Returns the N-array of interpolated function values.
+ """
+ subtri = np.argmin(alpha, axis=1)[:, 0]
+ ksi = _roll_vectorized(alpha, -subtri, axis=0)
+ E = _roll_vectorized(ecc, -subtri, axis=0)
+ x = ksi[:, 0, 0]
+ y = ksi[:, 1, 0]
+ z = ksi[:, 2, 0]
+ x_sq = x*x
+ y_sq = y*y
+ z_sq = z*z
+ V = _to_matrix_vectorized([
+ [x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x],
+ [y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]])
+ prod = _prod_vectorized(self.M, V)
+ prod += _scalar_vectorized(E[:, 0, 0],
+ _prod_vectorized(self.M0, V))
+ prod += _scalar_vectorized(E[:, 1, 0],
+ _prod_vectorized(self.M1, V))
+ prod += _scalar_vectorized(E[:, 2, 0],
+ _prod_vectorized(self.M2, V))
+ s = _roll_vectorized(prod, 3*subtri, axis=0)
+ return _prod_vectorized(dofs, s)[:, 0, 0]
+
+ def get_function_derivatives(self, alpha, J, ecc, dofs):
+ """
+ Parameters
+ ----------
+ *alpha* is a (N x 3 x 1) array (array of column-matrices of
+ barycentric coordinates)
+ *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
+ triangle first apex)
+ *ecc* is a (N x 3 x 1) array (array of column-matrices of triangle
+ eccentricities)
+ *dofs* is a (N x 1 x 9) array (array of row-matrices) of computed
+ degrees of freedom.
+
+ Returns
+ -------
+ Returns the values of interpolated function derivatives [dz/dx, dz/dy]
+ in global coordinates at locations alpha, as column-matrices of
+ shape (N x 2 x 1).
+ """
+ subtri = np.argmin(alpha, axis=1)[:, 0]
+ ksi = _roll_vectorized(alpha, -subtri, axis=0)
+ E = _roll_vectorized(ecc, -subtri, axis=0)
+ x = ksi[:, 0, 0]
+ y = ksi[:, 1, 0]
+ z = ksi[:, 2, 0]
+ x_sq = x*x
+ y_sq = y*y
+ z_sq = z*z
+ dV = _to_matrix_vectorized([
+ [ -3.*x_sq, -3.*x_sq],
+ [ 3.*y_sq, 0.],
+ [ 0., 3.*z_sq],
+ [ -2.*x*z, -2.*x*z+x_sq],
+ [-2.*x*y+x_sq, -2.*x*y],
+ [ 2.*x*y-y_sq, -y_sq],
+ [ 2.*y*z, y_sq],
+ [ z_sq, 2.*y*z],
+ [ -z_sq, 2.*x*z-z_sq],
+ [ x*z-y*z, x*y-y*z]])
+ # Puts back dV in first apex basis
+ dV = _prod_vectorized(dV, _extract_submatrices(
+ self.rotate_dV, subtri, block_size=2, axis=0))
+
+ prod = _prod_vectorized(self.M, dV)
+ prod += _scalar_vectorized(E[:, 0, 0],
+ _prod_vectorized(self.M0, dV))
+ prod += _scalar_vectorized(E[:, 1, 0],
+ _prod_vectorized(self.M1, dV))
+ prod += _scalar_vectorized(E[:, 2, 0],
+ _prod_vectorized(self.M2, dV))
+ dsdksi = _roll_vectorized(prod, 3*subtri, axis=0)
+ dfdksi = _prod_vectorized(dofs, dsdksi)
+ # In global coordinates:
+ # Here we try to deal with the simplest collinear cases, returning a
+ # null matrix.
+ J_inv = _safe_inv22_vectorized(J)
+ dfdx = _prod_vectorized(J_inv, _transpose_vectorized(dfdksi))
+ return dfdx
+
+ def get_function_hessians(self, alpha, J, ecc, dofs):
+ """
+ Parameters
+ ----------
+ *alpha* is a (N x 3 x 1) array (array of column-matrices) of
+ barycentric coordinates
+ *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
+ triangle first apex)
+ *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
+ eccentricities
+ *dofs* is a (N x 1 x 9) array (array of row-matrices) of computed
+ degrees of freedom.
+
+ Returns
+ -------
+ Returns the values of interpolated function 2nd-derivatives
+ [d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha,
+ as column-matrices of shape (N x 3 x 1).
+ """
+ d2sdksi2 = self.get_d2Sidksij2(alpha, ecc)
+ d2fdksi2 = _prod_vectorized(dofs, d2sdksi2)
+ H_rot = self.get_Hrot_from_J(J)
+ d2fdx2 = _prod_vectorized(d2fdksi2, H_rot)
+ return _transpose_vectorized(d2fdx2)
+
+ def get_d2Sidksij2(self, alpha, ecc):
+ """
+ Parameters
+ ----------
+ *alpha* is a (N x 3 x 1) array (array of column-matrices) of
+ barycentric coordinates
+ *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
+ eccentricities
+
+ Returns
+ -------
+ Returns the array d2sdksi2 (N x 9 x 3) of the Hessians of shape functions
+ expressed in covariant coordinates in first apex basis.
+ """
+ subtri = np.argmin(alpha, axis=1)[:, 0]
+ ksi = _roll_vectorized(alpha, -subtri, axis=0)
+ E = _roll_vectorized(ecc, -subtri, axis=0)
+ x = ksi[:, 0, 0]
+ y = ksi[:, 1, 0]
+ z = ksi[:, 2, 0]
+ d2V = _to_matrix_vectorized([
+ [ 6.*x, 6.*x, 6.*x],
+ [ 6.*y, 0., 0.],
+ [ 0., 6.*z, 0.],
+ [ 2.*z, 2.*z-4.*x, 2.*z-2.*x],
+ [2.*y-4.*x, 2.*y, 2.*y-2.*x],
+ [2.*x-4.*y, 0., -2.*y],
+ [ 2.*z, 0., 2.*y],
+ [ 0., 2.*y, 2.*z],
+ [ 0., 2.*x-4.*z, -2.*z],
+ [ -2.*z, -2.*y, x-y-z]])
+ # Puts back d2V in first apex basis
+ d2V = _prod_vectorized(d2V, _extract_submatrices(
+ self.rotate_d2V, subtri, block_size=3, axis=0))
+ prod = _prod_vectorized(self.M, d2V)
+ prod += _scalar_vectorized(E[:, 0, 0],
+ _prod_vectorized(self.M0, d2V))
+ prod += _scalar_vectorized(E[:, 1, 0],
+ _prod_vectorized(self.M1, d2V))
+ prod += _scalar_vectorized(E[:, 2, 0],
+ _prod_vectorized(self.M2, d2V))
+ d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0)
+ return d2sdksi2
+
+ def get_bending_matrices(self, J, ecc):
+ """
+ Parameters
+ ----------
+ *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
+ triangle first apex)
+ *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
+ eccentricities
+
+ Returns
+ -------
+ Returns the element K matrices for bending energy expressed in
+ GLOBAL nodal coordinates.
+ K_ij = integral [ (d2zi/dx2 + d2zi/dy2) * (d2zj/dx2 + d2zj/dy2) dA]
+ tri_J is needed to rotate dofs from local basis to global basis
+ """
+ n = np.size(ecc, 0)
+
+ # 1) matrix to rotate dofs in global coordinates
+ J1 = _prod_vectorized(self.J0_to_J1, J)
+ J2 = _prod_vectorized(self.J0_to_J2, J)
+ DOF_rot = np.zeros([n, 9, 9], dtype=np.float64)
+ DOF_rot[:, 0, 0] = 1
+ DOF_rot[:, 3, 3] = 1
+ DOF_rot[:, 6, 6] = 1
+ DOF_rot[:, 1:3, 1:3] = J
+ DOF_rot[:, 4:6, 4:6] = J1
+ DOF_rot[:, 7:9, 7:9] = J2
+
+ # 2) matrix to rotate Hessian in global coordinates.
+ H_rot, area = self.get_Hrot_from_J(J, return_area=True)
+
+ # 3) Computes stiffness matrix
+ # Gauss quadrature.
+ K = np.zeros([n, 9, 9], dtype=np.float64)
+ weights = self.gauss_w
+ pts = self.gauss_pts
+ for igauss in range(self.n_gauss):
+ alpha = np.tile(pts[igauss, :], n).reshape(n, 3)
+ alpha = np.expand_dims(alpha, 2)
+ weight = weights[igauss]
+ d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc)
+ d2Skdx2 = _prod_vectorized(d2Skdksi2, H_rot)
+ K += weight * _prod_vectorized(_prod_vectorized(d2Skdx2, self.E),
+ _transpose_vectorized(d2Skdx2))
+
+ # 4) With nodal (not elem) dofs
+ K = _prod_vectorized(_prod_vectorized(_transpose_vectorized(DOF_rot),
+ K), DOF_rot)
+
+ # 5) Need the area to compute total element energy
+ return _scalar_vectorized(area, K)
+
+ def get_Hrot_from_J(self, J, return_area=False):
+ """
+ Parameters
+ ----------
+ *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
+ triangle first apex)
+
+ Returns
+ -------
+ Returns H_rot used to rotate Hessian from local basis of first apex,
+ to global coordinates.
+ If *return_area* is True, also returns the triangle area (0.5*det(J)).
+ """
+ # Here we try to deal with the simplest collinear cases ; a null
+ # energy and area is imposed.
+ J_inv = _safe_inv22_vectorized(J)
+ Ji00 = J_inv[:, 0, 0]
+ Ji11 = J_inv[:, 1, 1]
+ Ji10 = J_inv[:, 1, 0]
+ Ji01 = J_inv[:, 0, 1]
+ H_rot = _to_matrix_vectorized([
+ [Ji00*Ji00, Ji10*Ji10, Ji00*Ji10],
+ [Ji01*Ji01, Ji11*Ji11, Ji01*Ji11],
+ [2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]])
+ if not return_area:
+ return H_rot
+ else:
+ area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0])
+ return H_rot, area
+
+ def get_Kff_and_Ff(self, J, ecc, triangles, Uc):
+ """
+ Builds K and F for the following elliptic formulation:
+ minimization of curvature energy with value of function at node
+ imposed and derivatives 'free'.
+ Builds the global Kff matrix in coo format.
+ Builds the full Ff vec Ff = - Kfc x Uc
+
+ Parameters
+ ----------
+ *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
+ triangle first apex)
+ *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
+ eccentricities
+ *triangles* is a (N x 3) array of node indices.
+ *Uc* is (N x 3) array of imposed displacements at nodes
+
+ Returns
+ -------
+ (Kff_rows, Kff_cols, Kff_vals) Kff matrix in coo format - Duplicate
+ (row, col) entries must be summed.
+ Ff: force vector - dim npts * 3
+ """
+ ntri = np.size(ecc, 0)
+ vec_range = np.arange(ntri, dtype=np.int32)
+ c_indices = -np.ones(ntri, dtype=np.int32) # for unused dofs, -1
+ f_dof = [1, 2, 4, 5, 7, 8]
+ c_dof = [0, 3, 6]
+
+ # vals, rows and cols indices in global dof numbering
+ f_dof_indices = _to_matrix_vectorized([[
+ c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1,
+ c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1,
+ c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]])
+
+ expand_indices = np.ones([ntri, 9, 1], dtype=np.int32)
+ f_row_indices = _prod_vectorized(_transpose_vectorized(f_dof_indices),
+ _transpose_vectorized(expand_indices))
+ f_col_indices = _prod_vectorized(expand_indices, f_dof_indices)
+ K_elem = self.get_bending_matrices(J, ecc)
+
+ # Extracting sub-matrices
+ # Explanation & notations:
+ # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dy)
+ # * Subscript c denotes 'condensed' (imposed) degrees of freedom
+ # (i.e. z at all nodes)
+ # * F = [Ff, Fc] is the force vector
+ # * U = [Uf, Uc] is the imposed dof vector
+ # [ Kff Kfc ]
+ # * K = [ ] is the laplacian stiffness matrix
+ # [ Kcf Kcc ]
+ # * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc
+
+ # Computing Kff stiffness matrix in sparse coo format
+ Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)])
+ Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)])
+ Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)])
+
+ # Computing Ff force vector in sparse coo format
+ Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)]
+ Uc_elem = np.expand_dims(Uc, axis=2)
+ Ff_elem = - _prod_vectorized(Kfc_elem, Uc_elem)[:, :, 0]
+ Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :]
+
+ # Extracting Ff force vector in dense format
+ # We have to sum duplicate indices - using bincount
+ Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem))
+ return Kff_rows, Kff_cols, Kff_vals, Ff
+
+
+# :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom,
+# _DOF_estimator_min_E
+# Private classes used to compute the degree of freedom of each triangular
+# element for the TriCubicInterpolator.
+class _DOF_estimator():
+ """
+ Abstract base class for classes used to estimate a function's first
+ derivatives, and deduce the dofs for a CubicTriInterpolator using a
+ reduced HCT element formulation.
+ Derived classes implement compute_dz(self, **kwargs), returning
+ np.vstack([dfx, dfy]).T where dfx, dfy are the estimates of the 2
+ gradient components.
+ """
+ def __init__(self, interpolator, **kwargs):
+ if not isinstance(interpolator, CubicTriInterpolator):
+ raise ValueError("Expected a CubicTriInterpolator object")
+ self._pts = interpolator._pts
+ self._tris_pts = interpolator._tris_pts
+ self.z = interpolator._z
+ self._triangles = interpolator._triangles
+ (self._unit_x, self._unit_y) = (interpolator._unit_x,
+ interpolator._unit_y)
+ self.dz = self.compute_dz(**kwargs)
+ self.compute_dof_from_df()
+
+ def compute_dz(self, **kwargs):
+ raise NotImplementedError
+
+ def compute_dof_from_df(self):
+ """
+ Computes the reduced-HCT element degrees of freedom, knowing the
+ gradient.
+ """
+ J = CubicTriInterpolator._get_jacobian(self._tris_pts)
+ tri_z = self.z[self._triangles]
+ tri_dz = self.dz[self._triangles]
+ tri_dof = self.get_dof_vec(tri_z, tri_dz, J)
+ return tri_dof
+
+ @staticmethod
+ def get_dof_vec(tri_z, tri_dz, J):
+ """
+ Computes the dof vector of a triangle, knowing the value of f, df and
+ of the local Jacobian at each node.
+
+ *tri_z*: array of shape (3,) of f nodal values
+ *tri_dz*: array of shape (3,2) of df/dx, df/dy nodal values
+ *J*: Jacobian matrix in local basis of apex 0
+
+ Returns dof array of shape (9,) so that for each apex iapex:
+ dof[iapex*3+0] = f(Ai)
+ dof[iapex*3+1] = df(Ai).(AiAi+)
+ dof[iapex*3+2] = df(Ai).(AiAi-)
+ """
+ npt = tri_z.shape[0]
+ dof = np.zeros([npt, 9], dtype=np.float64)
+ J1 = _prod_vectorized(_ReducedHCT_Element.J0_to_J1, J)
+ J2 = _prod_vectorized(_ReducedHCT_Element.J0_to_J2, J)
+
+ col0 = _prod_vectorized(J, np.expand_dims(tri_dz[:, 0, :], axis=2))
+ col1 = _prod_vectorized(J1, np.expand_dims(tri_dz[:, 1, :], axis=2))
+ col2 = _prod_vectorized(J2, np.expand_dims(tri_dz[:, 2, :], axis=2))
+
+ dfdksi = _to_matrix_vectorized([
+ [col0[:, 0, 0], col1[:, 0, 0], col2[:, 0, 0]],
+ [col0[:, 1, 0], col1[:, 1, 0], col2[:, 1, 0]]])
+ dof[:, 0:7:3] = tri_z
+ dof[:, 1:8:3] = dfdksi[:, 0]
+ dof[:, 2:9:3] = dfdksi[:, 1]
+ return dof
+
+
+class _DOF_estimator_user(_DOF_estimator):
+ """ dz is imposed by user / Accounts for scaling if any """
+ def compute_dz(self, dz):
+ (dzdx, dzdy) = dz
+ dzdx = dzdx * self._unit_x
+ dzdy = dzdy * self._unit_y
+ return np.vstack([dzdx, dzdy]).T
+
+
+class _DOF_estimator_geom(_DOF_estimator):
+ """ Fast 'geometric' approximation, recommended for large arrays. """
+ def compute_dz(self):
+ """
+ self.df is computed as a weighted average of the triangles sharing a
+ common node. On each triangle itri, f is first assumed linear (= ~f),
+ which allows computing d~f[itri].
+ Then the following approximation of the df nodal values is proposed:
+ df[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt)
+ The weighting coefficients w[itri] are proportional to the angle of
+ the triangle itri at apex ipt.
+ """
+ el_geom_w = self.compute_geom_weights()
+ el_geom_grad = self.compute_geom_grads()
+
+ # Sum of weights coeffs
+ w_node_sum = np.bincount(np.ravel(self._triangles),
+ weights=np.ravel(el_geom_w))
+
+ # Sum of weighted df = (dfx, dfy)
+ dfx_el_w = np.empty_like(el_geom_w)
+ dfy_el_w = np.empty_like(el_geom_w)
+ for iapex in range(3):
+ dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]
+ dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]
+ dfx_node_sum = np.bincount(np.ravel(self._triangles),
+ weights=np.ravel(dfx_el_w))
+ dfy_node_sum = np.bincount(np.ravel(self._triangles),
+ weights=np.ravel(dfy_el_w))
+
+ # Estimation of df
+ dfx_estim = dfx_node_sum/w_node_sum
+ dfy_estim = dfy_node_sum/w_node_sum
+ return np.vstack([dfx_estim, dfy_estim]).T
+
+ def compute_geom_weights(self):
+ """
+ Builds the (nelems x 3) weight coefficients of the triangle angles,
+ renormalized so that np.sum(weights, axis=1) == np.ones(nelems)
+ """
+ weights = np.zeros([np.size(self._triangles, 0), 3])
+ tris_pts = self._tris_pts
+ for ipt in range(3):
+ p0 = tris_pts[:, (ipt) % 3, :]
+ p1 = tris_pts[:, (ipt+1) % 3, :]
+ p2 = tris_pts[:, (ipt-1) % 3, :]
+ alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])
+ alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])
+ # In the below formula we could take modulo 2. but
+ # modulo 1. is safer regarding round-off errors (flat triangles).
+ angle = np.abs(np.mod((alpha2-alpha1) / np.pi, 1.))
+ # Weight proportional to angle up to np.pi/2 ; null weight for
+ # degenerate cases 0. and np.pi (Note that `angle` is normalized
+ # by np.pi)
+ weights[:, ipt] = 0.5 - np.abs(angle-0.5)
+ return weights
+
+ def compute_geom_grads(self):
+ """
+ Compute the (global) gradient component of f, assumed linear (~f).
+ Returns array df of shape (nelems, 2).
+ df[ielem] . dM[ielem] = dz[ielem], i.e. df = dM.T^-1 x dz
+ """
+ tris_pts = self._tris_pts
+ tris_f = self.z[self._triangles]
+
+ dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :]
+ dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :]
+ dM = np.dstack([dM1, dM2])
+ # Here we try to deal with the simplest collinear cases: a null
+ # gradient is assumed in this case.
+ dM_inv = _safe_inv22_vectorized(dM)
+
+ dZ1 = tris_f[:, 1] - tris_f[:, 0]
+ dZ2 = tris_f[:, 2] - tris_f[:, 0]
+ dZ = np.vstack([dZ1, dZ2]).T
+ df = np.empty_like(dZ)
+
+ # With np.einsum : could be ej,eji -> ei
+ df[:, 0] = dZ[:, 0]*dM_inv[:, 0, 0] + dZ[:, 1]*dM_inv[:, 1, 0]
+ df[:, 1] = dZ[:, 0]*dM_inv[:, 0, 1] + dZ[:, 1]*dM_inv[:, 1, 1]
+ return df
+
+
+class _DOF_estimator_min_E(_DOF_estimator_geom):
+ """
+ The 'smoothest' approximation, df is computed through global minimization
+ of the bending energy:
+ E(f) = integral[(d2z/dx2 + d2z/dy2 + 2 d2z/dxdy)**2 dA]
+ """
+ def __init__(self, Interpolator):
+ self._eccs = Interpolator._eccs
+ _DOF_estimator_geom.__init__(self, Interpolator)
+
+ def compute_dz(self):
+ """
+ Elliptic solver for bending energy minimization.
+ Uses a dedicated 'toy' sparse Jacobi PCG solver.
+ """
+ # Initial guess for iterative PCG solver.
+ dz_init = _DOF_estimator_geom.compute_dz(self)
+ Uf0 = np.ravel(dz_init)
+
+ reference_element = _ReducedHCT_Element()
+ J = CubicTriInterpolator._get_jacobian(self._tris_pts)
+ eccs = self._eccs
+ triangles = self._triangles
+ Uc = self.z[self._triangles]
+
+ # Building stiffness matrix and force vector in coo format
+ Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(
+ J, eccs, triangles, Uc)
+
+ # Building sparse matrix and solving minimization problem
+ # We could use a scipy.sparse direct solver ; however, to avoid this
+ # external dependency, a simple PCG solver with a diagonal Jacobi
+ # preconditioner is implemented here.
+ tol = 1.e-10
+ n_dof = Ff.shape[0]
+ Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols,
+ shape=(n_dof, n_dof))
+ Kff_coo.compress_csc()
+ Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)
+ # If the PCG did not converge, we return the best guess between Uf0
+ # and Uf.
+ err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)
+ if err0 < err:
+ # Maybe a good occasion to raise a warning here ?
+ warnings.warn("In TriCubicInterpolator initialization, PCG sparse"
+ " solver did not converge after 1000 iterations. "
+ "`geom` approximation is used instead of `min_E`")
+ Uf = Uf0
+
+ # Building dz from Uf
+ dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)
+ dz[:, 0] = Uf[::2]
+ dz[:, 1] = Uf[1::2]
+ return dz
+
+
+# The following private :class:_Sparse_Matrix_coo and :func:_cg provide
+# a PCG sparse solver for (symmetric) elliptic problems.
+class _Sparse_Matrix_coo(object):
+ def __init__(self, vals, rows, cols, shape):
+ """
+ Creates a sparse matrix in coo format
+ *vals*: array of values of the non-zero entries of the matrix
+ *rows*: int array of row indices of the non-zero entries
+ *cols*: int array of column indices of the non-zero entries
+ *shape*: 2-tuple (n, m) of matrix shape
+
+ """
+ self.n, self.m = shape
+ self.vals = np.asarray(vals, dtype=np.float64)
+ self.rows = np.asarray(rows, dtype=np.int32)
+ self.cols = np.asarray(cols, dtype=np.int32)
+
+ def dot(self, V):
+ """
+ Dot product of self with a dense vector *V* (sparse-dense product,
+ returned as a dense vector).
+ *V*: dense vector of shape (self.m,)
+ """
+ assert V.shape == (self.m,)
+ return np.bincount(self.rows,
+ weights=self.vals*V[self.cols],
+ minlength=self.m)
+
+ def compress_csc(self):
+ """
+ Compress rows, cols, vals / summing duplicates. Sort for csc format.
+ """
+ _, unique, indices = np.unique(
+ self.rows + self.n*self.cols,
+ return_index=True, return_inverse=True)
+ self.rows = self.rows[unique]
+ self.cols = self.cols[unique]
+ self.vals = np.bincount(indices, weights=self.vals)
+
+ def compress_csr(self):
+ """
+ Compress rows, cols, vals / summing duplicates. Sort for csr format.
+ """
+ _, unique, indices = np.unique(
+ self.m*self.rows + self.cols,
+ return_index=True, return_inverse=True)
+ self.rows = self.rows[unique]
+ self.cols = self.cols[unique]
+ self.vals = np.bincount(indices, weights=self.vals)
+
+ def to_dense(self):
+ """
+ Returns a dense matrix representing self.
+ Mainly for debugging purposes.
+ """
+ ret = np.zeros([self.n, self.m], dtype=np.float64)
+ nvals = self.vals.size
+ for i in range(nvals):
+ ret[self.rows[i], self.cols[i]] += self.vals[i]
+ return ret
+
+ def __str__(self):
+ return self.to_dense().__str__()
+
+ @property
+ def diag(self):
+ """
+ Returns the (dense) vector of the diagonal elements.
+ """
+ in_diag = (self.rows == self.cols)
+ diag = np.zeros(min(self.n, self.m), dtype=np.float64)  # default 0.
+ diag[self.rows[in_diag]] = self.vals[in_diag]
+ return diag
+
+
+def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000):
+ """
+ Use Preconditioned Conjugate Gradient iteration to solve A x = b
+ A simple Jacobi (diagonal) preconditioner is used.
+
+ Parameters
+ ----------
+ A: _Sparse_Matrix_coo
+ *A* must have been compressed before by compress_csc or
+ compress_csr method.
+
+ b: array
+ Right hand side of the linear system.
+
+ Returns
+ -------
+ x: array.
+ The converged solution.
+ err: float
+ The absolute error np.linalg.norm(A.dot(x) - b)
+
+ Other parameters
+ ----------------
+ x0: array.
+ Starting guess for the solution.
+ tol: float.
+ Tolerance to achieve. The algorithm terminates when the relative
+ residual is below tol.
+ maxiter: integer.
+ Maximum number of iterations. Iteration will stop
+ after maxiter steps even if the specified tolerance has not
+ been achieved.
+ """
+ n = b.size
+ assert A.n == n
+ assert A.m == n
+ b_norm = np.linalg.norm(b)
+
+ # Jacobi pre-conditioner
+ kvec = A.diag
+ # For diag elem < 1e-6 we keep 1e-6.
+ kvec = np.where(kvec > 1.e-6, kvec, 1.e-6)
+
+ # Initial guess
+ if x0 is None:
+ x = np.zeros(n)
+ else:
+ x = x0
+
+ r = b - A.dot(x)
+ w = r/kvec
+
+ p = np.zeros(n)
+ beta = 0.0
+ rho = np.dot(r, w)
+ k = 0
+
+ # Following C. T. Kelley
+ while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter):
+ p = w + beta*p
+ z = A.dot(p)
+ alpha = rho/np.dot(p, z)
+ r = r - alpha*z
+ w = r/kvec
+ rhoold = rho
+ rho = np.dot(r, w)
+ x = x + alpha*p
+ beta = rho/rhoold
+ #err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used
+ k += 1
+ err = np.linalg.norm(A.dot(x) - b)
+ return x, err
+
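+ # Illustrative sketch (editor's addition, not part of the upstream
+ # matplotlib source): solving a tiny symmetric positive definite system
+ # with the toy sparse matrix class and the Jacobi-preconditioned CG solver
+ # defined above. Never called by the library code.
+ def _example_cg_solver():
+     # 2x2 SPD matrix [[4, 1], [1, 3]] in coo format; duplicate (row, col)
+     # entries, if any, would be summed by compress_csc.
+     vals = np.array([4., 1., 1., 3.])
+     rows = np.array([0, 0, 1, 1])
+     cols = np.array([0, 1, 0, 1])
+     A = _Sparse_Matrix_coo(vals, rows, cols, shape=(2, 2))
+     A.compress_csc()
+     b = np.array([1., 2.])
+     x, err = _cg(A=A, b=b, tol=1.e-12)
+     # The exact solution of [[4, 1], [1, 3]] x = [1, 2] is [1/11, 7/11].
+     assert np.allclose(x, [1. / 11., 7. / 11.])
+     return x, err
+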
+
+# The following private functions:
+# :func:`_inv22_vectorized`
+# :func:`_safe_inv22_vectorized`
+# :func:`_pseudo_inv22sym_vectorized`
+# :func:`_prod_vectorized`
+# :func:`_scalar_vectorized`
+# :func:`_transpose_vectorized`
+# :func:`_roll_vectorized`
+# :func:`_to_matrix_vectorized`
+# :func:`_extract_submatrices`
+# provide fast numpy implementation of some standard operations on arrays of
+# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays.
+def _inv22_vectorized(M):
+ """
+ Inversion of arrays of (2,2) matrices.
+ """
+ assert (M.ndim == 3)
+ assert (M.shape[-2:] == (2, 2))
+ M_inv = np.empty_like(M)
+ delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])
+ M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
+ M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
+ M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
+ M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
+ return M_inv
+
+
+# Development note: Dealing with pathologic 'flat' triangles in the
+# CubicTriInterpolator code and impact on (2,2)-matrix inversion functions
+# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`.
+#
+# Goals:
+# 1) The CubicTriInterpolator should be able to handle flat or almost flat
+# triangles without raising an error,
+# 2) These degenerated triangles should have no impact on the automatic dof
+# calculation (associated with null weight for the _DOF_estimator_geom and
+# with null energy for the _DOF_estimator_min_E),
+# 3) Linear patch test should be passed exactly on degenerated meshes,
+# 4) Interpolation (with :meth:`_interpolate_single_key` or
+# :meth:`_interpolate_multi_key`) shall be correctly handled even *inside*
+# the pathologic triangles, to interact correctly with a TriRefiner class.
+#
+# Difficulties:
+# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and
+# *metric* (the metric tensor = J x J.T). Computation of the local
+# tangent plane is also problematic.
+#
+# Implementation:
+# Most of the time, when computing the inverse of a rank-deficient matrix it
+# is safe to simply return the null matrix (which is the implementation in
+# :func:`_safe_inv22_vectorized`). This is because of point 2), itself
+# enforced by:
+# - null area hence null energy in :class:`_DOF_estimator_min_E`
+# - angles close or equal to 0 or np.pi hence null weight in
+# :class:`_DOF_estimator_geom`.
+# Note that the function angle -> weight is continuous and maximum for an
+# angle np.pi/2 (refer to :meth:`compute_geom_weights`)
+# The exception is the computation of barycentric coordinates, which is done
+# by inversion of the *metric* matrix. In this case, we need to compute a set
+# of valid coordinates (1 among numerous possibilities), to ensure point 4).
+# We benefit here from the symmetry of metric = J x J.T, which makes it easier
+# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized`
+def _safe_inv22_vectorized(M):
+ """
+ Inversion of arrays of (2,2) matrices, returns 0 for rank-deficient
+ matrices.
+
+ *M* : array of (2,2) matrices to invert, shape (n,2,2)
+ """
+ assert M.ndim == 3
+ assert M.shape[-2:] == (2, 2)
+ M_inv = np.empty_like(M)
+ prod1 = M[:, 0, 0]*M[:, 1, 1]
+ delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
+
+ # We set delta_inv to 0. in case of a rank deficient matrix ; a
+ # rank-deficient input matrix *M* will lead to a null matrix in output
+ rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
+ if np.all(rank2):
+ # Normal 'optimized' flow.
+ delta_inv = 1./delta
+ else:
+ # 'Pathologic' flow.
+ delta_inv = np.zeros(M.shape[0])
+ delta_inv[rank2] = 1./delta[rank2]
+
+ M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
+ M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
+ M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
+ M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
+ return M_inv
+
+
+def _pseudo_inv22sym_vectorized(M):
+ """
+ Inversion of arrays of (2,2) SYMMETRIC matrices ; returns the
+ (Moore-Penrose) pseudo-inverse for rank-deficient matrices.
+
+ In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal
+ projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2
+ In case M is of rank 0, we return the null matrix.
+
+ *M* : array of (2,2) matrices to invert, shape (n,2,2)
+ """
+ assert M.ndim == 3
+ assert M.shape[-2:] == (2, 2)
+ M_inv = np.empty_like(M)
+ prod1 = M[:, 0, 0]*M[:, 1, 1]
+ delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
+ rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
+
+ if np.all(rank2):
+ # Normal 'optimized' flow.
+ M_inv[:, 0, 0] = M[:, 1, 1] / delta
+ M_inv[:, 0, 1] = -M[:, 0, 1] / delta
+ M_inv[:, 1, 0] = -M[:, 1, 0] / delta
+ M_inv[:, 1, 1] = M[:, 0, 0] / delta
+ else:
+ # 'Pathologic' flow.
+ # Here we have to deal with 2 sub-cases
+ # 1) First sub-case: matrices of rank 2:
+ delta = delta[rank2]
+ M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta
+ M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta
+ M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta
+ M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta
+ # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:
+ rank01 = ~rank2
+ tr = M[rank01, 0, 0] + M[rank01, 1, 1]
+ tr_zeros = (np.abs(tr) < 1.e-8)
+ sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)
+ #sq_tr_inv = 1. / tr**2
+ M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv
+ M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv
+ M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv
+ M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv
+
+ return M_inv
+
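+ # Illustrative sketch (editor's addition, not part of the upstream
+ # matplotlib source): behaviour of the symmetric pseudo-inverse helper
+ # above on a rank-1 matrix. Never called by the library code.
+ def _example_pseudo_inv22sym():
+     # M = [[1, 1], [1, 1]] has rank 1 and trace 2, so the returned
+     # Moore-Penrose pseudo-inverse is M / trace(M)**2 = M / 4.
+     M = np.array([[[1., 1.], [1., 1.]]])
+     M_inv = _pseudo_inv22sym_vectorized(M)
+     assert np.allclose(M_inv[0], M[0] / 4.)
+     return M_inv
+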
+
+def _prod_vectorized(M1, M2):
+ """
+ Matrix product between arrays of matrices, or a matrix and an array of
+ matrices (*M1* and *M2*)
+ """
+ sh1 = M1.shape
+ sh2 = M2.shape
+ assert len(sh1) >= 2
+ assert len(sh2) >= 2
+ assert sh1[-1] == sh2[-2]
+
+ ndim1 = len(sh1)
+ t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]
+ return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *
+ M2[..., np.newaxis, :], -3)
+
+
+def _scalar_vectorized(scalar, M):
+ """
+ Scalar product between scalars and matrices.
+ """
+ return scalar[:, np.newaxis, np.newaxis]*M
+
+
+def _transpose_vectorized(M):
+ """
+ Transposition of an array of matrices *M*.
+ """
+ ndim = M.ndim
+ assert ndim == 3
+ return np.transpose(M, [0, ndim-1, ndim-2])
+
+
+def _roll_vectorized(M, roll_indices, axis):
+ """
+ Rolls an array of matrices along an axis according to an array of indices
+ *roll_indices*
+ *axis* can be either 0 (rolls rows) or 1 (rolls columns).
+ """
+ assert axis in [0, 1]
+ ndim = M.ndim
+ assert ndim == 3
+ ndim_roll = roll_indices.ndim
+ assert ndim_roll == 1
+ sh = M.shape
+ r, c = sh[-2:]
+ assert sh[0] == roll_indices.shape[0]
+ vec_indices = np.arange(sh[0], dtype=np.int32)
+
+ # Builds the rolled matrix
+ M_roll = np.empty_like(M)
+ if axis == 0:
+ for ir in range(r):
+ for ic in range(c):
+ M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
+ elif axis == 1:
+ for ir in range(r):
+ for ic in range(c):
+ M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]
+ return M_roll
+
+
+def _to_matrix_vectorized(M):
+ """
+ Builds an array of matrices from individual np.arrays of identical
+ shapes.
+ *M*: nrows-list of ncols-lists of arrays of shape sh.
+
+ Returns M_res np.array of shape (sh, nrows, ncols) so that:
+ M_res[...,i,j] = M[i][j]
+ """
+ assert isinstance(M, (tuple, list))
+ assert all([isinstance(item, (tuple, list)) for item in M])
+ c_vec = np.asarray([len(item) for item in M])
+ assert np.all(c_vec-c_vec[0] == 0)
+ r = len(M)
+ c = c_vec[0]
+ M00 = np.asarray(M[0][0])
+ dt = M00.dtype
+ sh = [M00.shape[0], r, c]
+ M_ret = np.empty(sh, dtype=dt)
+ for irow in range(r):
+ for icol in range(c):
+ M_ret[:, irow, icol] = np.asarray(M[irow][icol])
+ return M_ret
+
+
+def _extract_submatrices(M, block_indices, block_size, axis):
+ """
+ Extracts selected blocks of a matrix *M* depending on parameters
+ *block_indices* and *block_size*.
+
+ Returns the array of extracted matrices *Mres* so that:
+ M_res[...,ir,:] = M[(block_indices*block_size+ir), :]
+ """
+ assert block_indices.ndim == 1
+ assert axis in [0, 1]
+
+ r, c = M.shape
+ if axis == 0:
+ sh = [block_indices.shape[0], block_size, c]
+ elif axis == 1:
+ sh = [block_indices.shape[0], r, block_size]
+
+ dt = M.dtype
+ M_res = np.empty(sh, dtype=dt)
+ if axis == 0:
+ for ir in range(block_size):
+ M_res[:, ir, :] = M[(block_indices*block_size+ir), :]
+ elif axis == 1:
+ for ic in range(block_size):
+ M_res[:, :, ic] = M[:, (block_indices*block_size+ic)]
+
+ return M_res
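+
+
+ # Illustrative sketch (editor's addition, not part of the upstream
+ # matplotlib source): a minimal round-trip through the "array of matrices"
+ # helpers above, on a stack of two 2x2 matrices stored as a (2, 2, 2)
+ # ndarray. Never called by the library code.
+ def _example_vectorized_helpers():
+     A = _to_matrix_vectorized([[np.array([1., 0.]), np.array([2., 1.])],
+                                [np.array([3., 1.]), np.array([4., 0.])]])
+     # A has shape (2, 2, 2): A[0] = [[1, 2], [3, 4]], A[1] = [[0, 1], [1, 0]]
+     C = _prod_vectorized(A, _transpose_vectorized(A))
+     # Each C[i] is the per-matrix product A[i] . A[i].T
+     assert np.allclose(C[0], np.dot(A[0], A[0].T))
+     assert np.allclose(C[1], np.dot(A[1], A[1].T))
+     return C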
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/tripcolor.py b/contrib/python/matplotlib/py2/matplotlib/tri/tripcolor.py
new file mode 100644
index 00000000000..1da789a0774
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/tripcolor.py
@@ -0,0 +1,154 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from matplotlib.collections import PolyCollection, TriMesh
+from matplotlib.colors import Normalize
+from matplotlib.tri.triangulation import Triangulation
+import numpy as np
+
+
+def tripcolor(ax, *args, **kwargs):
+ """
+ Create a pseudocolor plot of an unstructured triangular grid.
+
+ The triangulation can be specified in one of two ways; either::
+
+ tripcolor(triangulation, ...)
+
+ where triangulation is a :class:`matplotlib.tri.Triangulation`
+ object, or
+
+ ::
+
+ tripcolor(x, y, ...)
+ tripcolor(x, y, triangles, ...)
+ tripcolor(x, y, triangles=triangles, ...)
+ tripcolor(x, y, mask=mask, ...)
+ tripcolor(x, y, triangles, mask=mask, ...)
+
+ in which case a Triangulation object will be created. See
+ :class:`~matplotlib.tri.Triangulation` for an explanation of these
+ possibilities.
+
+ The next argument must be *C*, the array of color values, either
+ one per point in the triangulation if color values are defined at
+ points, or one per triangle in the triangulation if color values
+ are defined at triangles. If there are the same number of points
+ and triangles in the triangulation it is assumed that color
+ values are defined at points; to force the use of color values at
+ triangles use the kwarg ``facecolors=C`` instead of just ``C``.
+
+ *shading* may be 'flat' (the default) or 'gouraud'. If *shading*
+ is 'flat' and C values are defined at points, the color values
+ used for each triangle are from the mean C of the triangle's
+ three points. If *shading* is 'gouraud' then color values must be
+ defined at points.
+
+ The remaining kwargs are the same as for
+ :meth:`~matplotlib.axes.Axes.pcolor`.
+ """
+ if not ax._hold:
+ ax.cla()
+
+ alpha = kwargs.pop('alpha', 1.0)
+ norm = kwargs.pop('norm', None)
+ cmap = kwargs.pop('cmap', None)
+ vmin = kwargs.pop('vmin', None)
+ vmax = kwargs.pop('vmax', None)
+ shading = kwargs.pop('shading', 'flat')
+ facecolors = kwargs.pop('facecolors', None)
+
+ if shading not in ['flat', 'gouraud']:
+ raise ValueError("shading must be one of ['flat', 'gouraud'] "
+ "not {0}".format(shading))
+
+ tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
+
+ # C is the colors array defined at either points or faces (i.e. triangles).
+ # If facecolors is None, C are defined at points.
+ # If facecolors is not None, C are defined at faces.
+ if facecolors is not None:
+ C = facecolors
+ else:
+ C = np.asarray(args[0])
+
+ # If there are a different number of points and triangles in the
+ # triangulation, the facecolors kwarg can be omitted as the length of
+ # C makes it clear whether it refers to points or faces.
+ # Do not do this for gouraud shading.
+ if (facecolors is None and len(C) == len(tri.triangles) and
+ len(C) != len(tri.x) and shading != 'gouraud'):
+ facecolors = C
+
+ # Check length of C is OK.
+ if ((facecolors is None and len(C) != len(tri.x)) or
+ (facecolors is not None and len(C) != len(tri.triangles))):
+ raise ValueError('Length of color values array must be the same '
+ 'as either the number of triangulation points '
+ 'or triangles')
+
+ # Handling of linewidths, shading, edgecolors and antialiased as
+ # in Axes.pcolor
+ linewidths = (0.25,)
+ if 'linewidth' in kwargs:
+ kwargs['linewidths'] = kwargs.pop('linewidth')
+ kwargs.setdefault('linewidths', linewidths)
+
+ edgecolors = 'none'
+ if 'edgecolor' in kwargs:
+ kwargs['edgecolors'] = kwargs.pop('edgecolor')
+ ec = kwargs.setdefault('edgecolors', edgecolors)
+
+ if 'antialiased' in kwargs:
+ kwargs['antialiaseds'] = kwargs.pop('antialiased')
+ if 'antialiaseds' not in kwargs and ec.lower() == "none":
+ kwargs['antialiaseds'] = False
+
+ if shading == 'gouraud':
+ if facecolors is not None:
+ raise ValueError('Gouraud shading does not support the use '
+ 'of facecolors kwarg')
+ if len(C) != len(tri.x):
+ raise ValueError('For gouraud shading, the length of color '
+ 'values array must be the same as the '
+ 'number of triangulation points')
+ collection = TriMesh(tri, **kwargs)
+ else:
+ # Vertices of triangles.
+ maskedTris = tri.get_masked_triangles()
+ verts = np.concatenate((tri.x[maskedTris][..., np.newaxis],
+ tri.y[maskedTris][..., np.newaxis]), axis=2)
+
+ # Color values.
+ if facecolors is None:
+ # One color per triangle, the mean of the 3 vertex color values.
+ C = C[maskedTris].mean(axis=1)
+ elif tri.mask is not None:
+ # Remove color values of masked triangles.
+ C = C.compress(1-tri.mask)
+
+ collection = PolyCollection(verts, **kwargs)
+
+ collection.set_alpha(alpha)
+ collection.set_array(C)
+ if norm is not None and not isinstance(norm, Normalize):
+ raise ValueError("'norm' must be an instance of 'Normalize'")
+ collection.set_cmap(cmap)
+ collection.set_norm(norm)
+ if vmin is not None or vmax is not None:
+ collection.set_clim(vmin, vmax)
+ else:
+ collection.autoscale_None()
+ ax.grid(False)
+
+ minx = tri.x.min()
+ maxx = tri.x.max()
+ miny = tri.y.min()
+ maxy = tri.y.max()
+ corners = (minx, miny), (maxx, maxy)
+ ax.update_datalim(corners)
+ ax.autoscale_view()
+ ax.add_collection(collection)
+ return collection
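+
+
+ # Illustrative usage sketch (editor's addition, not part of the upstream
+ # matplotlib source). tripcolor is normally reached through
+ # Axes.tripcolor / pyplot.tripcolor; the helper below builds a small
+ # two-triangle mesh with one color value per point and assumes an
+ # existing Axes instance *ax*. Never called by the library code.
+ def _example_tripcolor(ax):
+     x = np.array([0., 1., 0., 1.])
+     y = np.array([0., 0., 1., 1.])
+     triangles = [[0, 1, 2], [1, 3, 2]]
+     c = x + y  # one color value per triangulation point
+     # Equivalent to ax.tripcolor(x, y, triangles, c, shading='gouraud')
+     # from user code.
+     return tripcolor(ax, x, y, triangles, c, shading='gouraud')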
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/triplot.py b/contrib/python/matplotlib/py2/matplotlib/tri/triplot.py
new file mode 100644
index 00000000000..b22d77b71e6
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/triplot.py
@@ -0,0 +1,88 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+from matplotlib.tri.triangulation import Triangulation
+
+
+def triplot(ax, *args, **kwargs):
+ """
+ Draw an unstructured triangular grid as lines and/or markers.
+
+ The triangulation to plot can be specified in one of two ways;
+ either::
+
+ triplot(triangulation, ...)
+
+ where triangulation is a :class:`matplotlib.tri.Triangulation`
+ object, or
+
+ ::
+
+ triplot(x, y, ...)
+ triplot(x, y, triangles, ...)
+ triplot(x, y, triangles=triangles, ...)
+ triplot(x, y, mask=mask, ...)
+ triplot(x, y, triangles, mask=mask, ...)
+
+ in which case a Triangulation object will be created. See
+ :class:`~matplotlib.tri.Triangulation` for an explanation of these
+ possibilities.
+
+ The remaining args and kwargs are the same as for
+ :meth:`~matplotlib.axes.Axes.plot`.
+
+ Return a list of 2 :class:`~matplotlib.lines.Line2D` containing
+ respectively:
+
+ - the lines plotted for triangle edges
+ - the markers plotted for triangle nodes
+ """
+ import matplotlib.axes
+
+ tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
+ x, y, edges = (tri.x, tri.y, tri.edges)
+
+ # Decode plot format string, e.g., 'ro-'
+ fmt = ""
+ if len(args) > 0:
+ fmt = args[0]
+ linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)
+
+ # Insert plot format string into a copy of kwargs (kwargs values prevail).
+ kw = kwargs.copy()
+ for key, val in zip(('linestyle', 'marker', 'color'),
+ (linestyle, marker, color)):
+ if val is not None:
+ kw[key] = kwargs.get(key, val)
+
+ # Draw lines without markers.
+ # Note 1: If we drew markers here, most markers would be drawn more than
+ # once as they belong to several edges.
+ # Note 2: We insert nan values in the flattened edges arrays rather than
+ # plotting directly (triang.x[edges].T, triang.y[edges].T)
+ # as it considerably speeds up code execution.
+ linestyle = kw['linestyle']
+ kw_lines = kw.copy()
+ kw_lines['marker'] = 'None' # No marker to draw.
+ kw_lines['zorder'] = kw.get('zorder', 1) # Path default zorder is used.
+ if (linestyle is not None) and (linestyle not in ['None', '', ' ']):
+ tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)
+ tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)
+ tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),
+ **kw_lines)
+ else:
+ tri_lines = ax.plot([], [], **kw_lines)
+
+ # Draw markers separately.
+ marker = kw['marker']
+ kw_markers = kw.copy()
+ kw_markers['linestyle'] = 'None' # No line to draw.
+ if (marker is not None) and (marker not in ['None', '', ' ']):
+ tri_markers = ax.plot(x, y, **kw_markers)
+ else:
+ tri_markers = ax.plot([], [], **kw_markers)
+
+ return tri_lines + tri_markers
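+
+
+ # Illustrative usage sketch (editor's addition, not part of the upstream
+ # matplotlib source). triplot is normally reached through Axes.triplot /
+ # pyplot.triplot; the helper below draws the edges of a two-triangle mesh
+ # in red with circle markers, assuming an existing Axes instance *ax*.
+ # Never called by the library code.
+ def _example_triplot(ax):
+     x = np.array([0., 1., 0., 1.])
+     y = np.array([0., 0., 1., 1.])
+     triangles = [[0, 1, 2], [1, 3, 2]]
+     # The plot format string 'ro-' is decoded into color='r', marker='o'
+     # and linestyle='-'.
+     return triplot(ax, x, y, triangles, 'ro-')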
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/trirefine.py b/contrib/python/matplotlib/py2/matplotlib/tri/trirefine.py
new file mode 100644
index 00000000000..bbf33988096
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/trirefine.py
@@ -0,0 +1,323 @@
+"""
+Mesh refinement for triangular grids.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+from matplotlib.tri.triangulation import Triangulation
+import matplotlib.tri.triinterpolate
+
+
+class TriRefiner(object):
+ """
+ Abstract base class for classes implementing mesh refinement.
+
+ A TriRefiner encapsulates a Triangulation object and provides tools for
+ mesh refinement and interpolation.
+
+ Derived classes must implement:
+
+ - ``refine_triangulation(return_tri_index=False, **kwargs)`` , where
+ the optional keyword arguments *kwargs* are defined in each
+ TriRefiner concrete implementation, and which returns :
+
+ - a refined triangulation
+ - optionally (depending on *return_tri_index*), for each
+ point of the refined triangulation: the index of
+ the initial triangulation triangle to which it belongs.
+
+ - ``refine_field(z, triinterpolator=None, **kwargs)`` , where:
+
+ - *z* array of field values (to refine) defined at the base
+ triangulation nodes
+ - *triinterpolator* is a
+ :class:`~matplotlib.tri.TriInterpolator` (optional)
+ - the other optional keyword arguments *kwargs* are defined in
+ each TriRefiner concrete implementation
+
+ and which returns (as a tuple) a refined triangular mesh and the
+ interpolated values of the field at the refined triangulation nodes.
+
+ """
+ def __init__(self, triangulation):
+ if not isinstance(triangulation, Triangulation):
+ raise ValueError("Expected a Triangulation object")
+ self._triangulation = triangulation
+
+
+class UniformTriRefiner(TriRefiner):
+ """
+ Uniform mesh refinement by recursive subdivisions.
+
+ Parameters
+ ----------
+ triangulation : :class:`~matplotlib.tri.Triangulation`
+ The encapsulated triangulation (to be refined)
+ """
+# See Also
+# --------
+# :class:`~matplotlib.tri.CubicTriInterpolator` and
+# :class:`~matplotlib.tri.TriAnalyzer`.
+# """
+ def __init__(self, triangulation):
+ TriRefiner.__init__(self, triangulation)
+
+ def refine_triangulation(self, return_tri_index=False, subdiv=3):
+ """
+ Computes a uniformly refined triangulation *refi_triangulation* of
+ the encapsulated :attr:`triangulation`.
+
+ This function refines the encapsulated triangulation by splitting each
+ father triangle into 4 child sub-triangles built on the edge midside
+ nodes, recursively (level of recursion *subdiv*).
+ In the end, each triangle is hence divided into ``4**subdiv``
+ child triangles.
+ The default value for *subdiv* is 3 resulting in 64 refined
+ subtriangles for each triangle of the initial triangulation.
+
+ Parameters
+ ----------
+ return_tri_index : boolean, optional
+ Boolean indicating whether an index table indicating the father
+ triangle index of each point will be returned. Default value is
+ False.
+ subdiv : integer, optional
+ Recursion level for the subdivision. Default value is 3.
+ Each triangle will be divided into ``4**subdiv`` child triangles.
+
+ Returns
+ -------
+ refi_triangulation : :class:`~matplotlib.tri.Triangulation`
+ The returned refined triangulation
+ found_index : array-like of integers
+ Index of the containing triangle in the initial triangulation, for
+ each point of *refi_triangulation*.
+ Returned only if *return_tri_index* is set to True.
+
+ """
+ refi_triangulation = self._triangulation
+ ntri = refi_triangulation.triangles.shape[0]
+
+ # Computes the triangulation ancestors numbers in the reference
+ # triangulation.
+ ancestors = np.arange(ntri, dtype=np.int32)
+ for _ in range(subdiv):
+ refi_triangulation, ancestors = self._refine_triangulation_once(
+ refi_triangulation, ancestors)
+ refi_npts = refi_triangulation.x.shape[0]
+ refi_triangles = refi_triangulation.triangles
+
+ # Now we compute found_index table if needed
+ if return_tri_index:
+ # We have to initialize found_index with -1 because some nodes
+ # may very well belong to no triangle at all, e.g., in case of
+ # Delaunay Triangulation with DuplicatePointWarning.
+ found_index = - np.ones(refi_npts, dtype=np.int32)
+ tri_mask = self._triangulation.mask
+ if tri_mask is None:
+ found_index[refi_triangles] = np.repeat(ancestors,
+ 3).reshape(-1, 3)
+ else:
+ # There is a subtlety here: we want to avoid, whenever possible,
+ # that a refined point's containing triangle is a masked one
+ # (which would result in artifacts in plots).
+ # So we impose the numbering from masked ancestors first,
+ # then overwrite it with unmasked ancestor numbers.
+ ancestor_mask = tri_mask[ancestors]
+ found_index[refi_triangles[ancestor_mask, :]
+ ] = np.repeat(ancestors[ancestor_mask],
+ 3).reshape(-1, 3)
+ found_index[refi_triangles[~ancestor_mask, :]
+ ] = np.repeat(ancestors[~ancestor_mask],
+ 3).reshape(-1, 3)
+ return refi_triangulation, found_index
+ else:
+ return refi_triangulation
+
+ def refine_field(self, z, triinterpolator=None, subdiv=3):
+ """
+ Refines a field defined on the encapsulated triangulation.
+
+ Returns *refi_tri* (refined triangulation), *refi_z* (interpolated
+ values of the field at the nodes of the refined triangulation).
+
+ Parameters
+ ----------
+ z : 1d-array-like of length ``n_points``
+ Values of the field to refine, defined at the nodes of the
+ encapsulated triangulation. (``n_points`` is the number of points
+ in the initial triangulation)
+ triinterpolator : :class:`~matplotlib.tri.TriInterpolator`, optional
+ Interpolator used for field interpolation. If not specified,
+ a :class:`~matplotlib.tri.CubicTriInterpolator` will
+ be used.
+ subdiv : integer, optional
+ Recursion level for the subdivision. Defaults to 3.
+ Each triangle will be divided into ``4**subdiv`` child triangles.
+
+ Returns
+ -------
+ refi_tri : :class:`~matplotlib.tri.Triangulation` object
+ The returned refined triangulation
+ refi_z : 1d array of length: *refi_tri* node count.
+ The returned interpolated field (at *refi_tri* nodes)
+ """
+ if triinterpolator is None:
+ interp = matplotlib.tri.CubicTriInterpolator(
+ self._triangulation, z)
+ else:
+ if not isinstance(triinterpolator,
+ matplotlib.tri.TriInterpolator):
+ raise ValueError("Expected a TriInterpolator object")
+ interp = triinterpolator
+
+ refi_tri, found_index = self.refine_triangulation(
+ subdiv=subdiv, return_tri_index=True)
+ refi_z = interp._interpolate_multikeys(
+ refi_tri.x, refi_tri.y, tri_index=found_index)[0]
+ return refi_tri, refi_z
+
+ @staticmethod
+ def _refine_triangulation_once(triangulation, ancestors=None):
+ """
+ This function refines a matplotlib.tri *triangulation* by splitting
+ each triangle into 4 child triangles built on the edge midside
+ nodes.
+ The masked triangles, if present, are also split but their children
+ are returned masked.
+
+ If *ancestors* is not provided, returns only a new triangulation:
+ child_triangulation.
+
+ If the array-like key table *ancestors* is given, it shall be of shape
+ (ntri,) where ntri is the number of *triangulation* triangles.
+ In this case, the function returns
+ (child_triangulation, child_ancestors)
+ child_ancestors is defined so that the 4 child triangles share
+ the same index as their father: child_ancestors.shape = (4 * ntri,).
+
+ """
+ x = triangulation.x
+ y = triangulation.y
+
+ # According to tri.triangulation doc:
+ # neighbors[i,j] is the triangle that is the neighbor
+ # to the edge from point index triangles[i,j] to point
+ # index triangles[i,(j+1)%3].
+ neighbors = triangulation.neighbors
+ triangles = triangulation.triangles
+ npts = np.shape(x)[0]
+ ntri = np.shape(triangles)[0]
+ if ancestors is not None:
+ ancestors = np.asarray(ancestors)
+ if np.shape(ancestors) != (ntri,):
+ raise ValueError(
+ "Incompatible shapes provide for triangulation"
+ ".masked_triangles and ancestors: {0} and {1}".format(
+ np.shape(triangles), np.shape(ancestors)))
+
+ # Initiating tables refi_x and refi_y of the refined triangulation
+ # points
+ # hint: each edge is shared by 2 triangles, except those on the borders.
+ borders = np.sum(neighbors == -1)
+ added_pts = (3*ntri + borders) // 2
+ refi_npts = npts + added_pts
+ refi_x = np.zeros(refi_npts)
+ refi_y = np.zeros(refi_npts)
+
+ # First part of refi_x, refi_y is just the initial points
+ refi_x[:npts] = x
+ refi_y[:npts] = y
+
+ # Second part contains the edge midside nodes.
+ # Each edge belongs to 1 triangle (if border edge) or is shared by 2
+ # triangles (interior edge).
+ # We first build 2 arrays of size 3*ntri describing the edge starting
+ # nodes (edge_elems, edge_apexes) ; we then extract only the masters
+ # to avoid overlaps.
+ # The so-called 'master' is the triangle with the larger index
+ # The 'slave' is the triangle with the smaller index
+ # (can be -1 if border edge)
+ # For slave and master we will identify the apex pointing to the edge
+ # start
+ edge_elems = np.ravel(np.vstack([np.arange(ntri, dtype=np.int32),
+ np.arange(ntri, dtype=np.int32),
+ np.arange(ntri, dtype=np.int32)]))
+ edge_apexes = np.ravel(np.vstack([np.zeros(ntri, dtype=np.int32),
+ np.ones(ntri, dtype=np.int32),
+ np.ones(ntri, dtype=np.int32)*2]))
+ edge_neighbors = neighbors[edge_elems, edge_apexes]
+ mask_masters = (edge_elems > edge_neighbors)
+
+ # Identifying the "masters" and adding to refi_x, refi_y vec
+ masters = edge_elems[mask_masters]
+ apex_masters = edge_apexes[mask_masters]
+ x_add = (x[triangles[masters, apex_masters]] +
+ x[triangles[masters, (apex_masters+1) % 3]]) * 0.5
+ y_add = (y[triangles[masters, apex_masters]] +
+ y[triangles[masters, (apex_masters+1) % 3]]) * 0.5
+ refi_x[npts:] = x_add
+ refi_y[npts:] = y_add
+
+ # Building the new triangles ; each old triangle hosts
+ # 4 new triangles
+ # there are 6 pts to identify per 'old' triangle, 3 new_pt_corner and
+ # 3 new_pt_midside
+ new_pt_corner = triangles
+
+ # What is the index in refi_x, refi_y of the midside point of the edge
+ # starting at apex iapex of elem ielem ?
+ # If ielem is the apex master: simple count, given the way refi_x was
+ # built.
+ # If ielem is the apex slave: yet we do not know ; but we will soon
+ # using the neighbors table.
+ new_pt_midside = np.empty([ntri, 3], dtype=np.int32)
+ cum_sum = npts
+ for imid in range(3):
+ mask_st_loc = (imid == apex_masters)
+ n_masters_loc = np.sum(mask_st_loc)
+ elem_masters_loc = masters[mask_st_loc]
+ new_pt_midside[:, imid][elem_masters_loc] = np.arange(
+ n_masters_loc, dtype=np.int32) + cum_sum
+ cum_sum += n_masters_loc
+
+ # Now dealing with slave elems.
+ # for each slave element we identify the master and then the inode
+ # once slave_masters is identified, slave_masters_apex is such that:
+ # neighbors[slaves_masters, slave_masters_apex] == slaves
+ mask_slaves = np.logical_not(mask_masters)
+ slaves = edge_elems[mask_slaves]
+ slaves_masters = edge_neighbors[mask_slaves]
+ diff_table = np.abs(neighbors[slaves_masters, :] -
+ np.outer(slaves, np.ones(3, dtype=np.int32)))
+ slave_masters_apex = np.argmin(diff_table, axis=1)
+ slaves_apex = edge_apexes[mask_slaves]
+ new_pt_midside[slaves, slaves_apex] = new_pt_midside[
+ slaves_masters, slave_masters_apex]
+
+ # Builds the 4 child triangles
+ child_triangles = np.empty([ntri*4, 3], dtype=np.int32)
+ child_triangles[0::4, :] = np.vstack([
+ new_pt_corner[:, 0], new_pt_midside[:, 0],
+ new_pt_midside[:, 2]]).T
+ child_triangles[1::4, :] = np.vstack([
+ new_pt_corner[:, 1], new_pt_midside[:, 1],
+ new_pt_midside[:, 0]]).T
+ child_triangles[2::4, :] = np.vstack([
+ new_pt_corner[:, 2], new_pt_midside[:, 2],
+ new_pt_midside[:, 1]]).T
+ child_triangles[3::4, :] = np.vstack([
+ new_pt_midside[:, 0], new_pt_midside[:, 1],
+ new_pt_midside[:, 2]]).T
+ child_triangulation = Triangulation(refi_x, refi_y, child_triangles)
+
+ # Builds the child mask
+ if triangulation.mask is not None:
+ child_triangulation.set_mask(np.repeat(triangulation.mask, 4))
+
+ if ancestors is None:
+ return child_triangulation
+ else:
+ return child_triangulation, np.repeat(ancestors, 4)
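+
+
+ # Illustrative usage sketch (editor's addition, not part of the upstream
+ # matplotlib source): refining a two-triangle mesh once produces 4 child
+ # triangles per father triangle, i.e. 8 triangles in total. Never called
+ # by the library code.
+ def _example_uniform_refinement():
+     x = np.array([0., 1., 0., 1.])
+     y = np.array([0., 0., 1., 1.])
+     tri = Triangulation(x, y, [[0, 1, 2], [1, 3, 2]])
+     refiner = UniformTriRefiner(tri)
+     refi_tri, found_index = refiner.refine_triangulation(
+         return_tri_index=True, subdiv=1)
+     assert refi_tri.triangles.shape[0] == 8  # 2 * 4**1
+     # found_index maps each refined point to its father triangle index.
+     return refi_tri, found_index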
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/tritools.py b/contrib/python/matplotlib/py2/matplotlib/tri/tritools.py
new file mode 100644
index 00000000000..c7491f9ea55
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/tritools.py
@@ -0,0 +1,304 @@
+"""
+Tools for triangular grids.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from matplotlib.tri import Triangulation
+import numpy as np
+
+
+class TriAnalyzer(object):
+ """
+ Define basic tools for triangular mesh analysis and improvement.
+
+ A TriAnalyzer encapsulates a :class:`~matplotlib.tri.Triangulation`
+ object and provides basic tools for mesh analysis and mesh improvement.
+
+ Parameters
+ ----------
+ triangulation : :class:`~matplotlib.tri.Triangulation` object
+ The encapsulated triangulation to analyze.
+
+ Attributes
+ ----------
+ `scale_factors`
+
+ """
+ def __init__(self, triangulation):
+ if not isinstance(triangulation, Triangulation):
+ raise ValueError("Expected a Triangulation object")
+ self._triangulation = triangulation
+
+ @property
+ def scale_factors(self):
+ """
+ Factors to rescale the triangulation into a unit square.
+
+ Returns *k*, tuple of 2 scale factors.
+
+ Returns
+ -------
+ k : tuple of 2 floats (kx, ky)
+ Tuple of floats that would rescale the triangulation :
+ ``[triangulation.x * kx, triangulation.y * ky]``
+ fits exactly inside a unit square.
+
+ """
+ compressed_triangles = self._triangulation.get_masked_triangles()
+ node_used = (np.bincount(np.ravel(compressed_triangles),
+ minlength=self._triangulation.x.size) != 0)
+ return (1 / np.ptp(self._triangulation.x[node_used]),
+ 1 / np.ptp(self._triangulation.y[node_used]))
+
+ def circle_ratios(self, rescale=True):
+ """
+ Returns a measure of the flatness of the triangulation's triangles.
+
+ The ratio of the incircle radius over the circumcircle radius is a
+ widely used indicator of a triangle's flatness.
+ It is always ``<= 0.5`` and ``== 0.5`` only for equilateral
+ triangles. Circle ratios below 0.01 denote very flat triangles.
+
+ To avoid unduly low values due to a difference of scale between the 2
+ axis, the triangular mesh can first be rescaled to fit inside a unit
+ square with :attr:`scale_factors` (Only if *rescale* is True, which is
+ its default value).
+
+ Parameters
+ ----------
+ rescale : boolean, optional
+ If True, a rescaling will be internally performed (based on
+ :attr:`scale_factors`), so that the (unmasked) triangles fit
+ exactly inside a unit square mesh. Default is True.
+
+ Returns
+ -------
+ circle_ratios : masked array
+ Ratio of the incircle radius over the
+ circumcircle radius, for each 'rescaled' triangle of the
+ encapsulated triangulation.
+ Values corresponding to masked triangles are masked out.
+
+ """
+ # Coords rescaling
+ if rescale:
+ (kx, ky) = self.scale_factors
+ else:
+ (kx, ky) = (1.0, 1.0)
+ pts = np.vstack([self._triangulation.x*kx,
+ self._triangulation.y*ky]).T
+ tri_pts = pts[self._triangulation.triangles]
+ # Computes the 3 side lengths
+ a = tri_pts[:, 1, :] - tri_pts[:, 0, :]
+ b = tri_pts[:, 2, :] - tri_pts[:, 1, :]
+ c = tri_pts[:, 0, :] - tri_pts[:, 2, :]
+ a = np.sqrt(a[:, 0]**2 + a[:, 1]**2)
+ b = np.sqrt(b[:, 0]**2 + b[:, 1]**2)
+ c = np.sqrt(c[:, 0]**2 + c[:, 1]**2)
+ # circumcircle and incircle radii
+ s = (a+b+c)*0.5
+ prod = s*(a+b-s)*(a+c-s)*(b+c-s)
+ # We have to deal with flat triangles with infinite circum_radius
+ bool_flat = (prod == 0.)
+ if np.any(bool_flat):
+ # Pathologic flow
+ ntri = tri_pts.shape[0]
+ circum_radius = np.empty(ntri, dtype=np.float64)
+ circum_radius[bool_flat] = np.inf
+ abc = a*b*c
+ circum_radius[~bool_flat] = abc[~bool_flat] / (
+ 4.0*np.sqrt(prod[~bool_flat]))
+ else:
+ # Normal optimized flow
+ circum_radius = (a*b*c) / (4.0*np.sqrt(prod))
+ in_radius = (a*b*c) / (4.0*circum_radius*s)
+ circle_ratio = in_radius/circum_radius
+ mask = self._triangulation.mask
+ if mask is None:
+ return circle_ratio
+ else:
+ return np.ma.array(circle_ratio, mask=mask)
+
+ def get_flat_tri_mask(self, min_circle_ratio=0.01, rescale=True):
+ """
+ Eliminates excessively flat border triangles from the triangulation.
+
+ Returns a mask *new_mask* which makes it possible to clean the
+ encapsulated triangulation of its border-located flat triangles
+ (according to their :meth:`circle_ratios`).
+ This mask is meant to be subsequently applied to the triangulation
+ using :func:`matplotlib.tri.Triangulation.set_mask`.
+ *new_mask* is an extension of the initial triangulation mask
+ in the sense that an initially masked triangle will remain masked.
+
+ The *new_mask* array is computed recursively; at each step flat
+ triangles are removed only if they share a side with the current
+ mesh border. Thus no new holes in the triangulated domain will be
+ created.
+
+ Parameters
+ ----------
+ min_circle_ratio : float, optional
+ Border triangles with incircle/circumcircle radii ratio r/R will
+ be removed if r/R < *min_circle_ratio*. Default value: 0.01
+ rescale : boolean, optional
+ If True, a rescaling will first be internally performed (based on
+ :attr:`scale_factors`), so that the (unmasked) triangles fit
+ exactly inside a unit square mesh. This rescaling accounts for the
+ difference of scale which might exist between the two axes. Default
+ (and recommended) value is True.
+
+ Returns
+ -------
+ new_mask : array-like of booleans
+ Mask to apply to encapsulated triangulation.
+ All the initially masked triangles remain masked in the
+ *new_mask*.
+
+ Notes
+ -----
+ The rationale behind this function is that a Delaunay
+ triangulation - of an unstructured set of points - sometimes contains
+ almost flat triangles at its border, leading to artifacts in plots
+ (especially for high-resolution contouring).
+ Masked with computed *new_mask*, the encapsulated
+ triangulation would contain no more unmasked border triangles
+ with a circle ratio below *min_circle_ratio*, thus improving the
+ mesh quality for subsequent plots or interpolation.
+ """
+ # Recursively computes the current_mask, true if a triangle is
+ # at the border of the mesh OR touches the border through a chain of
+ # masked triangles with an invalid aspect ratio.
+ ntri = self._triangulation.triangles.shape[0]
+ mask_bad_ratio = self.circle_ratios(rescale) < min_circle_ratio
+
+ current_mask = self._triangulation.mask
+ if current_mask is None:
+ current_mask = np.zeros(ntri, dtype=bool)
+ valid_neighbors = np.copy(self._triangulation.neighbors)
+ renum_neighbors = np.arange(ntri, dtype=np.int32)
+ nadd = -1
+ while nadd != 0:
+ # The active wavefront is the triangles from the border (unmasked
+ # but with at least 1 neighbor equal to -1).
+ wavefront = ((np.min(valid_neighbors, axis=1) == -1)
+ & ~current_mask)
+ # The elements from the active wavefront will be masked if their
+ # circle ratio is bad.
+ added_mask = np.logical_and(wavefront, mask_bad_ratio)
+ current_mask = (added_mask | current_mask)
+ nadd = np.sum(added_mask)
+
+ # now we have to update the valid_neighbors table
+ valid_neighbors[added_mask, :] = -1
+ renum_neighbors[added_mask] = -1
+ valid_neighbors = np.where(valid_neighbors == -1, -1,
+ renum_neighbors[valid_neighbors])
+
+ return np.ma.filled(current_mask, True)
+
+ def _get_compressed_triangulation(self, return_tri_renum=False,
+ return_node_renum=False):
+ """
+ Compress (if masked) the encapsulated triangulation.
+
+ Returns minimal-length triangles array (*compressed_triangles*) and
+ coordinates arrays (*compressed_x*, *compressed_y*) that can still
+ describe the unmasked triangles of the encapsulated triangulation.
+
+ Parameters
+ ----------
+ return_tri_renum : boolean, optional
+ Indicates whether a renumbering table to translate the triangle
+ numbers from the encapsulated triangulation numbering into the
+ new (compressed) renumbering will be returned.
+ return_node_renum : boolean, optional
+ Indicates whether a renumbering table to translate the nodes
+ numbers from the encapsulated triangulation numbering into the
+ new (compressed) renumbering will be returned.
+
+ Returns
+ -------
+ compressed_triangles : array-like
+ the returned compressed triangulation triangles
+ compressed_x : array-like
+ the returned compressed triangulation 1st coordinate
+ compressed_y : array-like
+ the returned compressed triangulation 2nd coordinate
+ tri_renum : array-like of integers
+ renumbering table to translate the triangle numbers from the
+ encapsulated triangulation into the new (compressed) renumbering.
+ -1 for masked triangles (deleted from *compressed_triangles*).
+ Returned only if *return_tri_renum* is True.
+ node_renum : array-like of integers
+ renumbering table to translate the point numbers from the
+ encapsulated triangulation into the new (compressed) renumbering.
+ -1 for unused points (i.e. those deleted from *compressed_x* and
+ *compressed_y*). Returned only if *return_node_renum* is True.
+
+ """
+ # Valid triangles and renumbering
+ tri_mask = self._triangulation.mask
+ compressed_triangles = self._triangulation.get_masked_triangles()
+ ntri = self._triangulation.triangles.shape[0]
+ tri_renum = self._total_to_compress_renum(tri_mask, ntri)
+
+ # Valid nodes and renumbering
+ node_mask = (np.bincount(np.ravel(compressed_triangles),
+ minlength=self._triangulation.x.size) == 0)
+ compressed_x = self._triangulation.x[~node_mask]
+ compressed_y = self._triangulation.y[~node_mask]
+ node_renum = self._total_to_compress_renum(node_mask)
+
+ # Now renumbering the valid triangles nodes
+ compressed_triangles = node_renum[compressed_triangles]
+
+ # 4 cases possible for return
+ if not return_tri_renum:
+ if not return_node_renum:
+ return compressed_triangles, compressed_x, compressed_y
+ else:
+ return (compressed_triangles, compressed_x, compressed_y,
+ node_renum)
+ else:
+ if not return_node_renum:
+ return (compressed_triangles, compressed_x, compressed_y,
+ tri_renum)
+ else:
+ return (compressed_triangles, compressed_x, compressed_y,
+ tri_renum, node_renum)
+
+ @staticmethod
+ def _total_to_compress_renum(mask, n=None):
+ """
+ Parameters
+ ----------
+ mask : 1d boolean array or None
+ mask
+ n : integer
+ length of the mask. Used only if *mask* can be None.
+
+ Returns
+ -------
+ renum : integer array
+ array so that (`valid_array` being a compressed array
+ based on a `masked_array` with mask *mask*) :
+
+ - For all i such that mask[i] is False:
+ valid_array[renum[i]] = masked_array[i]
+ - For all i such that mask[i] is True:
+ renum[i] = -1 (invalid value)
+
+ """
+ if n is None:
+ n = np.size(mask)
+ if mask is not None:
+ renum = -np.ones(n, dtype=np.int32) # Default num is -1
+ valid = np.arange(n, dtype=np.int32).compress(~mask, axis=0)
+ renum[valid] = np.arange(np.size(valid, 0), dtype=np.int32)
+ return renum
+ else:
+ return np.arange(n, dtype=np.int32)
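+
+
+# Illustrative usage sketch (added for exposition; not part of the
+# upstream module): cleaning a Delaunay triangulation of its flat
+# border triangles with TriAnalyzer, as described in the
+# get_flat_tri_mask docstring. The random point cloud is only an
+# assumption for this example.
+def _demo_flat_border_cleanup():
+    import numpy as np
+    from matplotlib.tri import Triangulation
+    rng = np.random.RandomState(0)
+    x, y = rng.rand(2, 50)
+    tri = Triangulation(x, y)          # Delaunay triangulation of the points
+    analyzer = TriAnalyzer(tri)
+    ratios = analyzer.circle_ratios()  # incircle/circumcircle ratio per triangle
+    mask = analyzer.get_flat_tri_mask(min_circle_ratio=0.01)
+    tri.set_mask(mask)                 # flat border triangles are now masked
+    return ratios, mask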
diff --git a/contrib/python/matplotlib/py2/matplotlib/tri/ya.make b/contrib/python/matplotlib/py2/matplotlib/tri/ya.make
new file mode 100644
index 00000000000..ca8a6324716
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/tri/ya.make
@@ -0,0 +1,41 @@
+PY2_LIBRARY()
+
+LICENSE(PSF-2.0)
+
+NO_COMPILER_WARNINGS()
+NO_LINT()
+
+PEERDIR(
+ ADDINCL contrib/python/numpy
+)
+
+ADDINCL(
+ contrib/python/matplotlib/py2
+)
+
+CFLAGS(
+ -DPY_ARRAY_UNIQUE_SYMBOL=MPL_matplotlib__tri_ARRAY_API
+ -DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION
+)
+
+PY_SRCS(
+ NAMESPACE matplotlib.tri
+ __init__.py
+ triangulation.py
+ tricontour.py
+ trifinder.py
+ triinterpolate.py
+ tripcolor.py
+ triplot.py
+ trirefine.py
+ tritools.py
+)
+
+PY_REGISTER(matplotlib._tri)
+
+SRCS(
+ _tri.cpp
+ _tri_wrapper.cpp
+)
+
+END()
diff --git a/contrib/python/matplotlib/py2/matplotlib/type1font.py b/contrib/python/matplotlib/py2/matplotlib/type1font.py
new file mode 100644
index 00000000000..0eed97ec68e
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/type1font.py
@@ -0,0 +1,334 @@
+"""
+This module contains a class representing a Type 1 font.
+
+This version reads pfa and pfb files and splits them for embedding in
+pdf files. It also supports SlantFont and ExtendFont transformations,
+similarly to pdfTeX and friends. There is no support yet for
+subsetting.
+
+Usage::
+
+ >>> font = Type1Font(filename)
+ >>> clear_part, encrypted_part, finale = font.parts
+ >>> slanted_font = font.transform({'slant': 0.167})
+ >>> extended_font = font.transform({'extend': 1.2})
+
+Sources:
+
+* Adobe Technical Note #5040, Supporting Downloadable PostScript
+ Language Fonts.
+
+* Adobe Type 1 Font Format, Adobe Systems Incorporated, third printing,
+ v1.1, 1993. ISBN 0-201-57044-0.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import binascii
+import io
+import itertools
+import re
+import struct
+
+import numpy as np
+
+if six.PY3:
+ def ord(x):
+ return x
+
+
+class Type1Font(object):
+ """
+ A class representing a Type-1 font, for use by backends.
+
+ Attributes
+ ----------
+ parts : tuple
+ A 3-tuple of the cleartext part, the encrypted part, and the finale of
+ zeros.
+
+ prop : Dict[str, Any]
+ A dictionary of font properties.
+
+ """
+ __slots__ = ('parts', 'prop')
+
+ def __init__(self, input):
+ """
+ Initialize a Type-1 font. *input* can be either the file name of
+ a pfb file or a 3-tuple of already-decoded Type-1 font parts.
+ """
+ if isinstance(input, tuple) and len(input) == 3:
+ self.parts = input
+ else:
+ with open(input, 'rb') as file:
+ data = self._read(file)
+ self.parts = self._split(data)
+
+ self._parse()
+
+ def _read(self, file):
+ """
+ Read the font from a file, decoding into usable parts.
+ """
+ rawdata = file.read()
+ if not rawdata.startswith(b'\x80'):
+ return rawdata
+
+ data = b''
+ while len(rawdata) > 0:
+ if not rawdata.startswith(b'\x80'):
+ raise RuntimeError('Broken pfb file (expected byte 128, '
+ 'got %d)' % ord(rawdata[0]))
+ type = ord(rawdata[1])
+ if type in (1, 2):
+ length, = struct.unpack(str('<i'), rawdata[2:6])
+ segment = rawdata[6:6 + length]
+ rawdata = rawdata[6 + length:]
+
+ if type == 1: # ASCII text: include verbatim
+ data += segment
+ elif type == 2: # binary data: encode in hexadecimal
+ data += binascii.hexlify(segment)
+ elif type == 3: # end of file
+ break
+ else:
+ raise RuntimeError('Unknown segment type %d in pfb file' %
+ type)
+
+ return data
+
+ def _split(self, data):
+ """
+ Split the Type 1 font into its three main parts.
+
+ The three parts are: (1) the cleartext part, which ends in an
+ eexec operator; (2) the encrypted part; (3) the fixed part,
+ which contains 512 ASCII zeros possibly spread across several
+ lines, a cleartomark operator, and possibly something else.
+ """
+
+ # Cleartext part: just find the eexec and skip whitespace
+ idx = data.index(b'eexec')
+ idx += len(b'eexec')
+ while data[idx] in b' \t\r\n':
+ idx += 1
+ len1 = idx
+
+ # Encrypted part: find the cleartomark operator and count
+ # zeros backward
+ idx = data.rindex(b'cleartomark') - 1
+ zeros = 512
+ while zeros and data[idx] in b'0' or data[idx] in b'\r\n':
+ if data[idx] in b'0':
+ zeros -= 1
+ idx -= 1
+ if zeros:
+ raise RuntimeError('Insufficiently many zeros in Type 1 font')
+
+ # Convert encrypted part to binary (if we read a pfb file, we
+ # may end up converting binary to hexadecimal to binary again;
+ # but if we read a pfa file, this part is already in hex, and
+ # I am not quite sure if even the pfb format guarantees that
+ # it will be in binary).
+ binary = binascii.unhexlify(data[len1:idx+1])
+
+ return data[:len1], binary, data[idx+1:]
+
+ _whitespace_re = re.compile(br'[\0\t\r\014\n ]+')
+ _token_re = re.compile(br'/{0,2}[^]\0\t\r\v\n ()<>{}/%[]+')
+ _comment_re = re.compile(br'%[^\r\n\v]*')
+ _instring_re = re.compile(br'[()\\]')
+
+ # token types, compared via object identity (poor man's enum)
+ _whitespace = object()
+ _name = object()
+ _string = object()
+ _delimiter = object()
+ _number = object()
+
+ @classmethod
+ def _tokens(cls, text):
+ """
+ A PostScript tokenizer. Yield (token, value) pairs such as
+ (cls._whitespace, ' ') or (cls._name, '/Foobar').
+ """
+ pos = 0
+ while pos < len(text):
+ match = (cls._comment_re.match(text[pos:]) or
+ cls._whitespace_re.match(text[pos:]))
+ if match:
+ yield (cls._whitespace, match.group())
+ pos += match.end()
+ elif text[pos] == b'(':
+ start = pos
+ pos += 1
+ depth = 1
+ while depth:
+ match = cls._instring_re.search(text[pos:])
+ if match is None:
+ return
+ pos += match.end()
+ if match.group() == b'(':
+ depth += 1
+ elif match.group() == b')':
+ depth -= 1
+ else: # a backslash - skip the next character
+ pos += 1
+ yield (cls._string, text[start:pos])
+ elif text[pos:pos + 2] in (b'<<', b'>>'):
+ yield (cls._delimiter, text[pos:pos + 2])
+ pos += 2
+ elif text[pos] == b'<':
+ start = pos
+ pos += text[pos:].index(b'>')
+ yield (cls._string, text[start:pos])
+ else:
+ match = cls._token_re.match(text[pos:])
+ if match:
+ try:
+ float(match.group())
+ yield (cls._number, match.group())
+ except ValueError:
+ yield (cls._name, match.group())
+ pos += match.end()
+ else:
+ yield (cls._delimiter, text[pos:pos + 1])
+ pos += 1
+
+ def _parse(self):
+ """
+ Find the values of various font properties. This limited kind
+ of parsing is described in Chapter 10 "Adobe Type Manager
+ Compatibility" of the Type-1 spec.
+ """
+ # Start with reasonable defaults
+ prop = {'weight': 'Regular', 'ItalicAngle': 0.0, 'isFixedPitch': False,
+ 'UnderlinePosition': -100, 'UnderlineThickness': 50}
+ filtered = ((token, value)
+ for token, value in self._tokens(self.parts[0])
+ if token is not self._whitespace)
+ # The spec calls this an ASCII format; in Python 2.x we could
+ # just treat the strings and names as opaque bytes but let's
+ # turn them into proper Unicode, and be lenient in case of high bytes.
+ convert = lambda x: x.decode('ascii', 'replace')
+ for token, value in filtered:
+ if token is self._name and value.startswith(b'/'):
+ key = convert(value[1:])
+ token, value = next(filtered)
+ if token is self._name:
+ if value in (b'true', b'false'):
+ value = value == b'true'
+ else:
+ value = convert(value.lstrip(b'/'))
+ elif token is self._string:
+ value = convert(value.lstrip(b'(').rstrip(b')'))
+ elif token is self._number:
+ if b'.' in value:
+ value = float(value)
+ else:
+ value = int(value)
+ else: # more complicated value such as an array
+ value = None
+ if key != 'FontInfo' and value is not None:
+ prop[key] = value
+
+ # Fill in the various *Name properties
+ if 'FontName' not in prop:
+ prop['FontName'] = (prop.get('FullName') or
+ prop.get('FamilyName') or
+ 'Unknown')
+ if 'FullName' not in prop:
+ prop['FullName'] = prop['FontName']
+ if 'FamilyName' not in prop:
+ extras = ('(?i)([ -](regular|plain|italic|oblique|(semi)?bold|'
+ '(ultra)?light|extra|condensed))+$')
+ prop['FamilyName'] = re.sub(extras, '', prop['FullName'])
+
+ self.prop = prop
+
+ @classmethod
+ def _transformer(cls, tokens, slant, extend):
+ def fontname(name):
+ result = name
+ if slant:
+ result += b'_Slant_' + str(int(1000 * slant)).encode('ascii')
+ if extend != 1.0:
+ result += b'_Extend_' + str(int(1000 * extend)).encode('ascii')
+ return result
+
+ def italicangle(angle):
+ return (str(float(angle) - np.arctan(slant) / np.pi * 180)
+ .encode('ascii'))
+
+ def fontmatrix(array):
+ array = array.lstrip(b'[').rstrip(b']').split()
+ array = [float(x) for x in array]
+ oldmatrix = np.eye(3, 3)
+ oldmatrix[0:3, 0] = array[::2]
+ oldmatrix[0:3, 1] = array[1::2]
+ modifier = np.array([[extend, 0, 0],
+ [slant, 1, 0],
+ [0, 0, 1]])
+ newmatrix = np.dot(modifier, oldmatrix)
+ array[::2] = newmatrix[0:3, 0]
+ array[1::2] = newmatrix[0:3, 1]
+ as_string = u'[' + u' '.join(str(x) for x in array) + u']'
+ return as_string.encode('latin-1')
+
+ def replace(fun):
+ def replacer(tokens):
+ token, value = next(tokens) # name, e.g., /FontMatrix
+ yield bytes(value)
+ token, value = next(tokens) # possible whitespace
+ while token is cls._whitespace:
+ yield bytes(value)
+ token, value = next(tokens)
+ if value != b'[': # name/number/etc.
+ yield bytes(fun(value))
+ else: # array, e.g., [1 2 3]
+ result = b''
+ while value != b']':
+ result += value
+ token, value = next(tokens)
+ result += value
+ yield fun(result)
+ return replacer
+
+ def suppress(tokens):
+ for x in itertools.takewhile(lambda x: x[1] != b'def', tokens):
+ pass
+ yield b''
+
+ table = {b'/FontName': replace(fontname),
+ b'/ItalicAngle': replace(italicangle),
+ b'/FontMatrix': replace(fontmatrix),
+ b'/UniqueID': suppress}
+
+ for token, value in tokens:
+ if token is cls._name and value in table:
+ for value in table[value](itertools.chain([(token, value)],
+ tokens)):
+ yield value
+ else:
+ yield value
+
+ def transform(self, effects):
+ """
+ Transform the font by slanting or extending. *effects* should
+ be a dict where ``effects['slant']`` is the tangent of the
+ angle that the font is to be slanted to the right (so negative
+ values slant to the left) and ``effects['extend']`` is the
+ multiplier by which the font is to be extended (so values less
+ than 1.0 condense). Returns a new :class:`Type1Font` object.
+ """
+ with io.BytesIO() as buffer:
+ tokenizer = self._tokens(self.parts[0])
+ transformed = self._transformer(tokenizer,
+ slant=effects.get('slant', 0.0),
+ extend=effects.get('extend', 1.0))
+ list(map(buffer.write, transformed))
+ return Type1Font((buffer.getvalue(), self.parts[1], self.parts[2]))
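+
+
+# Illustrative sketch (added for exposition; not part of the upstream
+# module): the affine modifier used by _transformer's fontmatrix()
+# above, applied to a plain 6-element FontMatrix [a b c d e f]. The
+# sample matrix value is an assumption (identity scaled by 0.001, a
+# common Type-1 default).
+def _demo_fontmatrix_modifier(slant=0.167, extend=1.0,
+                              array=(0.001, 0, 0, 0.001, 0, 0)):
+    import numpy as np
+    oldmatrix = np.eye(3, 3)
+    oldmatrix[0:3, 0] = array[::2]     # a, c, e -> first column
+    oldmatrix[0:3, 1] = array[1::2]    # b, d, f -> second column
+    modifier = np.array([[extend, 0, 0],
+                         [slant, 1, 0],
+                         [0, 0, 1]])
+    newmatrix = np.dot(modifier, oldmatrix)
+    out = list(array)
+    out[::2] = newmatrix[0:3, 0]
+    out[1::2] = newmatrix[0:3, 1]
+    return out  # slanted/extended FontMatrix entries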
diff --git a/contrib/python/matplotlib/py2/matplotlib/units.py b/contrib/python/matplotlib/py2/matplotlib/units.py
new file mode 100644
index 00000000000..6e0a0b78d2a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/units.py
@@ -0,0 +1,200 @@
+"""
+The classes here provide support for using custom classes with
+Matplotlib, e.g., those that do not expose the array interface but know
+how to convert themselves to arrays. It also supports classes with
+units and units conversion. Use cases include converters for custom
+objects, e.g., a list of datetime objects, as well as for objects that
+are unit aware. We don't assume any particular units implementation;
+rather, a units implementation must register with the Registry converter
+dictionary and provide a `ConversionInterface`. For example,
+here is a complete implementation which supports plotting with native
+datetime objects::
+
+ import matplotlib.units as units
+ import matplotlib.dates as dates
+ import matplotlib.ticker as ticker
+ import datetime
+
+ class DateConverter(units.ConversionInterface):
+
+ @staticmethod
+ def convert(value, unit, axis):
+ 'Convert a datetime value to a scalar or array'
+ return dates.date2num(value)
+
+ @staticmethod
+ def axisinfo(unit, axis):
+ 'Return major and minor tick locators and formatters'
+ if unit!='date': return None
+ majloc = dates.AutoDateLocator()
+ majfmt = dates.AutoDateFormatter(majloc)
+ return units.AxisInfo(majloc=majloc,
+ majfmt=majfmt,
+ label='date')
+
+ @staticmethod
+ def default_units(x, axis):
+ 'Return the default unit for x or None'
+ return 'date'
+
+ # Finally we register our object type with the Matplotlib units registry.
+ units.registry[datetime.date] = DateConverter()
+
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+
+import six
+from matplotlib.cbook import iterable, is_numlike, safe_first_element
+import numpy as np
+
+
+class AxisInfo(object):
+ """
+ Information to support default axis labeling, tick labeling, and
+ default limits. An instance of this class must be returned by
+ :meth:`ConversionInterface.axisinfo`.
+ """
+ def __init__(self, majloc=None, minloc=None,
+ majfmt=None, minfmt=None, label=None,
+ default_limits=None):
+ """
+ Parameters
+ ----------
+ majloc, minloc : Locator, optional
+ Tick locators for the major and minor ticks.
+ majfmt, minfmt : Formatter, optional
+ Tick formatters for the major and minor ticks.
+ label : str, optional
+ The default axis label.
+ default_limits : optional
+ The default min and max limits of the axis if no data has
+ been plotted.
+
+ Notes
+ -----
+ If any of the above are ``None``, the axis will simply use the
+ default value.
+ """
+ self.majloc = majloc
+ self.minloc = minloc
+ self.majfmt = majfmt
+ self.minfmt = minfmt
+ self.label = label
+ self.default_limits = default_limits
+
+
+class ConversionInterface(object):
+ """
+ The minimal interface for a converter to take custom data types (or
+ sequences) and convert them to values Matplotlib can use.
+ """
+ @staticmethod
+ def axisinfo(unit, axis):
+ """
+ Return an `~units.AxisInfo` instance for the axis with the
+ specified units.
+ """
+ return None
+
+ @staticmethod
+ def default_units(x, axis):
+ """
+ Return the default unit for *x* or ``None`` for the given axis.
+ """
+ return None
+
+ @staticmethod
+ def convert(obj, unit, axis):
+ """
+ Convert *obj* using *unit* for the specified *axis*.
+ If *obj* is a sequence, return the converted sequence.
+ The output must be a sequence of scalars that can be used by the numpy
+ array layer.
+ """
+ return obj
+
+ @staticmethod
+ def is_numlike(x):
+ """
+ The Matplotlib datalim, autoscaling, locators etc work with
+ scalars which are the units converted to floats given the
+ current unit. The converter may be passed these floats, or
+ arrays of them, even when units are set.
+ """
+ if iterable(x):
+ for thisx in x:
+ return is_numlike(thisx)
+ else:
+ return is_numlike(x)
+
+
+class Registry(dict):
+ """
+ A register that maps types to conversion interfaces.
+ """
+ def __init__(self):
+ dict.__init__(self)
+ self._cached = {}
+
+ def get_converter(self, x):
+ """
+ Get the converter for data that has the same type as *x*. If no
+ converters are registered for *x*, returns ``None``.
+ """
+
+ if not len(self):
+ return None # nothing registered
+ # DISABLED idx = id(x)
+ # DISABLED cached = self._cached.get(idx)
+ # DISABLED if cached is not None: return cached
+
+ converter = None
+ classx = getattr(x, '__class__', None)
+
+ if classx is not None:
+ converter = self.get(classx)
+
+ if converter is None and hasattr(x, "values"):
+ # this unpacks pandas series or dataframes...
+ x = x.values
+
+ # If x is an array, look inside the array for data with units
+ if isinstance(x, np.ndarray) and x.size:
+ xravel = x.ravel()
+ try:
+ # pass the first value of x that is not masked back to
+ # get_converter
+ if not np.all(xravel.mask):
+ # some elements are not masked
+ converter = self.get_converter(
+ xravel[np.argmin(xravel.mask)])
+ return converter
+ except AttributeError:
+ # not a masked_array
+ # Make sure we don't recurse forever -- it's possible for
+ # ndarray subclasses to continue to return subclasses and
+ # not ever return a non-subclass for a single element.
+ next_item = xravel[0]
+ if (not isinstance(next_item, np.ndarray) or
+ next_item.shape != x.shape):
+ converter = self.get_converter(next_item)
+ return converter
+
+ # If we haven't found a converter yet, try to get the first element
+ if converter is None:
+ try:
+ thisx = safe_first_element(x)
+ except (TypeError, StopIteration):
+ pass
+ else:
+ if classx and classx != getattr(thisx, '__class__', None):
+ converter = self.get_converter(thisx)
+ return converter
+
+ # DISABLED self._cached[idx] = converter
+ return converter
+
+
+registry = Registry()
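+
+
+# Illustrative sketch (added for exposition; not part of the upstream
+# module): the registration pattern from the module docstring applied
+# to a hypothetical wrapper class. `_DemoKilometers` and its `value`
+# attribute are made up for this example.
+class _DemoKilometers(object):
+    def __init__(self, value):
+        self.value = value
+
+
+class _DemoKilometersConverter(ConversionInterface):
+    @staticmethod
+    def convert(obj, unit, axis):
+        # Accept either a single value or a sequence of them.
+        if iterable(obj):
+            return [o.value for o in obj]
+        return obj.value
+
+    @staticmethod
+    def default_units(x, axis):
+        return 'km'
+
+
+def _demo_register_converter():
+    registry[_DemoKilometers] = _DemoKilometersConverter()
+    return registry.get_converter(_DemoKilometers(3.0))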
diff --git a/contrib/python/matplotlib/py2/matplotlib/widgets.py b/contrib/python/matplotlib/py2/matplotlib/widgets.py
new file mode 100644
index 00000000000..0563964da69
--- /dev/null
+++ b/contrib/python/matplotlib/py2/matplotlib/widgets.py
@@ -0,0 +1,2818 @@
+"""
+GUI neutral widgets
+===================
+
+Widgets that are designed to work for any of the GUI backends.
+All of these widgets require you to predefine a :class:`matplotlib.axes.Axes`
+instance and pass that as the first arg. matplotlib doesn't try to
+be too smart with respect to layout -- you will have to figure out how
+wide and tall you want your Axes to be to accommodate your widget.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import copy
+import six
+from six.moves import zip
+
+import numpy as np
+from matplotlib import rcParams
+
+from .patches import Circle, Rectangle, Ellipse
+from .lines import Line2D
+from .transforms import blended_transform_factory
+
+
+class LockDraw(object):
+ """
+ Some widgets, like the cursor, draw onto the canvas, and this is not
+ desirable under all circumstances, like when the toolbar is in
+ zoom-to-rect mode and drawing a rectangle. The module level "lock"
+ allows someone to grab the lock and prevent other widgets from
+ drawing. Use ``matplotlib.widgets.lock(someobj)`` to prevent
+ other widgets from drawing while you're interacting with the canvas.
+ """
+
+ def __init__(self):
+ self._owner = None
+
+ def __call__(self, o):
+ """reserve the lock for *o*"""
+ if not self.available(o):
+ raise ValueError('already locked')
+ self._owner = o
+
+ def release(self, o):
+ """release the lock"""
+ if not self.available(o):
+ raise ValueError('you do not own this lock')
+ self._owner = None
+
+ def available(self, o):
+ """drawing is available to *o*"""
+ return not self.locked() or self.isowner(o)
+
+ def isowner(self, o):
+ """Return True if *o* owns this lock"""
+ return self._owner is o
+
+ def locked(self):
+ """Return True if the lock is currently held by an owner"""
+ return self._owner is not None
+
+
+class Widget(object):
+ """
+ Abstract base class for GUI neutral widgets
+ """
+ drawon = True
+ eventson = True
+ _active = True
+
+ def set_active(self, active):
+ """Set whether the widget is active.
+ """
+ self._active = active
+
+ def get_active(self):
+ """Get whether the widget is active.
+ """
+ return self._active
+
+ # set_active is overridden by SelectorWidgets.
+ active = property(get_active, lambda self, active: self.set_active(active),
+ doc="Is the widget active?")
+
+ def ignore(self, event):
+ """Return True if event should be ignored.
+
+ This method (or a version of it) should be called at the beginning
+ of any event callback.
+ """
+ return not self.active
+
+
+class AxesWidget(Widget):
+ """Widget that is connected to a single
+ :class:`~matplotlib.axes.Axes`.
+
+ To guarantee that the widget remains responsive and not garbage-collected,
+ a reference to the object should be maintained by the user.
+
+ This is necessary because the callback registry
+ maintains only weak-refs to the functions, which are member
+ functions of the widget. If there are no references to the widget
+ object it may be garbage collected which will disconnect the
+ callbacks.
+
+ Attributes:
+
+ *ax* : :class:`~matplotlib.axes.Axes`
+ The parent axes for the widget
+ *canvas* : :class:`~matplotlib.backend_bases.FigureCanvasBase` subclass
+ The parent figure canvas for the widget.
+ *active* : bool
+ If False, the widget does not respond to events.
+ """
+ def __init__(self, ax):
+ self.ax = ax
+ self.canvas = ax.figure.canvas
+ self.cids = []
+
+ def connect_event(self, event, callback):
+ """Connect callback with an event.
+
+ This should be used in lieu of `figure.canvas.mpl_connect` since this
+ function stores callback ids for later clean up.
+ """
+ cid = self.canvas.mpl_connect(event, callback)
+ self.cids.append(cid)
+
+ def disconnect_events(self):
+ """Disconnect all events created by this widget."""
+ for c in self.cids:
+ self.canvas.mpl_disconnect(c)
+
+
+class Button(AxesWidget):
+ """
+ A GUI neutral button.
+
+ For the button to remain responsive you must keep a reference to it.
+ Call :meth:`on_clicked` to connect to the button.
+
+ Attributes
+ ----------
+ ax :
+ The :class:`matplotlib.axes.Axes` the button renders into.
+ label :
+ A :class:`matplotlib.text.Text` instance.
+ color :
+ The color of the button when not hovering.
+ hovercolor :
+ The color of the button when hovering.
+ """
+
+ def __init__(self, ax, label, image=None,
+ color='0.85', hovercolor='0.95'):
+ """
+ Parameters
+ ----------
+ ax : matplotlib.axes.Axes
+ The :class:`matplotlib.axes.Axes` instance the button
+ will be placed into.
+
+ label : str
+ The button text. Accepts string.
+
+ image : array, mpl image, Pillow Image
+ The image to place in the button, if not *None*.
+ Can be any legal arg to imshow (numpy array,
+ matplotlib Image instance, or Pillow Image).
+
+ color : color
+ The color of the button when not activated
+
+ hovercolor : color
+ The color of the button when the mouse is over it
+ """
+ AxesWidget.__init__(self, ax)
+
+ if image is not None:
+ ax.imshow(image)
+ self.label = ax.text(0.5, 0.5, label,
+ verticalalignment='center',
+ horizontalalignment='center',
+ transform=ax.transAxes)
+
+ self.cnt = 0
+ self.observers = {}
+
+ self.connect_event('button_press_event', self._click)
+ self.connect_event('button_release_event', self._release)
+ self.connect_event('motion_notify_event', self._motion)
+ ax.set_navigate(False)
+ ax.set_facecolor(color)
+ ax.set_xticks([])
+ ax.set_yticks([])
+ self.color = color
+ self.hovercolor = hovercolor
+
+ self._lastcolor = color
+
+ def _click(self, event):
+ if self.ignore(event):
+ return
+ if event.inaxes != self.ax:
+ return
+ if not self.eventson:
+ return
+ if event.canvas.mouse_grabber != self.ax:
+ event.canvas.grab_mouse(self.ax)
+
+ def _release(self, event):
+ if self.ignore(event):
+ return
+ if event.canvas.mouse_grabber != self.ax:
+ return
+ event.canvas.release_mouse(self.ax)
+ if not self.eventson:
+ return
+ if event.inaxes != self.ax:
+ return
+ for cid, func in six.iteritems(self.observers):
+ func(event)
+
+ def _motion(self, event):
+ if self.ignore(event):
+ return
+ if event.inaxes == self.ax:
+ c = self.hovercolor
+ else:
+ c = self.color
+ if c != self._lastcolor:
+ self.ax.set_facecolor(c)
+ self._lastcolor = c
+ if self.drawon:
+ self.ax.figure.canvas.draw()
+
+ def on_clicked(self, func):
+ """
+ When the button is clicked, call this *func* with event.
+
+ A connection id is returned. It can be used to disconnect
+ the button from its callback.
+ """
+ cid = self.cnt
+ self.observers[cid] = func
+ self.cnt += 1
+ return cid
+
+ def disconnect(self, cid):
+ """remove the observer with connection id *cid*"""
+ try:
+ del self.observers[cid]
+ except KeyError:
+ pass
+
+
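+# Illustrative usage sketch (added for exposition; not part of the
+# upstream module): wiring a Button callback via on_clicked, as the
+# class docstring describes. The axes placement values are assumptions.
+def _demo_button():
+    import matplotlib.pyplot as plt
+    fig = plt.figure()
+    ax = fig.add_axes([0.4, 0.45, 0.2, 0.1])   # [left, bottom, width, height]
+    button = Button(ax, 'Click me')
+
+    def on_click(event):
+        print('button clicked')
+
+    cid = button.on_clicked(on_click)
+    return button, cid   # keep a reference so the widget stays responsive
+
+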
+class Slider(AxesWidget):
+ """
+ A slider representing a floating point range.
+
+ Create a slider from *valmin* to *valmax* in axes *ax*. For the slider to
+ remain responsive you must maintain a reference to it. Call
+ :meth:`on_changed` to connect to the slider event.
+
+ Attributes
+ ----------
+ val : float
+ Slider value.
+ """
+ def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
+ closedmin=True, closedmax=True, slidermin=None,
+ slidermax=None, dragging=True, valstep=None, **kwargs):
+ """
+ Parameters
+ ----------
+ ax : Axes
+ The Axes to put the slider in.
+
+ label : str
+ Slider label.
+
+ valmin : float
+ The minimum value of the slider.
+
+ valmax : float
+ The maximum value of the slider.
+
+ valinit : float, optional, default: 0.5
+ The slider initial position.
+
+ valfmt : str, optional, default: "%1.2f"
+ Used to format the slider value (a printf-style format string).
+
+ closedmin : bool, optional, default: True
+ Indicate whether the slider interval is closed on the bottom.
+
+ closedmax : bool, optional, default: True
+ Indicate whether the slider interval is closed on the top.
+
+ slidermin : Slider, optional, default: None
+ Do not allow the current slider to have a value less than
+ the value of the Slider `slidermin`.
+
+ slidermax : Slider, optional, default: None
+ Do not allow the current slider to have a value greater than
+ the value of the Slider `slidermax`.
+
+ dragging : bool, optional, default: True
+ If True the slider can be dragged by the mouse.
+
+ valstep : float, optional, default: None
+ If given, the slider will snap to multiples of `valstep`.
+
+ Notes
+ -----
+ Additional kwargs are passed on to ``self.poly`` which is the
+ :class:`~matplotlib.patches.Rectangle` that draws the slider
+ knob. See the :class:`~matplotlib.patches.Rectangle` documentation for
+ valid property names (e.g., `facecolor`, `edgecolor`, `alpha`).
+ """
+ AxesWidget.__init__(self, ax)
+
+ if slidermin is not None and not hasattr(slidermin, 'val'):
+ raise ValueError("Argument slidermin ({}) has no 'val'"
+ .format(type(slidermin)))
+ if slidermax is not None and not hasattr(slidermax, 'val'):
+ raise ValueError("Argument slidermax ({}) has no 'val'"
+ .format(type(slidermax)))
+ self.closedmin = closedmin
+ self.closedmax = closedmax
+ self.slidermin = slidermin
+ self.slidermax = slidermax
+ self.drag_active = False
+ self.valmin = valmin
+ self.valmax = valmax
+ self.valstep = valstep
+ valinit = self._value_in_bounds(valinit)
+ if valinit is None:
+ valinit = valmin
+ self.val = valinit
+ self.valinit = valinit
+ self.poly = ax.axvspan(valmin, valinit, 0, 1, **kwargs)
+ self.vline = ax.axvline(valinit, 0, 1, color='r', lw=1)
+
+ self.valfmt = valfmt
+ ax.set_yticks([])
+ ax.set_xlim((valmin, valmax))
+ ax.set_xticks([])
+ ax.set_navigate(False)
+
+ self.connect_event('button_press_event', self._update)
+ self.connect_event('button_release_event', self._update)
+ if dragging:
+ self.connect_event('motion_notify_event', self._update)
+ self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
+ verticalalignment='center',
+ horizontalalignment='right')
+
+ self.valtext = ax.text(1.02, 0.5, valfmt % valinit,
+ transform=ax.transAxes,
+ verticalalignment='center',
+ horizontalalignment='left')
+
+ self.cnt = 0
+ self.observers = {}
+
+ self.set_val(valinit)
+
+ def _value_in_bounds(self, val):
+ """ Makes sure self.val is with given bounds."""
+ if self.valstep:
+ val = np.round((val - self.valmin)/self.valstep)*self.valstep
+ val += self.valmin
+
+ if val <= self.valmin:
+ if not self.closedmin:
+ return
+ val = self.valmin
+ elif val >= self.valmax:
+ if not self.closedmax:
+ return
+ val = self.valmax
+
+ if self.slidermin is not None and val <= self.slidermin.val:
+ if not self.closedmin:
+ return
+ val = self.slidermin.val
+
+ if self.slidermax is not None and val >= self.slidermax.val:
+ if not self.closedmax:
+ return
+ val = self.slidermax.val
+ return val
+
+ def _update(self, event):
+ """update the slider position"""
+ if self.ignore(event):
+ return
+
+ if event.button != 1:
+ return
+
+ if event.name == 'button_press_event' and event.inaxes == self.ax:
+ self.drag_active = True
+ event.canvas.grab_mouse(self.ax)
+
+ if not self.drag_active:
+ return
+
+ elif ((event.name == 'button_release_event') or
+ (event.name == 'button_press_event' and
+ event.inaxes != self.ax)):
+ self.drag_active = False
+ event.canvas.release_mouse(self.ax)
+ return
+ val = self._value_in_bounds(event.xdata)
+ if (val is not None) and (val != self.val):
+ self.set_val(val)
+
+ def set_val(self, val):
+ """
+ Set slider value to *val*
+
+ Parameters
+ ----------
+ val : float
+ """
+ xy = self.poly.xy
+ xy[2] = val, 1
+ xy[3] = val, 0
+ self.poly.xy = xy
+ self.valtext.set_text(self.valfmt % val)
+ if self.drawon:
+ self.ax.figure.canvas.draw_idle()
+ self.val = val
+ if not self.eventson:
+ return
+ for cid, func in six.iteritems(self.observers):
+ func(val)
+
+ def on_changed(self, func):
+ """
+ When the slider value is changed call *func* with the new
+ slider value
+
+ Parameters
+ ----------
+ func : callable
+ Function to call when slider is changed.
+ The function must accept a single float as its arguments.
+
+ Returns
+ -------
+ cid : int
+ Connection id (which can be used to disconnect *func*)
+ """
+ cid = self.cnt
+ self.observers[cid] = func
+ self.cnt += 1
+ return cid
+
+ def disconnect(self, cid):
+ """
+ Remove the observer with connection id *cid*
+
+ Parameters
+ ----------
+ cid : int
+ Connection id of the observer to be removed
+ """
+ try:
+ del self.observers[cid]
+ except KeyError:
+ pass
+
+ def reset(self):
+ """Reset the slider to the initial value"""
+ if (self.val != self.valinit):
+ self.set_val(self.valinit)
+
+
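+# Illustrative usage sketch (added for exposition; not part of the
+# upstream module): a Slider with an on_changed callback, as described
+# above. Axes placement and value range are assumptions.
+def _demo_slider():
+    import matplotlib.pyplot as plt
+    fig = plt.figure()
+    ax = fig.add_axes([0.2, 0.45, 0.6, 0.05])
+    slider = Slider(ax, 'freq', valmin=0.1, valmax=10.0, valinit=1.0,
+                    valstep=0.1)
+
+    def on_change(val):
+        print('slider value: %.1f' % val)
+
+    slider.on_changed(on_change)
+    return slider   # keep a reference so the widget stays responsive
+
+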
+class CheckButtons(AxesWidget):
+ """
+ A GUI neutral set of check buttons.
+
+ For the check buttons to remain responsive you must keep a
+ reference to this object.
+
+ The following attributes are exposed
+
+ *ax*
+ The :class:`matplotlib.axes.Axes` instance the buttons are
+ located in
+
+ *labels*
+ List of :class:`matplotlib.text.Text` instances
+
+ *lines*
+ List of (line1, line2) tuples for the x's in the check boxes.
+ These lines exist for each box, but have ``set_visible(False)``
+ when its box is not checked.
+
+ *rectangles*
+ List of :class:`matplotlib.patches.Rectangle` instances
+
+ Connect to the CheckButtons with the :meth:`on_clicked` method
+ """
+ def __init__(self, ax, labels, actives):
+ """
+ Add check buttons to :class:`matplotlib.axes.Axes` instance *ax*
+
+ *labels*
+ A len(buttons) list of labels as strings
+
+ *actives*
+ A len(buttons) list of booleans indicating whether
+ the button is active
+ """
+ AxesWidget.__init__(self, ax)
+
+ ax.set_xticks([])
+ ax.set_yticks([])
+ ax.set_navigate(False)
+
+ if len(labels) > 1:
+ dy = 1. / (len(labels) + 1)
+ ys = np.linspace(1 - dy, dy, len(labels))
+ else:
+ dy = 0.25
+ ys = [0.5]
+
+ cnt = 0
+ axcolor = ax.get_facecolor()
+
+ self.labels = []
+ self.lines = []
+ self.rectangles = []
+
+ lineparams = {'color': 'k', 'linewidth': 1.25,
+ 'transform': ax.transAxes, 'solid_capstyle': 'butt'}
+ for y, label in zip(ys, labels):
+ t = ax.text(0.25, y, label, transform=ax.transAxes,
+ horizontalalignment='left',
+ verticalalignment='center')
+
+ w, h = dy / 2., dy / 2.
+ x, y = 0.05, y - h / 2.
+
+ p = Rectangle(xy=(x, y), width=w, height=h, edgecolor='black',
+ facecolor=axcolor, transform=ax.transAxes)
+
+ l1 = Line2D([x, x + w], [y + h, y], **lineparams)
+ l2 = Line2D([x, x + w], [y, y + h], **lineparams)
+
+ l1.set_visible(actives[cnt])
+ l2.set_visible(actives[cnt])
+ self.labels.append(t)
+ self.rectangles.append(p)
+ self.lines.append((l1, l2))
+ ax.add_patch(p)
+ ax.add_line(l1)
+ ax.add_line(l2)
+ cnt += 1
+
+ self.connect_event('button_press_event', self._clicked)
+
+ self.cnt = 0
+ self.observers = {}
+
+ def _clicked(self, event):
+ if self.ignore(event):
+ return
+ if event.button != 1:
+ return
+ if event.inaxes != self.ax:
+ return
+
+ for i, (p, t) in enumerate(zip(self.rectangles, self.labels)):
+ if (t.get_window_extent().contains(event.x, event.y) or
+ p.get_window_extent().contains(event.x, event.y)):
+ self.set_active(i)
+ break
+ else:
+ return
+
+ def set_active(self, index):
+ """
+ Directly (de)activate a check button by index.
+
+ *index* is an index into the original label list
+ that this object was constructed with.
+ Raises ValueError if *index* is invalid.
+
+ Callbacks will be triggered if :attr:`eventson` is True.
+
+ """
+ if index < 0 or index >= len(self.labels):
+ raise ValueError("Invalid CheckButton index: %d" % index)
+
+ l1, l2 = self.lines[index]
+ l1.set_visible(not l1.get_visible())
+ l2.set_visible(not l2.get_visible())
+
+ if self.drawon:
+ self.ax.figure.canvas.draw()
+
+ if not self.eventson:
+ return
+ for cid, func in six.iteritems(self.observers):
+ func(self.labels[index].get_text())
+
+ def get_status(self):
+ """
+ Return a list of the status (True/False) of all of the check buttons.
+ """
+ return [l1.get_visible() for (l1, l2) in self.lines]
+
+ def on_clicked(self, func):
+ """
+ When the button is clicked, call *func* with button label
+
+ A connection id is returned which can be used to disconnect
+ """
+ cid = self.cnt
+ self.observers[cid] = func
+ self.cnt += 1
+ return cid
+
+ def disconnect(self, cid):
+ """remove the observer with connection id *cid*"""
+ try:
+ del self.observers[cid]
+ except KeyError:
+ pass
+
+
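+# Illustrative usage sketch (added for exposition; not part of the
+# upstream module): CheckButtons with an on_clicked callback and
+# get_status, as described above. Labels and layout are assumptions.
+def _demo_check_buttons():
+    import matplotlib.pyplot as plt
+    fig = plt.figure()
+    ax = fig.add_axes([0.05, 0.4, 0.2, 0.2])
+    check = CheckButtons(ax, ['grid', 'legend'], [True, False])
+
+    def on_click(label):
+        print(label, check.get_status())
+
+    check.on_clicked(on_click)
+    return check   # keep a reference so the widget stays responsive
+
+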
+class TextBox(AxesWidget):
+ """
+ A GUI neutral text input box.
+
+ For the text box to remain responsive you must keep a reference to it.
+
+ The following attributes are accessible:
+
+ *ax*
+ The :class:`matplotlib.axes.Axes` the button renders into.
+
+ *label*
+ A :class:`matplotlib.text.Text` instance.
+
+ *color*
+ The color of the text box when not hovering.
+
+ *hovercolor*
+ The color of the text box when hovering.
+
+ Call :meth:`on_text_change` to be updated whenever the text changes.
+
+ Call :meth:`on_submit` to be updated whenever the user hits enter or
+ leaves the text entry field.
+ """
+
+ def __init__(self, ax, label, initial='',
+ color='.95', hovercolor='1', label_pad=.01):
+ """
+ Parameters
+ ----------
+ ax : matplotlib.axes.Axes
+ The :class:`matplotlib.axes.Axes` instance the button
+ will be placed into.
+
+ label : str
+ Label for this text box. Accepts string.
+
+ initial : str
+ Initial value in the text box
+
+ color : color
+ The color of the box
+
+ hovercolor : color
+ The color of the box when the mouse is over it
+
+ label_pad : float
+ the distance between the label and the right side of the textbox
+ """
+ AxesWidget.__init__(self, ax)
+
+ self.DIST_FROM_LEFT = .05
+
+ self.params_to_disable = [key for key in rcParams if u'keymap' in key]
+
+ self.text = initial
+ self.label = ax.text(-label_pad, 0.5, label,
+ verticalalignment='center',
+ horizontalalignment='right',
+ transform=ax.transAxes)
+ self.text_disp = self._make_text_disp(self.text)
+
+ self.cnt = 0
+ self.change_observers = {}
+ self.submit_observers = {}
+
+ # If these lines are removed, the cursor won't appear the first
+ # time the box is clicked:
+ self.ax.set_xlim(0, 1)
+ self.ax.set_ylim(0, 1)
+
+ self.cursor_index = 0
+
+ # Because this is initialized, _render_cursor
+ # can assume that cursor exists.
+ self.cursor = self.ax.vlines(0, 0, 0)
+ self.cursor.set_visible(False)
+
+ self.connect_event('button_press_event', self._click)
+ self.connect_event('button_release_event', self._release)
+ self.connect_event('motion_notify_event', self._motion)
+ self.connect_event('key_press_event', self._keypress)
+ self.connect_event('resize_event', self._resize)
+ ax.set_navigate(False)
+ ax.set_facecolor(color)
+ ax.set_xticks([])
+ ax.set_yticks([])
+ self.color = color
+ self.hovercolor = hovercolor
+
+ self._lastcolor = color
+
+ self.capturekeystrokes = False
+
+ def _make_text_disp(self, string):
+ return self.ax.text(self.DIST_FROM_LEFT, 0.5, string,
+ verticalalignment='center',
+ horizontalalignment='left',
+ transform=self.ax.transAxes)
+
+ def _rendercursor(self):
+ # this is a hack to figure out where the cursor should go.
+ # we draw the text up to where the cursor should go, measure
+ # and save its dimensions, draw the real text, then put the cursor
+ # at the saved dimensions
+
+ widthtext = self.text[:self.cursor_index]
+ no_text = False
+ if(widthtext == "" or widthtext == " " or widthtext == " "):
+ no_text = widthtext == ""
+ widthtext = ","
+
+ wt_disp = self._make_text_disp(widthtext)
+
+ self.ax.figure.canvas.draw()
+ bb = wt_disp.get_window_extent()
+ inv = self.ax.transData.inverted()
+ bb = inv.transform(bb)
+ wt_disp.set_visible(False)
+ if no_text:
+ bb[1, 0] = bb[0, 0]
+ # hack done
+ self.cursor.set_visible(False)
+
+ self.cursor = self.ax.vlines(bb[1, 0], bb[0, 1], bb[1, 1])
+ self.ax.figure.canvas.draw()
+
+ def _notify_submit_observers(self):
+ for cid, func in six.iteritems(self.submit_observers):
+ func(self.text)
+
+ def _release(self, event):
+ if self.ignore(event):
+ return
+ if event.canvas.mouse_grabber != self.ax:
+ return
+ event.canvas.release_mouse(self.ax)
+
+ def _keypress(self, event):
+ if self.ignore(event):
+ return
+ if self.capturekeystrokes:
+ key = event.key
+
+ if(len(key) == 1):
+ self.text = (self.text[:self.cursor_index] + key +
+ self.text[self.cursor_index:])
+ self.cursor_index += 1
+ elif key == "right":
+ if self.cursor_index != len(self.text):
+ self.cursor_index += 1
+ elif key == "left":
+ if self.cursor_index != 0:
+ self.cursor_index -= 1
+ elif key == "home":
+ self.cursor_index = 0
+ elif key == "end":
+ self.cursor_index = len(self.text)
+ elif(key == "backspace"):
+ if self.cursor_index != 0:
+ self.text = (self.text[:self.cursor_index - 1] +
+ self.text[self.cursor_index:])
+ self.cursor_index -= 1
+ elif(key == "delete"):
+ if self.cursor_index != len(self.text):
+ self.text = (self.text[:self.cursor_index] +
+ self.text[self.cursor_index + 1:])
+
+ self.text_disp.remove()
+ self.text_disp = self._make_text_disp(self.text)
+ self._rendercursor()
+ self._notify_change_observers()
+ if key == "enter":
+ self._notify_submit_observers()
+
+ def set_val(self, val):
+ newval = str(val)
+ if self.text == newval:
+ return
+ self.text = newval
+ self.text_disp.remove()
+ self.text_disp = self._make_text_disp(self.text)
+ self._rendercursor()
+ self._notify_change_observers()
+ self._notify_submit_observers()
+
+ def _notify_change_observers(self):
+ for cid, func in six.iteritems(self.change_observers):
+ func(self.text)
+
+ def begin_typing(self, x):
+ self.capturekeystrokes = True
+ # disable command keys so that the user can type without
+ # command keys causing figure to be saved, etc
+ self.reset_params = {}
+ for key in self.params_to_disable:
+ self.reset_params[key] = rcParams[key]
+ rcParams[key] = []
+
+ def stop_typing(self):
+ notifysubmit = False
+ # because _notify_submit_observers might throw an error in the
+ # user's code, we only want to call it once we've already done
+ # our cleanup.
+ if self.capturekeystrokes:
+ # since the user is no longer typing,
+ # reactivate the standard command keys
+ for key in self.params_to_disable:
+ rcParams[key] = self.reset_params[key]
+ notifysubmit = True
+ self.capturekeystrokes = False
+ self.cursor.set_visible(False)
+ self.ax.figure.canvas.draw()
+ if notifysubmit:
+ self._notify_submit_observers()
+
+ def position_cursor(self, x):
+ # now, we have to figure out where the cursor goes.
+ # approximate it by assuming all characters have the same width
+ if len(self.text) == 0:
+ self.cursor_index = 0
+ else:
+ bb = self.text_disp.get_window_extent()
+
+ trans = self.ax.transData
+ inv = self.ax.transData.inverted()
+ bb = trans.transform(inv.transform(bb))
+
+ text_start = bb[0, 0]
+ text_end = bb[1, 0]
+
+ ratio = (x - text_start) / (text_end - text_start)
+
+ if ratio < 0:
+ ratio = 0
+ if ratio > 1:
+ ratio = 1
+
+ self.cursor_index = int(len(self.text) * ratio)
+
+ self._rendercursor()
+
+ def _click(self, event):
+ if self.ignore(event):
+ return
+ if event.inaxes != self.ax:
+ self.stop_typing()
+ return
+ if not self.eventson:
+ return
+ if event.canvas.mouse_grabber != self.ax:
+ event.canvas.grab_mouse(self.ax)
+ if not self.capturekeystrokes:
+ self.begin_typing(event.x)
+ self.position_cursor(event.x)
+
+ def _resize(self, event):
+ self.stop_typing()
+
+ def _motion(self, event):
+ if self.ignore(event):
+ return
+ if event.inaxes == self.ax:
+ c = self.hovercolor
+ else:
+ c = self.color
+ if c != self._lastcolor:
+ self.ax.set_facecolor(c)
+ self._lastcolor = c
+ if self.drawon:
+ self.ax.figure.canvas.draw()
+
+ def on_text_change(self, func):
+ """
+ When the text changes, call this *func* with event.
+
+ A connection id is returned which can be used to disconnect.
+ """
+ cid = self.cnt
+ self.change_observers[cid] = func
+ self.cnt += 1
+ return cid
+
+ def on_submit(self, func):
+ """
+ When the user hits enter or leaves the submission box, call this
+ *func* with event.
+
+ A connection id is returned which can be used to disconnect.
+ """
+ cid = self.cnt
+ self.submit_observers[cid] = func
+ self.cnt += 1
+ return cid
+
+ def disconnect(self, cid):
+ """remove the observer with connection id *cid*"""
+ for reg in (self.change_observers, self.submit_observers):
+ try:
+ del reg[cid]
+ except KeyError:
+ pass
+
+
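+# Illustrative usage sketch (added for exposition; not part of the
+# upstream module): a TextBox with on_text_change and on_submit
+# callbacks, as described above. Layout and the initial value are
+# assumptions.
+def _demo_text_box():
+    import matplotlib.pyplot as plt
+    fig = plt.figure()
+    ax = fig.add_axes([0.2, 0.45, 0.6, 0.08])
+    box = TextBox(ax, 'expr', initial='x ** 2')
+
+    box.on_text_change(lambda text: print('typing: ' + text))
+    box.on_submit(lambda text: print('submitted: ' + text))
+    return box   # keep a reference so the widget stays responsive
+
+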
+class RadioButtons(AxesWidget):
+ """
+ A GUI neutral radio button.
+
+ For the buttons to remain responsive
+ you must keep a reference to this object.
+
+ The following attributes are exposed:
+
+ *ax*
+ The :class:`matplotlib.axes.Axes` instance the buttons are in
+
+ *activecolor*
+ The color of the button when clicked
+
+ *labels*
+ A list of :class:`matplotlib.text.Text` instances
+
+ *circles*
+ A list of :class:`matplotlib.patches.Circle` instances
+
+ *value_selected*
+ A string listing the current value selected
+
+ Connect to the RadioButtons with the :meth:`on_clicked` method
+ """
+ def __init__(self, ax, labels, active=0, activecolor='blue'):
+ """
+ Add radio buttons to :class:`matplotlib.axes.Axes` instance *ax*
+
+ *labels*
+ A len(buttons) list of labels as strings
+
+ *active*
+ The index into labels for the button that is active
+
+ *activecolor*
+ The color of the button when clicked
+ """
+ AxesWidget.__init__(self, ax)
+ self.activecolor = activecolor
+ self.value_selected = None
+
+ ax.set_xticks([])
+ ax.set_yticks([])
+ ax.set_navigate(False)
+ dy = 1. / (len(labels) + 1)
+ ys = np.linspace(1 - dy, dy, len(labels))
+ cnt = 0
+ axcolor = ax.get_facecolor()
+
+ self.labels = []
+ self.circles = []
+ for y, label in zip(ys, labels):
+ t = ax.text(0.25, y, label, transform=ax.transAxes,
+ horizontalalignment='left',
+ verticalalignment='center')
+
+ if cnt == active:
+ self.value_selected = label
+ facecolor = activecolor
+ else:
+ facecolor = axcolor
+
+ p = Circle(xy=(0.15, y), radius=0.05, edgecolor='black',
+ facecolor=facecolor, transform=ax.transAxes)
+
+ self.labels.append(t)
+ self.circles.append(p)
+ ax.add_patch(p)
+ cnt += 1
+
+ self.connect_event('button_press_event', self._clicked)
+
+ self.cnt = 0
+ self.observers = {}
+
+ def _clicked(self, event):
+ if self.ignore(event):
+ return
+ if event.button != 1:
+ return
+ if event.inaxes != self.ax:
+ return
+ xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
+ pclicked = np.array([xy[0], xy[1]])
+
+ def inside(p):
+ pcirc = np.array([p.center[0], p.center[1]])
+ d = pclicked - pcirc
+ return np.sqrt(np.dot(d, d)) < p.radius
+
+ for i, (p, t) in enumerate(zip(self.circles, self.labels)):
+ if t.get_window_extent().contains(event.x, event.y) or inside(p):
+ self.set_active(i)
+ break
+ else:
+ return
+
+ def set_active(self, index):
+ """
+ Trigger which radio button to make active.
+
+ *index* is an index into the original label list
+ that this object was constructed with.
+ Raise ValueError if the index is invalid.
+
+ Callbacks will be triggered if :attr:`eventson` is True.
+
+ """
+ if index < 0 or index >= len(self.labels):
+ raise ValueError("Invalid RadioButton index: %d" % index)
+
+ self.value_selected = self.labels[index].get_text()
+
+ for i, p in enumerate(self.circles):
+ if i == index:
+ color = self.activecolor
+ else:
+ color = self.ax.get_facecolor()
+ p.set_facecolor(color)
+
+ if self.drawon:
+ self.ax.figure.canvas.draw()
+
+ if not self.eventson:
+ return
+ for cid, func in six.iteritems(self.observers):
+ func(self.labels[index].get_text())
+
+ def on_clicked(self, func):
+ """
+ When the button is clicked, call *func* with button label
+
+ A connection id is returned which can be used to disconnect
+ """
+ cid = self.cnt
+ self.observers[cid] = func
+ self.cnt += 1
+ return cid
+
+ def disconnect(self, cid):
+ """remove the observer with connection id *cid*"""
+ try:
+ del self.observers[cid]
+ except KeyError:
+ pass
+
+
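+# Illustrative usage sketch (added for exposition; not part of the
+# upstream module): RadioButtons with an on_clicked callback, as
+# described above. Labels and layout are assumptions.
+def _demo_radio_buttons():
+    import matplotlib.pyplot as plt
+    fig = plt.figure()
+    ax = fig.add_axes([0.05, 0.4, 0.2, 0.2])
+    radio = RadioButtons(ax, ['linear', 'log'], active=0)
+
+    def on_click(label):
+        print('selected scale: ' + label)
+
+    radio.on_clicked(on_click)
+    return radio   # keep a reference so the widget stays responsive
+
+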
+class SubplotTool(Widget):
+ """
+ A tool to adjust the subplot params of a :class:`matplotlib.figure.Figure`.
+ """
+ def __init__(self, targetfig, toolfig):
+ """
+ *targetfig*
+ The figure instance to adjust.
+
+ *toolfig*
+ The figure instance to embed the subplot tool into. If
+ *None*, a default figure will be created. If you are using
+ this from the GUI
+ """
+ # FIXME: The docstring seems to just abruptly end without...
+
+ self.targetfig = targetfig
+ toolfig.subplots_adjust(left=0.2, right=0.9)
+
+ class toolbarfmt:
+ def __init__(self, slider):
+ self.slider = slider
+
+ def __call__(self, x, y):
+ fmt = '%s=%s' % (self.slider.label.get_text(),
+ self.slider.valfmt)
+ return fmt % x
+
+ self.axleft = toolfig.add_subplot(711)
+ self.axleft.set_title('Click on slider to adjust subplot param')
+ self.axleft.set_navigate(False)
+
+ self.sliderleft = Slider(self.axleft, 'left',
+ 0, 1, targetfig.subplotpars.left,
+ closedmax=False)
+ self.sliderleft.on_changed(self.funcleft)
+
+ self.axbottom = toolfig.add_subplot(712)
+ self.axbottom.set_navigate(False)
+ self.sliderbottom = Slider(self.axbottom,
+ 'bottom', 0, 1,
+ targetfig.subplotpars.bottom,
+ closedmax=False)
+ self.sliderbottom.on_changed(self.funcbottom)
+
+ self.axright = toolfig.add_subplot(713)
+ self.axright.set_navigate(False)
+ self.sliderright = Slider(self.axright, 'right', 0, 1,
+ targetfig.subplotpars.right,
+ closedmin=False)
+ self.sliderright.on_changed(self.funcright)
+
+ self.axtop = toolfig.add_subplot(714)
+ self.axtop.set_navigate(False)
+ self.slidertop = Slider(self.axtop, 'top', 0, 1,
+ targetfig.subplotpars.top,
+ closedmin=False)
+ self.slidertop.on_changed(self.functop)
+
+ self.axwspace = toolfig.add_subplot(715)
+ self.axwspace.set_navigate(False)
+ self.sliderwspace = Slider(self.axwspace, 'wspace',
+ 0, 1, targetfig.subplotpars.wspace,
+ closedmax=False)
+ self.sliderwspace.on_changed(self.funcwspace)
+
+ self.axhspace = toolfig.add_subplot(716)
+ self.axhspace.set_navigate(False)
+ self.sliderhspace = Slider(self.axhspace, 'hspace',
+ 0, 1, targetfig.subplotpars.hspace,
+ closedmax=False)
+ self.sliderhspace.on_changed(self.funchspace)
+
+ # constraints
+ self.sliderleft.slidermax = self.sliderright
+ self.sliderright.slidermin = self.sliderleft
+ self.sliderbottom.slidermax = self.slidertop
+ self.slidertop.slidermin = self.sliderbottom
+
+ bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
+ self.buttonreset = Button(bax, 'Reset')
+
+ sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
+ self.slidertop, self.sliderwspace, self.sliderhspace,)
+
+ def func(event):
+ thisdrawon = self.drawon
+
+ self.drawon = False
+
+ # store the drawon state of each slider
+ bs = []
+ for slider in sliders:
+ bs.append(slider.drawon)
+ slider.drawon = False
+
+ # reset the slider to the initial position
+ for slider in sliders:
+ slider.reset()
+
+ # reset drawon
+ for slider, b in zip(sliders, bs):
+ slider.drawon = b
+
+ # draw the canvas
+ self.drawon = thisdrawon
+ if self.drawon:
+ toolfig.canvas.draw()
+ self.targetfig.canvas.draw()
+
+ # during reset there can be a temporary invalid state
+ # depending on the order of the reset so we turn off
+ # validation for the resetting
+ validate = toolfig.subplotpars.validate
+ toolfig.subplotpars.validate = False
+ self.buttonreset.on_clicked(func)
+ toolfig.subplotpars.validate = validate
+
+ def funcleft(self, val):
+ self.targetfig.subplots_adjust(left=val)
+ if self.drawon:
+ self.targetfig.canvas.draw()
+
+ def funcright(self, val):
+ self.targetfig.subplots_adjust(right=val)
+ if self.drawon:
+ self.targetfig.canvas.draw()
+
+ def funcbottom(self, val):
+ self.targetfig.subplots_adjust(bottom=val)
+ if self.drawon:
+ self.targetfig.canvas.draw()
+
+ def functop(self, val):
+ self.targetfig.subplots_adjust(top=val)
+ if self.drawon:
+ self.targetfig.canvas.draw()
+
+ def funcwspace(self, val):
+ self.targetfig.subplots_adjust(wspace=val)
+ if self.drawon:
+ self.targetfig.canvas.draw()
+
+ def funchspace(self, val):
+ self.targetfig.subplots_adjust(hspace=val)
+ if self.drawon:
+ self.targetfig.canvas.draw()
+
+
+class Cursor(AxesWidget):
+ """
+ A horizontal and vertical line that spans the axes and moves with
+ the pointer. You can turn off the hline or vline respectively with
+ the following attributes:
+
+ *horizOn*
+ Controls the visibility of the horizontal line
+
+ *vertOn*
+        Controls the visibility of the vertical line
+
+ and the visibility of the cursor itself with the *visible* attribute.
+
+ For the cursor to remain responsive you must keep a reference to
+ it.
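+
+    Example usage (a minimal sketch; illustrative only and assumes an
+    interactive backend)::
+
+        import numpy as np
+        import matplotlib.pyplot as plt
+        from matplotlib.widgets import Cursor
+
+        fig, ax = plt.subplots()
+        t = np.arange(0.0, 2.0, 0.01)
+        ax.plot(t, np.sin(2 * np.pi * t))
+
+        # keep a reference so the cursor stays responsive
+        cursor = Cursor(ax, useblit=True, color='red', linewidth=1)
+        plt.show()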
+ """
+ def __init__(self, ax, horizOn=True, vertOn=True, useblit=False,
+ **lineprops):
+ """
+ Add a cursor to *ax*. If ``useblit=True``, use the backend-dependent
+ blitting features for faster updates. *lineprops* is a dictionary of
+ line properties.
+ """
+ AxesWidget.__init__(self, ax)
+
+ self.connect_event('motion_notify_event', self.onmove)
+ self.connect_event('draw_event', self.clear)
+
+ self.visible = True
+ self.horizOn = horizOn
+ self.vertOn = vertOn
+ self.useblit = useblit and self.canvas.supports_blit
+
+ if self.useblit:
+ lineprops['animated'] = True
+ self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
+ self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
+
+ self.background = None
+ self.needclear = False
+
+ def clear(self, event):
+ """clear the cursor"""
+ if self.ignore(event):
+ return
+ if self.useblit:
+ self.background = self.canvas.copy_from_bbox(self.ax.bbox)
+ self.linev.set_visible(False)
+ self.lineh.set_visible(False)
+
+ def onmove(self, event):
+ """on mouse motion draw the cursor if visible"""
+ if self.ignore(event):
+ return
+ if not self.canvas.widgetlock.available(self):
+ return
+ if event.inaxes != self.ax:
+ self.linev.set_visible(False)
+ self.lineh.set_visible(False)
+
+ if self.needclear:
+ self.canvas.draw()
+ self.needclear = False
+ return
+ self.needclear = True
+ if not self.visible:
+ return
+ self.linev.set_xdata((event.xdata, event.xdata))
+
+ self.lineh.set_ydata((event.ydata, event.ydata))
+ self.linev.set_visible(self.visible and self.vertOn)
+ self.lineh.set_visible(self.visible and self.horizOn)
+
+ self._update()
+
+ def _update(self):
+
+ if self.useblit:
+ if self.background is not None:
+ self.canvas.restore_region(self.background)
+ self.ax.draw_artist(self.linev)
+ self.ax.draw_artist(self.lineh)
+ self.canvas.blit(self.ax.bbox)
+ else:
+
+ self.canvas.draw_idle()
+
+ return False
+
+
+class MultiCursor(Widget):
+ """
+ Provide a vertical (default) and/or horizontal line cursor shared between
+ multiple axes.
+
+ For the cursor to remain responsive you must keep a reference to
+ it.
+
+ Example usage::
+
+ from matplotlib.widgets import MultiCursor
+ from pylab import figure, show, np
+
+ t = np.arange(0.0, 2.0, 0.01)
+ s1 = np.sin(2*np.pi*t)
+ s2 = np.sin(4*np.pi*t)
+ fig = figure()
+ ax1 = fig.add_subplot(211)
+ ax1.plot(t, s1)
+
+
+ ax2 = fig.add_subplot(212, sharex=ax1)
+ ax2.plot(t, s2)
+
+ multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1,
+ horizOn=False, vertOn=True)
+ show()
+
+ """
+ def __init__(self, canvas, axes, useblit=True, horizOn=False, vertOn=True,
+ **lineprops):
+
+ self.canvas = canvas
+ self.axes = axes
+ self.horizOn = horizOn
+ self.vertOn = vertOn
+
+ xmin, xmax = axes[-1].get_xlim()
+ ymin, ymax = axes[-1].get_ylim()
+ xmid = 0.5 * (xmin + xmax)
+ ymid = 0.5 * (ymin + ymax)
+
+ self.visible = True
+ self.useblit = useblit and self.canvas.supports_blit
+ self.background = None
+ self.needclear = False
+
+ if self.useblit:
+ lineprops['animated'] = True
+
+ if vertOn:
+ self.vlines = [ax.axvline(xmid, visible=False, **lineprops)
+ for ax in axes]
+ else:
+ self.vlines = []
+
+ if horizOn:
+ self.hlines = [ax.axhline(ymid, visible=False, **lineprops)
+ for ax in axes]
+ else:
+ self.hlines = []
+
+ self.connect()
+
+ def connect(self):
+ """connect events"""
+ self._cidmotion = self.canvas.mpl_connect('motion_notify_event',
+ self.onmove)
+ self._ciddraw = self.canvas.mpl_connect('draw_event', self.clear)
+
+ def disconnect(self):
+ """disconnect events"""
+ self.canvas.mpl_disconnect(self._cidmotion)
+ self.canvas.mpl_disconnect(self._ciddraw)
+
+ def clear(self, event):
+ """clear the cursor"""
+ if self.ignore(event):
+ return
+ if self.useblit:
+ self.background = (
+ self.canvas.copy_from_bbox(self.canvas.figure.bbox))
+ for line in self.vlines + self.hlines:
+ line.set_visible(False)
+
+ def onmove(self, event):
+ if self.ignore(event):
+ return
+ if event.inaxes is None:
+ return
+ if not self.canvas.widgetlock.available(self):
+ return
+ self.needclear = True
+ if not self.visible:
+ return
+ if self.vertOn:
+ for line in self.vlines:
+ line.set_xdata((event.xdata, event.xdata))
+ line.set_visible(self.visible)
+ if self.horizOn:
+ for line in self.hlines:
+ line.set_ydata((event.ydata, event.ydata))
+ line.set_visible(self.visible)
+ self._update()
+
+ def _update(self):
+ if self.useblit:
+ if self.background is not None:
+ self.canvas.restore_region(self.background)
+ if self.vertOn:
+ for ax, line in zip(self.axes, self.vlines):
+ ax.draw_artist(line)
+ if self.horizOn:
+ for ax, line in zip(self.axes, self.hlines):
+ ax.draw_artist(line)
+ self.canvas.blit(self.canvas.figure.bbox)
+ else:
+ self.canvas.draw_idle()
+
+
+class _SelectorWidget(AxesWidget):
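+    """
+    Base class for the interactive selection widgets defined below
+    (:class:`SpanSelector`, :class:`RectangleSelector`,
+    :class:`EllipseSelector`, :class:`LassoSelector`,
+    :class:`PolygonSelector`).
+
+    It connects the default canvas events, keeps track of the press and
+    release events, the keyboard modifier state, and the blitting
+    background used by its subclasses.
+    """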
+
+ def __init__(self, ax, onselect, useblit=False, button=None,
+ state_modifier_keys=None):
+ AxesWidget.__init__(self, ax)
+
+ self.visible = True
+ self.onselect = onselect
+ self.useblit = useblit and self.canvas.supports_blit
+ self.connect_default_events()
+
+ self.state_modifier_keys = dict(move=' ', clear='escape',
+ square='shift', center='control')
+ self.state_modifier_keys.update(state_modifier_keys or {})
+
+ self.background = None
+ self.artists = []
+
+ if isinstance(button, int):
+ self.validButtons = [button]
+ else:
+ self.validButtons = button
+
+ # will save the data (position at mouseclick)
+ self.eventpress = None
+ # will save the data (pos. at mouserelease)
+ self.eventrelease = None
+ self._prev_event = None
+ self.state = set()
+
+ def set_active(self, active):
+ AxesWidget.set_active(self, active)
+ if active:
+ self.update_background(None)
+
+ def update_background(self, event):
+ """force an update of the background"""
+ # If you add a call to `ignore` here, you'll want to check edge case:
+ # `release` can call a draw event even when `ignore` is True.
+ if self.useblit:
+ self.background = self.canvas.copy_from_bbox(self.ax.bbox)
+
+ def connect_default_events(self):
+ """Connect the major canvas events to methods."""
+ self.connect_event('motion_notify_event', self.onmove)
+ self.connect_event('button_press_event', self.press)
+ self.connect_event('button_release_event', self.release)
+ self.connect_event('draw_event', self.update_background)
+ self.connect_event('key_press_event', self.on_key_press)
+ self.connect_event('key_release_event', self.on_key_release)
+ self.connect_event('scroll_event', self.on_scroll)
+
+ def ignore(self, event):
+ """return *True* if *event* should be ignored"""
+ if not self.active or not self.ax.get_visible():
+ return True
+
+ # If canvas was locked
+ if not self.canvas.widgetlock.available(self):
+ return True
+
+ if not hasattr(event, 'button'):
+ event.button = None
+
+ # Only do rectangle selection if event was triggered
+ # with a desired button
+ if self.validButtons is not None:
+ if event.button not in self.validButtons:
+ return True
+
+ # If no button was pressed yet ignore the event if it was out
+ # of the axes
+ if self.eventpress is None:
+ return event.inaxes != self.ax
+
+ # If a button was pressed, check if the release-button is the
+ # same.
+ if event.button == self.eventpress.button:
+ return False
+
+        # Otherwise, ignore events that are outside the axes or that use a
+        # different button than the initial press.
+ return (event.inaxes != self.ax or
+ event.button != self.eventpress.button)
+
+ def update(self):
+        """Draw using blitting or a full redraw, depending on *useblit*."""
+ if not self.ax.get_visible():
+ return False
+
+ if self.useblit:
+ if self.background is not None:
+ self.canvas.restore_region(self.background)
+ for artist in self.artists:
+ self.ax.draw_artist(artist)
+
+ self.canvas.blit(self.ax.bbox)
+
+ else:
+ self.canvas.draw_idle()
+ return False
+
+ def _get_data(self, event):
+ """Get the xdata and ydata for event, with limits"""
+ if event.xdata is None:
+ return None, None
+ x0, x1 = self.ax.get_xbound()
+ y0, y1 = self.ax.get_ybound()
+ xdata = max(x0, event.xdata)
+ xdata = min(x1, xdata)
+ ydata = max(y0, event.ydata)
+ ydata = min(y1, ydata)
+ return xdata, ydata
+
+ def _clean_event(self, event):
+ """Clean up an event
+
+ Use prev event if there is no xdata
+ Limit the xdata and ydata to the axes limits
+ Set the prev event
+ """
+ if event.xdata is None:
+ event = self._prev_event
+ else:
+ event = copy.copy(event)
+ event.xdata, event.ydata = self._get_data(event)
+
+ self._prev_event = event
+ return event
+
+ def press(self, event):
+ """Button press handler and validator"""
+ if not self.ignore(event):
+ event = self._clean_event(event)
+ self.eventpress = event
+ self._prev_event = event
+ key = event.key or ''
+ key = key.replace('ctrl', 'control')
+ # move state is locked in on a button press
+ if key == self.state_modifier_keys['move']:
+ self.state.add('move')
+ self._press(event)
+ return True
+ return False
+
+ def _press(self, event):
+ """Button press handler"""
+ pass
+
+ def release(self, event):
+ """Button release event handler and validator"""
+ if not self.ignore(event) and self.eventpress:
+ event = self._clean_event(event)
+ self.eventrelease = event
+ self._release(event)
+ self.eventpress = None
+ self.eventrelease = None
+ self.state.discard('move')
+ return True
+ return False
+
+ def _release(self, event):
+ """Button release event handler"""
+ pass
+
+ def onmove(self, event):
+ """Cursor move event handler and validator"""
+ if not self.ignore(event) and self.eventpress:
+ event = self._clean_event(event)
+ self._onmove(event)
+ return True
+ return False
+
+ def _onmove(self, event):
+ """Cursor move event handler"""
+ pass
+
+ def on_scroll(self, event):
+ """Mouse scroll event handler and validator"""
+ if not self.ignore(event):
+ self._on_scroll(event)
+
+ def _on_scroll(self, event):
+ """Mouse scroll event handler"""
+ pass
+
+ def on_key_press(self, event):
+ """Key press event handler and validator for all selection widgets"""
+ if self.active:
+ key = event.key or ''
+ key = key.replace('ctrl', 'control')
+ if key == self.state_modifier_keys['clear']:
+ for artist in self.artists:
+ artist.set_visible(False)
+ self.update()
+ return
+ for (state, modifier) in self.state_modifier_keys.items():
+ if modifier in key:
+ self.state.add(state)
+ self._on_key_press(event)
+
+ def _on_key_press(self, event):
+ """Key press event handler - use for widget-specific key press actions.
+ """
+ pass
+
+ def on_key_release(self, event):
+ """Key release event handler and validator"""
+ if self.active:
+ key = event.key or ''
+ for (state, modifier) in self.state_modifier_keys.items():
+ if modifier in key:
+ self.state.discard(state)
+ self._on_key_release(event)
+
+ def _on_key_release(self, event):
+ """Key release event handler"""
+ pass
+
+ def set_visible(self, visible):
+ """ Set the visibility of our artists """
+ self.visible = visible
+ for artist in self.artists:
+ artist.set_visible(visible)
+
+
+class SpanSelector(_SelectorWidget):
+ """
+ Visually select a min/max range on a single axis and call a function with
+ those values.
+
+ To guarantee that the selector remains responsive, keep a reference to it.
+
+ In order to turn off the SpanSelector, set `span_selector.active=False`. To
+ turn it back on, set `span_selector.active=True`.
+
+ Parameters
+ ----------
+ ax : :class:`matplotlib.axes.Axes` object
+
+ onselect : func(min, max), min/max are floats
+
+ direction : "horizontal" or "vertical"
+ The axis along which to draw the span selector
+
+ minspan : float, default is None
+ If selection is less than *minspan*, do not call *onselect*
+
+ useblit : bool, default is False
+ If True, use the backend-dependent blitting features for faster
+ canvas updates.
+
+ rectprops : dict, default is None
+ Dictionary of :class:`matplotlib.patches.Patch` properties
+
+ onmove_callback : func(min, max), min/max are floats, default is None
+ Called on mouse move while the span is being selected
+
+ span_stays : bool, default is False
+ If True, the span stays visible after the mouse is released
+
+ button : int or list of ints
+        Determines which mouse buttons activate the span selector.
+
+        - 1 = left mouse button
+        - 2 = center mouse button (scroll wheel)
+        - 3 = right mouse button
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> import matplotlib.widgets as mwidgets
+ >>> fig, ax = plt.subplots()
+ >>> ax.plot([1, 2, 3], [10, 50, 100])
+    >>> def onselect(vmin, vmax):
+    ...     print(vmin, vmax)
+    >>> rectprops = dict(facecolor='blue', alpha=0.5)
+    >>> span = mwidgets.SpanSelector(ax, onselect, 'horizontal',
+    ...                              rectprops=rectprops)
+ >>> fig.show()
+
+ See also: :doc:`/gallery/widgets/span_selector`
+
+ """
+
+ def __init__(self, ax, onselect, direction, minspan=None, useblit=False,
+ rectprops=None, onmove_callback=None, span_stays=False,
+ button=None):
+
+ _SelectorWidget.__init__(self, ax, onselect, useblit=useblit,
+ button=button)
+
+ if rectprops is None:
+ rectprops = dict(facecolor='red', alpha=0.5)
+
+ rectprops['animated'] = self.useblit
+
+ if direction not in ['horizontal', 'vertical']:
+ raise ValueError("direction must be 'horizontal' or 'vertical'")
+ self.direction = direction
+
+ self.rect = None
+ self.pressv = None
+
+ self.rectprops = rectprops
+ self.onmove_callback = onmove_callback
+ self.minspan = minspan
+ self.span_stays = span_stays
+
+ # Needed when dragging out of axes
+ self.prev = (0, 0)
+
+ # Reset canvas so that `new_axes` connects events.
+ self.canvas = None
+ self.new_axes(ax)
+
+ def new_axes(self, ax):
+ """Set SpanSelector to operate on a new Axes"""
+ self.ax = ax
+ if self.canvas is not ax.figure.canvas:
+ if self.canvas is not None:
+ self.disconnect_events()
+
+ self.canvas = ax.figure.canvas
+ self.connect_default_events()
+
+ if self.direction == 'horizontal':
+ trans = blended_transform_factory(self.ax.transData,
+ self.ax.transAxes)
+ w, h = 0, 1
+ else:
+ trans = blended_transform_factory(self.ax.transAxes,
+ self.ax.transData)
+ w, h = 1, 0
+ self.rect = Rectangle((0, 0), w, h,
+ transform=trans,
+ visible=False,
+ **self.rectprops)
+ if self.span_stays:
+ self.stay_rect = Rectangle((0, 0), w, h,
+ transform=trans,
+ visible=False,
+ **self.rectprops)
+ self.stay_rect.set_animated(False)
+ self.ax.add_patch(self.stay_rect)
+
+ self.ax.add_patch(self.rect)
+ self.artists = [self.rect]
+
+ def ignore(self, event):
+ """return *True* if *event* should be ignored"""
+ return _SelectorWidget.ignore(self, event) or not self.visible
+
+ def _press(self, event):
+ """on button press event"""
+ self.rect.set_visible(self.visible)
+ if self.span_stays:
+ self.stay_rect.set_visible(False)
+ # really force a draw so that the stay rect is not in
+ # the blit background
+ if self.useblit:
+ self.canvas.draw()
+ xdata, ydata = self._get_data(event)
+ if self.direction == 'horizontal':
+ self.pressv = xdata
+ else:
+ self.pressv = ydata
+ return False
+
+ def _release(self, event):
+ """on button release event"""
+ if self.pressv is None:
+ return
+ self.buttonDown = False
+
+ self.rect.set_visible(False)
+
+ if self.span_stays:
+ self.stay_rect.set_x(self.rect.get_x())
+ self.stay_rect.set_y(self.rect.get_y())
+ self.stay_rect.set_width(self.rect.get_width())
+ self.stay_rect.set_height(self.rect.get_height())
+ self.stay_rect.set_visible(True)
+
+ self.canvas.draw_idle()
+ vmin = self.pressv
+ xdata, ydata = self._get_data(event)
+ if self.direction == 'horizontal':
+ vmax = xdata or self.prev[0]
+ else:
+ vmax = ydata or self.prev[1]
+
+ if vmin > vmax:
+ vmin, vmax = vmax, vmin
+ span = vmax - vmin
+ if self.minspan is not None and span < self.minspan:
+ return
+ self.onselect(vmin, vmax)
+ self.pressv = None
+ return False
+
+ def _onmove(self, event):
+ """on motion notify event"""
+ if self.pressv is None:
+ return
+ x, y = self._get_data(event)
+ if x is None:
+ return
+
+ self.prev = x, y
+ if self.direction == 'horizontal':
+ v = x
+ else:
+ v = y
+
+ minv, maxv = v, self.pressv
+ if minv > maxv:
+ minv, maxv = maxv, minv
+ if self.direction == 'horizontal':
+ self.rect.set_x(minv)
+ self.rect.set_width(maxv - minv)
+ else:
+ self.rect.set_y(minv)
+ self.rect.set_height(maxv - minv)
+
+ if self.onmove_callback is not None:
+ vmin = self.pressv
+ xdata, ydata = self._get_data(event)
+ if self.direction == 'horizontal':
+ vmax = xdata or self.prev[0]
+ else:
+ vmax = ydata or self.prev[1]
+
+ if vmin > vmax:
+ vmin, vmax = vmax, vmin
+ self.onmove_callback(vmin, vmax)
+
+ self.update()
+ return False
+
+
+class ToolHandles(object):
+ """Control handles for canvas tools.
+
+ Parameters
+ ----------
+ ax : :class:`matplotlib.axes.Axes`
+ Matplotlib axes where tool handles are displayed.
+ x, y : 1D arrays
+ Coordinates of control handles.
+ marker : str
+ Shape of marker used to display handle. See `matplotlib.pyplot.plot`.
+ marker_props : dict
+ Additional marker properties. See :class:`matplotlib.lines.Line2D`.
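+
+    Examples
+    --------
+    A minimal construction sketch (illustrative only; assumes an
+    interactive backend)::
+
+        import matplotlib.pyplot as plt
+        from matplotlib.widgets import ToolHandles
+
+        fig, ax = plt.subplots()
+        # useblit=False so the handles show up on a regular draw
+        handles = ToolHandles(ax, [0.2, 0.8], [0.5, 0.5], useblit=False)
+        handles.set_visible(True)
+        plt.show()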
+ """
+
+ def __init__(self, ax, x, y, marker='o', marker_props=None, useblit=True):
+ self.ax = ax
+
+ props = dict(marker=marker, markersize=7, mfc='w', ls='none',
+ alpha=0.5, visible=False, label='_nolegend_')
+ props.update(marker_props if marker_props is not None else {})
+ self._markers = Line2D(x, y, animated=useblit, **props)
+ self.ax.add_line(self._markers)
+ self.artist = self._markers
+
+ @property
+ def x(self):
+ return self._markers.get_xdata()
+
+ @property
+ def y(self):
+ return self._markers.get_ydata()
+
+ def set_data(self, pts, y=None):
+ """Set x and y positions of handles"""
+ if y is not None:
+ x = pts
+ pts = np.array([x, y])
+ self._markers.set_data(pts)
+
+ def set_visible(self, val):
+ self._markers.set_visible(val)
+
+ def set_animated(self, val):
+ self._markers.set_animated(val)
+
+ def closest(self, x, y):
+        """Return the index of and pixel distance to the closest handle."""
+ pts = np.transpose((self.x, self.y))
+ # Transform data coordinates to pixel coordinates.
+ pts = self.ax.transData.transform(pts)
+ diff = pts - ((x, y))
+ if diff.ndim == 2:
+ dist = np.sqrt(np.sum(diff ** 2, axis=1))
+ return np.argmin(dist), np.min(dist)
+ else:
+ return 0, np.sqrt(np.sum(diff ** 2))
+
+
+class RectangleSelector(_SelectorWidget):
+ """
+ Select a rectangular region of an axes.
+
+ For the cursor to remain responsive you must keep a reference to
+ it.
+
+ Example usage::
+
+ from matplotlib.widgets import RectangleSelector
+ from pylab import *
+
+ def onselect(eclick, erelease):
+ 'eclick and erelease are matplotlib events at press and release'
+ print(' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata))
+ print(' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata))
+ print(' used button : ', eclick.button)
+
+ def toggle_selector(event):
+ print(' Key pressed.')
+ if event.key in ['Q', 'q'] and toggle_selector.RS.active:
+ print(' RectangleSelector deactivated.')
+ toggle_selector.RS.set_active(False)
+ if event.key in ['A', 'a'] and not toggle_selector.RS.active:
+ print(' RectangleSelector activated.')
+ toggle_selector.RS.set_active(True)
+
+ x = arange(100)/(99.0)
+ y = sin(x)
+        fig = figure()
+ ax = subplot(111)
+ ax.plot(x,y)
+
+ toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
+ connect('key_press_event', toggle_selector)
+ show()
+ """
+
+ _shape_klass = Rectangle
+
+ def __init__(self, ax, onselect, drawtype='box',
+ minspanx=None, minspany=None, useblit=False,
+ lineprops=None, rectprops=None, spancoords='data',
+ button=None, maxdist=10, marker_props=None,
+ interactive=False, state_modifier_keys=None):
+
+ """
+        Create a selector in *ax*.  When a selection is made, clear
+        the span and call *onselect* with the press and release events::
+
+          onselect(eclick, erelease)
+
+        where ``eclick`` and ``erelease`` are the matplotlib mouse events at
+        press and release; their ``xdata``/``ydata`` attributes give the
+        data coordinates of the two opposite corners.
+
+ If *minspanx* is not *None* then events smaller than *minspanx*
+ in x direction are ignored (it's the same for y).
+
+ The rectangle is drawn with *rectprops*; default::
+
+ rectprops = dict(facecolor='red', edgecolor = 'black',
+ alpha=0.2, fill=True)
+
+ The line is drawn with *lineprops*; default::
+
+ lineprops = dict(color='black', linestyle='-',
+ linewidth = 2, alpha=0.5)
+
+ Use *drawtype* if you want the mouse to draw a line,
+ a box or nothing between click and actual position by setting
+
+ ``drawtype = 'line'``, ``drawtype='box'`` or ``drawtype = 'none'``.
+ Drawing a line would result in a line from vertex A to vertex C in
+ a rectangle ABCD.
+
+ *spancoords* is one of 'data' or 'pixels'. If 'data', *minspanx*
+        and *minspany* will be interpreted in the same coordinates as
+ the x and y axis. If 'pixels', they are in pixels.
+
+ *button* is a list of integers indicating which mouse buttons should
+ be used for rectangle selection. You can also specify a single
+ integer if only a single button is desired. Default is *None*,
+ which does not limit which button can be used.
+
+ Note, typically:
+ 1 = left mouse button
+ 2 = center mouse button (scroll wheel)
+ 3 = right mouse button
+
+        *interactive* will draw a set of handles and allow you to interact
+ with the widget after it is drawn.
+
+ *state_modifier_keys* are keyboard modifiers that affect the behavior
+ of the widget.
+
+ The defaults are:
+        dict(move=' ', clear='escape', square='shift', center='control')
+
+        The keyboard modifiers and their effects:
+ 'move': Move the existing shape.
+ 'clear': Clear the current shape.
+ 'square': Makes the shape square.
+ 'center': Make the initial point the center of the shape.
+ 'square' and 'center' can be combined.
+ """
+ _SelectorWidget.__init__(self, ax, onselect, useblit=useblit,
+ button=button,
+ state_modifier_keys=state_modifier_keys)
+
+ self.to_draw = None
+ self.visible = True
+ self.interactive = interactive
+
+ if drawtype == 'none':
+ drawtype = 'line' # draw a line but make it
+ self.visible = False # invisible
+
+ if drawtype == 'box':
+ if rectprops is None:
+ rectprops = dict(facecolor='red', edgecolor='black',
+ alpha=0.2, fill=True)
+ rectprops['animated'] = self.useblit
+ self.rectprops = rectprops
+ self.to_draw = self._shape_klass((0, 0), 0, 1, visible=False,
+ **self.rectprops)
+ self.ax.add_patch(self.to_draw)
+ if drawtype == 'line':
+ if lineprops is None:
+ lineprops = dict(color='black', linestyle='-',
+ linewidth=2, alpha=0.5)
+ lineprops['animated'] = self.useblit
+ self.lineprops = lineprops
+ self.to_draw = Line2D([0, 0], [0, 0], visible=False,
+ **self.lineprops)
+ self.ax.add_line(self.to_draw)
+
+ self.minspanx = minspanx
+ self.minspany = minspany
+
+ if spancoords not in ('data', 'pixels'):
+ raise ValueError("'spancoords' must be 'data' or 'pixels'")
+
+ self.spancoords = spancoords
+ self.drawtype = drawtype
+
+ self.maxdist = maxdist
+
+ if rectprops is None:
+ props = dict(mec='r')
+ else:
+ props = dict(mec=rectprops.get('edgecolor', 'r'))
+ self._corner_order = ['NW', 'NE', 'SE', 'SW']
+ xc, yc = self.corners
+ self._corner_handles = ToolHandles(self.ax, xc, yc, marker_props=props,
+ useblit=self.useblit)
+
+ self._edge_order = ['W', 'N', 'E', 'S']
+ xe, ye = self.edge_centers
+ self._edge_handles = ToolHandles(self.ax, xe, ye, marker='s',
+ marker_props=props,
+ useblit=self.useblit)
+
+ xc, yc = self.center
+ self._center_handle = ToolHandles(self.ax, [xc], [yc], marker='s',
+ marker_props=props,
+ useblit=self.useblit)
+
+ self.active_handle = None
+
+ self.artists = [self.to_draw, self._center_handle.artist,
+ self._corner_handles.artist,
+ self._edge_handles.artist]
+
+ if not self.interactive:
+ self.artists = [self.to_draw]
+
+ self._extents_on_press = None
+
+ def _press(self, event):
+ """on button press event"""
+        # make the drawn box/line visible and record the click
+        # coordinates, button, etc.
+ if self.interactive and self.to_draw.get_visible():
+ self._set_active_handle(event)
+ else:
+ self.active_handle = None
+
+ if self.active_handle is None or not self.interactive:
+ # Clear previous rectangle before drawing new rectangle.
+ self.update()
+
+ self.set_visible(self.visible)
+
+ def _release(self, event):
+ """on button release event"""
+ if not self.interactive:
+ self.to_draw.set_visible(False)
+
+ # update the eventpress and eventrelease with the resulting extents
+ x1, x2, y1, y2 = self.extents
+ self.eventpress.xdata = x1
+ self.eventpress.ydata = y1
+ xy1 = self.ax.transData.transform_point([x1, y1])
+ self.eventpress.x, self.eventpress.y = xy1
+
+ self.eventrelease.xdata = x2
+ self.eventrelease.ydata = y2
+ xy2 = self.ax.transData.transform_point([x2, y2])
+ self.eventrelease.x, self.eventrelease.y = xy2
+
+ if self.spancoords == 'data':
+ xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
+ xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
+            # calculate dimensions of box or line and get the values in
+            # the right order
+ elif self.spancoords == 'pixels':
+ xmin, ymin = self.eventpress.x, self.eventpress.y
+ xmax, ymax = self.eventrelease.x, self.eventrelease.y
+ else:
+ raise ValueError('spancoords must be "data" or "pixels"')
+
+ if xmin > xmax:
+ xmin, xmax = xmax, xmin
+ if ymin > ymax:
+ ymin, ymax = ymax, ymin
+
+ spanx = xmax - xmin
+ spany = ymax - ymin
+ xproblems = self.minspanx is not None and spanx < self.minspanx
+ yproblems = self.minspany is not None and spany < self.minspany
+
+ # check if drawn distance (if it exists) is not too small in
+ # either x or y-direction
+ if self.drawtype != 'none' and (xproblems or yproblems):
+ for artist in self.artists:
+ artist.set_visible(False)
+ self.update()
+ return
+
+ # call desired function
+ self.onselect(self.eventpress, self.eventrelease)
+ self.update()
+
+ return False
+
+ def _onmove(self, event):
+ """on motion notify event if box/line is wanted"""
+ # resize an existing shape
+ if self.active_handle and not self.active_handle == 'C':
+ x1, x2, y1, y2 = self._extents_on_press
+ if self.active_handle in ['E', 'W'] + self._corner_order:
+ x2 = event.xdata
+ if self.active_handle in ['N', 'S'] + self._corner_order:
+ y2 = event.ydata
+
+ # move existing shape
+ elif (('move' in self.state or self.active_handle == 'C')
+ and self._extents_on_press is not None):
+ x1, x2, y1, y2 = self._extents_on_press
+ dx = event.xdata - self.eventpress.xdata
+ dy = event.ydata - self.eventpress.ydata
+ x1 += dx
+ x2 += dx
+ y1 += dy
+ y2 += dy
+
+ # new shape
+ else:
+ center = [self.eventpress.xdata, self.eventpress.ydata]
+ center_pix = [self.eventpress.x, self.eventpress.y]
+ dx = (event.xdata - center[0]) / 2.
+ dy = (event.ydata - center[1]) / 2.
+
+ # square shape
+ if 'square' in self.state:
+ dx_pix = abs(event.x - center_pix[0])
+ dy_pix = abs(event.y - center_pix[1])
+ if not dx_pix:
+ return
+ maxd = max(abs(dx_pix), abs(dy_pix))
+ if abs(dx_pix) < maxd:
+ dx *= maxd / (abs(dx_pix) + 1e-6)
+ if abs(dy_pix) < maxd:
+ dy *= maxd / (abs(dy_pix) + 1e-6)
+
+ # from center
+ if 'center' in self.state:
+ dx *= 2
+ dy *= 2
+
+ # from corner
+ else:
+ center[0] += dx
+ center[1] += dy
+
+ x1, x2, y1, y2 = (center[0] - dx, center[0] + dx,
+ center[1] - dy, center[1] + dy)
+
+ self.extents = x1, x2, y1, y2
+
+ @property
+ def _rect_bbox(self):
+ if self.drawtype == 'box':
+ x0 = self.to_draw.get_x()
+ y0 = self.to_draw.get_y()
+ width = self.to_draw.get_width()
+ height = self.to_draw.get_height()
+ return x0, y0, width, height
+ else:
+ x, y = self.to_draw.get_data()
+ x0, x1 = min(x), max(x)
+ y0, y1 = min(y), max(y)
+ return x0, y0, x1 - x0, y1 - y0
+
+ @property
+ def corners(self):
+ """Corners of rectangle from lower left, moving clockwise."""
+ x0, y0, width, height = self._rect_bbox
+ xc = x0, x0 + width, x0 + width, x0
+ yc = y0, y0, y0 + height, y0 + height
+ return xc, yc
+
+ @property
+ def edge_centers(self):
+ """Midpoint of rectangle edges from left, moving clockwise."""
+ x0, y0, width, height = self._rect_bbox
+ w = width / 2.
+ h = height / 2.
+ xe = x0, x0 + w, x0 + width, x0 + w
+ ye = y0 + h, y0, y0 + h, y0 + height
+ return xe, ye
+
+ @property
+ def center(self):
+ """Center of rectangle"""
+ x0, y0, width, height = self._rect_bbox
+ return x0 + width / 2., y0 + height / 2.
+
+ @property
+ def extents(self):
+ """Return (xmin, xmax, ymin, ymax)."""
+ x0, y0, width, height = self._rect_bbox
+ xmin, xmax = sorted([x0, x0 + width])
+ ymin, ymax = sorted([y0, y0 + height])
+ return xmin, xmax, ymin, ymax
+
+ @extents.setter
+ def extents(self, extents):
+ # Update displayed shape
+ self.draw_shape(extents)
+ # Update displayed handles
+ self._corner_handles.set_data(*self.corners)
+ self._edge_handles.set_data(*self.edge_centers)
+ self._center_handle.set_data(*self.center)
+ self.set_visible(self.visible)
+ self.update()
+
+ def draw_shape(self, extents):
+ x0, x1, y0, y1 = extents
+ xmin, xmax = sorted([x0, x1])
+ ymin, ymax = sorted([y0, y1])
+ xlim = sorted(self.ax.get_xlim())
+ ylim = sorted(self.ax.get_ylim())
+
+ xmin = max(xlim[0], xmin)
+ ymin = max(ylim[0], ymin)
+ xmax = min(xmax, xlim[1])
+ ymax = min(ymax, ylim[1])
+
+ if self.drawtype == 'box':
+ self.to_draw.set_x(xmin)
+ self.to_draw.set_y(ymin)
+ self.to_draw.set_width(xmax - xmin)
+ self.to_draw.set_height(ymax - ymin)
+
+ elif self.drawtype == 'line':
+ self.to_draw.set_data([xmin, xmax], [ymin, ymax])
+
+ def _set_active_handle(self, event):
+ """Set active handle based on the location of the mouse event"""
+ # Note: event.xdata/ydata in data coordinates, event.x/y in pixels
+ c_idx, c_dist = self._corner_handles.closest(event.x, event.y)
+ e_idx, e_dist = self._edge_handles.closest(event.x, event.y)
+ m_idx, m_dist = self._center_handle.closest(event.x, event.y)
+
+ if 'move' in self.state:
+ self.active_handle = 'C'
+ self._extents_on_press = self.extents
+
+ # Set active handle as closest handle, if mouse click is close enough.
+ elif m_dist < self.maxdist * 2:
+ self.active_handle = 'C'
+ elif c_dist > self.maxdist and e_dist > self.maxdist:
+ self.active_handle = None
+ return
+ elif c_dist < e_dist:
+ self.active_handle = self._corner_order[c_idx]
+ else:
+ self.active_handle = self._edge_order[e_idx]
+
+ # Save coordinates of rectangle at the start of handle movement.
+ x1, x2, y1, y2 = self.extents
+ # Switch variables so that only x2 and/or y2 are updated on move.
+ if self.active_handle in ['W', 'SW', 'NW']:
+ x1, x2 = x2, event.xdata
+ if self.active_handle in ['N', 'NW', 'NE']:
+ y1, y2 = y2, event.ydata
+ self._extents_on_press = x1, x2, y1, y2
+
+ @property
+ def geometry(self):
+ """
+ Returns numpy.ndarray of shape (2,5) containing
+ x (``RectangleSelector.geometry[1,:]``) and
+ y (``RectangleSelector.geometry[0,:]``)
+ coordinates of the four corners of the rectangle starting
+ and ending in the top left corner.
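+
+        A usage sketch (illustrative), reading the corner coordinates from
+        an existing ``selector`` after a selection has been made::
+
+            geom = selector.geometry
+            xs, ys = geom[1, :], geom[0, :]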
+ """
+ if hasattr(self.to_draw, 'get_verts'):
+ xfm = self.ax.transData.inverted()
+ y, x = xfm.transform(self.to_draw.get_verts()).T
+ return np.array([x, y])
+ else:
+ return np.array(self.to_draw.get_data())
+
+
+class EllipseSelector(RectangleSelector):
+ """
+ Select an elliptical region of an axes.
+
+ For the cursor to remain responsive you must keep a reference to
+ it.
+
+ Example usage::
+
+ from matplotlib.widgets import EllipseSelector
+ from pylab import *
+
+ def onselect(eclick, erelease):
+ 'eclick and erelease are matplotlib events at press and release'
+ print(' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata))
+ print(' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata))
+ print(' used button : ', eclick.button)
+
+ def toggle_selector(event):
+ print(' Key pressed.')
+ if event.key in ['Q', 'q'] and toggle_selector.ES.active:
+ print(' EllipseSelector deactivated.')
+                toggle_selector.ES.set_active(False)
+ if event.key in ['A', 'a'] and not toggle_selector.ES.active:
+ print(' EllipseSelector activated.')
+ toggle_selector.ES.set_active(True)
+
+ x = arange(100)/(99.0)
+ y = sin(x)
+        fig = figure()
+ ax = subplot(111)
+ ax.plot(x,y)
+
+ toggle_selector.ES = EllipseSelector(ax, onselect, drawtype='line')
+ connect('key_press_event', toggle_selector)
+ show()
+ """
+ _shape_klass = Ellipse
+
+ def draw_shape(self, extents):
+ x1, x2, y1, y2 = extents
+ xmin, xmax = sorted([x1, x2])
+ ymin, ymax = sorted([y1, y2])
+ center = [x1 + (x2 - x1) / 2., y1 + (y2 - y1) / 2.]
+ a = (xmax - xmin) / 2.
+ b = (ymax - ymin) / 2.
+
+ if self.drawtype == 'box':
+ self.to_draw.center = center
+ self.to_draw.width = 2 * a
+ self.to_draw.height = 2 * b
+ else:
+ rad = np.deg2rad(np.arange(31) * 12)
+ x = a * np.cos(rad) + center[0]
+ y = b * np.sin(rad) + center[1]
+ self.to_draw.set_data(x, y)
+
+ @property
+ def _rect_bbox(self):
+ if self.drawtype == 'box':
+ x, y = self.to_draw.center
+ width = self.to_draw.width
+ height = self.to_draw.height
+ return x - width / 2., y - height / 2., width, height
+ else:
+ x, y = self.to_draw.get_data()
+ x0, x1 = min(x), max(x)
+ y0, y1 = min(y), max(y)
+ return x0, y0, x1 - x0, y1 - y0
+
+
+class LassoSelector(_SelectorWidget):
+ """
+ Selection curve of an arbitrary shape.
+
+ For the selector to remain responsive you must keep a reference to it.
+
+ The selected path can be used in conjunction with `~.Path.contains_point`
+ to select data points from an image.
+
+ In contrast to `Lasso`, `LassoSelector` is written with an interface
+ similar to `RectangleSelector` and `SpanSelector`, and will continue to
+ interact with the axes until disconnected.
+
+ Example usage::
+
+ ax = subplot(111)
+ ax.plot(x,y)
+
+ def onselect(verts):
+ print(verts)
+ lasso = LassoSelector(ax, onselect)
+
+ Parameters
+ ----------
+ ax : :class:`~matplotlib.axes.Axes`
+ The parent axes for the widget.
+ onselect : function
+ Whenever the lasso is released, the *onselect* function is called and
+ passed the vertices of the selected path.
+ button : List[Int], optional
+ A list of integers indicating which mouse buttons should be used for
+ rectangle selection. You can also specify a single integer if only a
+ single button is desired. Default is ``None``, which does not limit
+ which button can be used.
+
+ Note, typically:
+
+ - 1 = left mouse button
+ - 2 = center mouse button (scroll wheel)
+ - 3 = right mouse button
+
+ """
+
+ def __init__(self, ax, onselect=None, useblit=True, lineprops=None,
+ button=None):
+ _SelectorWidget.__init__(self, ax, onselect, useblit=useblit,
+ button=button)
+
+ self.verts = None
+
+ if lineprops is None:
+ lineprops = dict()
+ if useblit:
+ lineprops['animated'] = True
+ self.line = Line2D([], [], **lineprops)
+ self.line.set_visible(False)
+ self.ax.add_line(self.line)
+ self.artists = [self.line]
+
+ def onpress(self, event):
+ self.press(event)
+
+ def _press(self, event):
+ self.verts = [self._get_data(event)]
+ self.line.set_visible(True)
+
+ def onrelease(self, event):
+ self.release(event)
+
+ def _release(self, event):
+ if self.verts is not None:
+ self.verts.append(self._get_data(event))
+ self.onselect(self.verts)
+ self.line.set_data([[], []])
+ self.line.set_visible(False)
+ self.verts = None
+
+ def _onmove(self, event):
+ if self.verts is None:
+ return
+ self.verts.append(self._get_data(event))
+
+ self.line.set_data(list(zip(*self.verts)))
+
+ self.update()
+
+
+class PolygonSelector(_SelectorWidget):
+ """Select a polygon region of an axes.
+
+ Place vertices with each mouse click, and make the selection by completing
+ the polygon (clicking on the first vertex). Hold the *ctrl* key and click
+ and drag a vertex to reposition it (the *ctrl* key is not necessary if the
+ polygon has already been completed). Hold the *shift* key and click and
+ drag anywhere in the axes to move all vertices. Press the *esc* key to
+ start a new polygon.
+
+ For the selector to remain responsive you must keep a reference to
+ it.
+
+ Parameters
+ ----------
+ ax : :class:`~matplotlib.axes.Axes`
+ The parent axes for the widget.
+ onselect : function
+ When a polygon is completed or modified after completion,
+ the `onselect` function is called and passed a list of the vertices as
+ ``(xdata, ydata)`` tuples.
+ useblit : bool, optional
+ lineprops : dict, optional
+ The line for the sides of the polygon is drawn with the properties
+ given by `lineprops`. The default is ``dict(color='k', linestyle='-',
+ linewidth=2, alpha=0.5)``.
+ markerprops : dict, optional
+ The markers for the vertices of the polygon are drawn with the
+ properties given by `markerprops`. The default is ``dict(marker='o',
+ markersize=7, mec='k', mfc='k', alpha=0.5)``.
+ vertex_select_radius : float, optional
+ A vertex is selected (to complete the polygon or to move a vertex)
+ if the mouse click is within `vertex_select_radius` pixels of the
+ vertex. The default radius is 15 pixels.
+
+ Examples
+ --------
+ :doc:`/gallery/widgets/polygon_selector_demo`
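+
+    A minimal sketch (illustrative only; assumes an interactive backend)::
+
+        import matplotlib.pyplot as plt
+        from matplotlib.widgets import PolygonSelector
+
+        fig, ax = plt.subplots()
+        ax.plot([0, 1, 2], [0, 1, 0])
+
+        def onselect(verts):
+            # verts is the list of (xdata, ydata) vertex tuples
+            print(verts)
+
+        # keep a reference so the selector stays responsive
+        selector = PolygonSelector(ax, onselect)
+        plt.show()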
+ """
+
+ def __init__(self, ax, onselect, useblit=False,
+ lineprops=None, markerprops=None, vertex_select_radius=15):
+ # The state modifiers 'move', 'square', and 'center' are expected by
+ # _SelectorWidget but are not supported by PolygonSelector
+        # Note: could not use the existing 'move' state modifier in place of
+ # 'move_all' because _SelectorWidget automatically discards 'move'
+ # from the state on button release.
+ state_modifier_keys = dict(clear='escape', move_vertex='control',
+ move_all='shift', move='not-applicable',
+ square='not-applicable',
+ center='not-applicable')
+ _SelectorWidget.__init__(self, ax, onselect, useblit=useblit,
+ state_modifier_keys=state_modifier_keys)
+
+ self._xs, self._ys = [0], [0]
+ self._polygon_completed = False
+
+ if lineprops is None:
+ lineprops = dict(color='k', linestyle='-', linewidth=2, alpha=0.5)
+ lineprops['animated'] = self.useblit
+ self.line = Line2D(self._xs, self._ys, **lineprops)
+ self.ax.add_line(self.line)
+
+ if markerprops is None:
+ markerprops = dict(mec='k', mfc=lineprops.get('color', 'k'))
+ self._polygon_handles = ToolHandles(self.ax, self._xs, self._ys,
+ useblit=self.useblit,
+ marker_props=markerprops)
+
+ self._active_handle_idx = -1
+ self.vertex_select_radius = vertex_select_radius
+
+ self.artists = [self.line, self._polygon_handles.artist]
+ self.set_visible(True)
+
+ def _press(self, event):
+ """Button press event handler"""
+ # Check for selection of a tool handle.
+ if ((self._polygon_completed or 'move_vertex' in self.state)
+ and len(self._xs) > 0):
+ h_idx, h_dist = self._polygon_handles.closest(event.x, event.y)
+ if h_dist < self.vertex_select_radius:
+ self._active_handle_idx = h_idx
+ # Save the vertex positions at the time of the press event (needed to
+ # support the 'move_all' state modifier).
+ self._xs_at_press, self._ys_at_press = self._xs[:], self._ys[:]
+
+ def _release(self, event):
+ """Button release event handler"""
+ # Release active tool handle.
+ if self._active_handle_idx >= 0:
+ self._active_handle_idx = -1
+
+ # Complete the polygon.
+ elif (len(self._xs) > 3
+ and self._xs[-1] == self._xs[0]
+ and self._ys[-1] == self._ys[0]):
+ self._polygon_completed = True
+
+ # Place new vertex.
+ elif (not self._polygon_completed
+ and 'move_all' not in self.state
+ and 'move_vertex' not in self.state):
+ self._xs.insert(-1, event.xdata)
+ self._ys.insert(-1, event.ydata)
+
+ if self._polygon_completed:
+ self.onselect(self.verts)
+
+ def onmove(self, event):
+ """Cursor move event handler and validator"""
+ # Method overrides _SelectorWidget.onmove because the polygon selector
+ # needs to process the move callback even if there is no button press.
+        # _SelectorWidget.onmove includes logic to ignore the move event if
+ # eventpress is None.
+ if not self.ignore(event):
+ event = self._clean_event(event)
+ self._onmove(event)
+ return True
+ return False
+
+ def _onmove(self, event):
+ """Cursor move event handler"""
+ # Move the active vertex (ToolHandle).
+ if self._active_handle_idx >= 0:
+ idx = self._active_handle_idx
+ self._xs[idx], self._ys[idx] = event.xdata, event.ydata
+ # Also update the end of the polygon line if the first vertex is
+ # the active handle and the polygon is completed.
+ if idx == 0 and self._polygon_completed:
+ self._xs[-1], self._ys[-1] = event.xdata, event.ydata
+
+ # Move all vertices.
+ elif 'move_all' in self.state and self.eventpress:
+ dx = event.xdata - self.eventpress.xdata
+ dy = event.ydata - self.eventpress.ydata
+ for k in range(len(self._xs)):
+ self._xs[k] = self._xs_at_press[k] + dx
+ self._ys[k] = self._ys_at_press[k] + dy
+
+ # Do nothing if completed or waiting for a move.
+ elif (self._polygon_completed
+ or 'move_vertex' in self.state or 'move_all' in self.state):
+ return
+
+ # Position pending vertex.
+ else:
+ # Calculate distance to the start vertex.
+ x0, y0 = self.line.get_transform().transform((self._xs[0],
+ self._ys[0]))
+ v0_dist = np.sqrt((x0 - event.x) ** 2 + (y0 - event.y) ** 2)
+ # Lock on to the start vertex if near it and ready to complete.
+ if len(self._xs) > 3 and v0_dist < self.vertex_select_radius:
+ self._xs[-1], self._ys[-1] = self._xs[0], self._ys[0]
+ else:
+ self._xs[-1], self._ys[-1] = event.xdata, event.ydata
+
+ self._draw_polygon()
+
+ def _on_key_press(self, event):
+ """Key press event handler"""
+ # Remove the pending vertex if entering the 'move_vertex' or
+ # 'move_all' mode
+ if (not self._polygon_completed
+ and ('move_vertex' in self.state or 'move_all' in self.state)):
+ self._xs, self._ys = self._xs[:-1], self._ys[:-1]
+ self._draw_polygon()
+
+ def _on_key_release(self, event):
+ """Key release event handler"""
+ # Add back the pending vertex if leaving the 'move_vertex' or
+ # 'move_all' mode (by checking the released key)
+ if (not self._polygon_completed
+ and
+ (event.key == self.state_modifier_keys.get('move_vertex')
+ or event.key == self.state_modifier_keys.get('move_all'))):
+ self._xs.append(event.xdata)
+ self._ys.append(event.ydata)
+ self._draw_polygon()
+ # Reset the polygon if the released key is the 'clear' key.
+ elif event.key == self.state_modifier_keys.get('clear'):
+ event = self._clean_event(event)
+ self._xs, self._ys = [event.xdata], [event.ydata]
+ self._polygon_completed = False
+ self.set_visible(True)
+
+ def _draw_polygon(self):
+ """Redraw the polygon based on the new vertex positions."""
+ self.line.set_data(self._xs, self._ys)
+ # Only show one tool handle at the start and end vertex of the polygon
+ # if the polygon is completed or the user is locked on to the start
+ # vertex.
+ if (self._polygon_completed
+ or (len(self._xs) > 3
+ and self._xs[-1] == self._xs[0]
+ and self._ys[-1] == self._ys[0])):
+ self._polygon_handles.set_data(self._xs[:-1], self._ys[:-1])
+ else:
+ self._polygon_handles.set_data(self._xs, self._ys)
+ self.update()
+
+ @property
+ def verts(self):
+ """Get the polygon vertices.
+
+ Returns
+ -------
+ list
+ A list of the vertices of the polygon as ``(xdata, ydata)`` tuples.
+ """
+ return list(zip(self._xs[:-1], self._ys[:-1]))
+
+
+class Lasso(AxesWidget):
+ """Selection curve of an arbitrary shape.
+
+ The selected path can be used in conjunction with
+ :func:`~matplotlib.path.Path.contains_point` to select data points
+ from an image.
+
+ Unlike :class:`LassoSelector`, this must be initialized with a starting
+ point `xy`, and the `Lasso` events are destroyed upon release.
+
+ Parameters
+ ----------
+ ax : `~matplotlib.axes.Axes`
+ The parent axes for the widget.
+ xy : array
+ Coordinates of the start of the lasso.
+ callback : callable
+ Whenever the lasso is released, the `callback` function is called and
+ passed the vertices of the selected path.
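+
+    Example usage (a minimal sketch; illustrative only and assumes an
+    interactive backend)::
+
+        import matplotlib.pyplot as plt
+        from matplotlib.widgets import Lasso
+
+        fig, ax = plt.subplots()
+        ax.plot([0, 1, 2], [0, 1, 0], 'o')
+
+        def callback(verts):
+            print(verts)
+
+        def onpress(event):
+            if event.inaxes is ax:
+                # keep a reference for the lifetime of the drag
+                onpress.lasso = Lasso(ax, (event.xdata, event.ydata),
+                                      callback)
+
+        fig.canvas.mpl_connect('button_press_event', onpress)
+        plt.show()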
+ """
+
+ def __init__(self, ax, xy, callback=None, useblit=True):
+ AxesWidget.__init__(self, ax)
+
+ self.useblit = useblit and self.canvas.supports_blit
+ if self.useblit:
+ self.background = self.canvas.copy_from_bbox(self.ax.bbox)
+
+ x, y = xy
+ self.verts = [(x, y)]
+ self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
+ self.ax.add_line(self.line)
+ self.callback = callback
+ self.connect_event('button_release_event', self.onrelease)
+ self.connect_event('motion_notify_event', self.onmove)
+
+ def onrelease(self, event):
+ if self.ignore(event):
+ return
+ if self.verts is not None:
+ self.verts.append((event.xdata, event.ydata))
+ if len(self.verts) > 2:
+ self.callback(self.verts)
+ self.ax.lines.remove(self.line)
+ self.verts = None
+ self.disconnect_events()
+
+ def onmove(self, event):
+ if self.ignore(event):
+ return
+ if self.verts is None:
+ return
+ if event.inaxes != self.ax:
+ return
+ if event.button != 1:
+ return
+ self.verts.append((event.xdata, event.ydata))
+
+ self.line.set_data(list(zip(*self.verts)))
+
+ if self.useblit:
+ self.canvas.restore_region(self.background)
+ self.ax.draw_artist(self.line)
+ self.canvas.blit(self.ax.bbox)
+ else:
+ self.canvas.draw_idle()
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/__init__.py b/contrib/python/matplotlib/py2/mpl_toolkits/__init__.py
new file mode 100644
index 00000000000..8d9942e652d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/__init__.py
@@ -0,0 +1,4 @@
+try:
+ __import__('pkg_resources').declare_namespace(__name__)
+except ImportError:
+ pass # must not have setuptools
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/ChangeLog b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/ChangeLog
new file mode 100644
index 00000000000..79cc01cfdf4
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/ChangeLog
@@ -0,0 +1,13 @@
+2009-06-01 Jae-Joon Lee <lee.j.joon@gmail.com>
+
+ * axislines.py (Axes.toggle_axisline): fix broken spine support.
+ (AxisArtistHelper): Initial support for curvelinear grid and ticks.
+
+2009-05-04 Jae-Joon Lee <lee.j.joon@gmail.com>
+
+ * inset_locator.py (inset_axes, zoomed_inset_axes): axes_class support
+
+ * axislines.py : Better support for tick (tick label) color
+ handling
+ (Axes.get_children): fix typo
+
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/__init__.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/__init__.py
new file mode 100644
index 00000000000..c10e89bd62b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/__init__.py
@@ -0,0 +1,15 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from . import axes_size as Size
+from .axes_divider import Divider, SubplotDivider, LocatableAxes, \
+ make_axes_locatable
+from .axes_grid import Grid, ImageGrid, AxesGrid
+#from axes_divider import make_axes_locatable
+from matplotlib.cbook import warn_deprecated
+warn_deprecated(since='2.1',
+ name='mpl_toolkits.axes_grid',
+ alternative='mpl_toolkits.axes_grid1 and'
+                ' mpl_toolkits.axisartist provides the same'
+ ' functionality',
+ obj_type='module')
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/anchored_artists.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/anchored_artists.py
new file mode 100644
index 00000000000..14b661497d8
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/anchored_artists.py
@@ -0,0 +1,9 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from matplotlib.offsetbox import AnchoredOffsetbox, AuxTransformBox, VPacker,\
+ TextArea, AnchoredText, DrawingArea, AnnotationBbox
+
+from mpl_toolkits.axes_grid1.anchored_artists import \
+ AnchoredDrawingArea, AnchoredAuxTransformBox, \
+ AnchoredEllipse, AnchoredSizeBar
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/angle_helper.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/angle_helper.py
new file mode 100644
index 00000000000..f0f877d9136
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/angle_helper.py
@@ -0,0 +1,4 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axisartist.angle_helper import *
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_divider.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_divider.py
new file mode 100644
index 00000000000..25694ecf5ec
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_divider.py
@@ -0,0 +1,8 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axes_grid1.axes_divider import Divider, AxesLocator, SubplotDivider, \
+ AxesDivider, locatable_axes_factory, make_axes_locatable
+
+from mpl_toolkits.axes_grid.axislines import Axes
+LocatableAxes = locatable_axes_factory(Axes)
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_grid.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_grid.py
new file mode 100644
index 00000000000..58212ac89c4
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_grid.py
@@ -0,0 +1,30 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import mpl_toolkits.axes_grid1.axes_grid as axes_grid_orig
+from .axes_divider import LocatableAxes
+
+class CbarAxes(axes_grid_orig.CbarAxesBase, LocatableAxes):
+ def __init__(self, *kl, **kwargs):
+        orientation = kwargs.pop("orientation", None)
+ if orientation is None:
+ raise ValueError("orientation must be specified")
+ self.orientation = orientation
+ self._default_label_on = False
+ self.locator = None
+
+ super(LocatableAxes, self).__init__(*kl, **kwargs)
+
+ def cla(self):
+ super(LocatableAxes, self).cla()
+ self._config_axes()
+
+
+class Grid(axes_grid_orig.Grid):
+ _defaultLocatableAxesClass = LocatableAxes
+
+class ImageGrid(axes_grid_orig.ImageGrid):
+ _defaultLocatableAxesClass = LocatableAxes
+ _defaultCbarAxesClass = CbarAxes
+
+AxesGrid = ImageGrid
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_rgb.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_rgb.py
new file mode 100644
index 00000000000..bfd4bb98ad7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_rgb.py
@@ -0,0 +1,11 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+#from mpl_toolkits.axes_grid1.axes_rgb import *
+from mpl_toolkits.axes_grid1.axes_rgb import make_rgb_axes, imshow_rgb, RGBAxesBase
+
+#import mpl_toolkits.axes_grid1.axes_rgb as axes_rgb_orig
+from .axislines import Axes
+
+class RGBAxes(RGBAxesBase):
+ _defaultAxesClass = Axes
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_size.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_size.py
new file mode 100644
index 00000000000..998b5e3c871
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axes_size.py
@@ -0,0 +1,4 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axes_grid1.axes_size import *
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axis_artist.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axis_artist.py
new file mode 100644
index 00000000000..92f0538ceba
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axis_artist.py
@@ -0,0 +1,4 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axisartist.axis_artist import *
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axisline_style.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axisline_style.py
new file mode 100644
index 00000000000..2eef3b8b344
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axisline_style.py
@@ -0,0 +1,4 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axisartist.axisline_style import *
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axislines.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axislines.py
new file mode 100644
index 00000000000..9653aa17020
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/axislines.py
@@ -0,0 +1,4 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axisartist.axislines import *
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/clip_path.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/clip_path.py
new file mode 100644
index 00000000000..bafe568fb1a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/clip_path.py
@@ -0,0 +1,4 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axisartist.clip_path import *
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/colorbar.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/colorbar.py
new file mode 100644
index 00000000000..cc5c252da89
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/colorbar.py
@@ -0,0 +1,5 @@
+from mpl_toolkits.axes_grid1.colorbar import (
+ make_axes_kw_doc, colormap_kw_doc, colorbar_doc,
+ CbarAxesLocator, ColorbarBase, Colorbar,
+ make_axes, colorbar
+)
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/floating_axes.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/floating_axes.py
new file mode 100644
index 00000000000..3f30d57c3a8
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/floating_axes.py
@@ -0,0 +1,4 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axisartist.floating_axes import *
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/grid_finder.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/grid_finder.py
new file mode 100644
index 00000000000..ffa3db76cf8
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/grid_finder.py
@@ -0,0 +1,4 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axisartist.grid_finder import *
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/grid_helper_curvelinear.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/grid_helper_curvelinear.py
new file mode 100644
index 00000000000..325ddd6af22
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/grid_helper_curvelinear.py
@@ -0,0 +1,4 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axisartist.grid_helper_curvelinear import *
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/inset_locator.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/inset_locator.py
new file mode 100644
index 00000000000..a9ed77beda3
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/inset_locator.py
@@ -0,0 +1,7 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axes_grid1.inset_locator import InsetPosition, \
+ AnchoredSizeLocator, \
+ AnchoredZoomLocator, BboxPatch, BboxConnector, BboxConnectorPatch, \
+ inset_axes, zoomed_inset_axes, mark_inset
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/parasite_axes.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/parasite_axes.py
new file mode 100644
index 00000000000..cad56e43a22
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid/parasite_axes.py
@@ -0,0 +1,18 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axes_grid1.parasite_axes import (
+ host_axes_class_factory, parasite_axes_class_factory,
+ parasite_axes_auxtrans_class_factory, subplot_class_factory)
+
+from .axislines import Axes
+
+
+ParasiteAxes = parasite_axes_class_factory(Axes)
+
+ParasiteAxesAuxTrans = \
+ parasite_axes_auxtrans_class_factory(axes_class=ParasiteAxes)
+
+HostAxes = host_axes_class_factory(axes_class=Axes)
+
+SubplotHost = subplot_class_factory(HostAxes)
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/__init__.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/__init__.py
new file mode 100644
index 00000000000..3e225ba9f0c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/__init__.py
@@ -0,0 +1,12 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from . import axes_size as Size
+from .axes_divider import Divider, SubplotDivider, LocatableAxes, \
+ make_axes_locatable
+from .axes_grid import Grid, ImageGrid, AxesGrid
+#from axes_divider import make_axes_locatable
+
+from .parasite_axes import host_subplot, host_axes
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/anchored_artists.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/anchored_artists.py
new file mode 100644
index 00000000000..5b492858e8d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/anchored_artists.py
@@ -0,0 +1,376 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+import six
+
+from matplotlib import docstring
+from matplotlib.offsetbox import (AnchoredOffsetbox, AuxTransformBox,
+ DrawingArea, TextArea, VPacker)
+from matplotlib.patches import Rectangle, Ellipse
+
+
+__all__ = ['AnchoredDrawingArea', 'AnchoredAuxTransformBox',
+ 'AnchoredEllipse', 'AnchoredSizeBar']
+
+
+class AnchoredDrawingArea(AnchoredOffsetbox):
+ @docstring.dedent
+ def __init__(self, width, height, xdescent, ydescent,
+ loc, pad=0.4, borderpad=0.5, prop=None, frameon=True,
+ **kwargs):
+ """
+ An anchored container with a fixed size and fillable DrawingArea.
+
+ Artists added to the *drawing_area* will have their coordinates
+ interpreted as pixels. Any transformations set on the artists will be
+ overridden.
+
+ Parameters
+ ----------
+ width, height : int or float
+ width and height of the container, in pixels.
+
+ xdescent, ydescent : int or float
+ descent of the container in the x- and y- direction, in pixels.
+
+ loc : int
+ Location of this artist. Valid location codes are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4,
+ 'right' : 5,
+ 'center left' : 6,
+ 'center right' : 7,
+ 'lower center' : 8,
+ 'upper center' : 9,
+ 'center' : 10
+
+ pad : int or float, optional
+ Padding around the child objects, in fraction of the font
+ size. Defaults to 0.4.
+
+ borderpad : int or float, optional
+ Border padding, in fraction of the font size.
+ Defaults to 0.5.
+
+ prop : `matplotlib.font_manager.FontProperties`, optional
+ Font property used as a reference for paddings.
+
+ frameon : bool, optional
+            If True, draw a box around the artist. Defaults to True.
+
+ **kwargs :
+            Keyword arguments to pass to
+ :class:`matplotlib.offsetbox.AnchoredOffsetbox`.
+
+ Attributes
+ ----------
+ drawing_area : `matplotlib.offsetbox.DrawingArea`
+ A container for artists to display.
+
+ Examples
+ --------
+ To display blue and red circles of different sizes in the upper right
+ of an axes *ax*:
+
+        >>> from matplotlib.patches import Circle
+        >>> ada = AnchoredDrawingArea(20, 20, 0, 0, loc=1, frameon=False)
+ >>> ada.drawing_area.add_artist(Circle((10, 10), 10, fc="b"))
+ >>> ada.drawing_area.add_artist(Circle((30, 10), 5, fc="r"))
+ >>> ax.add_artist(ada)
+ """
+ self.da = DrawingArea(width, height, xdescent, ydescent)
+ self.drawing_area = self.da
+
+ super(AnchoredDrawingArea, self).__init__(
+ loc, pad=pad, borderpad=borderpad, child=self.da, prop=None,
+ frameon=frameon, **kwargs
+ )
+
+
+class AnchoredAuxTransformBox(AnchoredOffsetbox):
+ @docstring.dedent
+ def __init__(self, transform, loc,
+ pad=0.4, borderpad=0.5, prop=None, frameon=True, **kwargs):
+ """
+ An anchored container with transformed coordinates.
+
+ Artists added to the *drawing_area* are scaled according to the
+ coordinates of the transformation used. The dimensions of this artist
+ will scale to contain the artists added.
+
+ Parameters
+ ----------
+ transform : `matplotlib.transforms.Transform`
+            The transformation object for the coordinate system in use, e.g.,
+ :attr:`matplotlib.axes.Axes.transData`.
+
+ loc : int
+ Location of this artist. Valid location codes are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4,
+ 'right' : 5,
+ 'center left' : 6,
+ 'center right' : 7,
+ 'lower center' : 8,
+ 'upper center' : 9,
+ 'center' : 10
+
+ pad : int or float, optional
+ Padding around the child objects, in fraction of the font
+ size. Defaults to 0.4.
+
+ borderpad : int or float, optional
+ Border padding, in fraction of the font size.
+ Defaults to 0.5.
+
+ prop : `matplotlib.font_manager.FontProperties`, optional
+ Font property used as a reference for paddings.
+
+ frameon : bool, optional
+            If True, draw a box around the artist. Defaults to True.
+
+ **kwargs :
+            Keyword arguments to pass to
+ :class:`matplotlib.offsetbox.AnchoredOffsetbox`.
+
+ Attributes
+ ----------
+ drawing_area : `matplotlib.offsetbox.AuxTransformBox`
+ A container for artists to display.
+
+ Examples
+ --------
+ To display an ellipse in the upper left, with a width of 0.1 and
+ height of 0.4 in data coordinates:
+
+        >>> from matplotlib.patches import Ellipse
+        >>> box = AnchoredAuxTransformBox(ax.transData, loc=2)
+ >>> el = Ellipse((0,0), width=0.1, height=0.4, angle=30)
+ >>> box.drawing_area.add_artist(el)
+ >>> ax.add_artist(box)
+ """
+ self.drawing_area = AuxTransformBox(transform)
+
+ AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
+ child=self.drawing_area,
+ prop=prop,
+ frameon=frameon,
+ **kwargs)
+
+
+class AnchoredEllipse(AnchoredOffsetbox):
+ @docstring.dedent
+ def __init__(self, transform, width, height, angle, loc,
+ pad=0.1, borderpad=0.1, prop=None, frameon=True, **kwargs):
+ """
+ Draw an anchored ellipse of a given size.
+
+ Parameters
+ ----------
+ transform : `matplotlib.transforms.Transform`
+            The transformation object for the coordinate system in use, e.g.,
+ :attr:`matplotlib.axes.Axes.transData`.
+
+ width, height : int or float
+ Width and height of the ellipse, given in coordinates of
+ *transform*.
+
+ angle : int or float
+ Rotation of the ellipse, in degrees, anti-clockwise.
+
+ loc : int
+            Location of the ellipse. Valid location codes are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4,
+ 'right' : 5,
+ 'center left' : 6,
+ 'center right' : 7,
+ 'lower center' : 8,
+ 'upper center' : 9,
+ 'center' : 10
+
+ pad : int or float, optional
+ Padding around the ellipse, in fraction of the font size. Defaults
+ to 0.1.
+
+ borderpad : int or float, optional
+ Border padding, in fraction of the font size. Defaults to 0.1.
+
+ frameon : bool, optional
+ If True, draw a box around the ellipse. Defaults to True.
+
+ prop : `matplotlib.font_manager.FontProperties`, optional
+ Font property used as a reference for paddings.
+
+ **kwargs :
+            Keyword arguments to pass to
+ :class:`matplotlib.offsetbox.AnchoredOffsetbox`.
+
+ Attributes
+ ----------
+ ellipse : `matplotlib.patches.Ellipse`
+ Ellipse patch drawn.
+ """
+ self._box = AuxTransformBox(transform)
+ self.ellipse = Ellipse((0, 0), width, height, angle)
+ self._box.add_artist(self.ellipse)
+
+ AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
+ child=self._box,
+ prop=prop,
+ frameon=frameon, **kwargs)
+
+
+class AnchoredSizeBar(AnchoredOffsetbox):
+ @docstring.dedent
+ def __init__(self, transform, size, label, loc,
+ pad=0.1, borderpad=0.1, sep=2,
+ frameon=True, size_vertical=0, color='black',
+ label_top=False, fontproperties=None, fill_bar=None,
+ **kwargs):
+ """
+ Draw a horizontal scale bar with a center-aligned label underneath.
+
+ Parameters
+ ----------
+ transform : `matplotlib.transforms.Transform`
+            The transformation object for the coordinate system in use, e.g.,
+ :attr:`matplotlib.axes.Axes.transData`.
+
+ size : int or float
+ Horizontal length of the size bar, given in coordinates of
+ *transform*.
+
+ label : str
+ Label to display.
+
+ loc : int
+ Location of this size bar. Valid location codes are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4,
+ 'right' : 5,
+ 'center left' : 6,
+ 'center right' : 7,
+ 'lower center' : 8,
+ 'upper center' : 9,
+ 'center' : 10
+
+ pad : int or float, optional
+ Padding around the label and size bar, in fraction of the font
+ size. Defaults to 0.1.
+
+ borderpad : int or float, optional
+ Border padding, in fraction of the font size.
+ Defaults to 0.1.
+
+ sep : int or float, optional
+ Separation between the label and the size bar, in points.
+ Defaults to 2.
+
+ frameon : bool, optional
+ If True, draw a box around the horizontal bar and label.
+ Defaults to True.
+
+ size_vertical : int or float, optional
+ Vertical length of the size bar, given in coordinates of
+ *transform*. Defaults to 0.
+
+ color : str, optional
+ Color for the size bar and label.
+ Defaults to black.
+
+ label_top : bool, optional
+ If True, the label will be over the size bar.
+ Defaults to False.
+
+ fontproperties : `matplotlib.font_manager.FontProperties`, optional
+ Font properties for the label text.
+
+ fill_bar : bool, optional
+            If True and if *size_vertical* is nonzero, the size bar will
+            be filled in with the specified *color*.
+ Defaults to True if `size_vertical` is greater than
+ zero and False otherwise.
+
+ **kwargs :
+            Keyword arguments to pass to
+ :class:`matplotlib.offsetbox.AnchoredOffsetbox`.
+
+ Attributes
+ ----------
+ size_bar : `matplotlib.offsetbox.AuxTransformBox`
+ Container for the size bar.
+
+ txt_label : `matplotlib.offsetbox.TextArea`
+ Container for the label of the size bar.
+
+ Notes
+ -----
+        If *prop* is passed as a keyword argument, but *fontproperties* is
+        not, then *prop* is assumed to be the intended *fontproperties*.
+ Using both *prop* and *fontproperties* is not supported.
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> import numpy as np
+        >>> from mpl_toolkits.axes_grid1.anchored_artists import (
+        ...     AnchoredSizeBar)
+ >>> fig, ax = plt.subplots()
+ >>> ax.imshow(np.random.random((10,10)))
+ >>> bar = AnchoredSizeBar(ax.transData, 3, '3 data units', 4)
+ >>> ax.add_artist(bar)
+ >>> fig.show()
+
+ Using all the optional parameters
+
+ >>> import matplotlib.font_manager as fm
+ >>> fontprops = fm.FontProperties(size=14, family='monospace')
+        >>> bar = AnchoredSizeBar(ax.transData, 3, '3 units', 4, pad=0.5,
+        ...                       sep=5, borderpad=0.5, frameon=False,
+        ...                       size_vertical=0.5, color='white',
+        ...                       fontproperties=fontprops)
+ """
+ if fill_bar is None:
+ fill_bar = size_vertical > 0
+
+ self.size_bar = AuxTransformBox(transform)
+ self.size_bar.add_artist(Rectangle((0, 0), size, size_vertical,
+ fill=fill_bar, facecolor=color,
+ edgecolor=color))
+
+ if fontproperties is None and 'prop' in kwargs:
+ fontproperties = kwargs.pop('prop')
+
+ if fontproperties is None:
+ textprops = {'color': color}
+ else:
+ textprops = {'color': color, 'fontproperties': fontproperties}
+
+ self.txt_label = TextArea(
+ label,
+ minimumdescent=False,
+ textprops=textprops)
+
+ if label_top:
+ _box_children = [self.txt_label, self.size_bar]
+ else:
+ _box_children = [self.size_bar, self.txt_label]
+
+ self._box = VPacker(children=_box_children,
+ align="center",
+ pad=0, sep=sep)
+
+ AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
+ child=self._box,
+ prop=fontproperties,
+ frameon=frameon, **kwargs)
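+
+
+# A minimal AnchoredEllipse sketch (assuming an existing axes ``ax``): anchor
+# an ellipse of 0.2 x 0.1 data units in the lower-left corner (loc=3).
+#
+#     from mpl_toolkits.axes_grid1.anchored_artists import AnchoredEllipse
+#     ae = AnchoredEllipse(ax.transData, width=0.2, height=0.1, angle=0,
+#                          loc=3, pad=0.5, borderpad=0.4, frameon=False)
+#     ax.add_artist(ae)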
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_divider.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_divider.py
new file mode 100644
index 00000000000..b238e73cc5e
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_divider.py
@@ -0,0 +1,975 @@
+"""
+The axes_divider module provides helper classes to adjust the positions of
+multiple axes at drawing time.
+
+ Divider: this is the class that is used to calculate the axes
+    position. It divides the given rectangular area into several
+    sub-rectangles. You initialize the divider by setting the horizontal
+ and vertical lists of sizes that the division will be based on. You
+ then use the new_locator method, whose return value is a callable
+ object that can be used to set the axes_locator of the axes.
+
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import map
+
+import matplotlib.transforms as mtransforms
+
+from matplotlib.axes import SubplotBase
+
+from . import axes_size as Size
+
+
+class Divider(object):
+ """
+ This class calculates the axes position. It
+ divides the given rectangular area into several
+ sub-rectangles. You initialize the divider by setting the
+ horizontal and vertical lists of sizes
+ (:mod:`mpl_toolkits.axes_grid.axes_size`) that the division will
+ be based on. You then use the new_locator method to create a
+ callable object that can be used as the axes_locator of the
+ axes.
+ """
+
+ def __init__(self, fig, pos, horizontal, vertical,
+ aspect=None, anchor="C"):
+ """
+ Parameters
+ ----------
+ fig : Figure
+ pos : tuple of 4 floats
+ position of the rectangle that will be divided
+ horizontal : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
+ sizes for horizontal division
+ vertical : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
+ sizes for vertical division
+        aspect : bool
+            if True, the overall rectangular area is reduced so that the
+            relative (scaled) parts of the horizontal and vertical sizes
+            are drawn at the same scale.
+ anchor : {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}
+ placement of the reduced rectangle when *aspect* is True
+ """
+
+ self._fig = fig
+ self._pos = pos
+ self._horizontal = horizontal
+ self._vertical = vertical
+ self._anchor = anchor
+ self._aspect = aspect
+ self._xrefindex = 0
+ self._yrefindex = 0
+ self._locator = None
+
+ def get_horizontal_sizes(self, renderer):
+ return [s.get_size(renderer) for s in self.get_horizontal()]
+
+ def get_vertical_sizes(self, renderer):
+ return [s.get_size(renderer) for s in self.get_vertical()]
+
+ def get_vsize_hsize(self):
+
+ from .axes_size import AddList
+
+ vsize = AddList(self.get_vertical())
+ hsize = AddList(self.get_horizontal())
+
+ return vsize, hsize
+
+ @staticmethod
+ def _calc_k(l, total_size):
+
+ rs_sum, as_sum = 0., 0.
+
+ for _rs, _as in l:
+ rs_sum += _rs
+ as_sum += _as
+
+ if rs_sum != 0.:
+ k = (total_size - as_sum) / rs_sum
+ return k
+ else:
+ return 0.
+
+ @staticmethod
+ def _calc_offsets(l, k):
+
+ offsets = [0.]
+
+ #for s in l:
+ for _rs, _as in l:
+ #_rs, _as = s.get_size(renderer)
+ offsets.append(offsets[-1] + _rs*k + _as)
+
+ return offsets
+
+ def set_position(self, pos):
+ """
+ set the position of the rectangle.
+
+ Parameters
+ ----------
+ pos : tuple of 4 floats
+ position of the rectangle that will be divided
+ """
+ self._pos = pos
+
+ def get_position(self):
+ "return the position of the rectangle."
+ return self._pos
+
+ def set_anchor(self, anchor):
+ """
+ Parameters
+ ----------
+ anchor : {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}
+ anchor position
+
+ ===== ============
+ value description
+ ===== ============
+ 'C' Center
+ 'SW' bottom left
+ 'S' bottom
+ 'SE' bottom right
+ 'E' right
+ 'NE' top right
+ 'N' top
+ 'NW' top left
+ 'W' left
+ ===== ============
+
+ """
+ if anchor in mtransforms.Bbox.coefs or len(anchor) == 2:
+ self._anchor = anchor
+ else:
+ raise ValueError('argument must be among %s' %
+                             ', '.join(mtransforms.Bbox.coefs))
+
+ def get_anchor(self):
+ "return the anchor"
+ return self._anchor
+
+ def set_horizontal(self, h):
+ """
+ Parameters
+ ----------
+ h : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
+ sizes for horizontal division
+ """
+ self._horizontal = h
+
+ def get_horizontal(self):
+ "return horizontal sizes"
+ return self._horizontal
+
+ def set_vertical(self, v):
+ """
+ Parameters
+ ----------
+ v : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
+ sizes for vertical division
+ """
+ self._vertical = v
+
+ def get_vertical(self):
+ "return vertical sizes"
+ return self._vertical
+
+ def set_aspect(self, aspect=False):
+ """
+ Parameters
+ ----------
+ aspect : bool
+ """
+ self._aspect = aspect
+
+ def get_aspect(self):
+ "return aspect"
+ return self._aspect
+
+ def set_locator(self, _locator):
+ self._locator = _locator
+
+ def get_locator(self):
+ return self._locator
+
+ def get_position_runtime(self, ax, renderer):
+ if self._locator is None:
+ return self.get_position()
+ else:
+ return self._locator(ax, renderer).bounds
+
+ def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
+ """
+ Parameters
+ ----------
+ nx, nx1 : int
+ Integers specifying the column-position of the
+ cell. When *nx1* is None, a single *nx*-th column is
+            specified. Otherwise, the location of columns spanning from *nx*
+            to *nx1* (but excluding the *nx1*-th column) is specified.
+ ny, ny1 : int
+ Same as *nx* and *nx1*, but for row positions.
+ axes
+ renderer
+ """
+
+ figW, figH = self._fig.get_size_inches()
+ x, y, w, h = self.get_position_runtime(axes, renderer)
+
+ hsizes = self.get_horizontal_sizes(renderer)
+ vsizes = self.get_vertical_sizes(renderer)
+ k_h = self._calc_k(hsizes, figW*w)
+ k_v = self._calc_k(vsizes, figH*h)
+
+ if self.get_aspect():
+ k = min(k_h, k_v)
+ ox = self._calc_offsets(hsizes, k)
+ oy = self._calc_offsets(vsizes, k)
+
+ ww = (ox[-1] - ox[0])/figW
+ hh = (oy[-1] - oy[0])/figH
+ pb = mtransforms.Bbox.from_bounds(x, y, w, h)
+ pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
+ pb1_anchored = pb1.anchored(self.get_anchor(), pb)
+ x0, y0 = pb1_anchored.x0, pb1_anchored.y0
+
+ else:
+ ox = self._calc_offsets(hsizes, k_h)
+ oy = self._calc_offsets(vsizes, k_v)
+ x0, y0 = x, y
+
+ if nx1 is None:
+ nx1 = nx+1
+ if ny1 is None:
+ ny1 = ny+1
+
+ x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
+ y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH
+
+ return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
+
+ def new_locator(self, nx, ny, nx1=None, ny1=None):
+ """
+ Returns a new locator
+ (:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
+ specified cell.
+
+ Parameters
+ ----------
+ nx, nx1 : int
+ Integers specifying the column-position of the
+ cell. When *nx1* is None, a single *nx*-th column is
+            specified. Otherwise, the location of columns spanning from *nx*
+            to *nx1* (but excluding the *nx1*-th column) is specified.
+ ny, ny1 : int
+ Same as *nx* and *nx1*, but for row positions.
+ """
+ return AxesLocator(self, nx, ny, nx1, ny1)
+
+ def append_size(self, position, size):
+
+ if position == "left":
+ self._horizontal.insert(0, size)
+ self._xrefindex += 1
+ elif position == "right":
+ self._horizontal.append(size)
+ elif position == "bottom":
+ self._vertical.insert(0, size)
+ self._yrefindex += 1
+ elif position == "top":
+ self._vertical.append(size)
+ else:
+ raise ValueError("the position must be one of left," +
+ " right, bottom, or top")
+
+ def add_auto_adjustable_area(self,
+ use_axes, pad=0.1,
+ adjust_dirs=None,
+ ):
+ if adjust_dirs is None:
+ adjust_dirs = ["left", "right", "bottom", "top"]
+ from .axes_size import Padded, SizeFromFunc, GetExtentHelper
+ for d in adjust_dirs:
+ helper = GetExtentHelper(use_axes, d)
+ size = SizeFromFunc(helper)
+ padded_size = Padded(size, pad) # pad in inch
+ self.append_size(d, padded_size)
+
+
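+# A minimal Divider sketch (assuming ``fig`` is an existing figure): fixed
+# margins around a scaled middle cell that absorbs the remaining space.
+#
+#     from mpl_toolkits.axes_grid1 import axes_size as Size
+#     horiz = [Size.Fixed(1.0), Size.Scaled(1.0), Size.Fixed(0.2)]
+#     vert = [Size.Fixed(0.7), Size.Scaled(1.0), Size.Fixed(0.5)]
+#     divider = Divider(fig, (0.0, 0.0, 1.0, 1.0), horiz, vert, aspect=False)
+#     ax = fig.add_axes(divider.get_position())
+#     ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
+
+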
+class AxesLocator(object):
+ """
+    A simple callable object, initialized with an AxesDivider instance,
+    that returns the position and size of the given cell.
+ """
+ def __init__(self, axes_divider, nx, ny, nx1=None, ny1=None):
+ """
+ Parameters
+ ----------
+ axes_divider : AxesDivider
+ nx, nx1 : int
+ Integers specifying the column-position of the
+ cell. When *nx1* is None, a single *nx*-th column is
+            specified. Otherwise, the location of columns spanning from *nx*
+            to *nx1* (but excluding the *nx1*-th column) is specified.
+ ny, ny1 : int
+ Same as *nx* and *nx1*, but for row positions.
+ """
+ self._axes_divider = axes_divider
+
+ _xrefindex = axes_divider._xrefindex
+ _yrefindex = axes_divider._yrefindex
+
+ self._nx, self._ny = nx - _xrefindex, ny - _yrefindex
+
+ if nx1 is None:
+ nx1 = nx+1
+ if ny1 is None:
+ ny1 = ny+1
+
+ self._nx1 = nx1 - _xrefindex
+ self._ny1 = ny1 - _yrefindex
+
+ def __call__(self, axes, renderer):
+
+ _xrefindex = self._axes_divider._xrefindex
+ _yrefindex = self._axes_divider._yrefindex
+
+ return self._axes_divider.locate(self._nx + _xrefindex,
+ self._ny + _yrefindex,
+ self._nx1 + _xrefindex,
+ self._ny1 + _yrefindex,
+ axes,
+ renderer)
+
+ def get_subplotspec(self):
+ if hasattr(self._axes_divider, "get_subplotspec"):
+ return self._axes_divider.get_subplotspec()
+ else:
+ return None
+
+
+from matplotlib.gridspec import SubplotSpec, GridSpec
+
+
+class SubplotDivider(Divider):
+ """
+ The Divider class whose rectangle area is specified as a subplot geometry.
+ """
+
+ def __init__(self, fig, *args, **kwargs):
+ """
+ Parameters
+ ----------
+ fig : :class:`matplotlib.figure.Figure`
+ args : tuple (*numRows*, *numCols*, *plotNum*)
+ The array of subplots in the figure has dimensions *numRows*,
+ *numCols*, and *plotNum* is the number of the subplot
+ being created. *plotNum* starts at 1 in the upper left
+ corner and increases to the right.
+
+            If *numRows*, *numCols* and *plotNum* are all single-digit
+            integers, *args* can be passed as the decimal integer
+            *numRows* * 100 + *numCols* * 10 + *plotNum*.
+ """
+
+ self.figure = fig
+
+ if len(args) == 1:
+ if isinstance(args[0], SubplotSpec):
+ self._subplotspec = args[0]
+ else:
+ try:
+ s = str(int(args[0]))
+ rows, cols, num = map(int, s)
+ except ValueError:
+ raise ValueError(
+ 'Single argument to subplot must be a 3-digit integer')
+ self._subplotspec = GridSpec(rows, cols)[num-1]
+ # num - 1 for converting from MATLAB to python indexing
+ elif len(args) == 3:
+ rows, cols, num = args
+ rows = int(rows)
+ cols = int(cols)
+ if isinstance(num, tuple) and len(num) == 2:
+ num = [int(n) for n in num]
+ self._subplotspec = GridSpec(rows, cols)[num[0]-1:num[1]]
+ else:
+ self._subplotspec = GridSpec(rows, cols)[int(num)-1]
+ # num - 1 for converting from MATLAB to python indexing
+ else:
+ raise ValueError('Illegal argument(s) to subplot: %s' % (args,))
+
+ # total = rows*cols
+ # num -= 1 # convert from matlab to python indexing
+ # # i.e., num in range(0,total)
+ # if num >= total:
+ # raise ValueError( 'Subplot number exceeds total subplots')
+ # self._rows = rows
+ # self._cols = cols
+ # self._num = num
+
+ # self.update_params()
+
+    # sets self.figbox
+ self.update_params()
+
+ pos = self.figbox.bounds
+
+ horizontal = kwargs.pop("horizontal", [])
+ vertical = kwargs.pop("vertical", [])
+ aspect = kwargs.pop("aspect", None)
+ anchor = kwargs.pop("anchor", "C")
+
+ if kwargs:
+ raise Exception("")
+
+ Divider.__init__(self, fig, pos, horizontal, vertical,
+ aspect=aspect, anchor=anchor)
+
+ def get_position(self):
+ "return the bounds of the subplot box"
+
+ self.update_params() # update self.figbox
+ return self.figbox.bounds
+
+ # def update_params(self):
+ # 'update the subplot position from fig.subplotpars'
+
+ # rows = self._rows
+ # cols = self._cols
+ # num = self._num
+
+ # pars = self.figure.subplotpars
+ # left = pars.left
+ # right = pars.right
+ # bottom = pars.bottom
+ # top = pars.top
+ # wspace = pars.wspace
+ # hspace = pars.hspace
+ # totWidth = right-left
+ # totHeight = top-bottom
+
+ # figH = totHeight/(rows + hspace*(rows-1))
+ # sepH = hspace*figH
+
+ # figW = totWidth/(cols + wspace*(cols-1))
+ # sepW = wspace*figW
+
+ # rowNum, colNum = divmod(num, cols)
+
+ # figBottom = top - (rowNum+1)*figH - rowNum*sepH
+ # figLeft = left + colNum*(figW + sepW)
+
+ # self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
+ # figW, figH)
+
+ def update_params(self):
+ 'update the subplot position from fig.subplotpars'
+
+ self.figbox = self.get_subplotspec().get_position(self.figure)
+
+ def get_geometry(self):
+ 'get the subplot geometry, e.g., 2,2,3'
+ rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
+ return rows, cols, num1+1 # for compatibility
+
+ # COVERAGE NOTE: Never used internally or from examples
+ def change_geometry(self, numrows, numcols, num):
+ 'change subplot geometry, e.g., from 1,1,1 to 2,2,3'
+ self._subplotspec = GridSpec(numrows, numcols)[num-1]
+ self.update_params()
+ self.set_position(self.figbox)
+
+ def get_subplotspec(self):
+ 'get the SubplotSpec instance'
+ return self._subplotspec
+
+ def set_subplotspec(self, subplotspec):
+ 'set the SubplotSpec instance'
+ self._subplotspec = subplotspec
+
+
+class AxesDivider(Divider):
+ """
+ Divider based on the pre-existing axes.
+ """
+
+ def __init__(self, axes, xref=None, yref=None):
+ """
+ Parameters
+ ----------
+ axes : :class:`~matplotlib.axes.Axes`
+ xref
+ yref
+ """
+ self._axes = axes
+ if xref is None:
+ self._xref = Size.AxesX(axes)
+ else:
+ self._xref = xref
+ if yref is None:
+ self._yref = Size.AxesY(axes)
+ else:
+ self._yref = yref
+
+ Divider.__init__(self, fig=axes.get_figure(), pos=None,
+ horizontal=[self._xref], vertical=[self._yref],
+ aspect=None, anchor="C")
+
+ def _get_new_axes(self, **kwargs):
+ axes = self._axes
+
+ axes_class = kwargs.pop("axes_class", None)
+
+ if axes_class is None:
+ if isinstance(axes, SubplotBase):
+ axes_class = axes._axes_class
+ else:
+ axes_class = type(axes)
+
+ ax = axes_class(axes.get_figure(),
+ axes.get_position(original=True), **kwargs)
+
+ return ax
+
+ def new_horizontal(self, size, pad=None, pack_start=False, **kwargs):
+ """
+ Add a new axes on the right (or left) side of the main axes.
+
+ Parameters
+ ----------
+ size : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or string
+            The width of the new axes. If a float or string is given, the
+            *from_any* function is used to create the size, with
+            *fraction_ref* set to the AxesX instance of the current axes.
+ pad : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or string
+            Pad between the axes. It accepts the same types of arguments
+            as *size*.
+ pack_start : bool
+ If False, the new axes is appended at the end
+            of the list, i.e., it becomes the right-most axes. If True, it is
+ inserted at the start of the list, and becomes the left-most axes.
+ kwargs
+            All extra keyword arguments are passed to the created axes.
+ If *axes_class* is given, the new axes will be created as an
+ instance of the given class. Otherwise, the same class of the
+ main axes will be used.
+ """
+
+ if pad:
+ if not isinstance(pad, Size._Base):
+ pad = Size.from_any(pad,
+ fraction_ref=self._xref)
+ if pack_start:
+ self._horizontal.insert(0, pad)
+ self._xrefindex += 1
+ else:
+ self._horizontal.append(pad)
+
+ if not isinstance(size, Size._Base):
+ size = Size.from_any(size,
+ fraction_ref=self._xref)
+
+ if pack_start:
+ self._horizontal.insert(0, size)
+ self._xrefindex += 1
+ locator = self.new_locator(nx=0, ny=self._yrefindex)
+ else:
+ self._horizontal.append(size)
+ locator = self.new_locator(nx=len(self._horizontal)-1, ny=self._yrefindex)
+
+ ax = self._get_new_axes(**kwargs)
+ ax.set_axes_locator(locator)
+
+ return ax
+
+ def new_vertical(self, size, pad=None, pack_start=False, **kwargs):
+ """
+ Add a new axes on the top (or bottom) side of the main axes.
+
+ Parameters
+ ----------
+ size : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or string
+            The height of the new axes. If a float or string is given, the
+            *from_any* function is used to create the size, with
+            *fraction_ref* set to the AxesY instance of the current axes.
+ pad : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or string
+            Pad between the axes. It accepts the same types of arguments
+            as *size*.
+ pack_start : bool
+ If False, the new axes is appended at the end
+            of the list, i.e., it becomes the top-most axes. If True, it is
+            inserted at the start of the list, and becomes the bottom-most axes.
+ kwargs
+            All extra keyword arguments are passed to the created axes.
+ If *axes_class* is given, the new axes will be created as an
+ instance of the given class. Otherwise, the same class of the
+ main axes will be used.
+ """
+
+ if pad:
+ if not isinstance(pad, Size._Base):
+ pad = Size.from_any(pad,
+ fraction_ref=self._yref)
+ if pack_start:
+ self._vertical.insert(0, pad)
+ self._yrefindex += 1
+ else:
+ self._vertical.append(pad)
+
+ if not isinstance(size, Size._Base):
+ size = Size.from_any(size,
+ fraction_ref=self._yref)
+
+ if pack_start:
+ self._vertical.insert(0, size)
+ self._yrefindex += 1
+ locator = self.new_locator(nx=self._xrefindex, ny=0)
+ else:
+ self._vertical.append(size)
+ locator = self.new_locator(nx=self._xrefindex, ny=len(self._vertical)-1)
+
+ ax = self._get_new_axes(**kwargs)
+ ax.set_axes_locator(locator)
+
+ return ax
+
+ def append_axes(self, position, size, pad=None, add_to_figure=True,
+ **kwargs):
+ """
+        Create an axes at the given *position* with the same height
+        (or width) as the main axes.
+
+ *position*
+ ["left"|"right"|"bottom"|"top"]
+
+ *size* and *pad* should be axes_grid.axes_size compatible.
+ """
+
+ if position == "left":
+ ax = self.new_horizontal(size, pad, pack_start=True, **kwargs)
+ elif position == "right":
+ ax = self.new_horizontal(size, pad, pack_start=False, **kwargs)
+ elif position == "bottom":
+ ax = self.new_vertical(size, pad, pack_start=True, **kwargs)
+ elif position == "top":
+ ax = self.new_vertical(size, pad, pack_start=False, **kwargs)
+ else:
+ raise ValueError("the position must be one of left," +
+ " right, bottom, or top")
+
+ if add_to_figure:
+ self._fig.add_axes(ax)
+ return ax
+
+ def get_aspect(self):
+ if self._aspect is None:
+ aspect = self._axes.get_aspect()
+ if aspect == "auto":
+ return False
+ else:
+ return True
+ else:
+ return self._aspect
+
+ def get_position(self):
+ if self._pos is None:
+ bbox = self._axes.get_position(original=True)
+ return bbox.bounds
+ else:
+ return self._pos
+
+ def get_anchor(self):
+ if self._anchor is None:
+ return self._axes.get_anchor()
+ else:
+ return self._anchor
+
+ def get_subplotspec(self):
+ if hasattr(self._axes, "get_subplotspec"):
+ return self._axes.get_subplotspec()
+ else:
+ return None
+
+
+class HBoxDivider(SubplotDivider):
+
+ def __init__(self, fig, *args, **kwargs):
+ SubplotDivider.__init__(self, fig, *args, **kwargs)
+
+ @staticmethod
+ def _determine_karray(equivalent_sizes, appended_sizes,
+ max_equivalent_size,
+ total_appended_size):
+
+ n = len(equivalent_sizes)
+ import numpy as np
+ A = np.mat(np.zeros((n+1, n+1), dtype="d"))
+ B = np.zeros((n+1), dtype="d")
+ # AxK = B
+
+        # populate A
+ for i, (r, a) in enumerate(equivalent_sizes):
+ A[i, i] = r
+ A[i, -1] = -1
+ B[i] = -a
+ A[-1, :-1] = [r for r, a in appended_sizes]
+ B[-1] = total_appended_size - sum([a for rs, a in appended_sizes])
+
+ karray_H = (A.I*np.mat(B).T).A1
+ karray = karray_H[:-1]
+ H = karray_H[-1]
+
+ if H > max_equivalent_size:
+ karray = ((max_equivalent_size -
+ np.array([a for r, a in equivalent_sizes]))
+ / np.array([r for r, a in equivalent_sizes]))
+ return karray
+
+ @staticmethod
+ def _calc_offsets(appended_sizes, karray):
+ offsets = [0.]
+
+ #for s in l:
+ for (r, a), k in zip(appended_sizes, karray):
+ offsets.append(offsets[-1] + r*k + a)
+
+ return offsets
+
+ def new_locator(self, nx, nx1=None):
+ """
+ returns a new locator
+ (:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
+ specified cell.
+
+ Parameters
+ ----------
+ nx, nx1 : int
+ Integers specifying the column-position of the
+ cell. When *nx1* is None, a single *nx*-th column is
+            specified. Otherwise, the location of columns spanning from *nx*
+            to *nx1* (but excluding the *nx1*-th column) is specified.
+ """
+ return AxesLocator(self, nx, 0, nx1, None)
+
+ def _locate(self, x, y, w, h,
+ y_equivalent_sizes, x_appended_sizes,
+ figW, figH):
+ """
+ Parameters
+ ----------
+ x
+ y
+ w
+ h
+ y_equivalent_sizes
+ x_appended_sizes
+ figW
+ figH
+ """
+
+ equivalent_sizes = y_equivalent_sizes
+ appended_sizes = x_appended_sizes
+
+ max_equivalent_size = figH*h
+ total_appended_size = figW*w
+ karray = self._determine_karray(equivalent_sizes, appended_sizes,
+ max_equivalent_size,
+ total_appended_size)
+
+ ox = self._calc_offsets(appended_sizes, karray)
+
+ ww = (ox[-1] - ox[0])/figW
+ ref_h = equivalent_sizes[0]
+ hh = (karray[0]*ref_h[0] + ref_h[1])/figH
+ pb = mtransforms.Bbox.from_bounds(x, y, w, h)
+ pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
+ pb1_anchored = pb1.anchored(self.get_anchor(), pb)
+ x0, y0 = pb1_anchored.x0, pb1_anchored.y0
+
+ return x0, y0, ox, hh
+
+ def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
+ """
+ Parameters
+ ----------
+ nx, nx1 : int
+ Integers specifying the column-position of the
+ cell. When *nx1* is None, a single *nx*-th column is
+            specified. Otherwise, the location of columns spanning from *nx*
+            to *nx1* (but excluding the *nx1*-th column) is specified.
+ ny, ny1 : int
+ Same as *nx* and *nx1*, but for row positions.
+ axes
+ renderer
+ """
+
+ figW, figH = self._fig.get_size_inches()
+ x, y, w, h = self.get_position_runtime(axes, renderer)
+
+ y_equivalent_sizes = self.get_vertical_sizes(renderer)
+ x_appended_sizes = self.get_horizontal_sizes(renderer)
+ x0, y0, ox, hh = self._locate(x, y, w, h,
+ y_equivalent_sizes, x_appended_sizes,
+ figW, figH)
+ if nx1 is None:
+ nx1 = nx+1
+
+ x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
+ y1, h1 = y0, hh
+
+ return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
+
+
+class VBoxDivider(HBoxDivider):
+ """
+    A :class:`HBoxDivider` variant that stacks its cells vertically
+    instead of horizontally.
+ """
+
+ def new_locator(self, ny, ny1=None):
+ """
+ returns a new locator
+ (:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
+ specified cell.
+
+ Parameters
+ ----------
+ ny, ny1 : int
+ Integers specifying the row-position of the
+ cell. When *ny1* is None, a single *ny*-th row is
+            specified. Otherwise, the location of rows spanning from *ny*
+            to *ny1* (but excluding the *ny1*-th row) is specified.
+ """
+ return AxesLocator(self, 0, ny, None, ny1)
+
+ def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
+ """
+ Parameters
+ ----------
+ nx, nx1 : int
+ Integers specifying the column-position of the
+ cell. When *nx1* is None, a single *nx*-th column is
+            specified. Otherwise, the location of columns spanning from *nx*
+            to *nx1* (but excluding the *nx1*-th column) is specified.
+ ny, ny1 : int
+ Same as *nx* and *nx1*, but for row positions.
+ axes
+ renderer
+ """
+
+ figW, figH = self._fig.get_size_inches()
+ x, y, w, h = self.get_position_runtime(axes, renderer)
+
+ x_equivalent_sizes = self.get_horizontal_sizes(renderer)
+ y_appended_sizes = self.get_vertical_sizes(renderer)
+
+ y0, x0, oy, ww = self._locate(y, x, h, w,
+ x_equivalent_sizes, y_appended_sizes,
+ figH, figW)
+ if ny1 is None:
+ ny1 = ny+1
+
+ x1, w1 = x0, ww
+ y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH
+
+ return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
+
+
+class LocatableAxesBase(object):
+ def __init__(self, *kl, **kw):
+
+ self._axes_class.__init__(self, *kl, **kw)
+
+ self._locator = None
+ self._locator_renderer = None
+
+ def set_axes_locator(self, locator):
+ self._locator = locator
+
+ def get_axes_locator(self):
+ return self._locator
+
+ def apply_aspect(self, position=None):
+
+ if self.get_axes_locator() is None:
+ self._axes_class.apply_aspect(self, position)
+ else:
+ pos = self.get_axes_locator()(self, self._locator_renderer)
+ self._axes_class.apply_aspect(self, position=pos)
+
+ def draw(self, renderer=None, inframe=False):
+
+ self._locator_renderer = renderer
+
+ self._axes_class.draw(self, renderer, inframe)
+
+ def _make_twin_axes(self, *kl, **kwargs):
+ """
+ Need to overload so that twinx/twiny will work with
+ these axes.
+ """
+ if 'sharex' in kwargs and 'sharey' in kwargs:
+ raise ValueError("Twinned Axes may share only one axis.")
+ ax2 = type(self)(self.figure, self.get_position(True), *kl, **kwargs)
+ ax2.set_axes_locator(self.get_axes_locator())
+ self.figure.add_axes(ax2)
+ self.set_adjustable('datalim')
+ ax2.set_adjustable('datalim')
+ self._twinned_axes.join(self, ax2)
+ return ax2
+
+_locatableaxes_classes = {}
+
+
+def locatable_axes_factory(axes_class):
+
+ new_class = _locatableaxes_classes.get(axes_class)
+ if new_class is None:
+ new_class = type(str("Locatable%s" % (axes_class.__name__)),
+ (LocatableAxesBase, axes_class),
+ {'_axes_class': axes_class})
+
+ _locatableaxes_classes[axes_class] = new_class
+
+ return new_class
+
+#if hasattr(maxes.Axes, "get_axes_locator"):
+# LocatableAxes = maxes.Axes
+#else:
+
+
+def make_axes_locatable(axes):
+ if not hasattr(axes, "set_axes_locator"):
+ new_class = locatable_axes_factory(type(axes))
+ axes.__class__ = new_class
+
+ divider = AxesDivider(axes)
+ locator = divider.new_locator(nx=0, ny=0)
+ axes.set_axes_locator(locator)
+
+ return divider
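+
+
+# A minimal make_axes_locatable sketch (assuming ``fig``/``ax`` hold an image
+# plot and ``im`` is the mappable returned by ``ax.imshow``): attach a
+# colorbar axes of 5% width to the right of ``ax``.
+#
+#     divider = make_axes_locatable(ax)
+#     cax = divider.append_axes("right", size="5%", pad=0.05)
+#     fig.colorbar(im, cax=cax)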
+
+
+def make_axes_area_auto_adjustable(ax,
+ use_axes=None, pad=0.1,
+ adjust_dirs=None):
+ if adjust_dirs is None:
+ adjust_dirs = ["left", "right", "bottom", "top"]
+ divider = make_axes_locatable(ax)
+
+ if use_axes is None:
+ use_axes = ax
+
+ divider.add_auto_adjustable_area(use_axes=use_axes, pad=pad,
+ adjust_dirs=adjust_dirs)
+
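+
+# A minimal sketch (assuming an axes ``ax`` whose tick labels run off the
+# figure): shrink the drawing area so the labels fit.
+#
+#     make_axes_area_auto_adjustable(ax, pad=0.1,
+#                                    adjust_dirs=["left", "bottom"])
+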
+#from matplotlib.axes import Axes
+from .mpl_axes import Axes
+LocatableAxes = locatable_axes_factory(Axes)
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_grid.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_grid.py
new file mode 100644
index 00000000000..dde0e8dd7cc
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_grid.py
@@ -0,0 +1,771 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import matplotlib.axes as maxes
+import matplotlib.cbook as cbook
+import matplotlib.ticker as ticker
+from matplotlib.gridspec import SubplotSpec
+
+from .axes_divider import Size, SubplotDivider, LocatableAxes, Divider
+from .colorbar import Colorbar
+
+
+def _extend_axes_pad(value):
+ # Check whether a list/tuple/array or scalar has been passed
+ ret = value
+ if not hasattr(ret, "__getitem__"):
+ ret = (value, value)
+ return ret
+
+
+def _tick_only(ax, bottom_on, left_on):
+ bottom_off = not bottom_on
+ left_off = not left_on
+ # [l.set_visible(bottom_off) for l in ax.get_xticklabels()]
+ # [l.set_visible(left_off) for l in ax.get_yticklabels()]
+ # ax.xaxis.label.set_visible(bottom_off)
+ # ax.yaxis.label.set_visible(left_off)
+ ax.axis["bottom"].toggle(ticklabels=bottom_off, label=bottom_off)
+ ax.axis["left"].toggle(ticklabels=left_off, label=left_off)
+
+
+class CbarAxesBase(object):
+
+ def colorbar(self, mappable, **kwargs):
+ locator = kwargs.pop("locator", None)
+
+ if locator is None:
+ if "ticks" not in kwargs:
+ kwargs["ticks"] = ticker.MaxNLocator(5)
+ if locator is not None:
+ if "ticks" in kwargs:
+                raise ValueError("Either *locator* or *ticks* may be" +
+                                 " given, not both")
+ else:
+ kwargs["ticks"] = locator
+
+ self._hold = True
+ if self.orientation in ["top", "bottom"]:
+ orientation = "horizontal"
+ else:
+ orientation = "vertical"
+
+ cb = Colorbar(self, mappable, orientation=orientation, **kwargs)
+ self._config_axes()
+
+ def on_changed(m):
+ cb.set_cmap(m.get_cmap())
+ cb.set_clim(m.get_clim())
+ cb.update_bruteforce(m)
+
+ self.cbid = mappable.callbacksSM.connect('changed', on_changed)
+ mappable.colorbar = cb
+
+ self.locator = cb.cbar_axis.get_major_locator()
+
+ return cb
+
+ def _config_axes(self):
+ '''
+ Make an axes patch and outline.
+ '''
+ ax = self
+ ax.set_navigate(False)
+
+ ax.axis[:].toggle(all=False)
+ b = self._default_label_on
+ ax.axis[self.orientation].toggle(all=b)
+
+ # for axis in ax.axis.values():
+ # axis.major_ticks.set_visible(False)
+ # axis.minor_ticks.set_visible(False)
+ # axis.major_ticklabels.set_visible(False)
+ # axis.minor_ticklabels.set_visible(False)
+ # axis.label.set_visible(False)
+
+ # axis = ax.axis[self.orientation]
+ # axis.major_ticks.set_visible(True)
+ # axis.minor_ticks.set_visible(True)
+
+ #axis.major_ticklabels.set_size(
+ # int(axis.major_ticklabels.get_size()*.9))
+ #axis.major_tick_pad = 3
+
+ # axis.major_ticklabels.set_visible(b)
+ # axis.minor_ticklabels.set_visible(b)
+ # axis.label.set_visible(b)
+
+ def toggle_label(self, b):
+ self._default_label_on = b
+ axis = self.axis[self.orientation]
+ axis.toggle(ticklabels=b, label=b)
+ #axis.major_ticklabels.set_visible(b)
+ #axis.minor_ticklabels.set_visible(b)
+ #axis.label.set_visible(b)
+
+
+class CbarAxes(CbarAxesBase, LocatableAxes):
+ def __init__(self, *kl, **kwargs):
+ orientation = kwargs.pop("orientation", None)
+ if orientation is None:
+ raise ValueError("orientation must be specified")
+ self.orientation = orientation
+ self._default_label_on = True
+ self.locator = None
+
+ super(LocatableAxes, self).__init__(*kl, **kwargs)
+
+ def cla(self):
+ super(LocatableAxes, self).cla()
+ self._config_axes()
+
+
+class Grid(object):
+ """
+ A class that creates a grid of Axes. In matplotlib, the axes
+ location (and size) is specified in the normalized figure
+    coordinates. This may not be ideal for images that need to be
+    displayed with a given aspect ratio. For example, displaying
+    images of the same size with some fixed padding between them cannot
+    be easily done in matplotlib. AxesGrid can be used in such cases.
+ """
+
+ _defaultLocatableAxesClass = LocatableAxes
+
+ def __init__(self, fig,
+ rect,
+ nrows_ncols,
+ ngrids=None,
+ direction="row",
+ axes_pad=0.02,
+ add_all=True,
+ share_all=False,
+ share_x=True,
+ share_y=True,
+ #aspect=True,
+ label_mode="L",
+ axes_class=None,
+ ):
+ """
+ Build an :class:`Grid` instance with a grid nrows*ncols
+ :class:`~matplotlib.axes.Axes` in
+ :class:`~matplotlib.figure.Figure` *fig* with
+ *rect=[left, bottom, width, height]* (in
+ :class:`~matplotlib.figure.Figure` coordinates) or
+ the subplot position code (e.g., "121").
+
+ Optional keyword arguments:
+
+ ================ ======== =========================================
+ Keyword Default Description
+ ================ ======== =========================================
+ direction "row" [ "row" | "column" ]
+ axes_pad 0.02 float| pad between axes given in inches
+ or tuple-like of floats,
+ (horizontal padding, vertical padding)
+ add_all True bool
+ share_all False bool
+ share_x True bool
+ share_y True bool
+ label_mode "L" [ "L" | "1" | "all" ]
+ axes_class None a type object which must be a subclass
+ of :class:`~matplotlib.axes.Axes`
+ ================ ======== =========================================
+ """
+ self._nrows, self._ncols = nrows_ncols
+
+ if ngrids is None:
+ ngrids = self._nrows * self._ncols
+ else:
+ if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
+ raise Exception("")
+
+ self.ngrids = ngrids
+
+ self._init_axes_pad(axes_pad)
+
+ if direction not in ["column", "row"]:
+ raise Exception("")
+
+ self._direction = direction
+
+ if axes_class is None:
+ axes_class = self._defaultLocatableAxesClass
+ axes_class_args = {}
+ else:
+ if (type(axes_class)) == type and \
+ issubclass(axes_class,
+ self._defaultLocatableAxesClass.Axes):
+ axes_class_args = {}
+ else:
+ axes_class, axes_class_args = axes_class
+
+ self.axes_all = []
+ self.axes_column = [[] for _ in range(self._ncols)]
+ self.axes_row = [[] for _ in range(self._nrows)]
+
+ h = []
+ v = []
+ if isinstance(rect, six.string_types) or cbook.is_numlike(rect):
+ self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
+ aspect=False)
+ elif isinstance(rect, SubplotSpec):
+ self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
+ aspect=False)
+ elif len(rect) == 3:
+ kw = dict(horizontal=h, vertical=v, aspect=False)
+ self._divider = SubplotDivider(fig, *rect, **kw)
+ elif len(rect) == 4:
+ self._divider = Divider(fig, rect, horizontal=h, vertical=v,
+ aspect=False)
+ else:
+ raise Exception("")
+
+ rect = self._divider.get_position()
+
+ # reference axes
+ self._column_refax = [None for _ in range(self._ncols)]
+ self._row_refax = [None for _ in range(self._nrows)]
+ self._refax = None
+
+ for i in range(self.ngrids):
+
+ col, row = self._get_col_row(i)
+
+ if share_all:
+ sharex = self._refax
+ sharey = self._refax
+ else:
+ if share_x:
+ sharex = self._column_refax[col]
+ else:
+ sharex = None
+
+ if share_y:
+ sharey = self._row_refax[row]
+ else:
+ sharey = None
+
+ ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
+ **axes_class_args)
+
+ if share_all:
+ if self._refax is None:
+ self._refax = ax
+ else:
+ if sharex is None:
+ self._column_refax[col] = ax
+ if sharey is None:
+ self._row_refax[row] = ax
+
+ self.axes_all.append(ax)
+ self.axes_column[col].append(ax)
+ self.axes_row[row].append(ax)
+
+ self.axes_llc = self.axes_column[0][-1]
+
+ self._update_locators()
+
+ if add_all:
+ for ax in self.axes_all:
+ fig.add_axes(ax)
+
+ self.set_label_mode(label_mode)
+
+ def _init_axes_pad(self, axes_pad):
+ axes_pad = _extend_axes_pad(axes_pad)
+ self._axes_pad = axes_pad
+
+ self._horiz_pad_size = Size.Fixed(axes_pad[0])
+ self._vert_pad_size = Size.Fixed(axes_pad[1])
+
+ def _update_locators(self):
+
+ h = []
+
+ h_ax_pos = []
+
+ for _ in self._column_refax:
+ #if h: h.append(Size.Fixed(self._axes_pad))
+ if h:
+ h.append(self._horiz_pad_size)
+
+ h_ax_pos.append(len(h))
+
+ sz = Size.Scaled(1)
+ h.append(sz)
+
+ v = []
+
+ v_ax_pos = []
+ for _ in self._row_refax[::-1]:
+ #if v: v.append(Size.Fixed(self._axes_pad))
+ if v:
+ v.append(self._vert_pad_size)
+
+ v_ax_pos.append(len(v))
+ sz = Size.Scaled(1)
+ v.append(sz)
+
+ for i in range(self.ngrids):
+ col, row = self._get_col_row(i)
+ locator = self._divider.new_locator(nx=h_ax_pos[col],
+ ny=v_ax_pos[self._nrows - 1 - row])
+ self.axes_all[i].set_axes_locator(locator)
+
+ self._divider.set_horizontal(h)
+ self._divider.set_vertical(v)
+
+ def _get_col_row(self, n):
+ if self._direction == "column":
+ col, row = divmod(n, self._nrows)
+ else:
+ row, col = divmod(n, self._ncols)
+
+ return col, row
+
+ # Good to propagate __len__ if we have __getitem__
+ def __len__(self):
+ return len(self.axes_all)
+
+ def __getitem__(self, i):
+ return self.axes_all[i]
+
+ def get_geometry(self):
+ """
+        get geometry of the grid. Returns a tuple of two integers,
+        representing the number of rows and number of columns.
+ """
+ return self._nrows, self._ncols
+
+ def set_axes_pad(self, axes_pad):
+ "set axes_pad"
+ self._axes_pad = axes_pad
+
+ # These two lines actually differ from ones in _init_axes_pad
+ self._horiz_pad_size.fixed_size = axes_pad[0]
+ self._vert_pad_size.fixed_size = axes_pad[1]
+
+ def get_axes_pad(self):
+ """
+ get axes_pad
+
+ Returns
+ -------
+ tuple
+ Padding in inches, (horizontal pad, vertical pad)
+ """
+ return self._axes_pad
+
+ def set_aspect(self, aspect):
+ "set aspect"
+ self._divider.set_aspect(aspect)
+
+ def get_aspect(self):
+ "get aspect"
+ return self._divider.get_aspect()
+
+ def set_label_mode(self, mode):
+ "set label_mode"
+ if mode == "all":
+ for ax in self.axes_all:
+ _tick_only(ax, False, False)
+ elif mode == "L":
+ # left-most axes
+ for ax in self.axes_column[0][:-1]:
+ _tick_only(ax, bottom_on=True, left_on=False)
+ # lower-left axes
+ ax = self.axes_column[0][-1]
+ _tick_only(ax, bottom_on=False, left_on=False)
+
+ for col in self.axes_column[1:]:
+ # axes with no labels
+ for ax in col[:-1]:
+ _tick_only(ax, bottom_on=True, left_on=True)
+
+ # bottom
+ ax = col[-1]
+ _tick_only(ax, bottom_on=False, left_on=True)
+
+ elif mode == "1":
+ for ax in self.axes_all:
+ _tick_only(ax, bottom_on=True, left_on=True)
+
+ ax = self.axes_llc
+ _tick_only(ax, bottom_on=False, left_on=False)
+
+ def get_divider(self):
+ return self._divider
+
+ def set_axes_locator(self, locator):
+ self._divider.set_locator(locator)
+
+ def get_axes_locator(self):
+ return self._divider.get_locator()
+
+ def get_vsize_hsize(self):
+
+ return self._divider.get_vsize_hsize()
+# from axes_size import AddList
+
+# vsize = AddList(self._divider.get_vertical())
+# hsize = AddList(self._divider.get_horizontal())
+
+# return vsize, hsize
+
+
+class ImageGrid(Grid):
+ """
+ A class that creates a grid of Axes. In matplotlib, the axes
+ location (and size) is specified in the normalized figure
+    coordinates. This may not be ideal for images that need to be
+    displayed with a given aspect ratio. For example, displaying
+    images of the same size with some fixed padding between them cannot
+    be easily done in matplotlib. ImageGrid can be used in such cases.
+ """
+
+ _defaultCbarAxesClass = CbarAxes
+
+ def __init__(self, fig,
+ rect,
+ nrows_ncols,
+ ngrids=None,
+ direction="row",
+ axes_pad=0.02,
+ add_all=True,
+ share_all=False,
+ aspect=True,
+ label_mode="L",
+ cbar_mode=None,
+ cbar_location="right",
+ cbar_pad=None,
+ cbar_size="5%",
+ cbar_set_cax=True,
+ axes_class=None,
+ ):
+ """
+ Build an :class:`ImageGrid` instance with a grid nrows*ncols
+ :class:`~matplotlib.axes.Axes` in
+ :class:`~matplotlib.figure.Figure` *fig* with
+ *rect=[left, bottom, width, height]* (in
+ :class:`~matplotlib.figure.Figure` coordinates) or
+ the subplot position code (e.g., "121").
+
+ Optional keyword arguments:
+
+ ================ ======== =========================================
+ Keyword Default Description
+ ================ ======== =========================================
+ direction "row" [ "row" | "column" ]
+ axes_pad 0.02 float| pad between axes given in inches
+ or tuple-like of floats,
+ (horizontal padding, vertical padding)
+ add_all True bool
+ share_all False bool
+ aspect True bool
+ label_mode "L" [ "L" | "1" | "all" ]
+ cbar_mode None [ "each" | "single" | "edge" ]
+ cbar_location "right" [ "left" | "right" | "bottom" | "top" ]
+ cbar_pad None
+ cbar_size "5%"
+ cbar_set_cax True bool
+ axes_class None a type object which must be a subclass
+ of axes_grid's subclass of
+ :class:`~matplotlib.axes.Axes`
+ ================ ======== =========================================
+
+        *cbar_set_cax* : if True, each axes in the grid has a cax
+            attribute that is bound to its associated cbar_axes.
+ """
+ self._nrows, self._ncols = nrows_ncols
+
+ if ngrids is None:
+ ngrids = self._nrows * self._ncols
+ else:
+            if not 0 < ngrids <= self._nrows * self._ncols:
+ raise Exception
+
+ self.ngrids = ngrids
+
+ axes_pad = _extend_axes_pad(axes_pad)
+ self._axes_pad = axes_pad
+
+ self._colorbar_mode = cbar_mode
+ self._colorbar_location = cbar_location
+ if cbar_pad is None:
+ # horizontal or vertical arrangement?
+ if cbar_location in ("left", "right"):
+ self._colorbar_pad = axes_pad[0]
+ else:
+ self._colorbar_pad = axes_pad[1]
+ else:
+ self._colorbar_pad = cbar_pad
+
+ self._colorbar_size = cbar_size
+
+ self._init_axes_pad(axes_pad)
+
+ if direction not in ["column", "row"]:
+ raise Exception("")
+
+ self._direction = direction
+
+ if axes_class is None:
+ axes_class = self._defaultLocatableAxesClass
+ axes_class_args = {}
+ else:
+ if isinstance(axes_class, maxes.Axes):
+ axes_class_args = {}
+ else:
+ axes_class, axes_class_args = axes_class
+
+ self.axes_all = []
+ self.axes_column = [[] for _ in range(self._ncols)]
+ self.axes_row = [[] for _ in range(self._nrows)]
+
+ self.cbar_axes = []
+
+ h = []
+ v = []
+ if isinstance(rect, six.string_types) or cbook.is_numlike(rect):
+ self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
+ aspect=aspect)
+ elif isinstance(rect, SubplotSpec):
+ self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
+ aspect=aspect)
+ elif len(rect) == 3:
+ kw = dict(horizontal=h, vertical=v, aspect=aspect)
+ self._divider = SubplotDivider(fig, *rect, **kw)
+ elif len(rect) == 4:
+ self._divider = Divider(fig, rect, horizontal=h, vertical=v,
+ aspect=aspect)
+ else:
+ raise Exception("")
+
+ rect = self._divider.get_position()
+
+ # reference axes
+ self._column_refax = [None for _ in range(self._ncols)]
+ self._row_refax = [None for _ in range(self._nrows)]
+ self._refax = None
+
+ for i in range(self.ngrids):
+
+ col, row = self._get_col_row(i)
+
+ if share_all:
+ if self.axes_all:
+ sharex = self.axes_all[0]
+ sharey = self.axes_all[0]
+ else:
+ sharex = None
+ sharey = None
+ else:
+ sharex = self._column_refax[col]
+ sharey = self._row_refax[row]
+
+ ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
+ **axes_class_args)
+
+ self.axes_all.append(ax)
+ self.axes_column[col].append(ax)
+ self.axes_row[row].append(ax)
+
+ if share_all:
+ if self._refax is None:
+ self._refax = ax
+ if sharex is None:
+ self._column_refax[col] = ax
+ if sharey is None:
+ self._row_refax[row] = ax
+
+ cax = self._defaultCbarAxesClass(fig, rect,
+ orientation=self._colorbar_location)
+ self.cbar_axes.append(cax)
+
+ self.axes_llc = self.axes_column[0][-1]
+
+ self._update_locators()
+
+ if add_all:
+ for ax in self.axes_all+self.cbar_axes:
+ fig.add_axes(ax)
+
+ if cbar_set_cax:
+ if self._colorbar_mode == "single":
+ for ax in self.axes_all:
+ ax.cax = self.cbar_axes[0]
+ elif self._colorbar_mode == "edge":
+ for index, ax in enumerate(self.axes_all):
+ col, row = self._get_col_row(index)
+ if self._colorbar_location in ("left", "right"):
+ ax.cax = self.cbar_axes[row]
+ else:
+ ax.cax = self.cbar_axes[col]
+ else:
+ for ax, cax in zip(self.axes_all, self.cbar_axes):
+ ax.cax = cax
+
+ self.set_label_mode(label_mode)
+
+ def _update_locators(self):
+
+ h = []
+ v = []
+
+ h_ax_pos = []
+ h_cb_pos = []
+ if (self._colorbar_mode == "single" and
+ self._colorbar_location in ('left', 'bottom')):
+ if self._colorbar_location == "left":
+ #sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
+ sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
+ h.append(Size.from_any(self._colorbar_size, sz))
+ h.append(Size.from_any(self._colorbar_pad, sz))
+ locator = self._divider.new_locator(nx=0, ny=0, ny1=-1)
+ elif self._colorbar_location == "bottom":
+ #sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
+ sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
+ v.append(Size.from_any(self._colorbar_size, sz))
+ v.append(Size.from_any(self._colorbar_pad, sz))
+ locator = self._divider.new_locator(nx=0, nx1=-1, ny=0)
+ for i in range(self.ngrids):
+ self.cbar_axes[i].set_visible(False)
+ self.cbar_axes[0].set_axes_locator(locator)
+ self.cbar_axes[0].set_visible(True)
+
+ for col, ax in enumerate(self.axes_row[0]):
+ if h:
+ h.append(self._horiz_pad_size) # Size.Fixed(self._axes_pad))
+
+ if ax:
+ sz = Size.AxesX(ax, aspect="axes", ref_ax=self.axes_all[0])
+ else:
+ sz = Size.AxesX(self.axes_all[0],
+ aspect="axes", ref_ax=self.axes_all[0])
+
+ if (self._colorbar_mode == "each" or
+ (self._colorbar_mode == 'edge' and
+ col == 0)) and self._colorbar_location == "left":
+ h_cb_pos.append(len(h))
+ h.append(Size.from_any(self._colorbar_size, sz))
+ h.append(Size.from_any(self._colorbar_pad, sz))
+
+ h_ax_pos.append(len(h))
+
+ h.append(sz)
+
+ if ((self._colorbar_mode == "each" or
+ (self._colorbar_mode == 'edge' and
+ col == self._ncols - 1)) and
+ self._colorbar_location == "right"):
+ h.append(Size.from_any(self._colorbar_pad, sz))
+ h_cb_pos.append(len(h))
+ h.append(Size.from_any(self._colorbar_size, sz))
+
+ v_ax_pos = []
+ v_cb_pos = []
+ for row, ax in enumerate(self.axes_column[0][::-1]):
+ if v:
+ v.append(self._vert_pad_size) # Size.Fixed(self._axes_pad))
+
+ if ax:
+ sz = Size.AxesY(ax, aspect="axes", ref_ax=self.axes_all[0])
+ else:
+ sz = Size.AxesY(self.axes_all[0],
+ aspect="axes", ref_ax=self.axes_all[0])
+
+ if (self._colorbar_mode == "each" or
+ (self._colorbar_mode == 'edge' and
+ row == 0)) and self._colorbar_location == "bottom":
+ v_cb_pos.append(len(v))
+ v.append(Size.from_any(self._colorbar_size, sz))
+ v.append(Size.from_any(self._colorbar_pad, sz))
+
+ v_ax_pos.append(len(v))
+ v.append(sz)
+
+ if ((self._colorbar_mode == "each" or
+ (self._colorbar_mode == 'edge' and
+ row == self._nrows - 1)) and
+ self._colorbar_location == "top"):
+ v.append(Size.from_any(self._colorbar_pad, sz))
+ v_cb_pos.append(len(v))
+ v.append(Size.from_any(self._colorbar_size, sz))
+
+ for i in range(self.ngrids):
+ col, row = self._get_col_row(i)
+ #locator = self._divider.new_locator(nx=4*col,
+ # ny=2*(self._nrows - row - 1))
+ locator = self._divider.new_locator(nx=h_ax_pos[col],
+ ny=v_ax_pos[self._nrows-1-row])
+ self.axes_all[i].set_axes_locator(locator)
+
+ if self._colorbar_mode == "each":
+ if self._colorbar_location in ("right", "left"):
+ locator = self._divider.new_locator(
+ nx=h_cb_pos[col], ny=v_ax_pos[self._nrows - 1 - row])
+
+ elif self._colorbar_location in ("top", "bottom"):
+ locator = self._divider.new_locator(
+ nx=h_ax_pos[col], ny=v_cb_pos[self._nrows - 1 - row])
+
+ self.cbar_axes[i].set_axes_locator(locator)
+ elif self._colorbar_mode == 'edge':
+ if ((self._colorbar_location == 'left' and col == 0) or
+ (self._colorbar_location == 'right'
+ and col == self._ncols-1)):
+ locator = self._divider.new_locator(
+ nx=h_cb_pos[0], ny=v_ax_pos[self._nrows -1 - row])
+ self.cbar_axes[row].set_axes_locator(locator)
+ elif ((self._colorbar_location == 'bottom' and
+ row == self._nrows - 1) or
+ (self._colorbar_location == 'top' and row == 0)):
+ locator = self._divider.new_locator(nx=h_ax_pos[col],
+ ny=v_cb_pos[0])
+ self.cbar_axes[col].set_axes_locator(locator)
+
+ if self._colorbar_mode == "single":
+ if self._colorbar_location == "right":
+ #sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
+ sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
+ h.append(Size.from_any(self._colorbar_pad, sz))
+ h.append(Size.from_any(self._colorbar_size, sz))
+ locator = self._divider.new_locator(nx=-2, ny=0, ny1=-1)
+ elif self._colorbar_location == "top":
+ #sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
+ sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
+ v.append(Size.from_any(self._colorbar_pad, sz))
+ v.append(Size.from_any(self._colorbar_size, sz))
+ locator = self._divider.new_locator(nx=0, nx1=-1, ny=-2)
+ if self._colorbar_location in ("right", "top"):
+ for i in range(self.ngrids):
+ self.cbar_axes[i].set_visible(False)
+ self.cbar_axes[0].set_axes_locator(locator)
+ self.cbar_axes[0].set_visible(True)
+ elif self._colorbar_mode == "each":
+ for i in range(self.ngrids):
+ self.cbar_axes[i].set_visible(True)
+ elif self._colorbar_mode == "edge":
+ if self._colorbar_location in ('right', 'left'):
+ count = self._nrows
+ else:
+ count = self._ncols
+ for i in range(count):
+ self.cbar_axes[i].set_visible(True)
+ for j in range(i + 1, self.ngrids):
+ self.cbar_axes[j].set_visible(False)
+ else:
+ for i in range(self.ngrids):
+ self.cbar_axes[i].set_visible(False)
+ self.cbar_axes[i].set_position([1., 1., 0.001, 0.001],
+ which="active")
+
+ self._divider.set_horizontal(h)
+ self._divider.set_vertical(v)
+
+
+AxesGrid = ImageGrid
+
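+# Illustrative usage sketch (comment only, not part of the original module;
+# "fig" and "images" are assumed to come from the caller's code):
+#
+#     grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=0.1,
+#                      cbar_mode="single", cbar_location="right")
+#     for ax, data in zip(grid, images):
+#         im = ax.imshow(data)
+#     grid.cbar_axes[0].colorbar(im)
+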
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_rgb.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_rgb.py
new file mode 100644
index 00000000000..e62d4f06154
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_rgb.py
@@ -0,0 +1,228 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+from .axes_divider import make_axes_locatable, Size, locatable_axes_factory
+import sys
+from .mpl_axes import Axes
+
+
+def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True):
+ """
+ pad : fraction of the axes height.
+ """
+
+ divider = make_axes_locatable(ax)
+
+ pad_size = Size.Fraction(pad, Size.AxesY(ax))
+
+ xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
+ ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
+
+ divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
+ divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
+
+ ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
+
+ ax_rgb = []
+ if axes_class is None:
+ try:
+ axes_class = locatable_axes_factory(ax._axes_class)
+ except AttributeError:
+ axes_class = locatable_axes_factory(type(ax))
+
+ for ny in [4, 2, 0]:
+ ax1 = axes_class(ax.get_figure(),
+ ax.get_position(original=True),
+ sharex=ax, sharey=ax)
+ locator = divider.new_locator(nx=2, ny=ny)
+ ax1.set_axes_locator(locator)
+ for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
+ t.set_visible(False)
+ try:
+ for axis in ax1.axis.values():
+ axis.major_ticklabels.set_visible(False)
+ except AttributeError:
+ pass
+
+ ax_rgb.append(ax1)
+
+ if add_all:
+ fig = ax.get_figure()
+ for ax1 in ax_rgb:
+ fig.add_axes(ax1)
+
+ return ax_rgb
+
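+# Illustrative usage sketch (comment only; assumes an existing axes "ax"):
+#
+#     ax_r, ax_g, ax_b = make_rgb_axes(ax, pad=0.02)
+#     # "ax" keeps the large composite panel on the left; the returned axes
+#     # are the stacked R, G and B channel panels on the right.
+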
+
+def imshow_rgb(ax, r, g, b, **kwargs):
+ ny, nx = r.shape
+ R = np.zeros([ny, nx, 3], dtype="d")
+ R[:,:,0] = r
+ G = np.zeros_like(R)
+ G[:,:,1] = g
+ B = np.zeros_like(R)
+ B[:,:,2] = b
+
+ RGB = R + G + B
+
+ im_rgb = ax.imshow(RGB, **kwargs)
+
+ return im_rgb
+
+
+class RGBAxesBase(object):
+ """base class for a 4-panel imshow (RGB, R, G, B)
+
+ Layout:
+ +---------------+-----+
+ | | R |
+ + +-----+
+ | RGB | G |
+ + +-----+
+ | | B |
+ +---------------+-----+
+
+ Attributes
+ ----------
+ _defaultAxesClass : matplotlib.axes.Axes
+ defaults to 'Axes' in RGBAxes child class.
+ No default in abstract base class
+ RGB : _defaultAxesClass
+ The axes object for the three-channel imshow
+ R : _defaultAxesClass
+ The axes object for the red channel imshow
+ G : _defaultAxesClass
+ The axes object for the green channel imshow
+ B : _defaultAxesClass
+ The axes object for the blue channel imshow
+ """
+ def __init__(self, *kl, **kwargs):
+ """
+ Parameters
+ ----------
+ pad : float
+ fraction of the axes height to put as padding.
+ defaults to 0.0
+ add_all : bool
+ True: Add the {rgb, r, g, b} axes to the figure
+ defaults to True.
+ axes_class : matplotlib.axes.Axes
+
+ kl :
+ Unpacked into axes_class() init for RGB
+ kwargs :
+ Unpacked into axes_class() init for RGB, R, G, B axes
+ """
+ pad = kwargs.pop("pad", 0.0)
+ add_all = kwargs.pop("add_all", True)
+ try:
+ axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
+ except AttributeError:
+ new_msg = ("A subclass of RGBAxesBase must have a "
+ "_defaultAxesClass attribute. If you are not sure which "
+ "axes class to use, consider using "
+ "mpl_toolkits.axes_grid1.mpl_axes.Axes.")
+ six.reraise(AttributeError, AttributeError(new_msg),
+ sys.exc_info()[2])
+
+ ax = axes_class(*kl, **kwargs)
+
+ divider = make_axes_locatable(ax)
+
+ pad_size = Size.Fraction(pad, Size.AxesY(ax))
+
+ xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
+ ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
+
+ divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
+ divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
+
+ ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
+
+ ax_rgb = []
+ for ny in [4, 2, 0]:
+ ax1 = axes_class(ax.get_figure(),
+ ax.get_position(original=True),
+ sharex=ax, sharey=ax, **kwargs)
+ locator = divider.new_locator(nx=2, ny=ny)
+ ax1.set_axes_locator(locator)
+ ax1.axis[:].toggle(ticklabels=False)
+ ax_rgb.append(ax1)
+
+ self.RGB = ax
+ self.R, self.G, self.B = ax_rgb
+
+ if add_all:
+ fig = ax.get_figure()
+ fig.add_axes(ax)
+ self.add_RGB_to_figure()
+
+ self._config_axes()
+
+ def _config_axes(self, line_color='w', marker_edge_color='w'):
+ """Set the line color and ticks for the axes
+
+ Parameters
+ ----------
+ line_color : any matplotlib color
+ marker_edge_color : any matplotlib color
+ """
+ for ax1 in [self.RGB, self.R, self.G, self.B]:
+ ax1.axis[:].line.set_color(line_color)
+ ax1.axis[:].major_ticks.set_markeredgecolor(marker_edge_color)
+
+ def add_RGB_to_figure(self):
+ """Add the red, green and blue axes to the RGB composite's axes figure
+ """
+ self.RGB.get_figure().add_axes(self.R)
+ self.RGB.get_figure().add_axes(self.G)
+ self.RGB.get_figure().add_axes(self.B)
+
+ def imshow_rgb(self, r, g, b, **kwargs):
+ """Create the four images {rgb, r, g, b}
+
+ Parameters
+ ----------
+ r : array-like
+ The red array
+ g : array-like
+ The green array
+ b : array-like
+ The blue array
+ kwargs : imshow kwargs
+ kwargs get unpacked into the imshow calls for the four images
+
+ Returns
+ -------
+ rgb : matplotlib.image.AxesImage
+ r : matplotlib.image.AxesImage
+ g : matplotlib.image.AxesImage
+ b : matplotlib.image.AxesImage
+ """
+ if not (r.shape == g.shape == b.shape):
+ raise ValueError('Input shapes do not match.'
+ '\nr.shape = {}'
+ '\ng.shape = {}'
+ '\nb.shape = {}'
+ .format(r.shape, g.shape, b.shape))
+ RGB = np.dstack([r, g, b])
+ R = np.zeros_like(RGB)
+ R[:,:,0] = r
+ G = np.zeros_like(RGB)
+ G[:,:,1] = g
+ B = np.zeros_like(RGB)
+ B[:,:,2] = b
+
+ im_rgb = self.RGB.imshow(RGB, **kwargs)
+ im_r = self.R.imshow(R, **kwargs)
+ im_g = self.G.imshow(G, **kwargs)
+ im_b = self.B.imshow(B, **kwargs)
+
+ return im_rgb, im_r, im_g, im_b
+
+
+class RGBAxes(RGBAxesBase):
+ _defaultAxesClass = Axes
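+
+# Illustrative usage sketch (comment only; "fig", "r", "g" and "b" are
+# assumed to be provided by the caller):
+#
+#     ax = RGBAxes(fig, [0.1, 0.1, 0.8, 0.8], pad=0.0)
+#     im_rgb, im_r, im_g, im_b = ax.imshow_rgb(r, g, b, origin="lower")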
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_size.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_size.py
new file mode 100644
index 00000000000..163a6245fef
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/axes_size.py
@@ -0,0 +1,323 @@
+
+"""
+Provides classes of simple units that are used with the AxesDivider
+class (or others) to determine the size of each axes. The unit
+classes define a `get_size` method that returns a tuple of two floats,
+the relative and absolute sizes, respectively.
+
+Note that such a size is ultimately nothing more than a simple tuple of
+two floats. Take a look at the Divider class to see how these two
+values are used.
+
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import matplotlib.cbook as cbook
+from matplotlib.axes import Axes
+
+class _Base(object):
+ "Base class"
+
+ def __rmul__(self, other):
+        float(other)  # just to check that a number is given
+ return Fraction(other, self)
+
+ def __add__(self, other):
+ if isinstance(other, _Base):
+ return Add(self, other)
+ else:
+ float(other)
+ other = Fixed(other)
+ return Add(self, other)
+
+
+class Add(_Base):
+ def __init__(self, a, b):
+ self._a = a
+ self._b = b
+
+ def get_size(self, renderer):
+ a_rel_size, a_abs_size = self._a.get_size(renderer)
+ b_rel_size, b_abs_size = self._b.get_size(renderer)
+ return a_rel_size + b_rel_size, a_abs_size + b_abs_size
+
+class AddList(_Base):
+ def __init__(self, add_list):
+ self._list = add_list
+
+ def get_size(self, renderer):
+ sum_rel_size = sum([a.get_size(renderer)[0] for a in self._list])
+ sum_abs_size = sum([a.get_size(renderer)[1] for a in self._list])
+ return sum_rel_size, sum_abs_size
+
+
+class Fixed(_Base):
+ "Simple fixed size with absolute part = *fixed_size* and relative part = 0"
+ def __init__(self, fixed_size):
+ self.fixed_size = fixed_size
+
+ def get_size(self, renderer):
+ rel_size = 0.
+ abs_size = self.fixed_size
+ return rel_size, abs_size
+
+
+class Scaled(_Base):
+    "Simple scalable size with absolute part = 0 and relative part = *scalable_size*"
+ def __init__(self, scalable_size):
+ self._scalable_size = scalable_size
+
+ def get_size(self, renderer):
+ rel_size = self._scalable_size
+ abs_size = 0.
+ return rel_size, abs_size
+
+Scalable=Scaled
+
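+# A quick sketch of the (relative, absolute) size convention used by these
+# classes (illustrative only; neither class touches the renderer, so None
+# can be passed here):
+#
+#     >>> Fixed(1.5).get_size(None)
+#     (0.0, 1.5)
+#     >>> Scaled(2.0).get_size(None)
+#     (2.0, 0.0)
+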
+def _get_axes_aspect(ax):
+ aspect = ax.get_aspect()
+    # when aspect is "auto", consider it as 1.
+ if aspect in ('normal', 'auto'):
+ aspect = 1.
+ elif aspect == "equal":
+ aspect = 1
+ else:
+ aspect = float(aspect)
+
+ return aspect
+
+class AxesX(_Base):
+ """
+ Scaled size whose relative part corresponds to the data width
+ of the *axes* multiplied by the *aspect*.
+ """
+ def __init__(self, axes, aspect=1., ref_ax=None):
+ self._axes = axes
+ self._aspect = aspect
+ if aspect == "axes" and ref_ax is None:
+ raise ValueError("ref_ax must be set when aspect='axes'")
+ self._ref_ax = ref_ax
+
+ def get_size(self, renderer):
+ l1, l2 = self._axes.get_xlim()
+ if self._aspect == "axes":
+ ref_aspect = _get_axes_aspect(self._ref_ax)
+ aspect = ref_aspect/_get_axes_aspect(self._axes)
+ else:
+ aspect = self._aspect
+
+ rel_size = abs(l2-l1)*aspect
+ abs_size = 0.
+ return rel_size, abs_size
+
+class AxesY(_Base):
+ """
+ Scaled size whose relative part corresponds to the data height
+ of the *axes* multiplied by the *aspect*.
+ """
+ def __init__(self, axes, aspect=1., ref_ax=None):
+ self._axes = axes
+ self._aspect = aspect
+ if aspect == "axes" and ref_ax is None:
+ raise ValueError("ref_ax must be set when aspect='axes'")
+ self._ref_ax = ref_ax
+
+ def get_size(self, renderer):
+ l1, l2 = self._axes.get_ylim()
+
+ if self._aspect == "axes":
+ ref_aspect = _get_axes_aspect(self._ref_ax)
+ aspect = _get_axes_aspect(self._axes)
+ else:
+ aspect = self._aspect
+
+ rel_size = abs(l2-l1)*aspect
+ abs_size = 0.
+ return rel_size, abs_size
+
+
+class MaxExtent(_Base):
+ """
+ Size whose absolute part is the largest width (or height) of
+ the given *artist_list*.
+ """
+ def __init__(self, artist_list, w_or_h):
+ self._artist_list = artist_list
+
+ if w_or_h not in ["width", "height"]:
+ raise ValueError()
+
+ self._w_or_h = w_or_h
+
+ def add_artist(self, a):
+ self._artist_list.append(a)
+
+ def get_size(self, renderer):
+ rel_size = 0.
+ w_list, h_list = [], []
+ for a in self._artist_list:
+ bb = a.get_window_extent(renderer)
+ w_list.append(bb.width)
+ h_list.append(bb.height)
+ dpi = a.get_figure().get_dpi()
+ if self._w_or_h == "width":
+ abs_size = max(w_list)/dpi
+ elif self._w_or_h == "height":
+ abs_size = max(h_list)/dpi
+
+ return rel_size, abs_size
+
+
+class MaxWidth(_Base):
+ """
+ Size whose absolute part is the largest width of
+ the given *artist_list*.
+ """
+ def __init__(self, artist_list):
+ self._artist_list = artist_list
+
+ def add_artist(self, a):
+ self._artist_list.append(a)
+
+ def get_size(self, renderer):
+ rel_size = 0.
+ w_list = []
+ for a in self._artist_list:
+ bb = a.get_window_extent(renderer)
+ w_list.append(bb.width)
+ dpi = a.get_figure().get_dpi()
+ abs_size = max(w_list)/dpi
+
+ return rel_size, abs_size
+
+
+
+class MaxHeight(_Base):
+ """
+ Size whose absolute part is the largest height of
+ the given *artist_list*.
+ """
+ def __init__(self, artist_list):
+ self._artist_list = artist_list
+
+ def add_artist(self, a):
+ self._artist_list.append(a)
+
+ def get_size(self, renderer):
+ rel_size = 0.
+ h_list = []
+ for a in self._artist_list:
+ bb = a.get_window_extent(renderer)
+ h_list.append(bb.height)
+ dpi = a.get_figure().get_dpi()
+ abs_size = max(h_list)/dpi
+
+ return rel_size, abs_size
+
+
+class Fraction(_Base):
+ """
+ An instance whose size is a *fraction* of the *ref_size*.
+ ::
+
+ >>> s = Fraction(0.3, AxesX(ax))
+
+ """
+ def __init__(self, fraction, ref_size):
+ self._fraction_ref = ref_size
+ self._fraction = fraction
+
+ def get_size(self, renderer):
+ if self._fraction_ref is None:
+ return self._fraction, 0.
+ else:
+ r, a = self._fraction_ref.get_size(renderer)
+ rel_size = r*self._fraction
+ abs_size = a*self._fraction
+ return rel_size, abs_size
+
+class Padded(_Base):
+ """
+    Return an instance whose absolute part of *size* is
+    increased by the amount of *pad*.
+ """
+ def __init__(self, size, pad):
+ self._size = size
+ self._pad = pad
+
+ def get_size(self, renderer):
+ r, a = self._size.get_size(renderer)
+ rel_size = r
+ abs_size = a + self._pad
+ return rel_size, abs_size
+
+def from_any(size, fraction_ref=None):
+ """
+    Create a Fixed unit when the first argument is a float, or a
+    Fraction unit if it is a string that ends with %. The second
+    argument is only meaningful when a Fraction unit is created.::
+
+ >>> a = Size.from_any(1.2) # => Size.Fixed(1.2)
+ >>> Size.from_any("50%", a) # => Size.Fraction(0.5, a)
+
+ """
+ if cbook.is_numlike(size):
+ return Fixed(size)
+ elif isinstance(size, six.string_types):
+ if size[-1] == "%":
+ return Fraction(float(size[:-1]) / 100, fraction_ref)
+
+ raise ValueError("Unknown format")
+
+
+class SizeFromFunc(_Base):
+ def __init__(self, func):
+ self._func = func
+
+ def get_size(self, renderer):
+ rel_size = 0.
+
+ bb = self._func(renderer)
+ dpi = renderer.points_to_pixels(72.)
+ abs_size = bb/dpi
+
+ return rel_size, abs_size
+
+class GetExtentHelper(object):
+ def _get_left(tight_bbox, axes_bbox):
+ return axes_bbox.xmin - tight_bbox.xmin
+
+ def _get_right(tight_bbox, axes_bbox):
+ return tight_bbox.xmax - axes_bbox.xmax
+
+ def _get_bottom(tight_bbox, axes_bbox):
+ return axes_bbox.ymin - tight_bbox.ymin
+
+ def _get_top(tight_bbox, axes_bbox):
+ return tight_bbox.ymax - axes_bbox.ymax
+
+ _get_func_map = dict(left=_get_left,
+ right=_get_right,
+ bottom=_get_bottom,
+ top=_get_top)
+
+ del _get_left, _get_right, _get_bottom, _get_top
+
+ def __init__(self, ax, direction):
+ if isinstance(ax, Axes):
+ self._ax_list = [ax]
+ else:
+ self._ax_list = ax
+
+ try:
+ self._get_func = self._get_func_map[direction]
+ except KeyError:
+ raise KeyError("direction must be one of left, right, bottom, top")
+
+ def __call__(self, renderer):
+ vl = [self._get_func(ax.get_tightbbox(renderer, False),
+ ax.bbox) for ax in self._ax_list]
+ return max(vl)
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/colorbar.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/colorbar.py
new file mode 100644
index 00000000000..34bdf3618a7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/colorbar.py
@@ -0,0 +1,836 @@
+"""
+Colorbar toolkit with two classes and a function:
+
+ :class:`ColorbarBase`
+ the base class with full colorbar drawing functionality.
+ It can be used as-is to make a colorbar for a given colormap;
+ a mappable object (e.g., image) is not needed.
+
+ :class:`Colorbar`
+ the derived class for use with images or contour plots.
+
+ :func:`make_axes`
+ a function for resizing an axes and adding a second axes
+ suitable for a colorbar
+
+The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
+and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
+is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import xrange, zip
+
+import numpy as np
+import matplotlib as mpl
+import matplotlib.colors as colors
+import matplotlib.cm as cm
+from matplotlib import docstring
+import matplotlib.ticker as ticker
+import matplotlib.cbook as cbook
+import matplotlib.collections as collections
+import matplotlib.contour as contour
+from matplotlib.path import Path
+from matplotlib.patches import PathPatch
+from matplotlib.transforms import Bbox
+
+
+make_axes_kw_doc = '''
+
+ ============= ====================================================
+ Property Description
+ ============= ====================================================
+ *orientation* vertical or horizontal
+ *fraction* 0.15; fraction of original axes to use for colorbar
+ *pad* 0.05 if vertical, 0.15 if horizontal; fraction
+ of original axes between colorbar and new image axes
+ *shrink* 1.0; fraction by which to shrink the colorbar
+ *aspect* 20; ratio of long to short dimensions
+ ============= ====================================================
+
+'''
+
+colormap_kw_doc = '''
+
+ =========== ====================================================
+ Property Description
+ =========== ====================================================
+ *extend* [ 'neither' | 'both' | 'min' | 'max' ]
+ If not 'neither', make pointed end(s) for out-of-
+ range values. These are set for a given colormap
+ using the colormap set_under and set_over methods.
+ *spacing* [ 'uniform' | 'proportional' ]
+ Uniform spacing gives each discrete color the same
+ space; proportional makes the space proportional to
+ the data interval.
+ *ticks* [ None | list of ticks | Locator object ]
+ If None, ticks are determined automatically from the
+ input.
+ *format* [ None | format string | Formatter object ]
+ If None, the
+ :class:`~matplotlib.ticker.ScalarFormatter` is used.
+ If a format string is given, e.g., '%.3f', that is
+ used. An alternative
+ :class:`~matplotlib.ticker.Formatter` object may be
+ given instead.
+ *drawedges* bool
+ Whether to draw lines at color boundaries.
+ =========== ====================================================
+
+ The following will probably be useful only in the context of
+ indexed colors (that is, when the mappable has norm=NoNorm()),
+ or other unusual circumstances.
+
+ ============ ===================================================
+ Property Description
+ ============ ===================================================
+ *boundaries* None or a sequence
+ *values* None or a sequence which must be of length 1 less
+ than the sequence of *boundaries*. For each region
+ delimited by adjacent entries in *boundaries*, the
+ color mapped to the corresponding value in values
+ will be used.
+ ============ ===================================================
+
+'''
+
+colorbar_doc = '''
+
+Add a colorbar to a plot.
+
+Function signatures for the :mod:`~matplotlib.pyplot` interface; all
+but the first are also method signatures for the
+:meth:`~matplotlib.figure.Figure.colorbar` method::
+
+ colorbar(**kwargs)
+ colorbar(mappable, **kwargs)
+ colorbar(mappable, cax=cax, **kwargs)
+ colorbar(mappable, ax=ax, **kwargs)
+
+arguments:
+
+ *mappable*
+ the :class:`~matplotlib.image.Image`,
+ :class:`~matplotlib.contour.ContourSet`, etc. to
+ which the colorbar applies; this argument is mandatory for the
+ :meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
+ :func:`~matplotlib.pyplot.colorbar` function, which sets the
+ default to the current image.
+
+keyword arguments:
+
+ *cax*
+ None | axes object into which the colorbar will be drawn
+ *ax*
+ None | parent axes object from which space for a new
+ colorbar axes will be stolen
+
+
+Additional keyword arguments are of two kinds:
+
+ axes properties:
+ %s
+ colorbar properties:
+ %s
+
+If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend*
+kwarg is included automatically.
+
+Note that the *shrink* kwarg provides a simple way to keep a vertical
+colorbar, for example, from being taller than the axes of the mappable
+to which the colorbar is attached; but it is a manual method requiring
+some trial and error. If the colorbar is too tall (or a horizontal
+colorbar is too wide) use a smaller value of *shrink*.
+
+For more precise control, you can manually specify the positions of
+the axes objects in which the mappable and the colorbar are drawn. In
+this case, do not use any of the axes properties kwargs.
+
+It is known that some vector graphics viewers (svg and pdf) render white gaps
+between segments of the colorbar. This is due to bugs in the viewers, not
+matplotlib. As a workaround, the colorbar can be rendered with overlapping
+segments::
+
+ cbar = colorbar()
+ cbar.solids.set_edgecolor("face")
+ draw()
+
+However, this has negative consequences in other circumstances, particularly
+with semi-transparent images (alpha < 1) and colorbar extensions, and is
+therefore not enabled by default (see issue #1188).
+
+returns:
+ :class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
+ :class:`~matplotlib.colorbar.ColorbarBase`. Call the
+ :meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
+ to label the colorbar.
+
+
+The transData of the *cax* is adjusted so that the limits of the
+longest axis actually correspond to the limits of the colorbar range. On
+the other hand, the shortest axis has data limits of [1, 2], whose
+unconventional values are there to prevent underflow when a log scale is used.
+''' % (make_axes_kw_doc, colormap_kw_doc)
+
+#docstring.interpd.update(colorbar_doc=colorbar_doc)
+
+
+class CbarAxesLocator(object):
+ """
+    CbarAxesLocator is an axes_locator for colorbar axes. It adjusts the
+    position of the axes to make room for the extended ends, i.e., the
+    extended ends are located outside the axes area.
+ """
+
+ def __init__(self, locator=None, extend="neither", orientation="vertical"):
+ """
+        *locator* : the bbox returned from the locator is used as the
+            initial axes location. If None, axes.bbox is used.
+
+ *extend* : same as in ColorbarBase
+ *orientation* : same as in ColorbarBase
+
+ """
+ self._locator = locator
+ self.extesion_fraction = 0.05
+ self.extend = extend
+ self.orientation = orientation
+
+ def get_original_position(self, axes, renderer):
+ """
+ get the original position of the axes.
+ """
+ if self._locator is None:
+ bbox = axes.get_position(original=True)
+ else:
+ bbox = self._locator(axes, renderer)
+ return bbox
+
+ def get_end_vertices(self):
+ """
+        Return a tuple of two sets of vertices for the colorbar extended
+        ends. The first set is for the minimum end, and the second is
+        for the maximum end.
+ """
+        # Note that the two sets of vertices are concatenated to make
+        # the vertices of the frame.
+ extesion_fraction = self.extesion_fraction
+
+ corx = extesion_fraction*2.
+ cory = 1./(1. - corx)
+ x1, y1, w, h = 0, 0, 1, 1
+ x2, y2 = x1 + w, y1 + h
+ dw, dh = w*extesion_fraction, h*extesion_fraction*cory
+
+ if self.extend in ["min", "both"]:
+ bottom = [(x1, y1),
+ (x1+w/2., y1-dh),
+ (x2, y1)]
+ else:
+ bottom = [(x1, y1),
+ (x2, y1)]
+
+ if self.extend in ["max", "both"]:
+ top = [(x2, y2),
+ (x1+w/2., y2+dh),
+ (x1, y2)]
+ else:
+ top = [(x2, y2),
+ (x1, y2)]
+
+ if self.orientation == "horizontal":
+ bottom = [(y,x) for (x,y) in bottom]
+ top = [(y,x) for (x,y) in top]
+
+ return bottom, top
+
+
+ def get_path_patch(self):
+ """
+ get the path for axes patch
+ """
+ end1, end2 = self.get_end_vertices()
+
+ verts = [] + end1 + end2 + end1[:1]
+
+ return Path(verts)
+
+
+ def get_path_ends(self):
+ """
+ get the paths for extended ends
+ """
+
+ end1, end2 = self.get_end_vertices()
+
+ return Path(end1), Path(end2)
+
+
+ def __call__(self, axes, renderer):
+ """
+ Return the adjusted position of the axes
+ """
+ bbox0 = self.get_original_position(axes, renderer)
+ bbox = bbox0
+
+ x1, y1, w, h = bbox.bounds
+ extesion_fraction = self.extesion_fraction
+ dw, dh = w*extesion_fraction, h*extesion_fraction
+
+ if self.extend in ["min", "both"]:
+ if self.orientation == "horizontal":
+ x1 = x1 + dw
+ else:
+ y1 = y1+dh
+
+ if self.extend in ["max", "both"]:
+ if self.orientation == "horizontal":
+ w = w-2*dw
+ else:
+ h = h-2*dh
+
+ return Bbox.from_bounds(x1, y1, w, h)
+
+
+
+class ColorbarBase(cm.ScalarMappable):
+ '''
+ Draw a colorbar in an existing axes.
+
+ This is a base class for the :class:`Colorbar` class, which is the
+ basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab
+ function.
+
+ It is also useful by itself for showing a colormap. If the *cmap*
+ kwarg is given but *boundaries* and *values* are left as None,
+ then the colormap will be displayed on a 0-1 scale. To show the
+ under- and over-value colors, specify the *norm* as::
+
+ colors.Normalize(clip=False)
+
+ To show the colors versus index instead of on the 0-1 scale,
+ use::
+
+ norm=colors.NoNorm.
+
+ Useful attributes:
+
+ :attr:`ax`
+ the Axes instance in which the colorbar is drawn
+
+ :attr:`lines`
+ a LineCollection if lines were drawn, otherwise None
+
+ :attr:`dividers`
+ a LineCollection if *drawedges* is True, otherwise None
+
+ Useful public methods are :meth:`set_label` and :meth:`add_lines`.
+
+ '''
+
+ def __init__(self, ax, cmap=None,
+ norm=None,
+ alpha=1.0,
+ values=None,
+ boundaries=None,
+ orientation='vertical',
+ extend='neither',
+ spacing='uniform', # uniform or proportional
+ ticks=None,
+ format=None,
+ drawedges=False,
+ filled=True,
+ ):
+ self.ax = ax
+
+ if cmap is None: cmap = cm.get_cmap()
+ if norm is None: norm = colors.Normalize()
+ self.alpha = alpha
+ cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
+ self.values = values
+ self.boundaries = boundaries
+ self.extend = extend
+ self.spacing = spacing
+ self.orientation = orientation
+ self.drawedges = drawedges
+ self.filled = filled
+
+ # artists
+ self.solids = None
+ self.lines = None
+ self.dividers = None
+ self.extension_patch1 = None
+ self.extension_patch2 = None
+
+ if orientation == "vertical":
+ self.cbar_axis = self.ax.yaxis
+ else:
+ self.cbar_axis = self.ax.xaxis
+
+
+ if format is None:
+ if isinstance(self.norm, colors.LogNorm):
+ # change both axis for proper aspect
+                # change both axis scales so the aspect works properly
+ self.ax.set_yscale("log")
+ self.cbar_axis.set_minor_locator(ticker.NullLocator())
+ formatter = ticker.LogFormatter()
+ else:
+ formatter = None
+ elif isinstance(format, six.string_types):
+ formatter = ticker.FormatStrFormatter(format)
+ else:
+ formatter = format # Assume it is a Formatter
+
+ if formatter is None:
+ formatter = self.cbar_axis.get_major_formatter()
+ else:
+ self.cbar_axis.set_major_formatter(formatter)
+
+ if cbook.iterable(ticks):
+ self.cbar_axis.set_ticks(ticks)
+ elif ticks is not None:
+ self.cbar_axis.set_major_locator(ticks)
+ else:
+ self._select_locator(formatter)
+
+
+ self._config_axes()
+
+ self.update_artists()
+
+ self.set_label_text('')
+
+
+ def _get_colorbar_limits(self):
+ """
+        Return the initial limits for the colorbar range. The returned
+        min and max values are used to create the colorbar solids, etc.
+ """
+ if self.boundaries is not None:
+ C = self.boundaries
+ if self.extend in ["min", "both"]:
+ C = C[1:]
+
+ if self.extend in ["max", "both"]:
+ C = C[:-1]
+ return min(C), max(C)
+ else:
+ return self.get_clim()
+
+
+ def _config_axes(self):
+ '''
+ Adjust the properties of the axes to be adequate for colorbar display.
+ '''
+ ax = self.ax
+
+ axes_locator = CbarAxesLocator(ax.get_axes_locator(),
+ extend=self.extend,
+ orientation=self.orientation)
+ ax.set_axes_locator(axes_locator)
+
+        # override get_data_ratio so that the aspect handling works.
+ def _f():
+ return 1.
+ ax.get_data_ratio = _f
+ ax.get_data_ratio_log = _f
+
+ ax.set_frame_on(True)
+ ax.set_navigate(False)
+
+ self.ax.set_autoscalex_on(False)
+ self.ax.set_autoscaley_on(False)
+
+ if self.orientation == 'horizontal':
+ ax.xaxis.set_label_position('bottom')
+ ax.set_yticks([])
+ else:
+ ax.set_xticks([])
+ ax.yaxis.set_label_position('right')
+ ax.yaxis.set_ticks_position('right')
+
+
+
+ def update_artists(self):
+ """
+        Update the artists associated with the colorbar, *filled* and
+        *ends*. Note that *lines* are not updated. This needs to be
+        called whenever the clim of the associated image changes.
+ """
+ self._process_values()
+ self._add_ends()
+
+ X, Y = self._mesh()
+ if self.filled:
+ C = self._values[:,np.newaxis]
+ self._add_solids(X, Y, C)
+
+ ax = self.ax
+ vmin, vmax = self._get_colorbar_limits()
+ if self.orientation == 'horizontal':
+ ax.set_ylim(1, 2)
+ ax.set_xlim(vmin, vmax)
+ else:
+ ax.set_xlim(1, 2)
+ ax.set_ylim(vmin, vmax)
+
+
+ def _add_ends(self):
+ """
+ Create patches from extended ends and add them to the axes.
+ """
+
+ del self.extension_patch1
+ del self.extension_patch2
+
+ path1, path2 = self.ax.get_axes_locator().get_path_ends()
+ fc=mpl.rcParams['axes.facecolor']
+ ec=mpl.rcParams['axes.edgecolor']
+ linewidths=0.5*mpl.rcParams['axes.linewidth']
+ self.extension_patch1 = PathPatch(path1,
+ fc=fc, ec=ec, lw=linewidths,
+ zorder=2.,
+ transform=self.ax.transAxes,
+ clip_on=False)
+ self.extension_patch2 = PathPatch(path2,
+ fc=fc, ec=ec, lw=linewidths,
+ zorder=2.,
+ transform=self.ax.transAxes,
+ clip_on=False)
+ self.ax.add_artist(self.extension_patch1)
+ self.ax.add_artist(self.extension_patch2)
+
+
+
+ def _set_label_text(self):
+ """
+ set label.
+ """
+ self.cbar_axis.set_label_text(self._label, **self._labelkw)
+
+ def set_label_text(self, label, **kw):
+ '''
+ Label the long axis of the colorbar
+ '''
+ self._label = label
+ self._labelkw = kw
+ self._set_label_text()
+
+
+ def _edges(self, X, Y):
+ '''
+ Return the separator line segments; helper for _add_solids.
+ '''
+ N = X.shape[0]
+ # Using the non-array form of these line segments is much
+ # simpler than making them into arrays.
+ if self.orientation == 'vertical':
+ return [list(zip(X[i], Y[i])) for i in xrange(1, N-1)]
+ else:
+ return [list(zip(Y[i], X[i])) for i in xrange(1, N-1)]
+
+ def _add_solids(self, X, Y, C):
+ '''
+ Draw the colors using :meth:`~matplotlib.axes.Axes.pcolormesh`;
+ optionally add separators.
+ '''
+ ## Change to pcolorfast after fixing bugs in some backends...
+
+ if self.extend in ["min", "both"]:
+ cc = self.to_rgba([C[0][0]])
+ self.extension_patch1.set_fc(cc[0])
+ X, Y, C = X[1:], Y[1:], C[1:]
+
+ if self.extend in ["max", "both"]:
+ cc = self.to_rgba([C[-1][0]])
+ self.extension_patch2.set_fc(cc[0])
+ X, Y, C = X[:-1], Y[:-1], C[:-1]
+
+ if self.orientation == 'vertical':
+ args = (X, Y, C)
+ else:
+ args = (np.transpose(Y), np.transpose(X), np.transpose(C))
+ kw = {'cmap':self.cmap, 'norm':self.norm,
+ 'shading':'flat', 'alpha':self.alpha,
+ }
+
+ del self.solids
+ del self.dividers
+
+ col = self.ax.pcolormesh(*args, **kw)
+
+ self.solids = col
+ if self.drawedges:
+ self.dividers = collections.LineCollection(self._edges(X,Y),
+ colors=(mpl.rcParams['axes.edgecolor'],),
+ linewidths=(0.5*mpl.rcParams['axes.linewidth'],),
+ )
+ self.ax.add_collection(self.dividers)
+ else:
+ self.dividers = None
+
+ def add_lines(self, levels, colors, linewidths):
+ '''
+ Draw lines on the colorbar. It deletes preexisting lines.
+ '''
+ del self.lines
+
+ N = len(levels)
+ x = np.array([1.0, 2.0])
+ X, Y = np.meshgrid(x,levels)
+ if self.orientation == 'vertical':
+ xy = [list(zip(X[i], Y[i])) for i in xrange(N)]
+ else:
+ xy = [list(zip(Y[i], X[i])) for i in xrange(N)]
+ col = collections.LineCollection(xy, linewidths=linewidths,
+ )
+ self.lines = col
+ col.set_color(colors)
+ self.ax.add_collection(col)
+
+
+ def _select_locator(self, formatter):
+ '''
+ select a suitable locator
+ '''
+ if self.boundaries is None:
+ if isinstance(self.norm, colors.NoNorm):
+ nv = len(self._values)
+ base = 1 + int(nv/10)
+ locator = ticker.IndexLocator(base=base, offset=0)
+ elif isinstance(self.norm, colors.BoundaryNorm):
+ b = self.norm.boundaries
+ locator = ticker.FixedLocator(b, nbins=10)
+ elif isinstance(self.norm, colors.LogNorm):
+ locator = ticker.LogLocator()
+ else:
+ locator = ticker.MaxNLocator(nbins=5)
+ else:
+ b = self._boundaries[self._inside]
+ locator = ticker.FixedLocator(b) #, nbins=10)
+
+ self.cbar_axis.set_major_locator(locator)
+
+
+ def _process_values(self, b=None):
+ '''
+ Set the :attr:`_boundaries` and :attr:`_values` attributes
+ based on the input boundaries and values. Input boundaries
+ can be *self.boundaries* or the argument *b*.
+ '''
+ if b is None:
+ b = self.boundaries
+ if b is not None:
+ self._boundaries = np.asarray(b, dtype=float)
+ if self.values is None:
+ self._values = 0.5*(self._boundaries[:-1]
+ + self._boundaries[1:])
+ if isinstance(self.norm, colors.NoNorm):
+ self._values = (self._values + 0.00001).astype(np.int16)
+ return
+ self._values = np.array(self.values)
+ return
+ if self.values is not None:
+ self._values = np.array(self.values)
+ if self.boundaries is None:
+ b = np.zeros(len(self.values)+1, 'd')
+ b[1:-1] = 0.5*(self._values[:-1] - self._values[1:])
+ b[0] = 2.0*b[1] - b[2]
+ b[-1] = 2.0*b[-2] - b[-3]
+ self._boundaries = b
+ return
+ self._boundaries = np.array(self.boundaries)
+ return
+ # Neither boundaries nor values are specified;
+ # make reasonable ones based on cmap and norm.
+ if isinstance(self.norm, colors.NoNorm):
+ b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
+ v = np.zeros((len(b)-1,), dtype=np.int16)
+ v = np.arange(self.cmap.N, dtype=np.int16)
+ self._boundaries = b
+ self._values = v
+ return
+ elif isinstance(self.norm, colors.BoundaryNorm):
+ b = np.array(self.norm.boundaries)
+ v = np.zeros((len(b)-1,), dtype=float)
+ bi = self.norm.boundaries
+ v = 0.5*(bi[:-1] + bi[1:])
+ self._boundaries = b
+ self._values = v
+ return
+ else:
+ b = self._uniform_y(self.cmap.N+1)
+
+ self._process_values(b)
+
+
+ def _uniform_y(self, N):
+ '''
+ Return colorbar data coordinates for *N* uniformly
+ spaced boundaries.
+ '''
+ vmin, vmax = self._get_colorbar_limits()
+ if isinstance(self.norm, colors.LogNorm):
+ y = np.logspace(np.log10(vmin), np.log10(vmax), N)
+ else:
+ y = np.linspace(vmin, vmax, N)
+ return y
+
+ def _mesh(self):
+ '''
+ Return X,Y, the coordinate arrays for the colorbar pcolormesh.
+ These are suitable for a vertical colorbar; swapping and
+ transposition for a horizontal colorbar are done outside
+ this function.
+ '''
+ x = np.array([1.0, 2.0])
+ if self.spacing == 'uniform':
+ y = self._uniform_y(len(self._boundaries))
+ else:
+ y = self._boundaries
+ self._y = y
+
+ X, Y = np.meshgrid(x,y)
+ return X, Y
+
+
+ def set_alpha(self, alpha):
+ """
+ set alpha value.
+ """
+ self.alpha = alpha
+
+
+class Colorbar(ColorbarBase):
+ def __init__(self, ax, mappable, **kw):
+ mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax
+ # are set when colorbar is called,
+ # even if mappable.draw has not yet
+ # been called. This will not change
+ # vmin, vmax if they are already set.
+ self.mappable = mappable
+ kw['cmap'] = mappable.cmap
+ kw['norm'] = mappable.norm
+ kw['alpha'] = mappable.get_alpha()
+ if isinstance(mappable, contour.ContourSet):
+ CS = mappable
+ kw['boundaries'] = CS._levels
+ kw['values'] = CS.cvalues
+ kw['extend'] = CS.extend
+ #kw['ticks'] = CS._levels
+ kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
+ kw['filled'] = CS.filled
+ ColorbarBase.__init__(self, ax, **kw)
+ if not CS.filled:
+ self.add_lines(CS)
+ else:
+ ColorbarBase.__init__(self, ax, **kw)
+
+
+ def add_lines(self, CS):
+ '''
+ Add the lines from a non-filled
+ :class:`~matplotlib.contour.ContourSet` to the colorbar.
+ '''
+ if not isinstance(CS, contour.ContourSet) or CS.filled:
+ raise ValueError('add_lines is only for a ContourSet of lines')
+ tcolors = [c[0] for c in CS.tcolors]
+ tlinewidths = [t[0] for t in CS.tlinewidths]
+ # The following was an attempt to get the colorbar lines
+ # to follow subsequent changes in the contour lines,
+ # but more work is needed: specifically, a careful
+ # look at event sequences, and at how
+ # to make one object track another automatically.
+ #tcolors = [col.get_colors()[0] for col in CS.collections]
+ #tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
+ ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)
+
+ def update_bruteforce(self, mappable):
+ """
+ Update the colorbar artists to reflect the change of the
+ associated mappable.
+ """
+ self.update_artists()
+
+ if isinstance(mappable, contour.ContourSet):
+ if not mappable.filled:
+ self.add_lines(mappable)
+
+@docstring.Substitution(make_axes_kw_doc)
+def make_axes(parent, **kw):
+ '''
+ Resize and reposition a parent axes, and return a child
+ axes suitable for a colorbar
+
+ ::
+
+ cax, kw = make_axes(parent, **kw)
+
+ Keyword arguments may include the following (with defaults):
+
+ *orientation*
+ 'vertical' or 'horizontal'
+
+ %s
+
+ All but the first of these are stripped from the input kw set.
+
+ Returns (cax, kw), the child axes and the reduced kw dictionary.
+ '''
+ orientation = kw.setdefault('orientation', 'vertical')
+ fraction = kw.pop('fraction', 0.15)
+ shrink = kw.pop('shrink', 1.0)
+ aspect = kw.pop('aspect', 20)
+ #pb = transforms.PBox(parent.get_position())
+ pb = parent.get_position(original=True).frozen()
+ if orientation == 'vertical':
+ pad = kw.pop('pad', 0.05)
+ x1 = 1.0-fraction
+ pb1, pbx, pbcb = pb.splitx(x1-pad, x1)
+ pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb)
+ anchor = (0.0, 0.5)
+ panchor = (1.0, 0.5)
+ else:
+ pad = kw.pop('pad', 0.15)
+ pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad)
+ pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb)
+ aspect = 1.0/aspect
+ anchor = (0.5, 1.0)
+ panchor = (0.5, 0.0)
+ parent.set_position(pb1)
+ parent.set_anchor(panchor)
+ fig = parent.get_figure()
+ cax = fig.add_axes(pbcb)
+ cax.set_aspect(aspect, anchor=anchor, adjustable='box')
+ return cax, kw
+
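+# Illustrative sketch of how make_axes pairs with Colorbar below (comment
+# only; "ax" and "im" are assumed from the caller's code):
+#
+#     cax, kw = make_axes(ax, orientation="vertical", fraction=0.1)
+#     cb = Colorbar(cax, im, **kw)
+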
+@docstring.Substitution(colorbar_doc)
+def colorbar(mappable, cax=None, ax=None, **kw):
+ """
+ Create a colorbar for a ScalarMappable instance.
+
+ Documentation for the pylab thin wrapper:
+
+ %s
+ """
+ import matplotlib.pyplot as plt
+ if ax is None:
+ ax = plt.gca()
+ if cax is None:
+ cax, kw = make_axes(ax, **kw)
+ cax._hold = True
+ cb = Colorbar(cax, mappable, **kw)
+
+ def on_changed(m):
+ cb.set_cmap(m.get_cmap())
+ cb.set_clim(m.get_clim())
+ cb.update_bruteforce(m)
+
+ cbid = mappable.callbacksSM.connect('changed', on_changed)
+ mappable.colorbar = cb
+ ax.figure.sca(ax)
+ return cb
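+
+# Illustrative usage sketch (comment only; assumes a pyplot axes "ax" and a
+# 2-D array "data"):
+#
+#     im = ax.imshow(data)
+#     cb = colorbar(im, ax=ax)
+#     cb.set_label_text("intensity")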
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/inset_locator.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/inset_locator.py
new file mode 100644
index 00000000000..9aeedcb0888
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/inset_locator.py
@@ -0,0 +1,659 @@
+"""
+A collection of functions and objects for creating or placing inset axes.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import warnings
+from matplotlib import docstring
+import six
+from matplotlib.offsetbox import AnchoredOffsetbox
+from matplotlib.patches import Patch, Rectangle
+from matplotlib.path import Path
+from matplotlib.transforms import Bbox, BboxTransformTo
+from matplotlib.transforms import IdentityTransform, TransformedBbox
+
+from . import axes_size as Size
+from .parasite_axes import HostAxes
+
+
+class InsetPosition(object):
+ @docstring.dedent_interpd
+ def __init__(self, parent, lbwh):
+ """
+ An object for positioning an inset axes.
+
+ This is created by specifying the normalized coordinates in the axes,
+ instead of the figure.
+
+ Parameters
+ ----------
+ parent : `matplotlib.axes.Axes`
+ Axes to use for normalizing coordinates.
+
+ lbwh : iterable of four floats
+ The left edge, bottom edge, width, and height of the inset axes, in
+ units of the normalized coordinate of the *parent* axes.
+
+ See Also
+ --------
+ :meth:`matplotlib.axes.Axes.set_axes_locator`
+
+ Examples
+ --------
+ The following bounds the inset axes to a box with 20%% of the parent
+ axes's height and 40%% of the width. The size of the axes specified
+ ([0, 0, 1, 1]) ensures that the axes completely fills the bounding box:
+
+ >>> parent_axes = plt.gca()
+ >>> ax_ins = plt.axes([0, 0, 1, 1])
+        >>> ip = InsetPosition(parent_axes, [0.5, 0.1, 0.4, 0.2])
+ >>> ax_ins.set_axes_locator(ip)
+ """
+ self.parent = parent
+ self.lbwh = lbwh
+
+ def __call__(self, ax, renderer):
+ bbox_parent = self.parent.get_position(original=False)
+ trans = BboxTransformTo(bbox_parent)
+ bbox_inset = Bbox.from_bounds(*self.lbwh)
+ bb = TransformedBbox(bbox_inset, trans)
+ return bb
+
+
+class AnchoredLocatorBase(AnchoredOffsetbox):
+ def __init__(self, bbox_to_anchor, offsetbox, loc,
+ borderpad=0.5, bbox_transform=None):
+ super(AnchoredLocatorBase, self).__init__(
+ loc, pad=0., child=None, borderpad=borderpad,
+ bbox_to_anchor=bbox_to_anchor, bbox_transform=bbox_transform
+ )
+
+ def draw(self, renderer):
+ raise RuntimeError("No draw method should be called")
+
+ def __call__(self, ax, renderer):
+ self.axes = ax
+
+ fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
+ self._update_offset_func(renderer, fontsize)
+
+ width, height, xdescent, ydescent = self.get_extent(renderer)
+
+ px, py = self.get_offset(width, height, 0, 0, renderer)
+ bbox_canvas = Bbox.from_bounds(px, py, width, height)
+ tr = ax.figure.transFigure.inverted()
+ bb = TransformedBbox(bbox_canvas, tr)
+
+ return bb
+
+
+class AnchoredSizeLocator(AnchoredLocatorBase):
+ def __init__(self, bbox_to_anchor, x_size, y_size, loc,
+ borderpad=0.5, bbox_transform=None):
+
+ super(AnchoredSizeLocator, self).__init__(
+ bbox_to_anchor, None, loc,
+ borderpad=borderpad, bbox_transform=bbox_transform
+ )
+
+ self.x_size = Size.from_any(x_size)
+ self.y_size = Size.from_any(y_size)
+
+ def get_extent(self, renderer):
+ x, y, w, h = self.get_bbox_to_anchor().bounds
+
+ dpi = renderer.points_to_pixels(72.)
+
+ r, a = self.x_size.get_size(renderer)
+ width = w * r + a * dpi
+
+ r, a = self.y_size.get_size(renderer)
+ height = h * r + a * dpi
+ xd, yd = 0, 0
+
+ fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
+ pad = self.pad * fontsize
+
+ return width + 2 * pad, height + 2 * pad, xd + pad, yd + pad
+
+
+class AnchoredZoomLocator(AnchoredLocatorBase):
+ def __init__(self, parent_axes, zoom, loc,
+ borderpad=0.5,
+ bbox_to_anchor=None,
+ bbox_transform=None):
+ self.parent_axes = parent_axes
+ self.zoom = zoom
+
+ if bbox_to_anchor is None:
+ bbox_to_anchor = parent_axes.bbox
+
+ super(AnchoredZoomLocator, self).__init__(
+ bbox_to_anchor, None, loc, borderpad=borderpad,
+ bbox_transform=bbox_transform)
+
+ def get_extent(self, renderer):
+ bb = TransformedBbox(self.axes.viewLim,
+ self.parent_axes.transData)
+
+ x, y, w, h = bb.bounds
+ fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
+ pad = self.pad * fontsize
+
+ return abs(w * self.zoom) + 2 * pad, abs(h * self.zoom) + 2 * pad, pad, pad
+
+
+class BboxPatch(Patch):
+ @docstring.dedent_interpd
+ def __init__(self, bbox, **kwargs):
+ """
+ Patch showing the shape bounded by a Bbox.
+
+ Parameters
+ ----------
+ bbox : `matplotlib.transforms.Bbox`
+ Bbox to use for the extents of this patch.
+
+ **kwargs
+ Patch properties. Valid arguments include:
+ %(Patch)s
+ """
+ if "transform" in kwargs:
+ raise ValueError("transform should not be set")
+
+ kwargs["transform"] = IdentityTransform()
+ Patch.__init__(self, **kwargs)
+ self.bbox = bbox
+
+ def get_path(self):
+ x0, y0, x1, y1 = self.bbox.extents
+
+ verts = [(x0, y0),
+ (x1, y0),
+ (x1, y1),
+ (x0, y1),
+ (x0, y0),
+ (0, 0)]
+
+ codes = [Path.MOVETO,
+ Path.LINETO,
+ Path.LINETO,
+ Path.LINETO,
+ Path.LINETO,
+ Path.CLOSEPOLY]
+
+ return Path(verts, codes)
+
+ get_path.__doc__ = Patch.get_path.__doc__
+
+
+class BboxConnector(Patch):
+ @staticmethod
+ def get_bbox_edge_pos(bbox, loc):
+ """
+ Helper function to obtain the location of a corner of a bbox
+
+ Parameters
+ ----------
+ bbox : `matplotlib.transforms.Bbox`
+
+ loc : {1, 2, 3, 4}
+ Corner of *bbox*. Valid values are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4
+
+ Returns
+ -------
+ x, y : float
+ Coordinates of the corner specified by *loc*.
+ """
+ x0, y0, x1, y1 = bbox.extents
+ if loc == 1:
+ return x1, y1
+ elif loc == 2:
+ return x0, y1
+ elif loc == 3:
+ return x0, y0
+ elif loc == 4:
+ return x1, y0
+
+ @staticmethod
+ def connect_bbox(bbox1, bbox2, loc1, loc2=None):
+ """
+ Helper function to obtain a Path from one bbox to another.
+
+ Parameters
+ ----------
+ bbox1, bbox2 : `matplotlib.transforms.Bbox`
+ Bounding boxes to connect.
+
+ loc1 : {1, 2, 3, 4}
+ Corner of *bbox1* to use. Valid values are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4
+
+ loc2 : {1, 2, 3, 4}, optional
+ Corner of *bbox2* to use. If None, defaults to *loc1*.
+ Valid values are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4
+
+ Returns
+ -------
+ path : `matplotlib.path.Path`
+ A line segment from the *loc1* corner of *bbox1* to the *loc2*
+ corner of *bbox2*.
+ """
+ if isinstance(bbox1, Rectangle):
+            transform = bbox1.get_transform()
+ bbox1 = Bbox.from_bounds(0, 0, 1, 1)
+ bbox1 = TransformedBbox(bbox1, transform)
+
+ if isinstance(bbox2, Rectangle):
+ transform = bbox2.get_transform()
+ bbox2 = Bbox.from_bounds(0, 0, 1, 1)
+ bbox2 = TransformedBbox(bbox2, transform)
+
+ if loc2 is None:
+ loc2 = loc1
+
+ x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)
+ x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)
+
+ verts = [[x1, y1], [x2, y2]]
+ codes = [Path.MOVETO, Path.LINETO]
+
+ return Path(verts, codes)
+
+ @docstring.dedent_interpd
+ def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
+ """
+ Connect two bboxes with a straight line.
+
+ Parameters
+ ----------
+ bbox1, bbox2 : `matplotlib.transforms.Bbox`
+ Bounding boxes to connect.
+
+ loc1 : {1, 2, 3, 4}
+ Corner of *bbox1* to draw the line. Valid values are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4
+
+ loc2 : {1, 2, 3, 4}, optional
+ Corner of *bbox2* to draw the line. If None, defaults to *loc1*.
+ Valid values are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4
+
+ **kwargs
+ Patch properties for the line drawn. Valid arguments include:
+ %(Patch)s
+ """
+ if "transform" in kwargs:
+ raise ValueError("transform should not be set")
+
+ kwargs["transform"] = IdentityTransform()
+ Patch.__init__(self, fill=False, **kwargs)
+ self.bbox1 = bbox1
+ self.bbox2 = bbox2
+ self.loc1 = loc1
+ self.loc2 = loc2
+
+ def get_path(self):
+ return self.connect_bbox(self.bbox1, self.bbox2,
+ self.loc1, self.loc2)
+
+ get_path.__doc__ = Patch.get_path.__doc__
+
+
+class BboxConnectorPatch(BboxConnector):
+ @docstring.dedent_interpd
+ def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):
+ """
+ Connect two bboxes with a quadrilateral.
+
+ The quadrilateral is specified by two lines that start and end at corners
+ of the bboxes. The four sides of the quadrilateral are defined by the two
+ lines given, the line between the two corners specified in *bbox1* and the
+ line between the two corners specified in *bbox2*.
+
+ Parameters
+ ----------
+ bbox1, bbox2 : `matplotlib.transforms.Bbox`
+ Bounding boxes to connect.
+
+ loc1a, loc2a : {1, 2, 3, 4}
+ Corners of *bbox1* and *bbox2* to draw the first line.
+ Valid values are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4
+
+ loc1b, loc2b : {1, 2, 3, 4}
+ Corners of *bbox1* and *bbox2* to draw the second line.
+ Valid values are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4
+
+ **kwargs
+ Patch properties for the line drawn:
+ %(Patch)s
+ """
+ if "transform" in kwargs:
+ raise ValueError("transform should not be set")
+ BboxConnector.__init__(self, bbox1, bbox2, loc1a, loc2a, **kwargs)
+ self.loc1b = loc1b
+ self.loc2b = loc2b
+
+ def get_path(self):
+ path1 = self.connect_bbox(self.bbox1, self.bbox2, self.loc1, self.loc2)
+ path2 = self.connect_bbox(self.bbox2, self.bbox1,
+ self.loc2b, self.loc1b)
+ path_merged = (list(path1.vertices) +
+ list(path2.vertices) +
+ [path1.vertices[0]])
+ return Path(path_merged)
+
+ get_path.__doc__ = BboxConnector.get_path.__doc__
+
+
+def _add_inset_axes(parent_axes, inset_axes):
+ """Helper function to add an inset axes and disable navigation in it"""
+ parent_axes.figure.add_axes(inset_axes)
+ inset_axes.set_navigate(False)
+
+
+@docstring.dedent_interpd
+def inset_axes(parent_axes, width, height, loc=1,
+ bbox_to_anchor=None, bbox_transform=None,
+ axes_class=None,
+ axes_kwargs=None,
+ borderpad=0.5):
+ """
+ Create an inset axes with a given width and height.
+
+ Both sizes used can be specified either in inches or percentage.
+ For example,::
+
+ inset_axes(parent_axes, width='40%%', height='30%%', loc=3)
+
+    creates an inset axes in the lower left corner of *parent_axes* which spans
+    30%% of the height and 40%% of the width of *parent_axes*. Since the usage
+ of `.inset_axes` may become slightly tricky when exceeding such standard
+ cases, it is recommended to read
+ :ref:`the examples <sphx_glr_gallery_axes_grid1_inset_locator_demo.py>`.
+
+ Parameters
+ ----------
+ parent_axes : `matplotlib.axes.Axes`
+ Axes to place the inset axes.
+
+ width, height : float or str
+ Size of the inset axes to create. If a float is provided, it is
+ the size in inches, e.g. *width=1.3*. If a string is provided, it is
+ the size in relative units, e.g. *width='40%%'*. By default, i.e. if
+ neither *bbox_to_anchor* nor *bbox_transform* are specified, those
+ are relative to the parent_axes. Otherwise they are to be understood
+ relative to the bounding box provided via *bbox_to_anchor*.
+
+ loc : int or string, optional, default to 1
+ Location to place the inset axes. The valid locations are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4,
+ 'right' : 5,
+ 'center left' : 6,
+ 'center right' : 7,
+ 'lower center' : 8,
+ 'upper center' : 9,
+ 'center' : 10
+
+ bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
+ Bbox that the inset axes will be anchored to. If None,
+ *parent_axes.bbox* is used. If a tuple, can be either
+ [left, bottom, width, height], or [left, bottom].
+ If the kwargs *width* and/or *height* are specified in relative units,
+ the 2-tuple [left, bottom] cannot be used. Note that
+ the units of the bounding box are determined through the transform
+ in use. When using *bbox_to_anchor* it almost always makes sense to
+ also specify a *bbox_transform*. This might often be the axes transform
+ *parent_axes.transAxes*.
+
+ bbox_transform : `matplotlib.transforms.Transform`, optional
+ Transformation for the bbox that contains the inset axes.
+ If None, a `.transforms.IdentityTransform` is used (i.e. pixel
+ coordinates). This is useful when not providing any argument to
+ *bbox_to_anchor*. When using *bbox_to_anchor* it almost always makes
+ sense to also specify a *bbox_transform*. This might often be the
+ axes transform *parent_axes.transAxes*. Inversely, when specifying
+ the axes- or figure-transform here, be aware that not specifying
+ *bbox_to_anchor* will use *parent_axes.bbox*, the units of which are
+ in display (pixel) coordinates.
+
+ axes_class : `matplotlib.axes.Axes` type, optional
+ If specified, the inset axes created will be created with this class's
+ constructor.
+
+ axes_kwargs : dict, optional
+ Keyworded arguments to pass to the constructor of the inset axes.
+ Valid arguments include:
+ %(Axes)s
+
+ borderpad : float, optional
+ Padding between inset axes and the bbox_to_anchor. Defaults to 0.5.
+ The units are axes font size, i.e. for a default font size of 10 points
+ *borderpad = 0.5* is equivalent to a padding of 5 points.
+
+ Returns
+ -------
+ inset_axes : `axes_class`
+ Inset axes object created.
+ """
+
+ if axes_class is None:
+ axes_class = HostAxes
+
+ if axes_kwargs is None:
+ inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
+ else:
+ inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
+ **axes_kwargs)
+
+ if bbox_transform in [parent_axes.transAxes,
+ parent_axes.figure.transFigure]:
+ if bbox_to_anchor is None:
+ warnings.warn("Using the axes or figure transform requires a "
+ "bounding box in the respective coordinates. "
+ "Using bbox_to_anchor=(0,0,1,1) now.")
+ bbox_to_anchor = (0, 0, 1, 1)
+
+ if bbox_to_anchor is None:
+ bbox_to_anchor = parent_axes.bbox
+
+ if isinstance(bbox_to_anchor, tuple) and \
+ (isinstance(width, str) or isinstance(height, str)):
+ if len(bbox_to_anchor) != 4:
+ raise ValueError("Using relative units for width or height "
+                             "requires providing a 4-tuple or a "
+                             "`Bbox` instance to `bbox_to_anchor`.")
+
+ axes_locator = AnchoredSizeLocator(bbox_to_anchor,
+ width, height,
+ loc=loc,
+ bbox_transform=bbox_transform,
+ borderpad=borderpad)
+
+ inset_axes.set_axes_locator(axes_locator)
+
+ _add_inset_axes(parent_axes, inset_axes)
+
+ return inset_axes
+
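+# Illustrative sketch of the two sizing modes described above (comment only;
+# "ax" is an assumed parent axes):
+#
+#     # absolute size in inches, anchored in the upper right corner
+#     axins = inset_axes(ax, width=1.3, height=0.9, loc=1)
+#     # relative size, anchored to a bbox given in axes coordinates
+#     axins2 = inset_axes(ax, width="40%", height="30%", loc=3,
+#                         bbox_to_anchor=(0.1, 0.1, 0.8, 0.8),
+#                         bbox_transform=ax.transAxes)
+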
+
+@docstring.dedent_interpd
+def zoomed_inset_axes(parent_axes, zoom, loc=1,
+ bbox_to_anchor=None, bbox_transform=None,
+ axes_class=None,
+ axes_kwargs=None,
+ borderpad=0.5):
+ """
+ Create an anchored inset axes by scaling a parent axes. For usage, also see
+ :ref:`the examples <sphx_glr_gallery_axes_grid1_inset_locator_demo2.py>`.
+
+ Parameters
+ ----------
+ parent_axes : `matplotlib.axes.Axes`
+ Axes to place the inset axes.
+
+ zoom : float
+        Scaling factor of the data axes. *zoom* > 1 will enlarge the
+ coordinates (i.e., "zoomed in"), while *zoom* < 1 will shrink the
+ coordinates (i.e., "zoomed out").
+
+ loc : int or string, optional, default to 1
+ Location to place the inset axes. The valid locations are::
+
+ 'upper right' : 1,
+ 'upper left' : 2,
+ 'lower left' : 3,
+ 'lower right' : 4,
+ 'right' : 5,
+ 'center left' : 6,
+ 'center right' : 7,
+ 'lower center' : 8,
+ 'upper center' : 9,
+ 'center' : 10
+
+ bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
+ Bbox that the inset axes will be anchored to. If None,
+ *parent_axes.bbox* is used. If a tuple, can be either
+ [left, bottom, width, height], or [left, bottom].
+ If the kwargs *width* and/or *height* are specified in relative units,
+ the 2-tuple [left, bottom] cannot be used. Note that
+ the units of the bounding box are determined through the transform
+ in use. When using *bbox_to_anchor* it almost always makes sense to
+ also specify a *bbox_transform*. This might often be the axes transform
+ *parent_axes.transAxes*.
+
+ bbox_transform : `matplotlib.transforms.Transform`, optional
+ Transformation for the bbox that contains the inset axes.
+ If None, a `.transforms.IdentityTransform` is used (i.e. pixel
+ coordinates). This is useful when not providing any argument to
+ *bbox_to_anchor*. When using *bbox_to_anchor* it almost always makes
+ sense to also specify a *bbox_transform*. This might often be the
+ axes transform *parent_axes.transAxes*. Inversely, when specifying
+ the axes- or figure-transform here, be aware that not specifying
+ *bbox_to_anchor* will use *parent_axes.bbox*, the units of which are
+ in display (pixel) coordinates.
+
+ axes_class : `matplotlib.axes.Axes` type, optional
+ If specified, the inset axes created will be created with this class's
+ constructor.
+
+ axes_kwargs : dict, optional
+ Keyworded arguments to pass to the constructor of the inset axes.
+ Valid arguments include:
+ %(Axes)s
+
+ borderpad : float, optional
+ Padding between inset axes and the bbox_to_anchor. Defaults to 0.5.
+ The units are axes font size, i.e. for a default font size of 10 points
+ *borderpad = 0.5* is equivalent to a padding of 5 points.
+
+ Returns
+ -------
+ inset_axes : `axes_class`
+ Inset axes object created.
+ """
+
+ if axes_class is None:
+ axes_class = HostAxes
+
+ if axes_kwargs is None:
+ inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
+ else:
+ inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
+ **axes_kwargs)
+
+ axes_locator = AnchoredZoomLocator(parent_axes, zoom=zoom, loc=loc,
+ bbox_to_anchor=bbox_to_anchor,
+ bbox_transform=bbox_transform,
+ borderpad=borderpad)
+ inset_axes.set_axes_locator(axes_locator)
+
+ _add_inset_axes(parent_axes, inset_axes)
+
+ return inset_axes
+
+
+@docstring.dedent_interpd
+def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
+ """
+ Draw a box to mark the location of an area represented by an inset axes.
+
+ This function draws a box in *parent_axes* at the bounding box of
+ *inset_axes*, and shows a connection with the inset axes by drawing lines
+ at the corners, giving a "zoomed in" effect.
+
+ Parameters
+ ----------
+ parent_axes : `matplotlib.axes.Axes`
+ Axes which contains the area of the inset axes.
+
+ inset_axes : `matplotlib.axes.Axes`
+ The inset axes.
+
+ loc1, loc2 : {1, 2, 3, 4}
+ Corners to use for connecting the inset axes and the area in the
+ parent axes.
+
+ **kwargs
+ Patch properties for the lines and box drawn:
+ %(Patch)s
+
+ Returns
+ -------
+ pp : `matplotlib.patches.Patch`
+ The patch drawn to represent the area of the inset axes.
+
+ p1, p2 : `matplotlib.patches.Patch`
+ The patches connecting two corners of the inset axes and its area.
+ """
+ rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)
+
+ fill = kwargs.pop("fill", False)
+ pp = BboxPatch(rect, fill=fill, **kwargs)
+ parent_axes.add_patch(pp)
+
+ p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1, **kwargs)
+ inset_axes.add_patch(p1)
+ p1.set_clip_on(False)
+ p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2, **kwargs)
+ inset_axes.add_patch(p2)
+ p2.set_clip_on(False)
+
+ return pp, p1, p2
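+
+
+if __name__ == '__main__':
+    # Illustrative demo only (not part of upstream matplotlib): zoom into a
+    # region of a parent axes and mark the zoomed area with connector lines.
+    import matplotlib.pyplot as plt
+    fig, ax = plt.subplots()
+    ax.plot(range(100))
+    axins = zoomed_inset_axes(ax, zoom=3, loc=4)   # loc=4 is "lower right"
+    axins.plot(range(100))
+    axins.set_xlim(40, 60)   # sub-region of the parent data
+    axins.set_ylim(40, 60)
+    # connect corners 1 (upper right) and 3 (lower left) of the marked box
+    mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="0.5")
+    plt.show()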
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/mpl_axes.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/mpl_axes.py
new file mode 100644
index 00000000000..aaff7b7692a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/mpl_axes.py
@@ -0,0 +1,154 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import matplotlib.axes as maxes
+from matplotlib.artist import Artist
+from matplotlib.axis import XAxis, YAxis
+
+class SimpleChainedObjects(object):
+ def __init__(self, objects):
+ self._objects = objects
+
+ def __getattr__(self, k):
+ _a = SimpleChainedObjects([getattr(a, k) for a in self._objects])
+ return _a
+
+ def __call__(self, *kl, **kwargs):
+ for m in self._objects:
+ m(*kl, **kwargs)
+
+
+class Axes(maxes.Axes):
+
+ class AxisDict(dict):
+ def __init__(self, axes):
+ self.axes = axes
+ super(Axes.AxisDict, self).__init__()
+
+ def __getitem__(self, k):
+ if isinstance(k, tuple):
+ r = SimpleChainedObjects(
+ [super(Axes.AxisDict, self).__getitem__(k1) for k1 in k])
+ return r
+ elif isinstance(k, slice):
+ if k.start is None and k.stop is None and k.step is None:
+ r = SimpleChainedObjects(list(six.itervalues(self)))
+ return r
+ else:
+ raise ValueError("Unsupported slice")
+ else:
+ return dict.__getitem__(self, k)
+
+ def __call__(self, *v, **kwargs):
+ return maxes.Axes.axis(self.axes, *v, **kwargs)
+
+ def __init__(self, *kl, **kw):
+ super(Axes, self).__init__(*kl, **kw)
+
+ def _init_axis_artists(self, axes=None):
+ if axes is None:
+ axes = self
+
+ self._axislines = self.AxisDict(self)
+
+ self._axislines["bottom"] = SimpleAxisArtist(self.xaxis, 1, self.spines["bottom"])
+ self._axislines["top"] = SimpleAxisArtist(self.xaxis, 2, self.spines["top"])
+ self._axislines["left"] = SimpleAxisArtist(self.yaxis, 1, self.spines["left"])
+ self._axislines["right"] = SimpleAxisArtist(self.yaxis, 2, self.spines["right"])
+
+
+ def _get_axislines(self):
+ return self._axislines
+
+ axis = property(_get_axislines)
+
+ def cla(self):
+
+ super(Axes, self).cla()
+ self._init_axis_artists()
+
+
+class SimpleAxisArtist(Artist):
+ def __init__(self, axis, axisnum, spine):
+ self._axis = axis
+ self._axisnum = axisnum
+ self.line = spine
+
+ if isinstance(axis, XAxis):
+ self._axis_direction = ["bottom", "top"][axisnum-1]
+ elif isinstance(axis, YAxis):
+ self._axis_direction = ["left", "right"][axisnum-1]
+ else:
+ raise ValueError("axis must be instance of XAxis or YAxis : %s is provided" % (axis,))
+ Artist.__init__(self)
+
+
+ def _get_major_ticks(self):
+ tickline = "tick%dline" % self._axisnum
+ return SimpleChainedObjects([getattr(tick, tickline)
+ for tick in self._axis.get_major_ticks()])
+
+ def _get_major_ticklabels(self):
+ label = "label%d" % self._axisnum
+ return SimpleChainedObjects([getattr(tick, label)
+ for tick in self._axis.get_major_ticks()])
+
+ def _get_label(self):
+ return self._axis.label
+
+ major_ticks = property(_get_major_ticks)
+ major_ticklabels = property(_get_major_ticklabels)
+ label = property(_get_label)
+
+ def set_visible(self, b):
+ self.toggle(all=b)
+ self.line.set_visible(b)
+ self._axis.set_visible(True)
+ Artist.set_visible(self, b)
+
+ def set_label(self, txt):
+ self._axis.set_label_text(txt)
+
+ def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
+
+ if all:
+ _ticks, _ticklabels, _label = True, True, True
+ elif all is not None:
+ _ticks, _ticklabels, _label = False, False, False
+ else:
+ _ticks, _ticklabels, _label = None, None, None
+
+ if ticks is not None:
+ _ticks = ticks
+ if ticklabels is not None:
+ _ticklabels = ticklabels
+ if label is not None:
+ _label = label
+
+ tickOn = "tick%dOn" % self._axisnum
+ labelOn = "label%dOn" % self._axisnum
+
+ if _ticks is not None:
+ tickparam = {tickOn: _ticks}
+ self._axis.set_tick_params(**tickparam)
+ if _ticklabels is not None:
+ tickparam = {labelOn: _ticklabels}
+ self._axis.set_tick_params(**tickparam)
+
+ if _label is not None:
+ pos = self._axis.get_label_position()
+ if (pos == self._axis_direction) and not _label:
+ self._axis.label.set_visible(False)
+ elif _label:
+ self._axis.label.set_visible(True)
+ self._axis.set_label_position(self._axis_direction)
+
+
+if __name__ == '__main__':
+ import matplotlib.pyplot as plt
+ fig = plt.figure()
+ ax = Axes(fig, [0.1, 0.1, 0.8, 0.8])
+ fig.add_axes(ax)
+ ax.cla()
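+    # Illustrative additions (not part of upstream matplotlib): the ``axis``
+    # property is an AxisDict of SimpleAxisArtist objects, and a tuple key
+    # chains several of them through SimpleChainedObjects.
+    ax.axis["top", "right"].set_visible(False)    # hide top and right sides
+    ax.axis["bottom"].toggle(ticklabels=False)    # keep ticks, drop labels
+    ax.axis["left"].label.set_text("y value")     # set the y-axis label text
+    plt.show()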
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/parasite_axes.py b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/parasite_axes.py
new file mode 100644
index 00000000000..16a67b4d1ff
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axes_grid1/parasite_axes.py
@@ -0,0 +1,486 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from matplotlib import (
+ artist as martist, collections as mcoll, transforms as mtransforms,
+ rcParams)
+from matplotlib.axes import subplot_class_factory
+from matplotlib.transforms import Bbox
+from .mpl_axes import Axes
+
+import numpy as np
+
+
+class ParasiteAxesBase(object):
+
+ def get_images_artists(self):
+ artists = {a for a in self.get_children() if a.get_visible()}
+ images = {a for a in self.images if a.get_visible()}
+
+ return list(images), list(artists - images)
+
+ def __init__(self, parent_axes, **kargs):
+
+ self._parent_axes = parent_axes
+ kargs.update(dict(frameon=False))
+ self._get_base_axes_attr("__init__")(self, parent_axes.figure,
+ parent_axes._position, **kargs)
+
+ def cla(self):
+ self._get_base_axes_attr("cla")(self)
+
+ martist.setp(self.get_children(), visible=False)
+ self._get_lines = self._parent_axes._get_lines
+
+ # In mpl's Axes, zorders of x- and y-axis are originally set
+ # within Axes.draw().
+ if self._axisbelow:
+ self.xaxis.set_zorder(0.5)
+ self.yaxis.set_zorder(0.5)
+ else:
+ self.xaxis.set_zorder(2.5)
+ self.yaxis.set_zorder(2.5)
+
+
+_parasite_axes_classes = {}
+def parasite_axes_class_factory(axes_class=None):
+ if axes_class is None:
+ axes_class = Axes
+
+ new_class = _parasite_axes_classes.get(axes_class)
+ if new_class is None:
+ def _get_base_axes_attr(self, attrname):
+ return getattr(axes_class, attrname)
+
+ new_class = type(str("%sParasite" % (axes_class.__name__)),
+ (ParasiteAxesBase, axes_class),
+ {'_get_base_axes_attr': _get_base_axes_attr})
+ _parasite_axes_classes[axes_class] = new_class
+
+ return new_class
+
+ParasiteAxes = parasite_axes_class_factory()
+
+# #class ParasiteAxes(ParasiteAxesBase, Axes):
+
+# @classmethod
+# def _get_base_axes_attr(cls, attrname):
+# return getattr(Axes, attrname)
+
+
+
+class ParasiteAxesAuxTransBase(object):
+ def __init__(self, parent_axes, aux_transform, viewlim_mode=None,
+ **kwargs):
+
+ self.transAux = aux_transform
+ self.set_viewlim_mode(viewlim_mode)
+
+ self._parasite_axes_class.__init__(self, parent_axes, **kwargs)
+
+ def _set_lim_and_transforms(self):
+
+ self.transAxes = self._parent_axes.transAxes
+
+ self.transData = \
+ self.transAux + \
+ self._parent_axes.transData
+
+ self._xaxis_transform = mtransforms.blended_transform_factory(
+ self.transData, self.transAxes)
+ self._yaxis_transform = mtransforms.blended_transform_factory(
+ self.transAxes, self.transData)
+
+ def set_viewlim_mode(self, mode):
+ if mode not in [None, "equal", "transform"]:
+ raise ValueError("Unknown mode : %s" % (mode,))
+ else:
+ self._viewlim_mode = mode
+
+ def get_viewlim_mode(self):
+ return self._viewlim_mode
+
+
+ def update_viewlim(self):
+ viewlim = self._parent_axes.viewLim.frozen()
+ mode = self.get_viewlim_mode()
+ if mode is None:
+ pass
+ elif mode == "equal":
+ self.axes.viewLim.set(viewlim)
+ elif mode == "transform":
+ self.axes.viewLim.set(viewlim.transformed(self.transAux.inverted()))
+ else:
+ raise ValueError("Unknown mode : %s" % (self._viewlim_mode,))
+
+
+ def _pcolor(self, method_name, *XYC, **kwargs):
+ if len(XYC) == 1:
+ C = XYC[0]
+ ny, nx = C.shape
+
+ gx = np.arange(-0.5, nx, 1.)
+ gy = np.arange(-0.5, ny, 1.)
+
+ X, Y = np.meshgrid(gx, gy)
+ else:
+ X, Y, C = XYC
+
+ pcolor_routine = self._get_base_axes_attr(method_name)
+
+ if "transform" in kwargs:
+ mesh = pcolor_routine(self, X, Y, C, **kwargs)
+ else:
+ orig_shape = X.shape
+ xy = np.vstack([X.flat, Y.flat])
+ xyt = xy.transpose()
+ wxy = self.transAux.transform(xyt)
+ gx, gy = wxy[:,0].reshape(orig_shape), wxy[:,1].reshape(orig_shape)
+ mesh = pcolor_routine(self, gx, gy, C, **kwargs)
+ mesh.set_transform(self._parent_axes.transData)
+
+ return mesh
+
+ def pcolormesh(self, *XYC, **kwargs):
+ return self._pcolor("pcolormesh", *XYC, **kwargs)
+
+ def pcolor(self, *XYC, **kwargs):
+ return self._pcolor("pcolor", *XYC, **kwargs)
+
+
+ def _contour(self, method_name, *XYCL, **kwargs):
+
+ if len(XYCL) <= 2:
+ C = XYCL[0]
+ ny, nx = C.shape
+
+ gx = np.arange(0., nx, 1.)
+ gy = np.arange(0., ny, 1.)
+
+ X,Y = np.meshgrid(gx, gy)
+ CL = XYCL
+ else:
+ X, Y = XYCL[:2]
+ CL = XYCL[2:]
+
+ contour_routine = self._get_base_axes_attr(method_name)
+
+ if "transform" in kwargs:
+ cont = contour_routine(self, X, Y, *CL, **kwargs)
+ else:
+ orig_shape = X.shape
+ xy = np.vstack([X.flat, Y.flat])
+ xyt = xy.transpose()
+ wxy = self.transAux.transform(xyt)
+ gx, gy = wxy[:,0].reshape(orig_shape), wxy[:,1].reshape(orig_shape)
+ cont = contour_routine(self, gx, gy, *CL, **kwargs)
+ for c in cont.collections:
+ c.set_transform(self._parent_axes.transData)
+
+ return cont
+
+ def contour(self, *XYCL, **kwargs):
+ return self._contour("contour", *XYCL, **kwargs)
+
+ def contourf(self, *XYCL, **kwargs):
+ return self._contour("contourf", *XYCL, **kwargs)
+
+ def apply_aspect(self, position=None):
+ self.update_viewlim()
+ self._get_base_axes_attr("apply_aspect")(self)
+ #ParasiteAxes.apply_aspect()
+
+
+
+_parasite_axes_auxtrans_classes = {}
+def parasite_axes_auxtrans_class_factory(axes_class=None):
+ if axes_class is None:
+ parasite_axes_class = ParasiteAxes
+ elif not issubclass(axes_class, ParasiteAxesBase):
+ parasite_axes_class = parasite_axes_class_factory(axes_class)
+ else:
+ parasite_axes_class = axes_class
+
+ new_class = _parasite_axes_auxtrans_classes.get(parasite_axes_class)
+ if new_class is None:
+ new_class = type(str("%sParasiteAuxTrans" % (parasite_axes_class.__name__)),
+ (ParasiteAxesAuxTransBase, parasite_axes_class),
+ {'_parasite_axes_class': parasite_axes_class,
+ 'name': 'parasite_axes'})
+ _parasite_axes_auxtrans_classes[parasite_axes_class] = new_class
+
+ return new_class
+
+
+ParasiteAxesAuxTrans = parasite_axes_auxtrans_class_factory(axes_class=ParasiteAxes)
+
+
+
+
+def _get_handles(ax):
+ handles = ax.lines[:]
+ handles.extend(ax.patches)
+ handles.extend([c for c in ax.collections
+ if isinstance(c, mcoll.LineCollection)])
+ handles.extend([c for c in ax.collections
+ if isinstance(c, mcoll.RegularPolyCollection)])
+ handles.extend([c for c in ax.collections
+ if isinstance(c, mcoll.CircleCollection)])
+
+ return handles
+
+
+class HostAxesBase(object):
+ def __init__(self, *args, **kwargs):
+
+ self.parasites = []
+ self._get_base_axes_attr("__init__")(self, *args, **kwargs)
+
+
+ def get_aux_axes(self, tr, viewlim_mode="equal", axes_class=None):
+ parasite_axes_class = parasite_axes_auxtrans_class_factory(axes_class)
+ ax2 = parasite_axes_class(self, tr, viewlim_mode)
+ # note that ax2.transData == tr + ax1.transData
+ # Anything you draw in ax2 will match the ticks and grids of ax1.
+ self.parasites.append(ax2)
+ ax2._remove_method = lambda h: self.parasites.remove(h)
+ return ax2
+
+ def _get_legend_handles(self, legend_handler_map=None):
+ # don't use this!
+ Axes_get_legend_handles = self._get_base_axes_attr("_get_legend_handles")
+ all_handles = list(Axes_get_legend_handles(self, legend_handler_map))
+
+ for ax in self.parasites:
+ all_handles.extend(ax._get_legend_handles(legend_handler_map))
+
+ return all_handles
+
+
+ def draw(self, renderer):
+
+ orig_artists = list(self.artists)
+ orig_images = list(self.images)
+
+ if hasattr(self, "get_axes_locator"):
+ locator = self.get_axes_locator()
+ if locator:
+ pos = locator(self, renderer)
+ self.set_position(pos, which="active")
+ self.apply_aspect(pos)
+ else:
+ self.apply_aspect()
+ else:
+ self.apply_aspect()
+
+ rect = self.get_position()
+
+ for ax in self.parasites:
+ ax.apply_aspect(rect)
+ images, artists = ax.get_images_artists()
+ self.images.extend(images)
+ self.artists.extend(artists)
+
+ self._get_base_axes_attr("draw")(self, renderer)
+ self.artists = orig_artists
+ self.images = orig_images
+
+
+ def cla(self):
+
+ for ax in self.parasites:
+ ax.cla()
+
+ self._get_base_axes_attr("cla")(self)
+ #super(HostAxes, self).cla()
+
+
+ def twinx(self, axes_class=None):
+ """
+ Create a twin of Axes for generating a plot with a shared x-axis
+ but an independent y-axis. The y-axis of self will have ticks on
+ the left and the returned axes will have ticks on the right.
+ """
+
+ if axes_class is None:
+ axes_class = self._get_base_axes()
+
+ parasite_axes_class = parasite_axes_class_factory(axes_class)
+
+ ax2 = parasite_axes_class(self, sharex=self, frameon=False)
+ self.parasites.append(ax2)
+
+ self.axis["right"].set_visible(False)
+
+ ax2.axis["right"].set_visible(True)
+ ax2.axis["left", "top", "bottom"].set_visible(False)
+
+ def _remove_method(h):
+ self.parasites.remove(h)
+ self.axis["right"].set_visible(True)
+ self.axis["right"].toggle(ticklabels=False, label=False)
+ ax2._remove_method = _remove_method
+
+ return ax2
+
+ def twiny(self, axes_class=None):
+ """
+ Create a twin of Axes for generating a plot with a shared y-axis
+ but an independent x-axis. The x-axis of self will have ticks on
+ the bottom and the returned axes will have ticks on the top.
+ """
+
+ if axes_class is None:
+ axes_class = self._get_base_axes()
+
+ parasite_axes_class = parasite_axes_class_factory(axes_class)
+
+ ax2 = parasite_axes_class(self, sharey=self, frameon=False)
+ self.parasites.append(ax2)
+
+ self.axis["top"].set_visible(False)
+
+ ax2.axis["top"].set_visible(True)
+ ax2.axis["left", "right", "bottom"].set_visible(False)
+
+ def _remove_method(h):
+ self.parasites.remove(h)
+ self.axis["top"].set_visible(True)
+ self.axis["top"].toggle(ticklabels=False, label=False)
+ ax2._remove_method = _remove_method
+
+ return ax2
+
+
+ def twin(self, aux_trans=None, axes_class=None):
+ """
+ Create a twin of Axes whose data are related to those of self
+ through *aux_trans* (the identity transform by default). The left
+ and bottom axes of self stay visible, while the returned axes
+ shows its top and right axes.
+ """
+
+ if axes_class is None:
+ axes_class = self._get_base_axes()
+
+ parasite_axes_auxtrans_class = parasite_axes_auxtrans_class_factory(axes_class)
+
+ if aux_trans is None:
+ ax2 = parasite_axes_auxtrans_class(self, mtransforms.IdentityTransform(),
+ viewlim_mode="equal",
+ )
+ else:
+ ax2 = parasite_axes_auxtrans_class(self, aux_trans,
+ viewlim_mode="transform",
+ )
+ self.parasites.append(ax2)
+ ax2._remove_method = lambda h: self.parasites.remove(h)
+
+ self.axis["top", "right"].set_visible(False)
+
+ ax2.axis["top", "right"].set_visible(True)
+ ax2.axis["left", "bottom"].set_visible(False)
+
+ def _remove_method(h):
+ self.parasites.remove(h)
+ self.axis["top", "right"].set_visible(True)
+ self.axis["top", "right"].toggle(ticklabels=False, label=False)
+ ax2._remove_method = _remove_method
+
+ return ax2
+
+ def get_tightbbox(self, renderer, call_axes_locator=True):
+
+ bbs = [ax.get_tightbbox(renderer, call_axes_locator)
+ for ax in self.parasites]
+ get_tightbbox = self._get_base_axes_attr("get_tightbbox")
+ bbs.append(get_tightbbox(self, renderer, call_axes_locator))
+
+ _bbox = Bbox.union([b for b in bbs if b.width!=0 or b.height!=0])
+
+ return _bbox
+
+
+
+_host_axes_classes = {}
+def host_axes_class_factory(axes_class=None):
+ if axes_class is None:
+ axes_class = Axes
+
+ new_class = _host_axes_classes.get(axes_class)
+ if new_class is None:
+ def _get_base_axes(self):
+ return axes_class
+
+ def _get_base_axes_attr(self, attrname):
+ return getattr(axes_class, attrname)
+
+ new_class = type(str("%sHostAxes" % (axes_class.__name__)),
+ (HostAxesBase, axes_class),
+ {'_get_base_axes_attr': _get_base_axes_attr,
+ '_get_base_axes': _get_base_axes})
+
+ _host_axes_classes[axes_class] = new_class
+
+ return new_class
+
+def host_subplot_class_factory(axes_class):
+ host_axes_class = host_axes_class_factory(axes_class=axes_class)
+ subplot_host_class = subplot_class_factory(host_axes_class)
+ return subplot_host_class
+
+HostAxes = host_axes_class_factory(axes_class=Axes)
+SubplotHost = subplot_class_factory(HostAxes)
+
+
+def host_axes(*args, **kwargs):
+ """
+ Create axes that can act as a host to parasitic axes.
+
+ Parameters
+ ----------
+ figure : `matplotlib.figure.Figure`
+ Figure to which the axes will be added. Defaults to the current figure
+ `pyplot.gcf()`.
+
+ *args, **kwargs :
+ Will be passed on to the underlying ``Axes`` object creation.
+ """
+ import matplotlib.pyplot as plt
+ axes_class = kwargs.pop("axes_class", None)
+ host_axes_class = host_axes_class_factory(axes_class)
+ fig = kwargs.get("figure", None)
+ if fig is None:
+ fig = plt.gcf()
+ ax = host_axes_class(fig, *args, **kwargs)
+ fig.add_axes(ax)
+ plt.draw_if_interactive()
+ return ax
+
+def host_subplot(*args, **kwargs):
+ """
+ Create a subplot that can act as a host to parasitic axes.
+
+ Parameters
+ ----------
+ figure : `matplotlib.figure.Figure`
+ Figure to which the subplot will be added. Defaults to the current
+ figure `pyplot.gcf()`.
+
+ *args, **kwargs :
+ Will be passed on to the underlying ``Axes`` object creation.
+ """
+ import matplotlib.pyplot as plt
+ axes_class = kwargs.pop("axes_class", None)
+ host_subplot_class = host_subplot_class_factory(axes_class)
+ fig = kwargs.get("figure", None)
+ if fig is None:
+ fig = plt.gcf()
+ ax = host_subplot_class(fig, *args, **kwargs)
+ fig.add_subplot(ax)
+ plt.draw_if_interactive()
+ return ax
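+
+
+if __name__ == '__main__':
+    # Illustrative demo only (not part of upstream matplotlib): a host axes
+    # with a twinx() parasite that shares the x-axis but has its own y-axis.
+    import matplotlib.pyplot as plt
+    host = host_subplot(111)
+    par = host.twinx()
+    host.set_xlabel("x")
+    host.set_ylabel("host data")
+    par.set_ylabel("parasite data")
+    host.plot([0, 1, 2], [0, 1, 2], label="host")
+    par.plot([0, 1, 2], [0, 30, 20], label="parasite")
+    host.legend()
+    plt.show()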
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/__init__.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/__init__.py
new file mode 100644
index 00000000000..8431c0cd3ee
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/__init__.py
@@ -0,0 +1,26 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from .axislines import (
+ Axes, AxesZero, AxisArtistHelper, AxisArtistHelperRectlinear,
+ GridHelperBase, GridHelperRectlinear, Subplot, SubplotZero)
+from .axis_artist import AxisArtist, GridlinesCollection
+
+from .grid_helper_curvelinear import GridHelperCurveLinear
+
+from .floating_axes import FloatingAxes, FloatingSubplot
+
+from mpl_toolkits.axes_grid1.parasite_axes import (
+ host_axes_class_factory, parasite_axes_class_factory,
+ parasite_axes_auxtrans_class_factory, subplot_class_factory)
+
+ParasiteAxes = parasite_axes_class_factory(Axes)
+
+ParasiteAxesAuxTrans = \
+ parasite_axes_auxtrans_class_factory(axes_class=ParasiteAxes)
+
+HostAxes = host_axes_class_factory(axes_class=Axes)
+
+SubplotHost = subplot_class_factory(HostAxes)
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/angle_helper.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/angle_helper.py
new file mode 100644
index 00000000000..15732a58ec0
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/angle_helper.py
@@ -0,0 +1,416 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+import math
+
+from mpl_toolkits.axisartist.grid_finder import ExtremeFinderSimple
+
+def select_step_degree(dv):
+
+ degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]
+ degree_steps_ = [ 1, 2, 5, 10, 15, 30, 45, 90, 180, 360]
+ degree_factors = [1.] * len(degree_steps_)
+
+ minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]
+ minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]
+
+ minute_limits_ = np.array(minsec_limits_) / 60
+ minute_factors = [60.] * len(minute_limits_)
+
+ second_limits_ = np.array(minsec_limits_) / 3600
+ second_factors = [3600.] * len(second_limits_)
+
+ degree_limits = np.concatenate([second_limits_,
+ minute_limits_,
+ degree_limits_])
+
+ degree_steps = np.concatenate([minsec_steps_,
+ minsec_steps_,
+ degree_steps_])
+
+ degree_factors = np.concatenate([second_factors,
+ minute_factors,
+ degree_factors])
+
+ n = degree_limits.searchsorted(dv)
+ step = degree_steps[n]
+ factor = degree_factors[n]
+
+ return step, factor
+
+
+
+def select_step_hour(dv):
+
+ hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]
+ hour_steps_ = [1, 2 , 3, 4, 6, 8, 12, 18, 24]
+ hour_factors = [1.] * len(hour_steps_)
+
+ minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]
+ minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]
+
+ minute_limits_ = np.array(minsec_limits_) / 60
+ minute_factors = [60.] * len(minute_limits_)
+
+ second_limits_ = np.array(minsec_limits_) / 3600
+ second_factors = [3600.] * len(second_limits_)
+
+ hour_limits = np.concatenate([second_limits_,
+ minute_limits_,
+ hour_limits_])
+
+ hour_steps = np.concatenate([minsec_steps_,
+ minsec_steps_,
+ hour_steps_])
+
+ hour_factors = np.concatenate([second_factors,
+ minute_factors,
+ hour_factors])
+
+ n = hour_limits.searchsorted(dv)
+ step = hour_steps[n]
+ factor = hour_factors[n]
+
+ return step, factor
+
+
+def select_step_sub(dv):
+
+ # subarcsec or degree
+ tmp = 10.**(int(math.log10(dv))-1.)
+
+ factor = 1./tmp
+
+ if 1.5*tmp >= dv:
+ step = 1
+ elif 3.*tmp >= dv:
+ step = 2
+ elif 7.*tmp >= dv:
+ step = 5
+ else:
+ step = 1
+ factor = 0.1*factor
+
+ return step, factor
+
+
+def select_step(v1, v2, nv, hour=False, include_last=True,
+ threshold_factor=3600.):
+
+ if v1 > v2:
+ v1, v2 = v2, v1
+
+ dv = (v2 - v1) / nv
+
+ if hour:
+ _select_step = select_step_hour
+ cycle = 24.
+ else:
+ _select_step = select_step_degree
+ cycle = 360.
+
+ # for degree
+ if dv > 1./threshold_factor:
+ step, factor = _select_step(dv)
+ else:
+ step, factor = select_step_sub(dv*threshold_factor)
+
+ factor = factor * threshold_factor
+
+
+ f1, f2, fstep = v1*factor, v2*factor, step/factor
+ levs = np.arange(np.floor(f1/step), np.ceil(f2/step)+0.5, dtype=int) * step
+
+ # n : number of valid levels. If there is a cycle, e.g., [0, 90, 180,
+ # 270, 360], the grid line needs to be extended from 0 to 360, so
+ # we need to return the whole array. However, the last level (360)
+ # often needs to be ignored. In that case, we return n=4.
+
+ n = len(levs)
+
+
+ # we need to check the range of values
+ # for example, -90 to 90, 0 to 360,
+
+ if factor == 1. and (levs[-1] >= levs[0]+cycle): # check for cycle
+ nv = int(cycle / step)
+ if include_last:
+ levs = levs[0] + np.arange(0, nv+1, 1) * step
+ else:
+ levs = levs[0] + np.arange(0, nv, 1) * step
+
+ n = len(levs)
+
+ return np.array(levs), n, factor
+
+
+def select_step24(v1, v2, nv, include_last=True, threshold_factor=3600):
+ v1, v2 = v1/15., v2/15.
+ levs, n, factor = select_step(v1, v2, nv, hour=True,
+ include_last=include_last,
+ threshold_factor=threshold_factor)
+ return levs*15., n, factor
+
+def select_step360(v1, v2, nv, include_last=True, threshold_factor=3600):
+ return select_step(v1, v2, nv, hour=False,
+ include_last=include_last,
+ threshold_factor=threshold_factor)
+
+
+class LocatorBase(object):
+ def __init__(self, den, include_last=True):
+ self.den = den
+ self._include_last = include_last
+
+ @property
+ def nbins(self):
+ return self.den
+
+ @nbins.setter
+ def nbins(self, v):
+ self.den = v
+
+ def set_params(self, nbins=None):
+ if nbins is not None:
+ self.den = int(nbins)
+
+
+class LocatorHMS(LocatorBase):
+ def __call__(self, v1, v2):
+ return select_step24(v1, v2, self.den, self._include_last)
+
+class LocatorHM(LocatorBase):
+ def __call__(self, v1, v2):
+ return select_step24(v1, v2, self.den, self._include_last,
+ threshold_factor=60)
+
+class LocatorH(LocatorBase):
+ def __call__(self, v1, v2):
+ return select_step24(v1, v2, self.den, self._include_last,
+ threshold_factor=1)
+
+
+class LocatorDMS(LocatorBase):
+ def __call__(self, v1, v2):
+ return select_step360(v1, v2, self.den, self._include_last)
+
+class LocatorDM(LocatorBase):
+ def __call__(self, v1, v2):
+ return select_step360(v1, v2, self.den, self._include_last,
+ threshold_factor=60)
+
+class LocatorD(LocatorBase):
+ def __call__(self, v1, v2):
+ return select_step360(v1, v2, self.den, self._include_last,
+ threshold_factor=1)
+
+
+class FormatterDMS(object):
+ deg_mark = r"^{\circ}"
+ min_mark = r"^{\prime}"
+ sec_mark = r"^{\prime\prime}"
+
+ fmt_d = "$%d" + deg_mark + "$"
+ fmt_ds = r"$%d.%s" + deg_mark + "$"
+
+ # %s for sign
+ fmt_d_m = r"$%s%d" + deg_mark + r"\,%02d" + min_mark + "$"
+ fmt_d_ms = r"$%s%d" + deg_mark + r"\,%02d.%s" + min_mark + "$"
+
+ fmt_d_m_partial = "$%s%d" + deg_mark + r"\,%02d" + min_mark + r"\,"
+ fmt_s_partial = "%02d" + sec_mark + "$"
+ fmt_ss_partial = "%02d.%s" + sec_mark + "$"
+
+ def _get_number_fraction(self, factor):
+ ## check for fractional numbers
+ number_fraction = None
+ # check for 60
+
+ for threshold in [1, 60, 3600]:
+ if factor <= threshold:
+ break
+
+ d = factor // threshold
+ int_log_d = int(np.floor(np.log10(d)))
+ if 10**int_log_d == d and d != 1:
+ number_fraction = int_log_d
+ factor = factor // 10**int_log_d
+ return factor, number_fraction
+
+ return factor, number_fraction
+
+
+ def __call__(self, direction, factor, values):
+ if len(values) == 0:
+ return []
+ #ss = [[-1, 1][v>0] for v in values] #not py24 compliant
+ values = np.asarray(values)
+ ss = np.where(values>0, 1, -1)
+
+ sign_map = {(-1, True):"-"}
+ signs = [sign_map.get((s, v!=0), "") for s, v in zip(ss, values)]
+
+ factor, number_fraction = self._get_number_fraction(factor)
+
+ values = np.abs(values)
+
+ if number_fraction is not None:
+ values, frac_part = divmod(values, 10**number_fraction)
+ frac_fmt = "%%0%dd" % (number_fraction,)
+ frac_str = [frac_fmt % (f1,) for f1 in frac_part]
+
+ if factor == 1:
+ if number_fraction is None:
+ return [self.fmt_d % (s*int(v),) for (s, v) in zip(ss, values)]
+ else:
+ return [self.fmt_ds % (s*int(v), f1)
+ for (s, v, f1) in zip(ss, values, frac_str)]
+ elif factor == 60:
+ deg_part, min_part = divmod(values, 60)
+ if number_fraction is None:
+ return [self.fmt_d_m % (s1, d1, m1)
+ for s1, d1, m1 in zip(signs, deg_part, min_part)]
+ else:
+ return [self.fmt_d_ms % (s, d1, m1, f1)
+ for s, d1, m1, f1 in zip(signs, deg_part, min_part, frac_str)]
+
+ elif factor == 3600:
+ if ss[-1] == -1:
+ inverse_order = True
+ values = values[::-1]
+ signs = signs[::-1]
+ else:
+ inverse_order = False
+
+ l_hm_old = ""
+ r = []
+
+ deg_part, min_part_ = divmod(values, 3600)
+ min_part, sec_part = divmod(min_part_, 60)
+
+ if number_fraction is None:
+ sec_str = [self.fmt_s_partial % (s1,) for s1 in sec_part]
+ else:
+ sec_str = [self.fmt_ss_partial % (s1, f1) for s1, f1 in zip(sec_part, frac_str)]
+
+ for s, d1, m1, s1 in zip(signs, deg_part, min_part, sec_str):
+ l_hm = self.fmt_d_m_partial % (s, d1, m1)
+ if l_hm != l_hm_old:
+ l_hm_old = l_hm
+ l = l_hm + s1 #l_s
+ else:
+ l = "$" + s + s1
+ r.append(l)
+
+ if inverse_order:
+ return r[::-1]
+ else:
+ return r
+
+ else: # factor > 3600.
+ return [r"$%s^{\circ}$" % (str(v),) for v in ss*values]
+
+
+class FormatterHMS(FormatterDMS):
+ deg_mark = r"^\mathrm{h}"
+ min_mark = r"^\mathrm{m}"
+ sec_mark = r"^\mathrm{s}"
+
+ fmt_d = "$%d" + deg_mark + "$"
+ fmt_ds = r"$%d.%s" + deg_mark + "$"
+
+ # %s for sign
+ fmt_d_m = r"$%s%d" + deg_mark + r"\,%02d" + min_mark+"$"
+ fmt_d_ms = r"$%s%d" + deg_mark + r"\,%02d.%s" + min_mark+"$"
+
+ fmt_d_m_partial = "$%s%d" + deg_mark + r"\,%02d" + min_mark + r"\,"
+ fmt_s_partial = "%02d" + sec_mark + "$"
+ fmt_ss_partial = "%02d.%s" + sec_mark + "$"
+
+ def __call__(self, direction, factor, values): # hour
+ return FormatterDMS.__call__(self, direction, factor, np.asarray(values)/15.)
+
+
+
+
+
+class ExtremeFinderCycle(ExtremeFinderSimple):
+ """
+ When there is a cycle, e.g., longitude goes from 0-360.
+ """
+ def __init__(self,
+ nx, ny,
+ lon_cycle = 360.,
+ lat_cycle = None,
+ lon_minmax = None,
+ lat_minmax = (-90, 90)
+ ):
+ #self.transfrom_xy = transform_xy
+ #self.inv_transfrom_xy = inv_transform_xy
+ self.nx, self.ny = nx, ny
+ self.lon_cycle, self.lat_cycle = lon_cycle, lat_cycle
+ self.lon_minmax = lon_minmax
+ self.lat_minmax = lat_minmax
+
+
+ def __call__(self, transform_xy, x1, y1, x2, y2):
+ """
+ get extreme values.
+
+ x1, y1, x2, y2 in image coordinates (0-based)
+ nx, ny : number of divisions in each axis
+ """
+ x_, y_ = np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)
+ x, y = np.meshgrid(x_, y_)
+ lon, lat = transform_xy(np.ravel(x), np.ravel(y))
+
+ # Iron out jumps, but the algorithm should be improved.
+ # This is just a naive way of doing it and may fail in some cases.
+ # Consider replacing this with numpy.unwrap.
+ # We are ignoring invalid warnings. They are triggered when
+ # comparing arrays with NaNs using >. We are already handling
+ # that correctly using np.nanmin and np.nanmax.
+ with np.errstate(invalid='ignore'):
+ if self.lon_cycle is not None:
+ lon0 = np.nanmin(lon)
+ lon -= 360. * ((lon - lon0) > 180.)
+ if self.lat_cycle is not None:
+ lat0 = np.nanmin(lat)
+ lat -= 360. * ((lat - lat0) > 180.)
+
+ lon_min, lon_max = np.nanmin(lon), np.nanmax(lon)
+ lat_min, lat_max = np.nanmin(lat), np.nanmax(lat)
+
+ lon_min, lon_max, lat_min, lat_max = \
+ self._adjust_extremes(lon_min, lon_max, lat_min, lat_max)
+
+ return lon_min, lon_max, lat_min, lat_max
+
+
+ def _adjust_extremes(self, lon_min, lon_max, lat_min, lat_max):
+
+ lon_min, lon_max, lat_min, lat_max = \
+ self._add_pad(lon_min, lon_max, lat_min, lat_max)
+
+ # check cycle
+ if self.lon_cycle:
+ lon_max = min(lon_max, lon_min + self.lon_cycle)
+ if self.lat_cycle:
+ lat_max = min(lat_max, lat_min + self.lat_cycle)
+
+ if self.lon_minmax is not None:
+ min0 = self.lon_minmax[0]
+ lon_min = max(min0, lon_min)
+ max0 = self.lon_minmax[1]
+ lon_max = min(max0, lon_max)
+
+ if self.lat_minmax is not None:
+ min0 = self.lat_minmax[0]
+ lat_min = max(min0, lat_min)
+ max0 = self.lat_minmax[1]
+ lat_max = min(max0, lat_max)
+
+ return lon_min, lon_max, lat_min, lat_max
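+
+
+if __name__ == '__main__':
+    # Illustrative demo only (not part of upstream matplotlib): choose tick
+    # levels for a 0-90 degree range and format them as degree strings.
+    locator = LocatorDMS(5)             # aim for roughly 5 intervals
+    levs, n, factor = locator(0, 90)    # levels, number of valid levels, factor
+    formatter = FormatterDMS()
+    print(levs[:n], factor)
+    print(formatter("bottom", factor, levs[:n]))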
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_divider.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_divider.py
new file mode 100644
index 00000000000..52949405302
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_divider.py
@@ -0,0 +1,9 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axes_grid1.axes_divider import (
+ Divider, AxesLocator, SubplotDivider, AxesDivider, locatable_axes_factory,
+ make_axes_locatable)
+
+from mpl_toolkits.axes_grid.axislines import Axes
+LocatableAxes = locatable_axes_factory(Axes)
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_grid.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_grid.py
new file mode 100644
index 00000000000..58212ac89c4
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_grid.py
@@ -0,0 +1,30 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import mpl_toolkits.axes_grid1.axes_grid as axes_grid_orig
+from .axes_divider import LocatableAxes
+
+class CbarAxes(axes_grid_orig.CbarAxesBase, LocatableAxes):
+ def __init__(self, *kl, **kwargs):
+ orientation = kwargs.pop("orientation", None)
+ if orientation is None:
+ raise ValueError("orientation must be specified")
+ self.orientation = orientation
+ self._default_label_on = False
+ self.locator = None
+
+ super(LocatableAxes, self).__init__(*kl, **kwargs)
+
+ def cla(self):
+ super(LocatableAxes, self).cla()
+ self._config_axes()
+
+
+class Grid(axes_grid_orig.Grid):
+ _defaultLocatableAxesClass = LocatableAxes
+
+class ImageGrid(axes_grid_orig.ImageGrid):
+ _defaultLocatableAxesClass = LocatableAxes
+ _defaultCbarAxesClass = CbarAxes
+
+AxesGrid = ImageGrid
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_rgb.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_rgb.py
new file mode 100644
index 00000000000..695a362b57d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axes_rgb.py
@@ -0,0 +1,11 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axes_grid1.axes_rgb import (
+ make_rgb_axes, imshow_rgb, RGBAxesBase)
+
+from .axislines import Axes
+
+
+class RGBAxes(RGBAxesBase):
+ _defaultAxesClass = Axes
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axis_artist.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axis_artist.py
new file mode 100644
index 00000000000..620232112cf
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axis_artist.py
@@ -0,0 +1,1527 @@
+"""
+The axis_artist.py module provides axis-related artists. They are
+
+ * axis line
+ * tick lines
+ * tick labels
+ * axis label
+ * grid lines
+
+The main artist classes are AxisArtist and GridlinesCollection. The
+GridlinesCollection is responsible for drawing grid lines, and the
+AxisArtist is responsible for all other artists. The AxisArtist class
+has attributes that are associated with each type of artist.
+
+ * line : axis line
+ * major_ticks : major tick lines
+ * major_ticklabels : major tick labels
+ * minor_ticks : minor tick lines
+ * minor_ticklabels : minor tick labels
+ * label : axis label
+
+Typically, the AxisArtist associated with an axes will be accessed with
+the *axis* dictionary of the axes, i.e., the AxisArtist for the bottom
+axis is
+
+ ax.axis["bottom"]
+
+where *ax* is an instance of axes (mpl_toolkits.axislines.Axes). Thus,
+ax.axis["bottom"].line is an artist associated with the axis line, and
+ax.axis["bottom"].major_ticks is an artist associated with the major tick
+lines.
+
+You can change the colors, fonts, line widths, etc. of these artists
+by calling a suitable set method. For example, to change the color of the major
+ticks of the bottom axis to red,
+
+ ax.axis["bottom"].major_ticks.set_color("r")
+
+However, things like the locations of ticks and their ticklabels need
+to be changed on the grid_helper side.
+
+axis_direction
+--------------
+
+AxisArtist, AxisLabel, and TickLabels have an *axis_direction* attribute,
+which adjusts the location, angle, etc. The *axis_direction* must be
+one of [left, right, bottom, top], and they follow the matplotlib
+convention for the rectangular axes.
+
+For example, for the *bottom* axis (here, left and right are relative to
+the direction of the increasing coordinate),
+
+ * ticklabels and axislabel are on the right
+ * ticklabels and axislabel have text angle of 0
+ * ticklabels are baseline, center-aligned
+ * axislabel is top, center-aligned
+
+
+The text angles are actually relative to (90 + angle of the direction
+to the ticklabel), which gives 0 for the bottom axis.
+
+ ===================== ========== ========= ========== ==========
+ Parameter             left       bottom    right      top
+ ===================== ========== ========= ========== ==========
+ ticklabels location   left       right     right      left
+ axislabel location    left       right     right      left
+ ticklabels angle      90         0         -90        180
+ axislabel angle       180        0         0          180
+ ticklabel va          center     baseline  center     baseline
+ axislabel va          center     top       center     bottom
+ ticklabel ha          right      center    right      center
+ axislabel ha          right      center    right      center
+ ===================== ========== ========= ========== ==========
+
+
+Ticks are by default drawn on the side opposite the ticklabels. To draw
+ticks on the same side as the ticklabels,
+
+ ax.axis["bottom"].major_ticks.set_ticks_out(True)
+
+
+The following attributes can be customized (use the set_xxx methods):
+
+ * Ticks : ticksize, tick_out
+ * TickLabels : pad
+ * AxisLabel : pad
+
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+# FIXME :
+# angles are given in data coordinate - need to convert it to canvas coordinate
+
+
+import matplotlib.artist as martist
+import matplotlib.text as mtext
+import matplotlib.font_manager as font_manager
+
+from matplotlib.path import Path
+from matplotlib.transforms import (
+ Affine2D, Bbox, IdentityTransform, ScaledTranslation, TransformedPath)
+from matplotlib.collections import LineCollection
+
+from matplotlib import rcParams
+
+from matplotlib.artist import allow_rasterization
+
+import warnings
+
+import numpy as np
+
+
+import matplotlib.lines as mlines
+from .axisline_style import AxislineStyle
+
+
+class BezierPath(mlines.Line2D):
+
+ def __init__(self, path, *kl, **kw):
+ mlines.Line2D.__init__(self, [], [], *kl, **kw)
+ self._path = path
+ self._invalid = False
+
+ def recache(self):
+
+ self._transformed_path = TransformedPath(self._path, self.get_transform())
+
+ self._invalid = False
+
+ def set_path(self, path):
+ self._path = path
+ self._invalid = True
+
+
+ def draw(self, renderer):
+ if self._invalid:
+ self.recache()
+
+ if not self._visible: return
+ renderer.open_group('line2d')
+
+ gc = renderer.new_gc()
+ self._set_gc_clip(gc)
+
+ gc.set_foreground(self._color)
+ gc.set_antialiased(self._antialiased)
+ gc.set_linewidth(self._linewidth)
+ gc.set_alpha(self._alpha)
+ if self.is_dashed():
+ cap = self._dashcapstyle
+ join = self._dashjoinstyle
+ else:
+ cap = self._solidcapstyle
+ join = self._solidjoinstyle
+ gc.set_joinstyle(join)
+ gc.set_capstyle(cap)
+ gc.set_dashes(self._dashOffset, self._dashSeq)
+
+ if self._lineStyles[self._linestyle] != '_draw_nothing':
+ tpath, affine = (
+ self._transformed_path.get_transformed_path_and_affine())
+ renderer.draw_path(gc, tpath, affine.frozen())
+
+ gc.restore()
+ renderer.close_group('line2d')
+
+
+
+class UnimplementedException(Exception):
+ pass
+
+from matplotlib.artist import Artist
+
+class AttributeCopier(object):
+ def __init__(self, ref_artist, klass=Artist):
+ self._klass = klass
+ self._ref_artist = ref_artist
+ super(AttributeCopier, self).__init__()
+
+ def set_ref_artist(self, artist):
+ self._ref_artist = artist
+
+ def get_ref_artist(self):
+ raise RuntimeError("get_ref_artist must overridden")
+ #return self._ref_artist
+
+ def get_attribute_from_ref_artist(self, attr_name, default_value):
+ get_attr_method_name = "get_"+attr_name
+ c = getattr(self._klass, get_attr_method_name)(self)
+ if c == 'auto':
+ ref_artist = self.get_ref_artist()
+ if ref_artist:
+ attr = getattr(ref_artist,
+ get_attr_method_name)()
+ return attr
+ else:
+ return default_value
+
+ return c
+
+
+from matplotlib.lines import Line2D
+
+class Ticks(Line2D, AttributeCopier):
+ """
+ Ticks are derived from Line2D, and note that ticks themselves
+ are markers. Thus, you should use set_mec, set_mew, etc.
+
+ To change the tick size (length), you need to use
+ set_ticksize. To change the direction of the ticks (ticks are
+ in the opposite direction of the ticklabels by default), use
+ set_tick_out(False).
+ """
+
+ def __init__(self, ticksize, tick_out=False, **kwargs):
+ self._ticksize = ticksize
+ self.locs_angles_labels = []
+
+ self.set_tick_out(tick_out)
+
+ self._axis = kwargs.pop("axis", None)
+ if self._axis is not None:
+ if "color" not in kwargs:
+ kwargs["color"] = "auto"
+ if ("mew" not in kwargs) and ("markeredgewidth" not in kwargs):
+ kwargs["markeredgewidth"] = "auto"
+
+ Line2D.__init__(self, [0.], [0.], **kwargs)
+ AttributeCopier.__init__(self, self._axis, klass=Line2D)
+ self.set_snap(True)
+
+ def get_ref_artist(self):
+ #return self._ref_artist.get_ticklines()[0]
+ return self._ref_artist.majorTicks[0].tick1line
+
+ def get_color(self):
+ return self.get_attribute_from_ref_artist("color", "k")
+
+ def get_markeredgecolor(self):
+ if self._markeredgecolor == 'auto':
+ return self.get_color()
+ else:
+ return self._markeredgecolor
+
+ def get_markeredgewidth(self):
+ return self.get_attribute_from_ref_artist("markeredgewidth", .5)
+
+
+ def set_tick_out(self, b):
+ """
+ Set True if the ticks need to be rotated by 180 degrees.
+ """
+ self._tick_out = b
+
+ def get_tick_out(self):
+ """
+ Return True if the ticks will be rotated by 180 degrees.
+ """
+ return self._tick_out
+
+
+ def set_ticksize(self, ticksize):
+ """
+ Set the length of the ticks in points.
+ """
+ self._ticksize = ticksize
+
+
+ def get_ticksize(self):
+ """
+ Return the length of the ticks in points.
+ """
+ return self._ticksize
+
+ def set_locs_angles(self, locs_angles):
+ self.locs_angles = locs_angles
+
+
+ def _update(self, renderer):
+ pass
+
+ _tickvert_path = Path([[0., 0.], [1., 0.]])
+
+ def draw(self, renderer):
+ if not self.get_visible():
+ return
+
+ self._update(renderer) # update the tick
+
+ size = self._ticksize
+ path_trans = self.get_transform()
+
+ # set gc : copied from lines.py
+# gc = renderer.new_gc()
+# self._set_gc_clip(gc)
+
+# gc.set_foreground(self.get_color())
+# gc.set_antialiased(self._antialiased)
+# gc.set_linewidth(self._linewidth)
+# gc.set_alpha(self._alpha)
+# if self.is_dashed():
+# cap = self._dashcapstyle
+# join = self._dashjoinstyle
+# else:
+# cap = self._solidcapstyle
+# join = self._solidjoinstyle
+# gc.set_joinstyle(join)
+# gc.set_capstyle(cap)
+# gc.set_snap(self.get_snap())
+
+
+ gc = renderer.new_gc()
+ gc.set_foreground(self.get_markeredgecolor())
+ gc.set_linewidth(self.get_markeredgewidth())
+ gc.set_alpha(self._alpha)
+
+ offset = renderer.points_to_pixels(size)
+ marker_scale = Affine2D().scale(offset, offset)
+
+ if self.get_tick_out():
+ add_angle = 180
+ else:
+ add_angle = 0
+
+ marker_rotation = Affine2D()
+ marker_transform = marker_scale + marker_rotation
+
+ for loc, angle in self.locs_angles:
+ marker_rotation.clear().rotate_deg(angle+add_angle)
+ locs = path_trans.transform_non_affine(np.array([loc]))
+ if self.axes and not self.axes.viewLim.contains(*locs[0]):
+ continue
+ renderer.draw_markers(gc, self._tickvert_path, marker_transform,
+ Path(locs), path_trans.get_affine())
+
+ gc.restore()
+
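+# Illustrative usage (comment only, not part of upstream matplotlib): since
+# Ticks is a Line2D whose markers are the ticks, the usual marker setters
+# apply. Assuming ``ax`` is an axisartist-based Axes:
+#
+#     ax.axis["bottom"].major_ticks.set_ticksize(8)         # length in points
+#     ax.axis["bottom"].major_ticks.set_markeredgewidth(2)  # i.e. set_mew
+#     ax.axis["bottom"].major_ticks.set_tick_out(True)      # rotate by 180 deg
+#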
+
+class LabelBase(mtext.Text):
+ """
+ A base class for AxisLabel and TickLabels. The position and angle
+ of the text are calculated from the offset_ref_angle,
+ text_ref_angle, and offset_radius attributes.
+ """
+
+ def __init__(self, *kl, **kwargs):
+ self.locs_angles_labels = []
+ self._ref_angle = 0
+ self._offset_radius = 0.
+
+ super(LabelBase, self).__init__(*kl,
+ **kwargs)
+
+ self.set_rotation_mode("anchor")
+ self._text_follow_ref_angle = True
+ #self._offset_ref_angle = 0
+
+ def _set_ref_angle(self, a):
+ self._ref_angle = a
+
+ def _get_ref_angle(self):
+ return self._ref_angle
+
+ def _get_text_ref_angle(self):
+ if self._text_follow_ref_angle:
+ return self._get_ref_angle()+90
+ else:
+ return 0 #self.get_ref_angle()
+
+ def _get_offset_ref_angle(self):
+ return self._get_ref_angle()
+
+ def _set_offset_radius(self, offset_radius):
+ self._offset_radius = offset_radius
+
+ def _get_offset_radius(self):
+ return self._offset_radius
+
+
+ _get_opposite_direction = {"left":"right",
+ "right":"left",
+ "top":"bottom",
+ "bottom":"top"}.__getitem__
+
+
+ def _update(self, renderer):
+ pass
+
+ def draw(self, renderer):
+ if not self.get_visible(): return
+
+ self._update(renderer)
+
+ # save original and adjust some properties
+ tr = self.get_transform()
+ angle_orig = self.get_rotation()
+
+ offset_tr = Affine2D()
+ self.set_transform(tr+offset_tr)
+
+ text_ref_angle = self._get_text_ref_angle()
+ offset_ref_angle = self._get_offset_ref_angle()
+
+ theta = (offset_ref_angle)/180.*np.pi
+ dd = self._get_offset_radius()
+ dx, dy = dd * np.cos(theta), dd * np.sin(theta)
+ offset_tr.translate(dx, dy)
+ self.set_rotation(text_ref_angle+angle_orig)
+ super(LabelBase, self).draw(renderer)
+ offset_tr.clear()
+
+
+ # restore original properties
+ self.set_transform(tr)
+ self.set_rotation(angle_orig)
+
+
+ def get_window_extent(self, renderer):
+
+ self._update(renderer)
+
+ # save original and adjust some properties
+ tr = self.get_transform()
+ angle_orig = self.get_rotation()
+
+ offset_tr = Affine2D()
+ self.set_transform(tr+offset_tr)
+
+ text_ref_angle = self._get_text_ref_angle()
+ offset_ref_angle = self._get_offset_ref_angle()
+
+ theta = (offset_ref_angle)/180.*np.pi
+ dd = self._get_offset_radius()
+ dx, dy = dd * np.cos(theta), dd * np.sin(theta)
+ offset_tr.translate(dx, dy)
+ self.set_rotation(text_ref_angle+angle_orig)
+
+ bbox = super(LabelBase, self).get_window_extent(renderer).frozen()
+
+ offset_tr.clear()
+
+
+ # restore original properties
+ self.set_transform(tr)
+ self.set_rotation(angle_orig)
+
+ return bbox
+
+
+class AxisLabel(LabelBase, AttributeCopier):
+ """
+ Axis Label. Derived from Text. The position of the text is updated
+ on the fly, so changing the text position has no effect. Otherwise, the
+ properties can be changed as a normal Text.
+
+ To change the pad between ticklabels and axis label, use set_pad.
+ """
+
+ def __init__(self, *kl, **kwargs):
+
+ axis_direction = kwargs.pop("axis_direction", "bottom")
+ self._axis = kwargs.pop("axis", None)
+ #super(AxisLabel, self).__init__(*kl, **kwargs)
+ LabelBase.__init__(self, *kl, **kwargs)
+ AttributeCopier.__init__(self, self._axis, klass=LabelBase)
+
+ self.set_axis_direction(axis_direction)
+ self._pad = 5
+ self._extra_pad = 0
+
+ def set_pad(self, pad):
+ """
+ Set the pad in points. Note that the actual pad will be the
+ sum of the internal pad and the external pad (which is set
+ automatically by the AxisArtist); this method only sets the
+ internal pad.
+ """
+ self._pad = pad
+
+ def get_pad(self):
+ """
+ Return the pad in points. See set_pad for more details.
+ """
+ return self._pad
+
+
+ def _set_external_pad(self, p):
+ """
+ Set external pad IN PIXELS. This is intended to be set by the
+ AxisArtist, not by the user.
+ """
+ self._extra_pad = p
+
+ def _get_external_pad(self):
+ """
+ Get external pad.
+ """
+ return self._extra_pad
+
+
+ def get_ref_artist(self):
+ return self._axis.get_label()
+
+
+ def get_text(self):
+ t = super(AxisLabel, self).get_text()
+ if t == "__from_axes__":
+ return self._axis.get_label().get_text()
+ return self._text
+
+ _default_alignments = dict(left=("bottom", "center"),
+ right=("top", "center"),
+ bottom=("top", "center"),
+ top=("bottom", "center"))
+
+
+
+ def set_default_alignment(self, d):
+ if d not in ["left", "right", "top", "bottom"]:
+ raise ValueError('direction must be one of "left", "right", "top", "bottom"')
+
+ va, ha = self._default_alignments[d]
+ self.set_va(va)
+ self.set_ha(ha)
+
+
+ _default_angles = dict(left=180,
+ right=0,
+ bottom=0,
+ top=180)
+
+
+ def set_default_angle(self, d):
+ if d not in ["left", "right", "top", "bottom"]:
+ raise ValueError('direction must be one of "left", "right", "top", "bottom"')
+
+ self.set_rotation(self._default_angles[d])
+
+
+ def set_axis_direction(self, d):
+ """
+ Adjust the text angle and text alignment of axis label
+ according to the matplotlib convention.
+
+
+ ===================== ========== ========= ========== ==========
+ property left bottom right top
+ ===================== ========== ========= ========== ==========
+ axislabel angle 180 0 0 180
+ axislabel va center top center bottom
+ axislabel ha right center right center
+ ===================== ========== ========= ========== ==========
+
+ Note that the text angles are actually relative to (90 + angle
+ of the direction to the ticklabel), which gives 0 for bottom
+ axis.
+
+ """
+ if d not in ["left", "right", "top", "bottom"]:
+ raise ValueError('direction must be one of "left", "right", "top", "bottom"')
+
+ self.set_default_alignment(d)
+ self.set_default_angle(d)
+
+ def get_color(self):
+ return self.get_attribute_from_ref_artist("color", "k")
+
+ def draw(self, renderer):
+ if not self.get_visible():
+ return
+
+ pad = renderer.points_to_pixels(self.get_pad())
+ r = self._get_external_pad() + pad
+ self._set_offset_radius(r)
+
+ super(AxisLabel, self).draw(renderer)
+
+
+ def get_window_extent(self, renderer):
+
+ if not self.get_visible():
+ return
+
+ pad = renderer.points_to_pixels(self.get_pad())
+ r = self._get_external_pad() + pad
+ self._set_offset_radius(r)
+
+ bb = super(AxisLabel, self).get_window_extent(renderer)
+
+ return bb
+
+
+class TickLabels(AxisLabel, AttributeCopier): # mtext.Text
+ """
+ Tick Labels. While derived from Text, this single artist draws all
+ ticklabels. As in AxisLabel, the position of the text is updated
+ on the fly, so changing the text position has no effect. Otherwise,
+ the properties can be changed as a normal Text. Unlike the
+ ticklabels of mainline matplotlib, the properties of a single
+ ticklabel alone cannot be modified.
+
+ To change the pad between ticks and ticklabels, use set_pad.
+ """
+
+ def __init__(self, **kwargs):
+
+ axis_direction = kwargs.pop("axis_direction", "bottom")
+ AxisLabel.__init__(self, **kwargs)
+ self.set_axis_direction(axis_direction)
+ #self._axis_direction = axis_direction
+ self._axislabel_pad = 0
+ #self._extra_pad = 0
+
+
+ # attribute copier
+ def get_ref_artist(self):
+ return self._axis.get_ticklabels()[0]
+
+ def set_axis_direction(self, label_direction):
+ """
+ Adjust the text angle and text alignment of ticklabels
+ according to the matplotlib convention.
+
+ The *label_direction* must be one of [left, right, bottom,
+ top].
+
+ ===================== ========== ========= ========== ==========
+ property left bottom right top
+ ===================== ========== ========= ========== ==========
+ ticklabels angle 90 0 -90 180
+ ticklabel va center baseline center baseline
+ ticklabel ha right center right center
+ ===================== ========== ========= ========== ==========
+
+
+ Note that the text angles are actually relative to (90 + angle
+ of the direction to the ticklabel), which gives 0 for bottom
+ axis.
+
+ """
+
+ if label_direction not in ["left", "right", "top", "bottom"]:
+ raise ValueError('direction must be one of "left", "right", "top", "bottom"')
+
+ self._axis_direction = label_direction
+ self.set_default_alignment(label_direction)
+ self.set_default_angle(label_direction)
+
+
+ def invert_axis_direction(self):
+ label_direction = self._get_opposite_direction(self._axis_direction)
+ self.set_axis_direction(label_direction)
+
+ def _get_ticklabels_offsets(self, renderer, label_direction):
+ """
+ Calculates the offsets of the ticklabels from the tick and
+ their total heights. The offset only takes into account the offset
+ due to the vertical alignment of the ticklabels: i.e., if the axis
+ direction is bottom and va is 'top', it will return 0; if va
+ is 'baseline', it will return (height - descent).
+ """
+ whd_list = self.get_texts_widths_heights_descents(renderer)
+
+ if not whd_list:
+ return 0, 0
+
+ r = 0
+ va, ha = self.get_va(), self.get_ha()
+
+ if label_direction == "left":
+ pad = max(w for w, h, d in whd_list)
+ if ha == "left":
+ r = pad
+ elif ha == "center":
+ r = .5 * pad
+ elif label_direction == "right":
+ pad = max(w for w, h, d in whd_list)
+ if ha == "right":
+ r = pad
+ elif ha == "center":
+ r = .5 * pad
+ elif label_direction == "bottom":
+ pad = max(h for w, h, d in whd_list)
+ if va == "bottom":
+ r = pad
+ elif va == "center":
+ r = .5 * pad
+ elif va == "baseline":
+ max_ascent = max(h - d for w, h, d in whd_list)
+ max_descent = max(d for w, h, d in whd_list)
+ r = max_ascent
+ pad = max_ascent + max_descent
+ elif label_direction == "top":
+ pad = max(h for w, h, d in whd_list)
+ if va == "top":
+ r = pad
+ elif va == "center":
+ r = .5 * pad
+ elif va == "baseline":
+ max_ascent = max(h - d for w, h, d in whd_list)
+ max_descent = max(d for w, h, d in whd_list)
+ r = max_descent
+ pad = max_ascent + max_descent
+
+ #tick_pad = renderer.points_to_pixels(self.get_pad())
+
+ # r : offset
+
+ # pad : total height of the ticklabels. This will be used to
+ # calculate the pad for the axislabel.
+ return r, pad
+
+
+
+ _default_alignments = dict(left=("center", "right"),
+ right=("center", "left"),
+ bottom=("baseline", "center"),
+ top=("baseline", "center"))
+
+
+
+ # set_default_alignments(self, d)
+
+ _default_angles = dict(left=90,
+ right=-90,
+ bottom=0,
+ top=180)
+
+
+ def draw(self, renderer):
+ if not self.get_visible():
+ self._axislabel_pad = self._get_external_pad()
+ return
+
+ r, total_width = self._get_ticklabels_offsets(renderer,
+ self._axis_direction)
+
+ #self._set_external_pad(r+self._get_external_pad())
+ pad = self._get_external_pad() + \
+ renderer.points_to_pixels(self.get_pad())
+ self._set_offset_radius(r+pad)
+
+ #self._set_offset_radius(r)
+
+ for (x, y), a, l in self._locs_angles_labels:
+ if not l.strip(): continue
+ self._set_ref_angle(a) #+ add_angle
+ self.set_x(x)
+ self.set_y(y)
+ self.set_text(l)
+ LabelBase.draw(self, renderer)
+
+ self._axislabel_pad = total_width \
+ + pad # the value saved will be used to draw axislabel.
+
+
+ def set_locs_angles_labels(self, locs_angles_labels):
+ self._locs_angles_labels = locs_angles_labels
+
+ def get_window_extents(self, renderer):
+
+ if not self.get_visible():
+ self._axislabel_pad = self._get_external_pad()
+ return []
+
+ bboxes = []
+
+ r, total_width = self._get_ticklabels_offsets(renderer,
+ self._axis_direction)
+
+ pad = self._get_external_pad() + \
+ renderer.points_to_pixels(self.get_pad())
+ self._set_offset_radius(r+pad)
+
+
+ for (x, y), a, l in self._locs_angles_labels:
+ self._set_ref_angle(a) #+ add_angle
+ self.set_x(x)
+ self.set_y(y)
+ self.set_text(l)
+ bb = LabelBase.get_window_extent(self, renderer)
+ bboxes.append(bb)
+
+ self._axislabel_pad = total_width \
+ + pad # the value saved will be used to draw axislabel.
+
+ return bboxes
+
+
+ def get_texts_widths_heights_descents(self, renderer):
+ """
+ Return a list of (width, height, descent) tuples for the ticklabels.
+ """
+ whd_list = []
+ for (x, y), a, l in self._locs_angles_labels:
+ if not l.strip(): continue
+ clean_line, ismath = self.is_math_text(l)
+ whd = renderer.get_text_width_height_descent(
+ clean_line, self._fontproperties, ismath=ismath)
+ whd_list.append(whd)
+
+ return whd_list
+
+
+class GridlinesCollection(LineCollection):
+ def __init__(self, *kl, **kwargs):
+ """
+ *which* : "major" or "minor"
+ *axis* : "both", "x" or "y"
+ """
+ self._which = kwargs.pop("which", "major")
+ self._axis = kwargs.pop("axis", "both")
+ super(GridlinesCollection, self).__init__(*kl, **kwargs)
+ self.set_grid_helper(None)
+
+ def set_which(self, which):
+ self._which = which
+
+ def set_axis(self, axis):
+ self._axis = axis
+
+ def set_grid_helper(self, grid_helper):
+ self._grid_helper = grid_helper
+
+ def draw(self, renderer):
+ if self._grid_helper is not None:
+ self._grid_helper.update_lim(self.axes)
+ gl = self._grid_helper.get_gridlines(self._which, self._axis)
+ if gl:
+ self.set_segments([np.transpose(l) for l in gl])
+ else:
+ self.set_segments([])
+ super(GridlinesCollection, self).draw(renderer)
+
+
+
+
+class AxisArtist(martist.Artist):
+ """
+ An artist which draws the axis line (a line along which the n-th axes
+ coordinate is constant), ticks, ticklabels, and the axis label.
+ """
+
+ ZORDER=2.5
+
+ @property
+ def LABELPAD(self):
+ return self.label.get_pad()
+
+ @LABELPAD.setter
+ def LABELPAD(self, v):
+ return self.label.set_pad(v)
+
+ def __init__(self, axes,
+ helper,
+ offset=None,
+ axis_direction="bottom",
+ **kw):
+ """
+ *axes* : axes
+ *helper* : an AxisArtistHelper instance.
+ """
+ #axes is also used to follow the axis attribute (tick color, etc).
+
+ super(AxisArtist, self).__init__(**kw)
+
+ self.axes = axes
+
+ self._axis_artist_helper = helper
+
+ if offset is None:
+ offset = (0, 0)
+ self.dpi_transform = Affine2D()
+ self.offset_transform = ScaledTranslation(offset[0], offset[1],
+ self.dpi_transform)
+
+ self._label_visible = True
+ self._majortick_visible = True
+ self._majorticklabel_visible = True
+ self._minortick_visible = True
+ self._minorticklabel_visible = True
+
+
+ #if self._axis_artist_helper._loc in ["left", "right"]:
+ if axis_direction in ["left", "right"]:
+ axis_name = "ytick"
+ self.axis = axes.yaxis
+ else:
+ axis_name = "xtick"
+ self.axis = axes.xaxis
+
+
+ self._axisline_style = None
+
+
+ self._axis_direction = axis_direction
+
+
+ self._init_line()
+ self._init_ticks(axis_name, **kw)
+ self._init_offsetText(axis_direction)
+ self._init_label()
+
+ self.set_zorder(self.ZORDER)
+
+ self._rotate_label_along_line = False
+
+ # axis direction
+ self._tick_add_angle = 180.
+ self._ticklabel_add_angle = 0.
+ self._axislabel_add_angle = 0.
+ self.set_axis_direction(axis_direction)
+
+
+ # axis direction
+
+ def set_axis_direction(self, axis_direction):
+ """
+        Adjust the direction, text angle, and text alignment of the
+        ticklabels and axis label, following the matplotlib convention
+        for rectangular axes.
+
+ The *axis_direction* must be one of [left, right, bottom,
+ top].
+
+ ===================== ========== ========= ========== ==========
+ property left bottom right top
+ ===================== ========== ========= ========== ==========
+ ticklabels location "-" "+" "+" "-"
+ axislabel location "-" "+" "+" "-"
+ ticklabels angle 90 0 -90 180
+ ticklabel va center baseline center baseline
+ ticklabel ha right center right center
+ axislabel angle 180 0 0 180
+ axislabel va center top center bottom
+ axislabel ha right center right center
+ ===================== ========== ========= ========== ==========
+
+
+        Note that the directions "+" and "-" are relative to the direction of
+        the increasing coordinate. Also, the text angles are actually
+        relative to (90 + angle of the direction to the ticklabel),
+        which gives 0 for the bottom axis.
+
+ """
+
+ if axis_direction not in ["left", "right", "top", "bottom"]:
+            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
+ self._axis_direction = axis_direction
+ if axis_direction in ["left", "top"]:
+ #self._set_tick_direction("+")
+ self.set_ticklabel_direction("-")
+ self.set_axislabel_direction("-")
+ else:
+ #self._set_tick_direction("-")
+ self.set_ticklabel_direction("+")
+ self.set_axislabel_direction("+")
+
+ self.major_ticklabels.set_axis_direction(axis_direction)
+ self.label.set_axis_direction(axis_direction)
+
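+    # A hedged usage sketch (``ax.axis`` is the AxisDict provided by the
+    # axislines Axes elsewhere in this package); flipping a bottom axis to
+    # the "top" conventions from the table above:
+    #
+    #     ax.axis["bottom"].set_axis_direction("top")
+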
+ # def _set_tick_direction(self, d):
+ # if d not in ["+", "-"]:
+ # raise ValueError('direction must be on of "in", "out"')
+
+ # if d == "+":
+ # self._tick_add_angle = 0 #get_helper()._extremes=0, 10
+ # else:
+ # self._tick_add_angle = 180 #get_helper()._extremes=0, 10
+
+ def set_ticklabel_direction(self, tick_direction):
+ """
+ Adjust the direction of the ticklabel.
+
+ ACCEPTS: [ "+" | "-" ]
+
+        Note that the tick_direction '+' and '-' are relative to the
+        direction of the increasing coordinate.
+ """
+
+ if tick_direction not in ["+", "-"]:
+ raise ValueError('direction must be one of "+", "-"')
+
+ if tick_direction == "-":
+ self._ticklabel_add_angle = 180
+ else:
+ self._ticklabel_add_angle = 0
+
+ def invert_ticklabel_direction(self):
+ self._ticklabel_add_angle = (self._ticklabel_add_angle + 180) % 360
+ self.major_ticklabels.invert_axis_direction()
+ self.minor_ticklabels.invert_axis_direction()
+
+ # def invert_ticks_direction(self):
+ # self.major_ticks.set_tick_out(not self.major_ticks.get_tick_out())
+ # self.minor_ticks.set_tick_out(not self.minor_ticks.get_tick_out())
+
+ def set_axislabel_direction(self, label_direction):
+ """
+ Adjust the direction of the axislabel.
+
+ ACCEPTS: [ "+" | "-" ]
+
+ Note that the label_direction '+' and '-' are relative to the
+ direction of the increasing coordinate.
+ """
+ if label_direction not in ["+", "-"]:
+ raise ValueError('direction must be one of "+", "-"')
+
+ if label_direction == "-":
+ self._axislabel_add_angle = 180
+ else:
+ self._axislabel_add_angle = 0
+
+
+
+ def get_transform(self):
+ return self.axes.transAxes + self.offset_transform
+
+ def get_helper(self):
+ """
+ Return axis artist helper instance.
+ """
+ return self._axis_artist_helper
+
+
+ def set_axisline_style(self, axisline_style=None, **kw):
+ """
+ Set the axisline style.
+
+        *axisline_style* can be a string with the axisline style name and
+        optional comma-separated attributes. Alternatively, the attrs can
+        be provided as keywords::
+
+            set_axisline_style("->,size=1.5")
+            set_axisline_style("->", size=1.5)
+
+        Old attrs are simply forgotten.
+
+        Without an argument (or with axisline_style=None), return the
+        available styles as a list of strings.
+ """
+
+        if axisline_style is None:
+ return AxislineStyle.pprint_styles()
+
+ if isinstance(axisline_style, AxislineStyle._Base):
+ self._axisline_style = axisline_style
+ else:
+ self._axisline_style = AxislineStyle(axisline_style, **kw)
+
+
+ self._init_line()
+
+
+ def get_axisline_style(self):
+ """
+ return the current axisline style.
+ """
+ return self._axisline_style
+
+ def _init_line(self):
+ """
+        Initialize the *line* artist that is responsible for drawing the axis line.
+ """
+ tran = self._axis_artist_helper.get_line_transform(self.axes) \
+ + self.offset_transform
+
+ axisline_style = self.get_axisline_style()
+ if axisline_style is None:
+ self.line = BezierPath(self._axis_artist_helper.get_line(self.axes),
+ color=rcParams['axes.edgecolor'],
+ linewidth=rcParams['axes.linewidth'],
+ transform=tran)
+ else:
+ self.line = axisline_style(self, transform=tran)
+
+ def _draw_line(self, renderer):
+ self.line.set_path(self._axis_artist_helper.get_line(self.axes))
+ if self.get_axisline_style() is not None:
+ self.line.set_line_mutation_scale(self.major_ticklabels.get_size())
+ self.line.draw(renderer)
+
+
+ def _init_ticks(self, axis_name, **kw):
+
+ trans=self._axis_artist_helper.get_tick_transform(self.axes) \
+ + self.offset_transform
+
+
+ major_tick_size = kw.get("major_tick_size",
+ rcParams['%s.major.size'%axis_name])
+ major_tick_pad = kw.get("major_tick_pad",
+ rcParams['%s.major.pad'%axis_name])
+ minor_tick_size = kw.get("minor_tick_size",
+ rcParams['%s.minor.size'%axis_name])
+ minor_tick_pad = kw.get("minor_tick_pad",
+ rcParams['%s.minor.pad'%axis_name])
+
+ self.major_ticks = Ticks(major_tick_size,
+ axis=self.axis,
+ transform=trans)
+ self.minor_ticks = Ticks(minor_tick_size,
+ axis=self.axis,
+ transform=trans)
+
+ if axis_name == "xaxis":
+ size = rcParams['xtick.labelsize']
+ else:
+ size = rcParams['ytick.labelsize']
+
+
+ fontprops = font_manager.FontProperties(size=size)
+
+ self.major_ticklabels = TickLabels(size=size, axis=self.axis,
+ axis_direction=self._axis_direction)
+ self.minor_ticklabels = TickLabels(size=size, axis=self.axis,
+ axis_direction=self._axis_direction)
+
+
+ self.major_ticklabels.set(figure = self.axes.figure,
+ transform=trans,
+ fontproperties=fontprops)
+ self.major_ticklabels.set_pad(major_tick_pad)
+
+ self.minor_ticklabels.set(figure = self.axes.figure,
+ transform=trans,
+ fontproperties=fontprops)
+ self.minor_ticklabels.set_pad(minor_tick_pad)
+
+
+
+ def _get_tick_info(self, tick_iter):
+ """
+ return ticks_loc_angle, ticklabels_loc_angle_label
+
+ ticks_loc_angle : list of locs and angles for ticks
+        ticklabels_loc_angle_label : list of locs, angles and labels for ticklabels
+ """
+ ticks_loc_angle = []
+ ticklabels_loc_angle_label = []
+
+ tick_add_angle = self._tick_add_angle
+ ticklabel_add_angle = self._ticklabel_add_angle
+
+ for loc, angle_normal, angle_tangent, label in tick_iter:
+ angle_label = angle_tangent - 90
+ angle_label += ticklabel_add_angle
+
+ if np.cos((angle_label - angle_normal)/180.*np.pi) < 0.:
+ angle_tick = angle_normal
+ else:
+ angle_tick = angle_normal + 180
+
+ ticks_loc_angle.append([loc, angle_tick])
+ ticklabels_loc_angle_label.append([loc, angle_label, label])
+
+ return ticks_loc_angle, ticklabels_loc_angle_label
+
+
+ def _update_ticks(self, renderer):
+
+
+ # set extra pad for major and minor ticklabels:
+ # use ticksize of majorticks even for minor ticks. not clear what is best.
+
+ dpi_cor = renderer.points_to_pixels(1.)
+ if self.major_ticks.get_visible() and self.major_ticks.get_tick_out():
+ self.major_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
+ self.minor_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
+ else:
+ self.major_ticklabels._set_external_pad(0)
+ self.minor_ticklabels._set_external_pad(0)
+
+
+ majortick_iter, minortick_iter = \
+ self._axis_artist_helper.get_tick_iterators(self.axes)
+
+ tick_loc_angle, ticklabel_loc_angle_label \
+ = self._get_tick_info(majortick_iter)
+
+ self.major_ticks.set_locs_angles(tick_loc_angle)
+ self.major_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
+
+ #self.major_ticks.draw(renderer)
+ #self.major_ticklabels.draw(renderer)
+
+
+ # minor ticks
+ tick_loc_angle, ticklabel_loc_angle_label \
+ = self._get_tick_info(minortick_iter)
+
+ self.minor_ticks.set_locs_angles(tick_loc_angle)
+ self.minor_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
+
+ #self.minor_ticks.draw(renderer)
+ #self.minor_ticklabels.draw(renderer)
+
+
+ #if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
+ # self._draw_offsetText(renderer)
+
+ return self.major_ticklabels.get_window_extents(renderer)
+
+
+ def _draw_ticks(self, renderer):
+
+ extents = self._update_ticks(renderer)
+
+ self.major_ticks.draw(renderer)
+ self.major_ticklabels.draw(renderer)
+
+ self.minor_ticks.draw(renderer)
+ self.minor_ticklabels.draw(renderer)
+
+
+ if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
+ self._draw_offsetText(renderer)
+
+ return extents
+
+ def _draw_ticks2(self, renderer):
+
+
+ # set extra pad for major and minor ticklabels:
+ # use ticksize of majorticks even for minor ticks. not clear what is best.
+
+ dpi_cor = renderer.points_to_pixels(1.)
+ if self.major_ticks.get_visible() and self.major_ticks.get_tick_out():
+ self.major_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
+ self.minor_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
+ else:
+ self.major_ticklabels._set_external_pad(0)
+ self.minor_ticklabels._set_external_pad(0)
+
+
+ majortick_iter, minortick_iter = \
+ self._axis_artist_helper.get_tick_iterators(self.axes)
+
+ tick_loc_angle, ticklabel_loc_angle_label \
+ = self._get_tick_info(majortick_iter)
+
+ self.major_ticks.set_locs_angles(tick_loc_angle)
+ self.major_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
+
+ self.major_ticks.draw(renderer)
+ self.major_ticklabels.draw(renderer)
+
+
+ # minor ticks
+ tick_loc_angle, ticklabel_loc_angle_label \
+ = self._get_tick_info(minortick_iter)
+
+ self.minor_ticks.set_locs_angles(tick_loc_angle)
+ self.minor_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
+
+ self.minor_ticks.draw(renderer)
+ self.minor_ticklabels.draw(renderer)
+
+
+ if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
+ self._draw_offsetText(renderer)
+
+ return self.major_ticklabels.get_window_extents(renderer)
+
+
+
+
+ _offsetText_pos = dict(left=(0, 1, "bottom", "right"),
+ right=(1, 1, "bottom", "left"),
+ bottom=(1, 0, "top", "right"),
+ top=(1, 1, "bottom", "right"))
+
+ def _init_offsetText(self, direction):
+
+ x,y,va,ha = self._offsetText_pos[direction]
+
+ self.offsetText = mtext.Annotation("",
+ xy=(x,y), xycoords="axes fraction",
+ xytext=(0,0), textcoords="offset points",
+ #fontproperties = fp,
+ color = rcParams['xtick.color'],
+ verticalalignment=va,
+ horizontalalignment=ha,
+ )
+ self.offsetText.set_transform(IdentityTransform())
+ self.axes._set_artist_props(self.offsetText)
+
+
+ def _update_offsetText(self):
+ self.offsetText.set_text( self.axis.major.formatter.get_offset() )
+ self.offsetText.set_size(self.major_ticklabels.get_size())
+ offset = self.major_ticklabels.get_pad() + self.major_ticklabels.get_size() + 2.
+ self.offsetText.xyann= (0, offset)
+
+
+ def _draw_offsetText(self, renderer):
+ self._update_offsetText()
+ self.offsetText.draw(renderer)
+
+
+
+ def _init_label(self, **kw):
+ # x in axes coords, y in display coords (to be updated at draw
+ # time by _update_label_positions)
+
+ labelsize = kw.get("labelsize",
+ rcParams['axes.labelsize'])
+ #labelcolor = kw.get("labelcolor",
+ # rcParams['axes.labelcolor'])
+ fontprops = font_manager.FontProperties(
+ size=labelsize,
+ weight=rcParams['axes.labelweight'])
+ textprops = dict(fontproperties = fontprops)
+ #color = labelcolor)
+
+ tr = self._axis_artist_helper.get_axislabel_transform(self.axes) \
+ + self.offset_transform
+
+ self.label = AxisLabel(0, 0, "__from_axes__",
+ color = "auto", #rcParams['axes.labelcolor'],
+ fontproperties=fontprops,
+ axis=self.axis,
+ transform=tr,
+ axis_direction=self._axis_direction,
+ )
+
+ self.label.set_figure(self.axes.figure)
+
+ labelpad = kw.get("labelpad", 5)
+ self.label.set_pad(labelpad)
+
+
+ def _update_label(self, renderer):
+
+ if not self.label.get_visible():
+ return
+
+ fontprops = font_manager.FontProperties(
+ size=rcParams['axes.labelsize'],
+ weight=rcParams['axes.labelweight'])
+
+ #pad_points = self.major_tick_pad
+
+ #if abs(self._ticklabel_add_angle - self._axislabel_add_angle)%360 > 90:
+ if self._ticklabel_add_angle != self._axislabel_add_angle:
+ if (self.major_ticks.get_visible() and not self.major_ticks.get_tick_out()) \
+ or \
+ (self.minor_ticks.get_visible() and not self.major_ticks.get_tick_out()):
+ axislabel_pad = self.major_ticks._ticksize
+ else:
+ axislabel_pad = 0
+ else:
+ axislabel_pad = max(self.major_ticklabels._axislabel_pad,
+ self.minor_ticklabels._axislabel_pad)
+
+
+ #label_offset = axislabel_pad + self.LABELPAD
+
+ #self.label._set_offset_radius(label_offset)
+ self.label._set_external_pad(axislabel_pad)
+
+ xy, angle_tangent = self._axis_artist_helper.get_axislabel_pos_angle(self.axes)
+ if xy is None: return
+
+ angle_label = angle_tangent - 90
+
+
+ x, y = xy
+ self.label._set_ref_angle(angle_label+self._axislabel_add_angle)
+ self.label.set(x=x, y=y)
+
+
+ def _draw_label(self, renderer):
+ self._update_label(renderer)
+ self.label.draw(renderer)
+
+ def _draw_label2(self, renderer):
+
+ if not self.label.get_visible():
+ return
+
+ fontprops = font_manager.FontProperties(
+ size=rcParams['axes.labelsize'],
+ weight=rcParams['axes.labelweight'])
+
+ #pad_points = self.major_tick_pad
+
+ #if abs(self._ticklabel_add_angle - self._axislabel_add_angle)%360 > 90:
+ if self._ticklabel_add_angle != self._axislabel_add_angle:
+ if (self.major_ticks.get_visible() and not self.major_ticks.get_tick_out()) \
+ or \
+ (self.minor_ticks.get_visible() and not self.major_ticks.get_tick_out()):
+ axislabel_pad = self.major_ticks._ticksize
+ else:
+ axislabel_pad = 0
+ else:
+ axislabel_pad = max(self.major_ticklabels._axislabel_pad,
+ self.minor_ticklabels._axislabel_pad)
+
+ #label_offset = axislabel_pad + self.LABELPAD
+
+ #self.label._set_offset_radius(label_offset)
+ self.label._set_external_pad(axislabel_pad)
+
+ xy, angle_tangent = self._axis_artist_helper.get_axislabel_pos_angle(self.axes)
+ if xy is None: return
+
+ angle_label = angle_tangent - 90
+
+ x, y = xy
+ self.label._set_ref_angle(angle_label+self._axislabel_add_angle)
+ self.label.set(x=x, y=y)
+ self.label.draw(renderer)
+
+
+
+ def set_label(self, s):
+ self.label.set_text(s)
+
+
+
+ def get_tightbbox(self, renderer):
+ if not self.get_visible(): return
+
+ self._axis_artist_helper.update_lim(self.axes)
+
+ dpi_cor = renderer.points_to_pixels(1.)
+ self.dpi_transform.clear().scale(dpi_cor, dpi_cor)
+
+
+ bb = []
+
+ self._update_ticks(renderer)
+
+ #if self.major_ticklabels.get_visible():
+ bb.extend(self.major_ticklabels.get_window_extents(renderer))
+ #if self.minor_ticklabels.get_visible():
+ bb.extend(self.minor_ticklabels.get_window_extents(renderer))
+
+
+ self._update_label(renderer)
+
+ #if self.label.get_visible():
+ bb.append(self.label.get_window_extent(renderer))
+ bb.append(self.offsetText.get_window_extent(renderer))
+
+ bb = [b for b in bb if b and (b.width!=0 or b.height!=0)]
+ if bb:
+ _bbox = Bbox.union(bb)
+ return _bbox
+ else:
+ return None
+
+ #self._draw_line(renderer)
+
+ #self._draw_ticks(renderer)
+
+ #self._draw_offsetText(renderer)
+ #self._draw_label(renderer)
+
+
+
+ @allow_rasterization
+ def draw(self, renderer):
+ 'Draw the axis lines, tick lines and labels'
+
+ if not self.get_visible(): return
+
+ renderer.open_group(__name__)
+
+ self._axis_artist_helper.update_lim(self.axes)
+
+ dpi_cor = renderer.points_to_pixels(1.)
+ self.dpi_transform.clear().scale(dpi_cor, dpi_cor)
+
+
+ self._draw_ticks(renderer)
+
+ self._draw_line(renderer)
+
+ #self._draw_offsetText(renderer)
+ self._draw_label(renderer)
+
+ renderer.close_group(__name__)
+
+ #def get_ticklabel_extents(self, renderer):
+ # pass
+
+ def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
+ """
+ Toggle visibility of ticks, ticklabels, and (axis) label.
+ To turn all off, ::
+
+ axis.toggle(all=False)
+
+ To turn all off but ticks on ::
+
+ axis.toggle(all=False, ticks=True)
+
+ To turn all on but (axis) label off ::
+
+          axis.toggle(all=True, label=False)
+
+ """
+ if all:
+ _ticks, _ticklabels, _label = True, True, True
+ elif all is not None:
+ _ticks, _ticklabels, _label = False, False, False
+ else:
+ _ticks, _ticklabels, _label = None, None, None
+
+ if ticks is not None:
+ _ticks = ticks
+ if ticklabels is not None:
+ _ticklabels = ticklabels
+ if label is not None:
+ _label = label
+
+ if _ticks is not None:
+ self.major_ticks.set_visible(_ticks)
+ self.minor_ticks.set_visible(_ticks)
+ if _ticklabels is not None:
+ self.major_ticklabels.set_visible(_ticklabels)
+ self.minor_ticklabels.set_visible(_ticklabels)
+ if _label is not None:
+ self.label.set_visible(_label)
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axisline_style.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axisline_style.py
new file mode 100644
index 00000000000..876f5fe1898
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axisline_style.py
@@ -0,0 +1,168 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from matplotlib.patches import _Style, FancyArrowPatch
+from matplotlib.transforms import IdentityTransform
+from matplotlib.path import Path
+import numpy as np
+
+class _FancyAxislineStyle(object):
+ class SimpleArrow(FancyArrowPatch):
+ """
+ The artist class that will be returned for SimpleArrow style.
+ """
+ _ARROW_STYLE = "->"
+
+ def __init__(self, axis_artist, line_path, transform,
+ line_mutation_scale):
+ self._axis_artist = axis_artist
+ self._line_transform = transform
+ self._line_path = line_path
+ self._line_mutation_scale = line_mutation_scale
+
+ FancyArrowPatch.__init__(self,
+ path=self._line_path,
+ arrowstyle=self._ARROW_STYLE,
+ arrow_transmuter=None,
+ patchA=None,
+ patchB=None,
+ shrinkA=0.,
+ shrinkB=0.,
+ mutation_scale=line_mutation_scale,
+ mutation_aspect=None,
+ transform=IdentityTransform(),
+ )
+
+ def set_line_mutation_scale(self, scale):
+ self.set_mutation_scale(scale*self._line_mutation_scale)
+
+ def _extend_path(self, path, mutation_size=10):
+ """
+            Extend the path to make room for drawing the arrow.
+ """
+ from matplotlib.bezier import get_cos_sin
+
+ x0, y0 = path.vertices[-2]
+ x1, y1 = path.vertices[-1]
+ cost, sint = get_cos_sin(x0, y0, x1, y1)
+
+ d = mutation_size * 1.
+ x2, y2 = x1 + cost*d, y1+sint*d
+
+ if path.codes is None:
+ _path = Path(np.concatenate([path.vertices, [[x2, y2]]]))
+ else:
+ _path = Path(np.concatenate([path.vertices, [[x2, y2]]]),
+ np.concatenate([path.codes, [Path.LINETO]]))
+
+ return _path
+
+ def set_path(self, path):
+ self._line_path = path
+
+ def draw(self, renderer):
+ """
+            Draw the axis line.
+            1) transform the path to display coordinates.
+            2) extend the path to make room for the arrow.
+            3) update the path of the FancyArrowPatch.
+            4) draw.
+ """
+ path_in_disp = self._line_transform.transform_path(self._line_path)
+ mutation_size = self.get_mutation_scale() #line_mutation_scale()
+            extended_path = self._extend_path(path_in_disp,
+                                              mutation_size=mutation_size)
+
+            self._path_original = extended_path
+ FancyArrowPatch.draw(self, renderer)
+
+ class FilledArrow(SimpleArrow):
+ """
+        The artist class that will be returned for the FilledArrow style.
+ """
+ _ARROW_STYLE = "-|>"
+
+
+class AxislineStyle(_Style):
+ """
+ :class:`AxislineStyle` is a container class which defines style classes
+ for AxisArtists.
+
+    An instance of any axisline style class is a callable object,
+    whose call signature is ::
+
+ __call__(self, axis_artist, path, transform)
+
+    When called, this should return an mpl artist with the following
+    methods implemented. ::
+
+ def set_path(self, path):
+ # set the path for axisline.
+
+ def set_line_mutation_scale(self, scale):
+ # set the scale
+
+ def draw(self, renderer):
+ # draw
+
+
+ """
+
+ _style_list = {}
+
+
+ class _Base(object):
+ # The derived classes are required to be able to be initialized
+        # w/o arguments, i.e., all of their arguments (except self) must
+        # have default values.
+
+ def __init__(self):
+ """
+ initialization.
+ """
+ super(AxislineStyle._Base, self).__init__()
+
+
+
+
+ def __call__(self, axis_artist, transform):
+ """
+            Given the AxisArtist instance and the transform for the path
+            (used by its set_path method), return the mpl artist for
+            drawing the axis line.
+ """
+
+ return self.new_line(axis_artist, transform)
+
+
+ class SimpleArrow(_Base):
+ """
+ A simple arrow.
+ """
+
+ ArrowAxisClass = _FancyAxislineStyle.SimpleArrow
+
+ def __init__(self, size=1):
+ """
+ *size*
+ size of the arrow as a fraction of the ticklabel size.
+ """
+
+ self.size = size
+ super(AxislineStyle.SimpleArrow, self).__init__()
+
+ def new_line(self, axis_artist, transform):
+
+ linepath = Path([(0,0), (0, 1)])
+ axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
+ line_mutation_scale=self.size)
+ return axisline
+
+
+ _style_list["->"] = SimpleArrow
+
+ class FilledArrow(SimpleArrow):
+ ArrowAxisClass = _FancyAxislineStyle.FilledArrow
+
+ _style_list["-|>"] = FilledArrow
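+
+# Hedged usage sketch: the styles registered above are what
+# AxisArtist.set_axisline_style looks up by name, e.g.
+#
+#     ax.axis["bottom"].set_axisline_style("->", size=1.5)
+#     ax.axis["left"].set_axisline_style("-|>")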
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axislines.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axislines.py
new file mode 100644
index 00000000000..6182608cc5b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/axislines.py
@@ -0,0 +1,828 @@
+"""
+Axislines includes a modified implementation of the Axes class. The
+biggest difference is that the artists responsible for drawing the axis spine,
+ticks, ticklabels and axis labels are separated out from mpl's Axis
+class. Originally, this change was motivated by the need to support a
+curvilinear grid. Here are a few reasons why I came up with a new axes class:
+
+
+ * "top" and "bottom" x-axis (or "left" and "right" y-axis) can have
+ different ticks (tick locations and labels). This is not possible
+ with the current mpl, although some twin axes trick can help.
+
+ * Curvilinear grid.
+
+ * angled ticks.
+
+In the new axes class, the xaxis and yaxis are set to be invisible by
+default, and a new set of artists (AxisArtist) is defined to draw the axis
+line, ticks, ticklabels and axis label. The Axes.axis attribute serves as
+a dictionary of these artists, i.e., ax.axis["left"] is an AxisArtist
+instance responsible for drawing the left y-axis. The default Axes.axis
+contains "bottom", "left", "top" and "right".
+
+AxisArtist can be considered a container artist and
+has the following child artists, which draw ticks, labels, etc.
+
+ * line
+ * major_ticks, major_ticklabels
+ * minor_ticks, minor_ticklabels
+ * offsetText
+ * label
+
+Note that these are artists separate from the original mpl's Axis
+class, thus most of the tick-related commands in the original mpl
+won't work, although some effort has been made to support them. For example,
+color and markerwidth of the ax.axis["bottom"].major_ticks will follow
+those of Axes.xaxis unless explicitly specified.
+
+In addition to AxisArtist, the Axes will have a *gridlines* attribute,
+which draws the grid lines. The gridlines need to be separated
+from the axis as some gridlines may never pass through any axis.
+
+"""
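+
+# A minimal usage sketch (hedged): ``Subplot`` is defined at the bottom of
+# this module and ``ax.axis`` is the AxisDict described above.
+#
+#     import matplotlib.pyplot as plt
+#     from mpl_toolkits.axisartist.axislines import Subplot
+#
+#     fig = plt.figure()
+#     ax = Subplot(fig, 111)
+#     fig.add_subplot(ax)
+#     ax.axis["right"].set_visible(False)
+#     ax.axis["top"].set_visible(False)
+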
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import warnings
+
+import numpy as np
+
+from matplotlib import rcParams
+import matplotlib.artist as martist
+import matplotlib.axes as maxes
+from matplotlib.path import Path
+from matplotlib.transforms import Bbox
+from .axisline_style import AxislineStyle
+from .axis_artist import AxisArtist, GridlinesCollection
+
+
+class AxisArtistHelper(object):
+ """
+    An AxisArtistHelper should define the
+    following methods with the given APIs. Note that the first *axes*
+    argument will be the axes attribute of the caller artist.::
+
+
+ # LINE (spinal line?)
+
+ def get_line(self, axes):
+ # path : Path
+ return path
+
+ def get_line_transform(self, axes):
+ # ...
+ # trans : transform
+ return trans
+
+ # LABEL
+
+ def get_label_pos(self, axes):
+ # x, y : position
+ return (x, y), trans
+
+
+ def get_label_offset_transform(self, \
+ axes,
+ pad_points, fontprops, renderer,
+ bboxes,
+ ):
+ # va : vertical alignment
+ # ha : horizontal alignment
+ # a : angle
+ return trans, va, ha, a
+
+ # TICK
+
+ def get_tick_transform(self, axes):
+ return trans
+
+ def get_tick_iterators(self, axes):
+ # iter : iterable object that yields (c, angle, l) where
+        # c, angle, l are the position, tick angle, and label
+
+ return iter_major, iter_minor
+
+
+ """
+
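+    # Hedged consumption sketch: AxisArtist calls these helper methods at
+    # draw time, roughly as
+    #
+    #     path  = helper.get_line(axes)
+    #     trans = helper.get_line_transform(axes)
+    #     major_iter, minor_iter = helper.get_tick_iterators(axes)
+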
+ class _Base(object):
+ """
+ Base class for axis helper.
+ """
+ def __init__(self):
+ """
+ """
+ self.delta1, self.delta2 = 0.00001, 0.00001
+
+ def update_lim(self, axes):
+ pass
+
+
+ class Fixed(_Base):
+ """
+ Helper class for a fixed (in the axes coordinate) axis.
+ """
+
+ _default_passthru_pt = dict(left=(0, 0),
+ right=(1, 0),
+ bottom=(0, 0),
+ top=(0, 1))
+
+ def __init__(self,
+ loc, nth_coord=None,
+ ):
+ """
+            nth_coord = along which coordinate the value varies;
+            in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
+ """
+
+ self._loc = loc
+
+ if loc not in ["left", "right", "bottom", "top"]:
+                raise ValueError('loc must be "left", "right", "bottom", or "top"; got %r' % (loc,))
+
+ if nth_coord is None:
+ if loc in ["left", "right"]:
+ nth_coord = 1
+ elif loc in ["bottom", "top"]:
+ nth_coord = 0
+
+ self.nth_coord = nth_coord
+
+ super(AxisArtistHelper.Fixed, self).__init__()
+
+ self.passthru_pt = self._default_passthru_pt[loc]
+
+
+
+ _verts = np.array([[0., 0.],
+ [1., 1.]])
+ fixed_coord = 1-nth_coord
+ _verts[:,fixed_coord] = self.passthru_pt[fixed_coord]
+
+ # axis line in transAxes
+ self._path = Path(_verts)
+
+
+ def get_nth_coord(self):
+ return self.nth_coord
+
+ # LINE
+
+ def get_line(self, axes):
+ return self._path
+
+ def get_line_transform(self, axes):
+ return axes.transAxes
+
+ # LABEL
+
+ def get_axislabel_transform(self, axes):
+ return axes.transAxes
+
+ def get_axislabel_pos_angle(self, axes):
+ """
+ label reference position in transAxes.
+
+ get_label_transform() returns a transform of (transAxes+offset)
+ """
+ loc = self._loc
+ pos, angle_tangent = dict(left=((0., 0.5), 90),
+ right=((1., 0.5), 90),
+ bottom=((0.5, 0.), 0),
+ top=((0.5, 1.), 0))[loc]
+
+ return pos, angle_tangent
+
+
+
+ # TICK
+
+ def get_tick_transform(self, axes):
+ trans_tick = [axes.get_xaxis_transform(),
+ axes.get_yaxis_transform()][self.nth_coord]
+
+ return trans_tick
+
+
+ class Floating(_Base):
+ def __init__(self, nth_coord,
+ value):
+
+ self.nth_coord = nth_coord
+
+ self._value = value
+
+ super(AxisArtistHelper.Floating,
+ self).__init__()
+
+
+ def get_nth_coord(self):
+ return self.nth_coord
+
+ def get_line(self, axes):
+ raise RuntimeError("get_line method should be defined by the derived class")
+
+
+
+
+class AxisArtistHelperRectlinear(object):
+
+ class Fixed(AxisArtistHelper.Fixed):
+
+ def __init__(self, axes, loc, nth_coord=None):
+ """
+            nth_coord = along which coordinate the value varies;
+            in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
+ """
+ super(AxisArtistHelperRectlinear.Fixed, self).__init__(
+ loc, nth_coord)
+ self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
+
+ # TICK
+
+ def get_tick_iterators(self, axes):
+ """tick_loc, tick_angle, tick_label"""
+
+ loc = self._loc
+
+ if loc in ["bottom", "top"]:
+ angle_normal, angle_tangent = 90, 0
+ else:
+ angle_normal, angle_tangent = 0, 90
+
+ major = self.axis.major
+ majorLocs = major.locator()
+ major.formatter.set_locs(majorLocs)
+ majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
+
+ minor = self.axis.minor
+ minorLocs = minor.locator()
+ minor.formatter.set_locs(minorLocs)
+ minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
+
+ trans_tick = self.get_tick_transform(axes)
+
+ tr2ax = trans_tick + axes.transAxes.inverted()
+
+ def _f(locs, labels):
+ for x, l in zip(locs, labels):
+
+ c = list(self.passthru_pt) # copy
+ c[self.nth_coord] = x
+
+ # check if the tick point is inside axes
+ c2 = tr2ax.transform_point(c)
+ #delta=0.00001
+ if 0. -self.delta1<= c2[self.nth_coord] <= 1.+self.delta2:
+ yield c, angle_normal, angle_tangent, l
+
+ return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
+
+
+
+ class Floating(AxisArtistHelper.Floating):
+ def __init__(self, axes, nth_coord,
+ passingthrough_point, axis_direction="bottom"):
+ super(AxisArtistHelperRectlinear.Floating, self).__init__(
+ nth_coord, passingthrough_point)
+ self._axis_direction = axis_direction
+ self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
+
+ def get_line(self, axes):
+ _verts = np.array([[0., 0.],
+ [1., 1.]])
+
+ fixed_coord = 1-self.nth_coord
+ trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
+ p = trans_passingthrough_point.transform_point([self._value,
+ self._value])
+ _verts[:,fixed_coord] = p[fixed_coord]
+
+ return Path(_verts)
+
+ def get_line_transform(self, axes):
+ return axes.transAxes
+
+ def get_axislabel_transform(self, axes):
+ return axes.transAxes
+
+ def get_axislabel_pos_angle(self, axes):
+ """
+ label reference position in transAxes.
+
+ get_label_transform() returns a transform of (transAxes+offset)
+ """
+ loc = self._axis_direction
+ #angle = dict(left=0,
+ # right=0,
+ # bottom=.5*np.pi,
+ # top=.5*np.pi)[loc]
+
+ if self.nth_coord == 0:
+ angle = 0
+ else:
+ angle = 90
+
+ _verts = [0.5, 0.5]
+
+ fixed_coord = 1-self.nth_coord
+ trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
+ p = trans_passingthrough_point.transform_point([self._value,
+ self._value])
+ _verts[fixed_coord] = p[fixed_coord]
+ if not (0. <= _verts[fixed_coord] <= 1.):
+ return None, None
+ else:
+ return _verts, angle
+
+
+
+ def get_tick_transform(self, axes):
+ return axes.transData
+
+
+ def get_tick_iterators(self, axes):
+ """tick_loc, tick_angle, tick_label"""
+
+ loc = self._axis_direction
+
+ if loc in ["bottom", "top"]:
+ angle_normal, angle_tangent = 90, 0
+ else:
+ angle_normal, angle_tangent = 0, 90
+
+ if self.nth_coord == 0:
+ angle_normal, angle_tangent = 90, 0
+ else:
+ angle_normal, angle_tangent = 0, 90
+
+ #angle = 90 - 90 * self.nth_coord
+
+ major = self.axis.major
+ majorLocs = major.locator()
+ major.formatter.set_locs(majorLocs)
+ majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
+
+ minor = self.axis.minor
+ minorLocs = minor.locator()
+ minor.formatter.set_locs(minorLocs)
+ minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
+
+ tr2ax = axes.transData + axes.transAxes.inverted()
+
+ def _f(locs, labels):
+ for x, l in zip(locs, labels):
+
+ c = [self._value, self._value]
+ c[self.nth_coord] = x
+ c1, c2 = tr2ax.transform_point(c)
+ if 0. <= c1 <= 1. and 0. <= c2 <= 1.:
+ if 0. - self.delta1 <= [c1, c2][self.nth_coord] <= 1. + self.delta2:
+ yield c, angle_normal, angle_tangent, l
+
+ return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
+
+
+
+
+
+class GridHelperBase(object):
+
+ def __init__(self):
+ self._force_update = True
+ self._old_limits = None
+ super(GridHelperBase, self).__init__()
+
+
+ def update_lim(self, axes):
+ x1, x2 = axes.get_xlim()
+ y1, y2 = axes.get_ylim()
+
+ if self._force_update or self._old_limits != (x1, x2, y1, y2):
+ self._update(x1, x2, y1, y2)
+ self._force_update = False
+ self._old_limits = (x1, x2, y1, y2)
+
+
+ def _update(self, x1, x2, y1, y2):
+ pass
+
+
+ def invalidate(self):
+ self._force_update = True
+
+ def valid(self):
+ return not self._force_update
+
+
+ def get_gridlines(self, which, axis):
+ """
+ Return list of grid lines as a list of paths (list of points).
+
+ *which* : "major" or "minor"
+ *axis* : "both", "x" or "y"
+ """
+ return []
+
+ def new_gridlines(self, ax):
+ """
+        Create and return a new GridlinesCollection instance for the
+        given axes *ax*.
+
+ """
+ gridlines = GridlinesCollection(None, transform=ax.transData,
+ colors=rcParams['grid.color'],
+ linestyles=rcParams['grid.linestyle'],
+ linewidths=rcParams['grid.linewidth'])
+ ax._set_artist_props(gridlines)
+ gridlines.set_grid_helper(self)
+
+ ax.axes._set_artist_props(gridlines)
+ # gridlines.set_clip_path(self.axes.patch)
+ # set_clip_path need to be deferred after Axes.cla is completed.
+ # It is done inside the cla.
+
+ return gridlines
+
+
+class GridHelperRectlinear(GridHelperBase):
+
+
+ def __init__(self, axes):
+
+ super(GridHelperRectlinear, self).__init__()
+ self.axes = axes
+
+
+
+ def new_fixed_axis(self, loc,
+ nth_coord=None,
+ axis_direction=None,
+ offset=None,
+ axes=None,
+ ):
+
+ if axes is None:
+ warnings.warn("'new_fixed_axis' explicitly requires the axes keyword.")
+ axes = self.axes
+
+ _helper = AxisArtistHelperRectlinear.Fixed(axes, loc, nth_coord)
+
+ if axis_direction is None:
+ axis_direction = loc
+ axisline = AxisArtist(axes, _helper, offset=offset,
+ axis_direction=axis_direction,
+ )
+
+ return axisline
+
+
+ def new_floating_axis(self, nth_coord, value,
+ axis_direction="bottom",
+ axes=None,
+ ):
+
+ if axes is None:
+ warnings.warn(
+ "'new_floating_axis' explicitly requires the axes keyword.")
+ axes = self.axes
+
+ passthrough_point = (value, value)
+ transform = axes.transData
+
+ _helper = AxisArtistHelperRectlinear.Floating(
+ axes, nth_coord, value, axis_direction)
+
+ axisline = AxisArtist(axes, _helper)
+
+ axisline.line.set_clip_on(True)
+ axisline.line.set_clip_box(axisline.axes.bbox)
+ return axisline
+
+
+ def get_gridlines(self, which="major", axis="both"):
+ """
+ return list of gridline coordinates in data coordinates.
+
+ *which* : "major" or "minor"
+ *axis* : "both", "x" or "y"
+ """
+
+ gridlines = []
+
+
+ if axis in ["both", "x"]:
+ locs = []
+ y1, y2 = self.axes.get_ylim()
+ #if self.axes.xaxis._gridOnMajor:
+ if which in ["both", "major"]:
+ locs.extend(self.axes.xaxis.major.locator())
+ #if self.axes.xaxis._gridOnMinor:
+ if which in ["both", "minor"]:
+ locs.extend(self.axes.xaxis.minor.locator())
+
+ for x in locs:
+ gridlines.append([[x, x], [y1, y2]])
+
+
+ if axis in ["both", "y"]:
+ x1, x2 = self.axes.get_xlim()
+ locs = []
+ if self.axes.yaxis._gridOnMajor:
+ #if which in ["both", "major"]:
+ locs.extend(self.axes.yaxis.major.locator())
+ if self.axes.yaxis._gridOnMinor:
+ #if which in ["both", "minor"]:
+ locs.extend(self.axes.yaxis.minor.locator())
+
+ for y in locs:
+ gridlines.append([[x1, x2], [y, y]])
+
+ return gridlines
+
+
+
+
+
+
+class SimpleChainedObjects(object):
+ def __init__(self, objects):
+ self._objects = objects
+
+ def __getattr__(self, k):
+ _a = SimpleChainedObjects([getattr(a, k) for a in self._objects])
+ return _a
+
+ def __call__(self, *kl, **kwargs):
+ for m in self._objects:
+ m(*kl, **kwargs)
+
+
+class Axes(maxes.Axes):
+
+ class AxisDict(dict):
+ def __init__(self, axes):
+ self.axes = axes
+ super(Axes.AxisDict, self).__init__()
+
+ def __getitem__(self, k):
+ if isinstance(k, tuple):
+ r = SimpleChainedObjects([dict.__getitem__(self, k1) for k1 in k])
+ return r
+ elif isinstance(k, slice):
+                if k.start is None and k.stop is None and k.step is None:
+ r = SimpleChainedObjects(list(six.itervalues(self)))
+ return r
+ else:
+ raise ValueError("Unsupported slice")
+ else:
+ return dict.__getitem__(self, k)
+
+ def __call__(self, *v, **kwargs):
+ return maxes.Axes.axis(self.axes, *v, **kwargs)
+
+
+ def __init__(self, *kl, **kw):
+
+
+ helper = kw.pop("grid_helper", None)
+
+ self._axisline_on = True
+
+ if helper:
+ self._grid_helper = helper
+ else:
+ self._grid_helper = GridHelperRectlinear(self)
+
+ super(Axes, self).__init__(*kl, **kw)
+
+ self.toggle_axisline(True)
+
+
+ def toggle_axisline(self, b=None):
+ if b is None:
+ b = not self._axisline_on
+ if b:
+ self._axisline_on = True
+ for s in self.spines.values():
+ s.set_visible(False)
+ self.xaxis.set_visible(False)
+ self.yaxis.set_visible(False)
+ else:
+ self._axisline_on = False
+ for s in self.spines.values():
+ s.set_visible(True)
+ self.xaxis.set_visible(True)
+ self.yaxis.set_visible(True)
+
+
+ def _init_axis(self):
+ super(Axes, self)._init_axis()
+
+
+ def _init_axis_artists(self, axes=None):
+ if axes is None:
+ axes = self
+
+ self._axislines = self.AxisDict(self)
+ new_fixed_axis = self.get_grid_helper().new_fixed_axis
+ for loc in ["bottom", "top", "left", "right"]:
+ self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes,
+ axis_direction=loc)
+
+ for axisline in [self._axislines["top"], self._axislines["right"]]:
+ axisline.label.set_visible(False)
+ axisline.major_ticklabels.set_visible(False)
+ axisline.minor_ticklabels.set_visible(False)
+
+ @property
+ def axis(self):
+ return self._axislines
+
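+    # Hedged access sketch for the AxisDict returned above:
+    #     ax.axis["bottom"]                                  # one AxisArtist
+    #     ax.axis["left", "right"].toggle(ticklabels=False)  # chained call
+    #     ax.axis[:].set_visible(False)                      # all of them
+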
+ def new_gridlines(self, grid_helper=None):
+ """
+        Create and return a new GridlinesCollection instance, using
+        *grid_helper* (or this axes' grid helper if it is None).
+
+ """
+ if grid_helper is None:
+ grid_helper = self.get_grid_helper()
+
+ gridlines = grid_helper.new_gridlines(self)
+
+ return gridlines
+
+
+ def _init_gridlines(self, grid_helper=None):
+ # It is done inside the cla.
+ gridlines = self.new_gridlines(grid_helper)
+
+ self.gridlines = gridlines
+
+ def cla(self):
+        # gridlines need to be created before cla() since cla calls grid()
+
+ self._init_gridlines()
+ super(Axes, self).cla()
+
+ # the clip_path should be set after Axes.cla() since that's
+ # when a patch is created.
+ self.gridlines.set_clip_path(self.axes.patch)
+
+ self._init_axis_artists()
+
+ def get_grid_helper(self):
+ return self._grid_helper
+
+
+ def grid(self, b=None, which='major', axis="both", **kwargs):
+ """
+ Toggle the gridlines, and optionally set the properties of the lines.
+ """
+        # there are some discrepancies between the behavior of grid in
+        # axes_grid and the original mpl's grid, because axes_grid
+        # explicitly sets the visibility of the gridlines.
+
+ super(Axes, self).grid(b, which=which, axis=axis, **kwargs)
+ if not self._axisline_on:
+ return
+
+ if b is None:
+
+ if self.axes.xaxis._gridOnMinor or self.axes.xaxis._gridOnMajor or \
+ self.axes.yaxis._gridOnMinor or self.axes.yaxis._gridOnMajor:
+ b=True
+ else:
+ b=False
+
+ self.gridlines.set_which(which)
+ self.gridlines.set_axis(axis)
+ self.gridlines.set_visible(b)
+
+ if len(kwargs):
+ martist.setp(self.gridlines, **kwargs)
+
+ def get_children(self):
+ if self._axisline_on:
+ children = list(six.itervalues(self._axislines)) + [self.gridlines]
+ else:
+ children = []
+ children.extend(super(Axes, self).get_children())
+ return children
+
+ def invalidate_grid_helper(self):
+ self._grid_helper.invalidate()
+
+
+ def new_fixed_axis(self, loc, offset=None):
+ gh = self.get_grid_helper()
+ axis = gh.new_fixed_axis(loc,
+ nth_coord=None,
+ axis_direction=None,
+ offset=offset,
+ axes=self,
+ )
+ return axis
+
+
+ def new_floating_axis(self, nth_coord, value,
+ axis_direction="bottom",
+ ):
+ gh = self.get_grid_helper()
+ axis = gh.new_floating_axis(nth_coord, value,
+ axis_direction=axis_direction,
+ axes=self)
+ return axis
+
+
+
+ def draw(self, renderer, inframe=False):
+
+ if not self._axisline_on:
+ super(Axes, self).draw(renderer, inframe)
+ return
+
+ orig_artists = self.artists
+ self.artists = self.artists + list(self._axislines.values()) + [self.gridlines]
+
+ super(Axes, self).draw(renderer, inframe)
+
+ self.artists = orig_artists
+
+
+ def get_tightbbox(self, renderer, call_axes_locator=True):
+
+ bb0 = super(Axes, self).get_tightbbox(renderer, call_axes_locator)
+
+ if not self._axisline_on:
+ return bb0
+
+ bb = [bb0]
+
+ for axisline in list(six.itervalues(self._axislines)):
+ if not axisline.get_visible():
+ continue
+
+ bb.append(axisline.get_tightbbox(renderer))
+ # if axisline.label.get_visible():
+ # bb.append(axisline.label.get_window_extent(renderer))
+
+
+ # if axisline.major_ticklabels.get_visible():
+ # bb.extend(axisline.major_ticklabels.get_window_extents(renderer))
+ # if axisline.minor_ticklabels.get_visible():
+ # bb.extend(axisline.minor_ticklabels.get_window_extents(renderer))
+ # if axisline.major_ticklabels.get_visible() or \
+ # axisline.minor_ticklabels.get_visible():
+ # bb.append(axisline.offsetText.get_window_extent(renderer))
+
+ #bb.extend([c.get_window_extent(renderer) for c in artists \
+ # if c.get_visible()])
+
+ _bbox = Bbox.union([b for b in bb if b and (b.width!=0 or b.height!=0)])
+
+ return _bbox
+
+
+
+
+Subplot = maxes.subplot_class_factory(Axes)
+
+class AxesZero(Axes):
+ def __init__(self, *kl, **kw):
+
+ super(AxesZero, self).__init__(*kl, **kw)
+
+
+ def _init_axis_artists(self):
+ super(AxesZero, self)._init_axis_artists()
+
+ new_floating_axis = self._grid_helper.new_floating_axis
+ xaxis_zero = new_floating_axis(nth_coord=0,
+ value=0.,
+ axis_direction="bottom",
+ axes=self)
+
+ xaxis_zero.line.set_clip_path(self.patch)
+ xaxis_zero.set_visible(False)
+ self._axislines["xzero"] = xaxis_zero
+
+ yaxis_zero = new_floating_axis(nth_coord=1,
+ value=0.,
+ axis_direction="left",
+ axes=self)
+
+
+ yaxis_zero.line.set_clip_path(self.patch)
+ yaxis_zero.set_visible(False)
+ self._axislines["yzero"] = yaxis_zero
+
+SubplotZero = maxes.subplot_class_factory(AxesZero)
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/clip_path.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/clip_path.py
new file mode 100644
index 00000000000..8507b09b075
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/clip_path.py
@@ -0,0 +1,135 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import zip
+
+import numpy as np
+from math import degrees
+import math
+import warnings
+
+def atan2(dy, dx):
+ if dx == 0 and dy == 0:
+        warnings.warn("dx and dy are 0")
+ return 0
+ else:
+ return math.atan2(dy, dx)
+
+# FIXME : The current algorithm seems to return incorrect angle when the line
+# ends at the boundary.
+
+def clip(xlines, ylines, x0, clip="right", xdir=True, ydir=True):
+
+ clipped_xlines = []
+ clipped_ylines = []
+
+ _pos_angles = []
+
+ if xdir:
+ xsign = 1
+ else:
+ xsign = -1
+
+ if ydir:
+ ysign = 1
+ else:
+ ysign = -1
+
+
+ for x, y in zip(xlines, ylines):
+
+ if clip in ["up", "right"]:
+ b = (x < x0).astype("i")
+ db = b[1:] - b[:-1]
+ else:
+ b = (x > x0).astype("i")
+ db = b[1:] - b[:-1]
+
+
+ if b[0]:
+ ns = 0
+ else:
+ ns = -1
+ segx, segy = [], []
+ for (i,) in np.argwhere(db!=0):
+ c = db[i]
+ if c == -1:
+ dx = (x0 - x[i])
+ dy = (y[i+1] - y[i]) * (dx/ (x[i+1] - x[i]))
+ y0 = y[i] + dy
+ clipped_xlines.append(np.concatenate([segx, x[ns:i+1], [x0]]))
+ clipped_ylines.append(np.concatenate([segy, y[ns:i+1], [y0]]))
+ ns = -1
+ segx, segy = [], []
+
+ if dx == 0. and dy == 0:
+ dx = x[i+1] - x[i]
+ dy = y[i+1] - y[i]
+
+ a = degrees(atan2(ysign*dy, xsign*dx))
+ _pos_angles.append((x0, y0, a))
+
+ elif c == 1:
+ dx = (x0 - x[i])
+ dy = (y[i+1] - y[i]) * (dx / (x[i+1] - x[i]))
+ y0 = y[i] + dy
+ segx, segy = [x0], [y0]
+ ns = i+1
+
+ if dx == 0. and dy == 0:
+ dx = x[i+1] - x[i]
+ dy = y[i+1] - y[i]
+
+ a = degrees(atan2(ysign*dy, xsign*dx))
+ _pos_angles.append((x0, y0, a))
+
+ if ns != -1:
+ clipped_xlines.append(np.concatenate([segx, x[ns:]]))
+ clipped_ylines.append(np.concatenate([segy, y[ns:]]))
+
+ #clipped_pos_angles.append(_pos_angles)
+
+
+ return clipped_xlines, clipped_ylines, _pos_angles
+
+
+def clip_line_to_rect(xline, yline, bbox):
+
+ x0, y0, x1, y1 = bbox.extents
+
+ xdir = x1 > x0
+ ydir = y1 > y0
+
+ if x1 > x0:
+ lx1, ly1, c_right_ = clip([xline], [yline], x1, clip="right", xdir=xdir, ydir=ydir)
+ lx2, ly2, c_left_ = clip(lx1, ly1, x0, clip="left", xdir=xdir, ydir=ydir)
+ else:
+ lx1, ly1, c_right_ = clip([xline], [yline], x0, clip="right", xdir=xdir, ydir=ydir)
+ lx2, ly2, c_left_ = clip(lx1, ly1, x1, clip="left", xdir=xdir, ydir=ydir)
+
+ if y1 > y0:
+ ly3, lx3, c_top_ = clip(ly2, lx2, y1, clip="right", xdir=ydir, ydir=xdir)
+ ly4, lx4, c_bottom_ = clip(ly3, lx3, y0, clip="left", xdir=ydir, ydir=xdir)
+ else:
+ ly3, lx3, c_top_ = clip(ly2, lx2, y0, clip="right", xdir=ydir, ydir=xdir)
+ ly4, lx4, c_bottom_ = clip(ly3, lx3, y1, clip="left", xdir=ydir, ydir=xdir)
+
+
+ # lx1, ly1, c_right_ = clip([xline], [yline], x1, clip="right")
+ # lx2, ly2, c_left_ = clip(lx1, ly1, x0, clip="left")
+ # ly3, lx3, c_top_ = clip(ly2, lx2, y1, clip="right")
+ # ly4, lx4, c_bottom_ = clip(ly3, lx3, y0, clip="left")
+
+ #c_left = [((x, y), (a+90)%180-180) for (x, y, a) in c_left_ \
+ # if bbox.containsy(y)]
+ c_left = [((x, y), (a+90)%180-90) for (x, y, a) in c_left_
+ if bbox.containsy(y)]
+ c_bottom = [((x, y), (90 - a)%180) for (y, x, a) in c_bottom_
+ if bbox.containsx(x)]
+ c_right = [((x, y), (a+90)%180+90) for (x, y, a) in c_right_
+ if bbox.containsy(y)]
+ c_top = [((x, y), (90 - a)%180+180) for (y, x, a) in c_top_
+ if bbox.containsx(x)]
+
+ return list(zip(lx4, ly4)), [c_left, c_bottom, c_right, c_top]
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/floating_axes.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/floating_axes.py
new file mode 100644
index 00000000000..468413dbac4
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/floating_axes.py
@@ -0,0 +1,544 @@
+"""
+Experimental support for curvilinear grids.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import zip
+
+# TODO :
+# see if tick_iterator method can be simplified by reusing the parent method.
+
+import numpy as np
+
+from matplotlib.transforms import Affine2D, IdentityTransform
+from . import grid_helper_curvelinear
+from .axislines import AxisArtistHelper, GridHelperBase
+from .axis_artist import AxisArtist
+from .grid_finder import GridFinder
+
+
+class FloatingAxisArtistHelper(grid_helper_curvelinear.FloatingAxisArtistHelper):
+ pass
+
+
+class FixedAxisArtistHelper(grid_helper_curvelinear.FloatingAxisArtistHelper):
+
+ def __init__(self, grid_helper, side, nth_coord_ticks=None):
+ """
+        nth_coord = along which coordinate the value varies.
+ nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
+ """
+
+ value, nth_coord = grid_helper.get_data_boundary(side) # return v= 0 , nth=1, extremes of the other coordinate.
+ super(FixedAxisArtistHelper, self).__init__(grid_helper,
+ nth_coord,
+ value,
+ axis_direction=side,
+ )
+ #self.grid_helper = grid_helper
+ if nth_coord_ticks is None:
+ nth_coord_ticks = nth_coord
+ self.nth_coord_ticks = nth_coord_ticks
+
+ self.value = value
+ self.grid_helper = grid_helper
+ self._side = side
+
+
+ def update_lim(self, axes):
+ self.grid_helper.update_lim(axes)
+
+ self.grid_info = self.grid_helper.grid_info
+
+
+
+ def get_axislabel_pos_angle(self, axes):
+
+ extremes = self.grid_info["extremes"]
+
+ if self.nth_coord == 0:
+ xx0 = self.value
+ yy0 = (extremes[2]+extremes[3])/2.
+ dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
+ elif self.nth_coord == 1:
+ xx0 = (extremes[0]+extremes[1])/2.
+ yy0 = self.value
+ dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
+
+ grid_finder = self.grid_helper.grid_finder
+ xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
+
+ trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
+ p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
+
+
+ if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
+ xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
+ xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
+ xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
+
+ return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
+ else:
+ return None, None
+
+
+
+ def get_tick_transform(self, axes):
+ return IdentityTransform() #axes.transData
+
+ def get_tick_iterators(self, axes):
+        """tick_loc, angle_normal, angle_tangent, tick_label"""
+
+
+ grid_finder = self.grid_helper.grid_finder
+
+ lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
+ lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
+
+ lon_levs, lat_levs = np.asarray(lon_levs), np.asarray(lat_levs)
+ if lat_factor is not None:
+ yy0 = lat_levs / lat_factor
+ dy = 0.001 / lat_factor
+ else:
+ yy0 = lat_levs
+ dy = 0.001
+
+ if lon_factor is not None:
+ xx0 = lon_levs / lon_factor
+ dx = 0.001 / lon_factor
+ else:
+ xx0 = lon_levs
+ dx = 0.001
+
+ _extremes = self.grid_helper._extremes
+ xmin, xmax = sorted(_extremes[:2])
+ ymin, ymax = sorted(_extremes[2:])
+ if self.nth_coord == 0:
+ mask = (ymin <= yy0) & (yy0 <= ymax)
+ yy0 = yy0[mask]
+ elif self.nth_coord == 1:
+ mask = (xmin <= xx0) & (xx0 <= xmax)
+ xx0 = xx0[mask]
+
+ def transform_xy(x, y):
+ x1, y1 = grid_finder.transform_xy(x, y)
+ x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
+ x2, y2 = x2y2.transpose()
+ return x2, y2
+
+ # find angles
+ if self.nth_coord == 0:
+ xx0 = np.empty_like(yy0)
+ xx0.fill(self.value)
+
+ #yy0_ = yy0.copy()
+
+ xx1, yy1 = transform_xy(xx0, yy0)
+
+ xx00 = xx0.astype(float, copy=True)
+ xx00[xx0+dx>xmax] -= dx
+ xx1a, yy1a = transform_xy(xx00, yy0)
+ xx1b, yy1b = transform_xy(xx00+dx, yy0)
+
+ yy00 = yy0.astype(float, copy=True)
+ yy00[yy0+dy>ymax] -= dy
+ xx2a, yy2a = transform_xy(xx0, yy00)
+ xx2b, yy2b = transform_xy(xx0, yy00+dy)
+
+ labels = self.grid_info["lat_labels"]
+ labels = [l for l, m in zip(labels, mask) if m]
+
+ elif self.nth_coord == 1:
+ yy0 = np.empty_like(xx0)
+ yy0.fill(self.value)
+
+ #xx0_ = xx0.copy()
+ xx1, yy1 = transform_xy(xx0, yy0)
+
+
+ yy00 = yy0.astype(float, copy=True)
+ yy00[yy0+dy>ymax] -= dy
+ xx1a, yy1a = transform_xy(xx0, yy00)
+ xx1b, yy1b = transform_xy(xx0, yy00+dy)
+
+ xx00 = xx0.astype(float, copy=True)
+ xx00[xx0+dx>xmax] -= dx
+ xx2a, yy2a = transform_xy(xx00, yy0)
+ xx2b, yy2b = transform_xy(xx00+dx, yy0)
+
+ labels = self.grid_info["lon_labels"]
+ labels = [l for l, m in zip(labels, mask) if m]
+
+
+ def f1():
+ dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
+ dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
+ mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
+ dd[mm] = dd2[mm] + np.pi / 2
+
+ #dd += np.pi
+ #dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
+ trans_tick = self.get_tick_transform(axes)
+ tr2ax = trans_tick + axes.transAxes.inverted()
+ for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
+ c2 = tr2ax.transform_point((x, y))
+ delta=0.00001
+ if (0. -delta<= c2[0] <= 1.+delta) and \
+ (0. -delta<= c2[1] <= 1.+delta):
+ d1 = d/3.14159*180.
+ d2 = d2/3.14159*180.
+ #_mod = (d2-d1+180)%360
+ #if _mod < 180:
+ # d1 += 180
+ ##_div, _mod = divmod(d2-d1, 360)
+ yield [x, y], d1, d2, lab
+ #, d2/3.14159*180.+da)
+
+ return f1(), iter([])
+
+ def get_line_transform(self, axes):
+ return axes.transData
+
+ def get_line(self, axes):
+
+ self.update_lim(axes)
+ from matplotlib.path import Path
+ k, v = dict(left=("lon_lines0", 0),
+ right=("lon_lines0", 1),
+ bottom=("lat_lines0", 0),
+ top=("lat_lines0", 1))[self._side]
+
+ xx, yy = self.grid_info[k][v]
+ return Path(np.column_stack([xx, yy]))
+
+
+
+from .grid_finder import ExtremeFinderSimple
+
+class ExtremeFinderFixed(ExtremeFinderSimple):
+ def __init__(self, extremes):
+ self._extremes = extremes
+
+ def __call__(self, transform_xy, x1, y1, x2, y2):
+ """
+        Return the fixed extreme values, ignoring the given view limits.
+
+        x1, y1, x2, y2 are in image coordinates (0-based).
+ """
+ #lon_min, lon_max, lat_min, lat_max = self._extremes
+ return self._extremes
+
+
+
+class GridHelperCurveLinear(grid_helper_curvelinear.GridHelperCurveLinear):
+
+ def __init__(self, aux_trans, extremes,
+ grid_locator1=None,
+ grid_locator2=None,
+ tick_formatter1=None,
+ tick_formatter2=None):
+ """
+        aux_trans : a transform from the source (curved) coordinates to
+        the target (rectilinear) coordinates. Either an instance of MPL's
+        Transform (its inverse transform should be defined) or a tuple of
+        two callable objects which define the transform and its inverse.
+        The callables need to take two arguments (arrays of source
+        coordinates) and should return two arrays of target coordinates:
+        e.g., *x2, y2 = trans(x1, y1)*
+ """
+
+ self._old_values = None
+
+ self._extremes = extremes
+ extreme_finder = ExtremeFinderFixed(extremes)
+
+ super(GridHelperCurveLinear, self).__init__(aux_trans,
+ extreme_finder,
+ grid_locator1=grid_locator1,
+ grid_locator2=grid_locator2,
+ tick_formatter1=tick_formatter1,
+ tick_formatter2=tick_formatter2)
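+
+    # Hedged usage sketch (PolarTransform and Affine2D are standard MPL
+    # transforms; extremes is (x0, x1, y0, y1) in the curved coordinates):
+    #
+    #     from matplotlib.projections import PolarAxes
+    #     from matplotlib.transforms import Affine2D
+    #     tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
+    #     grid_helper = GridHelperCurveLinear(tr, extremes=(0, 360, 1, 10))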
+
+
+ # def update_grid_finder(self, aux_trans=None, **kw):
+
+ # if aux_trans is not None:
+ # self.grid_finder.update_transform(aux_trans)
+
+ # self.grid_finder.update(**kw)
+ # self.invalidate()
+
+
+ # def _update(self, x1, x2, y1, y2):
+ # "bbox in 0-based image coordinates"
+ # # update wcsgrid
+
+ # if self.valid() and self._old_values == (x1, x2, y1, y2):
+ # return
+
+ # self._update_grid(x1, y1, x2, y2)
+
+ # self._old_values = (x1, x2, y1, y2)
+
+ # self._force_update = False
+
+
+ def get_data_boundary(self, side):
+ """
+        Return (value, nth_coord) of the data boundary for the given *side*.
+ """
+ lon1, lon2, lat1, lat2 = self._extremes
+ return dict(left=(lon1, 0),
+ right=(lon2, 0),
+ bottom=(lat1, 1),
+ top=(lat2, 1))[side]
+
+
+ def new_fixed_axis(self, loc,
+ nth_coord=None,
+ axis_direction=None,
+ offset=None,
+ axes=None):
+
+ if axes is None:
+ axes = self.axes
+
+ if axis_direction is None:
+ axis_direction = loc
+
+ _helper = FixedAxisArtistHelper(self, loc,
+ nth_coord_ticks=nth_coord)
+
+
+ axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
+ axisline.line.set_clip_on(True)
+ axisline.line.set_clip_box(axisline.axes.bbox)
+
+
+ return axisline
+
+
+ # new_floating_axis will inherit the grid_helper's extremes.
+
+ # def new_floating_axis(self, nth_coord,
+ # value,
+ # axes=None,
+ # axis_direction="bottom"
+ # ):
+
+ # axis = super(GridHelperCurveLinear,
+ # self).new_floating_axis(nth_coord,
+ # value, axes=axes,
+ # axis_direction=axis_direction)
+
+ # # set extreme values of the axis helper
+ # if nth_coord == 1:
+ # axis.get_helper().set_extremes(*self._extremes[:2])
+ # elif nth_coord == 0:
+ # axis.get_helper().set_extremes(*self._extremes[2:])
+
+ # return axis
+
+
+ def _update_grid(self, x1, y1, x2, y2):
+
+ #self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
+
+ if self.grid_info is None:
+ self.grid_info = dict()
+
+ grid_info = self.grid_info
+
+ grid_finder = self.grid_finder
+ extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
+ x1, y1, x2, y2)
+
+ lon_min, lon_max = sorted(extremes[:2])
+ lat_min, lat_max = sorted(extremes[2:])
+ lon_levs, lon_n, lon_factor = \
+ grid_finder.grid_locator1(lon_min, lon_max)
+ lat_levs, lat_n, lat_factor = \
+ grid_finder.grid_locator2(lat_min, lat_max)
+ grid_info["extremes"] = lon_min, lon_max, lat_min, lat_max #extremes
+
+ grid_info["lon_info"] = lon_levs, lon_n, lon_factor
+ grid_info["lat_info"] = lat_levs, lat_n, lat_factor
+
+ grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
+ lon_factor,
+ lon_levs)
+
+ grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
+ lat_factor,
+ lat_levs)
+
+ if lon_factor is None:
+ lon_values = np.asarray(lon_levs[:lon_n])
+ else:
+ lon_values = np.asarray(lon_levs[:lon_n]/lon_factor)
+ if lat_factor is None:
+ lat_values = np.asarray(lat_levs[:lat_n])
+ else:
+ lat_values = np.asarray(lat_levs[:lat_n]/lat_factor)
+
+ lon_values0 = lon_values[(lon_min<lon_values) & (lon_values<lon_max)]
+ lat_values0 = lat_values[(lat_min<lat_values) & (lat_values<lat_max)]
+ lon_lines, lat_lines = grid_finder._get_raw_grid_lines(lon_values0,
+ lat_values0,
+ lon_min, lon_max,
+ lat_min, lat_max)
+
+
+ grid_info["lon_lines"] = lon_lines
+ grid_info["lat_lines"] = lat_lines
+
+
+ lon_lines, lat_lines = grid_finder._get_raw_grid_lines(extremes[:2],
+ extremes[2:],
+ *extremes)
+ #lon_min, lon_max,
+ # lat_min, lat_max)
+
+
+ grid_info["lon_lines0"] = lon_lines
+ grid_info["lat_lines0"] = lat_lines
+
+
+
+ def get_gridlines(self, which="major", axis="both"):
+ grid_lines = []
+ if axis in ["both", "x"]:
+ for gl in self.grid_info["lon_lines"]:
+                grid_lines.append(gl)
+ if axis in ["both", "y"]:
+ for gl in self.grid_info["lat_lines"]:
+                grid_lines.append(gl)
+
+ return grid_lines
+
+
+ def get_boundary(self):
+ """
+        Return an Nx2 array of the (x, y) coordinates of the boundary.
+ """
+ x0, x1, y0, y1 = self._extremes
+ tr = self._aux_trans
+ xx = np.linspace(x0, x1, 100)
+ yy0, yy1 = np.empty_like(xx), np.empty_like(xx)
+ yy0.fill(y0)
+ yy1.fill(y1)
+
+ yy = np.linspace(y0, y1, 100)
+ xx0, xx1 = np.empty_like(yy), np.empty_like(yy)
+ xx0.fill(x0)
+ xx1.fill(x1)
+
+ xxx = np.concatenate([xx[:-1], xx1[:-1], xx[-1:0:-1], xx0])
+ yyy = np.concatenate([yy0[:-1], yy[:-1], yy1[:-1], yy[::-1]])
+ t = tr.transform(np.array([xxx, yyy]).transpose())
+
+ return t
+
+
+class FloatingAxesBase(object):
+
+
+ def __init__(self, *kl, **kwargs):
+ grid_helper = kwargs.get("grid_helper", None)
+ if grid_helper is None:
+ raise ValueError("FloatingAxes requires grid_helper argument")
+ if not hasattr(grid_helper, "get_boundary"):
+ raise ValueError("grid_helper must implement get_boundary method")
+
+ self._axes_class_floating.__init__(self, *kl, **kwargs)
+
+ self.set_aspect(1.)
+ self.adjust_axes_lim()
+
+
+ def _gen_axes_patch(self):
+ """
+ Returns the patch used to draw the background of the axes. It
+ is also used as the clipping path for any data elements on the
+ axes.
+
+ In the standard axes, this is a rectangle, but in other
+ projections it may not be.
+
+ .. note::
+ Intended to be overridden by new projection types.
+ """
+ import matplotlib.patches as mpatches
+ grid_helper = self.get_grid_helper()
+ t = grid_helper.get_boundary()
+ return mpatches.Polygon(t)
+
+ def cla(self):
+ self._axes_class_floating.cla(self)
+ #HostAxes.cla(self)
+ self.patch.set_transform(self.transData)
+
+
+ patch = self._axes_class_floating._gen_axes_patch(self)
+ patch.set_figure(self.figure)
+ patch.set_visible(False)
+ patch.set_transform(self.transAxes)
+
+ self.patch.set_clip_path(patch)
+ self.gridlines.set_clip_path(patch)
+
+ self._original_patch = patch
+
+
+ def adjust_axes_lim(self):
+
+ #t = self.get_boundary()
+ grid_helper = self.get_grid_helper()
+ t = grid_helper.get_boundary()
+ x, y = t[:,0], t[:,1]
+
+ xmin, xmax = min(x), max(x)
+ ymin, ymax = min(y), max(y)
+
+ dx = (xmax-xmin)/100.
+ dy = (ymax-ymin)/100.
+
+ self.set_xlim(xmin-dx, xmax+dx)
+ self.set_ylim(ymin-dy, ymax+dy)
+
+
+
+_floatingaxes_classes = {}
+
+def floatingaxes_class_factory(axes_class):
+
+ new_class = _floatingaxes_classes.get(axes_class)
+ if new_class is None:
+ new_class = type(str("Floating %s" % (axes_class.__name__)),
+ (FloatingAxesBase, axes_class),
+ {'_axes_class_floating': axes_class})
+ _floatingaxes_classes[axes_class] = new_class
+
+ return new_class
+
+from .axislines import Axes
+from mpl_toolkits.axes_grid1.parasite_axes import host_axes_class_factory
+
+FloatingAxes = floatingaxes_class_factory(host_axes_class_factory(Axes))
+
+
+import matplotlib.axes as maxes
+FloatingSubplot = maxes.subplot_class_factory(FloatingAxes)
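+
+# Illustrative usage sketch: a floating subplot over a scaled polar transform.
+# The ``extremes`` keyword and the use of pyplot/PolarAxes follow the usual
+# demos and are assumptions, not guarantees made by this module alone.
+#
+#     import numpy as np
+#     import matplotlib.pyplot as plt
+#     from matplotlib.transforms import Affine2D
+#     from matplotlib.projections import PolarAxes
+#
+#     tr = Affine2D().scale(np.pi / 180., 1.) + PolarAxes.PolarTransform()
+#     grid_helper = GridHelperCurveLinear(tr, extremes=(0, 90, 0, 10))
+#     fig = plt.figure()
+#     ax = FloatingSubplot(fig, 111, grid_helper=grid_helper)
+#     fig.add_subplot(ax)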
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/grid_finder.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/grid_finder.py
new file mode 100644
index 00000000000..62a94b14783
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/grid_finder.py
@@ -0,0 +1,340 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import numpy as np
+from matplotlib.transforms import Bbox
+from . import clip_path
+clip_line_to_rect = clip_path.clip_line_to_rect
+
+import matplotlib.ticker as mticker
+from matplotlib.transforms import Transform
+
+# extremes finder
+
+class ExtremeFinderSimple(object):
+ def __init__(self, nx, ny):
+ self.nx, self.ny = nx, ny
+
+ def __call__(self, transform_xy, x1, y1, x2, y2):
+ """
+        Get the extreme values of the transformed coordinates.
+
+        x1, y1, x2, y2 : bounding box in image coordinates (0-based)
+        nx, ny : number of divisions along each axis
+ """
+ x_, y_ = np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)
+ x, y = np.meshgrid(x_, y_)
+ lon, lat = transform_xy(np.ravel(x), np.ravel(y))
+
+ lon_min, lon_max = lon.min(), lon.max()
+ lat_min, lat_max = lat.min(), lat.max()
+
+ return self._add_pad(lon_min, lon_max, lat_min, lat_max)
+
+ def _add_pad(self, lon_min, lon_max, lat_min, lat_max):
+        """ A small amount of padding is added because the current
+        clipping algorithm seems to fail when a gridline ends exactly at
+        the bbox boundary.
+ """
+ dlon = (lon_max - lon_min) / self.nx
+ dlat = (lat_max - lat_min) / self.ny
+
+ lon_min, lon_max = lon_min - dlon, lon_max + dlon
+ lat_min, lat_max = lat_min - dlat, lat_max + dlat
+
+ return lon_min, lon_max, lat_min, lat_max
+
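+# Illustrative sketch: the finder samples an nx-by-ny mesh over the image
+# bbox, maps it through ``transform_xy`` and returns the padded extremes.
+# The lambda below is an arbitrary stand-in for a real world transform.
+#
+#     ef = ExtremeFinderSimple(20, 20)
+#     transform_xy = lambda x, y: (2.0 * x, y - x)
+#     lon_min, lon_max, lat_min, lat_max = ef(transform_xy, 0, 0, 10, 10)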
+
+
+class GridFinderBase(object):
+ def __init__(self,
+ extreme_finder,
+ grid_locator1,
+ grid_locator2,
+ tick_formatter1=None,
+ tick_formatter2=None):
+ """
+        extreme_finder, grid_locator1, grid_locator2 : helpers used to find
+        the grid extremes and to locate grid values along the 1st and 2nd axis.
+
+        Derived classes must define "transform_xy" and "inv_transform_xy"
+        (typically via update_transform).
+ """
+ super(GridFinderBase, self).__init__()
+
+ self.extreme_finder = extreme_finder
+ self.grid_locator1 = grid_locator1
+ self.grid_locator2 = grid_locator2
+ self.tick_formatter1 = tick_formatter1
+ self.tick_formatter2 = tick_formatter2
+
+ def get_grid_info(self,
+ x1, y1, x2, y2):
+ """
+        Compute grid lines, tick positions and tick labels for the given
+        bounding box (x1, y1, x2, y2) in image coordinates.
+ """
+
+ extremes = self.extreme_finder(self.inv_transform_xy, x1, y1, x2, y2)
+
+        # min & max range of lat (or lon) over which each grid line will be drawn.
+ # i.e., gridline of lon=0 will be drawn from lat_min to lat_max.
+
+ lon_min, lon_max, lat_min, lat_max = extremes
+ lon_levs, lon_n, lon_factor = \
+ self.grid_locator1(lon_min, lon_max)
+ lat_levs, lat_n, lat_factor = \
+ self.grid_locator2(lat_min, lat_max)
+
+ if lon_factor is None:
+ lon_values = np.asarray(lon_levs[:lon_n])
+ else:
+ lon_values = np.asarray(lon_levs[:lon_n]/lon_factor)
+ if lat_factor is None:
+ lat_values = np.asarray(lat_levs[:lat_n])
+ else:
+ lat_values = np.asarray(lat_levs[:lat_n]/lat_factor)
+
+
+ lon_lines, lat_lines = self._get_raw_grid_lines(lon_values,
+ lat_values,
+ lon_min, lon_max,
+ lat_min, lat_max)
+
+ ddx = (x2-x1)*1.e-10
+ ddy = (y2-y1)*1.e-10
+ bb = Bbox.from_extents(x1-ddx, y1-ddy, x2+ddx, y2+ddy)
+
+ grid_info = {}
+ grid_info["extremes"] = extremes
+ grid_info["lon_lines"] = lon_lines
+ grid_info["lat_lines"] = lat_lines
+
+ grid_info["lon"] = self._clip_grid_lines_and_find_ticks(lon_lines,
+ lon_values,
+ lon_levs,
+ bb)
+
+ grid_info["lat"] = self._clip_grid_lines_and_find_ticks(lat_lines,
+ lat_values,
+ lat_levs,
+ bb)
+
+ tck_labels = grid_info["lon"]["tick_labels"] = dict()
+ for direction in ["left", "bottom", "right", "top"]:
+ levs = grid_info["lon"]["tick_levels"][direction]
+ tck_labels[direction] = self.tick_formatter1(direction,
+ lon_factor, levs)
+
+ tck_labels = grid_info["lat"]["tick_labels"] = dict()
+ for direction in ["left", "bottom", "right", "top"]:
+ levs = grid_info["lat"]["tick_levels"][direction]
+ tck_labels[direction] = self.tick_formatter2(direction,
+ lat_factor, levs)
+
+ return grid_info
+
+
+ def _get_raw_grid_lines(self,
+ lon_values, lat_values,
+ lon_min, lon_max, lat_min, lat_max):
+
+ lons_i = np.linspace(lon_min, lon_max, 100) # for interpolation
+ lats_i = np.linspace(lat_min, lat_max, 100)
+
+ lon_lines = [self.transform_xy(np.zeros_like(lats_i) + lon, lats_i)
+ for lon in lon_values]
+ lat_lines = [self.transform_xy(lons_i, np.zeros_like(lons_i) + lat)
+ for lat in lat_values]
+
+ return lon_lines, lat_lines
+
+
+ def _clip_grid_lines_and_find_ticks(self, lines, values, levs, bb):
+ gi = dict()
+ gi["values"] = []
+ gi["levels"] = []
+ gi["tick_levels"] = dict(left=[], bottom=[], right=[], top=[])
+ gi["tick_locs"] = dict(left=[], bottom=[], right=[], top=[])
+ gi["lines"] = []
+
+ tck_levels = gi["tick_levels"]
+ tck_locs = gi["tick_locs"]
+ for (lx, ly), v, lev in zip(lines, values, levs):
+ xy, tcks = clip_line_to_rect(lx, ly, bb)
+ if not xy:
+ continue
+ gi["levels"].append(v)
+ gi["lines"].append(xy)
+
+ for tck, direction in zip(tcks,
+ ["left", "bottom", "right", "top"]):
+ for t in tck:
+ tck_levels[direction].append(lev)
+ tck_locs[direction].append(t)
+
+ return gi
+
+
+ def update_transform(self, aux_trans):
+ if isinstance(aux_trans, Transform):
+ def transform_xy(x, y):
+ x, y = np.asarray(x), np.asarray(y)
+ ll1 = np.concatenate((x[:,np.newaxis], y[:,np.newaxis]), 1)
+ ll2 = aux_trans.transform(ll1)
+ lon, lat = ll2[:,0], ll2[:,1]
+ return lon, lat
+
+ def inv_transform_xy(x, y):
+ x, y = np.asarray(x), np.asarray(y)
+ ll1 = np.concatenate((x[:,np.newaxis], y[:,np.newaxis]), 1)
+ ll2 = aux_trans.inverted().transform(ll1)
+ lon, lat = ll2[:,0], ll2[:,1]
+ return lon, lat
+
+ else:
+ transform_xy, inv_transform_xy = aux_trans
+
+ self.transform_xy = transform_xy
+ self.inv_transform_xy = inv_transform_xy
+
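+    # Illustrative sketch of the two accepted forms of ``aux_trans`` (``finder``
+    # stands for any instance providing this method):
+    #
+    #     from matplotlib.transforms import Affine2D
+    #     finder.update_transform(Affine2D().rotate_deg(15))   # Transform instance
+    #     finder.update_transform((lambda x, y: (x, y + x),    # (forward, inverse)
+    #                              lambda x, y: (x, y - x)))   #  callable pair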
+
+ def update(self, **kw):
+ for k in kw:
+ if k in ["extreme_finder",
+ "grid_locator1",
+ "grid_locator2",
+ "tick_formatter1",
+ "tick_formatter2"]:
+ setattr(self, k, kw[k])
+ else:
+ raise ValueError("unknown update property '%s'" % k)
+
+
+class GridFinder(GridFinderBase):
+
+ def __init__(self,
+ transform,
+ extreme_finder=None,
+ grid_locator1=None,
+ grid_locator2=None,
+ tick_formatter1=None,
+ tick_formatter2=None):
+ """
+        transform : transform from the image coordinate (which will be
+        the transData of the axes) to the world coordinate,
+
+        or transform = (transform_xy, inv_transform_xy)
+
+        grid_locator1, grid_locator2 : grid locators for the 1st and 2nd axis.
+ """
+ if extreme_finder is None:
+ extreme_finder = ExtremeFinderSimple(20, 20)
+ if grid_locator1 is None:
+ grid_locator1 = MaxNLocator()
+ if grid_locator2 is None:
+ grid_locator2 = MaxNLocator()
+ if tick_formatter1 is None:
+ tick_formatter1 = FormatterPrettyPrint()
+ if tick_formatter2 is None:
+ tick_formatter2 = FormatterPrettyPrint()
+ super(GridFinder, self).__init__(
+ extreme_finder,
+ grid_locator1,
+ grid_locator2,
+ tick_formatter1,
+ tick_formatter2)
+ self.update_transform(transform)
+
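+    # Illustrative sketch: with an identity (forward, inverse) pair and the
+    # default locators/formatters, ``get_grid_info`` returns a dict with keys
+    # such as "extremes", "lon_lines", "lat_lines", "lon" and "lat":
+    #
+    #     gf = GridFinder((lambda x, y: (x, y), lambda x, y: (x, y)))
+    #     info = gf.get_grid_info(0., 0., 1., 1.)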
+
+class MaxNLocator(mticker.MaxNLocator):
+ def __init__(self, nbins=10, steps=None,
+ trim=True,
+ integer=False,
+ symmetric=False,
+ prune=None):
+ # trim argument has no effect. It has been left for API compatibility
+ mticker.MaxNLocator.__init__(self, nbins, steps=steps,
+ integer=integer,
+ symmetric=symmetric, prune=prune)
+ self.create_dummy_axis()
+ self._factor = None
+
+ def __call__(self, v1, v2):
+ if self._factor is not None:
+ self.set_bounds(v1*self._factor, v2*self._factor)
+ locs = mticker.MaxNLocator.__call__(self)
+ return np.array(locs), len(locs), self._factor
+ else:
+ self.set_bounds(v1, v2)
+ locs = mticker.MaxNLocator.__call__(self)
+ return np.array(locs), len(locs), None
+
+ def set_factor(self, f):
+ self._factor = f
+
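+    # Illustrative sketch: calling the locator with a value range returns the
+    # tick levels, their count and the factor (None unless set_factor was used):
+    #
+    #     loc = MaxNLocator(nbins=5)
+    #     levs, n, factor = loc(0., 10.)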
+
+class FixedLocator(object):
+ def __init__(self, locs):
+ self._locs = locs
+ self._factor = None
+
+
+ def __call__(self, v1, v2):
+ if self._factor is None:
+ v1, v2 = sorted([v1, v2])
+ else:
+ v1, v2 = sorted([v1*self._factor, v2*self._factor])
+ locs = np.array([l for l in self._locs if ((v1 <= l) and (l <= v2))])
+ return locs, len(locs), self._factor
+
+ def set_factor(self, f):
+ self._factor = f
+
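+    # Illustrative sketch: only the fixed locations that fall inside the
+    # requested range are returned.
+    #
+    #     loc = FixedLocator([0, 30, 60, 90])
+    #     levs, n, factor = loc(15, 75)   # -> array([30, 60]), 2, None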
+
+
+# Tick Formatter
+
+class FormatterPrettyPrint(object):
+ def __init__(self, useMathText=True):
+ self._fmt = mticker.ScalarFormatter(
+ useMathText=useMathText, useOffset=False)
+ self._fmt.create_dummy_axis()
+ self._ignore_factor = True
+
+ def __call__(self, direction, factor, values):
+ if not self._ignore_factor:
+ if factor is None:
+ factor = 1.
+ values = [v/factor for v in values]
+ #values = [v for v in values]
+ self._fmt.set_locs(values)
+ return [self._fmt(v) for v in values]
+
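+    # Illustrative sketch: the factor is ignored and each value is
+    # pretty-printed (math-text wrapped when useMathText is True):
+    #
+    #     fmt = FormatterPrettyPrint()
+    #     labels = fmt("bottom", None, [0., 0.5, 1.])   # one label per value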
+
+class DictFormatter(object):
+ def __init__(self, format_dict, formatter=None):
+ """
+ format_dict : dictionary for format strings to be used.
+ formatter : fall-back formatter
+ """
+ super(DictFormatter, self).__init__()
+ self._format_dict = format_dict
+ self._fallback_formatter = formatter
+
+ def __call__(self, direction, factor, values):
+ """
+        The factor is ignored for values that are found in the dictionary.
+ """
+
+ if self._fallback_formatter:
+ fallback_strings = self._fallback_formatter(
+ direction, factor, values)
+ else:
+ fallback_strings = [""]*len(values)
+
+ r = [self._format_dict.get(k, v) for k, v in zip(values,
+ fallback_strings)]
+ return r
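+
+# Illustrative sketch: values present in the dictionary use their mapped
+# string, everything else falls back to the wrapped formatter (or ""):
+#
+#     fmt = DictFormatter({90: r"$90^{\circ}$"}, formatter=FormatterPrettyPrint())
+#     labels = fmt("bottom", None, [0, 45, 90])   # 0 and 45 come from the fallback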
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/grid_helper_curvelinear.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/grid_helper_curvelinear.py
new file mode 100644
index 00000000000..578645148ee
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/grid_helper_curvelinear.py
@@ -0,0 +1,475 @@
+"""
+Experimental support for curvilinear grids.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import zip
+
+from itertools import chain
+from .grid_finder import GridFinder
+
+from .axislines import AxisArtistHelper, GridHelperBase
+from .axis_artist import AxisArtist
+from matplotlib.transforms import Affine2D, IdentityTransform
+import numpy as np
+
+from matplotlib.path import Path
+
+class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
+ """
+ Helper class for a fixed axis.
+ """
+
+ def __init__(self, grid_helper, side, nth_coord_ticks=None):
+ """
+        nth_coord : the coordinate along which the value varies
+        (nth_coord = 0 -> x axis, nth_coord = 1 -> y axis)
+ """
+
+ super(FixedAxisArtistHelper, self).__init__(loc=side)
+
+ self.grid_helper = grid_helper
+ if nth_coord_ticks is None:
+ nth_coord_ticks = self.nth_coord
+ self.nth_coord_ticks = nth_coord_ticks
+
+ self.side = side
+ self._limits_inverted = False
+
+ def update_lim(self, axes):
+ self.grid_helper.update_lim(axes)
+
+ if self.nth_coord == 0:
+ xy1, xy2 = axes.get_ylim()
+ else:
+ xy1, xy2 = axes.get_xlim()
+
+ if xy1 > xy2:
+ self._limits_inverted = True
+ else:
+ self._limits_inverted = False
+
+
+ def change_tick_coord(self, coord_number=None):
+ if coord_number is None:
+ self.nth_coord_ticks = 1 - self.nth_coord_ticks
+ elif coord_number in [0, 1]:
+ self.nth_coord_ticks = coord_number
+ else:
+ raise Exception("wrong coord number")
+
+
+ def get_tick_transform(self, axes):
+ return axes.transData
+
+ def get_tick_iterators(self, axes):
+ """tick_loc, tick_angle, tick_label"""
+
+ g = self.grid_helper
+
+ if self._limits_inverted:
+ side = {"left":"right","right":"left",
+ "top":"bottom", "bottom":"top"}[self.side]
+ else:
+ side = self.side
+
+ ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
+ ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
+
+ #ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
+
+ return chain(ti1, ti2), iter([])
+
+
+
+class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
+
+ def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
+ """
+        nth_coord : the coordinate along which the value varies
+        (nth_coord = 0 -> x axis, nth_coord = 1 -> y axis)
+ """
+
+ super(FloatingAxisArtistHelper, self).__init__(nth_coord,
+ value,
+ )
+ self.value = value
+ self.grid_helper = grid_helper
+ self._extremes = None, None
+
+ self._get_line_path = None # a method that returns a Path.
+ self._line_num_points = 100 # number of points to create a line
+
+ def set_extremes(self, e1, e2):
+ self._extremes = e1, e2
+
+ def update_lim(self, axes):
+ self.grid_helper.update_lim(axes)
+
+ x1, x2 = axes.get_xlim()
+ y1, y2 = axes.get_ylim()
+ grid_finder = self.grid_helper.grid_finder
+ extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
+ x1, y1, x2, y2)
+
+ extremes = list(extremes)
+ e1, e2 = self._extremes # ranges of other coordinates
+ if self.nth_coord == 0:
+ if e1 is not None:
+ extremes[2] = max(e1, extremes[2])
+ if e2 is not None:
+ extremes[3] = min(e2, extremes[3])
+ elif self.nth_coord == 1:
+ if e1 is not None:
+ extremes[0] = max(e1, extremes[0])
+ if e2 is not None:
+ extremes[1] = min(e2, extremes[1])
+
+ grid_info = dict()
+ lon_min, lon_max, lat_min, lat_max = extremes
+ lon_levs, lon_n, lon_factor = \
+ grid_finder.grid_locator1(lon_min, lon_max)
+ lat_levs, lat_n, lat_factor = \
+ grid_finder.grid_locator2(lat_min, lat_max)
+ grid_info["extremes"] = extremes
+
+ grid_info["lon_info"] = lon_levs, lon_n, lon_factor
+ grid_info["lat_info"] = lat_levs, lat_n, lat_factor
+
+ grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
+ lon_factor,
+ lon_levs)
+
+ grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
+ lat_factor,
+ lat_levs)
+
+ grid_finder = self.grid_helper.grid_finder
+
+ #e1, e2 = self._extremes # ranges of other coordinates
+ if self.nth_coord == 0:
+ xx0 = np.linspace(self.value, self.value, self._line_num_points)
+ yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
+ xx, yy = grid_finder.transform_xy(xx0, yy0)
+ elif self.nth_coord == 1:
+ xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
+ yy0 = np.linspace(self.value, self.value, self._line_num_points)
+ xx, yy = grid_finder.transform_xy(xx0, yy0)
+
+ grid_info["line_xy"] = xx, yy
+ self.grid_info = grid_info
+
+ def get_axislabel_transform(self, axes):
+ return Affine2D() #axes.transData
+
+ def get_axislabel_pos_angle(self, axes):
+
+ extremes = self.grid_info["extremes"]
+
+ if self.nth_coord == 0:
+ xx0 = self.value
+ yy0 = (extremes[2]+extremes[3])/2.
+ dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
+ elif self.nth_coord == 1:
+ xx0 = (extremes[0]+extremes[1])/2.
+ yy0 = self.value
+ dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
+
+ grid_finder = self.grid_helper.grid_finder
+ xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
+
+ trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
+ p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
+
+
+ if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
+ xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
+ xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
+ xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
+
+ return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
+ else:
+ return None, None
+
+
+
+
+ def get_tick_transform(self, axes):
+ return IdentityTransform() #axes.transData
+
+ def get_tick_iterators(self, axes):
+ """tick_loc, tick_angle, tick_label, (optionally) tick_label"""
+
+ grid_finder = self.grid_helper.grid_finder
+
+ lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
+ lat_levs = np.asarray(lat_levs)
+ if lat_factor is not None:
+ yy0 = lat_levs / lat_factor
+ dy = 0.01 / lat_factor
+ else:
+ yy0 = lat_levs
+ dy = 0.01
+
+ lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
+ lon_levs = np.asarray(lon_levs)
+ if lon_factor is not None:
+ xx0 = lon_levs / lon_factor
+ dx = 0.01 / lon_factor
+ else:
+ xx0 = lon_levs
+ dx = 0.01
+
+ if None in self._extremes:
+ e0, e1 = self._extremes
+ else:
+ e0, e1 = sorted(self._extremes)
+ if e0 is None:
+ e0 = -np.inf
+ if e1 is None:
+ e1 = np.inf
+
+ if self.nth_coord == 0:
+ mask = (e0 <= yy0) & (yy0 <= e1)
+ #xx0, yy0 = xx0[mask], yy0[mask]
+ yy0 = yy0[mask]
+ elif self.nth_coord == 1:
+ mask = (e0 <= xx0) & (xx0 <= e1)
+ #xx0, yy0 = xx0[mask], yy0[mask]
+ xx0 = xx0[mask]
+
+ def transform_xy(x, y):
+ x1, y1 = grid_finder.transform_xy(x, y)
+ x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
+ x2, y2 = x2y2.transpose()
+ return x2, y2
+
+ # find angles
+ if self.nth_coord == 0:
+ xx0 = np.empty_like(yy0)
+ xx0.fill(self.value)
+
+ xx1, yy1 = transform_xy(xx0, yy0)
+
+ xx00 = xx0.copy()
+ xx00[xx0+dx>e1] -= dx
+ xx1a, yy1a = transform_xy(xx00, yy0)
+ xx1b, yy1b = transform_xy(xx00+dx, yy0)
+
+ xx2a, yy2a = transform_xy(xx0, yy0)
+ xx2b, yy2b = transform_xy(xx0, yy0+dy)
+
+ labels = self.grid_info["lat_labels"]
+ labels = [l for l, m in zip(labels, mask) if m]
+
+ elif self.nth_coord == 1:
+ yy0 = np.empty_like(xx0)
+ yy0.fill(self.value)
+
+ xx1, yy1 = transform_xy(xx0, yy0)
+
+ xx1a, yy1a = transform_xy(xx0, yy0)
+ xx1b, yy1b = transform_xy(xx0, yy0+dy)
+
+ xx00 = xx0.copy()
+ xx00[xx0+dx>e1] -= dx
+ xx2a, yy2a = transform_xy(xx00, yy0)
+ xx2b, yy2b = transform_xy(xx00+dx, yy0)
+
+ labels = self.grid_info["lon_labels"]
+ labels = [l for l, m in zip(labels, mask) if m]
+
+
+ def f1():
+ dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
+ dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
+            mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.)  # mask where dd is not defined
+ dd[mm] = dd2[mm] + np.pi / 2
+ #dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
+ #dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
+ #mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
+ #dd[mm] = dd2[mm] + np.pi / 2
+
+ #dd += np.pi
+
+ #dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
+ trans_tick = self.get_tick_transform(axes)
+ tr2ax = trans_tick + axes.transAxes.inverted()
+ for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
+ c2 = tr2ax.transform_point((x, y))
+ delta=0.00001
+ if (0. -delta<= c2[0] <= 1.+delta) and \
+ (0. -delta<= c2[1] <= 1.+delta):
+ d1 = d/3.14159*180.
+ d2 = d2/3.14159*180.
+ yield [x, y], d1, d2, lab
+
+ return f1(), iter([])
+
+ def get_line_transform(self, axes):
+ return axes.transData
+
+ def get_line(self, axes):
+ self.update_lim(axes)
+ x, y = self.grid_info["line_xy"]
+
+ if self._get_line_path is None:
+ return Path(np.column_stack([x, y]))
+ else:
+ return self._get_line_path(axes, x, y)
+
+
+
+
+class GridHelperCurveLinear(GridHelperBase):
+
+ def __init__(self, aux_trans,
+ extreme_finder=None,
+ grid_locator1=None,
+ grid_locator2=None,
+ tick_formatter1=None,
+ tick_formatter2=None):
+ """
+        aux_trans : a transform from the source (curved) coordinate to the
+        target (rectilinear) coordinate. Either an instance of MPL's Transform
+        (whose inverse transform must be defined) or a tuple of two callable
+        objects that define the transform and its inverse. Each callable
+        must take two arrays of source coordinates and
+        return the two corresponding arrays of target coordinates.
+
+ e.g., ``x2, y2 = trans(x1, y1)``
+ """
+ super(GridHelperCurveLinear, self).__init__()
+
+ self.grid_info = None
+ self._old_values = None
+ #self._grid_params = dict()
+ self._aux_trans = aux_trans
+
+ self.grid_finder = GridFinder(aux_trans,
+ extreme_finder,
+ grid_locator1,
+ grid_locator2,
+ tick_formatter1,
+ tick_formatter2)
+
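+    # Illustrative sketch (mirroring the curvelinear-grid demos; ``fig`` and
+    # SubplotHost are assumed to be available from elsewhere):
+    #
+    #     def tr(x, y):        # source -> target
+    #         x, y = np.asarray(x), np.asarray(y)
+    #         return x, y - x
+    #
+    #     def inv_tr(x, y):    # target -> source
+    #         x, y = np.asarray(x), np.asarray(y)
+    #         return x, y + x
+    #
+    #     grid_helper = GridHelperCurveLinear((tr, inv_tr))
+    #     ax = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)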
+
+ def update_grid_finder(self, aux_trans=None, **kw):
+
+ if aux_trans is not None:
+ self.grid_finder.update_transform(aux_trans)
+
+ self.grid_finder.update(**kw)
+ self.invalidate()
+
+
+ def _update(self, x1, x2, y1, y2):
+ "bbox in 0-based image coordinates"
+ # update wcsgrid
+
+ if self.valid() and self._old_values == (x1, x2, y1, y2):
+ return
+
+ self._update_grid(x1, y1, x2, y2)
+
+ self._old_values = (x1, x2, y1, y2)
+
+ self._force_update = False
+
+
+ def new_fixed_axis(self, loc,
+ nth_coord=None,
+ axis_direction=None,
+ offset=None,
+ axes=None):
+
+
+ if axes is None:
+ axes = self.axes
+
+ if axis_direction is None:
+ axis_direction = loc
+ _helper = FixedAxisArtistHelper(self, loc,
+ #nth_coord,
+ nth_coord_ticks=nth_coord,
+ )
+
+ axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
+
+ return axisline
+
+
+ def new_floating_axis(self, nth_coord,
+ value,
+ axes=None,
+ axis_direction="bottom"
+ ):
+
+ if axes is None:
+ axes = self.axes
+
+ _helper = FloatingAxisArtistHelper(
+ self, nth_coord, value, axis_direction)
+
+ axisline = AxisArtist(axes, _helper)
+
+ #_helper = FloatingAxisArtistHelper(self, nth_coord,
+ # value,
+ # label_direction=label_direction,
+ # )
+
+ #axisline = AxisArtistFloating(axes, _helper,
+ # axis_direction=axis_direction)
+ axisline.line.set_clip_on(True)
+ axisline.line.set_clip_box(axisline.axes.bbox)
+ #axisline.major_ticklabels.set_visible(True)
+ #axisline.minor_ticklabels.set_visible(False)
+
+ #axisline.major_ticklabels.set_rotate_along_line(True)
+ #axisline.set_rotate_label_along_line(True)
+
+ return axisline
+
+
+ def _update_grid(self, x1, y1, x2, y2):
+
+ self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
+
+
+ def get_gridlines(self, which="major", axis="both"):
+ grid_lines = []
+
+ if axis in ["both", "x"]:
+ for gl in self.grid_info["lon"]["lines"]:
+ grid_lines.extend(gl)
+ if axis in ["both", "y"]:
+ for gl in self.grid_info["lat"]["lines"]:
+ grid_lines.extend(gl)
+
+ return grid_lines
+
+
+ def get_tick_iterator(self, nth_coord, axis_side, minor=False):
+
+ #axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
+ angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
+ #angle = [0, 90, 180, 270][axisnr]
+ lon_or_lat = ["lon", "lat"][nth_coord]
+ if not minor: # major ticks
+ def f():
+ for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
+ self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
+ angle_normal = a
+ yield xy, angle_normal, angle_tangent, l
+ else:
+ def f():
+ for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
+ self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
+ angle_normal = a
+ yield xy, angle_normal, angle_tangent, ""
+ #for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
+ # yield xy, a, ""
+
+ return f()
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/parasite_axes.py b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/parasite_axes.py
new file mode 100644
index 00000000000..cad56e43a22
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/axisartist/parasite_axes.py
@@ -0,0 +1,18 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from mpl_toolkits.axes_grid1.parasite_axes import (
+ host_axes_class_factory, parasite_axes_class_factory,
+ parasite_axes_auxtrans_class_factory, subplot_class_factory)
+
+from .axislines import Axes
+
+
+ParasiteAxes = parasite_axes_class_factory(Axes)
+
+ParasiteAxesAuxTrans = \
+ parasite_axes_auxtrans_class_factory(axes_class=ParasiteAxes)
+
+HostAxes = host_axes_class_factory(axes_class=Axes)
+
+SubplotHost = subplot_class_factory(HostAxes)
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/__init__.py b/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/__init__.py
new file mode 100644
index 00000000000..cd9c2139d27
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/__init__.py
@@ -0,0 +1,6 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+from .axes3d import Axes3D
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/art3d.py b/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/art3d.py
new file mode 100644
index 00000000000..ef55dd693e1
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/art3d.py
@@ -0,0 +1,774 @@
+# art3d.py, original mplot3d version by John Porter
+# Parts rewritten by Reinier Heeres <reinier@heeres.eu>
+# Minor additions by Ben Axelrod <baxelrod@coroware.com>
+
+'''
+Module containing 3D artist code and functions to convert 2D
+artists into 3D versions which can be added to an Axes3D.
+'''
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import zip
+
+import math
+
+import numpy as np
+
+from matplotlib import (
+ artist, cbook, colors as mcolors, lines, text as mtext, path as mpath)
+from matplotlib.cbook import _backports
+from matplotlib.collections import (
+ Collection, LineCollection, PolyCollection, PatchCollection,
+ PathCollection)
+from matplotlib.colors import Normalize
+from matplotlib.patches import Patch
+from . import proj3d
+
+
+def norm_angle(a):
+ """Return angle between -180 and +180"""
+ a = (a + 360) % 360
+ if a > 180:
+ a = a - 360
+ return a
+
+
+def norm_text_angle(a):
+ """Return angle between -90 and +90"""
+ a = (a + 180) % 180
+ if a > 90:
+ a = a - 180
+ return a
+
+
+def get_dir_vector(zdir):
+ if zdir == 'x':
+ return np.array((1, 0, 0))
+ elif zdir == 'y':
+ return np.array((0, 1, 0))
+ elif zdir == 'z':
+ return np.array((0, 0, 1))
+ elif zdir is None:
+ return np.array((0, 0, 0))
+ elif cbook.iterable(zdir) and len(zdir) == 3:
+ return zdir
+ else:
+ raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
+
+
+class Text3D(mtext.Text):
+ '''
+ Text object with 3D position and (in the future) direction.
+ '''
+
+ def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
+ '''
+ *x*, *y*, *z* Position of text
+ *text* Text string to display
+ *zdir* Direction of text
+
+ Keyword arguments are passed onto :func:`~matplotlib.text.Text`.
+ '''
+ mtext.Text.__init__(self, x, y, text, **kwargs)
+ self.set_3d_properties(z, zdir)
+
+ def set_3d_properties(self, z=0, zdir='z'):
+ x, y = self.get_position()
+ self._position3d = np.array((x, y, z))
+ self._dir_vec = get_dir_vector(zdir)
+ self.stale = True
+
+ def draw(self, renderer):
+ proj = proj3d.proj_trans_points(
+ [self._position3d, self._position3d + self._dir_vec], renderer.M)
+ dx = proj[0][1] - proj[0][0]
+ dy = proj[1][1] - proj[1][0]
+ if dx==0. and dy==0.:
+ # atan2 raises ValueError: math domain error on 0,0
+ angle = 0.
+ else:
+ angle = math.degrees(math.atan2(dy, dx))
+ self.set_position((proj[0][0], proj[1][0]))
+ self.set_rotation(norm_text_angle(angle))
+ mtext.Text.draw(self, renderer)
+ self.stale = False
+
+
+def text_2d_to_3d(obj, z=0, zdir='z'):
+ """Convert a Text to a Text3D object."""
+ obj.__class__ = Text3D
+ obj.set_3d_properties(z, zdir)
+
+
+class Line3D(lines.Line2D):
+ '''
+ 3D line object.
+ '''
+
+ def __init__(self, xs, ys, zs, *args, **kwargs):
+ '''
+ Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
+ '''
+ lines.Line2D.__init__(self, [], [], *args, **kwargs)
+ self._verts3d = xs, ys, zs
+
+ def set_3d_properties(self, zs=0, zdir='z'):
+ xs = self.get_xdata()
+ ys = self.get_ydata()
+
+ try:
+ # If *zs* is a list or array, then this will fail and
+ # just proceed to juggle_axes().
+ zs = float(zs)
+ zs = [zs for x in xs]
+ except TypeError:
+ pass
+ self._verts3d = juggle_axes(xs, ys, zs, zdir)
+ self.stale = True
+
+ def draw(self, renderer):
+ xs3d, ys3d, zs3d = self._verts3d
+ xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
+ self.set_data(xs, ys)
+ lines.Line2D.draw(self, renderer)
+ self.stale = False
+
+
+def line_2d_to_3d(line, zs=0, zdir='z'):
+ '''
+ Convert a 2D line to 3D.
+ '''
+ line.__class__ = Line3D
+ line.set_3d_properties(zs, zdir)
+
+
+def path_to_3d_segment(path, zs=0, zdir='z'):
+ '''Convert a path to a 3D segment.'''
+
+ zs = _backports.broadcast_to(zs, len(path))
+ pathsegs = path.iter_segments(simplify=False, curves=False)
+ seg = [(x, y, z) for (((x, y), code), z) in zip(pathsegs, zs)]
+ seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
+ return seg3d
+
+
+def paths_to_3d_segments(paths, zs=0, zdir='z'):
+ '''
+ Convert paths from a collection object to 3D segments.
+ '''
+
+ zs = _backports.broadcast_to(zs, len(paths))
+ segs = [path_to_3d_segment(path, pathz, zdir)
+ for path, pathz in zip(paths, zs)]
+ return segs
+
+
+def path_to_3d_segment_with_codes(path, zs=0, zdir='z'):
+ '''Convert a path to a 3D segment with path codes.'''
+
+ zs = _backports.broadcast_to(zs, len(path))
+ seg = []
+ codes = []
+ pathsegs = path.iter_segments(simplify=False, curves=False)
+ for (((x, y), code), z) in zip(pathsegs, zs):
+ seg.append((x, y, z))
+ codes.append(code)
+ seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
+ return seg3d, codes
+
+
+def paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):
+ '''
+ Convert paths from a collection object to 3D segments with path codes.
+ '''
+
+ zs = _backports.broadcast_to(zs, len(paths))
+ segments = []
+ codes_list = []
+ for path, pathz in zip(paths, zs):
+ segs, codes = path_to_3d_segment_with_codes(path, pathz, zdir)
+ segments.append(segs)
+ codes_list.append(codes)
+ return segments, codes_list
+
+
+class Line3DCollection(LineCollection):
+ '''
+ A collection of 3D lines.
+ '''
+
+ def __init__(self, segments, *args, **kwargs):
+ '''
+ Keyword arguments are passed onto :func:`~matplotlib.collections.LineCollection`.
+ '''
+ LineCollection.__init__(self, segments, *args, **kwargs)
+
+ def set_sort_zpos(self, val):
+ '''Set the position to use for z-sorting.'''
+ self._sort_zpos = val
+ self.stale = True
+
+ def set_segments(self, segments):
+ '''
+ Set 3D segments
+ '''
+ self._segments3d = np.asanyarray(segments)
+ LineCollection.set_segments(self, [])
+
+ def do_3d_projection(self, renderer):
+ '''
+ Project the points according to renderer matrix.
+ '''
+ xyslist = [
+ proj3d.proj_trans_points(points, renderer.M) for points in
+ self._segments3d]
+ segments_2d = [np.column_stack([xs, ys]) for xs, ys, zs in xyslist]
+ LineCollection.set_segments(self, segments_2d)
+
+ # FIXME
+ minz = 1e9
+ for xs, ys, zs in xyslist:
+ minz = min(minz, min(zs))
+ return minz
+
+ def draw(self, renderer, project=False):
+ if project:
+ self.do_3d_projection(renderer)
+ LineCollection.draw(self, renderer)
+
+
+def line_collection_2d_to_3d(col, zs=0, zdir='z'):
+ """Convert a LineCollection to a Line3DCollection object."""
+ segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
+ col.__class__ = Line3DCollection
+ col.set_segments(segments3d)
+
+
+class Patch3D(Patch):
+ '''
+ 3D patch object.
+ '''
+
+ def __init__(self, *args, **kwargs):
+ zs = kwargs.pop('zs', [])
+ zdir = kwargs.pop('zdir', 'z')
+ Patch.__init__(self, *args, **kwargs)
+ self.set_3d_properties(zs, zdir)
+
+ def set_3d_properties(self, verts, zs=0, zdir='z'):
+ zs = _backports.broadcast_to(zs, len(verts))
+ self._segment3d = [juggle_axes(x, y, z, zdir)
+ for ((x, y), z) in zip(verts, zs)]
+ self._facecolor3d = Patch.get_facecolor(self)
+
+ def get_path(self):
+ return self._path2d
+
+ def get_facecolor(self):
+ return self._facecolor2d
+
+ def do_3d_projection(self, renderer):
+ s = self._segment3d
+ xs, ys, zs = zip(*s)
+ vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
+ self._path2d = mpath.Path(np.column_stack([vxs, vys]))
+ # FIXME: coloring
+ self._facecolor2d = self._facecolor3d
+ return min(vzs)
+
+ def draw(self, renderer):
+ Patch.draw(self, renderer)
+
+
+class PathPatch3D(Patch3D):
+ '''
+ 3D PathPatch object.
+ '''
+
+ def __init__(self, path, **kwargs):
+ zs = kwargs.pop('zs', [])
+ zdir = kwargs.pop('zdir', 'z')
+ Patch.__init__(self, **kwargs)
+ self.set_3d_properties(path, zs, zdir)
+
+ def set_3d_properties(self, path, zs=0, zdir='z'):
+ Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
+ self._code3d = path.codes
+
+ def do_3d_projection(self, renderer):
+ s = self._segment3d
+ xs, ys, zs = zip(*s)
+ vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
+ self._path2d = mpath.Path(np.column_stack([vxs, vys]), self._code3d)
+ # FIXME: coloring
+ self._facecolor2d = self._facecolor3d
+ return min(vzs)
+
+
+def get_patch_verts(patch):
+ """Return a list of vertices for the path of a patch."""
+ trans = patch.get_patch_transform()
+ path = patch.get_path()
+ polygons = path.to_polygons(trans)
+ if len(polygons):
+ return polygons[0]
+ else:
+ return []
+
+
+def patch_2d_to_3d(patch, z=0, zdir='z'):
+ """Convert a Patch to a Patch3D object."""
+ verts = get_patch_verts(patch)
+ patch.__class__ = Patch3D
+ patch.set_3d_properties(verts, z, zdir)
+
+
+def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
+ """Convert a PathPatch to a PathPatch3D object."""
+ path = pathpatch.get_path()
+ trans = pathpatch.get_patch_transform()
+
+ mpath = trans.transform_path(path)
+ pathpatch.__class__ = PathPatch3D
+ pathpatch.set_3d_properties(mpath, z, zdir)
+
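+
+# Illustrative sketch: a 2D patch added to a 3D axes can be converted in place
+# and embedded in a constant-z (or constant-x/y) plane.  ``ax`` is assumed to
+# be an Axes3D instance created elsewhere.
+#
+#     from matplotlib.patches import Circle
+#     p = Circle((0, 0), 1.0)
+#     ax.add_patch(p)
+#     pathpatch_2d_to_3d(p, z=0.5, zdir='y')   # place the circle in the y = 0.5 plane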
+
+class Patch3DCollection(PatchCollection):
+ '''
+ A collection of 3D patches.
+ '''
+
+ def __init__(self, *args, **kwargs):
+ """
+        Create a collection of flat 3D patches with their normal vectors
+        pointed in the *zdir* direction and located at *zs* on the *zdir*
+        axis. *zs* can be a scalar or an array-like of the same length as
+ the number of patches in the collection.
+
+ Constructor arguments are the same as for
+ :class:`~matplotlib.collections.PatchCollection`. In addition,
+ keywords *zs=0* and *zdir='z'* are available.
+
+ Also, the keyword argument "depthshade" is available to
+ indicate whether or not to shade the patches in order to
+ give the appearance of depth (default is *True*).
+ This is typically desired in scatter plots.
+ """
+ zs = kwargs.pop('zs', 0)
+ zdir = kwargs.pop('zdir', 'z')
+ self._depthshade = kwargs.pop('depthshade', True)
+ PatchCollection.__init__(self, *args, **kwargs)
+ self.set_3d_properties(zs, zdir)
+
+ def set_sort_zpos(self, val):
+ '''Set the position to use for z-sorting.'''
+ self._sort_zpos = val
+ self.stale = True
+
+ def set_3d_properties(self, zs, zdir):
+ # Force the collection to initialize the face and edgecolors
+ # just in case it is a scalarmappable with a colormap.
+ self.update_scalarmappable()
+ offsets = self.get_offsets()
+ if len(offsets) > 0:
+ xs, ys = zip(*offsets)
+ else:
+ xs = []
+ ys = []
+ self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
+ self._facecolor3d = self.get_facecolor()
+ self._edgecolor3d = self.get_edgecolor()
+ self.stale = True
+
+ def do_3d_projection(self, renderer):
+ xs, ys, zs = self._offsets3d
+ vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
+
+ fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
+ self._facecolor3d)
+ fcs = mcolors.to_rgba_array(fcs, self._alpha)
+ self.set_facecolors(fcs)
+
+ ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
+ self._edgecolor3d)
+ ecs = mcolors.to_rgba_array(ecs, self._alpha)
+ self.set_edgecolors(ecs)
+ PatchCollection.set_offsets(self, np.column_stack([vxs, vys]))
+
+ if vzs.size > 0:
+ return min(vzs)
+ else:
+ return np.nan
+
+
+class Path3DCollection(PathCollection):
+ '''
+ A collection of 3D paths.
+ '''
+
+ def __init__(self, *args, **kwargs):
+ """
+        Create a collection of flat 3D paths with their normal vectors
+        pointed in the *zdir* direction and located at *zs* on the *zdir*
+        axis. *zs* can be a scalar or an array-like of the same length as
+ the number of paths in the collection.
+
+ Constructor arguments are the same as for
+ :class:`~matplotlib.collections.PathCollection`. In addition,
+ keywords *zs=0* and *zdir='z'* are available.
+
+ Also, the keyword argument "depthshade" is available to
+ indicate whether or not to shade the patches in order to
+ give the appearance of depth (default is *True*).
+ This is typically desired in scatter plots.
+ """
+ zs = kwargs.pop('zs', 0)
+ zdir = kwargs.pop('zdir', 'z')
+ self._depthshade = kwargs.pop('depthshade', True)
+ PathCollection.__init__(self, *args, **kwargs)
+ self.set_3d_properties(zs, zdir)
+
+ def set_sort_zpos(self, val):
+ '''Set the position to use for z-sorting.'''
+ self._sort_zpos = val
+ self.stale = True
+
+ def set_3d_properties(self, zs, zdir):
+ # Force the collection to initialize the face and edgecolors
+ # just in case it is a scalarmappable with a colormap.
+ self.update_scalarmappable()
+ offsets = self.get_offsets()
+ if len(offsets) > 0:
+ xs, ys = zip(*offsets)
+ else:
+ xs = []
+ ys = []
+ self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
+ self._facecolor3d = self.get_facecolor()
+ self._edgecolor3d = self.get_edgecolor()
+ self.stale = True
+
+ def do_3d_projection(self, renderer):
+ xs, ys, zs = self._offsets3d
+ vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
+
+ fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
+ self._facecolor3d)
+ fcs = mcolors.to_rgba_array(fcs, self._alpha)
+ self.set_facecolors(fcs)
+
+ ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
+ self._edgecolor3d)
+ ecs = mcolors.to_rgba_array(ecs, self._alpha)
+ self.set_edgecolors(ecs)
+ PathCollection.set_offsets(self, np.column_stack([vxs, vys]))
+
+ if vzs.size > 0 :
+ return min(vzs)
+ else :
+ return np.nan
+
+
+def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
+ """
+ Convert a :class:`~matplotlib.collections.PatchCollection` into a
+ :class:`Patch3DCollection` object
+ (or a :class:`~matplotlib.collections.PathCollection` into a
+ :class:`Path3DCollection` object).
+
+ Keywords:
+
+    *zs* The location or locations to place the patches in the
+ collection along the *zdir* axis. Defaults to 0.
+
+ *zdir* The axis in which to place the patches. Default is "z".
+
+ *depthshade* Whether to shade the patches to give a sense of depth.
+ Defaults to *True*.
+
+ """
+ if isinstance(col, PathCollection):
+ col.__class__ = Path3DCollection
+ elif isinstance(col, PatchCollection):
+ col.__class__ = Patch3DCollection
+ col._depthshade = depthshade
+ col.set_3d_properties(zs, zdir)
+
+
+class Poly3DCollection(PolyCollection):
+ '''
+ A collection of 3D polygons.
+ '''
+
+ def __init__(self, verts, *args, **kwargs):
+ '''
+ Create a Poly3DCollection.
+
+ *verts* should contain 3D coordinates.
+
+ Keyword arguments:
+ zsort, see set_zsort for options.
+
+ Note that this class does a bit of magic with the _facecolors
+ and _edgecolors properties.
+ '''
+ zsort = kwargs.pop('zsort', True)
+ PolyCollection.__init__(self, verts, *args, **kwargs)
+ self.set_zsort(zsort)
+ self._codes3d = None
+
+ _zsort_functions = {
+ 'average': np.average,
+ 'min': np.min,
+ 'max': np.max,
+ }
+
+ def set_zsort(self, zsort):
+ '''
+ Set z-sorting behaviour:
+ boolean: if True use default 'average'
+ string: 'average', 'min' or 'max'
+ '''
+
+ if zsort is True:
+ zsort = 'average'
+
+ if zsort is not False:
+ if zsort in self._zsort_functions:
+ zsortfunc = self._zsort_functions[zsort]
+ else:
+ return False
+ else:
+ zsortfunc = None
+
+ self._zsort = zsort
+ self._sort_zpos = None
+ self._zsortfunc = zsortfunc
+ self.stale = True
+
+ def get_vector(self, segments3d):
+ """Optimize points for projection"""
+ si = 0
+ ei = 0
+ segis = []
+ points = []
+ for p in segments3d:
+ points.extend(p)
+ ei = si + len(p)
+ segis.append((si, ei))
+ si = ei
+
+ if len(segments3d):
+ xs, ys, zs = zip(*points)
+ else :
+ # We need this so that we can skip the bad unpacking from zip()
+ xs, ys, zs = [], [], []
+
+ ones = np.ones(len(xs))
+ self._vec = np.array([xs, ys, zs, ones])
+ self._segis = segis
+
+ def set_verts(self, verts, closed=True):
+ '''Set 3D vertices.'''
+ self.get_vector(verts)
+ # 2D verts will be updated at draw time
+ PolyCollection.set_verts(self, [], False)
+ self._closed = closed
+
+ def set_verts_and_codes(self, verts, codes):
+ '''Sets 3D vertices with path codes'''
+ # set vertices with closed=False to prevent PolyCollection from
+ # setting path codes
+ self.set_verts(verts, closed=False)
+ # and set our own codes instead.
+ self._codes3d = codes
+
+ def set_3d_properties(self):
+ # Force the collection to initialize the face and edgecolors
+ # just in case it is a scalarmappable with a colormap.
+ self.update_scalarmappable()
+ self._sort_zpos = None
+ self.set_zsort(True)
+ self._facecolors3d = PolyCollection.get_facecolors(self)
+ self._edgecolors3d = PolyCollection.get_edgecolors(self)
+ self._alpha3d = PolyCollection.get_alpha(self)
+ self.stale = True
+
+ def set_sort_zpos(self,val):
+ '''Set the position to use for z-sorting.'''
+ self._sort_zpos = val
+ self.stale = True
+
+ def do_3d_projection(self, renderer):
+ '''
+ Perform the 3D projection for this object.
+ '''
+ # FIXME: This may no longer be needed?
+ if self._A is not None:
+ self.update_scalarmappable()
+ self._facecolors3d = self._facecolors
+
+ txs, tys, tzs = proj3d.proj_transform_vec(self._vec, renderer.M)
+ xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei])
+ for si, ei in self._segis]
+
+ # This extra fuss is to re-order face / edge colors
+ cface = self._facecolors3d
+ cedge = self._edgecolors3d
+ if len(cface) != len(xyzlist):
+ cface = cface.repeat(len(xyzlist), axis=0)
+ if len(cedge) != len(xyzlist):
+ if len(cedge) == 0:
+ cedge = cface
+ else:
+ cedge = cedge.repeat(len(xyzlist), axis=0)
+
+ # if required sort by depth (furthest drawn first)
+ if self._zsort:
+ z_segments_2d = sorted(
+ ((self._zsortfunc(zs), np.column_stack([xs, ys]), fc, ec, idx)
+ for idx, ((xs, ys, zs), fc, ec)
+ in enumerate(zip(xyzlist, cface, cedge))),
+ key=lambda x: x[0], reverse=True)
+ else:
+ raise ValueError("whoops")
+
+ segments_2d = [s for z, s, fc, ec, idx in z_segments_2d]
+ if self._codes3d is not None:
+ codes = [self._codes3d[idx] for z, s, fc, ec, idx in z_segments_2d]
+ PolyCollection.set_verts_and_codes(self, segments_2d, codes)
+ else:
+ PolyCollection.set_verts(self, segments_2d, self._closed)
+
+ self._facecolors2d = [fc for z, s, fc, ec, idx in z_segments_2d]
+ if len(self._edgecolors3d) == len(cface):
+ self._edgecolors2d = [ec for z, s, fc, ec, idx in z_segments_2d]
+ else:
+ self._edgecolors2d = self._edgecolors3d
+
+ # Return zorder value
+ if self._sort_zpos is not None:
+ zvec = np.array([[0], [0], [self._sort_zpos], [1]])
+ ztrans = proj3d.proj_transform_vec(zvec, renderer.M)
+ return ztrans[2][0]
+ elif tzs.size > 0 :
+ # FIXME: Some results still don't look quite right.
+ # In particular, examine contourf3d_demo2.py
+ # with az = -54 and elev = -45.
+ return np.min(tzs)
+ else :
+ return np.nan
+
+ def set_facecolor(self, colors):
+ PolyCollection.set_facecolor(self, colors)
+ self._facecolors3d = PolyCollection.get_facecolor(self)
+ set_facecolors = set_facecolor
+
+ def set_edgecolor(self, colors):
+ PolyCollection.set_edgecolor(self, colors)
+ self._edgecolors3d = PolyCollection.get_edgecolor(self)
+ set_edgecolors = set_edgecolor
+
+ def set_alpha(self, alpha):
+ """
+        Set the alpha transparencies of the collection. *alpha* must be
+ a float or *None*.
+
+ ACCEPTS: float or None
+ """
+ if alpha is not None:
+ try:
+ float(alpha)
+ except TypeError:
+ raise TypeError('alpha must be a float or None')
+ artist.Artist.set_alpha(self, alpha)
+ try:
+ self._facecolors = mcolors.to_rgba_array(
+ self._facecolors3d, self._alpha)
+ except (AttributeError, TypeError, IndexError):
+ pass
+ try:
+ self._edgecolors = mcolors.to_rgba_array(
+ self._edgecolors3d, self._alpha)
+ except (AttributeError, TypeError, IndexError):
+ pass
+ self.stale = True
+
+ def get_facecolors(self):
+ return self._facecolors2d
+ get_facecolor = get_facecolors
+
+ def get_edgecolors(self):
+ return self._edgecolors2d
+ get_edgecolor = get_edgecolors
+
+ def draw(self, renderer):
+ return Collection.draw(self, renderer)
+
+
+def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
+ """Convert a PolyCollection to a Poly3DCollection object."""
+ segments_3d, codes = paths_to_3d_segments_with_codes(col.get_paths(),
+ zs, zdir)
+ col.__class__ = Poly3DCollection
+ col.set_verts_and_codes(segments_3d, codes)
+ col.set_3d_properties()
+
+
+def juggle_axes(xs, ys, zs, zdir):
+ """
+ Reorder coordinates so that 2D xs, ys can be plotted in the plane
+ orthogonal to zdir. zdir is normally x, y or z. However, if zdir
+ starts with a '-' it is interpreted as a compensation for rotate_axes.
+ """
+ if zdir == 'x':
+ return zs, xs, ys
+ elif zdir == 'y':
+ return xs, zs, ys
+ elif zdir[0] == '-':
+ return rotate_axes(xs, ys, zs, zdir)
+ else:
+ return xs, ys, zs
+
+
+def rotate_axes(xs, ys, zs, zdir):
+ """
+ Reorder coordinates so that the axes are rotated with zdir along
+ the original z axis. Prepending the axis with a '-' does the
+ inverse transform, so zdir can be x, -x, y, -y, z or -z
+ """
+ if zdir == 'x':
+ return ys, zs, xs
+ elif zdir == '-x':
+ return zs, xs, ys
+
+ elif zdir == 'y':
+ return zs, xs, ys
+ elif zdir == '-y':
+ return ys, zs, xs
+
+ else:
+ return xs, ys, zs
+
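+
+# Illustrative sketch: with zdir='y' the supplied 2D data ends up in the
+# xz-plane, i.e. the original ys become the 3D z values:
+#
+#     xs, ys, zs = juggle_axes([0, 1], [2, 3], [5, 5], 'y')
+#     # -> ([0, 1], [5, 5], [2, 3])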
+
+def get_colors(c, num):
+ """Stretch the color argument to provide the required number num"""
+ return _backports.broadcast_to(
+ mcolors.to_rgba_array(c) if len(c) else [0, 0, 0, 0],
+ (num, 4))
+
+
+def zalpha(colors, zs):
+ """Modify the alphas of the color list according to depth"""
+ # FIXME: This only works well if the points for *zs* are well-spaced
+ # in all three dimensions. Otherwise, at certain orientations,
+ # the min and max zs are very close together.
+ # Should really normalize against the viewing depth.
+ colors = get_colors(colors, len(zs))
+ if len(zs):
+ norm = Normalize(min(zs), max(zs))
+ sats = 1 - norm(zs) * 0.7
+ colors = [(c[0], c[1], c[2], c[3] * s) for c, s in zip(colors, sats)]
+ return colors
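+
+# Illustrative sketch: alpha decreases with increasing z, so the "deepest"
+# points are drawn most transparent:
+#
+#     cols = zalpha(['r', 'g', 'b'], [0.0, 0.5, 1.0])
+#     # resulting alphas are roughly 1.0, 0.65 and 0.3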
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/axes3d.py b/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/axes3d.py
new file mode 100644
index 00000000000..b99a090c62c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/axes3d.py
@@ -0,0 +1,2958 @@
+"""
+axes3d.py, original mplot3d version by John Porter
+Created: 23 Sep 2005
+
+Parts fixed by Reinier Heeres <reinier@heeres.eu>
+Minor additions by Ben Axelrod <baxelrod@coroware.com>
+Significant updates and revisions by Ben Root <ben.v.root@gmail.com>
+
+Module containing Axes3D, an object which can plot 3D objects on a
+2D matplotlib figure.
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import map, xrange, zip, reduce
+
+import math
+import warnings
+from collections import defaultdict
+
+import numpy as np
+
+import matplotlib.axes as maxes
+import matplotlib.cbook as cbook
+import matplotlib.collections as mcoll
+import matplotlib.colors as mcolors
+import matplotlib.docstring as docstring
+import matplotlib.scale as mscale
+import matplotlib.transforms as mtransforms
+from matplotlib.axes import Axes, rcParams
+from matplotlib.cbook import _backports
+from matplotlib.colors import Normalize, LightSource
+from matplotlib.transforms import Bbox
+from matplotlib.tri.triangulation import Triangulation
+
+from . import art3d
+from . import proj3d
+from . import axis3d
+
+
+def unit_bbox():
+ box = Bbox(np.array([[0, 0], [1, 1]]))
+ return box
+
+
+class Axes3D(Axes):
+ """
+ 3D axes object.
+ """
+ name = '3d'
+ _shared_z_axes = cbook.Grouper()
+
+ def __init__(self, fig, rect=None, *args, **kwargs):
+ '''
+ Build an :class:`Axes3D` instance in
+ :class:`~matplotlib.figure.Figure` *fig* with
+ *rect=[left, bottom, width, height]* in
+ :class:`~matplotlib.figure.Figure` coordinates
+
+ Optional keyword arguments:
+
+ ================ =========================================
+ Keyword Description
+ ================ =========================================
+ *azim* Azimuthal viewing angle (default -60)
+ *elev* Elevation viewing angle (default 30)
+ *zscale* [%(scale)s]
+ *sharez* Other axes to share z-limits with
+ *proj_type* 'persp' or 'ortho' (default 'persp')
+ ================ =========================================
+
+ .. versionadded :: 1.2.1
+ *sharez*
+
+ ''' % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
+
+ if rect is None:
+ rect = [0.0, 0.0, 1.0, 1.0]
+ self._cids = []
+
+ self.initial_azim = kwargs.pop('azim', -60)
+ self.initial_elev = kwargs.pop('elev', 30)
+ zscale = kwargs.pop('zscale', None)
+ sharez = kwargs.pop('sharez', None)
+ self.set_proj_type(kwargs.pop('proj_type', 'persp'))
+
+ self.xy_viewLim = unit_bbox()
+ self.zz_viewLim = unit_bbox()
+ self.xy_dataLim = unit_bbox()
+ self.zz_dataLim = unit_bbox()
+        # inhibit autoscale_view until the axes are defined
+ # they can't be defined until Axes.__init__ has been called
+ self.view_init(self.initial_elev, self.initial_azim)
+ self._ready = 0
+
+ self._sharez = sharez
+ if sharez is not None:
+ self._shared_z_axes.join(self, sharez)
+ self._adjustable = 'datalim'
+
+ super(Axes3D, self).__init__(fig, rect,
+ frameon=True,
+ *args, **kwargs)
+ # Disable drawing of axes by base class
+ super(Axes3D, self).set_axis_off()
+ # Enable drawing of axes by Axes3D class
+ self.set_axis_on()
+ self.M = None
+
+ # func used to format z -- fall back on major formatters
+ self.fmt_zdata = None
+
+ if zscale is not None:
+ self.set_zscale(zscale)
+
+ if self.zaxis is not None:
+ self._zcid = self.zaxis.callbacks.connect(
+ 'units finalize', lambda: self._on_units_changed(scalez=True))
+ else:
+ self._zcid = None
+
+ self._ready = 1
+ self.mouse_init()
+ self.set_top_view()
+
+ self.patch.set_linewidth(0)
+ # Calculate the pseudo-data width and height
+ pseudo_bbox = self.transLimits.inverted().transform([(0, 0), (1, 1)])
+ self._pseudo_w, self._pseudo_h = pseudo_bbox[1] - pseudo_bbox[0]
+
+ self.figure.add_axes(self)
+
+ def set_axis_off(self):
+ self._axis3don = False
+ self.stale = True
+
+ def set_axis_on(self):
+ self._axis3don = True
+ self.stale = True
+
+ def have_units(self):
+ """
+ Return *True* if units are set on the *x*, *y*, or *z* axes
+
+ """
+ return (self.xaxis.have_units() or self.yaxis.have_units() or
+ self.zaxis.have_units())
+
+ def convert_zunits(self, z):
+ """
+ For artists in an axes, if the zaxis has units support,
+ convert *z* using zaxis unit type
+
+ .. versionadded :: 1.2.1
+
+ """
+ return self.zaxis.convert_units(z)
+
+ def _process_unit_info(self, xdata=None, ydata=None, zdata=None,
+ kwargs=None):
+ """
+ Look for unit *kwargs* and update the axis instances as necessary
+
+ """
+ super(Axes3D, self)._process_unit_info(xdata=xdata, ydata=ydata,
+ kwargs=kwargs)
+
+ if self.xaxis is None or self.yaxis is None or self.zaxis is None:
+ return
+
+ if zdata is not None:
+ # we only need to update if there is nothing set yet.
+ if not self.zaxis.have_units():
+ self.zaxis.update_units(xdata)
+
+ # process kwargs 2nd since these will override default units
+ if kwargs is not None:
+ zunits = kwargs.pop('zunits', self.zaxis.units)
+ if zunits != self.zaxis.units:
+ self.zaxis.set_units(zunits)
+ # If the units being set imply a different converter,
+ # we need to update.
+ if zdata is not None:
+ self.zaxis.update_units(zdata)
+
+ def set_top_view(self):
+ # this happens to be the right view for the viewing coordinates
+ # moved up and to the left slightly to fit labels and axes
+ xdwl = (0.95/self.dist)
+ xdw = (0.9/self.dist)
+ ydwl = (0.95/self.dist)
+ ydw = (0.9/self.dist)
+
+ # This is purposely using the 2D Axes's set_xlim and set_ylim,
+ # because we are trying to place our viewing pane.
+ super(Axes3D, self).set_xlim(-xdwl, xdw, auto=None)
+ super(Axes3D, self).set_ylim(-ydwl, ydw, auto=None)
+
+ def _init_axis(self):
+ '''Init 3D axes; overrides creation of regular X/Y axes'''
+ self.w_xaxis = axis3d.XAxis('x', self.xy_viewLim.intervalx,
+ self.xy_dataLim.intervalx, self)
+ self.xaxis = self.w_xaxis
+ self.w_yaxis = axis3d.YAxis('y', self.xy_viewLim.intervaly,
+ self.xy_dataLim.intervaly, self)
+ self.yaxis = self.w_yaxis
+ self.w_zaxis = axis3d.ZAxis('z', self.zz_viewLim.intervalx,
+ self.zz_dataLim.intervalx, self)
+ self.zaxis = self.w_zaxis
+
+ for ax in self.xaxis, self.yaxis, self.zaxis:
+ ax.init3d()
+
+ def get_children(self):
+ return [self.zaxis, ] + super(Axes3D, self).get_children()
+
+ def _get_axis_list(self):
+ return super(Axes3D, self)._get_axis_list() + (self.zaxis, )
+
+ def unit_cube(self, vals=None):
+ minx, maxx, miny, maxy, minz, maxz = vals or self.get_w_lims()
+ xs, ys, zs = ([minx, maxx, maxx, minx, minx, maxx, maxx, minx],
+ [miny, miny, maxy, maxy, miny, miny, maxy, maxy],
+ [minz, minz, minz, minz, maxz, maxz, maxz, maxz])
+ return list(zip(xs, ys, zs))
+
+ def tunit_cube(self, vals=None, M=None):
+ if M is None:
+ M = self.M
+ xyzs = self.unit_cube(vals)
+ tcube = proj3d.proj_points(xyzs, M)
+ return tcube
+
+ def tunit_edges(self, vals=None, M=None):
+ tc = self.tunit_cube(vals, M)
+ edges = [(tc[0], tc[1]),
+ (tc[1], tc[2]),
+ (tc[2], tc[3]),
+ (tc[3], tc[0]),
+
+ (tc[0], tc[4]),
+ (tc[1], tc[5]),
+ (tc[2], tc[6]),
+ (tc[3], tc[7]),
+
+ (tc[4], tc[5]),
+ (tc[5], tc[6]),
+ (tc[6], tc[7]),
+ (tc[7], tc[4])]
+ return edges
+
+ def draw(self, renderer):
+ # draw the background patch
+ self.patch.draw(renderer)
+ self._frameon = False
+
+ # first, set the aspect
+ # this is duplicated from `axes._base._AxesBase.draw`
+ # but must be called before any of the artist are drawn as
+ # it adjusts the view limits and the size of the bounding box
+ # of the axes
+ locator = self.get_axes_locator()
+ if locator:
+ pos = locator(self, renderer)
+ self.apply_aspect(pos)
+ else:
+ self.apply_aspect()
+
+ # add the projection matrix to the renderer
+ self.M = self.get_proj()
+ renderer.M = self.M
+ renderer.vvec = self.vvec
+ renderer.eye = self.eye
+ renderer.get_axis_position = self.get_axis_position
+
+ # Calculate projection of collections and zorder them
+ for i, col in enumerate(
+ sorted(self.collections,
+ key=lambda col: col.do_3d_projection(renderer),
+ reverse=True)):
+ col.zorder = i
+
+ # Calculate projection of patches and zorder them
+ for i, patch in enumerate(
+ sorted(self.patches,
+ key=lambda patch: patch.do_3d_projection(renderer),
+ reverse=True)):
+ patch.zorder = i
+
+ if self._axis3don:
+ axes = (self.xaxis, self.yaxis, self.zaxis)
+ # Draw panes first
+ for ax in axes:
+ ax.draw_pane(renderer)
+ # Then axes
+ for ax in axes:
+ ax.draw(renderer)
+
+ # Then rest
+ super(Axes3D, self).draw(renderer)
+
+ def get_axis_position(self):
+ vals = self.get_w_lims()
+ tc = self.tunit_cube(vals, self.M)
+ xhigh = tc[1][2] > tc[2][2]
+ yhigh = tc[3][2] > tc[2][2]
+ zhigh = tc[0][2] > tc[2][2]
+ return xhigh, yhigh, zhigh
+
+ def _on_units_changed(self, scalex=False, scaley=False, scalez=False):
+ """
+ Callback for processing changes to axis units.
+
+ Currently forces updates of data limits and view limits.
+ """
+ self.relim()
+ self.autoscale_view(scalex=scalex, scaley=scaley, scalez=scalez)
+
+ def update_datalim(self, xys, **kwargs):
+ pass
+
+ def get_autoscale_on(self):
+ """
+ Get whether autoscaling is applied for all axes on plot commands
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ return super(Axes3D, self).get_autoscale_on() and self.get_autoscalez_on()
+
+ def get_autoscalez_on(self):
+ """
+ Get whether autoscaling for the z-axis is applied on plot commands
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ return self._autoscaleZon
+
+ def set_autoscale_on(self, b):
+ """
+ Set whether autoscaling is applied on plot commands
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+
+ Parameters
+ ----------
+ b : bool
+ .. ACCEPTS: bool
+ """
+ super(Axes3D, self).set_autoscale_on(b)
+ self.set_autoscalez_on(b)
+
+ def set_autoscalez_on(self, b):
+ """
+ Set whether autoscaling for the z-axis is applied on plot commands
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+
+ Parameters
+ ----------
+ b : bool
+ .. ACCEPTS: bool
+ """
+ self._autoscaleZon = b
+
+ def set_zmargin(self, m):
+ """
+ Set padding of Z data limits prior to autoscaling.
+
+ *m* times the data interval will be added to each
+ end of that interval before it is used in autoscaling.
+
+ accepts: float in range 0 to 1
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ if m < 0 or m > 1:
+ raise ValueError("margin must be in range 0 to 1")
+ self._zmargin = m
+ self.stale = True
+
+ def margins(self, *args, **kw):
+ """
+ Convenience method to set or retrieve autoscaling margins.
+
+ signatures::
+ margins()
+
+ returns xmargin, ymargin, zmargin
+
+ ::
+
+ margins(margin)
+
+ margins(xmargin, ymargin, zmargin)
+
+ margins(x=xmargin, y=ymargin, z=zmargin)
+
+ margins(..., tight=False)
+
+ All forms above set the xmargin, ymargin and zmargin
+ parameters. All keyword parameters are optional. A single argument
+ specifies xmargin, ymargin and zmargin. The *tight* parameter
+ is passed to :meth:`autoscale_view`, which is executed after
+ a margin is changed; the default here is *True*, on the
+ assumption that when margins are specified, no additional
+ padding to match tick marks is usually desired. Setting
+ *tight* to *None* will preserve the previous setting.
+
+ Specifying any margin changes only the autoscaling; for example,
+ if *xmargin* is not None, then *xmargin* times the X data
+ interval will be added to each end of that interval before
+ it is used in autoscaling.
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ if not args and not kw:
+ return self._xmargin, self._ymargin, self._zmargin
+
+ tight = kw.pop('tight', True)
+ mx = kw.pop('x', None)
+ my = kw.pop('y', None)
+ mz = kw.pop('z', None)
+ if not args:
+ pass
+ elif len(args) == 1:
+ mx = my = mz = args[0]
+ elif len(args) == 2:
+ warnings.warn(
+ "Passing exactly two positional arguments to Axes3D.margins "
+ "is deprecated. If needed, pass them as keyword arguments "
+ "instead", cbook.mplDeprecation)
+ mx, my = args
+ elif len(args) == 3:
+ mx, my, mz = args
+ else:
+ raise ValueError(
+ "Axes3D.margins takes at most three positional arguments")
+ if mx is not None:
+ self.set_xmargin(mx)
+ if my is not None:
+ self.set_ymargin(my)
+ if mz is not None:
+ self.set_zmargin(mz)
+
+ scalex = mx is not None
+ scaley = my is not None
+ scalez = mz is not None
+
+ self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley,
+ scalez=scalez)
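+
+ # Usage sketch for ``margins`` above (illustrative comment only; assumes
+ # an Axes3D instance ``ax`` created with
+ # ``fig.add_subplot(111, projection='3d')``):
+ #
+ #     ax.margins(0.05)                  # same margin for x, y and z
+ #     ax.margins(x=0.1, y=0.1, z=0.2)   # per-axis margins
+ #     xm, ym, zm = ax.margins()         # query the current margins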
+
+ def autoscale(self, enable=True, axis='both', tight=None):
+ """
+ Convenience method for simple axis view autoscaling.
+ See :meth:`matplotlib.axes.Axes.autoscale` for full explanation.
+ Note that this function behaves the same, but for all
+ three axes. Therefore, 'z' can be passed for *axis*,
+ and 'both' applies to all three axes.
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ if enable is None:
+ scalex = True
+ scaley = True
+ scalez = True
+ else:
+ if axis in ['x', 'both']:
+ self._autoscaleXon = scalex = bool(enable)
+ else:
+ scalex = False
+ if axis in ['y', 'both']:
+ self._autoscaleYon = scaley = bool(enable)
+ else:
+ scaley = False
+ if axis in ['z', 'both']:
+ self._autoscaleZon = scalez = bool(enable)
+ else:
+ scalez = False
+ self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley,
+ scalez=scalez)
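+
+ # Usage sketch for ``autoscale`` above (illustrative comment only;
+ # assumes an Axes3D instance ``ax``):
+ #
+ #     ax.autoscale(enable=True, axis='z', tight=True)   # z-axis only
+ #     ax.autoscale(enable=False, axis='both')           # freeze all three axes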
+
+ def auto_scale_xyz(self, X, Y, Z=None, had_data=None):
+ x, y, z = map(np.asarray, (X, Y, Z))
+ try:
+ x, y = x.flatten(), y.flatten()
+ if Z is not None:
+ z = z.flatten()
+ except AttributeError:
+ raise
+
+ # This updates the bounding boxes so as to keep a record of
+ # the minimum-sized rectangular volume that holds the data.
+ self.xy_dataLim.update_from_data_xy(np.array([x, y]).T, not had_data)
+ if z is not None:
+ self.zz_dataLim.update_from_data_xy(np.array([z, z]).T, not had_data)
+
+ # Let autoscale_view figure out how to use this data.
+ self.autoscale_view()
+
+ def autoscale_view(self, tight=None, scalex=True, scaley=True,
+ scalez=True):
+ """
+ Autoscale the view limits using the data limits.
+ See :meth:`matplotlib.axes.Axes.autoscale_view` for documentation.
+ Note that this function applies to the 3D axes, and as such
+ adds the *scalez* to the function arguments.
+
+ .. versionchanged :: 1.1.0
+ Function signature was changed to better match the 2D version.
+ *tight* is now explicitly a kwarg and placed first.
+
+ .. versionchanged :: 1.2.1
+ This is now fully functional.
+
+ """
+ if not self._ready:
+ return
+
+ # This method looks at the rectangular volume (see above)
+ # of data and decides how to scale the view portal to fit it.
+ if tight is None:
+ # if there is only image data, just use the datalim
+ _tight = self._tight or (len(self.images)>0 and
+ len(self.lines)==0 and
+ len(self.patches)==0)
+ else:
+ _tight = self._tight = bool(tight)
+
+ if scalex and self._autoscaleXon:
+ xshared = self._shared_x_axes.get_siblings(self)
+ dl = [ax.dataLim for ax in xshared]
+ bb = mtransforms.BboxBase.union(dl)
+ x0, x1 = self.xy_dataLim.intervalx
+ xlocator = self.xaxis.get_major_locator()
+ try:
+ x0, x1 = xlocator.nonsingular(x0, x1)
+ except AttributeError:
+ x0, x1 = mtransforms.nonsingular(x0, x1, increasing=False,
+ expander=0.05)
+ if self._xmargin > 0:
+ delta = (x1 - x0) * self._xmargin
+ x0 -= delta
+ x1 += delta
+ if not _tight:
+ x0, x1 = xlocator.view_limits(x0, x1)
+ self.set_xbound(x0, x1)
+
+ if scaley and self._autoscaleYon:
+ yshared = self._shared_y_axes.get_siblings(self)
+ dl = [ax.dataLim for ax in yshared]
+ bb = mtransforms.BboxBase.union(dl)
+ y0, y1 = self.xy_dataLim.intervaly
+ ylocator = self.yaxis.get_major_locator()
+ try:
+ y0, y1 = ylocator.nonsingular(y0, y1)
+ except AttributeError:
+ y0, y1 = mtransforms.nonsingular(y0, y1, increasing=False,
+ expander=0.05)
+ if self._ymargin > 0:
+ delta = (y1 - y0) * self._ymargin
+ y0 -= delta
+ y1 += delta
+ if not _tight:
+ y0, y1 = ylocator.view_limits(y0, y1)
+ self.set_ybound(y0, y1)
+
+ if scalez and self._autoscaleZon:
+ zshared = self._shared_z_axes.get_siblings(self)
+ dl = [ax.dataLim for ax in zshared]
+ bb = mtransforms.BboxBase.union(dl)
+ z0, z1 = self.zz_dataLim.intervalx
+ zlocator = self.zaxis.get_major_locator()
+ try:
+ z0, z1 = zlocator.nonsingular(z0, z1)
+ except AttributeError:
+ z0, z1 = mtransforms.nonsingular(z0, z1, increasing=False,
+ expander=0.05)
+ if self._zmargin > 0:
+ delta = (z1 - z0) * self._zmargin
+ z0 -= delta
+ z1 += delta
+ if not _tight:
+ z0, z1 = zlocator.view_limits(z0, z1)
+ self.set_zbound(z0, z1)
+
+ def get_w_lims(self):
+ '''Get 3D world limits.'''
+ minx, maxx = self.get_xlim3d()
+ miny, maxy = self.get_ylim3d()
+ minz, maxz = self.get_zlim3d()
+ return minx, maxx, miny, maxy, minz, maxz
+
+ def _determine_lims(self, xmin=None, xmax=None, *args, **kwargs):
+ if xmax is None and cbook.iterable(xmin):
+ xmin, xmax = xmin
+ if xmin == xmax:
+ xmin -= 0.05
+ xmax += 0.05
+ return (xmin, xmax)
+
+ def set_xlim3d(self, left=None, right=None, emit=True, auto=False, **kw):
+ """
+ Set 3D x limits.
+
+ See :meth:`matplotlib.axes.Axes.set_xlim` for full documentation.
+
+ """
+ if 'xmin' in kw:
+ left = kw.pop('xmin')
+ if 'xmax' in kw:
+ right = kw.pop('xmax')
+ if kw:
+ raise ValueError("unrecognized kwargs: %s" % list(kw))
+
+ if right is None and cbook.iterable(left):
+ left, right = left
+
+ self._process_unit_info(xdata=(left, right))
+ left = self._validate_converted_limits(left, self.convert_xunits)
+ right = self._validate_converted_limits(right, self.convert_xunits)
+
+ old_left, old_right = self.get_xlim()
+ if left is None:
+ left = old_left
+ if right is None:
+ right = old_right
+
+ if left == right:
+ warnings.warn(('Attempting to set identical left==right results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'left=%s, right=%s') % (left, right))
+ left, right = mtransforms.nonsingular(left, right, increasing=False)
+ left, right = self.xaxis.limit_range_for_scale(left, right)
+ self.xy_viewLim.intervalx = (left, right)
+
+ if auto is not None:
+ self._autoscaleXon = bool(auto)
+
+ if emit:
+ self.callbacks.process('xlim_changed', self)
+ # Call all of the other x-axes that are shared with this one
+ for other in self._shared_x_axes.get_siblings(self):
+ if other is not self:
+ other.set_xlim(self.xy_viewLim.intervalx,
+ emit=False, auto=auto)
+ if (other.figure != self.figure and
+ other.figure.canvas is not None):
+ other.figure.canvas.draw_idle()
+ self.stale = True
+ return left, right
+ set_xlim = set_xlim3d
+
+ def set_ylim3d(self, bottom=None, top=None, emit=True, auto=False, **kw):
+ """
+ Set 3D y limits.
+
+ See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation.
+
+ """
+ if 'ymin' in kw:
+ bottom = kw.pop('ymin')
+ if 'ymax' in kw:
+ top = kw.pop('ymax')
+ if kw:
+ raise ValueError("unrecognized kwargs: %s" % list(kw))
+
+ if top is None and cbook.iterable(bottom):
+ bottom, top = bottom
+
+ self._process_unit_info(ydata=(bottom, top))
+ bottom = self._validate_converted_limits(bottom, self.convert_yunits)
+ top = self._validate_converted_limits(top, self.convert_yunits)
+
+ old_bottom, old_top = self.get_ylim()
+ if bottom is None:
+ bottom = old_bottom
+ if top is None:
+ top = old_top
+
+ if top == bottom:
+ warnings.warn(('Attempting to set identical bottom==top results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'bottom=%s, top=%s') % (bottom, top))
+ bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
+ bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
+ self.xy_viewLim.intervaly = (bottom, top)
+
+ if auto is not None:
+ self._autoscaleYon = bool(auto)
+
+ if emit:
+ self.callbacks.process('ylim_changed', self)
+ # Call all of the other y-axes that are shared with this one
+ for other in self._shared_y_axes.get_siblings(self):
+ if other is not self:
+ other.set_ylim(self.xy_viewLim.intervaly,
+ emit=False, auto=auto)
+ if (other.figure != self.figure and
+ other.figure.canvas is not None):
+ other.figure.canvas.draw_idle()
+ self.stale = True
+ return bottom, top
+ set_ylim = set_ylim3d
+
+ def set_zlim3d(self, bottom=None, top=None, emit=True, auto=False, **kw):
+ """
+ Set 3D z limits.
+
+ See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation.
+
+ """
+ if 'zmin' in kw:
+ bottom = kw.pop('zmin')
+ if 'zmax' in kw:
+ top = kw.pop('zmax')
+ if kw:
+ raise ValueError("unrecognized kwargs: %s" % list(kw))
+
+ if top is None and cbook.iterable(bottom):
+ bottom, top = bottom
+
+ self._process_unit_info(zdata=(bottom, top))
+ bottom = self._validate_converted_limits(bottom, self.convert_zunits)
+ top = self._validate_converted_limits(top, self.convert_zunits)
+
+ old_bottom, old_top = self.get_zlim()
+ if bottom is None:
+ bottom = old_bottom
+ if top is None:
+ top = old_top
+
+ if top == bottom:
+ warnings.warn(('Attempting to set identical bottom==top results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'bottom=%s, top=%s') % (bottom, top))
+ bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
+ bottom, top = self.zaxis.limit_range_for_scale(bottom, top)
+ self.zz_viewLim.intervalx = (bottom, top)
+
+ if auto is not None:
+ self._autoscaleZon = bool(auto)
+
+ if emit:
+ self.callbacks.process('zlim_changed', self)
+ # Call all of the other z-axes that are shared with this one
+ for other in self._shared_z_axes.get_siblings(self):
+ if other is not self:
+ other.set_zlim(self.zz_viewLim.intervalx,
+ emit=False, auto=auto)
+ if (other.figure != self.figure and
+ other.figure.canvas is not None):
+ other.figure.canvas.draw_idle()
+ self.stale = True
+ return bottom, top
+ set_zlim = set_zlim3d
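+
+ # Usage sketch for the limit setters above (illustrative comment only;
+ # assumes an Axes3D instance ``ax``):
+ #
+ #     ax.set_xlim3d(-1, 1)
+ #     ax.set_ylim3d(-1, 1)
+ #     ax.set_zlim(0, 10)      # alias of set_zlim3d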
+
+ def get_xlim3d(self):
+ return tuple(self.xy_viewLim.intervalx)
+ get_xlim3d.__doc__ = maxes.Axes.get_xlim.__doc__
+ get_xlim = get_xlim3d
+ if get_xlim.__doc__ is not None:
+ get_xlim.__doc__ += """
+ .. versionchanged :: 1.1.0
+ This function now correctly refers to the 3D x-limits
+ """
+
+ def get_ylim3d(self):
+ return tuple(self.xy_viewLim.intervaly)
+ get_ylim3d.__doc__ = maxes.Axes.get_ylim.__doc__
+ get_ylim = get_ylim3d
+ if get_ylim.__doc__ is not None:
+ get_ylim.__doc__ += """
+ .. versionchanged :: 1.1.0
+ This function now correctly refers to the 3D y-limits.
+ """
+
+ def get_zlim3d(self):
+ '''Get 3D z limits.'''
+ return tuple(self.zz_viewLim.intervalx)
+ get_zlim = get_zlim3d
+
+ def get_zscale(self):
+ """
+ Return the zaxis scale string %s
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """ % (", ".join(mscale.get_scale_names()))
+ return self.zaxis.get_scale()
+
+ # We need to slightly redefine these to pass scalez=False
+ # to their calls of autoscale_view.
+ def set_xscale(self, value, **kwargs):
+ self.xaxis._set_scale(value, **kwargs)
+ self.autoscale_view(scaley=False, scalez=False)
+ self._update_transScale()
+ if maxes.Axes.set_xscale.__doc__ is not None:
+ set_xscale.__doc__ = maxes.Axes.set_xscale.__doc__ + """
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+
+ def set_yscale(self, value, **kwargs):
+ self.yaxis._set_scale(value, **kwargs)
+ self.autoscale_view(scalex=False, scalez=False)
+ self._update_transScale()
+ self.stale = True
+ if maxes.Axes.set_yscale.__doc__ is not None:
+ set_yscale.__doc__ = maxes.Axes.set_yscale.__doc__ + """
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+
+ @docstring.dedent_interpd
+ def set_zscale(self, value, **kwargs):
+ """
+ Set the scaling of the z-axis: %(scale)s
+
+ ACCEPTS: [%(scale)s]
+
+ Different kwargs are accepted, depending on the scale:
+ %(scale_docs)s
+
+ .. note ::
+ Currently, Axes3D objects only support linear scales.
+ Other scales may or may not work, and support for these
+ is improving with each release.
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ self.zaxis._set_scale(value, **kwargs)
+ self.autoscale_view(scalex=False, scaley=False)
+ self._update_transScale()
+ self.stale = True
+
+ def set_zticks(self, *args, **kwargs):
+ """
+ Set z-axis tick locations.
+ See :meth:`matplotlib.axes.Axes.set_yticks` for more details.
+
+ .. note::
+ Minor ticks are not supported.
+
+ .. versionadded:: 1.1.0
+ """
+ return self.zaxis.set_ticks(*args, **kwargs)
+
+ def get_zticks(self, minor=False):
+ """
+ Return the z ticks as a list of locations
+ See :meth:`matplotlib.axes.Axes.get_yticks` for more details.
+
+ .. note::
+ Minor ticks are not supported.
+
+ .. versionadded:: 1.1.0
+ """
+ return self.zaxis.get_ticklocs(minor=minor)
+
+ def get_zmajorticklabels(self):
+ """
+ Get the z major tick labels as a list of Text instances
+
+ .. versionadded :: 1.1.0
+ """
+ return cbook.silent_list('Text zticklabel',
+ self.zaxis.get_majorticklabels())
+
+ def get_zminorticklabels(self):
+ """
+ Get the z minor tick labels as a list of Text instances
+
+ .. note::
+ Minor ticks are not supported. This function was added
+ only for completeness.
+
+ .. versionadded :: 1.1.0
+ """
+ return cbook.silent_list('Text zticklabel',
+ self.zaxis.get_minorticklabels())
+
+ def set_zticklabels(self, *args, **kwargs):
+ """
+ Set z-axis tick labels.
+ See :meth:`matplotlib.axes.Axes.set_yticklabels` for more details.
+
+ .. note::
+ Minor ticks are not supported by Axes3D objects.
+
+ .. versionadded:: 1.1.0
+ """
+ return self.zaxis.set_ticklabels(*args, **kwargs)
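+
+ # Usage sketch for the z-tick helpers above (illustrative comment only;
+ # assumes an Axes3D instance ``ax``):
+ #
+ #     ax.set_zticks([0, 5, 10])
+ #     ax.set_zticklabels(['low', 'mid', 'high'])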
+
+ def get_zticklabels(self, minor=False):
+ """
+ Get ztick labels as a list of Text instances.
+ See :meth:`matplotlib.axes.Axes.get_yticklabels` for more details.
+
+ .. note::
+ Minor ticks are not supported.
+
+ .. versionadded:: 1.1.0
+ """
+ return cbook.silent_list('Text zticklabel',
+ self.zaxis.get_ticklabels(minor=minor))
+
+ def zaxis_date(self, tz=None):
+ """
+ Sets up z-axis ticks and labels that treat the z data as dates.
+
+ *tz* is a timezone string or :class:`tzinfo` instance.
+ Defaults to rc value.
+
+ .. note::
+ This function is merely provided for completeness.
+ Axes3D objects do not officially support dates for ticks,
+ and so this may or may not work as expected.
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ self.zaxis.axis_date(tz)
+
+ def get_zticklines(self):
+ """
+ Get ztick lines as a list of Line2D instances.
+ Note that this function is provided merely for completeness.
+ These lines are re-calculated as the display changes.
+
+ .. versionadded:: 1.1.0
+ """
+ return self.zaxis.get_ticklines()
+
+ def clabel(self, *args, **kwargs):
+ """
+ This function is currently not implemented for 3D axes.
+ Returns *None*.
+ """
+ return None
+
+ def view_init(self, elev=None, azim=None):
+ """
+ Set the elevation and azimuth of the axes.
+
+ This can be used to rotate the axes programmatically.
+
+ 'elev' stores the elevation angle in the z plane.
+ 'azim' stores the azimuth angle in the x,y plane.
+
+ If *elev* or *azim* is None (the default), the initial value
+ specified in the :class:`Axes3D` constructor is used.
+ """
+
+ self.dist = 10
+
+ if elev is None:
+ self.elev = self.initial_elev
+ else:
+ self.elev = elev
+
+ if azim is None:
+ self.azim = self.initial_azim
+ else:
+ self.azim = azim
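+
+ # Usage sketch for ``view_init`` above (illustrative comment only;
+ # assumes an Axes3D instance ``ax`` and a pyplot figure ``fig``):
+ #
+ #     ax.view_init(elev=30, azim=45)   # elevation / azimuth in degrees
+ #     fig.canvas.draw_idle()           # request a redraw with the new view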
+
+ def set_proj_type(self, proj_type):
+ """
+ Set the projection type.
+
+ Parameters
+ ----------
+ proj_type : str
+ Type of projection, accepts 'persp' and 'ortho'.
+
+ """
+ if proj_type == 'persp':
+ self._projection = proj3d.persp_transformation
+ elif proj_type == 'ortho':
+ self._projection = proj3d.ortho_transformation
+ else:
+ raise ValueError("unrecognized projection: %s" % proj_type)
+
+ def get_proj(self):
+ """
+ Create the projection matrix from the current viewing position.
+
+ elev stores the elevation angle in the z plane
+ azim stores the azimuth angle in the x,y plane
+
+ dist is the distance of the eye viewing point from the object
+ point.
+
+ """
+ relev, razim = np.pi * self.elev/180, np.pi * self.azim/180
+
+ xmin, xmax = self.get_xlim3d()
+ ymin, ymax = self.get_ylim3d()
+ zmin, zmax = self.get_zlim3d()
+
+ # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0
+ worldM = proj3d.world_transformation(xmin, xmax,
+ ymin, ymax,
+ zmin, zmax)
+
+ # look into the middle of the new coordinates
+ R = np.array([0.5, 0.5, 0.5])
+
+ xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist
+ yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist
+ zp = R[2] + np.sin(relev) * self.dist
+ E = np.array((xp, yp, zp))
+
+ self.eye = E
+ self.vvec = R - E
+ self.vvec = self.vvec / proj3d.mod(self.vvec)
+
+ if abs(relev) > np.pi/2:
+ # upside down
+ V = np.array((0, 0, -1))
+ else:
+ V = np.array((0, 0, 1))
+ zfront, zback = -self.dist, self.dist
+
+ viewM = proj3d.view_transformation(E, R, V)
+ projM = self._projection(zfront, zback)
+ M0 = np.dot(viewM, worldM)
+ M = np.dot(projM, M0)
+ return M
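+
+ # Note on the composition above (descriptive comment only): the returned
+ # matrix is M = projM . viewM . worldM, applied to homogeneous
+ # coordinates.  worldM maps the current x/y/z limits into the unit cube,
+ # viewM looks from the eye point E (placed at distance ``dist`` from the
+ # cube centre R along the elev/azim direction) towards R, and projM is
+ # the perspective or orthographic projection selected by set_proj_type.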
+
+ def mouse_init(self, rotate_btn=1, zoom_btn=3):
+ """Initializes mouse button callbacks to enable 3D rotation of
+ the axes. Also optionally sets the mouse buttons for 3D rotation
+ and zooming.
+
+ ============ =======================================================
+ Argument Description
+ ============ =======================================================
+ *rotate_btn* The integer or list of integers specifying which mouse
+ button or buttons to use for 3D rotation of the axes.
+ Default = 1.
+
+ *zoom_btn* The integer or list of integers specifying which mouse
+ button or buttons to use to zoom the 3D axes.
+ Default = 3.
+ ============ =======================================================
+
+ """
+ self.button_pressed = None
+ canv = self.figure.canvas
+ if canv is not None:
+ c1 = canv.mpl_connect('motion_notify_event', self._on_move)
+ c2 = canv.mpl_connect('button_press_event', self._button_press)
+ c3 = canv.mpl_connect('button_release_event', self._button_release)
+ self._cids = [c1, c2, c3]
+ else:
+ warnings.warn(
+ "Axes3D.figure.canvas is 'None', mouse rotation disabled. "
+ "Set canvas then call Axes3D.mouse_init().")
+
+ # coerce scalars into array-like, then convert into
+ # a regular list to avoid comparisons against None
+ # which breaks in recent versions of numpy.
+ self._rotate_btn = np.atleast_1d(rotate_btn).tolist()
+ self._zoom_btn = np.atleast_1d(zoom_btn).tolist()
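+
+ # Usage sketch for ``mouse_init`` above (illustrative comment only;
+ # assumes an Axes3D instance ``ax``):
+ #
+ #     ax.mouse_init(rotate_btn=1, zoom_btn=[2, 3])   # rotate: left button,
+ #                                                    # zoom: middle or right
+ #     ax.disable_mouse_rotation()                    # disconnect again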
+
+ def can_zoom(self):
+ """
+ Return *True* if this axes supports the zoom box button functionality.
+
+ 3D axes objects do not use the zoom box button.
+ """
+ return False
+
+ def can_pan(self):
+ """
+ Return *True* if this axes supports the pan/zoom button functionality.
+
+ 3D axes objects do not use the pan/zoom button.
+ """
+ return False
+
+ def cla(self):
+ """
+ Clear axes
+ """
+ # Disabling mouse interaction might have been needed a long
+ # time ago, but I can't find a reason for it now - BVR (2012-03)
+ #self.disable_mouse_rotation()
+ super(Axes3D, self).cla()
+ self.zaxis.cla()
+
+ if self._sharez is not None:
+ self.zaxis.major = self._sharez.zaxis.major
+ self.zaxis.minor = self._sharez.zaxis.minor
+ z0, z1 = self._sharez.get_zlim()
+ self.set_zlim(z0, z1, emit=False, auto=None)
+ self.zaxis._set_scale(self._sharez.zaxis.get_scale())
+ else:
+ self.zaxis._set_scale('linear')
+ try:
+ self.set_zlim(0, 1)
+ except TypeError:
+ pass
+
+ self._autoscaleZon = True
+ self._zmargin = 0
+
+ self.grid(rcParams['axes3d.grid'])
+
+ def disable_mouse_rotation(self):
+ """Disable mouse button callbacks.
+ """
+ # Disconnect the various events we set.
+ for cid in self._cids:
+ self.figure.canvas.mpl_disconnect(cid)
+
+ self._cids = []
+
+ def _button_press(self, event):
+ if event.inaxes == self:
+ self.button_pressed = event.button
+ self.sx, self.sy = event.xdata, event.ydata
+
+ def _button_release(self, event):
+ self.button_pressed = None
+
+ def format_zdata(self, z):
+ """
+ Return *z* formatted as a string. This function uses the
+ :attr:`fmt_zdata` attribute if it is callable, otherwise it
+ falls back on the z-axis major formatter.
+ """
+ try:
+ return self.fmt_zdata(z)
+ except (AttributeError, TypeError):
+ func = self.zaxis.get_major_formatter().format_data_short
+ val = func(z)
+ return val
+
+ def format_coord(self, xd, yd):
+ """
+ Given the 2D view coordinates attempt to guess a 3D coordinate.
+ Looks for the nearest edge to the point and then assumes that
+ the point is at the same z location as the nearest point on the edge.
+ """
+
+ if self.M is None:
+ return ''
+
+ if self.button_pressed in self._rotate_btn:
+ # ignore xd and yd and display the view angles instead
+ return 'azimuth=%d deg, elevation=%d deg ' % (self.azim, self.elev)
+
+ # nearest edge
+ p0, p1 = min(self.tunit_edges(),
+ key=lambda edge: proj3d.line2d_seg_dist(
+ edge[0], edge[1], (xd, yd)))
+
+ # scale the z value to match
+ x0, y0, z0 = p0
+ x1, y1, z1 = p1
+ d0 = np.hypot(x0-xd, y0-yd)
+ d1 = np.hypot(x1-xd, y1-yd)
+ dt = d0+d1
+ z = d1/dt * z0 + d0/dt * z1
+
+ x, y, z = proj3d.inv_transform(xd, yd, z, self.M)
+
+ xs = self.format_xdata(x)
+ ys = self.format_ydata(y)
+ zs = self.format_zdata(z)
+ return 'x=%s, y=%s, z=%s' % (xs, ys, zs)
+
+ def _on_move(self, event):
+ """Mouse moving
+
+ button-1 rotates by default. Can be set explicitly in mouse_init().
+ button-3 zooms by default. Can be set explicitly in mouse_init().
+ """
+
+ if not self.button_pressed:
+ return
+
+ if self.M is None:
+ return
+
+ x, y = event.xdata, event.ydata
+ # In case the mouse is out of bounds.
+ if x is None:
+ return
+
+ dx, dy = x - self.sx, y - self.sy
+ w = self._pseudo_w
+ h = self._pseudo_h
+ self.sx, self.sy = x, y
+
+ # Rotation
+ if self.button_pressed in self._rotate_btn:
+ # rotate viewing point
+ # get the x and y pixel coords
+ if dx == 0 and dy == 0:
+ return
+ self.elev = art3d.norm_angle(self.elev - (dy/h)*180)
+ self.azim = art3d.norm_angle(self.azim - (dx/w)*180)
+ self.get_proj()
+ self.stale = True
+ self.figure.canvas.draw_idle()
+
+# elif self.button_pressed == 2:
+ # pan view
+ # project xv,yv,zv -> xw,yw,zw
+ # pan
+# pass
+
+ # Zoom
+ elif self.button_pressed in self._zoom_btn:
+ # zoom view
+ # hmmm..this needs some help from clipping....
+ minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
+ df = 1-((h - dy)/h)
+ dx = (maxx-minx)*df
+ dy = (maxy-miny)*df
+ dz = (maxz-minz)*df
+ self.set_xlim3d(minx - dx, maxx + dx)
+ self.set_ylim3d(miny - dy, maxy + dy)
+ self.set_zlim3d(minz - dz, maxz + dz)
+ self.get_proj()
+ self.figure.canvas.draw_idle()
+
+ def set_zlabel(self, zlabel, fontdict=None, labelpad=None, **kwargs):
+ '''
+ Set zlabel. See doc for :meth:`set_ylabel` for description.
+
+ '''
+ if labelpad is not None:
+ self.zaxis.labelpad = labelpad
+ return self.zaxis.set_label_text(zlabel, fontdict, **kwargs)
+
+ def get_zlabel(self):
+ """
+ Get the z-label text string.
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ label = self.zaxis.get_label()
+ return label.get_text()
+
+ #### Axes rectangle characteristics
+
+ def get_frame_on(self):
+ """
+ Get whether the 3D axes panels are drawn.
+
+ .. versionadded :: 1.1.0
+ """
+ return self._frameon
+
+ def set_frame_on(self, b):
+ """
+ Set whether the 3D axes panels are drawn.
+
+ .. versionadded :: 1.1.0
+
+ Parameters
+ ----------
+ b : bool
+ .. ACCEPTS: bool
+ """
+ self._frameon = bool(b)
+ self.stale = True
+
+ def get_axisbelow(self):
+ """
+ Get whether axis ticks and gridlines are drawn below most artists.
+
+ For axes3d objects, this will always be *True*
+
+ .. versionadded :: 1.1.0
+ This function was added for completeness.
+ """
+ return True
+
+ def set_axisbelow(self, b):
+ """
+ Set whether axis ticks and gridlines are above or below most artists.
+
+ For axes3d objects, this will ignore any settings and just use *True*
+
+ .. versionadded :: 1.1.0
+ This function was added for completeness.
+
+ Parameters
+ ----------
+ b : bool
+ .. ACCEPTS: bool
+ """
+ self._axisbelow = True
+ self.stale = True
+
+ def grid(self, b=True, **kwargs):
+ '''
+ Set / unset 3D grid.
+
+ .. note::
+
+ Currently, this function does not behave the same as
+ :meth:`matplotlib.axes.Axes.grid`, but it is intended to
+ eventually support that behavior.
+
+ .. versionchanged :: 1.1.0
+ This function was changed, but not tested. Please report any bugs.
+ '''
+ # TODO: Operate on each axes separately
+ if len(kwargs):
+ b = True
+ self._draw_grid = cbook._string_to_bool(b)
+ self.stale = True
+
+ def ticklabel_format(self, **kwargs):
+ """
+ Convenience method for manipulating the ScalarFormatter
+ used by default for linear axes in Axes3D objects.
+
+ See :meth:`matplotlib.axes.Axes.ticklabel_format` for full
+ documentation. Note that this version applies to all three
+ axes of the Axes3D object. Therefore, the *axis* argument
+ will also accept a value of 'z' and the value of 'both' will
+ apply to all three axes.
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ style = kwargs.pop('style', '').lower()
+ scilimits = kwargs.pop('scilimits', None)
+ useOffset = kwargs.pop('useOffset', None)
+ axis = kwargs.pop('axis', 'both').lower()
+ if scilimits is not None:
+ try:
+ m, n = scilimits
+ m+n+1 # check that both are numbers
+ except (ValueError, TypeError):
+ raise ValueError("scilimits must be a sequence of 2 integers")
+ if style[:3] == 'sci':
+ sb = True
+ elif style in ['plain', 'comma']:
+ sb = False
+ if style == 'plain':
+ cb = False
+ else:
+ cb = True
+ raise NotImplementedError("comma style remains to be added")
+ elif style == '':
+ sb = None
+ else:
+ raise ValueError("%s is not a valid style value")
+ try:
+ if sb is not None:
+ if axis in ['both', 'x']:
+ self.xaxis.major.formatter.set_scientific(sb)
+ if axis in ['both', 'y']:
+ self.yaxis.major.formatter.set_scientific(sb)
+ if axis in ['both', 'z']:
+ self.zaxis.major.formatter.set_scientific(sb)
+ if scilimits is not None:
+ if axis in ['both', 'x']:
+ self.xaxis.major.formatter.set_powerlimits(scilimits)
+ if axis in ['both', 'y']:
+ self.yaxis.major.formatter.set_powerlimits(scilimits)
+ if axis in ['both', 'z']:
+ self.zaxis.major.formatter.set_powerlimits(scilimits)
+ if useOffset is not None:
+ if axis in ['both', 'x']:
+ self.xaxis.major.formatter.set_useOffset(useOffset)
+ if axis in ['both', 'y']:
+ self.yaxis.major.formatter.set_useOffset(useOffset)
+ if axis in ['both', 'z']:
+ self.zaxis.major.formatter.set_useOffset(useOffset)
+ except AttributeError:
+ raise AttributeError(
+ "This method only works with the ScalarFormatter.")
+
+ def locator_params(self, axis='both', tight=None, **kwargs):
+ """
+ Convenience method for controlling tick locators.
+
+ See :meth:`matplotlib.axes.Axes.locator_params` for full
+ documentation. Note that this is for Axes3D objects,
+ so setting *axis* to 'both' will result in the
+ parameters being set for all three axes. Also, *axis*
+ can take a value of 'z' to apply parameters to the
+ z-axis.
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ _x = axis in ['x', 'both']
+ _y = axis in ['y', 'both']
+ _z = axis in ['z', 'both']
+ if _x:
+ self.xaxis.get_major_locator().set_params(**kwargs)
+ if _y:
+ self.yaxis.get_major_locator().set_params(**kwargs)
+ if _z:
+ self.zaxis.get_major_locator().set_params(**kwargs)
+ self.autoscale_view(tight=tight, scalex=_x, scaley=_y, scalez=_z)
+
+ def tick_params(self, axis='both', **kwargs):
+ """
+ Convenience method for changing the appearance of ticks and
+ tick labels.
+
+ See :meth:`matplotlib.axes.Axes.tick_params` for more complete
+ documentation.
+
+ The only difference is that setting *axis* to 'both' applies
+ the settings to all three axes. The *axis* parameter also
+ accepts a value of 'z', which applies the settings to the
+ z-axis only.
+
+ Also, because Axes3D objects are drawn very differently from
+ regular 2D axes, some of these settings may have ambiguous
+ meaning. For simplicity, the 'z' axis accepts settings as if
+ it were the 'y' axis.
+
+ .. note::
+ While this function is currently implemented, the core part
+ of the Axes3D object may ignore some of these settings.
+ Future releases will fix this. Priority will be given to
+ those who file bugs.
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ super(Axes3D, self).tick_params(axis, **kwargs)
+ if axis in ['z', 'both']:
+ zkw = dict(kwargs)
+ zkw.pop('top', None)
+ zkw.pop('bottom', None)
+ zkw.pop('labeltop', None)
+ zkw.pop('labelbottom', None)
+ self.zaxis.set_tick_params(**zkw)
+
+ ### data limits, ticks, tick labels, and formatting
+
+ def invert_zaxis(self):
+ """
+ Invert the z-axis.
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ bottom, top = self.get_zlim()
+ self.set_zlim(top, bottom, auto=None)
+
+ def zaxis_inverted(self):
+ '''
+ Returns True if the z-axis is inverted.
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ '''
+ bottom, top = self.get_zlim()
+ return top < bottom
+
+ def get_zbound(self):
+ """
+ Returns the z-axis numerical bounds where::
+
+ lowerBound < upperBound
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ bottom, top = self.get_zlim()
+ if bottom < top:
+ return bottom, top
+ else:
+ return top, bottom
+
+ def set_zbound(self, lower=None, upper=None):
+ """
+ Set the lower and upper numerical bounds of the z-axis.
+ This method will honor axes inversion regardless of parameter order.
+ It will not change the :attr:`_autoscaleZon` attribute.
+
+ .. versionadded :: 1.1.0
+ This function was added, but not tested. Please report any bugs.
+ """
+ if upper is None and cbook.iterable(lower):
+ lower, upper = lower
+
+ old_lower, old_upper = self.get_zbound()
+
+ if lower is None:
+ lower = old_lower
+ if upper is None:
+ upper = old_upper
+
+ if self.zaxis_inverted():
+ if lower < upper:
+ self.set_zlim(upper, lower, auto=None)
+ else:
+ self.set_zlim(lower, upper, auto=None)
+ else:
+ if lower < upper:
+ self.set_zlim(lower, upper, auto=None)
+ else:
+ self.set_zlim(upper, lower, auto=None)
+
+ def text(self, x, y, z, s, zdir=None, **kwargs):
+ '''
+ Add text to the plot. kwargs will be passed on to Axes.text,
+ except for the `zdir` keyword, which sets the direction to be
+ used as the z direction.
+ '''
+ text = super(Axes3D, self).text(x, y, s, **kwargs)
+ art3d.text_2d_to_3d(text, z, zdir)
+ return text
+
+ text3D = text
+ text2D = Axes.text
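+
+ # Usage sketch for ``text`` above (illustrative comment only; assumes an
+ # Axes3D instance ``ax``):
+ #
+ #     ax.text(1, 1, 1, 'anchored in data coords', zdir='y')
+ #     ax.text2D(0.05, 0.95, 'anchored in axes coords',
+ #               transform=ax.transAxes)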
+
+ def plot(self, xs, ys, *args, **kwargs):
+ '''
+ Plot 2D or 3D data.
+
+ ========== ================================================
+ Argument Description
+ ========== ================================================
+ *xs*, *ys* x, y coordinates of vertices
+
+ *zs* z value(s), either one for all points or one for
+ each point.
+ *zdir* Which direction to use as z ('x', 'y' or 'z')
+ when plotting a 2D set.
+ ========== ================================================
+
+ Other arguments are passed on to
+ :func:`~matplotlib.axes.Axes.plot`
+ '''
+ had_data = self.has_data()
+
+ # `zs` can be passed positionally or as keyword; checking whether
+ # args[0] is a string matches the behavior of 2D `plot` (via
+ # `_process_plot_var_args`).
+ if args and not isinstance(args[0], six.string_types):
+ zs = args[0]
+ args = args[1:]
+ if 'zs' in kwargs:
+ raise TypeError("plot() for multiple values for argument 'z'")
+ else:
+ zs = kwargs.pop('zs', 0)
+ zdir = kwargs.pop('zdir', 'z')
+
+ # Match length
+ zs = _backports.broadcast_to(zs, len(xs))
+
+ lines = super(Axes3D, self).plot(xs, ys, *args, **kwargs)
+ for line in lines:
+ art3d.line_2d_to_3d(line, zs=zs, zdir=zdir)
+
+ xs, ys, zs = art3d.juggle_axes(xs, ys, zs, zdir)
+ self.auto_scale_xyz(xs, ys, zs, had_data)
+ return lines
+
+ plot3D = plot
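+
+ # Usage sketch for ``plot`` above (illustrative comment only; assumes an
+ # Axes3D instance ``ax`` and ``import numpy as np``):
+ #
+ #     t = np.linspace(0, 4 * np.pi, 200)
+ #     ax.plot(np.cos(t), np.sin(t), t)                # 3D helix
+ #     ax.plot(np.cos(t), np.sin(t), zs=0, zdir='z')   # 2D circle lifted to z=0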
+
+ def plot_surface(self, X, Y, Z, *args, **kwargs):
+ """
+ Create a surface plot.
+
+ By default it will be colored in shades of a solid color, but it also
+ supports color mapping by supplying the *cmap* argument.
+
+ .. note::
+
+ The *rcount* and *ccount* kwargs, which both default to 50,
+ determine the maximum number of samples used in each direction. If
+ the input data is larger, it will be downsampled (by slicing) to
+ these numbers of points.
+
+ Parameters
+ ----------
+ X, Y, Z : 2d arrays
+ Data values.
+
+ rcount, ccount : int
+ Maximum number of samples used in each direction. If the input
+ data is larger, it will be downsampled (by slicing) to these
+ numbers of points. Defaults to 50.
+
+ .. versionadded:: 2.0
+
+ rstride, cstride : int
+ Downsampling stride in each direction. These arguments are
+ mutually exclusive with *rcount* and *ccount*. If only one of
+ *rstride* or *cstride* is set, the other defaults to 10.
+
+ 'classic' mode uses a default of ``rstride = cstride = 10`` instead
+ of the new default of ``rcount = ccount = 50``.
+
+ color : color-like
+ Color of the surface patches.
+
+ cmap : Colormap
+ Colormap of the surface patches.
+
+ facecolors : array-like of colors.
+ Colors of each individual patch.
+
+ norm : Normalize
+ Normalization for the colormap.
+
+ vmin, vmax : float
+ Bounds for the normalization.
+
+ shade : bool
+ Whether to shade the face colors.
+
+ **kwargs :
+ Other arguments are forwarded to `.Poly3DCollection`.
+ """
+
+ had_data = self.has_data()
+
+ if Z.ndim != 2:
+ raise ValueError("Argument Z must be 2-dimensional.")
+ # TODO: Support masked arrays
+ X, Y, Z = np.broadcast_arrays(X, Y, Z)
+ rows, cols = Z.shape
+
+ has_stride = 'rstride' in kwargs or 'cstride' in kwargs
+ has_count = 'rcount' in kwargs or 'ccount' in kwargs
+
+ if has_stride and has_count:
+ raise ValueError("Cannot specify both stride and count arguments")
+
+ rstride = kwargs.pop('rstride', 10)
+ cstride = kwargs.pop('cstride', 10)
+ rcount = kwargs.pop('rcount', 50)
+ ccount = kwargs.pop('ccount', 50)
+
+ if rcParams['_internal.classic_mode']:
+ # Strides have priority over counts in classic mode.
+ # So, only compute strides from counts
+ # if counts were explicitly given
+ if has_count:
+ rstride = int(max(np.ceil(rows / rcount), 1))
+ cstride = int(max(np.ceil(cols / ccount), 1))
+ else:
+ # If the strides are provided then it has priority.
+ # Otherwise, compute the strides from the counts.
+ if not has_stride:
+ rstride = int(max(np.ceil(rows / rcount), 1))
+ cstride = int(max(np.ceil(cols / ccount), 1))
+
+ if 'facecolors' in kwargs:
+ fcolors = kwargs.pop('facecolors')
+ else:
+ color = kwargs.pop('color', None)
+ if color is None:
+ color = self._get_lines.get_next_color()
+ color = np.array(mcolors.to_rgba(color))
+ fcolors = None
+
+ cmap = kwargs.get('cmap', None)
+ norm = kwargs.pop('norm', None)
+ vmin = kwargs.pop('vmin', None)
+ vmax = kwargs.pop('vmax', None)
+ linewidth = kwargs.get('linewidth', None)
+ shade = kwargs.pop('shade', cmap is None)
+ lightsource = kwargs.pop('lightsource', None)
+
+ # Shade the data
+ if shade and cmap is not None and fcolors is not None:
+ fcolors = self._shade_colors_lightsource(Z, cmap, lightsource)
+
+ polys = []
+ # Only need these vectors to shade if there is no cmap
+ if cmap is None and shade:
+ totpts = int(np.ceil((rows - 1) / rstride) *
+ np.ceil((cols - 1) / cstride))
+ v1 = np.empty((totpts, 3))
+ v2 = np.empty((totpts, 3))
+ # This indexes the vertex points
+ which_pt = 0
+
+
+ #colset contains the data for coloring: either average z or the facecolor
+ colset = []
+ for rs in xrange(0, rows-1, rstride):
+ for cs in xrange(0, cols-1, cstride):
+ ps = []
+ for a in (X, Y, Z):
+ ztop = a[rs,cs:min(cols, cs+cstride+1)]
+ zleft = a[rs+1:min(rows, rs+rstride+1),
+ min(cols-1, cs+cstride)]
+ zbase = a[min(rows-1, rs+rstride), cs:min(cols, cs+cstride+1):][::-1]
+ zright = a[rs:min(rows-1, rs+rstride):, cs][::-1]
+ z = np.concatenate((ztop, zleft, zbase, zright))
+ ps.append(z)
+
+ # The construction leaves the array with duplicate points, which
+ # are removed here.
+ ps = list(zip(*ps))
+ lastp = np.array([])
+ ps2 = [ps[0]] + [ps[i] for i in xrange(1, len(ps)) if ps[i] != ps[i-1]]
+ avgzsum = sum(p[2] for p in ps2)
+ polys.append(ps2)
+
+ if fcolors is not None:
+ colset.append(fcolors[rs][cs])
+ else:
+ colset.append(avgzsum / len(ps2))
+
+ # Only need vectors to shade if no cmap
+ if cmap is None and shade:
+ i1, i2, i3 = 0, int(len(ps2)/3), int(2*len(ps2)/3)
+ v1[which_pt] = np.array(ps2[i1]) - np.array(ps2[i2])
+ v2[which_pt] = np.array(ps2[i2]) - np.array(ps2[i3])
+ which_pt += 1
+ if cmap is None and shade:
+ normals = np.cross(v1, v2)
+ else:
+ normals = []
+
+ polyc = art3d.Poly3DCollection(polys, *args, **kwargs)
+
+ if fcolors is not None:
+ if shade:
+ colset = self._shade_colors(colset, normals)
+ polyc.set_facecolors(colset)
+ polyc.set_edgecolors(colset)
+ elif cmap:
+ colset = np.array(colset)
+ polyc.set_array(colset)
+ if vmin is not None or vmax is not None:
+ polyc.set_clim(vmin, vmax)
+ if norm is not None:
+ polyc.set_norm(norm)
+ else:
+ if shade:
+ colset = self._shade_colors(color, normals)
+ else:
+ colset = color
+ polyc.set_facecolors(colset)
+
+ self.add_collection(polyc)
+ self.auto_scale_xyz(X, Y, Z, had_data)
+
+ return polyc
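+
+ # Usage sketch for ``plot_surface`` above (illustrative comment only;
+ # assumes an Axes3D instance ``ax`` and ``import numpy as np``):
+ #
+ #     X, Y = np.meshgrid(np.linspace(-2, 2, 80), np.linspace(-2, 2, 80))
+ #     Z = np.exp(-(X ** 2 + Y ** 2))
+ #     ax.plot_surface(X, Y, Z, cmap='viridis', rcount=50, ccount=50)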
+
+ def _generate_normals(self, polygons):
+ '''
+ Generate normals for polygons by using the first three points.
+ This normal may not be meaningful for polygons with more than
+ three points that do not lie in a plane.
+ '''
+
+ normals = []
+ for verts in polygons:
+ v1 = np.array(verts[0]) - np.array(verts[1])
+ v2 = np.array(verts[2]) - np.array(verts[0])
+ normals.append(np.cross(v1, v2))
+ return normals
+
+ def _shade_colors(self, color, normals):
+ '''
+ Shade *color* using normal vectors given by *normals*.
+ *color* can also be an array of the same length as *normals*.
+ '''
+
+ shade = np.array([np.dot(n / proj3d.mod(n), [-1, -1, 0.5])
+ if proj3d.mod(n) else np.nan
+ for n in normals])
+ mask = ~np.isnan(shade)
+
+ if len(shade[mask]) > 0:
+ norm = Normalize(min(shade[mask]), max(shade[mask]))
+ shade[~mask] = min(shade[mask])
+ color = mcolors.to_rgba_array(color)
+ # shape of color should be (M, 4) (where M is number of faces)
+ # shape of shade should be (M,)
+ # colors should have final shape of (M, 4)
+ alpha = color[:, 3]
+ colors = (0.5 + norm(shade)[:, np.newaxis] * 0.5) * color
+ colors[:, 3] = alpha
+ else:
+ colors = np.asanyarray(color).copy()
+
+ return colors
+
+ def _shade_colors_lightsource(self, data, cmap, lightsource):
+ if lightsource is None:
+ lightsource = LightSource(azdeg=135, altdeg=55)
+ return lightsource.shade(data, cmap)
+
+ def plot_wireframe(self, X, Y, Z, *args, **kwargs):
+ """
+ Plot a 3D wireframe.
+
+ .. note::
+
+ The *rcount* and *ccount* kwargs, which both default to 50,
+ determine the maximum number of samples used in each direction. If
+ the input data is larger, it will be downsampled (by slicing) to
+ these numbers of points.
+
+ Parameters
+ ----------
+ X, Y, Z : 2d arrays
+ Data values.
+
+ rcount, ccount : int
+ Maximum number of samples used in each direction. If the input
+ data is larger, it will be downsampled (by slicing) to these
+ numbers of points. Setting a count to zero causes the data to be
+ not sampled in the corresponding direction, producing a 3D line
+ plot rather than a wireframe plot. Defaults to 50.
+
+ .. versionadded:: 2.0
+
+ rstride, cstride : int
+ Downsampling stride in each direction. These arguments are
+ mutually exclusive with *rcount* and *ccount*. If only one of
+ *rstride* or *cstride* is set, the other defaults to 1. Setting a
+ stride to zero causes the data to be not sampled in the
+ corresponding direction, producing a 3D line plot rather than a
+ wireframe plot.
+
+ 'classic' mode uses a default of ``rstride = cstride = 1`` instead
+ of the new default of ``rcount = ccount = 50``.
+
+ **kwargs :
+ Other arguments are forwarded to `.Line3DCollection`.
+ """
+
+ had_data = self.has_data()
+ if Z.ndim != 2:
+ raise ValueError("Argument Z must be 2-dimensional.")
+ # FIXME: Support masked arrays
+ X, Y, Z = np.broadcast_arrays(X, Y, Z)
+ rows, cols = Z.shape
+
+ has_stride = 'rstride' in kwargs or 'cstride' in kwargs
+ has_count = 'rcount' in kwargs or 'ccount' in kwargs
+
+ if has_stride and has_count:
+ raise ValueError("Cannot specify both stride and count arguments")
+
+ rstride = kwargs.pop('rstride', 1)
+ cstride = kwargs.pop('cstride', 1)
+ rcount = kwargs.pop('rcount', 50)
+ ccount = kwargs.pop('ccount', 50)
+
+ if rcParams['_internal.classic_mode']:
+ # Strides have priority over counts in classic mode.
+ # So, only compute strides from counts
+ # if counts were explicitly given
+ if has_count:
+ rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
+ cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
+ else:
+ # If the strides are provided then it has priority.
+ # Otherwise, compute the strides from the counts.
+ if not has_stride:
+ rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
+ cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
+
+ # We want two sets of lines, one running along the "rows" of
+ # Z and another set of lines running along the "columns" of Z.
+ # This transpose will make it easy to obtain the columns.
+ tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)
+
+ if rstride:
+ rii = list(xrange(0, rows, rstride))
+ # Add the last index only if needed
+ if rows > 0 and rii[-1] != (rows - 1):
+ rii += [rows-1]
+ else:
+ rii = []
+ if cstride:
+ cii = list(xrange(0, cols, cstride))
+ # Add the last index only if needed
+ if cols > 0 and cii[-1] != (cols - 1):
+ cii += [cols-1]
+ else:
+ cii = []
+
+ if rstride == 0 and cstride == 0:
+ raise ValueError("Either rstride or cstride must be non zero")
+
+ # If the inputs were empty, then just
+ # reset everything.
+ if Z.size == 0:
+ rii = []
+ cii = []
+
+ xlines = [X[i] for i in rii]
+ ylines = [Y[i] for i in rii]
+ zlines = [Z[i] for i in rii]
+
+ txlines = [tX[i] for i in cii]
+ tylines = [tY[i] for i in cii]
+ tzlines = [tZ[i] for i in cii]
+
+ lines = ([list(zip(xl, yl, zl))
+ for xl, yl, zl in zip(xlines, ylines, zlines)]
+ + [list(zip(xl, yl, zl))
+ for xl, yl, zl in zip(txlines, tylines, tzlines)])
+
+ linec = art3d.Line3DCollection(lines, *args, **kwargs)
+ self.add_collection(linec)
+ self.auto_scale_xyz(X, Y, Z, had_data)
+
+ return linec
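+
+ # Usage sketch for ``plot_wireframe`` above (illustrative comment only;
+ # assumes an Axes3D instance ``ax`` and ``import numpy as np``):
+ #
+ #     X, Y = np.meshgrid(np.linspace(-2, 2, 40), np.linspace(-2, 2, 40))
+ #     Z = np.exp(-(X ** 2 + Y ** 2))
+ #     ax.plot_wireframe(X, Y, Z, rcount=10, ccount=10)
+ #     ax.plot_wireframe(X, Y, Z, rcount=10, ccount=0)   # row lines only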
+
+ def plot_trisurf(self, *args, **kwargs):
+ """
+ ============= ================================================
+ Argument Description
+ ============= ================================================
+ *X*, *Y*, *Z* Data values as 1D arrays
+ *color* Color of the surface patches
+ *cmap* A colormap for the surface patches.
+ *norm* An instance of Normalize to map values to colors
+ *vmin* Minimum value to map
+ *vmax* Maximum value to map
+ *shade* Whether to shade the facecolors
+ ============= ================================================
+
+ The (optional) triangulation can be specified in one of two ways;
+ either::
+
+ plot_trisurf(triangulation, ...)
+
+ where triangulation is a :class:`~matplotlib.tri.Triangulation`
+ object, or::
+
+ plot_trisurf(X, Y, ...)
+ plot_trisurf(X, Y, triangles, ...)
+ plot_trisurf(X, Y, triangles=triangles, ...)
+
+ in which case a Triangulation object will be created. See
+ :class:`~matplotlib.tri.Triangulation` for an explanation of
+ these possibilities.
+
+ The remaining arguments are::
+
+ plot_trisurf(..., Z)
+
+ where *Z* is the array of values to contour, one per point
+ in the triangulation.
+
+ Other arguments are passed on to
+ :class:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
+
+ **Examples:**
+
+ .. plot:: gallery/mplot3d/trisurf3d.py
+ .. plot:: gallery/mplot3d/trisurf3d_2.py
+
+ .. versionadded:: 1.2.0
+ This plotting function was added for the v1.2.0 release.
+ """
+
+ had_data = self.has_data()
+
+ # TODO: Support custom face colours
+ color = kwargs.pop('color', None)
+ if color is None:
+ color = self._get_lines.get_next_color()
+ color = np.array(mcolors.to_rgba(color))
+
+ cmap = kwargs.get('cmap', None)
+ norm = kwargs.pop('norm', None)
+ vmin = kwargs.pop('vmin', None)
+ vmax = kwargs.pop('vmax', None)
+ linewidth = kwargs.get('linewidth', None)
+ shade = kwargs.pop('shade', cmap is None)
+ lightsource = kwargs.pop('lightsource', None)
+
+ tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
+ if 'Z' in kwargs:
+ z = np.asarray(kwargs.pop('Z'))
+ else:
+ z = np.asarray(args[0])
+ # We do this so Z doesn't get passed as an arg to PolyCollection
+ args = args[1:]
+
+ triangles = tri.get_masked_triangles()
+ xt = tri.x[triangles]
+ yt = tri.y[triangles]
+ zt = z[triangles]
+
+ # verts = np.stack((xt, yt, zt), axis=-1)
+ verts = np.concatenate((
+ xt[..., np.newaxis], yt[..., np.newaxis], zt[..., np.newaxis]
+ ), axis=-1)
+
+ polyc = art3d.Poly3DCollection(verts, *args, **kwargs)
+
+ if cmap:
+ # average over the three points of each triangle
+ avg_z = verts[:, :, 2].mean(axis=1)
+ polyc.set_array(avg_z)
+ if vmin is not None or vmax is not None:
+ polyc.set_clim(vmin, vmax)
+ if norm is not None:
+ polyc.set_norm(norm)
+ else:
+ if shade:
+ v1 = verts[:, 0, :] - verts[:, 1, :]
+ v2 = verts[:, 1, :] - verts[:, 2, :]
+ normals = np.cross(v1, v2)
+ colset = self._shade_colors(color, normals)
+ else:
+ colset = color
+ polyc.set_facecolors(colset)
+
+ self.add_collection(polyc)
+ self.auto_scale_xyz(tri.x, tri.y, z, had_data)
+
+ return polyc
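+
+ # Usage sketch for ``plot_trisurf`` above (illustrative comment only;
+ # assumes an Axes3D instance ``ax`` and 1D arrays x, y, z of equal
+ # length; ``tri`` is a placeholder (ntri, 3) array of vertex indices):
+ #
+ #     ax.plot_trisurf(x, y, z, cmap='viridis')        # Delaunay-triangulated
+ #     ax.plot_trisurf(x, y, z, triangles=tri, cmap='viridis')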
+
+ def _3d_extend_contour(self, cset, stride=5):
+ '''
+ Extend a contour in 3D by creating quadrilateral faces that
+ extrude each contour line half a level spacing above and below
+ its own level.
+ '''
+
+ levels = cset.levels
+ colls = cset.collections
+ dz = (levels[1] - levels[0]) / 2
+
+ for z, linec in zip(levels, colls):
+ topverts = art3d.paths_to_3d_segments(linec.get_paths(), z - dz)
+ botverts = art3d.paths_to_3d_segments(linec.get_paths(), z + dz)
+
+ color = linec.get_color()[0]
+
+ polyverts = []
+ normals = []
+ nsteps = np.round(len(topverts[0]) / stride)
+ if nsteps <= 1:
+ if len(topverts[0]) > 1:
+ nsteps = 2
+ else:
+ continue
+
+ stepsize = (len(topverts[0]) - 1) / (nsteps - 1)
+ for i in range(int(np.round(nsteps)) - 1):
+ i1 = int(np.round(i * stepsize))
+ i2 = int(np.round((i + 1) * stepsize))
+ polyverts.append([topverts[0][i1],
+ topverts[0][i2],
+ botverts[0][i2],
+ botverts[0][i1]])
+
+ v1 = np.array(topverts[0][i1]) - np.array(topverts[0][i2])
+ v2 = np.array(topverts[0][i1]) - np.array(botverts[0][i1])
+ normals.append(np.cross(v1, v2))
+
+ colors = self._shade_colors(color, normals)
+ colors2 = self._shade_colors(color, normals)
+ polycol = art3d.Poly3DCollection(polyverts,
+ facecolors=colors,
+ edgecolors=colors2)
+ polycol.set_sort_zpos(z)
+ self.add_collection3d(polycol)
+
+ for col in colls:
+ self.collections.remove(col)
+
+ def add_contour_set(self, cset, extend3d=False, stride=5, zdir='z', offset=None):
+ zdir = '-' + zdir
+ if extend3d:
+ self._3d_extend_contour(cset, stride)
+ else:
+ for z, linec in zip(cset.levels, cset.collections):
+ if offset is not None:
+ z = offset
+ art3d.line_collection_2d_to_3d(linec, z, zdir=zdir)
+
+ def add_contourf_set(self, cset, zdir='z', offset=None):
+ zdir = '-' + zdir
+ for z, linec in zip(cset.levels, cset.collections):
+ if offset is not None:
+ z = offset
+ art3d.poly_collection_2d_to_3d(linec, z, zdir=zdir)
+ linec.set_sort_zpos(z)
+
+ def contour(self, X, Y, Z, *args, **kwargs):
+ '''
+ Create a 3D contour plot.
+
+ ========== ================================================
+ Argument Description
+ ========== ================================================
+ *X*, *Y*, Data values as numpy.arrays
+ *Z*
+ *extend3d* Whether to extend contour in 3D (default: False)
+ *stride* Stride (step size) for extending contour
+ *zdir* The direction to use: x, y or z (default)
+ *offset* If specified plot a projection of the contour
+ lines on this position in plane normal to zdir
+ ========== ================================================
+
+ The positional and other keyword arguments are passed on to
+ :func:`~matplotlib.axes.Axes.contour`
+
+ Returns a :class:`~matplotlib.axes.Axes.contour`
+ '''
+
+ extend3d = kwargs.pop('extend3d', False)
+ stride = kwargs.pop('stride', 5)
+ zdir = kwargs.pop('zdir', 'z')
+ offset = kwargs.pop('offset', None)
+
+ had_data = self.has_data()
+
+ jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
+ cset = super(Axes3D, self).contour(jX, jY, jZ, *args, **kwargs)
+ self.add_contour_set(cset, extend3d, stride, zdir, offset)
+
+ self.auto_scale_xyz(X, Y, Z, had_data)
+ return cset
+
+ contour3D = contour
+
+ def tricontour(self, *args, **kwargs):
+ """
+ Create a 3D contour plot.
+
+ ========== ================================================
+ Argument Description
+ ========== ================================================
+ *X*, *Y*, Data values as numpy.arrays
+ *Z*
+ *extend3d* Whether to extend contour in 3D (default: False)
+ *stride* Stride (step size) for extending contour
+ *zdir* The direction to use: x, y or z (default)
+ *offset* If specified plot a projection of the contour
+ lines on this position in plane normal to zdir
+ ========== ================================================
+
+ Other keyword arguments are passed on to
+ :func:`~matplotlib.axes.Axes.tricontour`
+
+ Returns a :class:`~matplotlib.axes.Axes.contour`
+
+ .. versionchanged:: 1.3.0
+ Added support for custom triangulations
+
+ EXPERIMENTAL: This method currently produces incorrect output due to a
+ longstanding bug in 3D PolyCollection rendering.
+ """
+
+ extend3d = kwargs.pop('extend3d', False)
+ stride = kwargs.pop('stride', 5)
+ zdir = kwargs.pop('zdir', 'z')
+ offset = kwargs.pop('offset', None)
+
+ had_data = self.has_data()
+
+ tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
+ *args, **kwargs)
+ X = tri.x
+ Y = tri.y
+ if 'Z' in kwargs:
+ Z = kwargs.pop('Z')
+ else:
+ Z = args[0]
+ # We do this so Z doesn't get passed as an arg to Axes.tricontour
+ args = args[1:]
+
+ jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
+ tri = Triangulation(jX, jY, tri.triangles, tri.mask)
+
+ cset = super(Axes3D, self).tricontour(tri, jZ, *args, **kwargs)
+ self.add_contour_set(cset, extend3d, stride, zdir, offset)
+
+ self.auto_scale_xyz(X, Y, Z, had_data)
+ return cset
+
+ def contourf(self, X, Y, Z, *args, **kwargs):
+ '''
+ Create a 3D contourf plot.
+
+ ========== ================================================
+ Argument Description
+ ========== ================================================
+ *X*, *Y*, Data values as numpy.arrays
+ *Z*
+ *zdir* The direction to use: x, y or z (default)
+ *offset* If specified plot a projection of the filled contour
+ on this position in plane normal to zdir
+ ========== ================================================
+
+ The positional and keyword arguments are passed on to
+ :func:`~matplotlib.axes.Axes.contourf`
+
+ Returns a :class:`~matplotlib.axes.Axes.contourf`
+
+ .. versionchanged :: 1.1.0
+ The *zdir* and *offset* kwargs were added.
+ '''
+
+ zdir = kwargs.pop('zdir', 'z')
+ offset = kwargs.pop('offset', None)
+
+ had_data = self.has_data()
+
+ jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
+ cset = super(Axes3D, self).contourf(jX, jY, jZ, *args, **kwargs)
+ self.add_contourf_set(cset, zdir, offset)
+
+ self.auto_scale_xyz(X, Y, Z, had_data)
+ return cset
+
+ contourf3D = contourf
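+
+ # Usage sketch for ``contour``/``contourf`` above (illustrative comment
+ # only; assumes an Axes3D instance ``ax`` and ``import numpy as np``):
+ #
+ #     X, Y = np.meshgrid(np.linspace(-2, 2, 40), np.linspace(-2, 2, 40))
+ #     Z = np.exp(-(X ** 2 + Y ** 2))
+ #     ax.contour(X, Y, Z, extend3d=True, stride=5)
+ #     ax.contourf(X, Y, Z, zdir='z', offset=-0.5)   # filled projection at z=-0.5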
+
+ def tricontourf(self, *args, **kwargs):
+ """
+ Create a 3D contourf plot.
+
+ ========== ================================================
+ Argument Description
+ ========== ================================================
+ *X*, *Y*, Data values as numpy.arrays
+ *Z*
+ *zdir* The direction to use: x, y or z (default)
+ *offset* If specified plot a projection of the contour
+ lines on this position in plane normal to zdir
+ ========== ================================================
+
+ Other keyword arguments are passed on to
+ :func:`~matplotlib.axes.Axes.tricontourf`
+
+ Returns a :class:`~matplotlib.axes.Axes.contourf`
+
+ .. versionchanged :: 1.3.0
+ Added support for custom triangulations
+
+ EXPERIMENTAL: This method currently produces incorrect output due to a
+ longstanding bug in 3D PolyCollection rendering.
+ """
+ zdir = kwargs.pop('zdir', 'z')
+ offset = kwargs.pop('offset', None)
+
+ had_data = self.has_data()
+
+ tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
+ *args, **kwargs)
+ X = tri.x
+ Y = tri.y
+ if 'Z' in kwargs:
+ Z = kwargs.pop('Z')
+ else:
+ Z = args[0]
+ # We do this so Z doesn't get passed as an arg to Axes.tricontourf
+ args = args[1:]
+
+ jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
+ tri = Triangulation(jX, jY, tri.triangles, tri.mask)
+
+ cset = super(Axes3D, self).tricontourf(tri, jZ, *args, **kwargs)
+ self.add_contourf_set(cset, zdir, offset)
+
+ self.auto_scale_xyz(X, Y, Z, had_data)
+ return cset
+
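+    # A minimal usage sketch for the tricontourf method above (not part of
+    # the original module; it assumes numpy and matplotlib are importable
+    # and shows the unstructured-data path, where the triangulation is
+    # built from scattered x/y points; the data below is arbitrary):
+    #
+    #     import numpy as np
+    #     import matplotlib.pyplot as plt
+    #     from mpl_toolkits.mplot3d import Axes3D  # noqa: registers '3d'
+    #
+    #     fig = plt.figure()
+    #     ax = fig.add_subplot(111, projection='3d')
+    #     x = np.random.uniform(-2, 2, 200)
+    #     y = np.random.uniform(-2, 2, 200)
+    #     z = np.exp(-(x ** 2 + y ** 2))
+    #     ax.tricontourf(x, y, z)
+    #     plt.show()
+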
+ def add_collection3d(self, col, zs=0, zdir='z'):
+ '''
+ Add a 3D collection object to the plot.
+
+ 2D collection types are converted to a 3D version by
+ modifying the object and adding z coordinate information.
+
+        The following 2D collection types are supported:
+ - PolyCollection
+ - LineCollection
+ - PatchCollection
+ '''
+ zvals = np.atleast_1d(zs)
+ if len(zvals) > 0 :
+ zsortval = min(zvals)
+ else :
+ zsortval = 0 # FIXME: Fairly arbitrary. Is there a better value?
+
+ # FIXME: use issubclass() (although, then a 3D collection
+ # object would also pass.) Maybe have a collection3d
+ # abstract class to test for and exclude?
+ if type(col) is mcoll.PolyCollection:
+ art3d.poly_collection_2d_to_3d(col, zs=zs, zdir=zdir)
+ col.set_sort_zpos(zsortval)
+ elif type(col) is mcoll.LineCollection:
+ art3d.line_collection_2d_to_3d(col, zs=zs, zdir=zdir)
+ col.set_sort_zpos(zsortval)
+ elif type(col) is mcoll.PatchCollection:
+ art3d.patch_collection_2d_to_3d(col, zs=zs, zdir=zdir)
+ col.set_sort_zpos(zsortval)
+
+ super(Axes3D, self).add_collection(col)
+
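+    # A minimal usage sketch for add_collection3d above (not part of the
+    # original module; it assumes matplotlib is importable and lifts a 2D
+    # LineCollection into the z=0 plane; the segment data is arbitrary):
+    #
+    #     import matplotlib.pyplot as plt
+    #     from matplotlib.collections import LineCollection
+    #     from mpl_toolkits.mplot3d import Axes3D  # noqa: registers '3d'
+    #
+    #     fig = plt.figure()
+    #     ax = fig.add_subplot(111, projection='3d')
+    #     segments = [[(0, 0), (1, 1)], [(0, 1), (1, 0)]]
+    #     lines = LineCollection(segments, colors='k')
+    #     ax.add_collection3d(lines, zs=0, zdir='z')
+    #     plt.show()
+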
+ def scatter(self, xs, ys, zs=0, zdir='z', s=20, c=None, depthshade=True,
+ *args, **kwargs):
+ '''
+ Create a scatter plot.
+
+ ============ ========================================================
+ Argument Description
+ ============ ========================================================
+ *xs*, *ys* Positions of data points.
+ *zs* Either an array of the same length as *xs* and
+ *ys* or a single value to place all points in
+ the same plane. Default is 0.
+ *zdir* Which direction to use as z ('x', 'y' or 'z')
+ when plotting a 2D set.
+ *s* Size in points^2. It is a scalar or an array of the
+ same length as *x* and *y*.
+
+ *c* A color. *c* can be a single color format string, or a
+ sequence of color specifications of length *N*, or a
+ sequence of *N* numbers to be mapped to colors using the
+ *cmap* and *norm* specified via kwargs (see below). Note
+ that *c* should not be a single numeric RGB or RGBA
+ sequence because that is indistinguishable from an array
+ of values to be colormapped. *c* can be a 2-D array in
+ which the rows are RGB or RGBA, however, including the
+ case of a single row to specify the same color for
+ all points.
+
+ *depthshade*
+ Whether or not to shade the scatter markers to give
+ the appearance of depth. Default is *True*.
+ ============ ========================================================
+
+ Keyword arguments are passed on to
+ :func:`~matplotlib.axes.Axes.scatter`.
+
+ Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
+ '''
+
+ had_data = self.has_data()
+
+ xs, ys, zs = np.broadcast_arrays(
+ *[np.ravel(np.ma.filled(t, np.nan)) for t in [xs, ys, zs]])
+ s = np.ma.ravel(s) # This doesn't have to match x, y in size.
+
+ xs, ys, zs, s, c = cbook.delete_masked_points(xs, ys, zs, s, c)
+
+ patches = super(Axes3D, self).scatter(
+ xs, ys, s=s, c=c, *args, **kwargs)
+ is_2d = not cbook.iterable(zs)
+ zs = _backports.broadcast_to(zs, len(xs))
+ art3d.patch_collection_2d_to_3d(patches, zs=zs, zdir=zdir,
+ depthshade=depthshade)
+
+ if self._zmargin < 0.05 and xs.size > 0:
+ self.set_zmargin(0.05)
+
+ #FIXME: why is this necessary?
+ if not is_2d:
+ self.auto_scale_xyz(xs, ys, zs, had_data)
+
+ return patches
+
+ scatter3D = scatter
+
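+    # A minimal usage sketch for the scatter method above (not part of the
+    # original module; it assumes numpy and matplotlib are importable; the
+    # random data is arbitrary and *c* is mapped through the default
+    # colormap as described in the docstring):
+    #
+    #     import numpy as np
+    #     import matplotlib.pyplot as plt
+    #     from mpl_toolkits.mplot3d import Axes3D  # noqa: registers '3d'
+    #
+    #     fig = plt.figure()
+    #     ax = fig.add_subplot(111, projection='3d')
+    #     xs, ys, zs = np.random.rand(3, 50)
+    #     ax.scatter(xs, ys, zs, c=zs, s=40, depthshade=True)
+    #     plt.show()
+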
+ def bar(self, left, height, zs=0, zdir='z', *args, **kwargs):
+ '''
+ Add 2D bar(s).
+
+ ========== ================================================
+ Argument Description
+ ========== ================================================
+ *left* The x coordinates of the left sides of the bars.
+ *height* The height of the bars.
+        *zs*       Z coordinate of bars; if a single value is given,
+                   all bars are placed at the same z coordinate.
+ *zdir* Which direction to use as z ('x', 'y' or 'z')
+ when plotting a 2D set.
+ ========== ================================================
+
+        Keyword arguments are passed on to :func:`~matplotlib.axes.Axes.bar`.
+
+ Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
+ '''
+
+ had_data = self.has_data()
+
+ patches = super(Axes3D, self).bar(left, height, *args, **kwargs)
+
+ zs = _backports.broadcast_to(zs, len(left))
+
+ verts = []
+ verts_zs = []
+ for p, z in zip(patches, zs):
+ vs = art3d.get_patch_verts(p)
+ verts += vs.tolist()
+ verts_zs += [z] * len(vs)
+ art3d.patch_2d_to_3d(p, z, zdir)
+ if 'alpha' in kwargs:
+ p.set_alpha(kwargs['alpha'])
+
+ if len(verts) > 0 :
+ # the following has to be skipped if verts is empty
+ # NOTE: Bugs could still occur if len(verts) > 0,
+ # but the "2nd dimension" is empty.
+ xs, ys = list(zip(*verts))
+ else :
+ xs, ys = [], []
+
+ xs, ys, verts_zs = art3d.juggle_axes(xs, ys, verts_zs, zdir)
+ self.auto_scale_xyz(xs, ys, verts_zs, had_data)
+
+ return patches
+
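+    # A minimal usage sketch for the bar method above (not part of the
+    # original module; it assumes numpy and matplotlib are importable; each
+    # call places one row of 2D bars in the plane at the given *zs*,
+    # stacked along the y axis via zdir='y'; the data is arbitrary):
+    #
+    #     import numpy as np
+    #     import matplotlib.pyplot as plt
+    #     from mpl_toolkits.mplot3d import Axes3D  # noqa: registers '3d'
+    #
+    #     fig = plt.figure()
+    #     ax = fig.add_subplot(111, projection='3d')
+    #     for z in (0, 1, 2):
+    #         heights = np.random.rand(5)
+    #         ax.bar(np.arange(5), heights, zs=z, zdir='y', alpha=0.8)
+    #     plt.show()
+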
+ def bar3d(self, x, y, z, dx, dy, dz, color=None,
+ zsort='average', shade=True, *args, **kwargs):
+ """Generate a 3D barplot.
+
+        This method creates a three-dimensional barplot where the width,
+        depth, height, and color of the bars can all be set individually.
+
+ Parameters
+ ----------
+ x, y, z : array-like
+ The coordinates of the anchor point of the bars.
+
+ dx, dy, dz : scalar or array-like
+ The width, depth, and height of the bars, respectively.
+
+ color : sequence of valid color specifications, optional
+ The color of the bars can be specified globally or
+ individually. This parameter can be:
+
+            - A single color value, to color all bars the same color.
+            - An array of N colors, one per bar, to color each bar
+              independently.
+            - An array of 6 colors, one per face, to color the faces of
+              every bar in the same way.
+            - An array of 6 * N colors, to color each face of each bar
+              independently.
+
+ When coloring the faces of the boxes specifically, this is
+ the order of the coloring:
+
+ 1. -Z (bottom of box)
+ 2. +Z (top of box)
+ 3. -Y
+ 4. +Y
+ 5. -X
+ 6. +X
+
+ zsort : str, optional
+ The z-axis sorting scheme passed onto
+ :func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
+
+ shade : bool, optional (default = True)
+ When true, this shades the dark sides of the bars (relative
+ to the plot's source of light).
+
+ Any additional keyword arguments are passed onto
+ :func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
+
+ Returns
+ -------
+ collection : Poly3DCollection
+ A collection of three dimensional polygons representing
+ the bars.
+ """
+
+ had_data = self.has_data()
+
+ x, y, z, dx, dy, dz = np.broadcast_arrays(
+ np.atleast_1d(x), y, z, dx, dy, dz)
+ minx = np.min(x)
+ maxx = np.max(x + dx)
+ miny = np.min(y)
+ maxy = np.max(y + dy)
+ minz = np.min(z)
+ maxz = np.max(z + dz)
+
+ polys = []
+ for xi, yi, zi, dxi, dyi, dzi in zip(x, y, z, dx, dy, dz):
+ polys.extend([
+ ((xi, yi, zi), (xi + dxi, yi, zi),
+ (xi + dxi, yi + dyi, zi), (xi, yi + dyi, zi)),
+ ((xi, yi, zi + dzi), (xi + dxi, yi, zi + dzi),
+ (xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
+
+ ((xi, yi, zi), (xi + dxi, yi, zi),
+ (xi + dxi, yi, zi + dzi), (xi, yi, zi + dzi)),
+ ((xi, yi + dyi, zi), (xi + dxi, yi + dyi, zi),
+ (xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
+
+ ((xi, yi, zi), (xi, yi + dyi, zi),
+ (xi, yi + dyi, zi + dzi), (xi, yi, zi + dzi)),
+ ((xi + dxi, yi, zi), (xi + dxi, yi + dyi, zi),
+ (xi + dxi, yi + dyi, zi + dzi), (xi + dxi, yi, zi + dzi)),
+ ])
+
+ facecolors = []
+ if color is None:
+ color = [self._get_patches_for_fill.get_next_color()]
+
+ if len(color) == len(x):
+ # bar colors specified, need to expand to number of faces
+ for c in color:
+ facecolors.extend([c] * 6)
+ else:
+ # a single color specified, or face colors specified explicitly
+ facecolors = list(mcolors.to_rgba_array(color))
+ if len(facecolors) < len(x):
+ facecolors *= (6 * len(x))
+
+ if shade:
+ normals = self._generate_normals(polys)
+ sfacecolors = self._shade_colors(facecolors, normals)
+ else:
+ sfacecolors = facecolors
+
+ col = art3d.Poly3DCollection(polys,
+ zsort=zsort,
+ facecolor=sfacecolors,
+ *args, **kwargs)
+ self.add_collection(col)
+
+ self.auto_scale_xyz((minx, maxx), (miny, maxy), (minz, maxz), had_data)
+
+ return col
+
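+    # A minimal usage sketch for the bar3d method above (not part of the
+    # original module; it assumes numpy and matplotlib are importable; dx
+    # and dy are scalars and broadcast against the anchor arrays, as
+    # described in the docstring; the data is arbitrary):
+    #
+    #     import numpy as np
+    #     import matplotlib.pyplot as plt
+    #     from mpl_toolkits.mplot3d import Axes3D  # noqa: registers '3d'
+    #
+    #     fig = plt.figure()
+    #     ax = fig.add_subplot(111, projection='3d')
+    #     x, y = np.meshgrid(np.arange(4), np.arange(4))
+    #     x, y = x.ravel(), y.ravel()
+    #     top = (x + y).astype(float)
+    #     ax.bar3d(x, y, np.zeros_like(top), 0.8, 0.8, top, shade=True)
+    #     plt.show()
+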
+ def set_title(self, label, fontdict=None, loc='center', **kwargs):
+ ret = super(Axes3D, self).set_title(label, fontdict=fontdict, loc=loc,
+ **kwargs)
+ (x, y) = self.title.get_position()
+ self.title.set_y(0.92 * y)
+ return ret
+ set_title.__doc__ = maxes.Axes.set_title.__doc__
+
+ def quiver(self, *args, **kwargs):
+ """
+ Plot a 3D field of arrows.
+
+ call signatures::
+
+ quiver(X, Y, Z, U, V, W, **kwargs)
+
+ Arguments:
+
+ *X*, *Y*, *Z*:
+ The x, y and z coordinates of the arrow locations (default is
+ tail of arrow; see *pivot* kwarg)
+
+ *U*, *V*, *W*:
+ The x, y and z components of the arrow vectors
+
+        The arguments may be array-like or scalars, so long as they
+        can be broadcast together. The arguments can also be
+        masked arrays. If an element in any of the arguments is masked,
+        the corresponding quiver element will not be plotted.
+
+ Keyword arguments:
+
+ *length*: [1.0 | float]
+            The length of each quiver; defaults to 1.0. The unit is
+            the same as that of the axes.
+
+ *arrow_length_ratio*: [0.3 | float]
+            The length of the arrow head relative to the quiver;
+            defaults to 0.3.
+
+ *pivot*: [ 'tail' | 'middle' | 'tip' ]
+ The part of the arrow that is at the grid point; the arrow
+ rotates about this point, hence the name *pivot*.
+ Default is 'tail'
+
+ *normalize*: bool
+            When True, all of the arrows will be the same length. This
+            defaults to False, in which case the arrow lengths depend on
+            the values of u, v and w.
+
+ Any additional keyword arguments are delegated to
+ :class:`~matplotlib.collections.LineCollection`
+
+ """
+ def calc_arrow(uvw, angle=15):
+ """
+            Calculate the two arrowhead direction vectors for the shaft
+            direction *uvw*, which should be a unit vector.
+ """
+ # get unit direction vector perpendicular to (u,v,w)
+ norm = np.linalg.norm(uvw[:2])
+ if norm > 0:
+ x = uvw[1] / norm
+ y = -uvw[0] / norm
+ else:
+ x, y = 0, 1
+
+ # compute the two arrowhead direction unit vectors
+ ra = math.radians(angle)
+ c = math.cos(ra)
+ s = math.sin(ra)
+
+ # construct the rotation matrices
+ Rpos = np.array([[c+(x**2)*(1-c), x*y*(1-c), y*s],
+ [y*x*(1-c), c+(y**2)*(1-c), -x*s],
+ [-y*s, x*s, c]])
+ # opposite rotation negates all the sin terms
+ Rneg = Rpos.copy()
+ Rneg[[0,1,2,2],[2,2,0,1]] = -Rneg[[0,1,2,2],[2,2,0,1]]
+
+ # multiply them to get the rotated vector
+ return Rpos.dot(uvw), Rneg.dot(uvw)
+
+ had_data = self.has_data()
+
+ # handle kwargs
+ # shaft length
+ length = kwargs.pop('length', 1)
+ # arrow length ratio to the shaft length
+ arrow_length_ratio = kwargs.pop('arrow_length_ratio', 0.3)
+ # pivot point
+ pivot = kwargs.pop('pivot', 'tail')
+ # normalize
+ normalize = kwargs.pop('normalize', False)
+
+ # handle args
+ argi = 6
+ if len(args) < argi:
+ raise ValueError('Wrong number of arguments. Expected %d got %d' %
+ (argi, len(args)))
+
+ # first 6 arguments are X, Y, Z, U, V, W
+ input_args = args[:argi]
+ # if any of the args are scalar, convert into list
+ input_args = [[k] if isinstance(k, (int, float)) else k
+ for k in input_args]
+
+ # extract the masks, if any
+ masks = [k.mask for k in input_args if isinstance(k, np.ma.MaskedArray)]
+ # broadcast to match the shape
+ bcast = np.broadcast_arrays(*(input_args + masks))
+ input_args = bcast[:argi]
+ masks = bcast[argi:]
+ if masks:
+ # combine the masks into one
+ mask = reduce(np.logical_or, masks)
+ # put mask on and compress
+ input_args = [np.ma.array(k, mask=mask).compressed()
+ for k in input_args]
+ else:
+ input_args = [k.flatten() for k in input_args]
+
+ if any(len(v) == 0 for v in input_args):
+ # No quivers, so just make an empty collection and return early
+ linec = art3d.Line3DCollection([], *args[argi:], **kwargs)
+ self.add_collection(linec)
+ return linec
+
+        # The following assertions must hold before proceeding
+        # must all be ndarrays
+        assert all(isinstance(k, np.ndarray) for k in input_args)
+        # must all have the same shape
+ assert len({k.shape for k in input_args}) == 1
+
+ shaft_dt = np.linspace(0, length, num=2)
+ arrow_dt = shaft_dt * arrow_length_ratio
+
+ if pivot == 'tail':
+ shaft_dt -= length
+ elif pivot == 'middle':
+ shaft_dt -= length/2.
+ elif pivot != 'tip':
+ raise ValueError('Invalid pivot argument: ' + str(pivot))
+
+ XYZ = np.column_stack(input_args[:3])
+ UVW = np.column_stack(input_args[3:argi]).astype(float)
+
+ # Normalize rows of UVW
+ # Note: with numpy 1.9+, could use np.linalg.norm(UVW, axis=1)
+ norm = np.sqrt(np.sum(UVW**2, axis=1))
+
+ # If any row of UVW is all zeros, don't make a quiver for it
+ mask = norm > 0
+ XYZ = XYZ[mask]
+ if normalize:
+ UVW = UVW[mask] / norm[mask].reshape((-1, 1))
+ else:
+ UVW = UVW[mask]
+
+ if len(XYZ) > 0:
+ # compute the shaft lines all at once with an outer product
+ shafts = (XYZ - np.multiply.outer(shaft_dt, UVW)).swapaxes(0, 1)
+ # compute head direction vectors, n heads by 2 sides by 3 dimensions
+ head_dirs = np.array([calc_arrow(d) for d in UVW])
+ # compute all head lines at once, starting from where the shaft ends
+ heads = shafts[:, :1] - np.multiply.outer(arrow_dt, head_dirs)
+ # stack left and right head lines together
+ heads.shape = (len(arrow_dt), -1, 3)
+ # transpose to get a list of lines
+ heads = heads.swapaxes(0, 1)
+
+ lines = list(shafts) + list(heads)
+ else:
+ lines = []
+
+ linec = art3d.Line3DCollection(lines, *args[argi:], **kwargs)
+ self.add_collection(linec)
+
+ self.auto_scale_xyz(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2], had_data)
+
+ return linec
+
+ quiver3D = quiver
+
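+    # A minimal usage sketch for the quiver method above (not part of the
+    # original module; it assumes numpy and matplotlib are importable; the
+    # grid and vector field are arbitrary, and *length*/*normalize* are the
+    # keyword arguments documented above):
+    #
+    #     import numpy as np
+    #     import matplotlib.pyplot as plt
+    #     from mpl_toolkits.mplot3d import Axes3D  # noqa: registers '3d'
+    #
+    #     fig = plt.figure()
+    #     ax = fig.add_subplot(111, projection='3d')
+    #     X, Y, Z = np.meshgrid(np.linspace(0, 1, 5),
+    #                           np.linspace(0, 1, 5),
+    #                           np.linspace(0, 1, 5))
+    #     U = np.sin(np.pi * X)
+    #     V = np.cos(np.pi * Y)
+    #     W = np.full_like(Z, 0.5)
+    #     ax.quiver(X, Y, Z, U, V, W, length=0.2, normalize=True)
+    #     plt.show()
+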
+ def voxels(self, *args, **kwargs):
+ """
+ ax.voxels([x, y, z,] /, filled, **kwargs)
+
+ Plot a set of filled voxels
+
+ All voxels are plotted as 1x1x1 cubes on the axis, with filled[0,0,0]
+ placed with its lower corner at the origin. Occluded faces are not
+ plotted.
+
+ Call signatures::
+
+ voxels(filled, facecolors=fc, edgecolors=ec, **kwargs)
+ voxels(x, y, z, filled, facecolors=fc, edgecolors=ec, **kwargs)
+
+ .. versionadded:: 2.1
+
+ Parameters
+ ----------
+ filled : 3D np.array of bool
+ A 3d array of values, with truthy values indicating which voxels
+ to fill
+
+ x, y, z : 3D np.array, optional
+ The coordinates of the corners of the voxels. This should broadcast
+ to a shape one larger in every dimension than the shape of `filled`.
+ These can be used to plot non-cubic voxels.
+
+ If not specified, defaults to increasing integers along each axis,
+ like those returned by :func:`~numpy.indices`.
+ As indicated by the ``/`` in the function signature, these arguments
+ can only be passed positionally.
+
+ facecolors, edgecolors : array_like, optional
+ The color to draw the faces and edges of the voxels. Can only be
+ passed as keyword arguments.
+ This parameter can be:
+
+ - A single color value, to color all voxels the same color. This
+ can be either a string, or a 1D rgb/rgba array
+ - ``None``, the default, to use a single color for the faces, and
+ the style default for the edges.
+ - A 3D ndarray of color names, with each item the color for the
+ corresponding voxel. The size must match the voxels.
+ - A 4D ndarray of rgb/rgba data, with the components along the
+ last axis.
+
+ **kwargs
+ Additional keyword arguments to pass onto
+ :func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
+
+ Returns
+ -------
+ faces : dict
+ A dictionary indexed by coordinate, where ``faces[i,j,k]`` is a
+ `Poly3DCollection` of the faces drawn for the voxel
+ ``filled[i,j,k]``. If no faces were drawn for a given voxel, either
+ because it was not asked to be drawn, or it is fully occluded, then
+ ``(i,j,k) not in faces``.
+
+ Examples
+ --------
+ .. plot:: gallery/mplot3d/voxels.py
+ .. plot:: gallery/mplot3d/voxels_rgb.py
+ .. plot:: gallery/mplot3d/voxels_torus.py
+ .. plot:: gallery/mplot3d/voxels_numpy_logo.py
+ """
+
+ # work out which signature we should be using, and use it to parse
+ # the arguments. Name must be voxels for the correct error message
+ if len(args) >= 3:
+ # underscores indicate position only
+ def voxels(__x, __y, __z, filled, **kwargs):
+ return (__x, __y, __z), filled, kwargs
+ else:
+ def voxels(filled, **kwargs):
+ return None, filled, kwargs
+
+ xyz, filled, kwargs = voxels(*args, **kwargs)
+
+ # check dimensions
+ if filled.ndim != 3:
+ raise ValueError("Argument filled must be 3-dimensional")
+ size = np.array(filled.shape, dtype=np.intp)
+
+ # check xyz coordinates, which are one larger than the filled shape
+ coord_shape = tuple(size + 1)
+ if xyz is None:
+ x, y, z = np.indices(coord_shape)
+ else:
+ x, y, z = (_backports.broadcast_to(c, coord_shape) for c in xyz)
+
+ def _broadcast_color_arg(color, name):
+ if np.ndim(color) in (0, 1):
+ # single color, like "red" or [1, 0, 0]
+ return _backports.broadcast_to(
+ color, filled.shape + np.shape(color))
+ elif np.ndim(color) in (3, 4):
+ # 3D array of strings, or 4D array with last axis rgb
+ if np.shape(color)[:3] != filled.shape:
+ raise ValueError(
+ "When multidimensional, {} must match the shape of "
+ "filled".format(name))
+ return color
+ else:
+ raise ValueError("Invalid {} argument".format(name))
+
+        # intercept the facecolors, handling defaults and broadcasting
+ facecolors = kwargs.pop('facecolors', None)
+ if facecolors is None:
+ facecolors = self._get_patches_for_fill.get_next_color()
+ facecolors = _broadcast_color_arg(facecolors, 'facecolors')
+
+ # broadcast but no default on edgecolors
+ edgecolors = kwargs.pop('edgecolors', None)
+ edgecolors = _broadcast_color_arg(edgecolors, 'edgecolors')
+
+ # always scale to the full array, even if the data is only in the center
+ self.auto_scale_xyz(x, y, z)
+
+ # points lying on corners of a square
+ square = np.array([
+ [0, 0, 0],
+ [0, 1, 0],
+ [1, 1, 0],
+ [1, 0, 0]
+ ], dtype=np.intp)
+
+ voxel_faces = defaultdict(list)
+
+ def permutation_matrices(n):
+ """ Generator of cyclic permutation matices """
+ mat = np.eye(n, dtype=np.intp)
+ for i in range(n):
+ yield mat
+ mat = np.roll(mat, 1, axis=0)
+
+ # iterate over each of the YZ, ZX, and XY orientations, finding faces to
+ # render
+ for permute in permutation_matrices(3):
+ # find the set of ranges to iterate over
+ pc, qc, rc = permute.T.dot(size)
+ pinds = np.arange(pc)
+ qinds = np.arange(qc)
+ rinds = np.arange(rc)
+
+ square_rot = square.dot(permute.T)
+
+ # iterate within the current plane
+ for p in pinds:
+ for q in qinds:
+ # iterate perpendicularly to the current plane, handling
+ # boundaries. We only draw faces between a voxel and an
+ # empty space, to avoid drawing internal faces.
+
+ # draw lower faces
+ p0 = permute.dot([p, q, 0])
+ i0 = tuple(p0)
+ if filled[i0]:
+ voxel_faces[i0].append(p0 + square_rot)
+
+ # draw middle faces
+ for r1, r2 in zip(rinds[:-1], rinds[1:]):
+ p1 = permute.dot([p, q, r1])
+ p2 = permute.dot([p, q, r2])
+
+ i1 = tuple(p1)
+ i2 = tuple(p2)
+
+ if filled[i1] and not filled[i2]:
+ voxel_faces[i1].append(p2 + square_rot)
+ elif not filled[i1] and filled[i2]:
+ voxel_faces[i2].append(p2 + square_rot)
+
+ # draw upper faces
+ pk = permute.dot([p, q, rc-1])
+ pk2 = permute.dot([p, q, rc])
+ ik = tuple(pk)
+ if filled[ik]:
+ voxel_faces[ik].append(pk2 + square_rot)
+
+ # iterate over the faces, and generate a Poly3DCollection for each voxel
+ polygons = {}
+ for coord, faces_inds in voxel_faces.items():
+ # convert indices into 3D positions
+ if xyz is None:
+ faces = faces_inds
+ else:
+ faces = []
+ for face_inds in faces_inds:
+ ind = face_inds[:, 0], face_inds[:, 1], face_inds[:, 2]
+ face = np.empty(face_inds.shape)
+ face[:, 0] = x[ind]
+ face[:, 1] = y[ind]
+ face[:, 2] = z[ind]
+ faces.append(face)
+
+ poly = art3d.Poly3DCollection(faces,
+ facecolors=facecolors[coord],
+ edgecolors=edgecolors[coord],
+ **kwargs
+ )
+ self.add_collection3d(poly)
+ polygons[coord] = poly
+
+ return polygons
+
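+    # A minimal usage sketch for the voxels method above (not part of the
+    # original module; it assumes numpy and matplotlib are importable; the
+    # boolean mask and colors are arbitrary, and facecolors/edgecolors
+    # accept a single color as described in the docstring):
+    #
+    #     import numpy as np
+    #     import matplotlib.pyplot as plt
+    #     from mpl_toolkits.mplot3d import Axes3D  # noqa: registers '3d'
+    #
+    #     fig = plt.figure()
+    #     ax = fig.add_subplot(111, projection='3d')
+    #     filled = np.zeros((4, 4, 4), dtype=bool)
+    #     filled[:2, :2, :2] = True
+    #     filled[2:, 2:, 2:] = True
+    #     ax.voxels(filled, facecolors='green', edgecolors='k')
+    #     plt.show()
+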
+
+def get_test_data(delta=0.05):
+ '''
+ Return a tuple X, Y, Z with a test data set.
+ '''
+ x = y = np.arange(-3.0, 3.0, delta)
+ X, Y = np.meshgrid(x, y)
+
+ Z1 = np.exp(-(X**2 + Y**2) / 2) / (2 * np.pi)
+ Z2 = (np.exp(-(((X - 1) / 1.5)**2 + ((Y - 1) / 0.5)**2) / 2) /
+ (2 * np.pi * 0.5 * 1.5))
+ Z = Z2 - Z1
+
+ X = X * 10
+ Y = Y * 10
+ Z = Z * 500
+ return X, Y, Z
+
+
+########################################################
+# Register Axes3D as a 'projection' object available
+# for use just like any other axes
+########################################################
+import matplotlib.projections as proj
+proj.projection_registry.register(Axes3D)
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/axis3d.py b/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/axis3d.py
new file mode 100644
index 00000000000..50b81df9125
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/axis3d.py
@@ -0,0 +1,484 @@
+# axis3d.py, original mplot3d version by John Porter
+# Created: 23 Sep 2005
+# Parts rewritten by Reinier Heeres <reinier@heeres.eu>
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+
+import math
+import copy
+
+from matplotlib import lines as mlines, axis as maxis, patches as mpatches
+from matplotlib import rcParams
+from . import art3d
+from . import proj3d
+
+import numpy as np
+
+def get_flip_min_max(coord, index, mins, maxs):
+ if coord[index] == mins[index]:
+ return maxs[index]
+ else:
+ return mins[index]
+
+def move_from_center(coord, centers, deltas, axmask=(True, True, True)):
+ '''Return a coordinate that is moved by "deltas" away from the center.'''
+ coord = copy.copy(coord)
+ for i in range(3):
+ if not axmask[i]:
+ continue
+ if coord[i] < centers[i]:
+ coord[i] -= deltas[i]
+ else:
+ coord[i] += deltas[i]
+ return coord
+
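+# A minimal numeric sketch for move_from_center above (not part of the
+# original module; plain numpy arrays are assumed as inputs):
+#
+#     import numpy as np
+#     centers = np.array([0.0, 0.0, 0.0])
+#     deltas = np.array([0.1, 0.1, 0.1])
+#     # each unmasked component is pushed away from the corresponding center
+#     move_from_center(np.array([1.0, -1.0, 0.5]), centers, deltas)
+#     # -> array([ 1.1, -1.1,  0.6])
+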
+def tick_update_position(tick, tickxs, tickys, labelpos):
+ '''Update tick line and label position and style.'''
+
+ for (label, on) in [(tick.label1, tick.label1On),
+ (tick.label2, tick.label2On)]:
+ if on:
+ label.set_position(labelpos)
+
+ tick.tick1On, tick.tick2On = True, False
+ tick.tick1line.set_linestyle('-')
+ tick.tick1line.set_marker('')
+ tick.tick1line.set_data(tickxs, tickys)
+ tick.gridline.set_data(0, 0)
+
+class Axis(maxis.XAxis):
+
+ # These points from the unit cube make up the x, y and z-planes
+ _PLANES = (
+ (0, 3, 7, 4), (1, 2, 6, 5), # yz planes
+ (0, 1, 5, 4), (3, 2, 6, 7), # xz planes
+ (0, 1, 2, 3), (4, 5, 6, 7), # xy planes
+ )
+
+ # Some properties for the axes
+ _AXINFO = {
+ 'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2),
+ 'color': (0.95, 0.95, 0.95, 0.5)},
+ 'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2),
+ 'color': (0.90, 0.90, 0.90, 0.5)},
+ 'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1),
+ 'color': (0.925, 0.925, 0.925, 0.5)},
+ }
+
+ def __init__(self, adir, v_intervalx, d_intervalx, axes, *args, **kwargs):
+ # adir identifies which axes this is
+ self.adir = adir
+ # data and viewing intervals for this direction
+ self.d_interval = d_intervalx
+ self.v_interval = v_intervalx
+
+ # This is a temporary member variable.
+ # Do not depend on this existing in future releases!
+ self._axinfo = self._AXINFO[adir].copy()
+ if rcParams['_internal.classic_mode']:
+ self._axinfo.update(
+ {'label': {'va': 'center',
+ 'ha': 'center'},
+ 'tick': {'inward_factor': 0.2,
+ 'outward_factor': 0.1,
+ 'linewidth': rcParams['lines.linewidth'],
+ 'color': 'k'},
+ 'axisline': {'linewidth': 0.75,
+ 'color': (0, 0, 0, 1)},
+ 'grid': {'color': (0.9, 0.9, 0.9, 1),
+ 'linewidth': 1.0,
+ 'linestyle': '-'},
+ })
+ else:
+ self._axinfo.update(
+ {'label': {'va': 'center',
+ 'ha': 'center'},
+ 'tick': {'inward_factor': 0.2,
+ 'outward_factor': 0.1,
+ 'linewidth': rcParams.get(
+ adir + 'tick.major.width',
+ rcParams['xtick.major.width']),
+ 'color': rcParams.get(
+ adir + 'tick.color',
+ rcParams['xtick.color'])},
+ 'axisline': {'linewidth': rcParams['axes.linewidth'],
+ 'color': rcParams['axes.edgecolor']},
+ 'grid': {'color': rcParams['grid.color'],
+ 'linewidth': rcParams['grid.linewidth'],
+ 'linestyle': rcParams['grid.linestyle']},
+ })
+
+ maxis.XAxis.__init__(self, axes, *args, **kwargs)
+ self.set_rotate_label(kwargs.get('rotate_label', None))
+
+ def init3d(self):
+ self.line = mlines.Line2D(
+ xdata=(0, 0), ydata=(0, 0),
+ linewidth=self._axinfo['axisline']['linewidth'],
+ color=self._axinfo['axisline']['color'],
+ antialiased=True)
+
+ # Store dummy data in Polygon object
+ self.pane = mpatches.Polygon(
+ np.array([[0, 0], [0, 1], [1, 0], [0, 0]]),
+ closed=False, alpha=0.8, facecolor='k', edgecolor='k')
+ self.set_pane_color(self._axinfo['color'])
+
+ self.axes._set_artist_props(self.line)
+ self.axes._set_artist_props(self.pane)
+ self.gridlines = art3d.Line3DCollection([])
+ self.axes._set_artist_props(self.gridlines)
+ self.axes._set_artist_props(self.label)
+ self.axes._set_artist_props(self.offsetText)
+ # Need to be able to place the label at the correct location
+ self.label._transform = self.axes.transData
+ self.offsetText._transform = self.axes.transData
+
+ def get_tick_positions(self):
+ majorLocs = self.major.locator()
+ self.major.formatter.set_locs(majorLocs)
+ majorLabels = [self.major.formatter(val, i)
+ for i, val in enumerate(majorLocs)]
+ return majorLabels, majorLocs
+
+ def get_major_ticks(self, numticks=None):
+ ticks = maxis.XAxis.get_major_ticks(self, numticks)
+ for t in ticks:
+ t.tick1line.set_transform(self.axes.transData)
+ t.tick2line.set_transform(self.axes.transData)
+ t.gridline.set_transform(self.axes.transData)
+ t.label1.set_transform(self.axes.transData)
+ t.label2.set_transform(self.axes.transData)
+ return ticks
+
+ def set_pane_pos(self, xys):
+ xys = np.asarray(xys)
+ xys = xys[:,:2]
+ self.pane.xy = xys
+ self.stale = True
+
+ def set_pane_color(self, color):
+ '''Set pane color to a RGBA tuple.'''
+ self._axinfo['color'] = color
+ self.pane.set_edgecolor(color)
+ self.pane.set_facecolor(color)
+ self.pane.set_alpha(color[-1])
+ self.stale = True
+
+ def set_rotate_label(self, val):
+ '''
+ Whether to rotate the axis label: True, False or None.
+ If set to None the label will be rotated if longer than 4 chars.
+ '''
+ self._rotate_label = val
+ self.stale = True
+
+ def get_rotate_label(self, text):
+ if self._rotate_label is not None:
+ return self._rotate_label
+ else:
+ return len(text) > 4
+
+ def _get_coord_info(self, renderer):
+ minx, maxx, miny, maxy, minz, maxz = self.axes.get_w_lims()
+ if minx > maxx:
+ minx, maxx = maxx, minx
+ if miny > maxy:
+ miny, maxy = maxy, miny
+ if minz > maxz:
+ minz, maxz = maxz, minz
+ mins = np.array((minx, miny, minz))
+ maxs = np.array((maxx, maxy, maxz))
+ centers = (maxs + mins) / 2.
+ deltas = (maxs - mins) / 12.
+ mins = mins - deltas / 4.
+ maxs = maxs + deltas / 4.
+
+ vals = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
+ tc = self.axes.tunit_cube(vals, renderer.M)
+ avgz = [tc[p1][2] + tc[p2][2] + tc[p3][2] + tc[p4][2]
+ for p1, p2, p3, p4 in self._PLANES]
+ highs = np.array([avgz[2*i] < avgz[2*i+1] for i in range(3)])
+
+ return mins, maxs, centers, deltas, tc, highs
+
+ def draw_pane(self, renderer):
+ renderer.open_group('pane3d')
+
+ mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
+
+ info = self._axinfo
+ index = info['i']
+ if not highs[index]:
+ plane = self._PLANES[2 * index]
+ else:
+ plane = self._PLANES[2 * index + 1]
+ xys = [tc[p] for p in plane]
+ self.set_pane_pos(xys)
+ self.pane.draw(renderer)
+
+ renderer.close_group('pane3d')
+
+ def draw(self, renderer):
+ self.label._transform = self.axes.transData
+ renderer.open_group('axis3d')
+
+ # code from XAxis
+ majorTicks = self.get_major_ticks()
+ majorLocs = self.major.locator()
+
+ info = self._axinfo
+ index = info['i']
+
+ # filter locations here so that no extra grid lines are drawn
+ locmin, locmax = self.get_view_interval()
+ if locmin > locmax:
+ locmin, locmax = locmax, locmin
+
+ # Rudimentary clipping
+ majorLocs = [loc for loc in majorLocs if
+ locmin <= loc <= locmax]
+ self.major.formatter.set_locs(majorLocs)
+ majorLabels = [self.major.formatter(val, i)
+ for i, val in enumerate(majorLocs)]
+
+ mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
+
+ # Determine grid lines
+ minmax = np.where(highs, maxs, mins)
+
+ # Draw main axis line
+ juggled = info['juggled']
+ edgep1 = minmax.copy()
+ edgep1[juggled[0]] = get_flip_min_max(edgep1, juggled[0], mins, maxs)
+
+ edgep2 = edgep1.copy()
+ edgep2[juggled[1]] = get_flip_min_max(edgep2, juggled[1], mins, maxs)
+ pep = proj3d.proj_trans_points([edgep1, edgep2], renderer.M)
+ centpt = proj3d.proj_transform(
+ centers[0], centers[1], centers[2], renderer.M)
+ self.line.set_data((pep[0][0], pep[0][1]), (pep[1][0], pep[1][1]))
+ self.line.draw(renderer)
+
+ # Grid points where the planes meet
+ xyz0 = []
+ for val in majorLocs:
+ coord = minmax.copy()
+ coord[index] = val
+ xyz0.append(coord)
+
+ # Draw labels
+ peparray = np.asanyarray(pep)
+ # The transAxes transform is used because the Text object
+ # rotates the text relative to the display coordinate system.
+ # Therefore, if we want the labels to remain parallel to the
+ # axis regardless of the aspect ratio, we need to convert the
+ # edge points of the plane to display coordinates and calculate
+ # an angle from that.
+ # TODO: Maybe Text objects should handle this themselves?
+ dx, dy = (self.axes.transAxes.transform([peparray[0:2, 1]]) -
+ self.axes.transAxes.transform([peparray[0:2, 0]]))[0]
+
+ lxyz = 0.5*(edgep1 + edgep2)
+
+ # A rough estimate; points are ambiguous since 3D plots rotate
+ ax_scale = self.axes.bbox.size / self.figure.bbox.size
+ ax_inches = np.multiply(ax_scale, self.figure.get_size_inches())
+ ax_points_estimate = sum(72. * ax_inches)
+ deltas_per_point = 48. / ax_points_estimate
+ default_offset = 21.
+ labeldeltas = (
+ (self.labelpad + default_offset) * deltas_per_point * deltas)
+ axmask = [True, True, True]
+ axmask[index] = False
+ lxyz = move_from_center(lxyz, centers, labeldeltas, axmask)
+ tlx, tly, tlz = proj3d.proj_transform(lxyz[0], lxyz[1], lxyz[2],
+ renderer.M)
+ self.label.set_position((tlx, tly))
+ if self.get_rotate_label(self.label.get_text()):
+ angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
+ self.label.set_rotation(angle)
+ self.label.set_va(info['label']['va'])
+ self.label.set_ha(info['label']['ha'])
+ self.label.draw(renderer)
+
+
+ # Draw Offset text
+
+ # Which of the two edge points do we want to
+ # use for locating the offset text?
+ if juggled[2] == 2 :
+ outeredgep = edgep1
+ outerindex = 0
+ else :
+ outeredgep = edgep2
+ outerindex = 1
+
+ pos = copy.copy(outeredgep)
+ pos = move_from_center(pos, centers, labeldeltas, axmask)
+ olx, oly, olz = proj3d.proj_transform(
+ pos[0], pos[1], pos[2], renderer.M)
+ self.offsetText.set_text( self.major.formatter.get_offset() )
+ self.offsetText.set_position( (olx, oly) )
+ angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
+ self.offsetText.set_rotation(angle)
+ # Must set rotation mode to "anchor" so that
+ # the alignment point is used as the "fulcrum" for rotation.
+ self.offsetText.set_rotation_mode('anchor')
+
+ #----------------------------------------------------------------------
+        # Note: the following code determines the proper alignment of
+        # the offset text. It was determined entirely by trial-and-error
+ # and should not be in any way considered as "the way". There are
+ # still some edge cases where alignment is not quite right, but this
+ # seems to be more of a geometry issue (in other words, I might be
+ # using the wrong reference points).
+ #
+ # (TT, FF, TF, FT) are the shorthand for the tuple of
+ # (centpt[info['tickdir']] <= peparray[info['tickdir'], outerindex],
+ # centpt[index] <= peparray[index, outerindex])
+ #
+ # Three-letters (e.g., TFT, FTT) are short-hand for the array of bools
+ # from the variable 'highs'.
+ # ---------------------------------------------------------------------
+ if centpt[info['tickdir']] > peparray[info['tickdir'], outerindex] :
+ # if FT and if highs has an even number of Trues
+ if (centpt[index] <= peparray[index, outerindex]
+ and ((len(highs.nonzero()[0]) % 2) == 0)) :
+ # Usually, this means align right, except for the FTT case,
+                # in which the offsets for axes 1 and 2 are aligned left.
+ if highs.tolist() == [False, True, True] and index in (1, 2) :
+ align = 'left'
+ else :
+ align = 'right'
+ else :
+ # The FF case
+ align = 'left'
+ else :
+ # if TF and if highs has an even number of Trues
+ if (centpt[index] > peparray[index, outerindex]
+ and ((len(highs.nonzero()[0]) % 2) == 0)) :
+                # Usually means align left, except if it is axis 2
+ if index == 2 :
+ align = 'right'
+ else :
+ align = 'left'
+ else :
+ # The TT case
+ align = 'right'
+
+ self.offsetText.set_va('center')
+ self.offsetText.set_ha(align)
+ self.offsetText.draw(renderer)
+
+ # Draw grid lines
+ if len(xyz0) > 0:
+ # Grid points at end of one plane
+ xyz1 = copy.deepcopy(xyz0)
+ newindex = (index + 1) % 3
+ newval = get_flip_min_max(xyz1[0], newindex, mins, maxs)
+ for i in range(len(majorLocs)):
+ xyz1[i][newindex] = newval
+
+ # Grid points at end of the other plane
+ xyz2 = copy.deepcopy(xyz0)
+ newindex = (index + 2) % 3
+ newval = get_flip_min_max(xyz2[0], newindex, mins, maxs)
+ for i in range(len(majorLocs)):
+ xyz2[i][newindex] = newval
+
+ lines = list(zip(xyz1, xyz0, xyz2))
+ if self.axes._draw_grid:
+ self.gridlines.set_segments(lines)
+ self.gridlines.set_color([info['grid']['color']] * len(lines))
+ self.gridlines.set_linewidth(
+ [info['grid']['linewidth']] * len(lines))
+ self.gridlines.set_linestyle(
+ [info['grid']['linestyle']] * len(lines))
+ self.gridlines.draw(renderer, project=True)
+
+ # Draw ticks
+ tickdir = info['tickdir']
+ tickdelta = deltas[tickdir]
+ if highs[tickdir]:
+ ticksign = 1
+ else:
+ ticksign = -1
+
+ for tick, loc, label in zip(majorTicks, majorLocs, majorLabels):
+ if tick is None:
+ continue
+
+ # Get tick line positions
+ pos = copy.copy(edgep1)
+ pos[index] = loc
+ pos[tickdir] = (
+ edgep1[tickdir]
+ + info['tick']['outward_factor'] * ticksign * tickdelta)
+ x1, y1, z1 = proj3d.proj_transform(pos[0], pos[1], pos[2],
+ renderer.M)
+ pos[tickdir] = (
+ edgep1[tickdir]
+ - info['tick']['inward_factor'] * ticksign * tickdelta)
+ x2, y2, z2 = proj3d.proj_transform(pos[0], pos[1], pos[2],
+ renderer.M)
+
+ # Get position of label
+ default_offset = 8. # A rough estimate
+ labeldeltas = (
+ (tick.get_pad() + default_offset) * deltas_per_point * deltas)
+
+ axmask = [True, True, True]
+ axmask[index] = False
+ pos[tickdir] = edgep1[tickdir]
+ pos = move_from_center(pos, centers, labeldeltas, axmask)
+ lx, ly, lz = proj3d.proj_transform(pos[0], pos[1], pos[2],
+ renderer.M)
+
+ tick_update_position(tick, (x1, x2), (y1, y2), (lx, ly))
+ tick.tick1line.set_linewidth(info['tick']['linewidth'])
+ tick.tick1line.set_color(info['tick']['color'])
+ tick.set_label1(label)
+ tick.set_label2(label)
+ tick.draw(renderer)
+
+ renderer.close_group('axis3d')
+ self.stale = False
+
+ def get_view_interval(self):
+ """return the Interval instance for this 3d axis view limits"""
+ return self.v_interval
+
+ def set_view_interval(self, vmin, vmax, ignore=False):
+ if ignore:
+ self.v_interval = vmin, vmax
+ else:
+ Vmin, Vmax = self.get_view_interval()
+ self.v_interval = min(vmin, Vmin), max(vmax, Vmax)
+
+ # TODO: Get this to work properly when mplot3d supports
+ # the transforms framework.
+ def get_tightbbox(self, renderer) :
+ # Currently returns None so that Axis.get_tightbbox
+ # doesn't return junk info.
+ return None
+
+# Use classes to look at different data limits
+
+class XAxis(Axis):
+ def get_data_interval(self):
+ 'return the Interval instance for this axis data limits'
+ return self.axes.xy_dataLim.intervalx
+
+class YAxis(Axis):
+ def get_data_interval(self):
+ 'return the Interval instance for this axis data limits'
+ return self.axes.xy_dataLim.intervaly
+
+class ZAxis(Axis):
+ def get_data_interval(self):
+ 'return the Interval instance for this axis data limits'
+ return self.axes.zz_dataLim.intervalx
diff --git a/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/proj3d.py b/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/proj3d.py
new file mode 100644
index 00000000000..a084e7f36a4
--- /dev/null
+++ b/contrib/python/matplotlib/py2/mpl_toolkits/mplot3d/proj3d.py
@@ -0,0 +1,203 @@
+# 3dproj.py
+#
+"""
+Various transforms used by the 3D code
+"""
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import six
+from six.moves import zip
+
+import numpy as np
+import numpy.linalg as linalg
+
+
+
+def line2d(p0, p1):
+ """
+ Return 2D equation of line in the form ax+by+c = 0
+ """
+ # x + x1 = 0
+ x0, y0 = p0[:2]
+ x1, y1 = p1[:2]
+ #
+ if x0 == x1:
+ a = -1
+ b = 0
+ c = x1
+ elif y0 == y1:
+ a = 0
+ b = 1
+ c = -y1
+ else:
+ a = (y0-y1)
+ b = (x0-x1)
+ c = (x0*y1 - x1*y0)
+ return a, b, c
+
+def line2d_dist(l, p):
+ """
+    Distance from a line to a point.
+    The line is a tuple of coefficients (a, b, c).
+ """
+ a, b, c = l
+ x0, y0 = p
+ return abs((a*x0 + b*y0 + c)/np.sqrt(a**2+b**2))
+
+
+def line2d_seg_dist(p1, p2, p0):
+ """distance(s) from line defined by p1 - p2 to point(s) p0
+
+ p0[0] = x(s)
+ p0[1] = y(s)
+
+ intersection point p = p1 + u*(p2-p1)
+ and intersection point lies within segment if u is between 0 and 1
+ """
+
+ x21 = p2[0] - p1[0]
+ y21 = p2[1] - p1[1]
+ x01 = np.asarray(p0[0]) - p1[0]
+ y01 = np.asarray(p0[1]) - p1[1]
+
+ u = (x01*x21 + y01*y21) / (x21**2 + y21**2)
+ u = np.clip(u, 0, 1)
+ d = np.sqrt((x01 - u*x21)**2 + (y01 - u*y21)**2)
+
+ return d
+
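+# A minimal numeric sketch for line2d_seg_dist above (not part of the
+# original module):
+#
+#     # distance from the segment (0, 0)-(1, 0) to the point (0.5, 2) is 2;
+#     # for (3, 0) the projection is clamped to the segment end, giving 2.
+#     line2d_seg_dist((0, 0), (1, 0), (0.5, 2))   # -> 2.0
+#     line2d_seg_dist((0, 0), (1, 0), (3, 0))     # -> 2.0
+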
+
+def mod(v):
+ """3d vector length"""
+ return np.sqrt(v[0]**2+v[1]**2+v[2]**2)
+
+def world_transformation(xmin, xmax,
+ ymin, ymax,
+ zmin, zmax):
+ dx, dy, dz = (xmax-xmin), (ymax-ymin), (zmax-zmin)
+ return np.array([
+ [1.0/dx,0,0,-xmin/dx],
+ [0,1.0/dy,0,-ymin/dy],
+ [0,0,1.0/dz,-zmin/dz],
+ [0,0,0,1.0]])
+
+
+def view_transformation(E, R, V):
+ n = (E - R)
+ ## new
+# n /= mod(n)
+# u = np.cross(V,n)
+# u /= mod(u)
+# v = np.cross(n,u)
+# Mr = np.diag([1.]*4)
+# Mt = np.diag([1.]*4)
+# Mr[:3,:3] = u,v,n
+# Mt[:3,-1] = -E
+ ## end new
+
+ ## old
+ n = n / mod(n)
+ u = np.cross(V, n)
+ u = u / mod(u)
+ v = np.cross(n, u)
+ Mr = [[u[0],u[1],u[2],0],
+ [v[0],v[1],v[2],0],
+ [n[0],n[1],n[2],0],
+ [0, 0, 0, 1],
+ ]
+ #
+ Mt = [[1, 0, 0, -E[0]],
+ [0, 1, 0, -E[1]],
+ [0, 0, 1, -E[2]],
+ [0, 0, 0, 1]]
+ ## end old
+
+ return np.dot(Mr, Mt)
+
+def persp_transformation(zfront, zback):
+ a = (zfront+zback)/(zfront-zback)
+ b = -2*(zfront*zback)/(zfront-zback)
+ return np.array([[1,0,0,0],
+ [0,1,0,0],
+ [0,0,a,b],
+ [0,0,-1,0]
+ ])
+
+def ortho_transformation(zfront, zback):
+ # note: w component in the resulting vector will be (zback-zfront), not 1
+ a = -(zfront + zback)
+ b = -(zfront - zback)
+ return np.array([[2,0,0,0],
+ [0,2,0,0],
+ [0,0,-2,0],
+ [0,0,a,b]
+ ])
+
+def proj_transform_vec(vec, M):
+ vecw = np.dot(M, vec)
+ w = vecw[3]
+ # clip here..
+ txs, tys, tzs = vecw[0]/w, vecw[1]/w, vecw[2]/w
+ return txs, tys, tzs
+
+def proj_transform_vec_clip(vec, M):
+ vecw = np.dot(M, vec)
+ w = vecw[3]
+ # clip here.
+ txs, tys, tzs = vecw[0] / w, vecw[1] / w, vecw[2] / w
+ tis = (0 <= vecw[0]) & (vecw[0] <= 1) & (0 <= vecw[1]) & (vecw[1] <= 1)
+ if np.any(tis):
+ tis = vecw[1] < 1
+ return txs, tys, tzs, tis
+
+def inv_transform(xs, ys, zs, M):
+ iM = linalg.inv(M)
+ vec = vec_pad_ones(xs, ys, zs)
+ vecr = np.dot(iM, vec)
+ try:
+ vecr = vecr/vecr[3]
+ except OverflowError:
+ pass
+ return vecr[0], vecr[1], vecr[2]
+
+def vec_pad_ones(xs, ys, zs):
+ return np.array([xs, ys, zs, np.ones_like(xs)])
+
+def proj_transform(xs, ys, zs, M):
+ """
+ Transform the points by the projection matrix
+ """
+ vec = vec_pad_ones(xs, ys, zs)
+ return proj_transform_vec(vec, M)
+
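+# An illustrative composition sketch (not part of the original module).  It
+# mirrors how Axes3D builds its projection matrix from the world, view and
+# perspective transforms and then maps data coordinates with proj_transform;
+# the eye/target/up vectors and clip range below are arbitrary example values:
+#
+#     import numpy as np
+#     world = world_transformation(0, 1, 0, 1, 0, 1)
+#     view = view_transformation(np.array([2.0, 2.0, 2.0]),   # eye
+#                                np.array([0.5, 0.5, 0.5]),   # target
+#                                np.array([0.0, 0.0, 1.0]))   # up
+#     persp = persp_transformation(-1.0, 1.0)
+#     M = np.dot(persp, np.dot(view, world))
+#     txs, tys, tzs = proj_transform(np.array([0.5]), np.array([0.5]),
+#                                    np.array([0.5]), M)
+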
+def proj_transform_clip(xs, ys, zs, M):
+ """
+ Transform the points by the projection matrix
+    and return the clipping result.
+    Returns txs, tys, tzs, tis.
+ """
+ vec = vec_pad_ones(xs, ys, zs)
+ return proj_transform_vec_clip(vec, M)
+transform = proj_transform
+
+def proj_points(points, M):
+ return np.column_stack(proj_trans_points(points, M))
+
+def proj_trans_points(points, M):
+ xs, ys, zs = zip(*points)
+ return proj_transform(xs, ys, zs, M)
+
+def proj_trans_clip_points(points, M):
+ xs, ys, zs = zip(*points)
+ return proj_transform_clip(xs, ys, zs, M)
+
+
+def rot_x(V, alpha):
+ cosa, sina = np.cos(alpha), np.sin(alpha)
+ M1 = np.array([[1,0,0,0],
+ [0,cosa,-sina,0],
+ [0,sina,cosa,0],
+ [0,0,0,1]])
+
+ return np.dot(M1, V)
diff --git a/contrib/python/matplotlib/py2/pylab.py b/contrib/python/matplotlib/py2/pylab.py
new file mode 100644
index 00000000000..f9d135d36e2
--- /dev/null
+++ b/contrib/python/matplotlib/py2/pylab.py
@@ -0,0 +1,3 @@
+from matplotlib.pylab import *
+import matplotlib.pylab
+__doc__ = matplotlib.pylab.__doc__
diff --git a/contrib/python/matplotlib/py2/src/_backend_agg.cpp b/contrib/python/matplotlib/py2/src/_backend_agg.cpp
new file mode 100644
index 00000000000..3dc35f6782c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_backend_agg.cpp
@@ -0,0 +1,234 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+#define NO_IMPORT_ARRAY
+
+#include "_backend_agg.h"
+#include "mplutils.h"
+
+void BufferRegion::to_string_argb(uint8_t *buf)
+{
+ unsigned char *pix;
+ unsigned char tmp;
+ size_t i, j;
+
+ memcpy(buf, data, height * stride);
+
+ for (i = 0; i < (size_t)height; ++i) {
+ pix = buf + i * stride;
+ for (j = 0; j < (size_t)width; ++j) {
+ // Convert rgba to argb
+ tmp = pix[2];
+ pix[2] = pix[0];
+ pix[0] = tmp;
+ pix += 4;
+ }
+ }
+}
+
+RendererAgg::RendererAgg(unsigned int width, unsigned int height, double dpi)
+ : width(width),
+ height(height),
+ dpi(dpi),
+ NUMBYTES(width * height * 4),
+ pixBuffer(NULL),
+ renderingBuffer(),
+ alphaBuffer(NULL),
+ alphaMaskRenderingBuffer(),
+ alphaMask(alphaMaskRenderingBuffer),
+ pixfmtAlphaMask(alphaMaskRenderingBuffer),
+ rendererBaseAlphaMask(),
+ rendererAlphaMask(),
+ scanlineAlphaMask(),
+ slineP8(),
+ slineBin(),
+ pixFmt(),
+ rendererBase(),
+ rendererAA(),
+ rendererBin(),
+ theRasterizer(8192),
+ lastclippath(NULL),
+ _fill_color(agg::rgba(1, 1, 1, 0))
+{
+ unsigned stride(width * 4);
+
+ pixBuffer = new agg::int8u[NUMBYTES];
+ renderingBuffer.attach(pixBuffer, width, height, stride);
+ pixFmt.attach(renderingBuffer);
+ rendererBase.attach(pixFmt);
+ rendererBase.clear(_fill_color);
+ rendererAA.attach(rendererBase);
+ rendererBin.attach(rendererBase);
+ hatch_size = int(dpi);
+ hatchBuffer = new agg::int8u[hatch_size * hatch_size * 4];
+ hatchRenderingBuffer.attach(hatchBuffer, hatch_size, hatch_size, hatch_size * 4);
+}
+
+RendererAgg::~RendererAgg()
+{
+ delete[] hatchBuffer;
+ delete[] alphaBuffer;
+ delete[] pixBuffer;
+}
+
+void RendererAgg::create_alpha_buffers()
+{
+ if (!alphaBuffer) {
+ alphaBuffer = new agg::int8u[width * height];
+ alphaMaskRenderingBuffer.attach(alphaBuffer, width, height, width);
+ rendererBaseAlphaMask.attach(pixfmtAlphaMask);
+ rendererAlphaMask.attach(rendererBaseAlphaMask);
+ }
+}
+
+BufferRegion *RendererAgg::copy_from_bbox(agg::rect_d in_rect)
+{
+ agg::rect_i rect(
+ (int)in_rect.x1, height - (int)in_rect.y2, (int)in_rect.x2, height - (int)in_rect.y1);
+
+ BufferRegion *reg = NULL;
+ reg = new BufferRegion(rect);
+
+ agg::rendering_buffer rbuf;
+ rbuf.attach(reg->get_data(), reg->get_width(), reg->get_height(), reg->get_stride());
+
+ pixfmt pf(rbuf);
+ renderer_base rb(pf);
+ rb.copy_from(renderingBuffer, &rect, -rect.x1, -rect.y1);
+
+ return reg;
+}
+
+void RendererAgg::restore_region(BufferRegion &region)
+{
+ if (region.get_data() == NULL) {
+ throw std::runtime_error("Cannot restore_region from NULL data");
+ }
+
+ agg::rendering_buffer rbuf;
+ rbuf.attach(region.get_data(), region.get_width(), region.get_height(), region.get_stride());
+
+ rendererBase.copy_from(rbuf, 0, region.get_rect().x1, region.get_rect().y1);
+}
+
+// Restore the part of the saved region with offsets
+void
+RendererAgg::restore_region(BufferRegion &region, int xx1, int yy1, int xx2, int yy2, int x, int y )
+{
+ if (region.get_data() == NULL) {
+ throw std::runtime_error("Cannot restore_region from NULL data");
+ }
+
+ agg::rect_i &rrect = region.get_rect();
+
+ agg::rect_i rect(xx1 - rrect.x1, (yy1 - rrect.y1), xx2 - rrect.x1, (yy2 - rrect.y1));
+
+ agg::rendering_buffer rbuf;
+ rbuf.attach(region.get_data(), region.get_width(), region.get_height(), region.get_stride());
+
+ rendererBase.copy_from(rbuf, &rect, x, y);
+}
+
+bool RendererAgg::render_clippath(py::PathIterator &clippath,
+ const agg::trans_affine &clippath_trans)
+{
+ typedef agg::conv_transform<py::PathIterator> transformed_path_t;
+ typedef agg::conv_curve<transformed_path_t> curve_t;
+
+ bool has_clippath = (clippath.total_vertices() != 0);
+
+ if (has_clippath &&
+ (clippath.get_id() != lastclippath || clippath_trans != lastclippath_transform)) {
+ create_alpha_buffers();
+ agg::trans_affine trans(clippath_trans);
+ trans *= agg::trans_affine_scaling(1.0, -1.0);
+ trans *= agg::trans_affine_translation(0.0, (double)height);
+
+ rendererBaseAlphaMask.clear(agg::gray8(0, 0));
+ transformed_path_t transformed_clippath(clippath, trans);
+ curve_t curved_clippath(transformed_clippath);
+ theRasterizer.add_path(curved_clippath);
+ rendererAlphaMask.color(agg::gray8(255, 255));
+ agg::render_scanlines(theRasterizer, scanlineAlphaMask, rendererAlphaMask);
+ lastclippath = clippath.get_id();
+ lastclippath_transform = clippath_trans;
+ }
+
+ return has_clippath;
+}
+
+void RendererAgg::tostring_rgb(uint8_t *buf)
+{
+ // "Return the rendered buffer as an RGB string"
+
+ int row_len = width * 3;
+
+ agg::rendering_buffer renderingBufferTmp;
+ renderingBufferTmp.attach(buf, width, height, row_len);
+
+ agg::color_conv(&renderingBufferTmp, &renderingBuffer, agg::color_conv_rgba32_to_rgb24());
+}
+
+void RendererAgg::tostring_argb(uint8_t *buf)
+{
+ //"Return the rendered buffer as an RGB string";
+
+ int row_len = width * 4;
+
+ agg::rendering_buffer renderingBufferTmp;
+ renderingBufferTmp.attach(buf, width, height, row_len);
+ agg::color_conv(&renderingBufferTmp, &renderingBuffer, agg::color_conv_rgba32_to_argb32());
+}
+
+void RendererAgg::tostring_bgra(uint8_t *buf)
+{
+ //"Return the rendered buffer as an RGB string";
+
+ int row_len = width * 4;
+
+ agg::rendering_buffer renderingBufferTmp;
+ renderingBufferTmp.attach(buf, width, height, row_len);
+
+ agg::color_conv(&renderingBufferTmp, &renderingBuffer, agg::color_conv_rgba32_to_bgra32());
+}
+
+agg::rect_i RendererAgg::get_content_extents()
+{
+ agg::rect_i r(width, height, 0, 0);
+
+ // Looks at the alpha channel to find the minimum extents of the image
+ unsigned char *pixel = pixBuffer + 3;
+ for (int y = 0; y < (int)height; ++y) {
+ for (int x = 0; x < (int)width; ++x) {
+ if (*pixel) {
+ if (x < r.x1)
+ r.x1 = x;
+ if (y < r.y1)
+ r.y1 = y;
+ if (x > r.x2)
+ r.x2 = x;
+ if (y > r.y2)
+ r.y2 = y;
+ }
+ pixel += 4;
+ }
+ }
+
+ if (r.x1 == width && r.x2 == 0) {
+ // The buffer is completely empty.
+ r.x1 = r.y1 = r.x2 = r.y2 = 0;
+ } else {
+ r.x1 = std::max(0, r.x1);
+ r.y1 = std::max(0, r.y1);
+ r.x2 = std::min(r.x2 + 1, (int)width);
+ r.y2 = std::min(r.y2 + 1, (int)height);
+ }
+
+ return r;
+}
+
+void RendererAgg::clear()
+{
+ //"clear the rendered buffer";
+
+ rendererBase.clear(_fill_color);
+}
diff --git a/contrib/python/matplotlib/py2/src/_backend_agg.h b/contrib/python/matplotlib/py2/src/_backend_agg.h
new file mode 100644
index 00000000000..53b73f179ba
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_backend_agg.h
@@ -0,0 +1,1294 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+/* _backend_agg.h
+*/
+
+#ifndef __BACKEND_AGG_H__
+#define __BACKEND_AGG_H__
+
+#include <cmath>
+#include <vector>
+#include <algorithm>
+
+#include "agg_alpha_mask_u8.h"
+#include "agg_conv_curve.h"
+#include "agg_conv_dash.h"
+#include "agg_conv_stroke.h"
+#include "agg_image_accessors.h"
+#include "agg_pixfmt_amask_adaptor.h"
+#include "agg_pixfmt_gray.h"
+#include "agg_pixfmt_rgba.h"
+#include "agg_rasterizer_scanline_aa.h"
+#include "agg_renderer_base.h"
+#include "agg_renderer_scanline.h"
+#include "agg_rendering_buffer.h"
+#include "agg_scanline_bin.h"
+#include "agg_scanline_p.h"
+#include "agg_scanline_storage_aa.h"
+#include "agg_scanline_storage_bin.h"
+#include "agg_scanline_u.h"
+#include "agg_span_allocator.h"
+#include "agg_span_converter.h"
+#include "agg_span_gouraud_rgba.h"
+#include "agg_span_image_filter_gray.h"
+#include "agg_span_image_filter_rgba.h"
+#include "agg_span_interpolator_linear.h"
+#include "agg_span_pattern_rgba.h"
+#include "util/agg_color_conv_rgb8.h"
+
+#include "_backend_agg_basic_types.h"
+#include "path_converters.h"
+#include "array.h"
+#include "agg_workaround.h"
+
+/**********************************************************************/
+
+// a helper class to pass agg::buffer objects around. agg::buffer is
+// a class in the swig wrapper
+class BufferRegion
+{
+ public:
+ BufferRegion(const agg::rect_i &r) : rect(r)
+ {
+ width = r.x2 - r.x1;
+ height = r.y2 - r.y1;
+ stride = width * 4;
+ data = new agg::int8u[stride * height];
+ }
+
+ virtual ~BufferRegion()
+ {
+ delete[] data;
+ };
+
+ agg::int8u *get_data()
+ {
+ return data;
+ }
+
+ agg::rect_i &get_rect()
+ {
+ return rect;
+ }
+
+ int get_width()
+ {
+ return width;
+ }
+
+ int get_height()
+ {
+ return height;
+ }
+
+ int get_stride()
+ {
+ return stride;
+ }
+
+ void to_string_argb(uint8_t *buf);
+
+ private:
+ agg::int8u *data;
+ agg::rect_i rect;
+ int width;
+ int height;
+ int stride;
+
+ private:
+ // prevent copying
+ BufferRegion(const BufferRegion &);
+ BufferRegion &operator=(const BufferRegion &);
+};
+
+#define MARKER_CACHE_SIZE 512
+
+// the renderer
+class RendererAgg
+{
+ public:
+
+ typedef fixed_blender_rgba_plain<agg::rgba8, agg::order_rgba> fixed_blender_rgba32_plain;
+ typedef agg::pixfmt_alpha_blend_rgba<fixed_blender_rgba32_plain, agg::rendering_buffer> pixfmt;
+ typedef agg::renderer_base<pixfmt> renderer_base;
+ typedef agg::renderer_scanline_aa_solid<renderer_base> renderer_aa;
+ typedef agg::renderer_scanline_bin_solid<renderer_base> renderer_bin;
+ typedef agg::rasterizer_scanline_aa<agg::rasterizer_sl_clip_dbl> rasterizer;
+
+ typedef agg::scanline_p8 scanline_p8;
+ typedef agg::scanline_bin scanline_bin;
+ typedef agg::amask_no_clip_gray8 alpha_mask_type;
+ typedef agg::scanline_u8_am<alpha_mask_type> scanline_am;
+
+ typedef agg::renderer_base<agg::pixfmt_gray8> renderer_base_alpha_mask_type;
+ typedef agg::renderer_scanline_aa_solid<renderer_base_alpha_mask_type> renderer_alpha_mask_type;
+
+ /* TODO: Remove facepair_t */
+ typedef std::pair<bool, agg::rgba> facepair_t;
+
+ RendererAgg(unsigned int width, unsigned int height, double dpi);
+
+ virtual ~RendererAgg();
+
+ unsigned int get_width()
+ {
+ return width;
+ }
+
+ unsigned int get_height()
+ {
+ return height;
+ }
+
+ template <class PathIterator>
+ void draw_path(GCAgg &gc, PathIterator &path, agg::trans_affine &trans, agg::rgba &color);
+
+ template <class PathIterator>
+ void draw_markers(GCAgg &gc,
+ PathIterator &marker_path,
+ agg::trans_affine &marker_path_trans,
+ PathIterator &path,
+ agg::trans_affine &trans,
+ agg::rgba face);
+
+ template <class ImageArray>
+ void draw_text_image(GCAgg &gc, ImageArray &image, int x, int y, double angle);
+
+ template <class ImageArray>
+ void draw_image(GCAgg &gc,
+ double x,
+ double y,
+ ImageArray &image);
+
+ template <class PathGenerator,
+ class TransformArray,
+ class OffsetArray,
+ class ColorArray,
+ class LineWidthArray,
+ class AntialiasedArray>
+ void draw_path_collection(GCAgg &gc,
+ agg::trans_affine &master_transform,
+ PathGenerator &path,
+ TransformArray &transforms,
+ OffsetArray &offsets,
+ agg::trans_affine &offset_trans,
+ ColorArray &facecolors,
+ ColorArray &edgecolors,
+ LineWidthArray &linewidths,
+ DashesVector &linestyles,
+ AntialiasedArray &antialiaseds,
+ e_offset_position offset_position);
+
+ template <class CoordinateArray, class OffsetArray, class ColorArray>
+ void draw_quad_mesh(GCAgg &gc,
+ agg::trans_affine &master_transform,
+ unsigned int mesh_width,
+ unsigned int mesh_height,
+ CoordinateArray &coordinates,
+ OffsetArray &offsets,
+ agg::trans_affine &offset_trans,
+ ColorArray &facecolors,
+ bool antialiased,
+ ColorArray &edgecolors);
+
+ template <class PointArray, class ColorArray>
+ void draw_gouraud_triangle(GCAgg &gc,
+ PointArray &points,
+ ColorArray &colors,
+ agg::trans_affine &trans);
+
+ template <class PointArray, class ColorArray>
+ void draw_gouraud_triangles(GCAgg &gc,
+ PointArray &points,
+ ColorArray &colors,
+ agg::trans_affine &trans);
+
+ void tostring_rgb(uint8_t *buf);
+ void tostring_argb(uint8_t *buf);
+ void tostring_bgra(uint8_t *buf);
+ agg::rect_i get_content_extents();
+ void clear();
+
+ BufferRegion *copy_from_bbox(agg::rect_d in_rect);
+ void restore_region(BufferRegion &reg);
+ void restore_region(BufferRegion &region, int xx1, int yy1, int xx2, int yy2, int x, int y);
+
+ unsigned int width, height;
+ double dpi;
+ size_t NUMBYTES; // the number of bytes in buffer
+
+ agg::int8u *pixBuffer;
+ agg::rendering_buffer renderingBuffer;
+
+ agg::int8u *alphaBuffer;
+ agg::rendering_buffer alphaMaskRenderingBuffer;
+ alpha_mask_type alphaMask;
+ agg::pixfmt_gray8 pixfmtAlphaMask;
+ renderer_base_alpha_mask_type rendererBaseAlphaMask;
+ renderer_alpha_mask_type rendererAlphaMask;
+ scanline_am scanlineAlphaMask;
+
+ scanline_p8 slineP8;
+ scanline_bin slineBin;
+ pixfmt pixFmt;
+ renderer_base rendererBase;
+ renderer_aa rendererAA;
+ renderer_bin rendererBin;
+ rasterizer theRasterizer;
+
+ void *lastclippath;
+ agg::trans_affine lastclippath_transform;
+
+ size_t hatch_size;
+ agg::int8u *hatchBuffer;
+ agg::rendering_buffer hatchRenderingBuffer;
+
+ agg::rgba _fill_color;
+
+ protected:
+ inline double points_to_pixels(double points)
+ {
+ return points * dpi / 72.0;
+ }
+
+ template <class R>
+ void set_clipbox(const agg::rect_d &cliprect, R &rasterizer);
+
+ bool render_clippath(py::PathIterator &clippath, const agg::trans_affine &clippath_trans);
+
+ template <class PathIteratorType>
+ void _draw_path(PathIteratorType &path, bool has_clippath, const facepair_t &face, GCAgg &gc);
+
+ template <class PathIterator,
+ class PathGenerator,
+ class TransformArray,
+ class OffsetArray,
+ class ColorArray,
+ class LineWidthArray,
+ class AntialiasedArray>
+ void _draw_path_collection_generic(GCAgg &gc,
+ agg::trans_affine master_transform,
+ const agg::rect_d &cliprect,
+ PathIterator &clippath,
+ const agg::trans_affine &clippath_trans,
+ PathGenerator &path_generator,
+ TransformArray &transforms,
+ OffsetArray &offsets,
+ const agg::trans_affine &offset_trans,
+ ColorArray &facecolors,
+ ColorArray &edgecolors,
+ LineWidthArray &linewidths,
+ DashesVector &linestyles,
+ AntialiasedArray &antialiaseds,
+ e_offset_position offset_position,
+ int check_snap,
+ int has_curves);
+
+ template <class PointArray, class ColorArray>
+ void _draw_gouraud_triangle(PointArray &points,
+ ColorArray &colors,
+ agg::trans_affine trans,
+ bool has_clippath);
+
+ private:
+ void create_alpha_buffers();
+
+ // prevent copying
+ RendererAgg(const RendererAgg &);
+ RendererAgg &operator=(const RendererAgg &);
+};
+
+/***************************************************************************
+ * Implementation
+ */
+
+template <class path_t>
+inline void
+RendererAgg::_draw_path(path_t &path, bool has_clippath, const facepair_t &face, GCAgg &gc)
+{
+ typedef agg::conv_stroke<path_t> stroke_t;
+ typedef agg::conv_dash<path_t> dash_t;
+ typedef agg::conv_stroke<dash_t> stroke_dash_t;
+ typedef agg::pixfmt_amask_adaptor<pixfmt, alpha_mask_type> pixfmt_amask_type;
+ typedef agg::renderer_base<pixfmt_amask_type> amask_ren_type;
+ typedef agg::renderer_scanline_aa_solid<amask_ren_type> amask_aa_renderer_type;
+ typedef agg::renderer_scanline_bin_solid<amask_ren_type> amask_bin_renderer_type;
+
+ // Render face
+ if (face.first) {
+ theRasterizer.add_path(path);
+
+ if (gc.isaa) {
+ if (has_clippath) {
+ pixfmt_amask_type pfa(pixFmt, alphaMask);
+ amask_ren_type r(pfa);
+ amask_aa_renderer_type ren(r);
+ ren.color(face.second);
+ agg::render_scanlines(theRasterizer, scanlineAlphaMask, ren);
+ } else {
+ rendererAA.color(face.second);
+ agg::render_scanlines(theRasterizer, slineP8, rendererAA);
+ }
+ } else {
+ if (has_clippath) {
+ pixfmt_amask_type pfa(pixFmt, alphaMask);
+ amask_ren_type r(pfa);
+ amask_bin_renderer_type ren(r);
+ ren.color(face.second);
+ agg::render_scanlines(theRasterizer, scanlineAlphaMask, ren);
+ } else {
+ rendererBin.color(face.second);
+ agg::render_scanlines(theRasterizer, slineP8, rendererBin);
+ }
+ }
+ }
+
+ // Render hatch
+ if (gc.has_hatchpath()) {
+ // Reset any clipping that may be in effect, since we'll be
+ // drawing the hatch in a scratch buffer at origin (0, 0)
+ theRasterizer.reset_clipping();
+ rendererBase.reset_clipping(true);
+
+ // Create and transform the path
+ typedef agg::conv_transform<py::PathIterator> hatch_path_trans_t;
+ typedef agg::conv_curve<hatch_path_trans_t> hatch_path_curve_t;
+ typedef agg::conv_stroke<hatch_path_curve_t> hatch_path_stroke_t;
+
+ py::PathIterator hatch_path(gc.hatchpath);
+ agg::trans_affine hatch_trans;
+ hatch_trans *= agg::trans_affine_scaling(1.0, -1.0);
+ hatch_trans *= agg::trans_affine_translation(0.0, 1.0);
+ hatch_trans *= agg::trans_affine_scaling(hatch_size, hatch_size);
+ hatch_path_trans_t hatch_path_trans(hatch_path, hatch_trans);
+ hatch_path_curve_t hatch_path_curve(hatch_path_trans);
+ hatch_path_stroke_t hatch_path_stroke(hatch_path_curve);
+ hatch_path_stroke.width(points_to_pixels(gc.hatch_linewidth));
+ hatch_path_stroke.line_cap(agg::square_cap);
+
+ // Render the path into the hatch buffer
+ pixfmt hatch_img_pixf(hatchRenderingBuffer);
+ renderer_base rb(hatch_img_pixf);
+ renderer_aa rs(rb);
+ rb.clear(_fill_color);
+ rs.color(gc.hatch_color);
+
+ theRasterizer.add_path(hatch_path_curve);
+ agg::render_scanlines(theRasterizer, slineP8, rs);
+ theRasterizer.add_path(hatch_path_stroke);
+ agg::render_scanlines(theRasterizer, slineP8, rs);
+
+ // Put clipping back on, if originally set on entry to this
+ // function
+ set_clipbox(gc.cliprect, theRasterizer);
+ if (has_clippath) {
+ render_clippath(gc.clippath.path, gc.clippath.trans);
+ }
+
+ // Transfer the hatch to the main image buffer
+ typedef agg::image_accessor_wrap<pixfmt,
+ agg::wrap_mode_repeat_auto_pow2,
+ agg::wrap_mode_repeat_auto_pow2> img_source_type;
+ typedef agg::span_pattern_rgba<img_source_type> span_gen_type;
+ agg::span_allocator<agg::rgba8> sa;
+ img_source_type img_src(hatch_img_pixf);
+ span_gen_type sg(img_src, 0, 0);
+ theRasterizer.add_path(path);
+
+ if (has_clippath) {
+ pixfmt_amask_type pfa(pixFmt, alphaMask);
+ amask_ren_type ren(pfa);
+ agg::render_scanlines_aa(theRasterizer, slineP8, ren, sa, sg);
+ } else {
+ agg::render_scanlines_aa(theRasterizer, slineP8, rendererBase, sa, sg);
+ }
+ }
+
+ // Render stroke
+ if (gc.linewidth != 0.0) {
+ double linewidth = points_to_pixels(gc.linewidth);
+ if (!gc.isaa) {
+ linewidth = (linewidth < 0.5) ? 0.5 : mpl_round(linewidth);
+ }
+ if (gc.dashes.size() == 0) {
+ stroke_t stroke(path);
+ stroke.width(linewidth);
+ stroke.line_cap(gc.cap);
+ stroke.line_join(gc.join);
+ stroke.miter_limit(points_to_pixels(gc.linewidth));
+ theRasterizer.add_path(stroke);
+ } else {
+ dash_t dash(path);
+ gc.dashes.dash_to_stroke(dash, dpi, gc.isaa);
+ stroke_dash_t stroke(dash);
+ stroke.line_cap(gc.cap);
+ stroke.line_join(gc.join);
+ stroke.width(linewidth);
+ stroke.miter_limit(points_to_pixels(gc.linewidth));
+ theRasterizer.add_path(stroke);
+ }
+
+ if (gc.isaa) {
+ if (has_clippath) {
+ pixfmt_amask_type pfa(pixFmt, alphaMask);
+ amask_ren_type r(pfa);
+ amask_aa_renderer_type ren(r);
+ ren.color(gc.color);
+ agg::render_scanlines(theRasterizer, scanlineAlphaMask, ren);
+ } else {
+ rendererAA.color(gc.color);
+ agg::render_scanlines(theRasterizer, slineP8, rendererAA);
+ }
+ } else {
+ if (has_clippath) {
+ pixfmt_amask_type pfa(pixFmt, alphaMask);
+ amask_ren_type r(pfa);
+ amask_bin_renderer_type ren(r);
+ ren.color(gc.color);
+ agg::render_scanlines(theRasterizer, scanlineAlphaMask, ren);
+ } else {
+ rendererBin.color(gc.color);
+ agg::render_scanlines(theRasterizer, slineBin, rendererBin);
+ }
+ }
+ }
+}
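+
+// In summary, _draw_path makes up to three passes over the same rasterized
+// outline: an optional solid fill (face.first), an optional hatch pattern
+// rendered into a small scratch buffer and tiled across the path, and an
+// optional stroke (gc.linewidth != 0).  Each pass picks the antialiased or
+// binary scanline renderer according to gc.isaa, and is routed through the
+// alpha-mask adaptor instead whenever a clip path is active.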
+
+template <class PathIterator>
+inline void
+RendererAgg::draw_path(GCAgg &gc, PathIterator &path, agg::trans_affine &trans, agg::rgba &color)
+{
+ typedef agg::conv_transform<py::PathIterator> transformed_path_t;
+ typedef PathNanRemover<transformed_path_t> nan_removed_t;
+ typedef PathClipper<nan_removed_t> clipped_t;
+ typedef PathSnapper<clipped_t> snapped_t;
+ typedef PathSimplifier<snapped_t> simplify_t;
+ typedef agg::conv_curve<simplify_t> curve_t;
+ typedef Sketch<curve_t> sketch_t;
+
+ facepair_t face(color.a != 0.0, color);
+
+ theRasterizer.reset_clipping();
+ rendererBase.reset_clipping(true);
+ set_clipbox(gc.cliprect, theRasterizer);
+ bool has_clippath = render_clippath(gc.clippath.path, gc.clippath.trans);
+
+ trans *= agg::trans_affine_scaling(1.0, -1.0);
+ trans *= agg::trans_affine_translation(0.0, (double)height);
+ bool clip = !face.first && !gc.has_hatchpath() && !path.has_curves();
+ bool simplify = path.should_simplify() && clip;
+ double snapping_linewidth = points_to_pixels(gc.linewidth);
+ if (gc.color.a == 0.0) {
+ snapping_linewidth = 0.0;
+ }
+
+ transformed_path_t tpath(path, trans);
+ nan_removed_t nan_removed(tpath, true, path.has_curves());
+ clipped_t clipped(nan_removed, clip && !path.has_curves(), width, height);
+ snapped_t snapped(clipped, gc.snap_mode, path.total_vertices(), snapping_linewidth);
+ simplify_t simplified(snapped, simplify, path.simplify_threshold());
+ curve_t curve(simplified);
+ sketch_t sketch(curve, gc.sketch.scale, gc.sketch.length, gc.sketch.randomness);
+
+ _draw_path(sketch, has_clippath, face, gc);
+}
+
+template <class PathIterator>
+inline void RendererAgg::draw_markers(GCAgg &gc,
+ PathIterator &marker_path,
+ agg::trans_affine &marker_trans,
+ PathIterator &path,
+ agg::trans_affine &trans,
+ agg::rgba color)
+{
+ typedef agg::conv_transform<py::PathIterator> transformed_path_t;
+ typedef PathNanRemover<transformed_path_t> nan_removed_t;
+ typedef PathSnapper<nan_removed_t> snap_t;
+ typedef agg::conv_curve<snap_t> curve_t;
+ typedef agg::conv_stroke<curve_t> stroke_t;
+ typedef agg::pixfmt_amask_adaptor<pixfmt, alpha_mask_type> pixfmt_amask_type;
+ typedef agg::renderer_base<pixfmt_amask_type> amask_ren_type;
+ typedef agg::renderer_scanline_aa_solid<amask_ren_type> amask_aa_renderer_type;
+
+ // Deal with the difference in y-axis direction
+ marker_trans *= agg::trans_affine_scaling(1.0, -1.0);
+
+ trans *= agg::trans_affine_scaling(1.0, -1.0);
+ trans *= agg::trans_affine_translation(0.5, (double)height + 0.5);
+
+ transformed_path_t marker_path_transformed(marker_path, marker_trans);
+ nan_removed_t marker_path_nan_removed(marker_path_transformed, true, marker_path.has_curves());
+ snap_t marker_path_snapped(marker_path_nan_removed,
+ gc.snap_mode,
+ marker_path.total_vertices(),
+ points_to_pixels(gc.linewidth));
+ curve_t marker_path_curve(marker_path_snapped);
+
+ if (!marker_path_snapped.is_snapping()) {
+ // If the path snapper isn't in effect, at least make sure the marker
+ // at (0, 0) is in the center of a pixel. This, importantly, makes
+ // the circle markers look centered around the point they refer to.
+ marker_trans *= agg::trans_affine_translation(0.5, 0.5);
+ }
+
+ transformed_path_t path_transformed(path, trans);
+ nan_removed_t path_nan_removed(path_transformed, false, false);
+ snap_t path_snapped(path_nan_removed, SNAP_FALSE, path.total_vertices(), 0.0);
+ curve_t path_curve(path_snapped);
+ path_curve.rewind(0);
+
+ facepair_t face(color.a != 0.0, color);
+
+ // Maxim's suggestions for cached scanlines
+ agg::scanline_storage_aa8 scanlines;
+ theRasterizer.reset();
+ theRasterizer.reset_clipping();
+ rendererBase.reset_clipping(true);
+ agg::rect_i marker_size(0x7FFFFFFF, 0x7FFFFFFF, -0x7FFFFFFF, -0x7FFFFFFF);
+
+ agg::int8u staticFillCache[MARKER_CACHE_SIZE];
+ agg::int8u staticStrokeCache[MARKER_CACHE_SIZE];
+ agg::int8u *fillCache = staticFillCache;
+ agg::int8u *strokeCache = staticStrokeCache;
+
+ try
+ {
+ unsigned fillSize = 0;
+ if (face.first) {
+ theRasterizer.add_path(marker_path_curve);
+ agg::render_scanlines(theRasterizer, slineP8, scanlines);
+ fillSize = scanlines.byte_size();
+ if (fillSize >= MARKER_CACHE_SIZE) {
+ fillCache = new agg::int8u[fillSize];
+ }
+ scanlines.serialize(fillCache);
+ marker_size = agg::rect_i(scanlines.min_x(),
+ scanlines.min_y(),
+ scanlines.max_x(),
+ scanlines.max_y());
+ }
+
+ stroke_t stroke(marker_path_curve);
+ stroke.width(points_to_pixels(gc.linewidth));
+ stroke.line_cap(gc.cap);
+ stroke.line_join(gc.join);
+ stroke.miter_limit(points_to_pixels(gc.linewidth));
+ theRasterizer.reset();
+ theRasterizer.add_path(stroke);
+ agg::render_scanlines(theRasterizer, slineP8, scanlines);
+ unsigned strokeSize = scanlines.byte_size();
+ if (strokeSize >= MARKER_CACHE_SIZE) {
+ strokeCache = new agg::int8u[strokeSize];
+ }
+ scanlines.serialize(strokeCache);
+ marker_size = agg::rect_i(std::min(marker_size.x1, scanlines.min_x()),
+ std::min(marker_size.y1, scanlines.min_y()),
+ std::max(marker_size.x2, scanlines.max_x()),
+ std::max(marker_size.y2, scanlines.max_y()));
+
+ theRasterizer.reset_clipping();
+ rendererBase.reset_clipping(true);
+ set_clipbox(gc.cliprect, rendererBase);
+ bool has_clippath = render_clippath(gc.clippath.path, gc.clippath.trans);
+
+ double x, y;
+
+ agg::serialized_scanlines_adaptor_aa8 sa;
+ agg::serialized_scanlines_adaptor_aa8::embedded_scanline sl;
+
+ agg::rect_d clipping_rect(-1.0 - marker_size.x2,
+ -1.0 - marker_size.y2,
+ 1.0 + width - marker_size.x1,
+ 1.0 + height - marker_size.y1);
+
+ if (has_clippath) {
+ while (path_curve.vertex(&x, &y) != agg::path_cmd_stop) {
+ if (!(std::isfinite(x) && std::isfinite(y))) {
+ continue;
+ }
+
+ /* These values are correctly snapped above -- so we don't want
+ to round here, we really only want to truncate */
+ x = floor(x);
+ y = floor(y);
+
+ // Cull points outside the boundary of the image.
+ // Values that are too large may overflow and create
+ // segfaults.
+ // http://sourceforge.net/tracker/?func=detail&aid=2865490&group_id=80706&atid=560720
+ if (!clipping_rect.hit_test(x, y)) {
+ continue;
+ }
+
+ pixfmt_amask_type pfa(pixFmt, alphaMask);
+ amask_ren_type r(pfa);
+ amask_aa_renderer_type ren(r);
+
+ if (face.first) {
+ ren.color(face.second);
+ sa.init(fillCache, fillSize, x, y);
+ agg::render_scanlines(sa, sl, ren);
+ }
+ ren.color(gc.color);
+ sa.init(strokeCache, strokeSize, x, y);
+ agg::render_scanlines(sa, sl, ren);
+ }
+ } else {
+ while (path_curve.vertex(&x, &y) != agg::path_cmd_stop) {
+ if (!(std::isfinite(x) && std::isfinite(y))) {
+ continue;
+ }
+
+ /* These values are correctly snapped above -- so we don't want
+ to round here, we really only want to truncate */
+ x = floor(x);
+ y = floor(y);
+
+ // Cull points outside the boundary of the image.
+ // Values that are too large may overflow and create
+ // segfaults.
+ // http://sourceforge.net/tracker/?func=detail&aid=2865490&group_id=80706&atid=560720
+ if (!clipping_rect.hit_test(x, y)) {
+ continue;
+ }
+
+ if (face.first) {
+ rendererAA.color(face.second);
+ sa.init(fillCache, fillSize, x, y);
+ agg::render_scanlines(sa, sl, rendererAA);
+ }
+
+ rendererAA.color(gc.color);
+ sa.init(strokeCache, strokeSize, x, y);
+ agg::render_scanlines(sa, sl, rendererAA);
+ }
+ }
+ }
+ catch (...)
+ {
+ if (fillCache != staticFillCache)
+ delete[] fillCache;
+ if (strokeCache != staticStrokeCache)
+ delete[] strokeCache;
+ theRasterizer.reset_clipping();
+ rendererBase.reset_clipping(true);
+ throw;
+ }
+
+ if (fillCache != staticFillCache)
+ delete[] fillCache;
+ if (strokeCache != staticStrokeCache)
+ delete[] strokeCache;
+
+ theRasterizer.reset_clipping();
+ rendererBase.reset_clipping(true);
+}
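+
+// draw_markers rasterizes the marker's fill and stroke once each into
+// serialized scanline caches (spilling to heap buffers when they exceed
+// MARKER_CACHE_SIZE) and then replays those cached scanlines at every vertex
+// of the transformed path, after flooring the coordinates and culling
+// non-finite or far off-canvas points.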
+
+/**
+ * This is a custom span generator that converts spans in the
+ * 8-bit inverted greyscale font buffer to rgba that agg can use.
+ */
+template <class ChildGenerator>
+class font_to_rgba
+{
+ public:
+ typedef ChildGenerator child_type;
+ typedef agg::rgba8 color_type;
+ typedef typename child_type::color_type child_color_type;
+ typedef agg::span_allocator<child_color_type> span_alloc_type;
+
+ private:
+ child_type *_gen;
+ color_type _color;
+ span_alloc_type _allocator;
+
+ public:
+ font_to_rgba(child_type *gen, color_type color) : _gen(gen), _color(color)
+ {
+ }
+
+ inline void generate(color_type *output_span, int x, int y, unsigned len)
+ {
+ _allocator.allocate(len);
+ child_color_type *input_span = _allocator.span();
+ _gen->generate(input_span, x, y, len);
+
+ do {
+ *output_span = _color;
+ output_span->a = ((unsigned int)_color.a * (unsigned int)input_span->v) >> 8;
+ ++output_span;
+ ++input_span;
+ } while (--len);
+ }
+
+ void prepare()
+ {
+ _gen->prepare();
+ }
+};
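+
+// The blend above scales the text color's alpha by the 8-bit coverage value
+// from the font buffer: output alpha = (color.a * v) >> 8.  For example,
+// coverage v = 128 under an opaque color (a = 255) gives (255 * 128) >> 8 =
+// 127, i.e. roughly half-transparent.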
+
+template <class ImageArray>
+inline void RendererAgg::draw_text_image(GCAgg &gc, ImageArray &image, int x, int y, double angle)
+{
+ typedef agg::span_allocator<agg::rgba8> color_span_alloc_type;
+ typedef agg::span_interpolator_linear<> interpolator_type;
+ typedef agg::image_accessor_clip<agg::pixfmt_gray8> image_accessor_type;
+ typedef agg::span_image_filter_gray<image_accessor_type, interpolator_type> image_span_gen_type;
+ typedef font_to_rgba<image_span_gen_type> span_gen_type;
+ typedef agg::renderer_scanline_aa<renderer_base, color_span_alloc_type, span_gen_type>
+ renderer_type;
+
+ theRasterizer.reset_clipping();
+ rendererBase.reset_clipping(true);
+ if (angle != 0.0) {
+ agg::rendering_buffer srcbuf(
+ image.data(), (unsigned)image.dim(1),
+ (unsigned)image.dim(0), (unsigned)image.dim(1));
+ agg::pixfmt_gray8 pixf_img(srcbuf);
+
+ set_clipbox(gc.cliprect, theRasterizer);
+
+ agg::trans_affine mtx;
+ mtx *= agg::trans_affine_translation(0, -image.dim(0));
+ mtx *= agg::trans_affine_rotation(-angle * agg::pi / 180.0);
+ mtx *= agg::trans_affine_translation(x, y);
+
+ agg::path_storage rect;
+ rect.move_to(0, 0);
+ rect.line_to(image.dim(1), 0);
+ rect.line_to(image.dim(1), image.dim(0));
+ rect.line_to(0, image.dim(0));
+ rect.line_to(0, 0);
+ agg::conv_transform<agg::path_storage> rect2(rect, mtx);
+
+ agg::trans_affine inv_mtx(mtx);
+ inv_mtx.invert();
+
+ agg::image_filter_lut filter;
+ filter.calculate(agg::image_filter_spline36());
+ interpolator_type interpolator(inv_mtx);
+ color_span_alloc_type sa;
+ image_accessor_type ia(pixf_img, agg::gray8(0));
+ image_span_gen_type image_span_generator(ia, interpolator, filter);
+ span_gen_type output_span_generator(&image_span_generator, gc.color);
+ renderer_type ri(rendererBase, sa, output_span_generator);
+
+ theRasterizer.add_path(rect2);
+ agg::render_scanlines(theRasterizer, slineP8, ri);
+ } else {
+ agg::rect_i fig, text;
+
+ fig.init(0, 0, width, height);
+ text.init(x, y - image.dim(0), x + image.dim(1), y);
+ text.clip(fig);
+
+ if (gc.cliprect.x1 != 0.0 || gc.cliprect.y1 != 0.0 || gc.cliprect.x2 != 0.0 || gc.cliprect.y2 != 0.0) {
+ agg::rect_i clip;
+
+ clip.init(int(mpl_round(gc.cliprect.x1)),
+ int(mpl_round(height - gc.cliprect.y2)),
+ int(mpl_round(gc.cliprect.x2)),
+ int(mpl_round(height - gc.cliprect.y1)));
+ text.clip(clip);
+ }
+
+ if (text.x2 > text.x1) {
+ for (int yi = text.y1; yi < text.y2; ++yi) {
+ pixFmt.blend_solid_hspan(text.x1, yi, (text.x2 - text.x1), gc.color,
+ &image(yi - (y - image.dim(0)), text.x1 - x));
+ }
+ }
+ }
+}
+
+class span_conv_alpha
+{
+ public:
+ typedef agg::rgba8 color_type;
+
+ double m_alpha;
+
+ span_conv_alpha(double alpha) : m_alpha(alpha)
+ {
+ }
+
+ void prepare()
+ {
+ }
+ void generate(color_type *span, int x, int y, unsigned len) const
+ {
+ do {
+ span->a = (agg::int8u)((double)span->a * m_alpha);
+ ++span;
+ } while (--len);
+ }
+};
+
+template <class ImageArray>
+inline void RendererAgg::draw_image(GCAgg &gc,
+ double x,
+ double y,
+ ImageArray &image)
+{
+ double alpha = gc.alpha;
+
+ theRasterizer.reset_clipping();
+ rendererBase.reset_clipping(true);
+ set_clipbox(gc.cliprect, theRasterizer);
+ bool has_clippath = render_clippath(gc.clippath.path, gc.clippath.trans);
+
+ agg::rendering_buffer buffer;
+ buffer.attach(
+ image.data(), (unsigned)image.dim(1), (unsigned)image.dim(0), -(int)image.dim(1) * 4);
+ pixfmt pixf(buffer);
+
+ if (has_clippath) {
+ agg::trans_affine mtx;
+ agg::path_storage rect;
+
+ mtx *= agg::trans_affine_translation((int)x, (int)(height - (y + image.dim(0))));
+
+ rect.move_to(0, 0);
+ rect.line_to(image.dim(1), 0);
+ rect.line_to(image.dim(1), image.dim(0));
+ rect.line_to(0, image.dim(0));
+ rect.line_to(0, 0);
+
+ agg::conv_transform<agg::path_storage> rect2(rect, mtx);
+
+ agg::trans_affine inv_mtx(mtx);
+ inv_mtx.invert();
+
+ typedef agg::span_allocator<agg::rgba8> color_span_alloc_type;
+ typedef agg::image_accessor_clip<pixfmt> image_accessor_type;
+ typedef agg::span_interpolator_linear<> interpolator_type;
+ typedef agg::span_image_filter_rgba_nn<image_accessor_type, interpolator_type>
+ image_span_gen_type;
+ typedef agg::span_converter<image_span_gen_type, span_conv_alpha> span_conv;
+
+ color_span_alloc_type sa;
+ image_accessor_type ia(pixf, agg::rgba8(0, 0, 0, 0));
+ interpolator_type interpolator(inv_mtx);
+ image_span_gen_type image_span_generator(ia, interpolator);
+ span_conv_alpha conv_alpha(alpha);
+ span_conv spans(image_span_generator, conv_alpha);
+
+ typedef agg::pixfmt_amask_adaptor<pixfmt, alpha_mask_type> pixfmt_amask_type;
+ typedef agg::renderer_base<pixfmt_amask_type> amask_ren_type;
+ typedef agg::renderer_scanline_aa<amask_ren_type, color_span_alloc_type, span_conv>
+ renderer_type_alpha;
+
+ pixfmt_amask_type pfa(pixFmt, alphaMask);
+ amask_ren_type r(pfa);
+ renderer_type_alpha ri(r, sa, spans);
+
+ theRasterizer.add_path(rect2);
+ agg::render_scanlines(theRasterizer, scanlineAlphaMask, ri);
+ } else {
+ set_clipbox(gc.cliprect, rendererBase);
+ rendererBase.blend_from(
+ pixf, 0, (int)x, (int)(height - (y + image.dim(0))), (agg::int8u)(alpha * 255));
+ }
+
+ rendererBase.reset_clipping(true);
+}
+
+template <class PathIterator,
+ class PathGenerator,
+ class TransformArray,
+ class OffsetArray,
+ class ColorArray,
+ class LineWidthArray,
+ class AntialiasedArray>
+inline void RendererAgg::_draw_path_collection_generic(GCAgg &gc,
+ agg::trans_affine master_transform,
+ const agg::rect_d &cliprect,
+ PathIterator &clippath,
+ const agg::trans_affine &clippath_trans,
+ PathGenerator &path_generator,
+ TransformArray &transforms,
+ OffsetArray &offsets,
+ const agg::trans_affine &offset_trans,
+ ColorArray &facecolors,
+ ColorArray &edgecolors,
+ LineWidthArray &linewidths,
+ DashesVector &linestyles,
+ AntialiasedArray &antialiaseds,
+ e_offset_position offset_position,
+ int check_snap,
+ int has_curves)
+{
+ typedef agg::conv_transform<typename PathGenerator::path_iterator> transformed_path_t;
+ typedef PathNanRemover<transformed_path_t> nan_removed_t;
+ typedef PathClipper<nan_removed_t> clipped_t;
+ typedef PathSnapper<clipped_t> snapped_t;
+ typedef agg::conv_curve<snapped_t> snapped_curve_t;
+ typedef agg::conv_curve<clipped_t> curve_t;
+
+ size_t Npaths = path_generator.num_paths();
+ size_t Noffsets = offsets.size();
+ size_t N = std::max(Npaths, Noffsets);
+
+ size_t Ntransforms = transforms.size();
+ size_t Nfacecolors = facecolors.size();
+ size_t Nedgecolors = edgecolors.size();
+ size_t Nlinewidths = linewidths.size();
+ size_t Nlinestyles = std::min(linestyles.size(), N);
+ size_t Naa = antialiaseds.size();
+
+ if ((Nfacecolors == 0 && Nedgecolors == 0) || Npaths == 0) {
+ return;
+ }
+
+ // Handle any clipping globally
+ theRasterizer.reset_clipping();
+ rendererBase.reset_clipping(true);
+ set_clipbox(cliprect, theRasterizer);
+ bool has_clippath = render_clippath(clippath, clippath_trans);
+
+ // Set some defaults, assuming no face or edge
+ gc.linewidth = 0.0;
+ facepair_t face;
+ face.first = Nfacecolors != 0;
+ agg::trans_affine trans;
+
+ for (int i = 0; i < (int)N; ++i) {
+ typename PathGenerator::path_iterator path = path_generator(i);
+
+ if (Ntransforms) {
+ int it = i % Ntransforms;
+ trans = agg::trans_affine(transforms(it, 0, 0),
+ transforms(it, 1, 0),
+ transforms(it, 0, 1),
+ transforms(it, 1, 1),
+ transforms(it, 0, 2),
+ transforms(it, 1, 2));
+ trans *= master_transform;
+ } else {
+ trans = master_transform;
+ }
+
+ if (Noffsets) {
+ double xo = offsets(i % Noffsets, 0);
+ double yo = offsets(i % Noffsets, 1);
+ offset_trans.transform(&xo, &yo);
+ if (offset_position == OFFSET_POSITION_DATA) {
+ trans = agg::trans_affine_translation(xo, yo) * trans;
+ } else {
+ trans *= agg::trans_affine_translation(xo, yo);
+ }
+ }
+
+ // These transformations must be done post-offsets
+ trans *= agg::trans_affine_scaling(1.0, -1.0);
+ trans *= agg::trans_affine_translation(0.0, (double)height);
+
+ if (Nfacecolors) {
+ int ic = i % Nfacecolors;
+ face.second = agg::rgba(facecolors(ic, 0), facecolors(ic, 1), facecolors(ic, 2), facecolors(ic, 3));
+ }
+
+ if (Nedgecolors) {
+ int ic = i % Nedgecolors;
+ gc.color = agg::rgba(edgecolors(ic, 0), edgecolors(ic, 1), edgecolors(ic, 2), edgecolors(ic, 3));
+
+ if (Nlinewidths) {
+ gc.linewidth = linewidths(i % Nlinewidths);
+ } else {
+ gc.linewidth = 1.0;
+ }
+ if (Nlinestyles) {
+ gc.dashes = linestyles[i % Nlinestyles];
+ }
+ }
+
+ bool do_clip = !face.first && !gc.has_hatchpath() && !has_curves;
+
+ if (check_snap) {
+ gc.isaa = antialiaseds(i % Naa);
+
+ transformed_path_t tpath(path, trans);
+ nan_removed_t nan_removed(tpath, true, has_curves);
+ clipped_t clipped(nan_removed, do_clip && !has_curves, width, height);
+ snapped_t snapped(
+ clipped, gc.snap_mode, path.total_vertices(), points_to_pixels(gc.linewidth));
+ if (has_curves) {
+ snapped_curve_t curve(snapped);
+ _draw_path(curve, has_clippath, face, gc);
+ } else {
+ _draw_path(snapped, has_clippath, face, gc);
+ }
+ } else {
+ gc.isaa = antialiaseds(i % Naa);
+
+ transformed_path_t tpath(path, trans);
+ nan_removed_t nan_removed(tpath, true, has_curves);
+ clipped_t clipped(nan_removed, do_clip, width, height);
+ if (has_curves) {
+ curve_t curve(clipped);
+ _draw_path(curve, has_clippath, face, gc);
+ } else {
+ _draw_path(clipped, has_clippath, face, gc);
+ }
+ }
+ }
+}
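+
+// Per-item properties in _draw_path_collection_generic cycle modulo their
+// array lengths over N = max(Npaths, Noffsets) iterations.  For example, with
+// 6 paths and 2 facecolors the faces take colors 0, 1, 0, 1, 0, 1, while a
+// single-element transforms or linewidths array applies to every item.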
+
+template <class PathGenerator,
+ class TransformArray,
+ class OffsetArray,
+ class ColorArray,
+ class LineWidthArray,
+ class AntialiasedArray>
+inline void RendererAgg::draw_path_collection(GCAgg &gc,
+ agg::trans_affine &master_transform,
+ PathGenerator &path,
+ TransformArray &transforms,
+ OffsetArray &offsets,
+ agg::trans_affine &offset_trans,
+ ColorArray &facecolors,
+ ColorArray &edgecolors,
+ LineWidthArray &linewidths,
+ DashesVector &linestyles,
+ AntialiasedArray &antialiaseds,
+ e_offset_position offset_position)
+{
+ _draw_path_collection_generic(gc,
+ master_transform,
+ gc.cliprect,
+ gc.clippath.path,
+ gc.clippath.trans,
+ path,
+ transforms,
+ offsets,
+ offset_trans,
+ facecolors,
+ edgecolors,
+ linewidths,
+ linestyles,
+ antialiaseds,
+ offset_position,
+ 1,
+ 1);
+}
+
+template <class CoordinateArray>
+class QuadMeshGenerator
+{
+ unsigned m_meshWidth;
+ unsigned m_meshHeight;
+ CoordinateArray m_coordinates;
+
+ class QuadMeshPathIterator
+ {
+ unsigned m_iterator;
+ unsigned m_m, m_n;
+ const CoordinateArray *m_coordinates;
+
+ public:
+ QuadMeshPathIterator(unsigned m, unsigned n, const CoordinateArray *coordinates)
+ : m_iterator(0), m_m(m), m_n(n), m_coordinates(coordinates)
+ {
+ }
+
+ private:
+ inline unsigned vertex(unsigned idx, double *x, double *y)
+ {
+ size_t m = m_m + ((idx & 0x2) >> 1);
+ size_t n = m_n + (((idx + 1) & 0x2) >> 1);
+ *x = (*m_coordinates)(n, m, 0);
+ *y = (*m_coordinates)(n, m, 1);
+ return (idx) ? agg::path_cmd_line_to : agg::path_cmd_move_to;
+ }
+
+ public:
+ inline unsigned vertex(double *x, double *y)
+ {
+ if (m_iterator >= total_vertices()) {
+ return agg::path_cmd_stop;
+ }
+ return vertex(m_iterator++, x, y);
+ }
+
+ inline void rewind(unsigned path_id)
+ {
+ m_iterator = path_id;
+ }
+
+ inline unsigned total_vertices()
+ {
+ return 5;
+ }
+
+ inline bool should_simplify()
+ {
+ return false;
+ }
+ };
+
+ public:
+ typedef QuadMeshPathIterator path_iterator;
+
+ inline QuadMeshGenerator(unsigned meshWidth, unsigned meshHeight, CoordinateArray &coordinates)
+ : m_meshWidth(meshWidth), m_meshHeight(meshHeight), m_coordinates(coordinates)
+ {
+ }
+
+ inline size_t num_paths() const
+ {
+ return m_meshWidth * m_meshHeight;
+ }
+
+ inline path_iterator operator()(size_t i) const
+ {
+ return QuadMeshPathIterator(i % m_meshWidth, i / m_meshWidth, &m_coordinates);
+ }
+};
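+
+// Quad i of the mesh corresponds to grid cell (i % meshWidth, i / meshWidth)
+// and is emitted as a closed five-vertex path.  The bit arithmetic in
+// vertex() visits the cell's corners in the order
+// (m, n) -> (m, n+1) -> (m+1, n+1) -> (m+1, n) -> (m, n), where m is the
+// column index and n the row index into the coordinates array.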
+
+template <class CoordinateArray, class OffsetArray, class ColorArray>
+inline void RendererAgg::draw_quad_mesh(GCAgg &gc,
+ agg::trans_affine &master_transform,
+ unsigned int mesh_width,
+ unsigned int mesh_height,
+ CoordinateArray &coordinates,
+ OffsetArray &offsets,
+ agg::trans_affine &offset_trans,
+ ColorArray &facecolors,
+ bool antialiased,
+ ColorArray &edgecolors)
+{
+ QuadMeshGenerator<CoordinateArray> path_generator(mesh_width, mesh_height, coordinates);
+
+ array::empty<double> transforms;
+ array::scalar<double, 1> linewidths(gc.linewidth);
+ array::scalar<uint8_t, 1> antialiaseds(antialiased);
+ DashesVector linestyles;
+ ColorArray *edgecolors_ptr = &edgecolors;
+
+ if (edgecolors.size() == 0) {
+ if (antialiased) {
+ edgecolors_ptr = &facecolors;
+ }
+ }
+
+ _draw_path_collection_generic(gc,
+ master_transform,
+ gc.cliprect,
+ gc.clippath.path,
+ gc.clippath.trans,
+ path_generator,
+ transforms,
+ offsets,
+ offset_trans,
+ facecolors,
+ *edgecolors_ptr,
+ linewidths,
+ linestyles,
+ antialiaseds,
+ OFFSET_POSITION_FIGURE,
+ 0,
+ 0);
+}
+
+template <class PointArray, class ColorArray>
+inline void RendererAgg::_draw_gouraud_triangle(PointArray &points,
+ ColorArray &colors,
+ agg::trans_affine trans,
+ bool has_clippath)
+{
+ typedef agg::rgba8 color_t;
+ typedef agg::span_gouraud_rgba<color_t> span_gen_t;
+ typedef agg::span_allocator<color_t> span_alloc_t;
+
+ trans *= agg::trans_affine_scaling(1.0, -1.0);
+ trans *= agg::trans_affine_translation(0.0, (double)height);
+
+ double tpoints[3][2];
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ tpoints[i][j] = points(i, j);
+ }
+ trans.transform(&tpoints[i][0], &tpoints[i][1]);
+ }
+
+ span_alloc_t span_alloc;
+ span_gen_t span_gen;
+
+ span_gen.colors(agg::rgba(colors(0, 0), colors(0, 1), colors(0, 2), colors(0, 3)),
+ agg::rgba(colors(1, 0), colors(1, 1), colors(1, 2), colors(1, 3)),
+ agg::rgba(colors(2, 0), colors(2, 1), colors(2, 2), colors(2, 3)));
+ span_gen.triangle(tpoints[0][0],
+ tpoints[0][1],
+ tpoints[1][0],
+ tpoints[1][1],
+ tpoints[2][0],
+ tpoints[2][1],
+ 0.5);
+
+ theRasterizer.add_path(span_gen);
+
+ if (has_clippath) {
+ typedef agg::pixfmt_amask_adaptor<pixfmt, alpha_mask_type> pixfmt_amask_type;
+ typedef agg::renderer_base<pixfmt_amask_type> amask_ren_type;
+ typedef agg::renderer_scanline_aa<amask_ren_type, span_alloc_t, span_gen_t>
+ amask_aa_renderer_type;
+
+ pixfmt_amask_type pfa(pixFmt, alphaMask);
+ amask_ren_type r(pfa);
+ amask_aa_renderer_type ren(r, span_alloc, span_gen);
+ agg::render_scanlines(theRasterizer, scanlineAlphaMask, ren);
+ } else {
+ agg::render_scanlines_aa(theRasterizer, slineP8, rendererBase, span_alloc, span_gen);
+ }
+}
+
+template <class PointArray, class ColorArray>
+inline void RendererAgg::draw_gouraud_triangle(GCAgg &gc,
+ PointArray &points,
+ ColorArray &colors,
+ agg::trans_affine &trans)
+{
+ theRasterizer.reset_clipping();
+ rendererBase.reset_clipping(true);
+ set_clipbox(gc.cliprect, theRasterizer);
+ bool has_clippath = render_clippath(gc.clippath.path, gc.clippath.trans);
+
+ _draw_gouraud_triangle(points, colors, trans, has_clippath);
+}
+
+template <class PointArray, class ColorArray>
+inline void RendererAgg::draw_gouraud_triangles(GCAgg &gc,
+ PointArray &points,
+ ColorArray &colors,
+ agg::trans_affine &trans)
+{
+ theRasterizer.reset_clipping();
+ rendererBase.reset_clipping(true);
+ set_clipbox(gc.cliprect, theRasterizer);
+ bool has_clippath = render_clippath(gc.clippath.path, gc.clippath.trans);
+
+ for (int i = 0; i < points.dim(0); ++i) {
+ typename PointArray::sub_t point = points.subarray(i);
+ typename ColorArray::sub_t color = colors.subarray(i);
+
+ _draw_gouraud_triangle(point, color, trans, has_clippath);
+ }
+}
+
+template <class R>
+void RendererAgg::set_clipbox(const agg::rect_d &cliprect, R &rasterizer)
+{
+ // set the clip rectangle from the gc
+
+ if (cliprect.x1 != 0.0 || cliprect.y1 != 0.0 || cliprect.x2 != 0.0 || cliprect.y2 != 0.0) {
+ rasterizer.clip_box(std::max(int(floor(cliprect.x1 + 0.5)), 0),
+ std::max(int(floor(height - cliprect.y1 + 0.5)), 0),
+ std::min(int(floor(cliprect.x2 + 0.5)), int(width)),
+ std::min(int(floor(height - cliprect.y2 + 0.5)), int(height)));
+ } else {
+ rasterizer.clip_box(0, 0, width, height);
+ }
+}
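+
+// A cliprect of all zeros means "no clip rectangle set", so the clip box
+// falls back to the full canvas.  The y coordinates are flipped via
+// (height - y) because the Agg buffer's origin is at the top-left, whereas
+// the incoming rectangles use a bottom-left origin.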
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/_backend_agg_basic_types.h b/contrib/python/matplotlib/py2/src/_backend_agg_basic_types.h
new file mode 100644
index 00000000000..74a318e7d24
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_backend_agg_basic_types.h
@@ -0,0 +1,127 @@
+#ifndef __BACKEND_AGG_BASIC_TYPES_H__
+#define __BACKEND_AGG_BASIC_TYPES_H__
+
+/* Contains some simple types from the Agg backend that are also used
+ by other modules */
+
+#include <vector>
+
+#include "agg_color_rgba.h"
+#include "agg_math_stroke.h"
+#include "path_converters.h"
+
+#include "py_adaptors.h"
+
+struct ClipPath
+{
+ py::PathIterator path;
+ agg::trans_affine trans;
+};
+
+struct SketchParams
+{
+ double scale;
+ double length;
+ double randomness;
+};
+
+class Dashes
+{
+ typedef std::vector<std::pair<double, double> > dash_t;
+ double dash_offset;
+ dash_t dashes;
+
+ public:
+ double get_dash_offset() const
+ {
+ return dash_offset;
+ }
+ void set_dash_offset(double x)
+ {
+ dash_offset = x;
+ }
+ void add_dash_pair(double length, double skip)
+ {
+ dashes.push_back(std::make_pair(length, skip));
+ }
+ size_t size() const
+ {
+ return dashes.size();
+ }
+
+ template <class T>
+ void dash_to_stroke(T &stroke, double dpi, bool isaa)
+ {
+ for (dash_t::const_iterator i = dashes.begin(); i != dashes.end(); ++i) {
+ double val0 = i->first;
+ double val1 = i->second;
+ val0 = val0 * dpi / 72.0;
+ val1 = val1 * dpi / 72.0;
+ if (!isaa) {
+ val0 = (int)val0 + 0.5;
+ val1 = (int)val1 + 0.5;
+ }
+ stroke.add_dash(val0, val1);
+ }
+ stroke.dash_start(get_dash_offset() * dpi / 72.0);
+ }
+};
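+
+// For scale: dash_to_stroke converts each (on, off) pair from points to
+// pixels, so a (6.0, 3.0) pt pattern at dpi = 144 becomes add_dash(12.0, 6.0);
+// with antialiasing off, both values are truncated and shifted to the
+// half-pixel grid (12.5, 6.5).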
+
+typedef std::vector<Dashes> DashesVector;
+
+enum e_offset_position {
+ OFFSET_POSITION_FIGURE,
+ OFFSET_POSITION_DATA
+};
+
+class GCAgg
+{
+ public:
+ GCAgg()
+ : linewidth(1.0),
+ alpha(1.0),
+ cap(agg::butt_cap),
+ join(agg::round_join),
+ snap_mode(SNAP_FALSE)
+ {
+ }
+
+ ~GCAgg()
+ {
+ }
+
+ double linewidth;
+ double alpha;
+ bool forced_alpha;
+ agg::rgba color;
+ bool isaa;
+
+ agg::line_cap_e cap;
+ agg::line_join_e join;
+
+ agg::rect_d cliprect;
+
+ ClipPath clippath;
+
+ Dashes dashes;
+
+ e_snap_mode snap_mode;
+
+ py::PathIterator hatchpath;
+ agg::rgba hatch_color;
+ double hatch_linewidth;
+
+ SketchParams sketch;
+
+ bool has_hatchpath()
+ {
+ return hatchpath.total_vertices() != 0;
+ }
+
+ private:
+ // prevent copying
+ GCAgg(const GCAgg &);
+ GCAgg &operator=(const GCAgg &);
+};
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/_backend_agg_wrapper.cpp b/contrib/python/matplotlib/py2/src/_backend_agg_wrapper.cpp
new file mode 100644
index 00000000000..ea6c7b1267b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_backend_agg_wrapper.cpp
@@ -0,0 +1,777 @@
+#include "mplutils.h"
+#include "py_converters.h"
+#include "_backend_agg.h"
+
+typedef struct
+{
+ PyObject_HEAD
+ RendererAgg *x;
+ Py_ssize_t shape[3];
+ Py_ssize_t strides[3];
+ Py_ssize_t suboffsets[3];
+} PyRendererAgg;
+
+typedef struct
+{
+ PyObject_HEAD
+ BufferRegion *x;
+ Py_ssize_t shape[3];
+ Py_ssize_t strides[3];
+ Py_ssize_t suboffsets[3];
+} PyBufferRegion;
+
+
+/**********************************************************************
+ * BufferRegion
+ * */
+
+static PyObject *PyBufferRegion_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ PyBufferRegion *self;
+ self = (PyBufferRegion *)type->tp_alloc(type, 0);
+ self->x = NULL;
+ return (PyObject *)self;
+}
+
+static void PyBufferRegion_dealloc(PyBufferRegion *self)
+{
+ delete self->x;
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+static PyObject *PyBufferRegion_to_string(PyBufferRegion *self, PyObject *args, PyObject *kwds)
+{
+ return PyBytes_FromStringAndSize((const char *)self->x->get_data(),
+ self->x->get_height() * self->x->get_stride());
+}
+
+/* TODO: This doesn't seem to be used internally. Remove? */
+
+static PyObject *PyBufferRegion_set_x(PyBufferRegion *self, PyObject *args, PyObject *kwds)
+{
+ int x;
+ if (!PyArg_ParseTuple(args, "i:set_x", &x)) {
+ return NULL;
+ }
+ self->x->get_rect().x1 = x;
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *PyBufferRegion_set_y(PyBufferRegion *self, PyObject *args, PyObject *kwds)
+{
+ int y;
+ if (!PyArg_ParseTuple(args, "i:set_y", &y)) {
+ return NULL;
+ }
+ self->x->get_rect().y1 = y;
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *PyBufferRegion_get_extents(PyBufferRegion *self, PyObject *args, PyObject *kwds)
+{
+ agg::rect_i rect = self->x->get_rect();
+
+ return Py_BuildValue("IIII", rect.x1, rect.y1, rect.x2, rect.y2);
+}
+
+static PyObject *PyBufferRegion_to_string_argb(PyBufferRegion *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *bufobj;
+ uint8_t *buf;
+
+ bufobj = PyBytes_FromStringAndSize(NULL, self->x->get_height() * self->x->get_stride());
+ buf = (uint8_t *)PyBytes_AS_STRING(bufobj);
+
+ CALL_CPP_CLEANUP("to_string_argb", (self->x->to_string_argb(buf)), Py_DECREF(bufobj));
+
+ return bufobj;
+}
+
+int PyBufferRegion_get_buffer(PyBufferRegion *self, Py_buffer *buf, int flags)
+{
+ Py_INCREF(self);
+ buf->obj = (PyObject *)self;
+ buf->buf = self->x->get_data();
+ buf->len = self->x->get_width() * self->x->get_height() * 4;
+ buf->readonly = 0;
+ buf->format = (char *)"B";
+ buf->ndim = 3;
+ self->shape[0] = self->x->get_height();
+ self->shape[1] = self->x->get_width();
+ self->shape[2] = 4;
+ buf->shape = self->shape;
+ self->strides[0] = self->x->get_width() * 4;
+ self->strides[1] = 4;
+ self->strides[2] = 1;
+ buf->strides = self->strides;
+ buf->suboffsets = NULL;
+ buf->itemsize = 1;
+ buf->internal = NULL;
+
+ return 1;
+}
+
+static PyTypeObject PyBufferRegionType;
+
+static PyTypeObject *PyBufferRegion_init_type(PyObject *m, PyTypeObject *type)
+{
+ static PyMethodDef methods[] = {
+ { "to_string", (PyCFunction)PyBufferRegion_to_string, METH_NOARGS, NULL },
+ { "to_string_argb", (PyCFunction)PyBufferRegion_to_string_argb, METH_NOARGS, NULL },
+ { "set_x", (PyCFunction)PyBufferRegion_set_x, METH_VARARGS, NULL },
+ { "set_y", (PyCFunction)PyBufferRegion_set_y, METH_VARARGS, NULL },
+ { "get_extents", (PyCFunction)PyBufferRegion_get_extents, METH_NOARGS, NULL },
+ { NULL }
+ };
+
+ static PyBufferProcs buffer_procs;
+ memset(&buffer_procs, 0, sizeof(PyBufferProcs));
+ buffer_procs.bf_getbuffer = (getbufferproc)PyBufferRegion_get_buffer;
+
+ memset(type, 0, sizeof(PyTypeObject));
+ type->tp_name = "matplotlib.backends._backend_agg.BufferRegion";
+ type->tp_basicsize = sizeof(PyBufferRegion);
+ type->tp_dealloc = (destructor)PyBufferRegion_dealloc;
+ type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_NEWBUFFER;
+ type->tp_methods = methods;
+ type->tp_new = PyBufferRegion_new;
+ type->tp_as_buffer = &buffer_procs;
+
+ if (PyType_Ready(type) < 0) {
+ return NULL;
+ }
+
+ /* Don't need to add to module, since you can't create buffer
+ regions directly from Python */
+
+ return type;
+}
+
+/**********************************************************************
+ * RendererAgg
+ * */
+
+static PyObject *PyRendererAgg_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ PyRendererAgg *self;
+ self = (PyRendererAgg *)type->tp_alloc(type, 0);
+ self->x = NULL;
+ return (PyObject *)self;
+}
+
+static int PyRendererAgg_init(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ unsigned int width;
+ unsigned int height;
+ double dpi;
+ int debug = 0;
+
+ if (!PyArg_ParseTuple(args, "IId|i:RendererAgg", &width, &height, &dpi, &debug)) {
+ return -1;
+ }
+
+ if (dpi <= 0.0) {
+ PyErr_SetString(PyExc_ValueError, "dpi must be positive");
+ return -1;
+ }
+
+ if (width >= 1 << 16 || height >= 1 << 16) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "Image size of %dx%d pixels is too large. "
+ "It must be less than 2^16 in each direction.",
+ width, height);
+ return -1;
+ }
+
+ CALL_CPP_INIT("RendererAgg", self->x = new RendererAgg(width, height, dpi))
+
+ return 0;
+}
+
+static void PyRendererAgg_dealloc(PyRendererAgg *self)
+{
+ delete self->x;
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+static PyObject *PyRendererAgg_draw_path(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ GCAgg gc;
+ py::PathIterator path;
+ agg::trans_affine trans;
+ PyObject *faceobj = NULL;
+ agg::rgba face;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&O&|O:draw_path",
+ &convert_gcagg,
+ &gc,
+ &convert_path,
+ &path,
+ &convert_trans_affine,
+ &trans,
+ &faceobj)) {
+ return NULL;
+ }
+
+ if (!convert_face(faceobj, gc, &face)) {
+ return NULL;
+ }
+
+ CALL_CPP("draw_path", (self->x->draw_path(gc, path, trans, face)));
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *PyRendererAgg_draw_text_image(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ numpy::array_view<agg::int8u, 2> image;
+ double x;
+ double y;
+ double angle;
+ GCAgg gc;
+
+ if (!PyArg_ParseTuple(args,
+ "O&dddO&:draw_text_image",
+ &image.converter_contiguous,
+ &image,
+ &x,
+ &y,
+ &angle,
+ &convert_gcagg,
+ &gc)) {
+ return NULL;
+ }
+
+ CALL_CPP("draw_text_image", (self->x->draw_text_image(gc, image, x, y, angle)));
+
+ Py_RETURN_NONE;
+}
+
+PyObject *PyRendererAgg_draw_markers(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ GCAgg gc;
+ py::PathIterator marker_path;
+ agg::trans_affine marker_path_trans;
+ py::PathIterator path;
+ agg::trans_affine trans;
+ PyObject *faceobj = NULL;
+ agg::rgba face;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&O&O&O&|O:draw_markers",
+ &convert_gcagg,
+ &gc,
+ &convert_path,
+ &marker_path,
+ &convert_trans_affine,
+ &marker_path_trans,
+ &convert_path,
+ &path,
+ &convert_trans_affine,
+ &trans,
+ &faceobj)) {
+ return NULL;
+ }
+
+ if (!convert_face(faceobj, gc, &face)) {
+ return NULL;
+ }
+
+ CALL_CPP("draw_markers",
+ (self->x->draw_markers(gc, marker_path, marker_path_trans, path, trans, face)));
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *PyRendererAgg_draw_image(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ GCAgg gc;
+ double x;
+ double y;
+ numpy::array_view<agg::int8u, 3> image;
+
+ if (!PyArg_ParseTuple(args,
+ "O&ddO&:draw_image",
+ &convert_gcagg,
+ &gc,
+ &x,
+ &y,
+ &image.converter_contiguous,
+ &image)) {
+ return NULL;
+ }
+
+ x = mpl_round(x);
+ y = mpl_round(y);
+
+ gc.alpha = 1.0;
+ CALL_CPP("draw_image", (self->x->draw_image(gc, x, y, image)));
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+PyRendererAgg_draw_path_collection(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ GCAgg gc;
+ agg::trans_affine master_transform;
+ PyObject *pathobj;
+ numpy::array_view<const double, 3> transforms;
+ numpy::array_view<const double, 2> offsets;
+ agg::trans_affine offset_trans;
+ numpy::array_view<const double, 2> facecolors;
+ numpy::array_view<const double, 2> edgecolors;
+ numpy::array_view<const double, 1> linewidths;
+ DashesVector dashes;
+ numpy::array_view<const uint8_t, 1> antialiaseds;
+ PyObject *ignored;
+ e_offset_position offset_position;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&OO&O&O&O&O&O&O&O&OO&:draw_path_collection",
+ &convert_gcagg,
+ &gc,
+ &convert_trans_affine,
+ &master_transform,
+ &pathobj,
+ &convert_transforms,
+ &transforms,
+ &convert_points,
+ &offsets,
+ &convert_trans_affine,
+ &offset_trans,
+ &convert_colors,
+ &facecolors,
+ &convert_colors,
+ &edgecolors,
+ &linewidths.converter,
+ &linewidths,
+ &convert_dashes_vector,
+ &dashes,
+ &antialiaseds.converter,
+ &antialiaseds,
+ &ignored,
+ &convert_offset_position,
+ &offset_position)) {
+ return NULL;
+ }
+
+ try
+ {
+ py::PathGenerator path(pathobj);
+
+ CALL_CPP("draw_path_collection",
+ (self->x->draw_path_collection(gc,
+ master_transform,
+ path,
+ transforms,
+ offsets,
+ offset_trans,
+ facecolors,
+ edgecolors,
+ linewidths,
+ dashes,
+ antialiaseds,
+ offset_position)));
+ }
+ catch (const py::exception &)
+ {
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *PyRendererAgg_draw_quad_mesh(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ GCAgg gc;
+ agg::trans_affine master_transform;
+ unsigned int mesh_width;
+ unsigned int mesh_height;
+ numpy::array_view<const double, 3> coordinates;
+ numpy::array_view<const double, 2> offsets;
+ agg::trans_affine offset_trans;
+ numpy::array_view<const double, 2> facecolors;
+ int antialiased;
+ numpy::array_view<const double, 2> edgecolors;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&IIO&O&O&O&iO&:draw_quad_mesh",
+ &convert_gcagg,
+ &gc,
+ &convert_trans_affine,
+ &master_transform,
+ &mesh_width,
+ &mesh_height,
+ &coordinates.converter,
+ &coordinates,
+ &convert_points,
+ &offsets,
+ &convert_trans_affine,
+ &offset_trans,
+ &convert_colors,
+ &facecolors,
+ &antialiased,
+ &convert_colors,
+ &edgecolors)) {
+ return NULL;
+ }
+
+ CALL_CPP("draw_quad_mesh",
+ (self->x->draw_quad_mesh(gc,
+ master_transform,
+ mesh_width,
+ mesh_height,
+ coordinates,
+ offsets,
+ offset_trans,
+ facecolors,
+ antialiased,
+ edgecolors)));
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+PyRendererAgg_draw_gouraud_triangle(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ GCAgg gc;
+ numpy::array_view<const double, 2> points;
+ numpy::array_view<const double, 2> colors;
+ agg::trans_affine trans;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&O&O&|O:draw_gouraud_triangle",
+ &convert_gcagg,
+ &gc,
+ &points.converter,
+ &points,
+ &colors.converter,
+ &colors,
+ &convert_trans_affine,
+ &trans)) {
+ return NULL;
+ }
+
+ if (points.dim(0) != 3 || points.dim(1) != 2) {
+ PyErr_Format(PyExc_ValueError,
+ "points must be a 3x2 array, got %" NPY_INTP_FMT "x%" NPY_INTP_FMT,
+ points.dim(0), points.dim(1));
+ return NULL;
+ }
+
+ if (colors.dim(0) != 3 || colors.dim(1) != 4) {
+ PyErr_Format(PyExc_ValueError,
+ "colors must be a 3x4 array, got %" NPY_INTP_FMT "x%" NPY_INTP_FMT,
+ colors.dim(0), colors.dim(1));
+ return NULL;
+ }
+
+
+ CALL_CPP("draw_gouraud_triangle", (self->x->draw_gouraud_triangle(gc, points, colors, trans)));
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+PyRendererAgg_draw_gouraud_triangles(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ GCAgg gc;
+ numpy::array_view<const double, 3> points;
+ numpy::array_view<const double, 3> colors;
+ agg::trans_affine trans;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&O&O&|O:draw_gouraud_triangles",
+ &convert_gcagg,
+ &gc,
+ &points.converter,
+ &points,
+ &colors.converter,
+ &colors,
+ &convert_trans_affine,
+ &trans)) {
+ return NULL;
+ }
+
+ if (points.size() != 0 && (points.dim(1) != 3 || points.dim(2) != 2)) {
+ PyErr_Format(PyExc_ValueError,
+ "points must be a Nx3x2 array, got %" NPY_INTP_FMT "x%" NPY_INTP_FMT "x%" NPY_INTP_FMT,
+ points.dim(0), points.dim(1), points.dim(2));
+ return NULL;
+ }
+
+ if (colors.size() != 0 && (colors.dim(1) != 3 || colors.dim(2) != 4)) {
+ PyErr_Format(PyExc_ValueError,
+ "colors must be a Nx3x4 array, got %" NPY_INTP_FMT "x%" NPY_INTP_FMT "x%" NPY_INTP_FMT,
+ colors.dim(0), colors.dim(1), colors.dim(2));
+ return NULL;
+ }
+
+ if (points.size() != colors.size()) {
+ PyErr_Format(PyExc_ValueError,
+ "points and colors arrays must be the same length, got %" NPY_INTP_FMT " and %" NPY_INTP_FMT,
+ points.dim(0), colors.dim(0));
+ return NULL;
+ }
+
+ CALL_CPP("draw_gouraud_triangles", self->x->draw_gouraud_triangles(gc, points, colors, trans));
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *PyRendererAgg_tostring_rgb(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *buffobj = NULL;
+
+ buffobj = PyBytes_FromStringAndSize(NULL, self->x->get_width() * self->x->get_height() * 3);
+ if (buffobj == NULL) {
+ return NULL;
+ }
+
+ CALL_CPP_CLEANUP("tostring_rgb",
+ (self->x->tostring_rgb((uint8_t *)PyBytes_AS_STRING(buffobj))),
+ Py_DECREF(buffobj));
+
+ return buffobj;
+}
+
+static PyObject *PyRendererAgg_tostring_argb(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *buffobj = NULL;
+
+ buffobj = PyBytes_FromStringAndSize(NULL, self->x->get_width() * self->x->get_height() * 4);
+ if (buffobj == NULL) {
+ return NULL;
+ }
+
+ CALL_CPP_CLEANUP("tostring_argb",
+ (self->x->tostring_argb((uint8_t *)PyBytes_AS_STRING(buffobj))),
+ Py_DECREF(buffobj));
+
+ return buffobj;
+}
+
+static PyObject *PyRendererAgg_tostring_bgra(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *buffobj = NULL;
+
+ buffobj = PyBytes_FromStringAndSize(NULL, self->x->get_width() * self->x->get_height() * 4);
+ if (buffobj == NULL) {
+ return NULL;
+ }
+
+ CALL_CPP_CLEANUP("to_string_bgra",
+ (self->x->tostring_bgra((uint8_t *)PyBytes_AS_STRING(buffobj))),
+ Py_DECREF(buffobj));
+
+ return buffobj;
+}
+
+static PyObject *
+PyRendererAgg_get_content_extents(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ agg::rect_i extents;
+
+ CALL_CPP("get_content_extents", (extents = self->x->get_content_extents()));
+
+ return Py_BuildValue(
+ "iiii", extents.x1, extents.y1, extents.x2 - extents.x1, extents.y2 - extents.y1);
+}
+
+static PyObject *PyRendererAgg_buffer_rgba(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+#if PY3K
+ return PyBytes_FromStringAndSize((const char *)self->x->pixBuffer,
+ self->x->get_width() * self->x->get_height() * 4);
+#else
+ return PyBuffer_FromReadWriteMemory(self->x->pixBuffer,
+ self->x->get_width() * self->x->get_height() * 4);
+#endif
+}
+
+int PyRendererAgg_get_buffer(PyRendererAgg *self, Py_buffer *buf, int flags)
+{
+ Py_INCREF(self);
+ buf->obj = (PyObject *)self;
+ buf->buf = self->x->pixBuffer;
+ buf->len = self->x->get_width() * self->x->get_height() * 4;
+ buf->readonly = 0;
+ buf->format = (char *)"B";
+ buf->ndim = 3;
+ self->shape[0] = self->x->get_height();
+ self->shape[1] = self->x->get_width();
+ self->shape[2] = 4;
+ buf->shape = self->shape;
+ self->strides[0] = self->x->get_width() * 4;
+ self->strides[1] = 4;
+ self->strides[2] = 1;
+ buf->strides = self->strides;
+ buf->suboffsets = NULL;
+ buf->itemsize = 1;
+ buf->internal = NULL;
+
+ return 1;
+}
+
+static PyObject *PyRendererAgg_clear(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ CALL_CPP("clear", self->x->clear());
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *PyRendererAgg_copy_from_bbox(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ agg::rect_d bbox;
+ BufferRegion *reg;
+ PyObject *regobj;
+
+ if (!PyArg_ParseTuple(args, "O&:copy_from_bbox", &convert_rect, &bbox)) {
+ return 0;
+ }
+
+ CALL_CPP("copy_from_bbox", (reg = self->x->copy_from_bbox(bbox)));
+
+ regobj = PyBufferRegion_new(&PyBufferRegionType, NULL, NULL);
+ ((PyBufferRegion *)regobj)->x = reg;
+
+ return regobj;
+}
+
+static PyObject *PyRendererAgg_restore_region(PyRendererAgg *self, PyObject *args, PyObject *kwds)
+{
+ PyBufferRegion *regobj;
+ int xx1 = 0, yy1 = 0, xx2 = 0, yy2 = 0, x = 0, y = 0;
+
+ if (!PyArg_ParseTuple(args,
+ "O!|iiiiii:restore_region",
+ &PyBufferRegionType,
+ &regobj,
+ &xx1,
+ &yy1,
+ &xx2,
+ &yy2,
+ &x,
+ &y)) {
+ return 0;
+ }
+
+ if (PySequence_Size(args) == 1) {
+ CALL_CPP("restore_region", (self->x->restore_region(*(regobj->x))));
+ } else {
+ CALL_CPP("restore_region", self->x->restore_region(*(regobj->x), xx1, yy1, xx2, yy2, x, y));
+ }
+
+ Py_RETURN_NONE;
+}
+
+PyTypeObject PyRendererAggType;
+
+static PyTypeObject *PyRendererAgg_init_type(PyObject *m, PyTypeObject *type)
+{
+ static PyMethodDef methods[] = {
+ {"draw_path", (PyCFunction)PyRendererAgg_draw_path, METH_VARARGS, NULL},
+ {"draw_markers", (PyCFunction)PyRendererAgg_draw_markers, METH_VARARGS, NULL},
+ {"draw_text_image", (PyCFunction)PyRendererAgg_draw_text_image, METH_VARARGS, NULL},
+ {"draw_image", (PyCFunction)PyRendererAgg_draw_image, METH_VARARGS, NULL},
+ {"draw_path_collection", (PyCFunction)PyRendererAgg_draw_path_collection, METH_VARARGS, NULL},
+ {"draw_quad_mesh", (PyCFunction)PyRendererAgg_draw_quad_mesh, METH_VARARGS, NULL},
+ {"draw_gouraud_triangle", (PyCFunction)PyRendererAgg_draw_gouraud_triangle, METH_VARARGS, NULL},
+ {"draw_gouraud_triangles", (PyCFunction)PyRendererAgg_draw_gouraud_triangles, METH_VARARGS, NULL},
+
+ {"tostring_rgb", (PyCFunction)PyRendererAgg_tostring_rgb, METH_NOARGS, NULL},
+ {"tostring_argb", (PyCFunction)PyRendererAgg_tostring_argb, METH_NOARGS, NULL},
+ {"tostring_bgra", (PyCFunction)PyRendererAgg_tostring_bgra, METH_NOARGS, NULL},
+ {"get_content_extents", (PyCFunction)PyRendererAgg_get_content_extents, METH_NOARGS, NULL},
+ {"buffer_rgba", (PyCFunction)PyRendererAgg_buffer_rgba, METH_NOARGS, NULL},
+ {"clear", (PyCFunction)PyRendererAgg_clear, METH_NOARGS, NULL},
+
+ {"copy_from_bbox", (PyCFunction)PyRendererAgg_copy_from_bbox, METH_VARARGS, NULL},
+ {"restore_region", (PyCFunction)PyRendererAgg_restore_region, METH_VARARGS, NULL},
+ {NULL}
+ };
+
+ static PyBufferProcs buffer_procs;
+ memset(&buffer_procs, 0, sizeof(PyBufferProcs));
+ buffer_procs.bf_getbuffer = (getbufferproc)PyRendererAgg_get_buffer;
+
+ memset(type, 0, sizeof(PyTypeObject));
+ type->tp_name = "matplotlib.backends._backend_agg.RendererAgg";
+ type->tp_basicsize = sizeof(PyRendererAgg);
+ type->tp_dealloc = (destructor)PyRendererAgg_dealloc;
+ type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_NEWBUFFER;
+ type->tp_methods = methods;
+ type->tp_init = (initproc)PyRendererAgg_init;
+ type->tp_new = PyRendererAgg_new;
+ type->tp_as_buffer = &buffer_procs;
+
+ if (PyType_Ready(type) < 0) {
+ return NULL;
+ }
+
+ if (PyModule_AddObject(m, "RendererAgg", (PyObject *)type)) {
+ return NULL;
+ }
+
+ return type;
+}
+
+extern "C" {
+
+#if PY3K
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_backend_agg",
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__backend_agg(void)
+
+#else
+#define INITERROR return
+
+PyMODINIT_FUNC init_backend_agg(void)
+#endif
+
+{
+ PyObject *m;
+
+#if PY3K
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule3("_backend_agg", NULL, NULL);
+#endif
+
+ if (m == NULL) {
+ INITERROR;
+ }
+
+ import_array();
+
+ if (!PyRendererAgg_init_type(m, &PyRendererAggType)) {
+ INITERROR;
+ }
+
+ if (!PyBufferRegion_init_type(m, &PyBufferRegionType)) {
+ INITERROR;
+ }
+
+#if PY3K
+ return m;
+#endif
+}
+
+} // extern "C"
diff --git a/contrib/python/matplotlib/py2/src/_contour.cpp b/contrib/python/matplotlib/py2/src/_contour.cpp
new file mode 100644
index 00000000000..aecb442c7e5
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_contour.cpp
@@ -0,0 +1,1790 @@
+// This file contains liberal use of asserts to assist code development and
+// debugging. Standard matplotlib builds disable asserts so they cause no
+// performance reduction. To enable the asserts, you need to undefine the
+// NDEBUG macro, which is achieved by adding the following
+// undef_macros=['NDEBUG']
+// to the appropriate make_extension call in setupext.py, and then rebuilding.
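+// As a rough sketch (the exact arguments in setupext.py may differ), that
+// change looks like:
+//     make_extension('matplotlib._contour', sources, undef_macros=['NDEBUG'])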
+#define NO_IMPORT_ARRAY
+
+#include "src/mplutils.h"
+#include "src/_contour.h"
+#include <algorithm>
+
+
+// 'kind' codes.
+#define MOVETO 1
+#define LINETO 2
+#define CLOSEPOLY 79
+
+// Point indices from current quad index.
+#define POINT_SW (quad)
+#define POINT_SE (quad+1)
+#define POINT_NW (quad+_nx)
+#define POINT_NE (quad+_nx+1)
+
+// CacheItem masks, only accessed directly to set. To read, use accessors
+// detailed below. 1 and 2 refer to level indices (lower and upper).
+#define MASK_Z_LEVEL 0x0003 // Combines the following two.
+#define MASK_Z_LEVEL_1 0x0001 // z > lower_level.
+#define MASK_Z_LEVEL_2 0x0002 // z > upper_level.
+#define MASK_VISITED_1 0x0004 // Algorithm has visited this quad.
+#define MASK_VISITED_2 0x0008
+#define MASK_SADDLE_1 0x0010 // quad is a saddle quad.
+#define MASK_SADDLE_2 0x0020
+#define MASK_SADDLE_LEFT_1 0x0040 // Contours turn left at saddle quad.
+#define MASK_SADDLE_LEFT_2 0x0080
+#define MASK_SADDLE_START_SW_1 0x0100 // Next visit starts on S or W edge.
+#define MASK_SADDLE_START_SW_2 0x0200
+#define MASK_BOUNDARY_S 0x0400 // S edge of quad is a boundary.
+#define MASK_BOUNDARY_W 0x0800 // W edge of quad is a boundary.
+// EXISTS_QUAD bit is always used, but the 4 EXISTS_CORNER are only used if
+// _corner_mask is true. Only one of EXISTS_QUAD or EXISTS_??_CORNER is ever
+// set per quad, hence not using unique bits for each; care is needed when
+// testing for these flags as they overlap.
+#define MASK_EXISTS_QUAD 0x1000 // All of quad exists (is not masked).
+#define MASK_EXISTS_SW_CORNER 0x2000 // SW corner exists, NE corner is masked.
+#define MASK_EXISTS_SE_CORNER 0x3000
+#define MASK_EXISTS_NW_CORNER 0x4000
+#define MASK_EXISTS_NE_CORNER 0x5000
+#define MASK_EXISTS 0x7000 // Combines all 5 EXISTS masks.
+
+// The following are only needed for filled contours.
+#define MASK_VISITED_S 0x10000 // Algorithm has visited S boundary.
+#define MASK_VISITED_W 0x20000 // Algorithm has visited W boundary.
+#define MASK_VISITED_CORNER 0x40000 // Algorithm has visited corner edge.
+
+
+// Accessors for various CacheItem masks. li is shorthand for level_index.
+#define Z_LEVEL(quad) (_cache[quad] & MASK_Z_LEVEL)
+#define Z_NE Z_LEVEL(POINT_NE)
+#define Z_NW Z_LEVEL(POINT_NW)
+#define Z_SE Z_LEVEL(POINT_SE)
+#define Z_SW Z_LEVEL(POINT_SW)
+#define VISITED(quad,li) (_cache[quad] & (li==1 ? MASK_VISITED_1 : MASK_VISITED_2))
+#define VISITED_S(quad) (_cache[quad] & MASK_VISITED_S)
+#define VISITED_W(quad) (_cache[quad] & MASK_VISITED_W)
+#define VISITED_CORNER(quad) (_cache[quad] & MASK_VISITED_CORNER)
+#define SADDLE(quad,li) (_cache[quad] & (li==1 ? MASK_SADDLE_1 : MASK_SADDLE_2))
+#define SADDLE_LEFT(quad,li) (_cache[quad] & (li==1 ? MASK_SADDLE_LEFT_1 : MASK_SADDLE_LEFT_2))
+#define SADDLE_START_SW(quad,li) (_cache[quad] & (li==1 ? MASK_SADDLE_START_SW_1 : MASK_SADDLE_START_SW_2))
+#define BOUNDARY_S(quad) (_cache[quad] & MASK_BOUNDARY_S)
+#define BOUNDARY_W(quad) (_cache[quad] & MASK_BOUNDARY_W)
+#define BOUNDARY_N(quad) BOUNDARY_S(quad+_nx)
+#define BOUNDARY_E(quad) BOUNDARY_W(quad+1)
+#define EXISTS_QUAD(quad) ((_cache[quad] & MASK_EXISTS) == MASK_EXISTS_QUAD)
+#define EXISTS_NONE(quad) ((_cache[quad] & MASK_EXISTS) == 0)
+// The following are only used if _corner_mask is true.
+#define EXISTS_SW_CORNER(quad) ((_cache[quad] & MASK_EXISTS) == MASK_EXISTS_SW_CORNER)
+#define EXISTS_SE_CORNER(quad) ((_cache[quad] & MASK_EXISTS) == MASK_EXISTS_SE_CORNER)
+#define EXISTS_NW_CORNER(quad) ((_cache[quad] & MASK_EXISTS) == MASK_EXISTS_NW_CORNER)
+#define EXISTS_NE_CORNER(quad) ((_cache[quad] & MASK_EXISTS) == MASK_EXISTS_NE_CORNER)
+#define EXISTS_ANY_CORNER(quad) (!EXISTS_NONE(quad) && !EXISTS_QUAD(quad))
+#define EXISTS_W_EDGE(quad) (EXISTS_QUAD(quad) || EXISTS_SW_CORNER(quad) || EXISTS_NW_CORNER(quad))
+#define EXISTS_E_EDGE(quad) (EXISTS_QUAD(quad) || EXISTS_SE_CORNER(quad) || EXISTS_NE_CORNER(quad))
+#define EXISTS_S_EDGE(quad) (EXISTS_QUAD(quad) || EXISTS_SW_CORNER(quad) || EXISTS_SE_CORNER(quad))
+#define EXISTS_N_EDGE(quad) (EXISTS_QUAD(quad) || EXISTS_NW_CORNER(quad) || EXISTS_NE_CORNER(quad))
+// Note that EXISTS_NE_CORNER(quad) is equivalent to BOUNDARY_SW(quad), etc.
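+// Because the five EXISTS values share bits (e.g. MASK_EXISTS_SE_CORNER is
+// 0x3000 == MASK_EXISTS_QUAD | MASK_EXISTS_SW_CORNER), membership is tested
+// by comparing the whole masked field, as the EXISTS_* macros above do:
+// (_cache[quad] & MASK_EXISTS) == MASK_EXISTS_SE_CORNER.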
+
+
+
+QuadEdge::QuadEdge()
+ : quad(-1), edge(Edge_None)
+{}
+
+QuadEdge::QuadEdge(long quad_, Edge edge_)
+ : quad(quad_), edge(edge_)
+{}
+
+bool QuadEdge::operator<(const QuadEdge& other) const
+{
+ if (quad != other.quad)
+ return quad < other.quad;
+ else
+ return edge < other.edge;
+}
+
+bool QuadEdge::operator==(const QuadEdge& other) const
+{
+ return quad == other.quad && edge == other.edge;
+}
+
+bool QuadEdge::operator!=(const QuadEdge& other) const
+{
+ return !operator==(other);
+}
+
+std::ostream& operator<<(std::ostream& os, const QuadEdge& quad_edge)
+{
+ return os << quad_edge.quad << ' ' << quad_edge.edge;
+}
+
+
+// conflict with code from matplotlib/tri/_tri.cpp
+#if 0
+XY::XY()
+{}
+
+XY::XY(const double& x_, const double& y_)
+ : x(x_), y(y_)
+{}
+
+bool XY::operator==(const XY& other) const
+{
+ return x == other.x && y == other.y;
+}
+
+bool XY::operator!=(const XY& other) const
+{
+ return x != other.x || y != other.y;
+}
+
+XY XY::operator*(const double& multiplier) const
+{
+ return XY(x*multiplier, y*multiplier);
+}
+
+const XY& XY::operator+=(const XY& other)
+{
+ x += other.x;
+ y += other.y;
+ return *this;
+}
+
+const XY& XY::operator-=(const XY& other)
+{
+ x -= other.x;
+ y -= other.y;
+ return *this;
+}
+
+XY XY::operator+(const XY& other) const
+{
+ return XY(x + other.x, y + other.y);
+}
+
+XY XY::operator-(const XY& other) const
+{
+ return XY(x - other.x, y - other.y);
+}
+
+std::ostream& operator<<(std::ostream& os, const XY& xy)
+{
+ return os << '(' << xy.x << ' ' << xy.y << ')';
+}
+#endif
+
+
+ContourLine::ContourLine(bool is_hole)
+ : std::vector<XY>(),
+ _is_hole(is_hole),
+ _parent(0)
+{}
+
+void ContourLine::add_child(ContourLine* child)
+{
+ assert(!_is_hole && "Cannot add_child to a hole");
+ assert(child != 0 && "Null child ContourLine");
+ _children.push_back(child);
+}
+
+void ContourLine::clear_parent()
+{
+ assert(is_hole() && "Cannot clear parent of non-hole");
+ assert(_parent != 0 && "Null parent ContourLine");
+ _parent = 0;
+}
+
+const ContourLine::Children& ContourLine::get_children() const
+{
+ assert(!_is_hole && "Cannot get_children of a hole");
+ return _children;
+}
+
+const ContourLine* ContourLine::get_parent() const
+{
+ assert(_is_hole && "Cannot get_parent of a non-hole");
+ return _parent;
+}
+
+ContourLine* ContourLine::get_parent()
+{
+ assert(_is_hole && "Cannot get_parent of a non-hole");
+ return _parent;
+}
+
+bool ContourLine::is_hole() const
+{
+ return _is_hole;
+}
+
+// conflict with code from matplotlib/tri/_tri.cpp
+#if 0
+void ContourLine::push_back(const XY& point)
+{
+ if (empty() || point != back())
+ std::vector<XY>::push_back(point);
+}
+#endif
+
+void ContourLine::set_parent(ContourLine* parent)
+{
+ assert(_is_hole && "Cannot set parent of a non-hole");
+ assert(parent != 0 && "Null parent ContourLine");
+ _parent = parent;
+}
+
+// conflict with code from matplotlib/tri/_tri.cpp
+#if 0
+void ContourLine::write() const
+{
+ std::cout << "ContourLine " << this << " of " << size() << " points:";
+ for (const_iterator it = begin(); it != end(); ++it)
+ std::cout << ' ' << *it;
+ if (is_hole())
+ std::cout << " hole, parent=" << get_parent();
+ else {
+ std::cout << " not hole";
+ if (!_children.empty()) {
+ std::cout << ", children=";
+ for (Children::const_iterator it = _children.begin();
+ it != _children.end(); ++it)
+ std::cout << *it << ' ';
+ }
+ }
+ std::cout << std::endl;
+}
+#endif
+
+
+Contour::Contour()
+{}
+
+Contour::~Contour()
+{
+ delete_contour_lines();
+}
+
+void Contour::delete_contour_lines()
+{
+ for (iterator line_it = begin(); line_it != end(); ++line_it) {
+ delete *line_it;
+ *line_it = 0;
+ }
+ std::vector<ContourLine*>::clear();
+}
+
+void Contour::write() const
+{
+ std::cout << "Contour of " << size() << " lines." << std::endl;
+ for (const_iterator it = begin(); it != end(); ++it)
+ (*it)->write();
+}
+
+
+
+ParentCache::ParentCache(long nx, long x_chunk_points, long y_chunk_points)
+ : _nx(nx),
+ _x_chunk_points(x_chunk_points),
+ _y_chunk_points(y_chunk_points),
+ _lines(0), // Initialised when first needed.
+ _istart(0),
+ _jstart(0)
+{
+ assert(_x_chunk_points > 0 && _y_chunk_points > 0 &&
+ "Chunk sizes must be positive");
+}
+
+ContourLine* ParentCache::get_parent(long quad)
+{
+ long index = quad_to_index(quad);
+ ContourLine* parent = _lines[index];
+ while (parent == 0) {
+ index -= _x_chunk_points;
+ assert(index >= 0 && "Failed to find parent in chunk ParentCache");
+ parent = _lines[index];
+ }
+ assert(parent != 0 && "Failed to find parent in chunk ParentCache");
+ return parent;
+}
+
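+// Convert a quad index into an index into the _lines array of the current
+// chunk.  Example (illustrative, assuming nx = 100 and chunk_size = 50, so
+// each chunk spans 51 points per direction): for a chunk with _istart = 50
+// and _jstart = 0, quad = 257 gives i = 57, j = 2 and hence
+// index = (57-50) + 2*51 = 109.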
+long ParentCache::quad_to_index(long quad) const
+{
+ long i = quad % _nx;
+ long j = quad / _nx;
+ long index = (i-_istart) + (j-_jstart)*_x_chunk_points;
+
+ assert(i >= _istart && i < _istart + _x_chunk_points &&
+ "i-index outside chunk");
+ assert(j >= _jstart && j < _jstart + _y_chunk_points &&
+ "j-index outside chunk");
+ assert(index >= 0 && index < static_cast<long>(_lines.size()) &&
+ "ParentCache index outside chunk");
+
+ return index;
+}
+
+void ParentCache::set_chunk_starts(long istart, long jstart)
+{
+ assert(istart >= 0 && jstart >= 0 &&
+ "Chunk start indices cannot be negative");
+ _istart = istart;
+ _jstart = jstart;
+ if (_lines.empty())
+ _lines.resize(_x_chunk_points*_y_chunk_points, 0);
+ else
+ std::fill(_lines.begin(), _lines.end(), (ContourLine*)0);
+}
+
+void ParentCache::set_parent(long quad, ContourLine& contour_line)
+{
+ assert(!_lines.empty() &&
+ "Accessing ParentCache before it has been initialised");
+ long index = quad_to_index(quad);
+ if (_lines[index] == 0)
+ _lines[index] = (contour_line.is_hole() ? contour_line.get_parent()
+ : &contour_line);
+}
+
+
+
+QuadContourGenerator::QuadContourGenerator(const CoordinateArray& x,
+ const CoordinateArray& y,
+ const CoordinateArray& z,
+ const MaskArray& mask,
+ bool corner_mask,
+ long chunk_size)
+ : _x(x),
+ _y(y),
+ _z(z),
+ _nx(static_cast<long>(_x.dim(1))),
+ _ny(static_cast<long>(_x.dim(0))),
+ _n(_nx*_ny),
+ _corner_mask(corner_mask),
+ _chunk_size(chunk_size > 0 ? std::min(chunk_size, std::max(_nx, _ny)-1)
+ : std::max(_nx, _ny)-1),
+ _nxchunk(calc_chunk_count(_nx)),
+ _nychunk(calc_chunk_count(_ny)),
+ _chunk_count(_nxchunk*_nychunk),
+ _cache(new CacheItem[_n]),
+ _parent_cache(_nx,
+ chunk_size > 0 ? chunk_size+1 : _nx,
+ chunk_size > 0 ? chunk_size+1 : _ny)
+{
+ assert(!_x.empty() && !_y.empty() && !_z.empty() && "Empty array");
+ assert(_y.dim(0) == _x.dim(0) && _y.dim(1) == _x.dim(1) &&
+ "Different-sized y and x arrays");
+ assert(_z.dim(0) == _x.dim(0) && _z.dim(1) == _x.dim(1) &&
+ "Different-sized z and x arrays");
+ assert((mask.empty() ||
+ (mask.dim(0) == _x.dim(0) && mask.dim(1) == _x.dim(1))) &&
+ "Different-sized mask and x arrays");
+
+ init_cache_grid(mask);
+}
+
+QuadContourGenerator::~QuadContourGenerator()
+{
+ delete [] _cache;
+}
+
+void QuadContourGenerator::append_contour_line_to_vertices(
+ ContourLine& contour_line,
+ PyObject* vertices_list) const
+{
+ assert(vertices_list != 0 && "Null python vertices_list");
+
+ // Convert ContourLine to python equivalent, and clear it.
+ npy_intp dims[2] = {static_cast<npy_intp>(contour_line.size()), 2};
+ numpy::array_view<double, 2> line(dims);
+ npy_intp i = 0;
+ for (ContourLine::const_iterator point = contour_line.begin();
+ point != contour_line.end(); ++point, ++i) {
+ line(i, 0) = point->x;
+ line(i, 1) = point->y;
+ }
+ if (PyList_Append(vertices_list, line.pyobj_steal())) {
+ Py_XDECREF(vertices_list);
+ throw std::runtime_error("Unable to add contour line to vertices_list");
+ }
+
+ contour_line.clear();
+}
+
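+// Example (illustrative): a non-hole line of 3 points with a single child
+// hole of 4 points produces one vertex array of (3+1) + (4+1) = 9 rows with
+// codes MOVETO,LINETO,LINETO,CLOSEPOLY,MOVETO,LINETO,LINETO,LINETO,CLOSEPOLY,
+// where each CLOSEPOLY row repeats the first point of its polygon.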
+void QuadContourGenerator::append_contour_to_vertices_and_codes(
+ Contour& contour,
+ PyObject* vertices_list,
+ PyObject* codes_list) const
+{
+ assert(vertices_list != 0 && "Null python vertices_list");
+ assert(codes_list != 0 && "Null python codes_list");
+
+ // Convert Contour to python equivalent, and clear it.
+ for (Contour::iterator line_it = contour.begin(); line_it != contour.end();
+ ++line_it) {
+ ContourLine& line = **line_it;
+ if (line.is_hole()) {
+            // If the hole has already been converted to python its parent
+            // will be 0 and it can be deleted.
+ if (line.get_parent() != 0) {
+ delete *line_it;
+ *line_it = 0;
+ }
+ }
+ else {
+ // Non-holes are converted to python together with their child
+ // holes so that they are rendered correctly.
+ ContourLine::const_iterator point;
+ ContourLine::Children::const_iterator children_it;
+
+ const ContourLine::Children& children = line.get_children();
+ npy_intp npoints = static_cast<npy_intp>(line.size() + 1);
+ for (children_it = children.begin(); children_it != children.end();
+ ++children_it)
+ npoints += static_cast<npy_intp>((*children_it)->size() + 1);
+
+ npy_intp vertices_dims[2] = {npoints, 2};
+ numpy::array_view<double, 2> vertices(vertices_dims);
+ double* vertices_ptr = vertices.data();
+
+ npy_intp codes_dims[1] = {npoints};
+ numpy::array_view<unsigned char, 1> codes(codes_dims);
+ unsigned char* codes_ptr = codes.data();
+
+ for (point = line.begin(); point != line.end(); ++point) {
+ *vertices_ptr++ = point->x;
+ *vertices_ptr++ = point->y;
+ *codes_ptr++ = (point == line.begin() ? MOVETO : LINETO);
+ }
+ point = line.begin();
+ *vertices_ptr++ = point->x;
+ *vertices_ptr++ = point->y;
+ *codes_ptr++ = CLOSEPOLY;
+
+ for (children_it = children.begin(); children_it != children.end();
+ ++children_it) {
+ ContourLine& child = **children_it;
+ for (point = child.begin(); point != child.end(); ++point) {
+ *vertices_ptr++ = point->x;
+ *vertices_ptr++ = point->y;
+ *codes_ptr++ = (point == child.begin() ? MOVETO : LINETO);
+ }
+ point = child.begin();
+ *vertices_ptr++ = point->x;
+ *vertices_ptr++ = point->y;
+ *codes_ptr++ = CLOSEPOLY;
+
+ child.clear_parent(); // To indicate it can be deleted.
+ }
+
+ if (PyList_Append(vertices_list, vertices.pyobj_steal()) ||
+ PyList_Append(codes_list, codes.pyobj_steal())) {
+ Py_XDECREF(vertices_list);
+ Py_XDECREF(codes_list);
+ contour.delete_contour_lines();
+ throw std::runtime_error("Unable to add contour line to vertices and codes lists");
+ }
+
+ delete *line_it;
+ *line_it = 0;
+ }
+ }
+
+ // Delete remaining contour lines.
+ contour.delete_contour_lines();
+}
+
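+// Return the number of chunks along an axis with the given point count,
+// i.e. the ceiling of (point_count-1)/_chunk_size.  Example (illustrative):
+// point_count = 11 and _chunk_size = 4 give 10 quads along the axis, so
+// count = 10/4 = 2 and, since 2*4 < 10, the count is bumped to 3 chunks.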
+long QuadContourGenerator::calc_chunk_count(long point_count) const
+{
+ assert(point_count > 0 && "point count must be positive");
+ assert(_chunk_size > 0 && "Chunk size must be positive");
+
+ if (_chunk_size > 0) {
+ long count = (point_count-1) / _chunk_size;
+ if (count*_chunk_size < point_count-1)
+ ++count;
+
+ assert(count >= 1 && "Invalid chunk count");
+ return count;
+ }
+ else
+ return 1;
+}
+
+PyObject* QuadContourGenerator::create_contour(const double& level)
+{
+ init_cache_levels(level, level);
+
+ PyObject* vertices_list = PyList_New(0);
+ if (vertices_list == 0)
+ throw std::runtime_error("Failed to create Python list");
+
+ // Lines that start and end on boundaries.
+ long ichunk, jchunk, istart, iend, jstart, jend;
+ for (long ijchunk = 0; ijchunk < _chunk_count; ++ijchunk) {
+ get_chunk_limits(ijchunk, ichunk, jchunk, istart, iend, jstart, jend);
+
+ for (long j = jstart; j < jend; ++j) {
+ long quad_end = iend + j*_nx;
+ for (long quad = istart + j*_nx; quad < quad_end; ++quad) {
+ if (EXISTS_NONE(quad) || VISITED(quad,1)) continue;
+
+ if (BOUNDARY_S(quad) && Z_SW >= 1 && Z_SE < 1 &&
+ start_line(vertices_list, quad, Edge_S, level)) continue;
+
+ if (BOUNDARY_W(quad) && Z_NW >= 1 && Z_SW < 1 &&
+ start_line(vertices_list, quad, Edge_W, level)) continue;
+
+ if (BOUNDARY_N(quad) && Z_NE >= 1 && Z_NW < 1 &&
+ start_line(vertices_list, quad, Edge_N, level)) continue;
+
+ if (BOUNDARY_E(quad) && Z_SE >= 1 && Z_NE < 1 &&
+ start_line(vertices_list, quad, Edge_E, level)) continue;
+
+ if (_corner_mask) {
+ // Equates to NE boundary.
+ if (EXISTS_SW_CORNER(quad) && Z_SE >= 1 && Z_NW < 1 &&
+ start_line(vertices_list, quad, Edge_NE, level)) continue;
+
+ // Equates to NW boundary.
+ if (EXISTS_SE_CORNER(quad) && Z_NE >= 1 && Z_SW < 1 &&
+ start_line(vertices_list, quad, Edge_NW, level)) continue;
+
+ // Equates to SE boundary.
+ if (EXISTS_NW_CORNER(quad) && Z_SW >= 1 && Z_NE < 1 &&
+ start_line(vertices_list, quad, Edge_SE, level)) continue;
+
+ // Equates to SW boundary.
+ if (EXISTS_NE_CORNER(quad) && Z_NW >= 1 && Z_SE < 1 &&
+ start_line(vertices_list, quad, Edge_SW, level)) continue;
+ }
+ }
+ }
+ }
+
+ // Internal loops.
+ ContourLine contour_line(false); // Reused for each contour line.
+ for (long ijchunk = 0; ijchunk < _chunk_count; ++ijchunk) {
+ get_chunk_limits(ijchunk, ichunk, jchunk, istart, iend, jstart, jend);
+
+ for (long j = jstart; j < jend; ++j) {
+ long quad_end = iend + j*_nx;
+ for (long quad = istart + j*_nx; quad < quad_end; ++quad) {
+ if (EXISTS_NONE(quad) || VISITED(quad,1))
+ continue;
+
+ Edge start_edge = get_start_edge(quad, 1);
+ if (start_edge == Edge_None)
+ continue;
+
+ QuadEdge quad_edge(quad, start_edge);
+ QuadEdge start_quad_edge(quad_edge);
+
+                // To obtain output identical to that produced by the legacy
+                // code, it is sometimes necessary to ignore the first point
+                // and append it to the end instead.
+ bool ignore_first = (start_edge == Edge_N);
+ follow_interior(contour_line, quad_edge, 1, level,
+ !ignore_first, &start_quad_edge, 1, false);
+ if (ignore_first && !contour_line.empty())
+ contour_line.push_back(contour_line.front());
+ append_contour_line_to_vertices(contour_line, vertices_list);
+
+ // Repeat if saddle point but not visited.
+ if (SADDLE(quad,1) && !VISITED(quad,1))
+ --quad;
+ }
+ }
+ }
+
+ return vertices_list;
+}
+
+PyObject* QuadContourGenerator::create_filled_contour(const double& lower_level,
+ const double& upper_level)
+{
+ init_cache_levels(lower_level, upper_level);
+
+ Contour contour;
+
+ PyObject* vertices = PyList_New(0);
+ if (vertices == 0)
+ throw std::runtime_error("Failed to create Python list");
+
+ PyObject* codes = PyList_New(0);
+ if (codes == 0) {
+ Py_XDECREF(vertices);
+ throw std::runtime_error("Failed to create Python list");
+ }
+
+ long ichunk, jchunk, istart, iend, jstart, jend;
+ for (long ijchunk = 0; ijchunk < _chunk_count; ++ijchunk) {
+ get_chunk_limits(ijchunk, ichunk, jchunk, istart, iend, jstart, jend);
+ _parent_cache.set_chunk_starts(istart, jstart);
+
+ for (long j = jstart; j < jend; ++j) {
+ long quad_end = iend + j*_nx;
+ for (long quad = istart + j*_nx; quad < quad_end; ++quad) {
+ if (!EXISTS_NONE(quad))
+ single_quad_filled(contour, quad, lower_level, upper_level);
+ }
+ }
+
+ // Clear VISITED_W and VISITED_S flags that are reused by later chunks.
+ if (jchunk < _nychunk-1) {
+ long quad_end = iend + jend*_nx;
+ for (long quad = istart + jend*_nx; quad < quad_end; ++quad)
+ _cache[quad] &= ~MASK_VISITED_S;
+ }
+
+ if (ichunk < _nxchunk-1) {
+ long quad_end = iend + jend*_nx;
+ for (long quad = iend + jstart*_nx; quad < quad_end; quad += _nx)
+ _cache[quad] &= ~MASK_VISITED_W;
+ }
+
+ // Create python objects to return for this chunk.
+ append_contour_to_vertices_and_codes(contour, vertices, codes);
+ }
+
+ PyObject* tuple = PyTuple_New(2);
+ if (tuple == 0) {
+ Py_XDECREF(vertices);
+ Py_XDECREF(codes);
+ throw std::runtime_error("Failed to create Python tuple");
+ }
+
+ // No error checking here as filling in a brand new pre-allocated tuple.
+ PyTuple_SET_ITEM(tuple, 0, vertices);
+ PyTuple_SET_ITEM(tuple, 1, codes);
+
+ return tuple;
+}
+
+XY QuadContourGenerator::edge_interp(const QuadEdge& quad_edge,
+ const double& level)
+{
+ assert(quad_edge.quad >= 0 && quad_edge.quad < _n &&
+ "Quad index out of bounds");
+ assert(quad_edge.edge != Edge_None && "Invalid edge");
+ return interp(get_edge_point_index(quad_edge, true),
+ get_edge_point_index(quad_edge, false),
+ level);
+}
+
+unsigned int QuadContourGenerator::follow_boundary(
+ ContourLine& contour_line,
+ QuadEdge& quad_edge,
+ const double& lower_level,
+ const double& upper_level,
+ unsigned int level_index,
+ const QuadEdge& start_quad_edge)
+{
+ assert(quad_edge.quad >= 0 && quad_edge.quad < _n &&
+ "Quad index out of bounds");
+ assert(quad_edge.edge != Edge_None && "Invalid edge");
+ assert(is_edge_a_boundary(quad_edge) && "Not a boundary edge");
+ assert((level_index == 1 || level_index == 2) &&
+ "level index must be 1 or 2");
+ assert(start_quad_edge.quad >= 0 && start_quad_edge.quad < _n &&
+ "Start quad index out of bounds");
+ assert(start_quad_edge.edge != Edge_None && "Invalid start edge");
+
+ // Only called for filled contours, so always updates _parent_cache.
+ unsigned int end_level = 0;
+ bool first_edge = true;
+ bool stop = false;
+ long& quad = quad_edge.quad;
+
+ while (true) {
+ // Levels of start and end points of quad_edge.
+ unsigned int start_level =
+ (first_edge ? Z_LEVEL(get_edge_point_index(quad_edge, true))
+ : end_level);
+ long end_point = get_edge_point_index(quad_edge, false);
+ end_level = Z_LEVEL(end_point);
+
+ if (level_index == 1) {
+ if (start_level <= level_index && end_level == 2) {
+ // Increasing z, switching levels from 1 to 2.
+ level_index = 2;
+ stop = true;
+ }
+ else if (start_level >= 1 && end_level == 0) {
+ // Decreasing z, keeping same level.
+ stop = true;
+ }
+ }
+ else { // level_index == 2
+ if (start_level <= level_index && end_level == 2) {
+ // Increasing z, keeping same level.
+ stop = true;
+ }
+ else if (start_level >= 1 && end_level == 0) {
+ // Decreasing z, switching levels from 2 to 1.
+ level_index = 1;
+ stop = true;
+ }
+ }
+
+ if (!first_edge && !stop && quad_edge == start_quad_edge)
+ // Return if reached start point of contour line. Do this before
+ // checking/setting VISITED flags as will already have been
+ // visited.
+ break;
+
+ switch (quad_edge.edge) {
+ case Edge_E:
+ assert(!VISITED_W(quad+1) && "Already visited");
+ _cache[quad+1] |= MASK_VISITED_W;
+ break;
+ case Edge_N:
+ assert(!VISITED_S(quad+_nx) && "Already visited");
+ _cache[quad+_nx] |= MASK_VISITED_S;
+ break;
+ case Edge_W:
+ assert(!VISITED_W(quad) && "Already visited");
+ _cache[quad] |= MASK_VISITED_W;
+ break;
+ case Edge_S:
+ assert(!VISITED_S(quad) && "Already visited");
+ _cache[quad] |= MASK_VISITED_S;
+ break;
+ case Edge_NE:
+ case Edge_NW:
+ case Edge_SW:
+ case Edge_SE:
+ assert(!VISITED_CORNER(quad) && "Already visited");
+ _cache[quad] |= MASK_VISITED_CORNER;
+ break;
+ default:
+ assert(0 && "Invalid Edge");
+ break;
+ }
+
+ if (stop) {
+ // Exiting boundary to enter interior.
+ contour_line.push_back(edge_interp(quad_edge,
+ level_index == 1 ? lower_level
+ : upper_level));
+ break;
+ }
+
+ move_to_next_boundary_edge(quad_edge);
+
+ // Just moved to new quad edge, so label parent of start of quad edge.
+ switch (quad_edge.edge) {
+ case Edge_W:
+ case Edge_SW:
+ case Edge_S:
+ case Edge_SE:
+ if (!EXISTS_SE_CORNER(quad))
+ _parent_cache.set_parent(quad, contour_line);
+ break;
+ case Edge_E:
+ case Edge_NE:
+ case Edge_N:
+ case Edge_NW:
+ if (!EXISTS_SW_CORNER(quad))
+ _parent_cache.set_parent(quad + 1, contour_line);
+ break;
+ default:
+ assert(0 && "Invalid edge");
+ break;
+ }
+
+ // Add point to contour.
+ contour_line.push_back(get_point_xy(end_point));
+
+ if (first_edge)
+ first_edge = false;
+ }
+
+ return level_index;
+}
+
+void QuadContourGenerator::follow_interior(ContourLine& contour_line,
+ QuadEdge& quad_edge,
+ unsigned int level_index,
+ const double& level,
+ bool want_initial_point,
+ const QuadEdge* start_quad_edge,
+ unsigned int start_level_index,
+ bool set_parents)
+{
+ assert(quad_edge.quad >= 0 && quad_edge.quad < _n &&
+ "Quad index out of bounds.");
+ assert(quad_edge.edge != Edge_None && "Invalid edge");
+ assert((level_index == 1 || level_index == 2) &&
+ "level index must be 1 or 2");
+ assert((start_quad_edge == 0 ||
+ (start_quad_edge->quad >= 0 && start_quad_edge->quad < _n)) &&
+ "Start quad index out of bounds.");
+ assert((start_quad_edge == 0 || start_quad_edge->edge != Edge_None) &&
+ "Invalid start edge");
+ assert((start_level_index == 1 || start_level_index == 2) &&
+ "start level index must be 1 or 2");
+
+ long& quad = quad_edge.quad;
+ Edge& edge = quad_edge.edge;
+
+ if (want_initial_point)
+ contour_line.push_back(edge_interp(quad_edge, level));
+
+ CacheItem visited_mask = (level_index == 1 ? MASK_VISITED_1 : MASK_VISITED_2);
+ CacheItem saddle_mask = (level_index == 1 ? MASK_SADDLE_1 : MASK_SADDLE_2);
+ Dir dir = Dir_Straight;
+
+ while (true) {
+ assert(!EXISTS_NONE(quad) && "Quad does not exist");
+ assert(!(_cache[quad] & visited_mask) && "Quad already visited");
+
+ // Determine direction to move to next quad. If the quad is already
+ // labelled as a saddle quad then the direction is easily read from
+ // the cache. Otherwise the direction is determined differently
+ // depending on whether the quad is a corner quad or not.
+
+ if (_cache[quad] & saddle_mask) {
+ // Already identified as a saddle quad, so direction is easy.
+ dir = (SADDLE_LEFT(quad,level_index) ? Dir_Left : Dir_Right);
+ _cache[quad] |= visited_mask;
+ }
+ else if (EXISTS_ANY_CORNER(quad)) {
+ // Need z-level of point opposite the entry edge, as that
+ // determines whether contour turns left or right.
+ long point_opposite = -1;
+ switch (edge) {
+ case Edge_E:
+ point_opposite = (EXISTS_SE_CORNER(quad) ? POINT_SW
+ : POINT_NW);
+ break;
+ case Edge_N:
+ point_opposite = (EXISTS_NW_CORNER(quad) ? POINT_SW
+ : POINT_SE);
+ break;
+ case Edge_W:
+ point_opposite = (EXISTS_SW_CORNER(quad) ? POINT_SE
+ : POINT_NE);
+ break;
+ case Edge_S:
+ point_opposite = (EXISTS_SW_CORNER(quad) ? POINT_NW
+ : POINT_NE);
+ break;
+ case Edge_NE: point_opposite = POINT_SW; break;
+ case Edge_NW: point_opposite = POINT_SE; break;
+ case Edge_SW: point_opposite = POINT_NE; break;
+ case Edge_SE: point_opposite = POINT_NW; break;
+ default: assert(0 && "Invalid edge"); break;
+ }
+ assert(point_opposite != -1 && "Failed to find opposite point");
+
+ // Lower-level polygons (level_index == 1) always have higher
+ // values to the left of the contour. Upper-level contours
+ // (level_index == 2) are reversed, which is what the fancy XOR
+ // does below.
+ if ((Z_LEVEL(point_opposite) >= level_index) ^ (level_index == 2))
+ dir = Dir_Right;
+ else
+ dir = Dir_Left;
+ _cache[quad] |= visited_mask;
+ }
+ else {
+ // Calculate configuration of this quad.
+ long point_left = -1, point_right = -1;
+ switch (edge) {
+ case Edge_E: point_left = POINT_SW; point_right = POINT_NW; break;
+ case Edge_N: point_left = POINT_SE; point_right = POINT_SW; break;
+ case Edge_W: point_left = POINT_NE; point_right = POINT_SE; break;
+ case Edge_S: point_left = POINT_NW; point_right = POINT_NE; break;
+ default: assert(0 && "Invalid edge"); break;
+ }
+
+ unsigned int config = (Z_LEVEL(point_left) >= level_index) << 1 |
+ (Z_LEVEL(point_right) >= level_index);
+
+ // Upper level (level_index == 2) polygons are reversed compared to
+ // lower level ones, i.e. higher values on the right rather than
+ // the left.
+ if (level_index == 2)
+ config = 3 - config;
+
+ // Calculate turn direction to move to next quad along contour line.
+ if (config == 1) {
+ // New saddle quad, set up cache bits for it.
+ double zmid = 0.25*(get_point_z(POINT_SW) +
+ get_point_z(POINT_SE) +
+ get_point_z(POINT_NW) +
+ get_point_z(POINT_NE));
+ _cache[quad] |= (level_index == 1 ? MASK_SADDLE_1 : MASK_SADDLE_2);
+ if ((zmid > level) ^ (level_index == 2)) {
+ dir = Dir_Right;
+ }
+ else {
+ dir = Dir_Left;
+ _cache[quad] |= (level_index == 1 ? MASK_SADDLE_LEFT_1
+ : MASK_SADDLE_LEFT_2);
+ }
+ if (edge == Edge_N || edge == Edge_E) {
+ // Next visit to this quad must start on S or W.
+ _cache[quad] |= (level_index == 1 ? MASK_SADDLE_START_SW_1
+ : MASK_SADDLE_START_SW_2);
+ }
+ }
+ else {
+ // Normal (non-saddle) quad.
+ dir = (config == 0 ? Dir_Left
+ : (config == 3 ? Dir_Right : Dir_Straight));
+ _cache[quad] |= visited_mask;
+ }
+ }
+
+ // Use dir to determine exit edge.
+ edge = get_exit_edge(quad_edge, dir);
+
+ if (set_parents) {
+ if (edge == Edge_E)
+ _parent_cache.set_parent(quad+1, contour_line);
+ else if (edge == Edge_W)
+ _parent_cache.set_parent(quad, contour_line);
+ }
+
+ // Add new point to contour line.
+ contour_line.push_back(edge_interp(quad_edge, level));
+
+ // Stop if reached boundary.
+ if (is_edge_a_boundary(quad_edge))
+ break;
+
+ move_to_next_quad(quad_edge);
+ assert(quad_edge.quad >= 0 && quad_edge.quad < _n &&
+ "Quad index out of bounds");
+
+ // Return if reached start point of contour line.
+ if (start_quad_edge != 0 &&
+ quad_edge == *start_quad_edge &&
+ level_index == start_level_index)
+ break;
+ }
+}
+
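+// Example (illustrative, assuming nx = ny = 11 and chunk_size = 5, giving
+// 2 chunks per direction): ijchunk = 3 maps to ichunk = jchunk = 1,
+// istart = jstart = 5 and iend = jend = 11, i.e. the last chunk in each
+// direction extends to the edge of the grid.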
+void QuadContourGenerator::get_chunk_limits(long ijchunk,
+ long& ichunk,
+ long& jchunk,
+ long& istart,
+ long& iend,
+ long& jstart,
+ long& jend)
+{
+ assert(ijchunk >= 0 && ijchunk < _chunk_count && "ijchunk out of bounds");
+ ichunk = ijchunk % _nxchunk;
+ jchunk = ijchunk / _nxchunk;
+ istart = ichunk*_chunk_size;
+ iend = (ichunk == _nxchunk-1 ? _nx : (ichunk+1)*_chunk_size);
+ jstart = jchunk*_chunk_size;
+ jend = (jchunk == _nychunk-1 ? _ny : (jchunk+1)*_chunk_size);
+}
+
+Edge QuadContourGenerator::get_corner_start_edge(long quad,
+ unsigned int level_index) const
+{
+ assert(quad >= 0 && quad < _n && "Quad index out of bounds");
+ assert((level_index == 1 || level_index == 2) &&
+ "level index must be 1 or 2");
+ assert(EXISTS_ANY_CORNER(quad) && "Quad is not a corner");
+
+ // Diagram for NE corner. Rotate for other corners.
+ //
+ // edge12
+ // point1 +---------+ point2
+ // \ |
+ // \ | edge23
+ // edge31 \ |
+ // \ |
+ // + point3
+ //
+ long point1, point2, point3;
+ Edge edge12, edge23, edge31;
+ switch (_cache[quad] & MASK_EXISTS) {
+ case MASK_EXISTS_SW_CORNER:
+ point1 = POINT_SE; point2 = POINT_SW; point3 = POINT_NW;
+ edge12 = Edge_S; edge23 = Edge_W; edge31 = Edge_NE;
+ break;
+ case MASK_EXISTS_SE_CORNER:
+ point1 = POINT_NE; point2 = POINT_SE; point3 = POINT_SW;
+ edge12 = Edge_E; edge23 = Edge_S; edge31 = Edge_NW;
+ break;
+ case MASK_EXISTS_NW_CORNER:
+ point1 = POINT_SW; point2 = POINT_NW; point3 = POINT_NE;
+ edge12 = Edge_W; edge23 = Edge_N; edge31 = Edge_SE;
+ break;
+ case MASK_EXISTS_NE_CORNER:
+ point1 = POINT_NW; point2 = POINT_NE; point3 = POINT_SE;
+ edge12 = Edge_N; edge23 = Edge_E; edge31 = Edge_SW;
+ break;
+ default:
+ assert(0 && "Invalid EXISTS for quad");
+ return Edge_None;
+ }
+
+ unsigned int config = (Z_LEVEL(point1) >= level_index) << 2 |
+ (Z_LEVEL(point2) >= level_index) << 1 |
+ (Z_LEVEL(point3) >= level_index);
+
+ // Upper level (level_index == 2) polygons are reversed compared to lower
+ // level ones, i.e. higher values on the right rather than the left.
+ if (level_index == 2)
+ config = 7 - config;
+
+ switch (config) {
+ case 0: return Edge_None;
+ case 1: return edge23;
+ case 2: return edge12;
+ case 3: return edge12;
+ case 4: return edge31;
+ case 5: return edge23;
+ case 6: return edge31;
+ case 7: return Edge_None;
+ default: assert(0 && "Invalid config"); return Edge_None;
+ }
+}
+
+long QuadContourGenerator::get_edge_point_index(const QuadEdge& quad_edge,
+ bool start) const
+{
+ assert(quad_edge.quad >= 0 && quad_edge.quad < _n &&
+ "Quad index out of bounds");
+ assert(quad_edge.edge != Edge_None && "Invalid edge");
+
+ // Edges are ordered anticlockwise around their quad, as indicated by
+ // directions of arrows in diagrams below.
+ // Full quad NW corner (others similar)
+ //
+ // POINT_NW Edge_N POINT_NE POINT_NW Edge_N POINT_NE
+ // +----<-----+ +----<-----+
+ // | | | /
+ // | | | quad /
+ // Edge_W V quad ^ Edge_E Edge_W V ^
+ // | | | / Edge_SE
+ // | | | /
+ // +---->-----+ +
+ // POINT_SW Edge_S POINT_SE POINT_SW
+ //
+ const long& quad = quad_edge.quad;
+ switch (quad_edge.edge) {
+ case Edge_E: return (start ? POINT_SE : POINT_NE);
+ case Edge_N: return (start ? POINT_NE : POINT_NW);
+ case Edge_W: return (start ? POINT_NW : POINT_SW);
+ case Edge_S: return (start ? POINT_SW : POINT_SE);
+ case Edge_NE: return (start ? POINT_SE : POINT_NW);
+ case Edge_NW: return (start ? POINT_NE : POINT_SW);
+ case Edge_SW: return (start ? POINT_NW : POINT_SE);
+ case Edge_SE: return (start ? POINT_SW : POINT_NE);
+ default: assert(0 && "Invalid edge"); return 0;
+ }
+}
+
+Edge QuadContourGenerator::get_exit_edge(const QuadEdge& quad_edge,
+ Dir dir) const
+{
+ assert(quad_edge.quad >= 0 && quad_edge.quad < _n &&
+ "Quad index out of bounds");
+ assert(quad_edge.edge != Edge_None && "Invalid edge");
+
+ const long& quad = quad_edge.quad;
+ const Edge& edge = quad_edge.edge;
+ if (EXISTS_ANY_CORNER(quad)) {
+ // Corner directions are always left or right. A corner is a triangle,
+ // entered via one edge so the other two edges are the left and right
+ // ones.
+ switch (edge) {
+ case Edge_E:
+ return (EXISTS_SE_CORNER(quad)
+ ? (dir == Dir_Left ? Edge_S : Edge_NW)
+ : (dir == Dir_Right ? Edge_N : Edge_SW));
+ case Edge_N:
+ return (EXISTS_NW_CORNER(quad)
+ ? (dir == Dir_Right ? Edge_W : Edge_SE)
+ : (dir == Dir_Left ? Edge_E : Edge_SW));
+ case Edge_W:
+ return (EXISTS_SW_CORNER(quad)
+ ? (dir == Dir_Right ? Edge_S : Edge_NE)
+ : (dir == Dir_Left ? Edge_N : Edge_SE));
+ case Edge_S:
+ return (EXISTS_SW_CORNER(quad)
+ ? (dir == Dir_Left ? Edge_W : Edge_NE)
+ : (dir == Dir_Right ? Edge_E : Edge_NW));
+ case Edge_NE: return (dir == Dir_Left ? Edge_S : Edge_W);
+ case Edge_NW: return (dir == Dir_Left ? Edge_E : Edge_S);
+ case Edge_SW: return (dir == Dir_Left ? Edge_N : Edge_E);
+ case Edge_SE: return (dir == Dir_Left ? Edge_W : Edge_N);
+ default: assert(0 && "Invalid edge"); return Edge_None;
+ }
+ }
+ else {
+        // A full quad has four edges; it is entered via one edge, and the
+        // other three edges correspond to the left, straight and right
+        // directions.
+ switch (edge) {
+ case Edge_E:
+ return (dir == Dir_Left ? Edge_S :
+ (dir == Dir_Right ? Edge_N : Edge_W));
+ case Edge_N:
+ return (dir == Dir_Left ? Edge_E :
+ (dir == Dir_Right ? Edge_W : Edge_S));
+ case Edge_W:
+ return (dir == Dir_Left ? Edge_N :
+ (dir == Dir_Right ? Edge_S : Edge_E));
+ case Edge_S:
+ return (dir == Dir_Left ? Edge_W :
+ (dir == Dir_Right ? Edge_E : Edge_N));
+ default: assert(0 && "Invalid edge"); return Edge_None;
+ }
+ }
+}
+
+XY QuadContourGenerator::get_point_xy(long point) const
+{
+ assert(point >= 0 && point < _n && "Point index out of bounds.");
+ return XY(_x.data()[static_cast<npy_intp>(point)],
+ _y.data()[static_cast<npy_intp>(point)]);
+}
+
+const double& QuadContourGenerator::get_point_z(long point) const
+{
+ assert(point >= 0 && point < _n && "Point index out of bounds.");
+ return _z.data()[static_cast<npy_intp>(point)];
+}
+
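+// Example (illustrative): for level_index = 1 with only the NW point at or
+// above the level, config is binary 1000 = 8 and the contour enters the
+// quad through Edge_W.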
+Edge QuadContourGenerator::get_quad_start_edge(long quad,
+ unsigned int level_index) const
+{
+ assert(quad >= 0 && quad < _n && "Quad index out of bounds");
+ assert((level_index == 1 || level_index == 2) &&
+ "level index must be 1 or 2");
+ assert(EXISTS_QUAD(quad) && "Quad does not exist");
+
+ unsigned int config = (Z_NW >= level_index) << 3 |
+ (Z_NE >= level_index) << 2 |
+ (Z_SW >= level_index) << 1 |
+ (Z_SE >= level_index);
+
+ // Upper level (level_index == 2) polygons are reversed compared to lower
+ // level ones, i.e. higher values on the right rather than the left.
+ if (level_index == 2)
+ config = 15 - config;
+
+ switch (config) {
+ case 0: return Edge_None;
+ case 1: return Edge_E;
+ case 2: return Edge_S;
+ case 3: return Edge_E;
+ case 4: return Edge_N;
+ case 5: return Edge_N;
+ case 6:
+        // If already identified as a saddle quad then the start edge is
+        // read from the cache.  Otherwise return either of the two valid
+        // start edges; the subsequent call to follow_interior() will
+        // correctly set up the saddle bits in the cache.
+ if (!SADDLE(quad,level_index) || SADDLE_START_SW(quad,level_index))
+ return Edge_S;
+ else
+ return Edge_N;
+ case 7: return Edge_N;
+ case 8: return Edge_W;
+ case 9:
+ // See comment for 6 above.
+ if (!SADDLE(quad,level_index) || SADDLE_START_SW(quad,level_index))
+ return Edge_W;
+ else
+ return Edge_E;
+ case 10: return Edge_S;
+ case 11: return Edge_E;
+ case 12: return Edge_W;
+ case 13: return Edge_W;
+ case 14: return Edge_S;
+ case 15: return Edge_None;
+ default: assert(0 && "Invalid config"); return Edge_None;
+ }
+}
+
+Edge QuadContourGenerator::get_start_edge(long quad,
+ unsigned int level_index) const
+{
+ if (EXISTS_ANY_CORNER(quad))
+ return get_corner_start_edge(quad, level_index);
+ else
+ return get_quad_start_edge(quad, level_index);
+}
+
+void QuadContourGenerator::init_cache_grid(const MaskArray& mask)
+{
+ long i, j, quad;
+
+ if (mask.empty()) {
+ // No mask, easy to calculate quad existence and boundaries together.
+ quad = 0;
+ for (j = 0; j < _ny; ++j) {
+ for (i = 0; i < _nx; ++i, ++quad) {
+ _cache[quad] = 0;
+
+ if (i < _nx-1 && j < _ny-1)
+ _cache[quad] |= MASK_EXISTS_QUAD;
+
+ if ((i % _chunk_size == 0 || i == _nx-1) && j < _ny-1)
+ _cache[quad] |= MASK_BOUNDARY_W;
+
+ if ((j % _chunk_size == 0 || j == _ny-1) && i < _nx-1)
+ _cache[quad] |= MASK_BOUNDARY_S;
+ }
+ }
+ }
+ else {
+ // Casting avoids problem when sizeof(bool) != sizeof(npy_bool).
+ const npy_bool* mask_ptr =
+ reinterpret_cast<const npy_bool*>(mask.data());
+
+ // Have mask so use two stages.
+ // Stage 1, determine if quads/corners exist.
+ quad = 0;
+ for (j = 0; j < _ny; ++j) {
+ for (i = 0; i < _nx; ++i, ++quad) {
+ _cache[quad] = 0;
+
+ if (i < _nx-1 && j < _ny-1) {
+ unsigned int config = mask_ptr[POINT_NW] << 3 |
+ mask_ptr[POINT_NE] << 2 |
+ mask_ptr[POINT_SW] << 1 |
+ mask_ptr[POINT_SE];
+
+ if (_corner_mask) {
+ switch (config) {
+ case 0: _cache[quad] = MASK_EXISTS_QUAD; break;
+ case 1: _cache[quad] = MASK_EXISTS_NW_CORNER; break;
+ case 2: _cache[quad] = MASK_EXISTS_NE_CORNER; break;
+ case 4: _cache[quad] = MASK_EXISTS_SW_CORNER; break;
+ case 8: _cache[quad] = MASK_EXISTS_SE_CORNER; break;
+ default:
+ // Do nothing, quad is masked out.
+ break;
+ }
+ }
+ else if (config == 0)
+ _cache[quad] = MASK_EXISTS_QUAD;
+ }
+ }
+ }
+
+ // Stage 2, calculate W and S boundaries. For each quad use boundary
+ // data already calculated for quads to W and S, so must iterate
+ // through quads in correct order (increasing i and j indices).
+ // Cannot use boundary data for quads to E and N as have not yet
+ // calculated it.
+ quad = 0;
+ for (j = 0; j < _ny; ++j) {
+ for (i = 0; i < _nx; ++i, ++quad) {
+ if (_corner_mask) {
+ bool W_exists_none = (i == 0 || EXISTS_NONE(quad-1));
+ bool S_exists_none = (j == 0 || EXISTS_NONE(quad-_nx));
+ bool W_exists_E_edge = (i > 0 && EXISTS_E_EDGE(quad-1));
+ bool S_exists_N_edge = (j > 0 && EXISTS_N_EDGE(quad-_nx));
+
+ if ((EXISTS_W_EDGE(quad) && W_exists_none) ||
+ (EXISTS_NONE(quad) && W_exists_E_edge) ||
+ (i % _chunk_size == 0 && EXISTS_W_EDGE(quad) &&
+ W_exists_E_edge))
+ _cache[quad] |= MASK_BOUNDARY_W;
+
+ if ((EXISTS_S_EDGE(quad) && S_exists_none) ||
+ (EXISTS_NONE(quad) && S_exists_N_edge) ||
+ (j % _chunk_size == 0 && EXISTS_S_EDGE(quad) &&
+ S_exists_N_edge))
+ _cache[quad] |= MASK_BOUNDARY_S;
+ }
+ else {
+ bool W_exists_quad = (i > 0 && EXISTS_QUAD(quad-1));
+ bool S_exists_quad = (j > 0 && EXISTS_QUAD(quad-_nx));
+
+ if ((EXISTS_QUAD(quad) != W_exists_quad) ||
+ (i % _chunk_size == 0 && EXISTS_QUAD(quad) &&
+ W_exists_quad))
+ _cache[quad] |= MASK_BOUNDARY_W;
+
+ if ((EXISTS_QUAD(quad) != S_exists_quad) ||
+ (j % _chunk_size == 0 && EXISTS_QUAD(quad) &&
+ S_exists_quad))
+ _cache[quad] |= MASK_BOUNDARY_S;
+ }
+ }
+ }
+ }
+}
+
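+// Assign a z-level of 0, 1 or 2 to each point: 0 for z <= lower_level,
+// 1 for lower_level < z <= upper_level and 2 for z > upper_level.  For line
+// contours both levels are the same, so only levels 0 and 1 occur.  Example
+// (illustrative): lower_level = 1.0, upper_level = 2.0 and z = 1.5 give
+// MASK_Z_LEVEL_1.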
+void QuadContourGenerator::init_cache_levels(const double& lower_level,
+ const double& upper_level)
+{
+ assert(upper_level >= lower_level &&
+ "upper and lower levels are wrong way round");
+
+ bool two_levels = (lower_level != upper_level);
+ CacheItem keep_mask =
+ (_corner_mask ? MASK_EXISTS | MASK_BOUNDARY_S | MASK_BOUNDARY_W
+ : MASK_EXISTS_QUAD | MASK_BOUNDARY_S | MASK_BOUNDARY_W);
+
+ if (two_levels) {
+ const double* z_ptr = _z.data();
+ for (long quad = 0; quad < _n; ++quad, ++z_ptr) {
+ _cache[quad] &= keep_mask;
+ if (*z_ptr > upper_level)
+ _cache[quad] |= MASK_Z_LEVEL_2;
+ else if (*z_ptr > lower_level)
+ _cache[quad] |= MASK_Z_LEVEL_1;
+ }
+ }
+ else {
+ const double* z_ptr = _z.data();
+ for (long quad = 0; quad < _n; ++quad, ++z_ptr) {
+ _cache[quad] &= keep_mask;
+ if (*z_ptr > lower_level)
+ _cache[quad] |= MASK_Z_LEVEL_1;
+ }
+ }
+}
+
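+// Linearly interpolate along the edge joining point1 and point2 to find the
+// position at which z equals level.  Example (illustrative): z1 = 0, z2 = 4
+// and level = 1 give fraction = (4-1)/(4-0) = 0.75, so the returned point is
+// xy1*0.75 + xy2*0.25, i.e. a quarter of the way from point1 to point2.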
+XY QuadContourGenerator::interp(
+ long point1, long point2, const double& level) const
+{
+ assert(point1 >= 0 && point1 < _n && "Point index 1 out of bounds.");
+ assert(point2 >= 0 && point2 < _n && "Point index 2 out of bounds.");
+ assert(point1 != point2 && "Identical points");
+ double fraction = (get_point_z(point2) - level) /
+ (get_point_z(point2) - get_point_z(point1));
+ return get_point_xy(point1)*fraction + get_point_xy(point2)*(1.0 - fraction);
+}
+
+bool QuadContourGenerator::is_edge_a_boundary(const QuadEdge& quad_edge) const
+{
+ assert(quad_edge.quad >= 0 && quad_edge.quad < _n &&
+ "Quad index out of bounds");
+ assert(quad_edge.edge != Edge_None && "Invalid edge");
+
+ switch (quad_edge.edge) {
+ case Edge_E: return BOUNDARY_E(quad_edge.quad);
+ case Edge_N: return BOUNDARY_N(quad_edge.quad);
+ case Edge_W: return BOUNDARY_W(quad_edge.quad);
+ case Edge_S: return BOUNDARY_S(quad_edge.quad);
+ case Edge_NE: return EXISTS_SW_CORNER(quad_edge.quad);
+ case Edge_NW: return EXISTS_SE_CORNER(quad_edge.quad);
+ case Edge_SW: return EXISTS_NE_CORNER(quad_edge.quad);
+ case Edge_SE: return EXISTS_NW_CORNER(quad_edge.quad);
+ default: assert(0 && "Invalid edge"); return true;
+ }
+}
+
+void QuadContourGenerator::move_to_next_boundary_edge(QuadEdge& quad_edge) const
+{
+ assert(is_edge_a_boundary(quad_edge) && "QuadEdge is not a boundary");
+
+ long& quad = quad_edge.quad;
+ Edge& edge = quad_edge.edge;
+
+ quad = get_edge_point_index(quad_edge, false);
+
+ // quad is now such that POINT_SW is the end point of the quad_edge passed
+ // to this function.
+
+ // To find the next boundary edge, first attempt to turn left 135 degrees
+ // and if that edge is a boundary then move to it. If not, attempt to turn
+ // left 90 degrees, then left 45 degrees, then straight on, etc, until can
+ // move.
+ // First determine which edge to attempt first.
+ int index = 0;
+ switch (edge) {
+ case Edge_E: index = 0; break;
+ case Edge_SE: index = 1; break;
+ case Edge_S: index = 2; break;
+ case Edge_SW: index = 3; break;
+ case Edge_W: index = 4; break;
+ case Edge_NW: index = 5; break;
+ case Edge_N: index = 6; break;
+ case Edge_NE: index = 7; break;
+ default: assert(0 && "Invalid edge"); break;
+ }
+
+    // If _corner_mask is not set, only the odd indices need to be considered
+    // in the loop below.
+ if (!_corner_mask)
+ ++index;
+
+ // Try each edge in turn until a boundary is found.
+ int start_index = index;
+ do
+ {
+ switch (index) {
+ case 0:
+ if (EXISTS_SE_CORNER(quad-_nx-1)) { // Equivalent to BOUNDARY_NW
+ quad -= _nx+1;
+ edge = Edge_NW;
+ return;
+ }
+ break;
+ case 1:
+ if (BOUNDARY_N(quad-_nx-1)) {
+ quad -= _nx+1;
+ edge = Edge_N;
+ return;
+ }
+ break;
+ case 2:
+ if (EXISTS_SW_CORNER(quad-1)) { // Equivalent to BOUNDARY_NE
+ quad -= 1;
+ edge = Edge_NE;
+ return;
+ }
+ break;
+ case 3:
+ if (BOUNDARY_E(quad-1)) {
+ quad -= 1;
+ edge = Edge_E;
+ return;
+ }
+ break;
+ case 4:
+ if (EXISTS_NW_CORNER(quad)) { // Equivalent to BOUNDARY_SE
+ edge = Edge_SE;
+ return;
+ }
+ break;
+ case 5:
+ if (BOUNDARY_S(quad)) {
+ edge = Edge_S;
+ return;
+ }
+ break;
+ case 6:
+ if (EXISTS_NE_CORNER(quad-_nx)) { // Equivalent to BOUNDARY_SW
+ quad -= _nx;
+ edge = Edge_SW;
+ return;
+ }
+ break;
+ case 7:
+ if (BOUNDARY_W(quad-_nx)) {
+ quad -= _nx;
+ edge = Edge_W;
+ return;
+ }
+ break;
+ default: assert(0 && "Invalid index"); break;
+ }
+
+ if (_corner_mask)
+ index = (index + 1) % 8;
+ else
+ index = (index + 2) % 8;
+ } while (index != start_index);
+
+ assert(0 && "Failed to find next boundary edge");
+}
+
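+// Example (illustrative): leaving a quad through Edge_N moves to the quad to
+// the N (quad + _nx), which is entered through its Edge_S.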
+void QuadContourGenerator::move_to_next_quad(QuadEdge& quad_edge) const
+{
+ assert(quad_edge.quad >= 0 && quad_edge.quad < _n &&
+ "Quad index out of bounds");
+ assert(quad_edge.edge != Edge_None && "Invalid edge");
+
+ // Move from quad_edge.quad to the neighbouring quad in the direction
+ // specified by quad_edge.edge.
+ switch (quad_edge.edge) {
+ case Edge_E: quad_edge.quad += 1; quad_edge.edge = Edge_W; break;
+ case Edge_N: quad_edge.quad += _nx; quad_edge.edge = Edge_S; break;
+ case Edge_W: quad_edge.quad -= 1; quad_edge.edge = Edge_E; break;
+ case Edge_S: quad_edge.quad -= _nx; quad_edge.edge = Edge_N; break;
+ default: assert(0 && "Invalid edge"); break;
+ }
+}
+
+void QuadContourGenerator::single_quad_filled(Contour& contour,
+ long quad,
+ const double& lower_level,
+ const double& upper_level)
+{
+ assert(quad >= 0 && quad < _n && "Quad index out of bounds");
+
+    // The order of checking is important here as a single quad can contain
+    // ContourLines from both the lower and upper levels.  First check the S
+    // edge, then move up the quad to the N edge, checking as required.
+
+ // Possible starts from S boundary.
+ if (BOUNDARY_S(quad) && EXISTS_S_EDGE(quad)) {
+
+ // Lower-level start from S boundary into interior.
+ if (!VISITED_S(quad) && Z_SW >= 1 && Z_SE == 0)
+ contour.push_back(start_filled(quad, Edge_S, 1, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Upper-level start from S boundary into interior.
+ if (!VISITED_S(quad) && Z_SW < 2 && Z_SE == 2)
+ contour.push_back(start_filled(quad, Edge_S, 2, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Lower-level start following S boundary from W to E.
+ if (!VISITED_S(quad) && Z_SW <= 1 && Z_SE == 1)
+ contour.push_back(start_filled(quad, Edge_S, 1, NotHole, Boundary,
+ lower_level, upper_level));
+
+ // Upper-level start following S boundary from W to E.
+ if (!VISITED_S(quad) && Z_SW == 2 && Z_SE == 1)
+ contour.push_back(start_filled(quad, Edge_S, 2, NotHole, Boundary,
+ lower_level, upper_level));
+ }
+
+ // Possible starts from W boundary.
+ if (BOUNDARY_W(quad) && EXISTS_W_EDGE(quad)) {
+
+ // Lower-level start from W boundary into interior.
+ if (!VISITED_W(quad) && Z_NW >= 1 && Z_SW == 0)
+ contour.push_back(start_filled(quad, Edge_W, 1, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Upper-level start from W boundary into interior.
+ if (!VISITED_W(quad) && Z_NW < 2 && Z_SW == 2)
+ contour.push_back(start_filled(quad, Edge_W, 2, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Lower-level start following W boundary from N to S.
+ if (!VISITED_W(quad) && Z_NW <= 1 && Z_SW == 1)
+ contour.push_back(start_filled(quad, Edge_W, 1, NotHole, Boundary,
+ lower_level, upper_level));
+
+ // Upper-level start following W boundary from N to S.
+ if (!VISITED_W(quad) && Z_NW == 2 && Z_SW == 1)
+ contour.push_back(start_filled(quad, Edge_W, 2, NotHole, Boundary,
+ lower_level, upper_level));
+ }
+
+ // Possible starts from NE boundary.
+ if (EXISTS_SW_CORNER(quad)) { // i.e. BOUNDARY_NE
+
+ // Lower-level start following NE boundary from SE to NW, hole.
+ if (!VISITED_CORNER(quad) && Z_NW == 1 && Z_SE == 1)
+ contour.push_back(start_filled(quad, Edge_NE, 1, Hole, Boundary,
+ lower_level, upper_level));
+ }
+ // Possible starts from SE boundary.
+ else if (EXISTS_NW_CORNER(quad)) { // i.e. BOUNDARY_SE
+
+ // Lower-level start from N to SE.
+ if (!VISITED(quad,1) && Z_NW == 0 && Z_SW == 0 && Z_NE >= 1)
+ contour.push_back(start_filled(quad, Edge_N, 1, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Upper-level start from SE to N, hole.
+ if (!VISITED(quad,2) && Z_NW < 2 && Z_SW < 2 && Z_NE == 2)
+ contour.push_back(start_filled(quad, Edge_SE, 2, Hole, Interior,
+ lower_level, upper_level));
+
+ // Upper-level start from N to SE.
+ if (!VISITED(quad,2) && Z_NW == 2 && Z_SW == 2 && Z_NE < 2)
+ contour.push_back(start_filled(quad, Edge_N, 2, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Lower-level start from SE to N, hole.
+ if (!VISITED(quad,1) && Z_NW >= 1 && Z_SW >= 1 && Z_NE == 0)
+ contour.push_back(start_filled(quad, Edge_SE, 1, Hole, Interior,
+ lower_level, upper_level));
+ }
+ // Possible starts from NW boundary.
+ else if (EXISTS_SE_CORNER(quad)) { // i.e. BOUNDARY_NW
+
+ // Lower-level start from NW to E.
+ if (!VISITED(quad,1) && Z_SW == 0 && Z_SE == 0 && Z_NE >= 1)
+ contour.push_back(start_filled(quad, Edge_NW, 1, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Upper-level start from E to NW, hole.
+ if (!VISITED(quad,2) && Z_SW < 2 && Z_SE < 2 && Z_NE == 2)
+ contour.push_back(start_filled(quad, Edge_E, 2, Hole, Interior,
+ lower_level, upper_level));
+
+ // Upper-level start from NW to E.
+ if (!VISITED(quad,2) && Z_SW == 2 && Z_SE == 2 && Z_NE < 2)
+ contour.push_back(start_filled(quad, Edge_NW, 2, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Lower-level start from E to NW, hole.
+ if (!VISITED(quad,1) && Z_SW >= 1 && Z_SE >= 1 && Z_NE == 0)
+ contour.push_back(start_filled(quad, Edge_E, 1, Hole, Interior,
+ lower_level, upper_level));
+ }
+ // Possible starts from SW boundary.
+ else if (EXISTS_NE_CORNER(quad)) { // i.e. BOUNDARY_SW
+
+ // Lower-level start from SW boundary into interior.
+ if (!VISITED_CORNER(quad) && Z_NW >= 1 && Z_SE == 0)
+ contour.push_back(start_filled(quad, Edge_SW, 1, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Upper-level start from SW boundary into interior.
+ if (!VISITED_CORNER(quad) && Z_NW < 2 && Z_SE == 2)
+ contour.push_back(start_filled(quad, Edge_SW, 2, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Lower-level start following SW boundary from NW to SE.
+ if (!VISITED_CORNER(quad) && Z_NW <= 1 && Z_SE == 1)
+ contour.push_back(start_filled(quad, Edge_SW, 1, NotHole, Boundary,
+ lower_level, upper_level));
+
+ // Upper-level start following SW boundary from NW to SE.
+ if (!VISITED_CORNER(quad) && Z_NW == 2 && Z_SE == 1)
+ contour.push_back(start_filled(quad, Edge_SW, 2, NotHole, Boundary,
+ lower_level, upper_level));
+ }
+
+ // A full (unmasked) quad can only have a start on the NE corner, i.e. from
+ // N to E (lower level) or E to N (upper level). Any other start will have
+ // already been created in a call to this function for a prior quad so we
+ // don't need to test for it again here.
+ //
+ // The situation is complicated by the possibility that the quad is a
+ // saddle quad, in which case a contour line starting on the N could leave
+ // by either the W or the E. We only need to consider those leaving E.
+ //
+ // A NE corner can also have a N to E or E to N start.
+ if (EXISTS_QUAD(quad) || EXISTS_NE_CORNER(quad)) {
+
+ // Lower-level start from N to E.
+ if (!VISITED(quad,1) && Z_NW == 0 && Z_SE == 0 && Z_NE >= 1 &&
+ (!SADDLE(quad,1) || SADDLE_LEFT(quad,1)))
+ contour.push_back(start_filled(quad, Edge_N, 1, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Upper-level start from E to N, hole.
+ if (!VISITED(quad,2) && Z_NW < 2 && Z_SE < 2 && Z_NE == 2 &&
+ (!SADDLE(quad,2) || !SADDLE_LEFT(quad,2)))
+ contour.push_back(start_filled(quad, Edge_E, 2, Hole, Interior,
+ lower_level, upper_level));
+
+ // Upper-level start from N to E.
+ if (!VISITED(quad,2) && Z_NW == 2 && Z_SE == 2 && Z_NE < 2 &&
+ (!SADDLE(quad,2) || SADDLE_LEFT(quad,2)))
+ contour.push_back(start_filled(quad, Edge_N, 2, NotHole, Interior,
+ lower_level, upper_level));
+
+ // Lower-level start from E to N, hole.
+ if (!VISITED(quad,1) && Z_NW >= 1 && Z_SE >= 1 && Z_NE == 0 &&
+ (!SADDLE(quad,1) || !SADDLE_LEFT(quad,1)))
+ contour.push_back(start_filled(quad, Edge_E, 1, Hole, Interior,
+ lower_level, upper_level));
+
+ // All possible contours passing through the interior of this quad
+ // should have already been created, so assert this.
+ assert((VISITED(quad,1) || get_start_edge(quad, 1) == Edge_None) &&
+ "Found start of contour that should have already been created");
+ assert((VISITED(quad,2) || get_start_edge(quad, 2) == Edge_None) &&
+ "Found start of contour that should have already been created");
+ }
+
+ // Lower-level start following N boundary from E to W, hole.
+ // This is required for an internal masked region which is a hole in a
+ // surrounding contour line.
+ if (BOUNDARY_N(quad) && EXISTS_N_EDGE(quad) &&
+ !VISITED_S(quad+_nx) && Z_NW == 1 && Z_NE == 1)
+ contour.push_back(start_filled(quad, Edge_N, 1, Hole, Boundary,
+ lower_level, upper_level));
+}
+
+ContourLine* QuadContourGenerator::start_filled(
+ long quad,
+ Edge edge,
+ unsigned int start_level_index,
+ HoleOrNot hole_or_not,
+ BoundaryOrInterior boundary_or_interior,
+ const double& lower_level,
+ const double& upper_level)
+{
+ assert(quad >= 0 && quad < _n && "Quad index out of bounds");
+ assert(edge != Edge_None && "Invalid edge");
+ assert((start_level_index == 1 || start_level_index == 2) &&
+ "start level index must be 1 or 2");
+
+ ContourLine* contour_line = new ContourLine(hole_or_not == Hole);
+ if (hole_or_not == Hole) {
+ // Find and set parent ContourLine.
+ ContourLine* parent = _parent_cache.get_parent(quad + 1);
+ assert(parent != 0 && "Failed to find parent ContourLine");
+ contour_line->set_parent(parent);
+ parent->add_child(contour_line);
+ }
+
+ QuadEdge quad_edge(quad, edge);
+ const QuadEdge start_quad_edge(quad_edge);
+ unsigned int level_index = start_level_index;
+
+ // If starts on interior, can only finish on interior.
+ // If starts on boundary, can only finish on boundary.
+
+ while (true) {
+ if (boundary_or_interior == Interior) {
+ double level = (level_index == 1 ? lower_level : upper_level);
+ follow_interior(*contour_line, quad_edge, level_index, level,
+ false, &start_quad_edge, start_level_index, true);
+ }
+ else {
+ level_index = follow_boundary(
+ *contour_line, quad_edge, lower_level,
+ upper_level, level_index, start_quad_edge);
+ }
+
+ if (quad_edge == start_quad_edge && (boundary_or_interior == Boundary ||
+ level_index == start_level_index))
+ break;
+
+ if (boundary_or_interior == Boundary)
+ boundary_or_interior = Interior;
+ else
+ boundary_or_interior = Boundary;
+ }
+
+ return contour_line;
+}
+
+bool QuadContourGenerator::start_line(
+ PyObject* vertices_list, long quad, Edge edge, const double& level)
+{
+ assert(vertices_list != 0 && "Null python vertices list");
+ assert(is_edge_a_boundary(QuadEdge(quad, edge)) &&
+ "QuadEdge is not a boundary");
+
+ QuadEdge quad_edge(quad, edge);
+ ContourLine contour_line(false);
+ follow_interior(contour_line, quad_edge, 1, level, true, 0, 1, false);
+ append_contour_line_to_vertices(contour_line, vertices_list);
+ return VISITED(quad,1);
+}
+
+void QuadContourGenerator::write_cache(bool grid_only) const
+{
+ std::cout << "-----------------------------------------------" << std::endl;
+ for (long quad = 0; quad < _n; ++quad)
+ write_cache_quad(quad, grid_only);
+ std::cout << "-----------------------------------------------" << std::endl;
+}
+
+void QuadContourGenerator::write_cache_quad(long quad, bool grid_only) const
+{
+ long j = quad / _nx;
+ long i = quad - j*_nx;
+ std::cout << quad << ": i=" << i << " j=" << j
+ << " EXISTS=" << EXISTS_QUAD(quad);
+ if (_corner_mask)
+ std::cout << " CORNER=" << EXISTS_SW_CORNER(quad) << EXISTS_SE_CORNER(quad)
+ << EXISTS_NW_CORNER(quad) << EXISTS_NE_CORNER(quad);
+ std::cout << " BNDY=" << (BOUNDARY_S(quad)>0) << (BOUNDARY_W(quad)>0);
+ if (!grid_only) {
+ std::cout << " Z=" << Z_LEVEL(quad)
+ << " SAD=" << (SADDLE(quad,1)>0) << (SADDLE(quad,2)>0)
+ << " LEFT=" << (SADDLE_LEFT(quad,1)>0) << (SADDLE_LEFT(quad,2)>0)
+ << " NW=" << (SADDLE_START_SW(quad,1)>0) << (SADDLE_START_SW(quad,2)>0)
+ << " VIS=" << (VISITED(quad,1)>0) << (VISITED(quad,2)>0)
+ << (VISITED_S(quad)>0) << (VISITED_W(quad)>0)
+ << (VISITED_CORNER(quad)>0);
+ }
+ std::cout << std::endl;
+}
diff --git a/contrib/python/matplotlib/py2/src/_contour.h b/contrib/python/matplotlib/py2/src/_contour.h
new file mode 100644
index 00000000000..e01c3bc732b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_contour.h
@@ -0,0 +1,530 @@
+/*
+ * QuadContourGenerator
+ * --------------------
+ * A QuadContourGenerator generates contours for scalar fields defined on
+ * quadrilateral grids. A single QuadContourGenerator object can create both
+ * line contours (at single levels) and filled contours (between pairs of
+ * levels) for the same field.
+ *
+ * A field to be contoured has nx, ny points in the x- and y-directions
+ * respectively. The quad grid is defined by x and y arrays of shape(ny, nx),
+ * and the field itself is the z array also of shape(ny, nx). There is an
+ * optional boolean mask; if it exists then it also has shape(ny, nx). The
+ * mask applies to grid points rather than quads.
+ *
+ * How quads are masked based on the point mask is determined by the boolean
+ * 'corner_mask' flag. If false then any quad that has one or more of its four
+ * corner points masked is itself masked. If true the behaviour is the same
+ * except that any quad which has exactly one of its four corner points masked
+ * has only the triangular corner (half of the quad) adjacent to that point
+ * masked; the opposite triangular corner has three unmasked points and is not
+ * masked.
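+ * For example, with corner_mask true, a quad whose NW point is the only
+ * masked one of its four corner points keeps only its SE triangular half
+ * (formed by the SW, SE and NE points); this is the MASK_EXISTS_SE_CORNER
+ * case in init_cache_grid().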
+ *
+ * By default the entire domain of nx*ny points is contoured together which can
+ * result in some very long polygons. The alternative is to break up the
+ * domain into subdomains or 'chunks' of smaller size, each of which is
+ * independently contoured. The size of these chunks is controlled by the
+ * 'nchunk' (or 'chunk_size') parameter. Chunking not only results in shorter
+ * polygons but also requires slightly less RAM. It can result in rendering
+ * artifacts though, depending on backend, antialiased flag and alpha value.
+ *
+ * Notation
+ * --------
+ * i and j are array indices in the x- and y-directions respectively. Although
+ * a single element of an array z can be accessed using z[j][i] or z(j,i), it
+ * is often convenient to use the single quad index z[quad], where
+ * quad = i + j*nx
+ * and hence
+ * i = quad % nx
+ * j = quad / nx
+ *
+ * Rather than referring to x- and y-directions, compass directions are used
+ * instead such that W, E, S, N refer to the -x, +x, -y, +y directions
+ * respectively. To move one quad to the E you would therefore add 1 to the
+ * quad index, to move one quad to the N you would add nx to the quad index.
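+ * For example, on a grid with nx = 5 the quad with i = 2, j = 1 has
+ * quad = 2 + 1*5 = 7; its neighbour to the E is quad 8 and its neighbour to
+ * the N is quad 12.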
+ *
+ * Cache
+ * -----
+ * Lots of information that is reused during contouring is stored as single
+ * bits in a mesh-sized cache, indexed by quad. Each quad's cache entry stores
+ * information about the quad itself such as if it is masked, and about the
+ * point at the SW corner of the quad, and about the W and S edges. Hence
+ * information about each point and each edge is only stored once in the cache.
+ *
+ * Cache information is divided into two types: that which is constant over the
+ * lifetime of the QuadContourGenerator, and that which changes for each
+ * contouring operation. The former is all grid-specific information such
+ * as quad and corner masks, and which edges are boundaries, either between
+ * masked and non-masked regions or between adjacent chunks. The latter
+ * includes whether points lie above or below the current contour levels, plus
+ * some flags to indicate how the contouring is progressing.
+ *
+ * Line Contours
+ * -------------
+ * A line contour connects points with the same z-value. Each point of such a
+ * contour occurs on an edge of the grid, at a point linearly interpolated to
+ * the contour z-level from the z-values at the end points of the edge. The
+ * direction of a line contour is such that higher values are to the left of
+ * the contour, so any edge that the contour passes through will have a left-
+ * hand end point with z > contour level and a right-hand end point with
+ * z <= contour level.
+ *
+ * Line contours are of two types. Firstly there are open line strips that
+ * start on a boundary, traverse the interior of the domain and end on a
+ * boundary. Secondly there are closed line loops that occur completely within
+ * the interior of the domain and do not touch a boundary.
+ *
+ * The QuadContourGenerator makes two sweeps through the grid to generate line
+ * contours for a particular level. In the first sweep it looks only for start
+ * points that occur on boundaries, and when it finds one it follows the
+ * contour through the interior until it finishes on another boundary edge.
+ * Each quad that is visited by the algorithm has a 'visited' flag set in the
+ * cache to indicate that the quad does not need to be visited again. In the
+ * second sweep all non-visited quads are checked to see if they contain part
+ * of an interior closed loop, and again each time one is found it is followed
+ * through the domain interior until it returns back to its start quad and is
+ * therefore completed.
+ *
+ * The situation is complicated by saddle quads that have two opposite corners
+ * with z >= contour level and the other two corners with z < contour level.
+ * These therefore contain two segments of a line contour, and the visited
+ * flags take account of this by only being set on the second visit. On the
+ * first visit a number of saddle flags are set in the cache to indicate which
+ * one of the two segments has been completed so far.
+ *
+ * Filled Contours
+ * ---------------
+ * Filled contours are produced between two contour levels and are always
+ * closed polygons. They can occur completely within the interior of the
+ * domain without touching a boundary, following either the lower or upper
+ * contour levels. Those on the lower level are exactly like interior line
+ * contours with higher values on the left. Those on the upper level are
+ * reversed such that higher values are on the right.
+ *
+ * Filled contours can also involve a boundary in which case they consist of
+ * one or more sections along a boundary and one or more sections through the
+ * interior. Interior sections can be on either level, and again those on the
+ * upper level have higher values on the right. Boundary sections can remain
+ * on either contour level or switch between the two.
+ *
+ * Once the start of a filled contour is found, the algorithm is similar to
+ * that for line contours in that it follows the contour to its end, which
+ * because filled contours are always closed polygons will be by returning
+ * back to the start. However, because two levels must be considered, each
+ * level has its own set of saddle and visited flags and indeed some extra
+ * visited flags for boundary edges.
+ *
+ * The major complication for filled contours is that some polygons can be
+ * holes (with points ordered clockwise) within other polygons (with points
+ * ordered anticlockwise). When it comes to rendering filled contours each
+ * non-hole polygon must be rendered along with its zero or more contained
+ * holes or the rendering will not be correct. The filled contour finding
+ * algorithm could progress pretty much as the line contour algorithm does,
+ * taking each polygon as it is found, but then at the end there would have to
+ * be an extra step to identify the parent non-hole polygon for each hole.
+ * This is not a particularly onerous task but it does not scale well and can
+ * easily dominate the execution time of the contour finding for even modest
+ * problems. It is much better to identify each hole's parent non-hole during
+ * the sweep algorithm.
+ *
+ * This requirement dictates the order that filled contours are identified. As
+ * the algorithm sweeps up through the grid, every time a polygon passes
+ * through a quad a ParentCache object is updated with the new possible parent.
+ * When a new hole polygon is started, the ParentCache is used to find the
+ * first possible parent in the same quad or to the S of it. Great care is
+ * needed each time a new quad is checked to see if a new polygon should be
+ * started, as a single quad can have multiple polygon starts, e.g. a quad
+ * could be a saddle quad for both lower and upper contour levels, meaning it
+ * has four contour line segments passing through it which could all be from
+ * different polygons. The S-most polygon must be started first, then the next
+ * S-most and so on until the N-most polygon is started in that quad.
+ */
+#ifndef _CONTOUR_H
+#define _CONTOUR_H
+
+#include "src/numpy_cpp.h"
+#include <stdint.h>
+#include <list>
+#include <iostream>
+#include <vector>
+
+
+// Edge of a quad, including diagonal edges of masked quads if _corner_mask is true.
+typedef enum
+{
+ // Values are listed explicitly so they are easier to check when debugging.
+ Edge_None = -1,
+ Edge_E = 0,
+ Edge_N = 1,
+ Edge_W = 2,
+ Edge_S = 3,
+ // The following are only used if _corner_mask is true.
+ Edge_NE = 4,
+ Edge_NW = 5,
+ Edge_SW = 6,
+ Edge_SE = 7
+} Edge;
+
+// Combination of a quad and an edge of that quad.
+// An invalid quad edge has quad of -1.
+struct QuadEdge
+{
+ QuadEdge();
+ QuadEdge(long quad_, Edge edge_);
+ bool operator<(const QuadEdge& other) const;
+ bool operator==(const QuadEdge& other) const;
+ bool operator!=(const QuadEdge& other) const;
+ friend std::ostream& operator<<(std::ostream& os,
+ const QuadEdge& quad_edge);
+
+ long quad;
+ Edge edge;
+};
+
+// 2D point with x,y coordinates.
+struct XY
+{
+ XY();
+ XY(const double& x_, const double& y_);
+ bool operator==(const XY& other) const;
+ bool operator!=(const XY& other) const;
+ XY operator*(const double& multiplier) const;
+ const XY& operator+=(const XY& other);
+ const XY& operator-=(const XY& other);
+ XY operator+(const XY& other) const;
+ XY operator-(const XY& other) const;
+ friend std::ostream& operator<<(std::ostream& os, const XY& xy);
+
+ double x, y;
+};
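+
+// Illustrative sketch only, not part of the upstream header: the edge
+// interpolation described in the overview at the top of this file. Given the
+// two end points of a grid edge, their z-values (assumed unequal, with the
+// contour level lying between them), return the point at which the contour
+// crosses the edge. The real implementation is QuadContourGenerator::interp().
+static inline XY sketch_edge_interp(const XY& p0, double z0,
+                                    const XY& p1, double z1,
+                                    double level)
+{
+    double frac = (level - z0) / (z1 - z0);   // 0 at p0, 1 at p1.
+    return p0*(1.0 - frac) + p1*frac;
+}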
+
+// A single line of a contour, which may be a closed line loop or an open line
+// strip. push_back() avoids appending a point identical to the previous one.
+// A ContourLine is either a hole (points ordered clockwise) or it is not
+// (points ordered anticlockwise). Each hole has a parent ContourLine that is
+// not a hole; each non-hole contains zero or more child holes. A non-hole and
+// its child holes must be rendered together to obtain the correct results.
+class ContourLine : public std::vector<XY>
+{
+public:
+ typedef std::list<ContourLine*> Children;
+
+ ContourLine(bool is_hole);
+ void add_child(ContourLine* child);
+ void clear_parent();
+ const Children& get_children() const;
+ const ContourLine* get_parent() const;
+ ContourLine* get_parent();
+ bool is_hole() const;
+ void push_back(const XY& point);
+ void set_parent(ContourLine* parent);
+ void write() const;
+
+private:
+ bool _is_hole;
+ ContourLine* _parent; // Only set if is_hole, not owned.
+ Children _children; // Only set if !is_hole, not owned.
+};
+
+
+// A Contour is a collection of zero or more ContourLines.
+class Contour : public std::vector<ContourLine*>
+{
+public:
+ Contour();
+ virtual ~Contour();
+ void delete_contour_lines();
+ void write() const;
+};
+
+
+// Single chunk of ContourLine parents, indexed by quad. As a chunk's filled
+// contours are created, the ParentCache is updated each time a ContourLine
+// passes through each quad. When a new ContourLine is created, if it is a
+// hole its parent ContourLine is read from the ParentCache by looking at the
+// start quad, then each quad to the S in turn until a non-zero ContourLine is
+// found.
+class ParentCache
+{
+public:
+ ParentCache(long nx, long x_chunk_points, long y_chunk_points);
+ ContourLine* get_parent(long quad);
+ void set_chunk_starts(long istart, long jstart);
+ void set_parent(long quad, ContourLine& contour_line);
+
+private:
+ long quad_to_index(long quad) const;
+
+ long _nx;
+ long _x_chunk_points, _y_chunk_points; // Number of points not quads.
+ std::vector<ContourLine*> _lines; // Not owned.
+ long _istart, _jstart;
+};
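+
+// Illustrative sketch only, not part of the upstream header: the S-ward
+// parent lookup described above, assuming quads are numbered row-major so
+// that quad - nx is the quad immediately to the S, and that a parent is
+// guaranteed to exist somewhere to the S (as the sweep order ensures). The
+// real lookup, which also handles chunk offsets, is ParentCache::get_parent().
+static inline ContourLine* sketch_find_parent(
+    const std::vector<ContourLine*>& cache, long quad, long nx)
+{
+    while (cache[quad] == 0)    // No ContourLine has passed through this quad yet.
+        quad -= nx;             // Step one row of quads to the S.
+    return cache[quad];
+}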
+
+
+// See overview of algorithm at top of file.
+class QuadContourGenerator
+{
+public:
+ typedef numpy::array_view<const double, 2> CoordinateArray;
+ typedef numpy::array_view<const bool, 2> MaskArray;
+
+ // Constructor with optional mask.
+ // x, y, z: double arrays of shape (ny,nx).
+ // mask: boolean array, either empty (if no mask), or of shape (ny,nx).
+ // corner_mask: flag for different masking behaviour.
+ // chunk_size: 0 for no chunking, or a positive integer giving the size of
+ // the chunks that the domain is subdivided into.
+ QuadContourGenerator(const CoordinateArray& x,
+ const CoordinateArray& y,
+ const CoordinateArray& z,
+ const MaskArray& mask,
+ bool corner_mask,
+ long chunk_size);
+
+ // Destructor.
+ ~QuadContourGenerator();
+
+ // Create and return polygons for a line (i.e. non-filled) contour at the
+ // specified level.
+ PyObject* create_contour(const double& level);
+
+ // Create and return polygons for a filled contour between the two
+ // specified levels.
+ PyObject* create_filled_contour(const double& lower_level,
+ const double& upper_level);
+
+private:
+ // Typedef for following either a boundary of the domain or the interior;
+ // clearer than using a boolean.
+ typedef enum
+ {
+ Boundary,
+ Interior
+ } BoundaryOrInterior;
+
+ // Typedef for direction of movement from one quad to the next.
+ typedef enum
+ {
+ Dir_Right = -1,
+ Dir_Straight = 0,
+ Dir_Left = +1
+ } Dir;
+
+ // Typedef for a polygon being a hole or not; clearer than using a boolean.
+ typedef enum
+ {
+ NotHole,
+ Hole
+ } HoleOrNot;
+
+ // Append a C++ ContourLine to the end of a python list. Used for line
+ // contours where each ContourLine is converted to a separate numpy array
+ // of (x,y) points.
+ // Clears the ContourLine too.
+ void append_contour_line_to_vertices(ContourLine& contour_line,
+ PyObject* vertices_list) const;
+
+ // Append a C++ Contour to the end of two python lists. Used for filled
+ // contours where each non-hole ContourLine and its child holes are
+ // represented by a numpy array of (x,y) points and a second numpy array of
+ // 'kinds' or 'codes' that indicates where the points array is split into
+ // individual polygons.
+ // Clears the Contour too, freeing each ContourLine as soon as possible
+ // for minimum RAM usage.
+ void append_contour_to_vertices_and_codes(Contour& contour,
+ PyObject* vertices_list,
+ PyObject* codes_list) const;
+
+ // Return number of chunks that fit in the specified point_count.
+ long calc_chunk_count(long point_count) const;
+
+ // Return the point on the specified QuadEdge that intersects the specified
+ // level.
+ XY edge_interp(const QuadEdge& quad_edge, const double& level);
+
+ // Follow a contour along a boundary, appending points to the ContourLine
+ // as it progresses. Only called for filled contours. Stops when the
+ // contour leaves the boundary to move into the interior of the domain, or
+ // when the start_quad_edge is reached in which case the ContourLine is a
+ // completed closed loop. Always adds the end point of each boundary edge
+ // to the ContourLine, regardless of whether moving to another boundary
+ // edge or leaving the boundary into the interior. Never adds the start
+ // point of the first boundary edge to the ContourLine.
+ // contour_line: ContourLine to append points to.
+ // quad_edge: on entry the QuadEdge to start from, on exit the QuadEdge
+ // that is stopped on.
+ // lower_level: lower contour z-value.
+ // upper_level: upper contour z-value.
+ // level_index: level index started on (1 = lower, 2 = upper level).
+ // start_quad_edge: QuadEdge that the ContourLine started from, which is
+ // used to check if the ContourLine is finished.
+ // Returns the end level_index.
+ unsigned int follow_boundary(ContourLine& contour_line,
+ QuadEdge& quad_edge,
+ const double& lower_level,
+ const double& upper_level,
+ unsigned int level_index,
+ const QuadEdge& start_quad_edge);
+
+ // Follow a contour across the interior of the domain, appending points to
+ // the ContourLine as it progresses. Called for both line and filled
+ // contours. Stops when the contour reaches a boundary or, if the
+ // start_quad_edge is specified, when quad_edge == start_quad_edge and
+ // level_index == start_level_index. Always adds the end point of each
+ // quad traversed to the ContourLine; only adds the start point of the
+ // first quad if want_initial_point flag is true.
+ // contour_line: ContourLine to append points to.
+ // quad_edge: on entry the QuadEdge to start from, on exit the QuadEdge
+ // that is stopped on.
+ // level_index: level index started on (1 = lower, 2 = upper level).
+ // level: contour z-value.
+ // want_initial_point: whether to append the initial point to the
+ // ContourLine or not.
+ // start_quad_edge: the QuadEdge that the ContourLine started from to
+ // check if the ContourLine is finished, or 0 if no check should occur.
+ // start_level_index: the level_index that the ContourLine started from.
+ // set_parents: whether to update the ParentCache as it progresses or not.
+ // This is true for filled contours, false for line contours.
+ void follow_interior(ContourLine& contour_line,
+ QuadEdge& quad_edge,
+ unsigned int level_index,
+ const double& level,
+ bool want_initial_point,
+ const QuadEdge* start_quad_edge,
+ unsigned int start_level_index,
+ bool set_parents);
+
+ // Return the index limits of a particular chunk.
+ void get_chunk_limits(long ijchunk,
+ long& ichunk,
+ long& jchunk,
+ long& istart,
+ long& iend,
+ long& jstart,
+ long& jend);
+
+ // Check if a contour starts within the specified corner quad on the
+ // specified level_index, and if so return the start edge. Otherwise
+ // return Edge_None.
+ Edge get_corner_start_edge(long quad, unsigned int level_index) const;
+
+ // Return index of point at start or end of specified QuadEdge, assuming
+ // anticlockwise ordering around non-masked quads.
+ long get_edge_point_index(const QuadEdge& quad_edge, bool start) const;
+
+ // Return the edge to exit a quad from, given the specified entry quad_edge
+ // and direction to move in.
+ Edge get_exit_edge(const QuadEdge& quad_edge, Dir dir) const;
+
+ // Return the (x,y) coordinates of the specified point index.
+ XY get_point_xy(long point) const;
+
+ // Return the z-value of the specified point index.
+ const double& get_point_z(long point) const;
+
+ // Check if a contour starts within the specified non-corner quad on the
+ // specified level_index, and if so return the start edge. Otherwise
+ // return Edge_None.
+ Edge get_quad_start_edge(long quad, unsigned int level_index) const;
+
+ // Check if a contour starts within the specified quad, whether it is a
+ // corner or a full quad, and if so return the start edge. Otherwise
+ // return Edge_None.
+ Edge get_start_edge(long quad, unsigned int level_index) const;
+
+ // Initialise the cache to contain grid information that is constant
+ // across the lifetime of this object, i.e. does not vary between calls to
+ // create_contour() and create_filled_contour().
+ void init_cache_grid(const MaskArray& mask);
+
+ // Initialise the cache with information that is specific to contouring the
+ // specified two levels. The levels are the same for contour lines,
+ // different for filled contours.
+ void init_cache_levels(const double& lower_level,
+ const double& upper_level);
+
+ // Return the (x,y) point at which the level intersects the line connecting
+ // the two specified point indices.
+ XY interp(long point1, long point2, const double& level) const;
+
+ // Return true if the specified QuadEdge is a boundary, i.e. is either an
+ // edge between a masked and non-masked quad/corner or is a chunk boundary.
+ bool is_edge_a_boundary(const QuadEdge& quad_edge) const;
+
+ // Follow a boundary from one QuadEdge to the next in an anticlockwise
+ // manner around the non-masked region.
+ void move_to_next_boundary_edge(QuadEdge& quad_edge) const;
+
+ // Move from the quad specified by quad_edge.quad to the neighbouring quad
+ // by crossing the edge specified by quad_edge.edge.
+ void move_to_next_quad(QuadEdge& quad_edge) const;
+
+ // Check for filled contours starting within the specified quad and
+ // complete any that are found, appending them to the specified Contour.
+ void single_quad_filled(Contour& contour,
+ long quad,
+ const double& lower_level,
+ const double& upper_level);
+
+ // Start and complete a filled contour line.
+ // quad: index of quad to start ContourLine in.
+ // edge: edge of quad to start ContourLine from.
+ // start_level_index: the level_index that the ContourLine starts from.
+ // hole_or_not: whether the ContourLine is a hole or not.
+ // boundary_or_interior: whether the ContourLine starts on a boundary or
+ // the interior.
+ // lower_level: lower contour z-value.
+ // upper_level: upper contour z-value.
+ // Returns newly created ContourLine.
+ ContourLine* start_filled(long quad,
+ Edge edge,
+ unsigned int start_level_index,
+ HoleOrNot hole_or_not,
+ BoundaryOrInterior boundary_or_interior,
+ const double& lower_level,
+ const double& upper_level);
+
+ // Start and complete a line contour that both starts and ends on a
+ // boundary, traversing the interior of the domain.
+ // vertices_list: Python list that the ContourLine should be appended to.
+ // quad: index of quad to start ContourLine in.
+ // edge: boundary edge to start ContourLine from.
+ // level: contour z-value.
+ // Returns true if the start quad does not need to be visited again, i.e.
+ // VISITED(quad,1).
+ bool start_line(PyObject* vertices_list,
+ long quad,
+ Edge edge,
+ const double& level);
+
+ // Debug function that writes the cache status to stdout.
+ void write_cache(bool grid_only = false) const;
+
+ // Debug function that writes the cache status for a single quad to
+ // stdout.
+ void write_cache_quad(long quad, bool grid_only) const;
+
+
+
+ // Note that mask is not stored as once it has been used to initialise the
+ // cache it is no longer needed.
+ CoordinateArray _x, _y, _z;
+ long _nx, _ny; // Number of points in each direction.
+ long _n; // Total number of points (and hence quads).
+
+ bool _corner_mask;
+ long _chunk_size; // Number of quads per chunk (not points).
+ // Always > 0, unlike python nchunk which is 0
+ // for no chunking.
+
+ long _nxchunk, _nychunk; // Number of chunks in each direction.
+ long _chunk_count; // Total number of chunks.
+
+ typedef uint32_t CacheItem;
+ CacheItem* _cache;
+
+ ParentCache _parent_cache; // On W quad sides.
+};
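+
+// Illustrative call sequence only, not part of the upstream header. It
+// assumes that x, y, z and mask array views of shape (ny,nx) have already
+// been built from the corresponding numpy arrays, as described for the
+// constructor above:
+//
+//     QuadContourGenerator gen(x, y, z, mask,
+//                              /* corner_mask */ true,
+//                              /* chunk_size  */ 0);
+//     PyObject* line_polygons   = gen.create_contour(0.5);
+//     PyObject* filled_polygons = gen.create_filled_contour(0.25, 0.75);
+//
+// The returned Python objects hold the contour vertices (plus, for filled
+// contours, the accompanying path codes) consumed by the Python-level
+// contouring code.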
+
+#endif // _CONTOUR_H
diff --git a/contrib/python/matplotlib/py2/src/_contour_wrapper.cpp b/contrib/python/matplotlib/py2/src/_contour_wrapper.cpp
new file mode 100644
index 00000000000..eedc8a1aec2
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_contour_wrapper.cpp
@@ -0,0 +1,203 @@
+#include "src/_contour.h"
+#include "src/mplutils.h"
+#include "src/py_exceptions.h"
+
+/* QuadContourGenerator */
+
+typedef struct
+{
+ PyObject_HEAD
+ QuadContourGenerator* ptr;
+} PyQuadContourGenerator;
+
+static PyTypeObject PyQuadContourGeneratorType;
+
+static PyObject* PyQuadContourGenerator_new(PyTypeObject* type, PyObject* args, PyObject* kwds)
+{
+ PyQuadContourGenerator* self;
+ self = (PyQuadContourGenerator*)type->tp_alloc(type, 0);
+ self->ptr = NULL;
+ return (PyObject*)self;
+}
+
+const char* PyQuadContourGenerator_init__doc__ =
+ "QuadContourGenerator(x, y, z, mask, corner_mask, chunk_size)\n"
+ "\n"
+ "Create a new C++ QuadContourGenerator object\n";
+
+static int PyQuadContourGenerator_init(PyQuadContourGenerator* self, PyObject* args, PyObject* kwds)
+{
+ QuadContourGenerator::CoordinateArray x, y, z;
+ QuadContourGenerator::MaskArray mask;
+ int corner_mask;
+ long chunk_size;
+
+ if (!PyArg_ParseTuple(args, "O&O&O&O&il",
+ &x.converter_contiguous, &x,
+ &y.converter_contiguous, &y,
+ &z.converter_contiguous, &z,
+ &mask.converter_contiguous, &mask,
+ &corner_mask,
+ &chunk_size)) {
+ return -1;
+ }
+
+ if (x.empty() || y.empty() || z.empty() ||
+ y.dim(0) != x.dim(0) || z.dim(0) != x.dim(0) ||
+ y.dim(1) != x.dim(1) || z.dim(1) != x.dim(1)) {
+ PyErr_SetString(PyExc_ValueError,
+ "x, y and z must all be 2D arrays with the same dimensions");
+ return -1;
+ }
+
+ if (z.dim(0) < 2 || z.dim(1) < 2) {
+ PyErr_SetString(PyExc_ValueError,
+ "x, y and z must all be at least 2x2 arrays");
+ return -1;
+ }
+
+ // Mask array is optional; if set, it must be the same size as the other arrays.
+ if (!mask.empty() && (mask.dim(0) != x.dim(0) || mask.dim(1) != x.dim(1))) {
+ PyErr_SetString(PyExc_ValueError,
+ "If mask is set it must be a 2D array with the same dimensions as x.");
+ return -1;
+ }
+
+ CALL_CPP_INIT("QuadContourGenerator",
+ (self->ptr = new QuadContourGenerator(
+ x, y, z, mask, corner_mask, chunk_size)));
+ return 0;
+}
+
+static void PyQuadContourGenerator_dealloc(PyQuadContourGenerator* self)
+{
+ delete self->ptr;
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+const char* PyQuadContourGenerator_create_contour__doc__ =
+ "create_contour(level)\n"
+ "\n"
+ "Create and return a non-filled contour.";
+
+static PyObject* PyQuadContourGenerator_create_contour(PyQuadContourGenerator* self, PyObject* args, PyObject* kwds)
+{
+ double level;
+ if (!PyArg_ParseTuple(args, "d:create_contour", &level)) {
+ return NULL;
+ }
+
+ PyObject* result;
+ CALL_CPP("create_contour", (result = self->ptr->create_contour(level)));
+ return result;
+}
+
+const char* PyQuadContourGenerator_create_filled_contour__doc__ =
+ "create_filled_contour(lower_level, upper_level)\n"
+ "\n"
+ "Create and return a filled contour";
+
+static PyObject* PyQuadContourGenerator_create_filled_contour(PyQuadContourGenerator* self, PyObject* args, PyObject* kwds)
+{
+ double lower_level, upper_level;
+ if (!PyArg_ParseTuple(args, "dd:create_filled_contour",
+ &lower_level, &upper_level)) {
+ return NULL;
+ }
+
+ if (lower_level >= upper_level)
+ {
+ PyErr_SetString(PyExc_ValueError,
+ "filled contour levels must be increasing");
+ return NULL;
+ }
+
+ PyObject* result;
+ CALL_CPP("create_filled_contour",
+ (result = self->ptr->create_filled_contour(lower_level,
+ upper_level)));
+ return result;
+}
+
+static PyTypeObject* PyQuadContourGenerator_init_type(PyObject* m, PyTypeObject* type)
+{
+ static PyMethodDef methods[] = {
+ {"create_contour", (PyCFunction)PyQuadContourGenerator_create_contour, METH_VARARGS, PyQuadContourGenerator_create_contour__doc__},
+ {"create_filled_contour", (PyCFunction)PyQuadContourGenerator_create_filled_contour, METH_VARARGS, PyQuadContourGenerator_create_filled_contour__doc__},
+ {NULL}
+ };
+
+ memset(type, 0, sizeof(PyTypeObject));
+ type->tp_name = "matplotlib.QuadContourGenerator";
+ type->tp_doc = PyQuadContourGenerator_init__doc__;
+ type->tp_basicsize = sizeof(PyQuadContourGenerator);
+ type->tp_dealloc = (destructor)PyQuadContourGenerator_dealloc;
+ type->tp_flags = Py_TPFLAGS_DEFAULT;
+ type->tp_methods = methods;
+ type->tp_new = PyQuadContourGenerator_new;
+ type->tp_init = (initproc)PyQuadContourGenerator_init;
+
+ if (PyType_Ready(type) < 0) {
+ return NULL;
+ }
+
+ if (PyModule_AddObject(m, "QuadContourGenerator", (PyObject*)type)) {
+ return NULL;
+ }
+
+ return type;
+}
+
+
+/* Module */
+
+extern "C" {
+
+#if PY3K
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_contour",
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__contour(void)
+
+#else
+#define INITERROR return
+
+PyMODINIT_FUNC init_contour(void)
+#endif
+
+{
+ PyObject *m;
+
+#if PY3K
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule3("_contour", NULL, NULL);
+#endif
+
+ if (m == NULL) {
+ INITERROR;
+ }
+
+ if (!PyQuadContourGenerator_init_type(m, &PyQuadContourGeneratorType)) {
+ INITERROR;
+ }
+
+ import_array();
+
+#if PY3K
+ return m;
+#endif
+}
+
+} // extern "C"
diff --git a/contrib/python/matplotlib/py2/src/_image.cpp b/contrib/python/matplotlib/py2/src/_image.cpp
new file mode 100644
index 00000000000..8fc386fccb8
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_image.cpp
@@ -0,0 +1,175 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+#define NO_IMPORT_ARRAY
+
+#include <math.h>
+
+// utilities for irregular grids
+void _bin_indices_middle(
+ unsigned int *irows, int nrows, const float *ys1, unsigned long ny, float dy, float y_min)
+{
+ int i, j, j_last;
+ unsigned int *rowstart = irows;
+ const float *ys2 = ys1 + 1;
+ const float *yl = ys1 + ny;
+ float yo = y_min + dy / 2.0;
+ float ym = 0.5f * (*ys1 + *ys2);
+ // y/rows
+ j = 0;
+ j_last = j;
+ for (i = 0; i < nrows; i++, yo += dy, rowstart++) {
+ while (ys2 != yl && yo > ym) {
+ ys1 = ys2;
+ ys2 = ys1 + 1;
+ ym = 0.5f * (*ys1 + *ys2);
+ j++;
+ }
+ *rowstart = j - j_last;
+ j_last = j;
+ }
+}
+
+void _bin_indices_middle_linear(float *arows,
+ unsigned int *irows,
+ int nrows,
+ const float *y,
+ unsigned long ny,
+ float dy,
+ float y_min)
+{
+ int i;
+ int ii = 0;
+ int iilast = (int)ny - 1;
+ float sc = 1 / dy;
+ int iy0 = (int)floor(sc * (y[ii] - y_min));
+ int iy1 = (int)floor(sc * (y[ii + 1] - y_min));
+ float invgap = 1.0f / (iy1 - iy0);
+ for (i = 0; i < nrows && i <= iy0; i++) {
+ irows[i] = 0;
+ arows[i] = 1.0;
+ }
+ for (; i < nrows; i++) {
+ while (i > iy1 && ii < iilast) {
+ ii++;
+ iy0 = iy1;
+ iy1 = (int)floor(sc * (y[ii + 1] - y_min));
+ invgap = 1.0f / (iy1 - iy0);
+ }
+ if (i >= iy0 && i <= iy1) {
+ irows[i] = ii;
+ arows[i] = (iy1 - i) * invgap;
+ } else
+ break;
+ }
+ for (; i < nrows; i++) {
+ irows[i] = iilast - 1;
+ arows[i] = 0.0;
+ }
+}
+
+void _bin_indices(int *irows, int nrows, const double *y, unsigned long ny, double sc, double offs)
+{
+ int i;
+ if (sc * (y[ny - 1] - y[0]) > 0) {
+ int ii = 0;
+ int iilast = (int)ny - 1;
+ int iy0 = (int)floor(sc * (y[ii] - offs));
+ int iy1 = (int)floor(sc * (y[ii + 1] - offs));
+ for (i = 0; i < nrows && i < iy0; i++) {
+ irows[i] = -1;
+ }
+ for (; i < nrows; i++) {
+ while (i > iy1 && ii < iilast) {
+ ii++;
+ iy0 = iy1;
+ iy1 = (int)floor(sc * (y[ii + 1] - offs));
+ }
+ if (i >= iy0 && i <= iy1)
+ irows[i] = ii;
+ else
+ break;
+ }
+ for (; i < nrows; i++) {
+ irows[i] = -1;
+ }
+ } else {
+ int iilast = (int)ny - 1;
+ int ii = iilast;
+ int iy0 = (int)floor(sc * (y[ii] - offs));
+ int iy1 = (int)floor(sc * (y[ii - 1] - offs));
+ for (i = 0; i < nrows && i < iy0; i++) {
+ irows[i] = -1;
+ }
+ for (; i < nrows; i++) {
+ while (i > iy1 && ii > 1) {
+ ii--;
+ iy0 = iy1;
+ iy1 = (int)floor(sc * (y[ii - 1] - offs));
+ }
+ if (i >= iy0 && i <= iy1)
+ irows[i] = ii - 1;
+ else
+ break;
+ }
+ for (; i < nrows; i++) {
+ irows[i] = -1;
+ }
+ }
+}
+
+void _bin_indices_linear(
+ float *arows, int *irows, int nrows, double *y, unsigned long ny, double sc, double offs)
+{
+ int i;
+ if (sc * (y[ny - 1] - y[0]) > 0) {
+ int ii = 0;
+ int iilast = (int)ny - 1;
+ int iy0 = (int)floor(sc * (y[ii] - offs));
+ int iy1 = (int)floor(sc * (y[ii + 1] - offs));
+ float invgap = 1.0 / (iy1 - iy0);
+ for (i = 0; i < nrows && i < iy0; i++) {
+ irows[i] = -1;
+ }
+ for (; i < nrows; i++) {
+ while (i > iy1 && ii < iilast) {
+ ii++;
+ iy0 = iy1;
+ iy1 = (int)floor(sc * (y[ii + 1] - offs));
+ invgap = 1.0 / (iy1 - iy0);
+ }
+ if (i >= iy0 && i <= iy1) {
+ irows[i] = ii;
+ arows[i] = (iy1 - i) * invgap;
+ } else
+ break;
+ }
+ for (; i < nrows; i++) {
+ irows[i] = -1;
+ }
+ } else {
+ int iilast = (int)ny - 1;
+ int ii = iilast;
+ int iy0 = (int)floor(sc * (y[ii] - offs));
+ int iy1 = (int)floor(sc * (y[ii - 1] - offs));
+ float invgap = 1.0 / (iy1 - iy0);
+ for (i = 0; i < nrows && i < iy0; i++) {
+ irows[i] = -1;
+ }
+ for (; i < nrows; i++) {
+ while (i > iy1 && ii > 1) {
+ ii--;
+ iy0 = iy1;
+ iy1 = (int)floor(sc * (y[ii - 1] - offs));
+ invgap = 1.0 / (iy1 - iy0);
+ }
+ if (i >= iy0 && i <= iy1) {
+ irows[i] = ii - 1;
+ arows[i] = (i - iy0) * invgap;
+ } else
+ break;
+ }
+ for (; i < nrows; i++) {
+ irows[i] = -1;
+ }
+ }
+}
diff --git a/contrib/python/matplotlib/py2/src/_image.h b/contrib/python/matplotlib/py2/src/_image.h
new file mode 100644
index 00000000000..629714d2ec3
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_image.h
@@ -0,0 +1,200 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+/* image.h
+ *
+ */
+
+#ifndef _IMAGE_H
+#define _IMAGE_H
+
+#include <vector>
+
+
+// utilities for irregular grids
+void _bin_indices_middle(
+ unsigned int *irows, int nrows, const float *ys1, unsigned long ny, float dy, float y_min);
+void _bin_indices_middle_linear(float *arows,
+ unsigned int *irows,
+ int nrows,
+ const float *y,
+ unsigned long ny,
+ float dy,
+ float y_min);
+void _bin_indices(int *irows, int nrows, const double *y, unsigned long ny, double sc, double offs);
+void _bin_indices_linear(
+ float *arows, int *irows, int nrows, double *y, unsigned long ny, double sc, double offs);
+
+template <class CoordinateArray, class ColorArray, class OutputArray>
+void pcolor(CoordinateArray &x,
+ CoordinateArray &y,
+ ColorArray &d,
+ unsigned int rows,
+ unsigned int cols,
+ float bounds[4],
+ int interpolation,
+ OutputArray &out)
+{
+ if (rows >= 32768 || cols >= 32768) {
+ throw std::runtime_error("rows and cols must both be less than 32768");
+ }
+
+ float x_min = bounds[0];
+ float x_max = bounds[1];
+ float y_min = bounds[2];
+ float y_max = bounds[3];
+ float width = x_max - x_min;
+ float height = y_max - y_min;
+ float dx = width / ((float)cols);
+ float dy = height / ((float)rows);
+
+ // Check we have something to output to
+ if (rows == 0 || cols == 0) {
+ throw std::runtime_error("Cannot scale to zero size");
+ }
+
+ if (d.dim(2) != 4) {
+ throw std::runtime_error("data must be in RGBA format");
+ }
+
+ // Check dimensions match
+ unsigned long nx = x.dim(0);
+ unsigned long ny = y.dim(0);
+ if (nx != (unsigned long)d.dim(1) || ny != (unsigned long)d.dim(0)) {
+ throw std::runtime_error("data and axis dimensions do not match");
+ }
+
+ // Allocate memory for pointer arrays
+ std::vector<unsigned int> rowstarts(rows);
+ std::vector<unsigned int> colstarts(cols);
+
+ // Calculate the pointer arrays to map input x to output x
+ unsigned int i, j;
+ unsigned int *colstart = &colstarts[0];
+ unsigned int *rowstart = &rowstarts[0];
+ const float *xs1 = x.data();
+ const float *ys1 = y.data();
+
+ // Copy data to output buffer
+ const unsigned char *start;
+ const unsigned char *inposition;
+ size_t inrowsize = nx * 4;
+ size_t rowsize = cols * 4;
+ unsigned char *position = (unsigned char *)out.data();
+ unsigned char *oldposition = NULL;
+ start = d.data();
+
+ if (interpolation == NEAREST) {
+ _bin_indices_middle(colstart, cols, xs1, nx, dx, x_min);
+ _bin_indices_middle(rowstart, rows, ys1, ny, dy, y_min);
+ for (i = 0; i < rows; i++, rowstart++) {
+ if (i > 0 && *rowstart == 0) {
+ memcpy(position, oldposition, rowsize * sizeof(unsigned char));
+ oldposition = position;
+ position += rowsize;
+ } else {
+ oldposition = position;
+ start += *rowstart * inrowsize;
+ inposition = start;
+ for (j = 0, colstart = &colstarts[0]; j < cols; j++, position += 4, colstart++) {
+ inposition += *colstart * 4;
+ memcpy(position, inposition, 4 * sizeof(unsigned char));
+ }
+ }
+ }
+ } else if (interpolation == BILINEAR) {
+ std::vector<float> acols(cols);
+ std::vector<float> arows(rows);
+
+ _bin_indices_middle_linear(&acols[0], colstart, cols, xs1, nx, dx, x_min);
+ _bin_indices_middle_linear(&arows[0], rowstart, rows, ys1, ny, dy, y_min);
+ double a00, a01, a10, a11, alpha, beta;
+
+ // Copy data to output buffer
+ for (i = 0; i < rows; i++) {
+ for (j = 0; j < cols; j++) {
+ alpha = arows[i];
+ beta = acols[j];
+
+ a00 = alpha * beta;
+ a01 = alpha * (1.0 - beta);
+ a10 = (1.0 - alpha) * beta;
+ a11 = 1.0 - a00 - a01 - a10;
+
+ for (size_t k = 0; k < 4; ++k) {
+ position[k] =
+ d(rowstart[i], colstart[j], k) * a00 +
+ d(rowstart[i], colstart[j] + 1, k) * a01 +
+ d(rowstart[i] + 1, colstart[j], k) * a10 +
+ d(rowstart[i] + 1, colstart[j] + 1, k) * a11;
+ }
+ position += 4;
+ }
+ }
+ }
+}
+
+template <class CoordinateArray, class ColorArray, class Color, class OutputArray>
+void pcolor2(CoordinateArray &x,
+ CoordinateArray &y,
+ ColorArray &d,
+ unsigned int rows,
+ unsigned int cols,
+ float bounds[4],
+ Color &bg,
+ OutputArray &out)
+{
+ double x_left = bounds[0];
+ double x_right = bounds[1];
+ double y_bot = bounds[2];
+ double y_top = bounds[3];
+
+ // Check we have something to output to
+ if (rows == 0 || cols == 0) {
+ throw std::runtime_error("rows or cols is zero; there are no pixels");
+ }
+
+ if (d.dim(2) != 4) {
+ throw std::runtime_error("data must be in RGBA format");
+ }
+
+ // Check dimensions match
+ unsigned long nx = x.dim(0);
+ unsigned long ny = y.dim(0);
+ if (nx != (unsigned long)d.dim(1) + 1 || ny != (unsigned long)d.dim(0) + 1) {
+ throw std::runtime_error("data and axis bin boundary dimensions are incompatible");
+ }
+
+ if (bg.dim(0) != 4) {
+ throw std::runtime_error("bg must be in RGBA format");
+ }
+
+ std::vector<int> irows(rows);
+ std::vector<int> jcols(cols);
+
+ // Calculate the pointer arrays to map input x to output x
+ size_t i, j;
+ const double *x0 = x.data();
+ const double *y0 = y.data();
+ double sx = cols / (x_right - x_left);
+ double sy = rows / (y_top - y_bot);
+ _bin_indices(&jcols[0], cols, x0, nx, sx, x_left);
+ _bin_indices(&irows[0], rows, y0, ny, sy, y_bot);
+
+ // Copy data to output buffer
+ unsigned char *position = (unsigned char *)out.data();
+
+ for (i = 0; i < rows; i++) {
+ for (j = 0; j < cols; j++) {
+ if (irows[i] == -1 || jcols[j] == -1) {
+ memcpy(position, (const unsigned char *)bg.data(), 4 * sizeof(unsigned char));
+ } else {
+ for (size_t k = 0; k < 4; ++k) {
+ position[k] = d(irows[i], jcols[j], k);
+ }
+ }
+ position += 4;
+ }
+ }
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/_image_resample.h b/contrib/python/matplotlib/py2/src/_image_resample.h
new file mode 100644
index 00000000000..86cbef03248
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_image_resample.h
@@ -0,0 +1,1013 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+#ifndef RESAMPLE_H
+#define RESAMPLE_H
+
+#include "agg_image_accessors.h"
+#include "agg_path_storage.h"
+#include "agg_pixfmt_gray.h"
+#include "agg_pixfmt_rgb.h"
+#include "agg_pixfmt_rgba.h"
+#include "agg_renderer_base.h"
+#include "agg_renderer_scanline.h"
+#include "agg_rasterizer_scanline_aa.h"
+#include "agg_scanline_u.h"
+#include "agg_span_allocator.h"
+#include "agg_span_converter.h"
+#include "agg_span_image_filter_gray.h"
+#include "agg_span_image_filter_rgba.h"
+#include "agg_span_interpolator_adaptor.h"
+#include "agg_span_interpolator_linear.h"
+
+#include "agg_workaround.h"
+
+// Based on:
+
+//----------------------------------------------------------------------------
+// Anti-Grain Geometry - Version 2.4
+// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
+//
+// Permission to copy, use, modify, sell and distribute this software
+// is granted provided this copyright notice appears in all copies.
+// This software is provided "as is" without express or implied
+// warranty, and with no claim as to its suitability for any purpose.
+//
+//----------------------------------------------------------------------------
+// Contact: mcseem@antigrain.com
+// mcseemagg@yahoo.com
+// http://www.antigrain.com
+//----------------------------------------------------------------------------
+//
+// Adaptation for high precision colors has been sponsored by
+// Liberty Technology Systems, Inc., visit http://lib-sys.com
+//
+// Liberty Technology Systems, Inc. is the provider of
+// PostScript and PDF technology for software developers.
+//
+
+//===================================================================gray64
+namespace agg
+{
+ struct gray64
+ {
+ typedef double value_type;
+ typedef double calc_type;
+ typedef double long_type;
+ typedef gray64 self_type;
+
+ value_type v;
+ value_type a;
+
+ //--------------------------------------------------------------------
+ gray64() {}
+
+ //--------------------------------------------------------------------
+ explicit gray64(value_type v_, value_type a_ = 1) :
+ v(v_), a(a_) {}
+
+ //--------------------------------------------------------------------
+ gray64(const self_type& c, value_type a_) :
+ v(c.v), a(a_) {}
+
+ //--------------------------------------------------------------------
+ gray64(const gray64& c) :
+ v(c.v),
+ a(c.a) {}
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(a);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return 1;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a <= 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a >= 1;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type invert(value_type x)
+ {
+ return 1 - x;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ return value_type(a * b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ return (b == 0) ? 0 : value_type(a / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return n > 0 ? a / (1 << n) : a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return value_type(a * b / cover_mask);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return cover_type(uround(a * b));
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return (1 - a) * p + q; // more accurate than "p + q - p * a"
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ // The form "p + a * (q - p)" avoids a multiplication, but may produce an
+ // inaccurate result. For example, "p + (q - p)" may not be exactly equal
+ // to q. Therefore, stick to the basic expression, which at least produces
+ // the correct result at either extreme.
+ return (1 - a) * p + a * q;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ v = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = value_type(a_);
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return a;
+ }
+
+
+ //--------------------------------------------------------------------
+ self_type& premultiply()
+ {
+ if (a < 0) v = 0;
+ else if(a < 1) v *= a;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& demultiply()
+ {
+ if (a < 0) v = 0;
+ else if (a < 1) v /= a;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type gradient(self_type c, double k) const
+ {
+ return self_type(
+ value_type(v + (c.v - v) * k),
+ value_type(a + (c.a - a) * k));
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0); }
+ };
+
+
+ //====================================================================rgba64
+ struct rgba64
+ {
+ typedef double value_type;
+ typedef double calc_type;
+ typedef double long_type;
+ typedef rgba64 self_type;
+
+ value_type r;
+ value_type g;
+ value_type b;
+ value_type a;
+
+ //--------------------------------------------------------------------
+ rgba64() {}
+
+ //--------------------------------------------------------------------
+ rgba64(value_type r_, value_type g_, value_type b_, value_type a_= 1) :
+ r(r_), g(g_), b(b_), a(a_) {}
+
+ //--------------------------------------------------------------------
+ rgba64(const self_type& c, float a_) :
+ r(c.r), g(c.g), b(c.b), a(a_) {}
+
+ //--------------------------------------------------------------------
+ rgba64(const rgba& c) :
+ r(value_type(c.r)), g(value_type(c.g)), b(value_type(c.b)), a(value_type(c.a)) {}
+
+ //--------------------------------------------------------------------
+ operator rgba() const
+ {
+ return rgba(r, g, b, a);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE double to_double(value_type a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type from_double(double a)
+ {
+ return value_type(a);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type empty_value()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type full_value()
+ {
+ return 1;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_transparent() const
+ {
+ return a <= 0;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE bool is_opaque() const
+ {
+ return a >= 1;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type invert(value_type x)
+ {
+ return 1 - x;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
+ {
+ return value_type(a * b);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
+ {
+ return (b == 0) ? 0 : value_type(a / b);
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downscale(T a)
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ template<typename T>
+ static AGG_INLINE T downshift(T a, unsigned n)
+ {
+ return n > 0 ? a / (1 << n) : a;
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
+ {
+ return value_type(a * b / cover_mask);
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
+ {
+ return cover_type(uround(a * b));
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a, assuming q is premultiplied by a.
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
+ {
+ return (1 - a) * p + q; // more accurate than "p + q - p * a"
+ }
+
+ //--------------------------------------------------------------------
+ // Interpolate p to q by a.
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
+ {
+ // The form "p + a * (q - p)" avoids a multiplication, but may produce an
+ // inaccurate result. For example, "p + (q - p)" may not be exactly equal
+ // to q. Therefore, stick to the basic expression, which at least produces
+ // the correct result at either extreme.
+ return (1 - a) * p + a * q;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& clear()
+ {
+ r = g = b = a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ self_type& transparent()
+ {
+ a = 0;
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& opacity(double a_)
+ {
+ if (a_ < 0) a = 0;
+ else if (a_ > 1) a = 1;
+ else a = value_type(a_);
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ double opacity() const
+ {
+ return a;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& premultiply()
+ {
+ if (a < 1)
+ {
+ if (a <= 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ r *= a;
+ g *= a;
+ b *= a;
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type& demultiply()
+ {
+ if (a < 1)
+ {
+ if (a <= 0)
+ {
+ r = g = b = 0;
+ }
+ else
+ {
+ r /= a;
+ g /= a;
+ b /= a;
+ }
+ }
+ return *this;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE self_type gradient(const self_type& c, double k) const
+ {
+ self_type ret;
+ ret.r = value_type(r + (c.r - r) * k);
+ ret.g = value_type(g + (c.g - g) * k);
+ ret.b = value_type(b + (c.b - b) * k);
+ ret.a = value_type(a + (c.a - a) * k);
+ return ret;
+ }
+
+ //--------------------------------------------------------------------
+ AGG_INLINE void add(const self_type& c, unsigned cover)
+ {
+ if (cover == cover_mask)
+ {
+ if (c.is_opaque())
+ {
+ *this = c;
+ return;
+ }
+ else
+ {
+ r += c.r;
+ g += c.g;
+ b += c.b;
+ a += c.a;
+ }
+ }
+ else
+ {
+ r += mult_cover(c.r, cover);
+ g += mult_cover(c.g, cover);
+ b += mult_cover(c.b, cover);
+ a += mult_cover(c.a, cover);
+ }
+ if (a > 1) a = 1;
+ if (r > a) r = a;
+ if (g > a) g = a;
+ if (b > a) b = a;
+ }
+
+ //--------------------------------------------------------------------
+ static self_type no_color() { return self_type(0,0,0,0); }
+ };
+}
+
+
+typedef enum {
+ NEAREST,
+ BILINEAR,
+ BICUBIC,
+ SPLINE16,
+ SPLINE36,
+ HANNING,
+ HAMMING,
+ HERMITE,
+ KAISER,
+ QUADRIC,
+ CATROM,
+ GAUSSIAN,
+ BESSEL,
+ MITCHELL,
+ SINC,
+ LANCZOS,
+ BLACKMAN,
+ _n_interpolation
+} interpolation_e;
+
+
+template <typename T>
+class type_mapping;
+
+
+template <> class type_mapping<agg::rgba8>
+{
+ public:
+ typedef agg::rgba8 color_type;
+ typedef fixed_blender_rgba_plain<color_type, agg::order_rgba> blender_type;
+ typedef fixed_blender_rgba_pre<color_type, agg::order_rgba> pre_blender_type;
+ typedef agg::pixfmt_alpha_blend_rgba<blender_type, agg::rendering_buffer> pixfmt_type;
+ typedef agg::pixfmt_alpha_blend_rgba<pre_blender_type, agg::rendering_buffer> pixfmt_pre_type;
+
+ template <typename A>
+ struct span_gen_affine_type
+ {
+ typedef agg::span_image_resample_rgba_affine<A> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_filter_type
+ {
+ typedef agg::span_image_filter_rgba<A, B> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_nn_type
+ {
+ typedef agg::span_image_filter_rgba_nn<A, B> type;
+ };
+};
+
+
+template <> class type_mapping<agg::rgba16>
+{
+ public:
+ typedef agg::rgba16 color_type;
+ typedef fixed_blender_rgba_plain<color_type, agg::order_rgba> blender_type;
+ typedef fixed_blender_rgba_pre<color_type, agg::order_rgba> pre_blender_type;
+ typedef agg::pixfmt_alpha_blend_rgba<blender_type, agg::rendering_buffer> pixfmt_type;
+ typedef agg::pixfmt_alpha_blend_rgba<pre_blender_type, agg::rendering_buffer> pixfmt_pre_type;
+
+ template <typename A>
+ struct span_gen_affine_type
+ {
+ typedef agg::span_image_resample_rgba_affine<A> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_filter_type
+ {
+ typedef agg::span_image_filter_rgba<A, B> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_nn_type
+ {
+ typedef agg::span_image_filter_rgba_nn<A, B> type;
+ };
+};
+
+
+template <> class type_mapping<agg::rgba32>
+{
+ public:
+ typedef agg::rgba32 color_type;
+ typedef agg::blender_rgba_plain<color_type, agg::order_rgba> blender_type;
+ typedef agg::blender_rgba_pre<color_type, agg::order_rgba> pre_blender_type;
+ typedef agg::pixfmt_alpha_blend_rgba<blender_type, agg::rendering_buffer> pixfmt_type;
+ typedef agg::pixfmt_alpha_blend_rgba<pre_blender_type, agg::rendering_buffer> pixfmt_pre_type;
+
+ template <typename A>
+ struct span_gen_affine_type
+ {
+ typedef agg::span_image_resample_rgba_affine<A> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_filter_type
+ {
+ typedef agg::span_image_filter_rgba<A, B> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_nn_type
+ {
+ typedef agg::span_image_filter_rgba_nn<A, B> type;
+ };
+};
+
+
+template <> class type_mapping<agg::rgba64>
+{
+ public:
+ typedef agg::rgba64 color_type;
+ typedef agg::blender_rgba_plain<color_type, agg::order_rgba> blender_type;
+ typedef agg::blender_rgba_pre<color_type, agg::order_rgba> pre_blender_type;
+ typedef agg::pixfmt_alpha_blend_rgba<blender_type, agg::rendering_buffer> pixfmt_type;
+ typedef agg::pixfmt_alpha_blend_rgba<pre_blender_type, agg::rendering_buffer> pixfmt_pre_type;
+
+ template <typename A>
+ struct span_gen_affine_type
+ {
+ typedef agg::span_image_resample_rgba_affine<A> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_filter_type
+ {
+ typedef agg::span_image_filter_rgba<A, B> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_nn_type
+ {
+ typedef agg::span_image_filter_rgba_nn<A, B> type;
+ };
+};
+
+
+template <> class type_mapping<double>
+{
+ public:
+ typedef agg::gray64 color_type;
+ typedef agg::blender_gray<color_type> blender_type;
+ typedef agg::pixfmt_alpha_blend_gray<blender_type, agg::rendering_buffer> pixfmt_type;
+ typedef pixfmt_type pixfmt_pre_type;
+
+ template <typename A>
+ struct span_gen_affine_type
+ {
+ typedef agg::span_image_resample_gray_affine<A> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_filter_type
+ {
+ typedef agg::span_image_filter_gray<A, B> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_nn_type
+ {
+ typedef agg::span_image_filter_gray_nn<A, B> type;
+ };
+};
+
+
+template <> class type_mapping<float>
+{
+ public:
+ typedef agg::gray32 color_type;
+ typedef agg::blender_gray<color_type> blender_type;
+ typedef agg::pixfmt_alpha_blend_gray<blender_type, agg::rendering_buffer> pixfmt_type;
+ typedef pixfmt_type pixfmt_pre_type;
+
+ template <typename A>
+ struct span_gen_affine_type
+ {
+ typedef agg::span_image_resample_gray_affine<A> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_filter_type
+ {
+ typedef agg::span_image_filter_gray<A, B> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_nn_type
+ {
+ typedef agg::span_image_filter_gray_nn<A, B> type;
+ };
+};
+
+
+template <> class type_mapping<unsigned short>
+{
+ public:
+ typedef agg::gray16 color_type;
+ typedef agg::blender_gray<color_type> blender_type;
+ typedef agg::pixfmt_alpha_blend_gray<blender_type, agg::rendering_buffer> pixfmt_type;
+ typedef pixfmt_type pixfmt_pre_type;
+
+ template <typename A>
+ struct span_gen_affine_type
+ {
+ typedef agg::span_image_resample_gray_affine<A> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_filter_type
+ {
+ typedef agg::span_image_filter_gray<A, B> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_nn_type
+ {
+ typedef agg::span_image_filter_gray_nn<A, B> type;
+ };
+};
+
+
+template <> class type_mapping<unsigned char>
+{
+ public:
+ typedef agg::gray8 color_type;
+ typedef agg::blender_gray<color_type> blender_type;
+ typedef agg::pixfmt_alpha_blend_gray<blender_type, agg::rendering_buffer> pixfmt_type;
+ typedef pixfmt_type pixfmt_pre_type;
+
+ template <typename A>
+ struct span_gen_affine_type
+ {
+ typedef agg::span_image_resample_gray_affine<A> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_filter_type
+ {
+ typedef agg::span_image_filter_gray<A, B> type;
+ };
+
+ template <typename A, typename B>
+ struct span_gen_nn_type
+ {
+ typedef agg::span_image_filter_gray_nn<A, B> type;
+ };
+};
+
+
+
+template<class color_type>
+class span_conv_alpha
+{
+public:
+ span_conv_alpha(const double alpha) :
+ m_alpha(alpha)
+ {
+ }
+
+ void prepare() {}
+
+ void generate(color_type* span, int x, int y, unsigned len) const
+ {
+ if (m_alpha != 1.0) {
+ do {
+ span->a *= m_alpha;
+ ++span;
+ } while (--len);
+ }
+ }
+private:
+
+ const double m_alpha;
+};
+
+
+/* A class that applies a lookup-table (mesh) distortion to pixel coordinates */
+class lookup_distortion
+{
+public:
+ lookup_distortion(const double *mesh, int in_width, int in_height,
+ int out_width, int out_height) :
+ m_mesh(mesh),
+ m_in_width(in_width),
+ m_in_height(in_height),
+ m_out_width(out_width),
+ m_out_height(out_height)
+ {}
+
+ void calculate(int* x, int* y) {
+ if (m_mesh) {
+ double dx = double(*x) / agg::image_subpixel_scale;
+ double dy = double(*y) / agg::image_subpixel_scale;
+ if (dx >= 0 && dx < m_out_width &&
+ dy >= 0 && dy < m_out_height) {
+ const double *coord = m_mesh + (int(dy) * m_out_width + int(dx)) * 2;
+ *x = int(coord[0] * agg::image_subpixel_scale);
+ *y = int(coord[1] * agg::image_subpixel_scale);
+ }
+ }
+ }
+
+protected:
+ const double *m_mesh;
+ int m_in_width;
+ int m_in_height;
+ int m_out_width;
+ int m_out_height;
+};
+
+
+struct resample_params_t {
+ interpolation_e interpolation;
+ bool is_affine;
+ agg::trans_affine affine;
+ const double *transform_mesh;
+ bool resample;
+ double norm;
+ double radius;
+ double alpha;
+};
+
+
+static void get_filter(const resample_params_t &params,
+ agg::image_filter_lut &filter)
+{
+ switch (params.interpolation) {
+ case NEAREST:
+ case _n_interpolation:
+ // Should never get here; these cases are listed only to silence compiler warnings.
+ break;
+
+ case HANNING:
+ filter.calculate(agg::image_filter_hanning(), params.norm);
+ break;
+
+ case HAMMING:
+ filter.calculate(agg::image_filter_hamming(), params.norm);
+ break;
+
+ case HERMITE:
+ filter.calculate(agg::image_filter_hermite(), params.norm);
+ break;
+
+ case BILINEAR:
+ filter.calculate(agg::image_filter_bilinear(), params.norm);
+ break;
+
+ case BICUBIC:
+ filter.calculate(agg::image_filter_bicubic(), params.norm);
+ break;
+
+ case SPLINE16:
+ filter.calculate(agg::image_filter_spline16(), params.norm);
+ break;
+
+ case SPLINE36:
+ filter.calculate(agg::image_filter_spline36(), params.norm);
+ break;
+
+ case KAISER:
+ filter.calculate(agg::image_filter_kaiser(), params.norm);
+ break;
+
+ case QUADRIC:
+ filter.calculate(agg::image_filter_quadric(), params.norm);
+ break;
+
+ case CATROM:
+ filter.calculate(agg::image_filter_catrom(), params.norm);
+ break;
+
+ case GAUSSIAN:
+ filter.calculate(agg::image_filter_gaussian(), params.norm);
+ break;
+
+ case BESSEL:
+ filter.calculate(agg::image_filter_bessel(), params.norm);
+ break;
+
+ case MITCHELL:
+ filter.calculate(agg::image_filter_mitchell(), params.norm);
+ break;
+
+ case SINC:
+ filter.calculate(agg::image_filter_sinc(params.radius), params.norm);
+ break;
+
+ case LANCZOS:
+ filter.calculate(agg::image_filter_lanczos(params.radius), params.norm);
+ break;
+
+ case BLACKMAN:
+ filter.calculate(agg::image_filter_blackman(params.radius), params.norm);
+ break;
+ }
+}
+
+
+template<class T>
+void resample(
+ const T *input, int in_width, int in_height,
+ T *output, int out_width, int out_height,
+ resample_params_t &params)
+{
+ typedef type_mapping<T> type_mapping_t;
+
+ typedef typename type_mapping_t::pixfmt_type input_pixfmt_t;
+ typedef typename type_mapping_t::pixfmt_type output_pixfmt_t;
+
+ typedef agg::renderer_base<output_pixfmt_t> renderer_t;
+ typedef agg::rasterizer_scanline_aa<agg::rasterizer_sl_clip_dbl> rasterizer_t;
+
+ typedef agg::wrap_mode_reflect reflect_t;
+ typedef agg::image_accessor_wrap<input_pixfmt_t, reflect_t, reflect_t> image_accessor_t;
+
+ typedef agg::span_allocator<typename type_mapping_t::color_type> span_alloc_t;
+ typedef span_conv_alpha<typename type_mapping_t::color_type> span_conv_alpha_t;
+
+ typedef agg::span_interpolator_linear<> affine_interpolator_t;
+ typedef agg::span_interpolator_adaptor<agg::span_interpolator_linear<>, lookup_distortion>
+ arbitrary_interpolator_t;
+
+ if (params.interpolation != NEAREST &&
+ params.is_affine &&
+ fabs(params.affine.sx) == 1.0 &&
+ fabs(params.affine.sy) == 1.0 &&
+ params.affine.shx == 0.0 &&
+ params.affine.shy == 0.0) {
+ params.interpolation = NEAREST;
+ }
+
+ span_alloc_t span_alloc;
+ rasterizer_t rasterizer;
+ agg::scanline_u8 scanline;
+
+ span_conv_alpha_t conv_alpha(params.alpha);
+
+ agg::rendering_buffer input_buffer;
+ input_buffer.attach((unsigned char *)input, in_width, in_height,
+ in_width * sizeof(T));
+ input_pixfmt_t input_pixfmt(input_buffer);
+ image_accessor_t input_accessor(input_pixfmt);
+
+ agg::rendering_buffer output_buffer;
+ output_buffer.attach((unsigned char *)output, out_width, out_height,
+ out_width * sizeof(T));
+ output_pixfmt_t output_pixfmt(output_buffer);
+ renderer_t renderer(output_pixfmt);
+
+ agg::trans_affine inverted = params.affine;
+ inverted.invert();
+
+ rasterizer.clip_box(0, 0, out_width, out_height);
+
+ agg::path_storage path;
+ if (params.is_affine) {
+ path.move_to(0, 0);
+ path.line_to(in_width, 0);
+ path.line_to(in_width, in_height);
+ path.line_to(0, in_height);
+ path.close_polygon();
+ agg::conv_transform<agg::path_storage> rectangle(path, params.affine);
+ rasterizer.add_path(rectangle);
+ } else {
+ path.move_to(0, 0);
+ path.line_to(out_width, 0);
+ path.line_to(out_width, out_height);
+ path.line_to(0, out_height);
+ path.close_polygon();
+ rasterizer.add_path(path);
+ }
+
+ if (params.interpolation == NEAREST) {
+ if (params.is_affine) {
+ typedef typename type_mapping_t::template span_gen_nn_type<image_accessor_t, affine_interpolator_t>::type span_gen_t;
+ typedef agg::span_converter<span_gen_t, span_conv_alpha_t> span_conv_t;
+ typedef agg::renderer_scanline_aa<renderer_t, span_alloc_t, span_conv_t> nn_renderer_t;
+
+ affine_interpolator_t interpolator(inverted);
+ span_gen_t span_gen(input_accessor, interpolator);
+ span_conv_t span_conv(span_gen, conv_alpha);
+ nn_renderer_t nn_renderer(renderer, span_alloc, span_conv);
+ agg::render_scanlines(rasterizer, scanline, nn_renderer);
+ } else {
+ typedef typename type_mapping_t::template span_gen_nn_type<image_accessor_t, arbitrary_interpolator_t>::type span_gen_t;
+ typedef agg::span_converter<span_gen_t, span_conv_alpha_t> span_conv_t;
+ typedef agg::renderer_scanline_aa<renderer_t, span_alloc_t, span_conv_t> nn_renderer_t;
+
+ lookup_distortion dist(
+ params.transform_mesh, in_width, in_height, out_width, out_height);
+ arbitrary_interpolator_t interpolator(inverted, dist);
+ span_gen_t span_gen(input_accessor, interpolator);
+ span_conv_t span_conv(span_gen, conv_alpha);
+ nn_renderer_t nn_renderer(renderer, span_alloc, span_conv);
+ agg::render_scanlines(rasterizer, scanline, nn_renderer);
+ }
+ } else {
+ agg::image_filter_lut filter;
+ get_filter(params, filter);
+
+ if (params.is_affine && params.resample) {
+ typedef typename type_mapping_t::template span_gen_affine_type<image_accessor_t>::type span_gen_t;
+ typedef agg::span_converter<span_gen_t, span_conv_alpha_t> span_conv_t;
+ typedef agg::renderer_scanline_aa<renderer_t, span_alloc_t, span_conv_t> int_renderer_t;
+
+ affine_interpolator_t interpolator(inverted);
+ span_gen_t span_gen(input_accessor, interpolator, filter);
+ span_conv_t span_conv(span_gen, conv_alpha);
+ int_renderer_t int_renderer(renderer, span_alloc, span_conv);
+ agg::render_scanlines(rasterizer, scanline, int_renderer);
+ } else {
+ typedef typename type_mapping_t::template span_gen_filter_type<image_accessor_t, arbitrary_interpolator_t>::type span_gen_t;
+ typedef agg::span_converter<span_gen_t, span_conv_alpha_t> span_conv_t;
+ typedef agg::renderer_scanline_aa<renderer_t, span_alloc_t, span_conv_t> int_renderer_t;
+
+ lookup_distortion dist(
+ params.transform_mesh, in_width, in_height, out_width, out_height);
+ arbitrary_interpolator_t interpolator(inverted, dist);
+ span_gen_t span_gen(input_accessor, interpolator, filter);
+ span_conv_t span_conv(span_gen, conv_alpha);
+ int_renderer_t int_renderer(renderer, span_alloc, span_conv);
+ agg::render_scanlines(rasterizer, scanline, int_renderer);
+ }
+ }
+}
+
+#endif /* RESAMPLE_H */
diff --git a/contrib/python/matplotlib/py2/src/_image_wrapper.cpp b/contrib/python/matplotlib/py2/src/_image_wrapper.cpp
new file mode 100644
index 00000000000..ee0bfe84c74
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_image_wrapper.cpp
@@ -0,0 +1,510 @@
+#include "mplutils.h"
+#include "_image_resample.h"
+#include "_image.h"
+#include "py_converters.h"
+
+
+#ifndef NPY_1_7_API_VERSION
+#define NPY_ARRAY_C_CONTIGUOUS NPY_C_CONTIGUOUS
+#endif
+
+
+/**********************************************************************
+ * Free functions
+ * */
+
+const char* image_resample__doc__ =
+"resample(input_array, output_array, matrix, interpolation=NEAREST, alpha=1.0, norm=0, radius=1)\n\n"
+
+"Resample input_array, blending it in-place into output_array, using an\n"
+"affine transformation.\n\n"
+
+"Parameters\n"
+"----------\n"
+"input_array : 2-d or 3-d Numpy array of float, double or uint8\n"
+" If 2-d, the image is grayscale. If 3-d, the image must be of size\n"
+" 4 in the last dimension and represents RGBA data.\n\n"
+
+"output_array : 2-d or 3-d Numpy array of float, double or uint8\n"
+" The dtype and number of dimensions must match `input_array`.\n\n"
+
+"transform : matplotlib.transforms.Transform instance\n"
+" The transformation from the input array to the output\n"
+" array.\n\n"
+
+"interpolation : int, optional\n"
+" The interpolation method. Must be one of the following constants\n"
+" defined in this module:\n\n"
+
+" NEAREST (default), BILINEAR, BICUBIC, SPLINE16, SPLINE36,\n"
+" HANNING, HAMMING, HERMITE, KAISER, QUADRIC, CATROM, GAUSSIAN,\n"
+" BESSEL, MITCHELL, SINC, LANCZOS, BLACKMAN\n\n"
+
+"resample : bool, optional\n"
+" When `True`, use a full resampling method. When `False`, only\n"
+" resample when the output image is larger than the input image.\n\n"
+
+"alpha : float, optional\n"
+" The level of transparency to apply. 1.0 is completely opaque.\n"
+" 0.0 is completely transparent.\n\n"
+
+"norm : float, optional\n"
+" The norm for the interpolation function. Default is 0.\n\n"
+
+"radius: float, optional\n"
+" The radius of the kernel, if method is SINC, LANCZOS or BLACKMAN.\n"
+" Default is 1.\n";
+
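
A minimal sketch of driving the templated resample<T>() from _image_resample.h
directly from C++, mirroring what image_resample() below does once the NumPy
arrays are unpacked. The resample_params_t field names follow their use in this
wrapper; the buffer sizes and the scaling transform are illustrative assumptions.

    // Sketch only: resample a 4x4 grayscale double image into an 8x8 buffer
    // with a pure affine (2x) scale, the simple case handled without a mesh.
    #include "_image_resample.h"
    #include <vector>

    static void resample_gray_affine_sketch()
    {
        const int in_w = 4, in_h = 4, out_w = 8, out_h = 8;
        std::vector<double> input(in_w * in_h, 0.5);    // constant gray input
        std::vector<double> output(out_w * out_h, 0.0); // blended in place

        resample_params_t params;
        params.interpolation = BILINEAR;   // any constant exposed by the module
        params.is_affine = true;           // affine path: no transform mesh
        params.transform_mesh = NULL;
        params.affine = agg::trans_affine_scaling(2.0, 2.0);  // input -> output
        params.resample = true;
        params.norm = 0;
        params.radius = 1.0;
        params.alpha = 1.0;

        resample(&input[0], in_w, in_h, &output[0], out_w, out_h, params);
    }
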
+
+static PyArrayObject *
+_get_transform_mesh(PyObject *py_affine, npy_intp *dims)
+{
+ /* TODO: Could we get away with float, rather than double, arrays here? */
+
+ /* Given a non-affine transform object, create a mesh that maps
+ every pixel in the output image to the input image. This is used
+ as a lookup table during the actual resampling. */
+
+ PyObject *py_inverse = NULL;
+ npy_intp out_dims[3];
+
+ out_dims[0] = dims[0] * dims[1];
+ out_dims[1] = 2;
+
+ py_inverse = PyObject_CallMethod(
+ py_affine, (char *)"inverted", (char *)"", NULL);
+ if (py_inverse == NULL) {
+ return NULL;
+ }
+
+ numpy::array_view<double, 2> input_mesh(out_dims);
+ double *p = (double *)input_mesh.data();
+
+ for (npy_intp y = 0; y < dims[0]; ++y) {
+ for (npy_intp x = 0; x < dims[1]; ++x) {
+ *p++ = (double)x;
+ *p++ = (double)y;
+ }
+ }
+
+ PyObject *output_mesh =
+ PyObject_CallMethod(
+ py_inverse, (char *)"transform", (char *)"O",
+ (char *)input_mesh.pyobj(), NULL);
+
+ Py_DECREF(py_inverse);
+
+ if (output_mesh == NULL) {
+ return NULL;
+ }
+
+ PyArrayObject *output_mesh_array =
+ (PyArrayObject *)PyArray_ContiguousFromAny(
+ output_mesh, NPY_DOUBLE, 2, 2);
+
+ Py_DECREF(output_mesh);
+
+ if (output_mesh_array == NULL) {
+ return NULL;
+ }
+
+ return output_mesh_array;
+}
+
+
+static PyObject *
+image_resample(PyObject *self, PyObject* args, PyObject *kwargs)
+{
+ PyObject *py_input_array = NULL;
+ PyObject *py_output_array = NULL;
+ PyObject *py_transform = NULL;
+ resample_params_t params;
+ int resample_ = 0;           /* default: resample=False */
+
+ PyArrayObject *input_array = NULL;
+ PyArrayObject *output_array = NULL;
+ PyArrayObject *transform_mesh_array = NULL;
+
+ /* Defaults for the optional keyword arguments (see the docstring above);
+    these are only overwritten when the corresponding keyword is passed. */
+ params.interpolation = NEAREST;
+ params.alpha = 1.0;
+ params.norm = 0;
+ params.radius = 1;
+ params.transform_mesh = NULL;
+
+ const char *kwlist[] = {
+ "input_array", "output_array", "transform", "interpolation",
+ "resample", "alpha", "norm", "radius", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(
+ args, kwargs, "OOO|iiddd:resample", (char **)kwlist,
+ &py_input_array, &py_output_array, &py_transform,
+ &params.interpolation, &resample_, &params.alpha, &params.norm,
+ &params.radius)) {
+ return NULL;
+ }
+
+ if (params.interpolation < 0 || params.interpolation >= _n_interpolation) {
+ PyErr_Format(PyExc_ValueError, "invalid interpolation value %d",
+ params.interpolation);
+ goto error;
+ }
+
+ params.resample = (resample_ != 0);
+
+ input_array = (PyArrayObject *)PyArray_FromAny(
+ py_input_array, NULL, 2, 3, NPY_ARRAY_C_CONTIGUOUS, NULL);
+ if (input_array == NULL) {
+ goto error;
+ }
+
+ output_array = (PyArrayObject *)PyArray_FromAny(
+ py_output_array, NULL, 2, 3, NPY_ARRAY_C_CONTIGUOUS, NULL);
+ if (output_array == NULL) {
+ goto error;
+ }
+
+ if (py_transform == NULL || py_transform == Py_None) {
+ params.is_affine = true;
+ } else {
+ PyObject *py_is_affine;
+ int py_is_affine2;
+ py_is_affine = PyObject_GetAttrString(py_transform, "is_affine");
+ if (py_is_affine == NULL) {
+ goto error;
+ }
+
+ py_is_affine2 = PyObject_IsTrue(py_is_affine);
+ Py_DECREF(py_is_affine);
+
+ if (py_is_affine2 == -1) {
+ goto error;
+ } else if (py_is_affine2) {
+ if (!convert_trans_affine(py_transform, &params.affine)) {
+ goto error;
+ }
+ params.is_affine = true;
+ } else {
+ transform_mesh_array = _get_transform_mesh(
+ py_transform, PyArray_DIMS(output_array));
+ if (transform_mesh_array == NULL) {
+ goto error;
+ }
+ params.transform_mesh = (double *)PyArray_DATA(transform_mesh_array);
+ params.is_affine = false;
+ }
+ }
+
+ if (PyArray_NDIM(input_array) != PyArray_NDIM(output_array)) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "Mismatched number of dimensions. Got %d and %d.",
+ PyArray_NDIM(input_array), PyArray_NDIM(output_array));
+ goto error;
+ }
+
+ if (PyArray_TYPE(input_array) != PyArray_TYPE(output_array)) {
+ PyErr_SetString(PyExc_ValueError, "Mismatched types");
+ goto error;
+ }
+
+ if (PyArray_NDIM(input_array) == 3) {
+ if (PyArray_DIM(output_array, 2) != 4) {
+ PyErr_SetString(
+ PyExc_ValueError,
+ "Output array must be RGBA");
+ goto error;
+ }
+
+ if (PyArray_DIM(input_array, 2) == 4) {
+ switch(PyArray_TYPE(input_array)) {
+ case NPY_BYTE:
+ case NPY_UINT8:
+ Py_BEGIN_ALLOW_THREADS
+ resample(
+ (agg::rgba8 *)PyArray_DATA(input_array),
+ PyArray_DIM(input_array, 1),
+ PyArray_DIM(input_array, 0),
+ (agg::rgba8 *)PyArray_DATA(output_array),
+ PyArray_DIM(output_array, 1),
+ PyArray_DIM(output_array, 0),
+ params);
+ Py_END_ALLOW_THREADS
+ break;
+ case NPY_UINT16:
+ case NPY_INT16:
+ Py_BEGIN_ALLOW_THREADS
+ resample(
+ (agg::rgba16 *)PyArray_DATA(input_array),
+ PyArray_DIM(input_array, 1),
+ PyArray_DIM(input_array, 0),
+ (agg::rgba16 *)PyArray_DATA(output_array),
+ PyArray_DIM(output_array, 1),
+ PyArray_DIM(output_array, 0),
+ params);
+ Py_END_ALLOW_THREADS
+ break;
+ case NPY_FLOAT32:
+ Py_BEGIN_ALLOW_THREADS
+ resample(
+ (agg::rgba32 *)PyArray_DATA(input_array),
+ PyArray_DIM(input_array, 1),
+ PyArray_DIM(input_array, 0),
+ (agg::rgba32 *)PyArray_DATA(output_array),
+ PyArray_DIM(output_array, 1),
+ PyArray_DIM(output_array, 0),
+ params);
+ Py_END_ALLOW_THREADS
+ break;
+ case NPY_FLOAT64:
+ Py_BEGIN_ALLOW_THREADS
+ resample(
+ (agg::rgba64 *)PyArray_DATA(input_array),
+ PyArray_DIM(input_array, 1),
+ PyArray_DIM(input_array, 0),
+ (agg::rgba64 *)PyArray_DATA(output_array),
+ PyArray_DIM(output_array, 1),
+ PyArray_DIM(output_array, 0),
+ params);
+ Py_END_ALLOW_THREADS
+ break;
+ default:
+ PyErr_SetString(
+ PyExc_ValueError,
+ "3-dimensional arrays must be of dtype unsigned byte, "
+ "unsigned short, float32 or float64");
+ goto error;
+ }
+ } else {
+ PyErr_Format(
+ PyExc_ValueError,
+ "If 3-dimensional, array must be RGBA. Got %" NPY_INTP_FMT " planes.",
+ PyArray_DIM(input_array, 2));
+ goto error;
+ }
+ } else { // NDIM == 2
+ switch (PyArray_TYPE(input_array)) {
+ case NPY_DOUBLE:
+ Py_BEGIN_ALLOW_THREADS
+ resample(
+ (double *)PyArray_DATA(input_array),
+ PyArray_DIM(input_array, 1),
+ PyArray_DIM(input_array, 0),
+ (double *)PyArray_DATA(output_array),
+ PyArray_DIM(output_array, 1),
+ PyArray_DIM(output_array, 0),
+ params);
+ Py_END_ALLOW_THREADS
+ break;
+ case NPY_FLOAT:
+ Py_BEGIN_ALLOW_THREADS
+ resample(
+ (float *)PyArray_DATA(input_array),
+ PyArray_DIM(input_array, 1),
+ PyArray_DIM(input_array, 0),
+ (float *)PyArray_DATA(output_array),
+ PyArray_DIM(output_array, 1),
+ PyArray_DIM(output_array, 0),
+ params);
+ Py_END_ALLOW_THREADS
+ break;
+ case NPY_UINT8:
+ case NPY_BYTE:
+ Py_BEGIN_ALLOW_THREADS
+ resample(
+ (unsigned char *)PyArray_DATA(input_array),
+ PyArray_DIM(input_array, 1),
+ PyArray_DIM(input_array, 0),
+ (unsigned char *)PyArray_DATA(output_array),
+ PyArray_DIM(output_array, 1),
+ PyArray_DIM(output_array, 0),
+ params);
+ Py_END_ALLOW_THREADS
+ break;
+ case NPY_UINT16:
+ case NPY_INT16:
+ Py_BEGIN_ALLOW_THREADS
+ resample(
+ (unsigned short *)PyArray_DATA(input_array),
+ PyArray_DIM(input_array, 1),
+ PyArray_DIM(input_array, 0),
+ (unsigned short *)PyArray_DATA(output_array),
+ PyArray_DIM(output_array, 1),
+ PyArray_DIM(output_array, 0),
+ params);
+ Py_END_ALLOW_THREADS
+ break;
+ default:
+ PyErr_SetString(PyExc_ValueError, "Unsupported dtype");
+ goto error;
+ }
+ }
+
+ Py_DECREF(input_array);
+ Py_XDECREF(transform_mesh_array);
+ return (PyObject *)output_array;
+
+ error:
+ Py_XDECREF(input_array);
+ Py_XDECREF(output_array);
+ Py_XDECREF(transform_mesh_array);
+ return NULL;
+}
+
+
+const char *image_pcolor__doc__ =
+ "pcolor(x, y, data, rows, cols, bounds)\n"
+ "\n"
+ "Generate a pseudo-color image from data on a non-uniform grid using\n"
+ "nearest neighbour or linear interpolation.\n"
+ "bounds = (x_min, x_max, y_min, y_max)\n"
+ "interpolation = NEAREST or BILINEAR \n";
+
+static PyObject *image_pcolor(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ numpy::array_view<const float, 1> x;
+ numpy::array_view<const float, 1> y;
+ numpy::array_view<const agg::int8u, 3> d;
+ npy_intp rows, cols;
+ float bounds[4];
+ int interpolation;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&O&nn(ffff)i:pcolor",
+ &x.converter,
+ &x,
+ &y.converter,
+ &y,
+ &d.converter_contiguous,
+ &d,
+ &rows,
+ &cols,
+ &bounds[0],
+ &bounds[1],
+ &bounds[2],
+ &bounds[3],
+ &interpolation)) {
+ return NULL;
+ }
+
+ npy_intp dim[3] = {rows, cols, 4};
+ numpy::array_view<const agg::int8u, 3> output(dim);
+
+ CALL_CPP("pcolor", (pcolor(x, y, d, rows, cols, bounds, interpolation, output)));
+
+ return output.pyobj();
+}
+
+const char *image_pcolor2__doc__ =
+ "pcolor2(x, y, data, rows, cols, bounds, bg)\n"
+ "\n"
+ "Generate a pseudo-color image from data on a non-uniform grid\n"
+ "specified by its cell boundaries.\n"
+ "bounds = (x_left, x_right, y_bot, y_top)\n"
+ "bg = ndarray of 4 uint8 representing background rgba\n";
+
+static PyObject *image_pcolor2(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ numpy::array_view<const double, 1> x;
+ numpy::array_view<const double, 1> y;
+ numpy::array_view<const agg::int8u, 3> d;
+ npy_intp rows, cols;
+ float bounds[4];
+ numpy::array_view<const agg::int8u, 1> bg;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&O&nn(ffff)O&:pcolor2",
+ &x.converter_contiguous,
+ &x,
+ &y.converter_contiguous,
+ &y,
+ &d.converter_contiguous,
+ &d,
+ &rows,
+ &cols,
+ &bounds[0],
+ &bounds[1],
+ &bounds[2],
+ &bounds[3],
+ &bg.converter,
+ &bg)) {
+ return NULL;
+ }
+
+ npy_intp dim[3] = {rows, cols, 4};
+ numpy::array_view<const agg::int8u, 3> output(dim);
+
+ CALL_CPP("pcolor2", (pcolor2(x, y, d, rows, cols, bounds, bg, output)));
+
+ return output.pyobj();
+}
+
+static PyMethodDef module_functions[] = {
+ {"resample", (PyCFunction)image_resample, METH_VARARGS|METH_KEYWORDS, image_resample__doc__},
+ {"pcolor", (PyCFunction)image_pcolor, METH_VARARGS, image_pcolor__doc__},
+ {"pcolor2", (PyCFunction)image_pcolor2, METH_VARARGS, image_pcolor2__doc__},
+ {NULL}
+};
+
+extern "C" {
+
+#if PY3K
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_image",
+ NULL,
+ 0,
+ module_functions,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit__image(void)
+
+#else
+#define INITERROR return
+
+PyMODINIT_FUNC init_image(void)
+#endif
+
+{
+ PyObject *m;
+
+#if PY3K
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule3("_image", module_functions, NULL);
+#endif
+
+ if (m == NULL) {
+ INITERROR;
+ }
+
+ if (PyModule_AddIntConstant(m, "NEAREST", NEAREST) ||
+ PyModule_AddIntConstant(m, "BILINEAR", BILINEAR) ||
+ PyModule_AddIntConstant(m, "BICUBIC", BICUBIC) ||
+ PyModule_AddIntConstant(m, "SPLINE16", SPLINE16) ||
+ PyModule_AddIntConstant(m, "SPLINE36", SPLINE36) ||
+ PyModule_AddIntConstant(m, "HANNING", HANNING) ||
+ PyModule_AddIntConstant(m, "HAMMING", HAMMING) ||
+ PyModule_AddIntConstant(m, "HERMITE", HERMITE) ||
+ PyModule_AddIntConstant(m, "KAISER", KAISER) ||
+ PyModule_AddIntConstant(m, "QUADRIC", QUADRIC) ||
+ PyModule_AddIntConstant(m, "CATROM", CATROM) ||
+ PyModule_AddIntConstant(m, "GAUSSIAN", GAUSSIAN) ||
+ PyModule_AddIntConstant(m, "BESSEL", BESSEL) ||
+ PyModule_AddIntConstant(m, "MITCHELL", MITCHELL) ||
+ PyModule_AddIntConstant(m, "SINC", SINC) ||
+ PyModule_AddIntConstant(m, "LANCZOS", LANCZOS) ||
+ PyModule_AddIntConstant(m, "BLACKMAN", BLACKMAN) ||
+ PyModule_AddIntConstant(m, "_n_interpolation", _n_interpolation)) {
+ INITERROR;
+ }
+
+ import_array();
+
+#if PY3K
+ return m;
+#endif
+}
+
+} // extern "C"
diff --git a/contrib/python/matplotlib/py2/src/_path.h b/contrib/python/matplotlib/py2/src/_path.h
new file mode 100644
index 00000000000..76f1894c4af
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_path.h
@@ -0,0 +1,1316 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+#ifndef __PATH_H__
+#define __PATH_H__
+
+#include <limits>
+#include <math.h>
+#include <vector>
+#include <cmath>
+#include <algorithm>
+
+#include "agg_conv_contour.h"
+#include "agg_conv_curve.h"
+#include "agg_conv_stroke.h"
+#include "agg_conv_transform.h"
+#include "agg_path_storage.h"
+#include "agg_trans_affine.h"
+
+#include "path_converters.h"
+#include "_backend_agg_basic_types.h"
+#include "numpy_cpp.h"
+
+struct XY
+{
+ double x;
+ double y;
+
+ XY(double x_, double y_) : x(x_), y(y_)
+ {
+ }
+
+ bool operator==(const XY& o)
+ {
+ return (x == o.x && y == o.y);
+ }
+
+ bool operator!=(const XY& o)
+ {
+ return (x != o.x || y != o.y);
+ }
+};
+
+typedef std::vector<XY> Polygon;
+
+void _finalize_polygon(std::vector<Polygon> &result, int closed_only)
+{
+ if (result.size() == 0) {
+ return;
+ }
+
+ Polygon &polygon = result.back();
+
+ /* Clean up the last polygon in the result. */
+ if (polygon.size() == 0) {
+ result.pop_back();
+ } else if (closed_only) {
+ if (polygon.size() < 3) {
+ result.pop_back();
+ } else if (polygon.front() != polygon.back()) {
+ polygon.push_back(polygon.front());
+ }
+ }
+}
+
+//
+// The following function was found in the Agg 2.3 examples (interactive_polygon.cpp).
+// It has been generalized to work on (possibly curved) polylines, rather than
+// just polygons. The original comments have been kept intact.
+// -- Michael Droettboom 2007-10-02
+//
+//======= Crossings Multiply algorithm of InsideTest ========================
+//
+// By Eric Haines, 3D/Eye Inc, erich@eye.com
+//
+// This version is usually somewhat faster than the original published in
+// Graphics Gems IV; by turning the division for testing the X axis crossing
+// into a tricky multiplication test this part of the test became faster,
+// which had the additional effect of making the test for "both to left or
+// both to right" a bit slower for triangles than simply computing the
+// intersection each time. The main increase is in triangle testing speed,
+// which was about 15% faster; all other polygon complexities were pretty much
+// the same as before. On machines where division is very expensive (not the
+// case on the HP 9000 series on which I tested) this test should be much
+// faster overall than the old code. Your mileage may (in fact, will) vary,
+// depending on the machine and the test data, but in general I believe this
+// code is both shorter and faster. This test was inspired by unpublished
+// Graphics Gems submitted by Joseph Samosky and Mark Haigh-Hutchinson.
+// Related work by Samosky is in:
+//
+// Samosky, Joseph, "SectionView: A system for interactively specifying and
+// visualizing sections through three-dimensional medical image data",
+// M.S. Thesis, Department of Electrical Engineering and Computer Science,
+// Massachusetts Institute of Technology, 1993.
+//
+// Shoot a test ray along +X axis. The strategy is to compare vertex Y values
+// to the testing point's Y and quickly discard edges which are entirely to one
+// side of the test ray. Note that CONVEX and WINDING code can be added as
+// for the CrossingsTest() code; it is left out here for clarity.
+//
+// Input 2D polygon _pgon_ with _numverts_ number of vertices and test point
+// _point_, returns 1 if inside, 0 if outside.
+template <class PathIterator, class PointArray, class ResultArray>
+void point_in_path_impl(PointArray &points, PathIterator &path, ResultArray &inside_flag)
+{
+ uint8_t yflag1;
+ double vtx0, vty0, vtx1, vty1;
+ double tx, ty;
+ double sx, sy;
+ double x, y;
+ size_t i;
+ bool all_done;
+
+ size_t n = points.size();
+
+ std::vector<uint8_t> yflag0(n);
+ std::vector<uint8_t> subpath_flag(n);
+
+ path.rewind(0);
+
+ for (i = 0; i < n; ++i) {
+ inside_flag[i] = 0;
+ }
+
+ unsigned code = 0;
+ do {
+ if (code != agg::path_cmd_move_to) {
+ code = path.vertex(&x, &y);
+ if (code == agg::path_cmd_stop ||
+ (code & agg::path_cmd_end_poly) == agg::path_cmd_end_poly) {
+ continue;
+ }
+ }
+
+ sx = vtx0 = vtx1 = x;
+ sy = vty0 = vty1 = y;
+
+ for (i = 0; i < n; ++i) {
+ ty = points(i, 1);
+
+ if (std::isfinite(ty)) {
+ // get test bit for above/below X axis
+ yflag0[i] = (vty0 >= ty);
+
+ subpath_flag[i] = 0;
+ }
+ }
+
+ do {
+ code = path.vertex(&x, &y);
+
+ // The following cases denote the beginning of a new subpath
+ if (code == agg::path_cmd_stop ||
+ (code & agg::path_cmd_end_poly) == agg::path_cmd_end_poly) {
+ x = sx;
+ y = sy;
+ } else if (code == agg::path_cmd_move_to) {
+ break;
+ }
+
+ for (i = 0; i < n; ++i) {
+ tx = points(i, 0);
+ ty = points(i, 1);
+
+ if (!(std::isfinite(tx) && std::isfinite(ty))) {
+ continue;
+ }
+
+ yflag1 = (vty1 >= ty);
+ // Check if endpoints straddle (are on opposite sides) of
+ // X axis (i.e. the Y's differ); if so, +X ray could
+ // intersect this edge. The old test also checked whether
+ // the endpoints are both to the right or to the left of
+ // the test point. However, given the faster intersection
+ // point computation used below, this test was found to be
+ // a break-even proposition for most polygons and a loser
+ // for triangles (where 50% or more of the edges which
+ // survive this test will cross quadrants and so have to
+ // have the X intersection computed anyway). I credit
+ // Joseph Samosky with inspiring me to try dropping the
+ // "both left or both right" part of my code.
+ if (yflag0[i] != yflag1) {
+ // Check intersection of pgon segment with +X ray.
+ // Note if >= point's X; if so, the ray hits it. The
+ // division operation is avoided for the ">=" test by
+ // checking the sign of the first vertex with respect to the test
+ // point; idea inspired by Joseph Samosky's and Mark
+ // Haigh-Hutchinson's different polygon inclusion
+ // tests.
+ if (((vty1 - ty) * (vtx0 - vtx1) >= (vtx1 - tx) * (vty0 - vty1)) == yflag1) {
+ subpath_flag[i] ^= 1;
+ }
+ }
+
+ // Move to the next pair of vertices, retaining info where possible.
+ yflag0[i] = yflag1;
+ }
+
+ vtx0 = vtx1;
+ vty0 = vty1;
+
+ vtx1 = x;
+ vty1 = y;
+ } while (code != agg::path_cmd_stop &&
+ (code & agg::path_cmd_end_poly) != agg::path_cmd_end_poly);
+
+ all_done = true;
+ for (i = 0; i < n; ++i) {
+ tx = points(i, 0);
+ ty = points(i, 1);
+
+ if (!(std::isfinite(tx) && std::isfinite(ty))) {
+ continue;
+ }
+
+ yflag1 = (vty1 >= ty);
+ if (yflag0[i] != yflag1) {
+ if (((vty1 - ty) * (vtx0 - vtx1) >= (vtx1 - tx) * (vty0 - vty1)) == yflag1) {
+ subpath_flag[i] = subpath_flag[i] ^ true;
+ }
+ }
+ inside_flag[i] |= subpath_flag[i];
+ if (inside_flag[i] == 0) {
+ all_done = false;
+ }
+ }
+
+ if (all_done) {
+ break;
+ }
+ } while (code != agg::path_cmd_stop);
+}
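
The crossings-multiply test documented above, isolated as a self-contained
single-polygon sketch (closed polygon given as parallel x/y arrays) without the
AGG vertex-source plumbing or the multi-point / multi-subpath bookkeeping of
point_in_path_impl(). It is illustrative only and not used by the module.

    #include <cstddef>

    static bool crossings_multiply_inside(const double *xs, const double *ys,
                                          std::size_t n, double tx, double ty)
    {
        bool inside = false;
        double vtx0 = xs[n - 1], vty0 = ys[n - 1];
        bool yflag0 = (vty0 >= ty);

        for (std::size_t i = 0; i < n; ++i) {
            const double vtx1 = xs[i], vty1 = ys[i];
            const bool yflag1 = (vty1 >= ty);
            // Only edges straddling the horizontal through the test point can
            // cross the +X ray; the multiply replaces an explicit division.
            if (yflag0 != yflag1 &&
                (((vty1 - ty) * (vtx0 - vtx1) >=
                  (vtx1 - tx) * (vty0 - vty1)) == yflag1)) {
                inside = !inside;
            }
            vtx0 = vtx1;
            vty0 = vty1;
            yflag0 = yflag1;
        }
        return inside;
    }

    // e.g. for the unit square xs = {0,1,1,0}, ys = {0,0,1,1}:
    //   crossings_multiply_inside(xs, ys, 4, 0.5, 0.5) -> true
    //   crossings_multiply_inside(xs, ys, 4, 1.5, 0.5) -> false
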
+
+template <class PathIterator, class PointArray, class ResultArray>
+inline void points_in_path(PointArray &points,
+ const double r,
+ PathIterator &path,
+ agg::trans_affine &trans,
+ ResultArray &result)
+{
+ typedef agg::conv_transform<PathIterator> transformed_path_t;
+ typedef PathNanRemover<transformed_path_t> no_nans_t;
+ typedef agg::conv_curve<no_nans_t> curve_t;
+ typedef agg::conv_contour<curve_t> contour_t;
+
+ size_t i;
+ for (i = 0; i < points.size(); ++i) {
+ result[i] = false;
+ }
+
+ if (path.total_vertices() < 3) {
+ return;
+ }
+
+ transformed_path_t trans_path(path, trans);
+ no_nans_t no_nans_path(trans_path, true, path.has_curves());
+ curve_t curved_path(no_nans_path);
+ if (r != 0.0) {
+ contour_t contoured_path(curved_path);
+ contoured_path.width(r);
+ point_in_path_impl(points, contoured_path, result);
+ } else {
+ point_in_path_impl(points, curved_path, result);
+ }
+}
+
+template <class PathIterator>
+inline bool point_in_path(
+ double x, double y, const double r, PathIterator &path, agg::trans_affine &trans)
+{
+ npy_intp shape[] = {1, 2};
+ numpy::array_view<double, 2> points(shape);
+ points(0, 0) = x;
+ points(0, 1) = y;
+
+ int result[1];
+ result[0] = 0;
+
+ points_in_path(points, r, path, trans, result);
+
+ return (bool)result[0];
+}
+
+template <class PathIterator, class PointArray, class ResultArray>
+void points_on_path(PointArray &points,
+ const double r,
+ PathIterator &path,
+ agg::trans_affine &trans,
+ ResultArray result)
+{
+ typedef agg::conv_transform<PathIterator> transformed_path_t;
+ typedef PathNanRemover<transformed_path_t> no_nans_t;
+ typedef agg::conv_curve<no_nans_t> curve_t;
+ typedef agg::conv_stroke<curve_t> stroke_t;
+
+ size_t i;
+ for (i = 0; i < points.size(); ++i) {
+ result[i] = false;
+ }
+
+ transformed_path_t trans_path(path, trans);
+ no_nans_t nan_removed_path(trans_path, true, path.has_curves());
+ curve_t curved_path(nan_removed_path);
+ stroke_t stroked_path(curved_path);
+ stroked_path.width(r * 2.0);
+ point_in_path_impl(points, stroked_path, result);
+}
+
+template <class PathIterator>
+inline bool point_on_path(
+ double x, double y, const double r, PathIterator &path, agg::trans_affine &trans)
+{
+ npy_intp shape[] = {1, 2};
+ numpy::array_view<double, 2> points(shape);
+ points(0, 0) = x;
+ points(0, 1) = y;
+
+ int result[1];
+ result[0] = 0;
+
+ points_on_path(points, r, path, trans, result);
+
+ return (bool)result[0];
+}
+
+struct extent_limits
+{
+ double x0;
+ double y0;
+ double x1;
+ double y1;
+ double xm;
+ double ym;
+};
+
+void reset_limits(extent_limits &e)
+{
+ e.x0 = std::numeric_limits<double>::infinity();
+ e.y0 = std::numeric_limits<double>::infinity();
+ e.x1 = -std::numeric_limits<double>::infinity();
+ e.y1 = -std::numeric_limits<double>::infinity();
+ /* xm and ym are the minimum positive values in the data, used
+ by log scaling */
+ e.xm = std::numeric_limits<double>::infinity();
+ e.ym = std::numeric_limits<double>::infinity();
+}
+
+inline void update_limits(double x, double y, extent_limits &e)
+{
+ if (x < e.x0)
+ e.x0 = x;
+ if (y < e.y0)
+ e.y0 = y;
+ if (x > e.x1)
+ e.x1 = x;
+ if (y > e.y1)
+ e.y1 = y;
+ /* xm and ym are the minimum positive values in the data, used
+ by log scaling */
+ if (x > 0.0 && x < e.xm)
+ e.xm = x;
+ if (y > 0.0 && y < e.ym)
+ e.ym = y;
+}
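
A tiny usage sketch for the two helpers above, showing how xm/ym end up holding
the smallest strictly positive coordinates, which log-scaled axes need; the
point values are arbitrary.

    static void extent_limits_sketch()
    {
        extent_limits e;
        reset_limits(e);

        update_limits(-1.0,  0.5, e);
        update_limits( 2.0,  4.0, e);
        update_limits( 0.25, -3.0, e);

        // Now: e.x0 == -1.0, e.x1 == 2.0, e.y0 == -3.0, e.y1 == 4.0,
        //      e.xm == 0.25 (smallest x > 0), e.ym == 0.5 (smallest y > 0).
    }
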
+
+template <class PathIterator>
+void update_path_extents(PathIterator &path, agg::trans_affine &trans, extent_limits &extents)
+{
+ typedef agg::conv_transform<PathIterator> transformed_path_t;
+ typedef PathNanRemover<transformed_path_t> nan_removed_t;
+ double x, y;
+ unsigned code;
+
+ transformed_path_t tpath(path, trans);
+ nan_removed_t nan_removed(tpath, true, path.has_curves());
+
+ nan_removed.rewind(0);
+
+ while ((code = nan_removed.vertex(&x, &y)) != agg::path_cmd_stop) {
+ if ((code & agg::path_cmd_end_poly) == agg::path_cmd_end_poly) {
+ continue;
+ }
+ update_limits(x, y, extents);
+ }
+}
+
+template <class PathGenerator, class TransformArray, class OffsetArray>
+void get_path_collection_extents(agg::trans_affine &master_transform,
+ PathGenerator &paths,
+ TransformArray &transforms,
+ OffsetArray &offsets,
+ agg::trans_affine &offset_trans,
+ extent_limits &extent)
+{
+ if (offsets.size() != 0 && offsets.dim(1) != 2) {
+ throw std::runtime_error("Offsets array must be Nx2");
+ }
+
+ size_t Npaths = paths.size();
+ size_t Noffsets = offsets.size();
+ size_t N = std::max(Npaths, Noffsets);
+ size_t Ntransforms = std::min(transforms.size(), N);
+ size_t i;
+
+ agg::trans_affine trans;
+
+ reset_limits(extent);
+
+ for (i = 0; i < N; ++i) {
+ typename PathGenerator::path_iterator path(paths(i % Npaths));
+ if (Ntransforms) {
+ size_t ti = i % Ntransforms;
+ trans = agg::trans_affine(transforms(ti, 0, 0),
+ transforms(ti, 1, 0),
+ transforms(ti, 0, 1),
+ transforms(ti, 1, 1),
+ transforms(ti, 0, 2),
+ transforms(ti, 1, 2));
+ } else {
+ trans = master_transform;
+ }
+
+ if (Noffsets) {
+ double xo = offsets(i % Noffsets, 0);
+ double yo = offsets(i % Noffsets, 1);
+ offset_trans.transform(&xo, &yo);
+ trans *= agg::trans_affine_translation(xo, yo);
+ }
+
+ update_path_extents(path, trans, extent);
+ }
+}
+
+template <class PathGenerator, class TransformArray, class OffsetArray>
+void point_in_path_collection(double x,
+ double y,
+ double radius,
+ agg::trans_affine &master_transform,
+ PathGenerator &paths,
+ TransformArray &transforms,
+ OffsetArray &offsets,
+ agg::trans_affine &offset_trans,
+ bool filled,
+ e_offset_position offset_position,
+ std::vector<int> &result)
+{
+ size_t Npaths = paths.size();
+
+ if (Npaths == 0) {
+ return;
+ }
+
+ size_t Noffsets = offsets.size();
+ size_t N = std::max(Npaths, Noffsets);
+ size_t Ntransforms = std::min(transforms.size(), N);
+ size_t i;
+
+ agg::trans_affine trans;
+
+ for (i = 0; i < N; ++i) {
+ typename PathGenerator::path_iterator path = paths(i % Npaths);
+
+ if (Ntransforms) {
+ size_t ti = i % Ntransforms;
+ trans = agg::trans_affine(transforms(ti, 0, 0),
+ transforms(ti, 1, 0),
+ transforms(ti, 0, 1),
+ transforms(ti, 1, 1),
+ transforms(ti, 0, 2),
+ transforms(ti, 1, 2));
+ trans *= master_transform;
+ } else {
+ trans = master_transform;
+ }
+
+ if (Noffsets) {
+ double xo = offsets(i % Noffsets, 0);
+ double yo = offsets(i % Noffsets, 1);
+ offset_trans.transform(&xo, &yo);
+ if (offset_position == OFFSET_POSITION_DATA) {
+ trans = agg::trans_affine_translation(xo, yo) * trans;
+ } else {
+ trans *= agg::trans_affine_translation(xo, yo);
+ }
+ }
+
+ if (filled) {
+ if (point_in_path(x, y, radius, path, trans)) {
+ result.push_back(i);
+ }
+ } else {
+ if (point_on_path(x, y, radius, path, trans)) {
+ result.push_back(i);
+ }
+ }
+ }
+}
+
+template <class PathIterator1, class PathIterator2>
+bool path_in_path(PathIterator1 &a,
+ agg::trans_affine &atrans,
+ PathIterator2 &b,
+ agg::trans_affine &btrans)
+{
+ typedef agg::conv_transform<PathIterator2> transformed_path_t;
+ typedef PathNanRemover<transformed_path_t> no_nans_t;
+ typedef agg::conv_curve<no_nans_t> curve_t;
+
+ if (a.total_vertices() < 3) {
+ return false;
+ }
+
+ transformed_path_t b_path_trans(b, btrans);
+ no_nans_t b_no_nans(b_path_trans, true, b.has_curves());
+ curve_t b_curved(b_no_nans);
+
+ double x, y;
+ b_curved.rewind(0);
+ while (b_curved.vertex(&x, &y) != agg::path_cmd_stop) {
+ if (!point_in_path(x, y, 0.0, a, atrans)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/** The clip_path_to_rect code here is a clean-room implementation of
+ the Sutherland-Hodgman clipping algorithm described here:
+
+ http://en.wikipedia.org/wiki/Sutherland-Hodgman_clipping_algorithm
+*/
+
+namespace clip_to_rect_filters
+{
+/* There are four different passes needed to create/remove
+ vertices (one for each side of the rectangle). The differences
+ between those passes are encapsulated in these functor classes.
+*/
+struct bisectx
+{
+ double m_x;
+
+ bisectx(double x) : m_x(x)
+ {
+ }
+
+ inline void bisect(double sx, double sy, double px, double py, double *bx, double *by) const
+ {
+ *bx = m_x;
+ double dx = px - sx;
+ double dy = py - sy;
+ *by = sy + dy * ((m_x - sx) / dx);
+ }
+};
+
+struct xlt : public bisectx
+{
+ xlt(double x) : bisectx(x)
+ {
+ }
+
+ inline bool is_inside(double x, double y) const
+ {
+ return x <= m_x;
+ }
+};
+
+struct xgt : public bisectx
+{
+ xgt(double x) : bisectx(x)
+ {
+ }
+
+ inline bool is_inside(double x, double y) const
+ {
+ return x >= m_x;
+ }
+};
+
+struct bisecty
+{
+ double m_y;
+
+ bisecty(double y) : m_y(y)
+ {
+ }
+
+ inline void bisect(double sx, double sy, double px, double py, double *bx, double *by) const
+ {
+ *by = m_y;
+ double dx = px - sx;
+ double dy = py - sy;
+ *bx = sx + dx * ((m_y - sy) / dy);
+ }
+};
+
+struct ylt : public bisecty
+{
+ ylt(double y) : bisecty(y)
+ {
+ }
+
+ inline bool is_inside(double x, double y) const
+ {
+ return y <= m_y;
+ }
+};
+
+struct ygt : public bisecty
+{
+ ygt(double y) : bisecty(y)
+ {
+ }
+
+ inline bool is_inside(double x, double y) const
+ {
+ return y >= m_y;
+ }
+};
+}
+
+template <class Filter>
+inline void clip_to_rect_one_step(const Polygon &polygon, Polygon &result, const Filter &filter)
+{
+ double sx, sy, px, py, bx, by;
+ bool sinside, pinside;
+ result.clear();
+
+ if (polygon.size() == 0) {
+ return;
+ }
+
+ sx = polygon.back().x;
+ sy = polygon.back().y;
+ for (Polygon::const_iterator i = polygon.begin(); i != polygon.end(); ++i) {
+ px = i->x;
+ py = i->y;
+
+ sinside = filter.is_inside(sx, sy);
+ pinside = filter.is_inside(px, py);
+
+ if (sinside ^ pinside) {
+ filter.bisect(sx, sy, px, py, &bx, &by);
+ result.push_back(XY(bx, by));
+ }
+
+ if (pinside) {
+ result.push_back(XY(px, py));
+ }
+
+ sx = px;
+ sy = py;
+ }
+}
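
A short usage sketch for the single clipping pass above: one call clips a
triangle against the half-plane x <= 1.0 with the xlt filter, the kind of step
clip_path_to_rect() below chains four times, once per rectangle edge. The
triangle coordinates are illustrative.

    static void clip_one_step_sketch()
    {
        Polygon triangle;
        triangle.push_back(XY(0.0, 0.0));
        triangle.push_back(XY(2.0, 0.0));
        triangle.push_back(XY(0.0, 2.0));

        Polygon clipped;
        clip_to_rect_one_step(triangle, clipped, clip_to_rect_filters::xlt(1.0));

        // clipped is now the quadrilateral (0,0), (1,0), (1,1), (0,2): the
        // edge leaving the half-plane is cut at x == 1 on the way out and
        // again on the way back in.
    }
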
+
+template <class PathIterator>
+void
+clip_path_to_rect(PathIterator &path, agg::rect_d &rect, bool inside, std::vector<Polygon> &results)
+{
+ double xmin, ymin, xmax, ymax;
+ if (rect.x1 < rect.x2) {
+ xmin = rect.x1;
+ xmax = rect.x2;
+ } else {
+ xmin = rect.x2;
+ xmax = rect.x1;
+ }
+
+ if (rect.y1 < rect.y2) {
+ ymin = rect.y1;
+ ymax = rect.y2;
+ } else {
+ ymin = rect.y2;
+ ymax = rect.y1;
+ }
+
+ if (!inside) {
+ std::swap(xmin, xmax);
+ std::swap(ymin, ymax);
+ }
+
+ typedef agg::conv_curve<PathIterator> curve_t;
+ curve_t curve(path);
+
+ Polygon polygon1, polygon2;
+ double x = 0, y = 0;
+ unsigned code = 0;
+ curve.rewind(0);
+
+ do {
+ // Grab the next subpath and store it in polygon1
+ polygon1.clear();
+ do {
+ if (code == agg::path_cmd_move_to) {
+ polygon1.push_back(XY(x, y));
+ }
+
+ code = curve.vertex(&x, &y);
+
+ if (code == agg::path_cmd_stop) {
+ break;
+ }
+
+ if (code != agg::path_cmd_move_to) {
+ polygon1.push_back(XY(x, y));
+ }
+ } while ((code & agg::path_cmd_end_poly) != agg::path_cmd_end_poly);
+
+ // The result of each step is fed into the next (note the
+ // swapping of polygon1 and polygon2 at each step).
+ clip_to_rect_one_step(polygon1, polygon2, clip_to_rect_filters::xlt(xmax));
+ clip_to_rect_one_step(polygon2, polygon1, clip_to_rect_filters::xgt(xmin));
+ clip_to_rect_one_step(polygon1, polygon2, clip_to_rect_filters::ylt(ymax));
+ clip_to_rect_one_step(polygon2, polygon1, clip_to_rect_filters::ygt(ymin));
+
+ // Empty polygons aren't very useful, so skip them
+ if (polygon1.size()) {
+ _finalize_polygon(results, 1);
+ results.push_back(polygon1);
+ }
+ } while (code != agg::path_cmd_stop);
+
+ _finalize_polygon(results, 1);
+}
+
+template <class VerticesArray, class ResultArray>
+void affine_transform_2d(VerticesArray &vertices, agg::trans_affine &trans, ResultArray &result)
+{
+ if (vertices.size() != 0 && vertices.dim(1) != 2) {
+ throw std::runtime_error("Invalid vertices array.");
+ }
+
+ size_t n = vertices.size();
+ double x;
+ double y;
+ double t0;
+ double t1;
+ double t;
+
+ for (size_t i = 0; i < n; ++i) {
+ x = vertices(i, 0);
+ y = vertices(i, 1);
+
+ t0 = trans.sx * x;
+ t1 = trans.shx * y;
+ t = t0 + t1 + trans.tx;
+ result(i, 0) = t;
+
+ t0 = trans.shy * x;
+ t1 = trans.sy * y;
+ t = t0 + t1 + trans.ty;
+ result(i, 1) = t;
+ }
+}
+
+template <class VerticesArray, class ResultArray>
+void affine_transform_1d(VerticesArray &vertices, agg::trans_affine &trans, ResultArray &result)
+{
+ if (vertices.dim(0) != 2) {
+ throw std::runtime_error("Invalid vertices array.");
+ }
+
+ double x;
+ double y;
+ double t0;
+ double t1;
+ double t;
+
+ x = vertices(0);
+ y = vertices(1);
+
+ t0 = trans.sx * x;
+ t1 = trans.shx * y;
+ t = t0 + t1 + trans.tx;
+ result(0) = t;
+
+ t0 = trans.shy * x;
+ t1 = trans.sy * y;
+ t = t0 + t1 + trans.ty;
+ result(1) = t;
+}
+
+template <class BBoxArray>
+int count_bboxes_overlapping_bbox(agg::rect_d &a, BBoxArray &bboxes)
+{
+ agg::rect_d b;
+ int count = 0;
+
+ if (a.x2 < a.x1) {
+ std::swap(a.x1, a.x2);
+ }
+ if (a.y2 < a.y1) {
+ std::swap(a.y1, a.y2);
+ }
+
+ size_t num_bboxes = bboxes.size();
+ for (size_t i = 0; i < num_bboxes; ++i) {
+ b = agg::rect_d(bboxes(i, 0, 0), bboxes(i, 0, 1), bboxes(i, 1, 0), bboxes(i, 1, 1));
+
+ if (b.x2 < b.x1) {
+ std::swap(b.x1, b.x2);
+ }
+ if (b.y2 < b.y1) {
+ std::swap(b.y1, b.y2);
+ }
+ if (!((b.x2 <= a.x1) || (b.y2 <= a.y1) || (b.x1 >= a.x2) || (b.y1 >= a.y2))) {
+ ++count;
+ }
+ }
+
+ return count;
+}
+
+inline bool segments_intersect(const double &x1,
+ const double &y1,
+ const double &x2,
+ const double &y2,
+ const double &x3,
+ const double &y3,
+ const double &x4,
+ const double &y4)
+{
+ double den = ((y4 - y3) * (x2 - x1)) - ((x4 - x3) * (y2 - y1));
+ if (den == 0.0) {
+ return false;
+ }
+
+ double n1 = ((x4 - x3) * (y1 - y3)) - ((y4 - y3) * (x1 - x3));
+ double n2 = ((x2 - x1) * (y1 - y3)) - ((y2 - y1) * (x1 - x3));
+
+ double u1 = n1 / den;
+ double u2 = n2 / den;
+
+ return (u1 >= 0.0 && u1 <= 1.0 && u2 >= 0.0 && u2 <= 1.0);
+}
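
Two worked cases for segments_intersect(): crossing diagonals meet at their
midpoints (u1 == u2 == 0.5 in the parametric form above) and return true, while
shifting the second segment to the right pushes u1 outside [0, 1] and returns
false. The coordinates are arbitrary.

    #include <cassert>

    static void segments_intersect_sketch()
    {
        // Diagonals of the square (0,0)-(2,2): they intersect at (1,1).
        assert(segments_intersect(0.0, 0.0, 2.0, 2.0,
                                  0.0, 2.0, 2.0, 0.0));

        // Same first segment, second segment moved right: no intersection.
        assert(!segments_intersect(0.0, 0.0, 2.0, 2.0,
                                   3.0, 2.0, 5.0, 0.0));
    }
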
+
+template <class PathIterator1, class PathIterator2>
+bool path_intersects_path(PathIterator1 &p1, PathIterator2 &p2)
+{
+ typedef PathNanRemover<py::PathIterator> no_nans_t;
+ typedef agg::conv_curve<no_nans_t> curve_t;
+
+ if (p1.total_vertices() < 2 || p2.total_vertices() < 2) {
+ return false;
+ }
+
+ no_nans_t n1(p1, true, p1.has_curves());
+ no_nans_t n2(p2, true, p2.has_curves());
+
+ curve_t c1(n1);
+ curve_t c2(n2);
+
+ double x11, y11, x12, y12;
+ double x21, y21, x22, y22;
+
+ c1.vertex(&x11, &y11);
+ while (c1.vertex(&x12, &y12) != agg::path_cmd_stop) {
+ c2.rewind(0);
+ c2.vertex(&x21, &y21);
+ while (c2.vertex(&x22, &y22) != agg::path_cmd_stop) {
+ if (segments_intersect(x11, y11, x12, y12, x21, y21, x22, y22)) {
+ return true;
+ }
+ x21 = x22;
+ y21 = y22;
+ }
+ x11 = x12;
+ y11 = y12;
+ }
+
+ return false;
+}
+
+// returns whether the segment from (x1,y1) to (x2,y2)
+// intersects the rectangle centered at (cx,cy) with size (w,h)
+// see doc/segment_intersects_rectangle.svg for a more detailed explanation
+inline bool segment_intersects_rectangle(double x1, double y1,
+ double x2, double y2,
+ double cx, double cy,
+ double w, double h)
+{
+ return fabs(x1 + x2 - 2.0 * cx) < fabs(x1 - x2) + w &&
+ fabs(y1 + y2 - 2.0 * cy) < fabs(y1 - y2) + h &&
+ 2.0 * fabs((x1 - cx) * (y1 - y2) - (y1 - cy) * (x1 - x2)) <
+ w * fabs(y1 - y2) + h * fabs(x1 - x2);
+}
+
+template <class PathIterator>
+bool path_intersects_rectangle(PathIterator &path,
+ double rect_x1, double rect_y1,
+ double rect_x2, double rect_y2,
+ bool filled)
+{
+ typedef PathNanRemover<py::PathIterator> no_nans_t;
+ typedef agg::conv_curve<no_nans_t> curve_t;
+
+ if (path.total_vertices() == 0) {
+ return false;
+ }
+
+ no_nans_t no_nans(path, true, path.has_curves());
+ curve_t curve(no_nans);
+
+ double cx = (rect_x1 + rect_x2) * 0.5, cy = (rect_y1 + rect_y2) * 0.5;
+ double w = fabs(rect_x1 - rect_x2), h = fabs(rect_y1 - rect_y2);
+
+ double x1, y1, x2, y2;
+
+ curve.vertex(&x1, &y1);
+ if (2.0 * fabs(x1 - cx) <= w && 2.0 * fabs(y1 - cy) <= h) {
+ return true;
+ }
+
+ while (curve.vertex(&x2, &y2) != agg::path_cmd_stop) {
+ if (segment_intersects_rectangle(x1, y1, x2, y2, cx, cy, w, h)) {
+ return true;
+ }
+ x1 = x2;
+ y1 = y2;
+ }
+
+ if (filled) {
+ agg::trans_affine trans;
+ if (point_in_path(cx, cy, 0.0, path, trans)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+template <class PathIterator>
+void convert_path_to_polygons(PathIterator &path,
+ agg::trans_affine &trans,
+ double width,
+ double height,
+ int closed_only,
+ std::vector<Polygon> &result)
+{
+ typedef agg::conv_transform<py::PathIterator> transformed_path_t;
+ typedef PathNanRemover<transformed_path_t> nan_removal_t;
+ typedef PathClipper<nan_removal_t> clipped_t;
+ typedef PathSimplifier<clipped_t> simplify_t;
+ typedef agg::conv_curve<simplify_t> curve_t;
+
+ bool do_clip = width != 0.0 && height != 0.0;
+ bool simplify = path.should_simplify();
+
+ transformed_path_t tpath(path, trans);
+ nan_removal_t nan_removed(tpath, true, path.has_curves());
+ clipped_t clipped(nan_removed, do_clip && !path.has_curves(), width, height);
+ simplify_t simplified(clipped, simplify, path.simplify_threshold());
+ curve_t curve(simplified);
+
+ result.push_back(Polygon());
+ Polygon *polygon = &result.back();
+ double x, y;
+ unsigned code;
+
+ while ((code = curve.vertex(&x, &y)) != agg::path_cmd_stop) {
+ if ((code & agg::path_cmd_end_poly) == agg::path_cmd_end_poly) {
+ _finalize_polygon(result, 1);
+ result.push_back(Polygon());
+ polygon = &result.back();
+ } else {
+ if (code == agg::path_cmd_move_to) {
+ _finalize_polygon(result, closed_only);
+ result.push_back(Polygon());
+ polygon = &result.back();
+ }
+ polygon->push_back(XY(x, y));
+ }
+ }
+
+ _finalize_polygon(result, closed_only);
+}
+
+template <class VertexSource>
+void
+__cleanup_path(VertexSource &source, std::vector<double> &vertices, std::vector<npy_uint8> &codes)
+{
+ unsigned code;
+ double x, y;
+ do {
+ code = source.vertex(&x, &y);
+ vertices.push_back(x);
+ vertices.push_back(y);
+ codes.push_back((npy_uint8)code);
+ } while (code != agg::path_cmd_stop);
+}
+
+template <class PathIterator>
+void cleanup_path(PathIterator &path,
+ agg::trans_affine &trans,
+ bool remove_nans,
+ bool do_clip,
+ const agg::rect_base<double> &rect,
+ e_snap_mode snap_mode,
+ double stroke_width,
+ bool do_simplify,
+ bool return_curves,
+ SketchParams sketch_params,
+ std::vector<double> &vertices,
+ std::vector<unsigned char> &codes)
+{
+ typedef agg::conv_transform<py::PathIterator> transformed_path_t;
+ typedef PathNanRemover<transformed_path_t> nan_removal_t;
+ typedef PathClipper<nan_removal_t> clipped_t;
+ typedef PathSnapper<clipped_t> snapped_t;
+ typedef PathSimplifier<snapped_t> simplify_t;
+ typedef agg::conv_curve<simplify_t> curve_t;
+ typedef Sketch<curve_t> sketch_t;
+
+ transformed_path_t tpath(path, trans);
+ nan_removal_t nan_removed(tpath, remove_nans, path.has_curves());
+ clipped_t clipped(nan_removed, do_clip && !path.has_curves(), rect);
+ snapped_t snapped(clipped, snap_mode, path.total_vertices(), stroke_width);
+ simplify_t simplified(snapped, do_simplify, path.simplify_threshold());
+
+ vertices.reserve(path.total_vertices() * 2);
+ codes.reserve(path.total_vertices());
+
+ if (return_curves && sketch_params.scale == 0.0) {
+ __cleanup_path(simplified, vertices, codes);
+ } else {
+ curve_t curve(simplified);
+ sketch_t sketch(curve, sketch_params.scale, sketch_params.length, sketch_params.randomness);
+ __cleanup_path(sketch, vertices, codes);
+ }
+}
+
+void quad2cubic(double x0, double y0,
+ double x1, double y1,
+ double x2, double y2,
+ double *outx, double *outy)
+{
+
+ outx[0] = x0 + 2./3. * (x1 - x0);
+ outy[0] = y0 + 2./3. * (y1 - y0);
+ outx[1] = outx[0] + 1./3. * (x2 - x0);
+ outy[1] = outy[0] + 1./3. * (y2 - y0);
+ outx[2] = x2;
+ outy[2] = y2;
+}
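
quad2cubic() is the standard Bezier degree-elevation rule: starting from a
quadratic with control points P0, P1, P2, the equivalent cubic uses
P0, P0 + 2/3 (P1 - P0), P2 + 2/3 (P1 - P2), P2 (the middle two are what the
function writes to out[0] and out[1]) and traces exactly the same curve. A
small numerical check with arbitrary control points:

    #include <cassert>
    #include <cmath>

    static void quad2cubic_check()
    {
        const double x0 = 0.0, y0 = 0.0, x1 = 1.0, y1 = 2.0, x2 = 3.0, y2 = 0.0;
        double cx[3], cy[3];
        quad2cubic(x0, y0, x1, y1, x2, y2, cx, cy);

        for (double t = 0.0; t <= 1.0; t += 0.25) {
            const double s = 1.0 - t;
            // Quadratic Bezier at t
            const double qx = s*s*x0 + 2.0*s*t*x1 + t*t*x2;
            const double qy = s*s*y0 + 2.0*s*t*y1 + t*t*y2;
            // Cubic Bezier with control points (x0, y0) and cx/cy[0..2]
            const double bx = s*s*s*x0 + 3.0*s*s*t*cx[0] + 3.0*s*t*t*cx[1] + t*t*t*cx[2];
            const double by = s*s*s*y0 + 3.0*s*s*t*cy[0] + 3.0*s*t*t*cy[1] + t*t*t*cy[2];
            assert(std::fabs(qx - bx) < 1e-12 && std::fabs(qy - by) < 1e-12);
        }
    }
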
+
+char *__append_to_string(char *p, char **buffer, size_t *buffersize,
+ const char *content)
+{
+ for (const char *i = content; *i; ++i) {
+ if (p < *buffer) {
+ /* This is just an internal error */
+ return NULL;
+ }
+ if ((size_t)(p - *buffer) >= *buffersize) {
+ ptrdiff_t diff = p - *buffer;
+ *buffersize *= 2;
+ *buffer = (char *)realloc(*buffer, *buffersize);
+ if (*buffer == NULL) {
+ return NULL;
+ }
+ p = *buffer + diff;
+ }
+
+ *p++ = *i;
+ }
+
+ return p;
+}
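
The p / buffer / buffersize protocol used by __append_to_string() (and by
__add_number() below): p is the current write position inside a heap buffer
that grows by doubling (via realloc) whenever an append would overflow, and
each call returns the updated write position, or NULL on failure. A small
usage sketch with error handling elided:

    #include <cstdlib>

    static void append_to_string_sketch()
    {
        size_t buffersize = 4;                      // deliberately tiny start
        char *buffer = (char *)malloc(buffersize);
        char *p = buffer;

        p = __append_to_string(p, &buffer, &buffersize, "hello ");
        p = __append_to_string(p, &buffer, &buffersize, "world");

        // buffer may have been reallocated; p - buffer == 11 bytes written
        // (the helper does not append a terminating NUL).

        free(buffer);
    }
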
+
+
+char *__add_number(double val, const char *format, int precision,
+ char **buffer, char *p, size_t *buffersize)
+{
+ char *result;
+
+#if PY_VERSION_HEX >= 0x02070000
+ char *str;
+ str = PyOS_double_to_string(val, format[0], precision, 0, NULL);
+#else
+ char str[64];
+ PyOS_ascii_formatd(str, 64, format, val);
+#endif
+
+ // Delete trailing zeros and decimal point
+ char *q = str;
+ for (; *q != 0; ++q) {
+ // Find the end of the string
+ }
+
+ --q;
+ for (; q >= str && *q == '0'; --q) {
+ // Rewind through all the zeros
+ }
+
+ // If the end is a decimal point, delete that too
+ if (q >= str && *q == '.') {
+ --q;
+ }
+
+ // Truncate the string
+ ++q;
+ *q = 0;
+
+#if PY_VERSION_HEX >= 0x02070000
+ if ((result = __append_to_string(p, buffer, buffersize, str)) == NULL) {
+ PyMem_Free(str);
+ return NULL;
+ }
+ PyMem_Free(str);
+#else
+ if ((result = __append_to_string(p, buffer, buffersize, str)) == NULL) {
+ return NULL;
+ }
+#endif
+
+ return result;
+}
+
+
+template <class PathIterator>
+int __convert_to_string(PathIterator &path,
+ int precision,
+ char **codes,
+ bool postfix,
+ char **buffer,
+ size_t *buffersize)
+{
+#if PY_VERSION_HEX >= 0x02070000
+ const char *format = "f";
+#else
+ char format[64];
+ snprintf(format, 64, "%s.%df", "%", precision);
+#endif
+
+ char *p = *buffer;
+ double x[3];
+ double y[3];
+ double last_x = 0.0;
+ double last_y = 0.0;
+
+ const int sizes[] = { 1, 1, 2, 3 };
+ int size = 0;
+ unsigned code;
+
+ while ((code = path.vertex(&x[0], &y[0])) != agg::path_cmd_stop) {
+ if (code == 0x4f) { /* path_cmd_end_poly | path_flags_close, i.e. CLOSEPOLY */
+ if ((p = __append_to_string(p, buffer, buffersize, codes[4])) == NULL) return 1;
+ } else if (code < 5) {
+ size = sizes[code - 1];
+
+ for (int i = 1; i < size; ++i) {
+ unsigned subcode = path.vertex(&x[i], &y[i]);
+ if (subcode != code) {
+ return 2;
+ }
+ }
+
+ /* For formats that don't support quad curves, convert to
+ cubic curves */
+ if (code == CURVE3 && codes[code - 1][0] == '\0') {
+ quad2cubic(last_x, last_y, x[0], y[0], x[1], y[1], x, y);
+ code++;
+ size = 3;
+ }
+
+ if (!postfix) {
+ if ((p = __append_to_string(p, buffer, buffersize, codes[code - 1])) == NULL) return 1;
+ if ((p = __append_to_string(p, buffer, buffersize, " ")) == NULL) return 1;
+ }
+
+ for (int i = 0; i < size; ++i) {
+ if ((p = __add_number(x[i], format, precision, buffer, p, buffersize)) == NULL) return 1;
+ if ((p = __append_to_string(p, buffer, buffersize, " ")) == NULL) return 1;
+ if ((p = __add_number(y[i], format, precision, buffer, p, buffersize)) == NULL) return 1;
+ if ((p = __append_to_string(p, buffer, buffersize, " ")) == NULL) return 1;
+ }
+
+ if (postfix) {
+ if ((p = __append_to_string(p, buffer, buffersize, codes[code - 1])) == NULL) return 1;
+ }
+
+ last_x = x[size - 1];
+ last_y = y[size - 1];
+ } else {
+ // Unknown code value
+ return 2;
+ }
+
+ if ((p = __append_to_string(p, buffer, buffersize, "\n")) == NULL) return 1;
+ }
+
+ *buffersize = p - *buffer;
+
+ return 0;
+}
+
+template <class PathIterator>
+int convert_to_string(PathIterator &path,
+ agg::trans_affine &trans,
+ agg::rect_d &clip_rect,
+ bool simplify,
+ SketchParams sketch_params,
+ int precision,
+ char **codes,
+ bool postfix,
+ char **buffer,
+ size_t *buffersize)
+{
+ typedef agg::conv_transform<py::PathIterator> transformed_path_t;
+ typedef PathNanRemover<transformed_path_t> nan_removal_t;
+ typedef PathClipper<nan_removal_t> clipped_t;
+ typedef PathSimplifier<clipped_t> simplify_t;
+ typedef agg::conv_curve<simplify_t> curve_t;
+ typedef Sketch<curve_t> sketch_t;
+
+ bool do_clip = (clip_rect.x1 < clip_rect.x2 && clip_rect.y1 < clip_rect.y2);
+
+ transformed_path_t tpath(path, trans);
+ nan_removal_t nan_removed(tpath, true, path.has_curves());
+ clipped_t clipped(nan_removed, do_clip && !path.has_curves(), clip_rect);
+ simplify_t simplified(clipped, simplify, path.simplify_threshold());
+
+ *buffersize = path.total_vertices() * (precision + 5) * 4;
+ if (*buffersize == 0) {
+ return 0;
+ }
+
+ if (sketch_params.scale != 0.0) {
+ *buffersize *= 10.0;
+ }
+
+ *buffer = (char *)malloc(*buffersize);
+ if (*buffer == NULL) {
+ return 1;
+ }
+
+ if (sketch_params.scale == 0.0) {
+ return __convert_to_string(simplified, precision, codes, postfix, buffer, buffersize);
+ } else {
+ curve_t curve(simplified);
+ sketch_t sketch(curve, sketch_params.scale, sketch_params.length, sketch_params.randomness);
+ return __convert_to_string(sketch, precision, codes, postfix, buffer, buffersize);
+ }
+
+}
+
+template<class T>
+struct _is_sorted
+{
+ bool operator()(PyArrayObject *array)
+ {
+ npy_intp size;
+ npy_intp i;
+ T last_value;
+ T current_value;
+
+ size = PyArray_DIM(array, 0);
+
+ // std::isnan is only in C++11, which we don't yet require,
+ // so we use the "self == self" trick
+ for (i = 0; i < size; ++i) {
+ last_value = *((T *)PyArray_GETPTR1(array, i));
+ if (last_value == last_value) {
+ break;
+ }
+ }
+
+ if (i == size) {
+ // The whole array is non-finite
+ return false;
+ }
+
+ for (; i < size; ++i) {
+ current_value = *((T *)PyArray_GETPTR1(array, i));
+ if (current_value == current_value) {
+ if (current_value < last_value) {
+ return false;
+ }
+ last_value = current_value;
+ }
+ }
+
+ return true;
+ }
+};
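
The "self == self" trick used above, isolated for clarity: a NaN is the only
floating-point value that compares unequal to itself, so no std::isnan (C++11)
is needed. Note this assumes IEEE comparison semantics and breaks under
-ffast-math style options.

    #include <limits>

    static bool is_nan_compat(double v)
    {
        return !(v == v);  // true only when v is NaN
    }

    // is_nan_compat(std::numeric_limits<double>::quiet_NaN()) -> true
    // is_nan_compat(1.0)                                      -> false
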
+
+
+template<class T>
+struct _is_sorted_int
+{
+ bool operator()(PyArrayObject *array)
+ {
+ npy_intp size;
+ npy_intp i;
+ T last_value;
+ T current_value;
+
+ size = PyArray_DIM(array, 0);
+
+ last_value = *((T *)PyArray_GETPTR1(array, 0));
+
+ for (i = 1; i < size; ++i) {
+ current_value = *((T *)PyArray_GETPTR1(array, i));
+ if (current_value < last_value) {
+ return false;
+ }
+ last_value = current_value;
+ }
+
+ return true;
+ }
+};
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/_path_wrapper.cpp b/contrib/python/matplotlib/py2/src/_path_wrapper.cpp
new file mode 100644
index 00000000000..08a595e7c4b
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_path_wrapper.cpp
@@ -0,0 +1,900 @@
+#include "numpy_cpp.h"
+
+#include "_path.h"
+
+#include "py_converters.h"
+#include "py_adaptors.h"
+
+PyObject *convert_polygon_vector(std::vector<Polygon> &polygons)
+{
+ PyObject *pyresult = PyList_New(polygons.size());
+
+ for (size_t i = 0; i < polygons.size(); ++i) {
+ Polygon poly = polygons[i];
+ npy_intp dims[2];
+ dims[1] = 2;
+
+ dims[0] = (npy_intp)poly.size();
+
+ numpy::array_view<double, 2> subresult(dims);
+ memcpy(subresult.data(), &poly[0], sizeof(double) * poly.size() * 2);
+
+ if (PyList_SetItem(pyresult, i, subresult.pyobj())) {
+ Py_DECREF(pyresult);
+ return NULL;
+ }
+ }
+
+ return pyresult;
+}
+
+const char *Py_point_in_path__doc__ = "point_in_path(x, y, radius, path, trans)";
+
+static PyObject *Py_point_in_path(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ double x, y, r;
+ py::PathIterator path;
+ agg::trans_affine trans;
+ bool result;
+
+ if (!PyArg_ParseTuple(args,
+ "dddO&O&:point_in_path",
+ &x,
+ &y,
+ &r,
+ &convert_path,
+ &path,
+ &convert_trans_affine,
+ &trans)) {
+ return NULL;
+ }
+
+ CALL_CPP("point_in_path", (result = point_in_path(x, y, r, path, trans)));
+
+ if (result) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+}
+
+const char *Py_points_in_path__doc__ = "points_in_path(points, radius, path, trans)";
+
+static PyObject *Py_points_in_path(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ numpy::array_view<const double, 2> points;
+ double r;
+ py::PathIterator path;
+ agg::trans_affine trans;
+
+ if (!PyArg_ParseTuple(args,
+ "O&dO&O&:points_in_path",
+ &convert_points,
+ &points,
+ &r,
+ &convert_path,
+ &path,
+ &convert_trans_affine,
+ &trans)) {
+ return NULL;
+ }
+
+ npy_intp dims[] = { (npy_intp)points.size() };
+ numpy::array_view<uint8_t, 1> results(dims);
+
+ CALL_CPP("points_in_path", (points_in_path(points, r, path, trans, results)));
+
+ return results.pyobj();
+}
+
+const char *Py_point_on_path__doc__ = "point_on_path(x, y, radius, path, trans)";
+
+static PyObject *Py_point_on_path(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ double x, y, r;
+ py::PathIterator path;
+ agg::trans_affine trans;
+ bool result;
+
+ if (!PyArg_ParseTuple(args,
+ "dddO&O&:point_on_path",
+ &x,
+ &y,
+ &r,
+ &convert_path,
+ &path,
+ &convert_trans_affine,
+ &trans)) {
+ return NULL;
+ }
+
+ CALL_CPP("point_on_path", (result = point_on_path(x, y, r, path, trans)));
+
+ if (result) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+}
+
+const char *Py_points_on_path__doc__ = "points_on_path(points, radius, path, trans)";
+
+static PyObject *Py_points_on_path(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ numpy::array_view<const double, 2> points;
+ double r;
+ py::PathIterator path;
+ agg::trans_affine trans;
+
+ if (!PyArg_ParseTuple(args,
+ "O&dO&O&:points_on_path",
+ &convert_points,
+ &points,
+ &r,
+ &convert_path,
+ &path,
+ &convert_trans_affine,
+ &trans)) {
+ return NULL;
+ }
+
+ npy_intp dims[] = { (npy_intp)points.size() };
+ numpy::array_view<uint8_t, 1> results(dims);
+
+ CALL_CPP("points_on_path", (points_on_path(points, r, path, trans, results)));
+
+ return results.pyobj();
+}
+
+const char *Py_get_path_extents__doc__ = "get_path_extents(path, trans)";
+
+static PyObject *Py_get_path_extents(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ py::PathIterator path;
+ agg::trans_affine trans;
+
+ if (!PyArg_ParseTuple(
+ args, "O&O&:get_path_extents", &convert_path, &path, &convert_trans_affine, &trans)) {
+ return NULL;
+ }
+
+ extent_limits e;
+
+ CALL_CPP("get_path_extents", (reset_limits(e)));
+ CALL_CPP("get_path_extents", (update_path_extents(path, trans, e)));
+
+ npy_intp dims[] = { 2, 2 };
+ numpy::array_view<double, 2> extents(dims);
+ extents(0, 0) = e.x0;
+ extents(0, 1) = e.y0;
+ extents(1, 0) = e.x1;
+ extents(1, 1) = e.y1;
+
+ return extents.pyobj();
+}
+
+const char *Py_update_path_extents__doc__ =
+ "update_path_extents(path, trans, rect, minpos, ignore)";
+
+static PyObject *Py_update_path_extents(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ py::PathIterator path;
+ agg::trans_affine trans;
+ agg::rect_d rect;
+ numpy::array_view<double, 1> minpos;
+ int ignore;
+ int changed;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&O&O&i:update_path_extents",
+ &convert_path,
+ &path,
+ &convert_trans_affine,
+ &trans,
+ &convert_rect,
+ &rect,
+ &minpos.converter,
+ &minpos,
+ &ignore)) {
+ return NULL;
+ }
+
+ if (minpos.dim(0) != 2) {
+ PyErr_Format(PyExc_ValueError,
+ "minpos must be of length 2, got %" NPY_INTP_FMT,
+ minpos.dim(0));
+ return NULL;
+ }
+
+ extent_limits e;
+
+ if (ignore) {
+ CALL_CPP("update_path_extents", reset_limits(e));
+ } else {
+ if (rect.x1 > rect.x2) {
+ e.x0 = std::numeric_limits<double>::infinity();
+ e.x1 = -std::numeric_limits<double>::infinity();
+ } else {
+ e.x0 = rect.x1;
+ e.x1 = rect.x2;
+ }
+ if (rect.y1 > rect.y2) {
+ e.y0 = std::numeric_limits<double>::infinity();
+ e.y1 = -std::numeric_limits<double>::infinity();
+ } else {
+ e.y0 = rect.y1;
+ e.y1 = rect.y2;
+ }
+ e.xm = minpos(0);
+ e.ym = minpos(1);
+ }
+
+ CALL_CPP("update_path_extents", (update_path_extents(path, trans, e)));
+
+ changed = (e.x0 != rect.x1 || e.y0 != rect.y1 || e.x1 != rect.x2 || e.y1 != rect.y2 ||
+ e.xm != minpos(0) || e.ym != minpos(1));
+
+ npy_intp extentsdims[] = { 2, 2 };
+ numpy::array_view<double, 2> outextents(extentsdims);
+ outextents(0, 0) = e.x0;
+ outextents(0, 1) = e.y0;
+ outextents(1, 0) = e.x1;
+ outextents(1, 1) = e.y1;
+
+ npy_intp minposdims[] = { 2 };
+ numpy::array_view<double, 1> outminpos(minposdims);
+ outminpos(0) = e.xm;
+ outminpos(1) = e.ym;
+
+ return Py_BuildValue(
+ "NNi", outextents.pyobj(), outminpos.pyobj(), changed);
+}
+
+const char *Py_get_path_collection_extents__doc__ =
+ "get_path_collection_extents(master_transform, paths, transforms, offsets, offset_trans)";
+
+static PyObject *Py_get_path_collection_extents(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ agg::trans_affine master_transform;
+ PyObject *pathsobj;
+ numpy::array_view<const double, 3> transforms;
+ numpy::array_view<const double, 2> offsets;
+ agg::trans_affine offset_trans;
+ extent_limits e;
+
+ if (!PyArg_ParseTuple(args,
+ "O&OO&O&O&:get_path_collection_extents",
+ &convert_trans_affine,
+ &master_transform,
+ &pathsobj,
+ &convert_transforms,
+ &transforms,
+ &convert_points,
+ &offsets,
+ &convert_trans_affine,
+ &offset_trans)) {
+ return NULL;
+ }
+
+ try
+ {
+ py::PathGenerator paths(pathsobj);
+
+ CALL_CPP("get_path_collection_extents",
+ (get_path_collection_extents(
+ master_transform, paths, transforms, offsets, offset_trans, e)));
+ }
+ catch (const py::exception &)
+ {
+ return NULL;
+ }
+
+ npy_intp dims[] = { 2, 2 };
+ numpy::array_view<double, 2> extents(dims);
+ extents(0, 0) = e.x0;
+ extents(0, 1) = e.y0;
+ extents(1, 0) = e.x1;
+ extents(1, 1) = e.y1;
+
+ return extents.pyobj();
+}
+
+const char *Py_point_in_path_collection__doc__ =
+ "point_in_path_collection(x, y, radius, master_transform, paths, transforms, offsets, "
+ "offset_trans, filled, offset_position)";
+
+static PyObject *Py_point_in_path_collection(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ double x, y, radius;
+ agg::trans_affine master_transform;
+ PyObject *pathsobj;
+ numpy::array_view<const double, 3> transforms;
+ numpy::array_view<const double, 2> offsets;
+ agg::trans_affine offset_trans;
+ int filled;
+ e_offset_position offset_position;
+ std::vector<int> result;
+
+ if (!PyArg_ParseTuple(args,
+ "dddO&OO&O&O&iO&:point_in_path_collection",
+ &x,
+ &y,
+ &radius,
+ &convert_trans_affine,
+ &master_transform,
+ &pathsobj,
+ &convert_transforms,
+ &transforms,
+ &convert_points,
+ &offsets,
+ &convert_trans_affine,
+ &offset_trans,
+ &filled,
+ &convert_offset_position,
+ &offset_position)) {
+ return NULL;
+ }
+
+ try
+ {
+ py::PathGenerator paths(pathsobj);
+
+ CALL_CPP("point_in_path_collection",
+ (point_in_path_collection(x,
+ y,
+ radius,
+ master_transform,
+ paths,
+ transforms,
+ offsets,
+ offset_trans,
+ filled,
+ offset_position,
+ result)));
+ }
+ catch (const py::exception &)
+ {
+ return NULL;
+ }
+
+ npy_intp dims[] = {(npy_intp)result.size() };
+ numpy::array_view<int, 1> pyresult(dims);
+ if (result.size() > 0) {
+ memcpy(pyresult.data(), &result[0], result.size() * sizeof(int));
+ }
+ return pyresult.pyobj();
+}
+
+const char *Py_path_in_path__doc__ = "path_in_path(path_a, trans_a, path_b, trans_b)";
+
+static PyObject *Py_path_in_path(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ py::PathIterator a;
+ agg::trans_affine atrans;
+ py::PathIterator b;
+ agg::trans_affine btrans;
+ bool result;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&O&O&:path_in_path",
+ &convert_path,
+ &a,
+ &convert_trans_affine,
+ &atrans,
+ &convert_path,
+ &b,
+ &convert_trans_affine,
+ &btrans)) {
+ return NULL;
+ }
+
+ CALL_CPP("path_in_path", (result = path_in_path(a, atrans, b, btrans)));
+
+ if (result) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+}
+
+const char *Py_clip_path_to_rect__doc__ = "clip_path_to_rect(path, rect, inside)";
+
+static PyObject *Py_clip_path_to_rect(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ py::PathIterator path;
+ agg::rect_d rect;
+ int inside;
+ std::vector<Polygon> result;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&i:clip_path_to_rect",
+ &convert_path,
+ &path,
+ &convert_rect,
+ &rect,
+ &inside)) {
+ return NULL;
+ }
+
+ CALL_CPP("clip_path_to_rect", (clip_path_to_rect(path, rect, inside, result)));
+
+ return convert_polygon_vector(result);
+}
+
+const char *Py_affine_transform__doc__ = "affine_transform(points, trans)";
+
+static PyObject *Py_affine_transform(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *vertices_obj;
+ agg::trans_affine trans;
+
+ if (!PyArg_ParseTuple(args,
+ "OO&:affine_transform",
+ &vertices_obj,
+ &convert_trans_affine,
+ &trans)) {
+ return NULL;
+ }
+
+ try {
+ numpy::array_view<double, 2> vertices(vertices_obj);
+ npy_intp dims[] = { (npy_intp)vertices.size(), 2 };
+ numpy::array_view<double, 2> result(dims);
+ CALL_CPP("affine_transform", (affine_transform_2d(vertices, trans, result)));
+ return result.pyobj();
+ } catch (py::exception &) {
+ PyErr_Clear();
+ try {
+ numpy::array_view<double, 1> vertices(vertices_obj);
+ npy_intp dims[] = { (npy_intp)vertices.size() };
+ numpy::array_view<double, 1> result(dims);
+ CALL_CPP("affine_transform", (affine_transform_1d(vertices, trans, result)));
+ return result.pyobj();
+ } catch (py::exception &) {
+ return NULL;
+ }
+ }
+}
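+
+// Illustrative only (not part of the original source): a rough Python-level
+// sketch of how the wrapper above is typically exercised, assuming the
+// compiled extension is importable as matplotlib._path and that the
+// transform is accepted as a 3x3 array:
+//
+//     import numpy as np
+//     from matplotlib import _path
+//     pts = np.array([[0.0, 0.0], [1.0, 2.0]])
+//     M = np.array([[2.0, 0.0, 1.0],
+//                   [0.0, 2.0, 1.0],
+//                   [0.0, 0.0, 1.0]])      # scale by 2, translate by (1, 1)
+//     _path.affine_transform(pts, M)       # -> array([[1., 1.], [3., 5.]])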
+
+const char *Py_count_bboxes_overlapping_bbox__doc__ = "count_bboxes_overlapping_bbox(bbox, bboxes)";
+
+static PyObject *Py_count_bboxes_overlapping_bbox(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ agg::rect_d bbox;
+ numpy::array_view<const double, 3> bboxes;
+ int result;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&:count_bboxes_overlapping_bbox",
+ &convert_rect,
+ &bbox,
+ &convert_bboxes,
+ &bboxes)) {
+ return NULL;
+ }
+
+ CALL_CPP("count_bboxes_overlapping_bbox",
+ (result = count_bboxes_overlapping_bbox(bbox, bboxes)));
+
+ return PyLong_FromLong(result);
+}
+
+const char *Py_path_intersects_path__doc__ = "path_intersects_path(path1, path2, filled=False)";
+
+static PyObject *Py_path_intersects_path(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ py::PathIterator p1;
+ py::PathIterator p2;
+ agg::trans_affine t1;
+ agg::trans_affine t2;
+ int filled = 0;
+ const char *names[] = { "p1", "p2", "filled", NULL };
+ bool result;
+
+ if (!PyArg_ParseTupleAndKeywords(args,
+ kwds,
+ "O&O&i:path_intersects_path",
+ (char **)names,
+ &convert_path,
+ &p1,
+ &convert_path,
+ &p2,
+ &filled)) {
+ return NULL;
+ }
+
+ CALL_CPP("path_intersects_path", (result = path_intersects_path(p1, p2)));
+ if (filled) {
+ if (!result) {
+ CALL_CPP("path_intersects_path",
+ (result = path_in_path(p1, t1, p2, t2)));
+ }
+ if (!result) {
+ CALL_CPP("path_intersects_path",
+ (result = path_in_path(p2, t1, p1, t2)));
+ }
+ }
+
+ if (result) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+}
+
+const char *Py_path_intersects_rectangle__doc__ = "path_intersects_rectangle(path, rect_x1, rect_y1, rect_x2, rect_y2, filled=False)";
+
+static PyObject *Py_path_intersects_rectangle(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ py::PathIterator path;
+ double rect_x1, rect_y1, rect_x2, rect_y2;
+ int filled = 0;
+ const char *names[] = { "path", "rect_x1", "rect_y1", "rect_x2", "rect_y2", "filled", NULL };
+ bool result;
+
+ if (!PyArg_ParseTupleAndKeywords(args,
+ kwds,
+ "O&dddd|i:path_intersects_rectangle",
+ (char **)names,
+ &convert_path,
+ &path,
+ &rect_x1,
+ &rect_y1,
+ &rect_x2,
+ &rect_y2,
+ &filled)) {
+ return NULL;
+ }
+
+ CALL_CPP("path_intersects_rectangle", (result = path_intersects_rectangle(path, rect_x1, rect_y1, rect_x2, rect_y2, filled)));
+
+ if (result) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+}
+
+const char *Py_convert_path_to_polygons__doc__ =
+ "convert_path_to_polygons(path, trans, width=0, height=0)";
+
+static PyObject *Py_convert_path_to_polygons(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ py::PathIterator path;
+ agg::trans_affine trans;
+ double width = 0.0, height = 0.0;
+ int closed_only = 1;
+ std::vector<Polygon> result;
+ const char *names[] = { "path", "transform", "width", "height", "closed_only", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(args,
+ kwds,
+ "O&O&|ddi:convert_path_to_polygons",
+ (char **)names,
+ &convert_path,
+ &path,
+ &convert_trans_affine,
+ &trans,
+ &width,
+ &height,
+ &closed_only)) {
+ return NULL;
+ }
+
+ CALL_CPP("convert_path_to_polygons",
+ (convert_path_to_polygons(path, trans, width, height, closed_only, result)));
+
+ return convert_polygon_vector(result);
+}
+
+const char *Py_cleanup_path__doc__ =
+ "cleanup_path(path, trans, remove_nans, clip_rect, snap_mode, stroke_width, simplify, "
+ "return_curves, sketch)";
+
+static PyObject *Py_cleanup_path(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ py::PathIterator path;
+ agg::trans_affine trans;
+ int remove_nans;
+ agg::rect_d clip_rect;
+ e_snap_mode snap_mode;
+ double stroke_width;
+ PyObject *simplifyobj;
+ bool simplify = false;
+ int return_curves;
+ SketchParams sketch;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&iO&O&dOiO&:cleanup_path",
+ &convert_path,
+ &path,
+ &convert_trans_affine,
+ &trans,
+ &remove_nans,
+ &convert_rect,
+ &clip_rect,
+ &convert_snap,
+ &snap_mode,
+ &stroke_width,
+ &simplifyobj,
+ &return_curves,
+ &convert_sketch_params,
+ &sketch)) {
+ return NULL;
+ }
+
+ if (simplifyobj == Py_None) {
+ simplify = path.should_simplify();
+ } else if (PyObject_IsTrue(simplifyobj)) {
+ simplify = true;
+ }
+
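+ // Clipping is enabled only for a non-degenerate rectangle; an empty or
+ // inverted clip_rect disables it.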
+ bool do_clip = (clip_rect.x1 < clip_rect.x2 && clip_rect.y1 < clip_rect.y2);
+
+ std::vector<double> vertices;
+ std::vector<npy_uint8> codes;
+
+ CALL_CPP("cleanup_path",
+ (cleanup_path(path,
+ trans,
+ remove_nans,
+ do_clip,
+ clip_rect,
+ snap_mode,
+ stroke_width,
+ simplify,
+ return_curves,
+ sketch,
+ vertices,
+ codes)));
+
+ size_t length = codes.size();
+
+ npy_intp vertices_dims[] = {(npy_intp)length, 2 };
+ numpy::array_view<double, 2> pyvertices(vertices_dims);
+
+ npy_intp codes_dims[] = {(npy_intp)length };
+ numpy::array_view<unsigned char, 1> pycodes(codes_dims);
+
+ memcpy(pyvertices.data(), &vertices[0], sizeof(double) * 2 * length);
+ memcpy(pycodes.data(), &codes[0], sizeof(unsigned char) * length);
+
+ return Py_BuildValue("NN", pyvertices.pyobj(), pycodes.pyobj());
+}
+
+const char *Py_convert_to_string__doc__ = "convert_to_string(path, trans, "
+ "clip_rect, simplify, sketch, precision, codes, postfix)";
+
+static PyObject *Py_convert_to_string(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ py::PathIterator path;
+ agg::trans_affine trans;
+ agg::rect_d cliprect;
+ PyObject *simplifyobj;
+ bool simplify = false;
+ SketchParams sketch;
+ int precision;
+ PyObject *codesobj;
+ char *codes[5];
+ int postfix;
+ char *buffer = NULL;
+ size_t buffersize;
+ PyObject *result;
+ int status;
+
+ if (!PyArg_ParseTuple(args,
+ "O&O&O&OO&iOi:convert_to_string",
+ &convert_path,
+ &path,
+ &convert_trans_affine,
+ &trans,
+ &convert_rect,
+ &cliprect,
+ &simplifyobj,
+ &convert_sketch_params,
+ &sketch,
+ &precision,
+ &codesobj,
+ &postfix)) {
+ return NULL;
+ }
+
+ if (simplifyobj == Py_None) {
+ simplify = path.should_simplify();
+ } else if (PyObject_IsTrue(simplifyobj)) {
+ simplify = true;
+ }
+
+ if (!PySequence_Check(codesobj)) {
+ return NULL;
+ }
+ if (PySequence_Size(codesobj) != 5) {
+ PyErr_SetString(
+ PyExc_ValueError,
+ "codes must be a 5-length sequence of byte strings");
+ return NULL;
+ }
+ for (int i = 0; i < 5; ++i) {
+ PyObject *item = PySequence_GetItem(codesobj, i);
+ if (item == NULL) {
+ return NULL;
+ }
+ codes[i] = PyBytes_AsString(item);
+ if (codes[i] == NULL) {
+ return NULL;
+ }
+ }
+
+ CALL_CPP("convert_to_string",
+ (status = convert_to_string(
+ path, trans, cliprect, simplify, sketch,
+ precision, codes, (bool)postfix, &buffer,
+ &buffersize)));
+
+ if (status) {
+ free(buffer);
+ if (status == 1) {
+ PyErr_SetString(PyExc_MemoryError, "Memory error");
+ } else if (status == 2) {
+ PyErr_SetString(PyExc_ValueError, "Malformed path codes");
+ }
+ return NULL;
+ }
+
+ if (buffersize == 0) {
+ result = PyBytes_FromString("");
+ } else {
+ result = PyBytes_FromStringAndSize(buffer, buffersize);
+ }
+
+ free(buffer);
+
+ return result;
+}
+
+
+const char *Py_is_sorted__doc__ = "is_sorted(array)\n\n"
+ "Returns True if 1-D array is monotonically increasing, ignoring NaNs\n";
+
+static PyObject *Py_is_sorted(PyObject *self, PyObject *obj)
+{
+ npy_intp size;
+ bool result;
+
+ PyArrayObject *array = (PyArrayObject *)PyArray_FromAny(
+ obj, NULL, 1, 1, 0, NULL);
+
+ if (array == NULL) {
+ return NULL;
+ }
+
+ size = PyArray_DIM(array, 0);
+
+ if (size < 2) {
+ Py_DECREF(array);
+ Py_RETURN_TRUE;
+ }
+
+ /* Handle just the most common types here, otherwise coerce to
+ double */
+ switch(PyArray_TYPE(array)) {
+ case NPY_INT:
+ {
+ _is_sorted_int<npy_int> is_sorted;
+ result = is_sorted(array);
+ }
+ break;
+
+ case NPY_LONG:
+ {
+ _is_sorted_int<npy_long> is_sorted;
+ result = is_sorted(array);
+ }
+ break;
+
+ case NPY_LONGLONG:
+ {
+ _is_sorted_int<npy_longlong> is_sorted;
+ result = is_sorted(array);
+ }
+ break;
+
+ case NPY_FLOAT:
+ {
+ _is_sorted<npy_float> is_sorted;
+ result = is_sorted(array);
+ }
+ break;
+
+ case NPY_DOUBLE:
+ {
+ _is_sorted<npy_double> is_sorted;
+ result = is_sorted(array);
+ }
+ break;
+
+ default:
+ {
+ Py_DECREF(array);
+ array = (PyArrayObject *)PyArray_FromObject(obj, NPY_DOUBLE, 1, 1);
+
+ if (array == NULL) {
+ return NULL;
+ }
+
+ _is_sorted<npy_double> is_sorted;
+ result = is_sorted(array);
+ }
+ }
+
+ Py_DECREF(array);
+
+ if (result) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+}
+
+
+extern "C" {
+
+ static PyMethodDef module_functions[] = {
+ {"point_in_path", (PyCFunction)Py_point_in_path, METH_VARARGS, Py_point_in_path__doc__},
+ {"points_in_path", (PyCFunction)Py_points_in_path, METH_VARARGS, Py_points_in_path__doc__},
+ {"point_on_path", (PyCFunction)Py_point_on_path, METH_VARARGS, Py_point_on_path__doc__},
+ {"points_on_path", (PyCFunction)Py_points_on_path, METH_VARARGS, Py_points_on_path__doc__},
+ {"get_path_extents", (PyCFunction)Py_get_path_extents, METH_VARARGS, Py_get_path_extents__doc__},
+ {"update_path_extents", (PyCFunction)Py_update_path_extents, METH_VARARGS, Py_update_path_extents__doc__},
+ {"get_path_collection_extents", (PyCFunction)Py_get_path_collection_extents, METH_VARARGS, Py_get_path_collection_extents__doc__},
+ {"point_in_path_collection", (PyCFunction)Py_point_in_path_collection, METH_VARARGS, Py_point_in_path_collection__doc__},
+ {"path_in_path", (PyCFunction)Py_path_in_path, METH_VARARGS, Py_path_in_path__doc__},
+ {"clip_path_to_rect", (PyCFunction)Py_clip_path_to_rect, METH_VARARGS, Py_clip_path_to_rect__doc__},
+ {"affine_transform", (PyCFunction)Py_affine_transform, METH_VARARGS, Py_affine_transform__doc__},
+ {"count_bboxes_overlapping_bbox", (PyCFunction)Py_count_bboxes_overlapping_bbox, METH_VARARGS, Py_count_bboxes_overlapping_bbox__doc__},
+ {"path_intersects_path", (PyCFunction)Py_path_intersects_path, METH_VARARGS|METH_KEYWORDS, Py_path_intersects_path__doc__},
+ {"path_intersects_rectangle", (PyCFunction)Py_path_intersects_rectangle, METH_VARARGS|METH_KEYWORDS, Py_path_intersects_rectangle__doc__},
+ {"convert_path_to_polygons", (PyCFunction)Py_convert_path_to_polygons, METH_VARARGS|METH_KEYWORDS, Py_convert_path_to_polygons__doc__},
+ {"cleanup_path", (PyCFunction)Py_cleanup_path, METH_VARARGS, Py_cleanup_path__doc__},
+ {"convert_to_string", (PyCFunction)Py_convert_to_string, METH_VARARGS, Py_convert_to_string__doc__},
+ {"is_sorted", (PyCFunction)Py_is_sorted, METH_O, Py_is_sorted__doc__},
+ {NULL}
+ };
+
+#if PY3K
+ static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_path",
+ NULL,
+ 0,
+ module_functions,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ };
+
+#define INITERROR return NULL
+ PyMODINIT_FUNC PyInit__path(void)
+#else
+#define INITERROR return
+ PyMODINIT_FUNC init_path(void)
+#endif
+ {
+ PyObject *m;
+#if PY3K
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule3("_path", module_functions, NULL);
+#endif
+
+ if (m == NULL) {
+ INITERROR;
+ }
+
+ import_array();
+
+#if PY3K
+ return m;
+#endif
+ }
+}
diff --git a/contrib/python/matplotlib/py2/src/_png.cpp b/contrib/python/matplotlib/py2/src/_png.cpp
new file mode 100644
index 00000000000..1dcbf713f2d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_png.cpp
@@ -0,0 +1,793 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+// this code is heavily adapted from code covered by the paint license,
+// which is in the file paint.license (BSD compatible) included in this
+// distribution. TODO: add the license file to MANIFEST.in and CVS
+
+/* For linux, png.h must be imported before Python.h because
+ png.h needs to be the one to define setjmp.
+ Undefining _POSIX_C_SOURCE and _XOPEN_SOURCE stops a couple
+ of harmless warnings.
+*/
+
+extern "C" {
+# include <png.h>
+# ifdef _POSIX_C_SOURCE
+# undef _POSIX_C_SOURCE
+# endif
+# ifndef _AIX
+# ifdef _XOPEN_SOURCE
+# undef _XOPEN_SOURCE
+# endif
+# endif
+}
+
+#include "numpy_cpp.h"
+#include "mplutils.h"
+#include "file_compat.h"
+
+# include <vector>
+# include "Python.h"
+
+
+// As reported in [3082058] build _png.so on aix
+#ifdef _AIX
+#undef jmpbuf
+#endif
+
+struct buffer_t {
+ PyObject *str;
+ size_t cursor;
+ size_t size;
+};
+
+
+static void write_png_data_buffer(png_structp png_ptr, png_bytep data, png_size_t length)
+{
+ buffer_t *buff = (buffer_t *)png_get_io_ptr(png_ptr);
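+ // Copy only while the data still fits in the pre-sized bytes object; the
+ // caller later trims the object down to the `cursor` bytes actually written.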
+ if (buff->cursor + length < buff->size) {
+ memcpy(PyBytes_AS_STRING(buff->str) + buff->cursor, data, length);
+ buff->cursor += length;
+ }
+}
+
+static void flush_png_data_buffer(png_structp png_ptr)
+{
+
+}
+
+static void write_png_data(png_structp png_ptr, png_bytep data, png_size_t length)
+{
+ PyObject *py_file_obj = (PyObject *)png_get_io_ptr(png_ptr);
+ PyObject *write_method = PyObject_GetAttrString(py_file_obj, "write");
+ PyObject *result = NULL;
+ if (write_method) {
+#if PY3K
+ result = PyObject_CallFunction(write_method, (char *)"y#", data, length);
+#else
+ result = PyObject_CallFunction(write_method, (char *)"s#", data, length);
+#endif
+ }
+ Py_XDECREF(write_method);
+ Py_XDECREF(result);
+}
+
+static void flush_png_data(png_structp png_ptr)
+{
+ PyObject *py_file_obj = (PyObject *)png_get_io_ptr(png_ptr);
+ PyObject *flush_method = PyObject_GetAttrString(py_file_obj, "flush");
+ PyObject *result = NULL;
+ if (flush_method) {
+ result = PyObject_CallFunction(flush_method, (char *)"");
+ }
+ Py_XDECREF(flush_method);
+ Py_XDECREF(result);
+}
+
+const char *Py_write_png__doc__ =
+ "write_png(buffer, file, dpi=0, compression=6, filter=auto, metadata=None)\n"
+ "\n"
+ "Parameters\n"
+ "----------\n"
+ "buffer : numpy array of image data\n"
+ " Must be an MxNxD array of dtype uint8.\n"
+ " - if D is 1, the image is greyscale\n"
+ " - if D is 3, the image is RGB\n"
+ " - if D is 4, the image is RGBA\n"
+ "\n"
+ "file : str path, file-like object or None\n"
+ " - If a str, must be a file path\n"
+ " - If a file-like object, must write bytes\n"
+ " - If None, a byte string containing the PNG data will be returned\n"
+ "\n"
+ "dpi : float\n"
+ " The dpi to store in the file metadata.\n"
+ "\n"
+ "compression : int\n"
+ " The level of lossless zlib compression to apply. 0 indicates no\n"
+ " compression. Values 1-9 indicate low/fast through high/slow\n"
+ " compression. Default is 6.\n"
+ "\n"
+ "filter : int\n"
+ " Filter to apply. Must be one of the constants: PNG_FILTER_NONE,\n"
+ " PNG_FILTER_SUB, PNG_FILTER_UP, PNG_FILTER_AVG, PNG_FILTER_PAETH.\n"
+ " See the PNG standard for more information.\n"
+ " If not provided, libpng will try to automatically determine the\n"
+ " best filter on a line-by-line basis.\n"
+ "\n"
+ "metadata : dictionary\n"
+ " The keyword-text pairs that are stored as comments in the image.\n"
+ " Keys must be shorter than 79 chars. The only supported encoding\n"
+ " for both keywords and values is Latin-1 (ISO 8859-1).\n"
+ " Examples given in the PNG Specification are:\n"
+ " - Title: Short (one line) title or caption for image\n"
+ " - Author: Name of image's creator\n"
+ " - Description: Description of image (possibly long)\n"
+ " - Copyright: Copyright notice\n"
+ " - Creation Time: Time of original image creation\n"
+ " (usually RFC 1123 format, see below)\n"
+ " - Software: Software used to create the image\n"
+ " - Disclaimer: Legal disclaimer\n"
+ " - Warning: Warning of nature of content\n"
+ " - Source: Device used to create the image\n"
+ " - Comment: Miscellaneous comment; conversion\n"
+ " from other image format\n"
+ "\n"
+ "Returns\n"
+ "-------\n"
+ "buffer : bytes or None\n"
+ " Byte string containing the PNG content if None was passed in for\n"
+ " file, otherwise None is returned.\n";
+
+static PyObject *Py_write_png(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ numpy::array_view<unsigned char, 3> buffer;
+ PyObject *filein;
+ PyObject *metadata = NULL;
+ PyObject *meta_key, *meta_val;
+ png_text *text;
+ Py_ssize_t pos = 0;
+ int meta_pos = 0;
+ Py_ssize_t meta_size;
+ double dpi = 0;
+ int compression = 6;
+ int filter = -1;
+ const char *names[] = { "buffer", "file", "dpi", "compression", "filter", "metadata", NULL };
+
+ // We don't need strict contiguity, just for each row to be
+ // contiguous, and libpng has special handling for getting RGB out
+ // of RGBA, ARGB or BGR. But the simplest thing to do is to
+ // enforce contiguity using array_view::converter_contiguous.
+ if (!PyArg_ParseTupleAndKeywords(args,
+ kwds,
+ "O&O|diiO:write_png",
+ (char **)names,
+ &buffer.converter_contiguous,
+ &buffer,
+ &filein,
+ &dpi,
+ &compression,
+ &filter,
+ &metadata)) {
+ return NULL;
+ }
+
+ png_uint_32 width = (png_uint_32)buffer.dim(1);
+ png_uint_32 height = (png_uint_32)buffer.dim(0);
+ int channels = buffer.dim(2);
+ std::vector<png_bytep> row_pointers(height);
+ for (png_uint_32 row = 0; row < (png_uint_32)height; ++row) {
+ row_pointers[row] = (png_bytep)&buffer(row, 0, 0);
+ }
+
+ FILE *fp = NULL;
+ mpl_off_t offset = 0;
+ bool close_file = false;
+ bool close_dup_file = false;
+ PyObject *py_file = NULL;
+
+ png_structp png_ptr = NULL;
+ png_infop info_ptr = NULL;
+ struct png_color_8_struct sig_bit;
+ int png_color_type;
+ buffer_t buff;
+ buff.str = NULL;
+
+ switch (channels) {
+ case 1:
+ png_color_type = PNG_COLOR_TYPE_GRAY;
+ break;
+ case 3:
+ png_color_type = PNG_COLOR_TYPE_RGB;
+ break;
+ case 4:
+ png_color_type = PNG_COLOR_TYPE_RGB_ALPHA;
+ break;
+ default:
+ PyErr_SetString(PyExc_ValueError,
+ "Buffer must be an NxMxD array with D in 1, 3, 4 "
+ "(grayscale, RGB, RGBA)");
+ goto exit;
+ }
+
+ if (compression < 0 || compression > 9) {
+ PyErr_Format(PyExc_ValueError,
+ "compression must be in range 0-9, got %d", compression);
+ goto exit;
+ }
+
+ if (PyBytes_Check(filein) || PyUnicode_Check(filein)) {
+ if ((py_file = mpl_PyFile_OpenFile(filein, (char *)"wb")) == NULL) {
+ goto exit;
+ }
+ close_file = true;
+ } else {
+ py_file = filein;
+ }
+
+ if (filein == Py_None) {
+ buff.size = width * height * 4 + 1024;
+ buff.str = PyBytes_FromStringAndSize(NULL, buff.size);
+ if (buff.str == NULL) {
+ goto exit;
+ }
+ buff.cursor = 0;
+ } else {
+ #if PY3K
+ if (close_file) {
+ #else
+ if (close_file || PyFile_Check(py_file)) {
+ #endif
+ fp = mpl_PyFile_Dup(py_file, (char *)"wb", &offset);
+ }
+
+ if (fp) {
+ close_dup_file = true;
+ } else {
+ PyErr_Clear();
+ PyObject *write_method = PyObject_GetAttrString(py_file, "write");
+ if (!(write_method && PyCallable_Check(write_method))) {
+ Py_XDECREF(write_method);
+ PyErr_SetString(PyExc_TypeError,
+ "Object does not appear to be a 8-bit string path or "
+ "a Python file-like object");
+ goto exit;
+ }
+ Py_XDECREF(write_method);
+ }
+ }
+
+ png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
+ if (png_ptr == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "Could not create write struct");
+ goto exit;
+ }
+
+ png_set_compression_level(png_ptr, compression);
+ if (filter >= 0) {
+ png_set_filter(png_ptr, 0, filter);
+ }
+
+ info_ptr = png_create_info_struct(png_ptr);
+ if (info_ptr == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "Could not create info struct");
+ goto exit;
+ }
+
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ PyErr_SetString(PyExc_RuntimeError, "libpng signaled error");
+ goto exit;
+ }
+
+ if (buff.str) {
+ png_set_write_fn(png_ptr, (void *)&buff, &write_png_data_buffer, &flush_png_data_buffer);
+ } else if (fp) {
+ png_init_io(png_ptr, fp);
+ } else {
+ png_set_write_fn(png_ptr, (void *)py_file, &write_png_data, &flush_png_data);
+ }
+ png_set_IHDR(png_ptr,
+ info_ptr,
+ width,
+ height,
+ 8,
+ png_color_type,
+ PNG_INTERLACE_NONE,
+ PNG_COMPRESSION_TYPE_BASE,
+ PNG_FILTER_TYPE_BASE);
+
+ // Save the dpi of the image in the file
+ if (dpi > 0.0) {
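+ // 1 inch = 2.54 cm, so dividing dpi by (2.54 / 100) converts
+ // dots-per-inch to the dots-per-meter unit used by the pHYs chunk.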
+ png_uint_32 dots_per_meter = (png_uint_32)(dpi / (2.54 / 100.0));
+ png_set_pHYs(png_ptr, info_ptr, dots_per_meter, dots_per_meter, PNG_RESOLUTION_METER);
+ }
+
+#ifdef PNG_TEXT_SUPPORTED
+ // Save the metadata
+ if (metadata != NULL) {
+ meta_size = PyDict_Size(metadata);
+ text = new png_text[meta_size];
+
+ while (PyDict_Next(metadata, &pos, &meta_key, &meta_val)) {
+ text[meta_pos].compression = PNG_TEXT_COMPRESSION_NONE;
+#if PY3K
+ if (PyUnicode_Check(meta_key)) {
+ PyObject *temp_key = PyUnicode_AsEncodedString(meta_key, "latin_1", "strict");
+ if (temp_key != NULL) {
+ text[meta_pos].key = PyBytes_AsString(temp_key);
+ }
+ } else if (PyBytes_Check(meta_key)) {
+ text[meta_pos].key = PyBytes_AsString(meta_key);
+ } else {
+ char invalid_key[79];
+ sprintf(invalid_key,"INVALID KEY %d", meta_pos);
+ text[meta_pos].key = invalid_key;
+ }
+ if (PyUnicode_Check(meta_val)) {
+ PyObject *temp_val = PyUnicode_AsEncodedString(meta_val, "latin_1", "strict");
+ if (temp_val != NULL) {
+ text[meta_pos].text = PyBytes_AsString(temp_val);
+ }
+ } else if (PyBytes_Check(meta_val)) {
+ text[meta_pos].text = PyBytes_AsString(meta_val);
+ } else {
+ text[meta_pos].text = (char *)"Invalid value in metadata";
+ }
+#else
+ text[meta_pos].key = PyString_AsString(meta_key);
+ text[meta_pos].text = PyString_AsString(meta_val);
+#endif
+#ifdef PNG_iTXt_SUPPORTED
+ text[meta_pos].lang = NULL;
+#endif
+ meta_pos++;
+ }
+ png_set_text(png_ptr, info_ptr, text, meta_size);
+ delete[] text;
+ }
+#endif
+
+ sig_bit.alpha = 0;
+ switch (png_color_type) {
+ case PNG_COLOR_TYPE_GRAY:
+ sig_bit.gray = 8;
+ sig_bit.red = 0;
+ sig_bit.green = 0;
+ sig_bit.blue = 0;
+ break;
+ case PNG_COLOR_TYPE_RGB_ALPHA:
+ sig_bit.alpha = 8;
+ // fall through
+ case PNG_COLOR_TYPE_RGB:
+ sig_bit.gray = 0;
+ sig_bit.red = 8;
+ sig_bit.green = 8;
+ sig_bit.blue = 8;
+ break;
+ default:
+ PyErr_SetString(PyExc_RuntimeError, "internal error, bad png_color_type");
+ goto exit;
+ }
+ png_set_sBIT(png_ptr, info_ptr, &sig_bit);
+
+ png_write_info(png_ptr, info_ptr);
+ png_write_image(png_ptr, &row_pointers[0]);
+ png_write_end(png_ptr, info_ptr);
+
+exit:
+
+ if (png_ptr && info_ptr) {
+ png_destroy_write_struct(&png_ptr, &info_ptr);
+ }
+
+ if (close_dup_file) {
+ mpl_PyFile_DupClose(py_file, fp, offset);
+ }
+
+ if (close_file) {
+ mpl_PyFile_CloseFile(py_file);
+ Py_DECREF(py_file);
+ }
+
+ if (PyErr_Occurred()) {
+ Py_XDECREF(buff.str);
+ return NULL;
+ } else {
+ if (buff.str) {
+ _PyBytes_Resize(&buff.str, buff.cursor);
+ return buff.str;
+ }
+ Py_RETURN_NONE;
+ }
+}
+
+static void _read_png_data(PyObject *py_file_obj, png_bytep data, png_size_t length)
+{
+ PyObject *read_method = PyObject_GetAttrString(py_file_obj, "read");
+ PyObject *result = NULL;
+ char *buffer;
+ Py_ssize_t bufflen;
+ if (read_method) {
+ result = PyObject_CallFunction(read_method, (char *)"i", length);
+ if (result) {
+ if (PyBytes_AsStringAndSize(result, &buffer, &bufflen) == 0) {
+ if (bufflen == (Py_ssize_t)length) {
+ memcpy(data, buffer, length);
+ } else {
+ PyErr_SetString(PyExc_IOError, "read past end of file");
+ }
+ } else {
+ PyErr_SetString(PyExc_IOError, "failed to copy buffer");
+ }
+ } else {
+ PyErr_SetString(PyExc_IOError, "failed to read file");
+ }
+
+
+ }
+ Py_XDECREF(read_method);
+ Py_XDECREF(result);
+}
+
+static void read_png_data(png_structp png_ptr, png_bytep data, png_size_t length)
+{
+ PyObject *py_file_obj = (PyObject *)png_get_io_ptr(png_ptr);
+ _read_png_data(py_file_obj, data, length);
+ if (PyErr_Occurred()) {
+ png_error(png_ptr, "failed to read file");
+ }
+
+}
+
+static PyObject *_read_png(PyObject *filein, bool float_result)
+{
+ png_byte header[8]; // 8 is the maximum size that can be checked
+ FILE *fp = NULL;
+ mpl_off_t offset = 0;
+ bool close_file = false;
+ bool close_dup_file = false;
+ PyObject *py_file = NULL;
+ png_structp png_ptr = NULL;
+ png_infop info_ptr = NULL;
+ int num_dims;
+ std::vector<png_bytep> row_pointers;
+ png_uint_32 width = 0;
+ png_uint_32 height = 0;
+ int bit_depth;
+ PyObject *result = NULL;
+
+ // TODO: Remove direct calls to Numpy API here
+
+ if (PyBytes_Check(filein) || PyUnicode_Check(filein)) {
+ if ((py_file = mpl_PyFile_OpenFile(filein, (char *)"rb")) == NULL) {
+ goto exit;
+ }
+ close_file = true;
+ } else {
+ py_file = filein;
+ }
+
+ #if PY3K
+ if (close_file) {
+ #else
+ if (close_file || PyFile_Check(py_file)) {
+ #endif
+ fp = mpl_PyFile_Dup(py_file, (char *)"rb", &offset);
+ }
+
+ if (fp) {
+ close_dup_file = true;
+ if (fread(header, 1, 8, fp) != 8) {
+ PyErr_SetString(PyExc_IOError, "error reading PNG header");
+ goto exit;
+ }
+ } else {
+ PyErr_Clear();
+
+ PyObject *read_method = PyObject_GetAttrString(py_file, "read");
+ if (!(read_method && PyCallable_Check(read_method))) {
+ Py_XDECREF(read_method);
+ PyErr_SetString(PyExc_TypeError,
+ "Object does not appear to be a 8-bit string path or "
+ "a Python file-like object");
+ goto exit;
+ }
+ Py_XDECREF(read_method);
+ _read_png_data(py_file, header, 8);
+ if (PyErr_Occurred()) {
+ goto exit;
+ }
+ }
+
+ if (png_sig_cmp(header, 0, 8)) {
+ PyErr_SetString(PyExc_ValueError, "invalid PNG header");
+ goto exit;
+ }
+
+ /* initialize stuff */
+ png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
+
+ if (!png_ptr) {
+ PyErr_SetString(PyExc_RuntimeError, "png_create_read_struct failed");
+ goto exit;
+ }
+
+ info_ptr = png_create_info_struct(png_ptr);
+ if (!info_ptr) {
+ PyErr_SetString(PyExc_RuntimeError, "png_create_info_struct failed");
+ goto exit;
+ }
+
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_RuntimeError, "error setting jump");
+ }
+ goto exit;
+ }
+
+ if (fp) {
+ png_init_io(png_ptr, fp);
+ } else {
+ png_set_read_fn(png_ptr, (void *)py_file, &read_png_data);
+ }
+ png_set_sig_bytes(png_ptr, 8);
+ png_read_info(png_ptr, info_ptr);
+
+ width = png_get_image_width(png_ptr, info_ptr);
+ height = png_get_image_height(png_ptr, info_ptr);
+
+ bit_depth = png_get_bit_depth(png_ptr, info_ptr);
+
+ // Unpack 1, 2, and 4-bit images
+ if (bit_depth < 8) {
+ png_set_packing(png_ptr);
+ }
+
+ // If sig bits are set, shift data
+ png_color_8p sig_bit;
+ if ((png_get_color_type(png_ptr, info_ptr) != PNG_COLOR_TYPE_PALETTE) &&
+ png_get_sBIT(png_ptr, info_ptr, &sig_bit)) {
+ png_set_shift(png_ptr, sig_bit);
+ }
+
+#if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
+ // Convert big endian to little
+ if (bit_depth == 16) {
+ png_set_swap(png_ptr);
+ }
+#endif
+
+ // Convert palettes to full RGB
+ if (png_get_color_type(png_ptr, info_ptr) == PNG_COLOR_TYPE_PALETTE) {
+ png_set_palette_to_rgb(png_ptr);
+ bit_depth = 8;
+ }
+
+ // If there's an alpha channel convert gray to RGB
+ if (png_get_color_type(png_ptr, info_ptr) == PNG_COLOR_TYPE_GRAY_ALPHA) {
+ png_set_gray_to_rgb(png_ptr);
+ }
+
+ png_set_interlace_handling(png_ptr);
+ png_read_update_info(png_ptr, info_ptr);
+
+ row_pointers.resize(height);
+ for (png_uint_32 row = 0; row < height; row++) {
+ row_pointers[row] = new png_byte[png_get_rowbytes(png_ptr, info_ptr)];
+ }
+
+ png_read_image(png_ptr, &row_pointers[0]);
+
+ npy_intp dimensions[3];
+ dimensions[0] = height; // numrows
+ dimensions[1] = width; // numcols
+ if (png_get_color_type(png_ptr, info_ptr) & PNG_COLOR_MASK_ALPHA) {
+ dimensions[2] = 4; // RGBA images
+ } else if (png_get_color_type(png_ptr, info_ptr) & PNG_COLOR_MASK_COLOR) {
+ dimensions[2] = 3; // RGB images
+ } else {
+ dimensions[2] = 1; // Greyscale images
+ }
+
+ if (float_result) {
+ double max_value = (1 << bit_depth) - 1;
+
+ numpy::array_view<float, 3> A(dimensions);
+
+ for (png_uint_32 y = 0; y < height; y++) {
+ png_byte *row = row_pointers[y];
+ for (png_uint_32 x = 0; x < width; x++) {
+ if (bit_depth == 16) {
+ png_uint_16 *ptr = &reinterpret_cast<png_uint_16 *>(row)[x * dimensions[2]];
+ for (png_uint_32 p = 0; p < (png_uint_32)dimensions[2]; p++) {
+ A(y, x, p) = (float)(ptr[p]) / max_value;
+ }
+ } else {
+ png_byte *ptr = &(row[x * dimensions[2]]);
+ for (png_uint_32 p = 0; p < (png_uint_32)dimensions[2]; p++) {
+ A(y, x, p) = (float)(ptr[p]) / max_value;
+ }
+ }
+ }
+ }
+
+ result = A.pyobj();
+ } else if (bit_depth == 16) {
+ numpy::array_view<png_uint_16, 3> A(dimensions);
+
+ for (png_uint_32 y = 0; y < height; y++) {
+ png_byte *row = row_pointers[y];
+ for (png_uint_32 x = 0; x < width; x++) {
+ png_uint_16 *ptr = &reinterpret_cast<png_uint_16 *>(row)[x * dimensions[2]];
+ for (png_uint_32 p = 0; p < (png_uint_32)dimensions[2]; p++) {
+ A(y, x, p) = ptr[p];
+ }
+ }
+ }
+
+ result = A.pyobj();
+ } else if (bit_depth == 8) {
+ numpy::array_view<png_byte, 3> A(dimensions);
+
+ for (png_uint_32 y = 0; y < height; y++) {
+ png_byte *row = row_pointers[y];
+ for (png_uint_32 x = 0; x < width; x++) {
+ png_byte *ptr = &(row[x * dimensions[2]]);
+ for (png_uint_32 p = 0; p < (png_uint_32)dimensions[2]; p++) {
+ A(y, x, p) = ptr[p];
+ }
+ }
+ }
+
+ result = A.pyobj();
+ } else {
+ PyErr_SetString(PyExc_RuntimeError, "image has unknown bit depth");
+ goto exit;
+ }
+
+ // free the png memory
+ png_read_end(png_ptr, info_ptr);
+
+ // For gray, return an x by y array, not an x by y by 1
+ num_dims = (png_get_color_type(png_ptr, info_ptr) & PNG_COLOR_MASK_COLOR) ? 3 : 2;
+
+ if (num_dims == 2) {
+ PyArray_Dims dims = {dimensions, 2};
+ PyObject *reshaped = PyArray_Newshape((PyArrayObject *)result, &dims, NPY_CORDER);
+ Py_DECREF(result);
+ result = reshaped;
+ }
+
+exit:
+ if (png_ptr && info_ptr) {
+#ifndef png_infopp_NULL
+ png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
+#else
+ png_destroy_read_struct(&png_ptr, &info_ptr, png_infopp_NULL);
+#endif
+ }
+
+ if (close_dup_file) {
+ mpl_PyFile_DupClose(py_file, fp, offset);
+ }
+
+ if (close_file) {
+ mpl_PyFile_CloseFile(py_file);
+ Py_DECREF(py_file);
+ }
+
+ for (png_uint_32 row = 0; row < height; row++) {
+ delete[] row_pointers[row];
+ }
+
+ if (PyErr_Occurred()) {
+ Py_XDECREF(result);
+ return NULL;
+ } else {
+ return result;
+ }
+}
+
+const char *Py_read_png_float__doc__ =
+ "read_png_float(file)\n"
+ "\n"
+ "Read in a PNG file, converting values to floating-point doubles\n"
+ "in the range (0, 1)\n"
+ "\n"
+ "Parameters\n"
+ "----------\n"
+ "file : str path or file-like object\n";
+
+static PyObject *Py_read_png_float(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ return _read_png(args, true);
+}
+
+const char *Py_read_png_int__doc__ =
+ "read_png_int(file)\n"
+ "\n"
+ "Read in a PNG file with original integer values.\n"
+ "\n"
+ "Parameters\n"
+ "----------\n"
+ "file : str path or file-like object\n";
+
+static PyObject *Py_read_png_int(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ return _read_png(args, false);
+}
+
+const char *Py_read_png__doc__ =
+ "read_png(file)\n"
+ "\n"
+ "Read in a PNG file, converting values to floating-point doubles\n"
+ "in the range (0, 1)\n"
+ "\n"
+ "Alias for read_png_float()\n"
+ "\n"
+ "Parameters\n"
+ "----------\n"
+ "file : str path or file-like object\n";
+
+static PyMethodDef module_methods[] = {
+ {"write_png", (PyCFunction)Py_write_png, METH_VARARGS|METH_KEYWORDS, Py_write_png__doc__},
+ {"read_png", (PyCFunction)Py_read_png_float, METH_O, Py_read_png__doc__},
+ {"read_png_float", (PyCFunction)Py_read_png_float, METH_O, Py_read_png_float__doc__},
+ {"read_png_int", (PyCFunction)Py_read_png_int, METH_O, Py_read_png_int__doc__},
+ {NULL}
+};
+
+extern "C" {
+
+#if PY3K
+ static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_png",
+ NULL,
+ 0,
+ module_methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ };
+
+#define INITERROR return NULL
+
+ PyMODINIT_FUNC PyInit__png(void)
+
+#else
+#define INITERROR return
+
+ PyMODINIT_FUNC init_png(void)
+#endif
+
+ {
+ PyObject *m;
+
+#if PY3K
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule3("_png", module_methods, NULL);
+#endif
+
+ if (m == NULL) {
+ INITERROR;
+ }
+
+ import_array();
+
+ if (PyModule_AddIntConstant(m, "PNG_FILTER_NONE", PNG_FILTER_NONE) ||
+ PyModule_AddIntConstant(m, "PNG_FILTER_SUB", PNG_FILTER_SUB) ||
+ PyModule_AddIntConstant(m, "PNG_FILTER_UP", PNG_FILTER_UP) ||
+ PyModule_AddIntConstant(m, "PNG_FILTER_AVG", PNG_FILTER_AVG) ||
+ PyModule_AddIntConstant(m, "PNG_FILTER_PAETH", PNG_FILTER_PAETH)) {
+ INITERROR;
+ }
+
+
+#if PY3K
+ return m;
+#endif
+ }
+}
diff --git a/contrib/python/matplotlib/py2/src/_tkagg.cpp b/contrib/python/matplotlib/py2/src/_tkagg.cpp
new file mode 100644
index 00000000000..ad5289b3d6e
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_tkagg.cpp
@@ -0,0 +1,475 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+/*
+ * This code is derived from The Python Imaging Library and is covered
+ * by the PIL license.
+ *
+ * See LICENSE/LICENSE.PIL for details.
+ *
+ */
+
+#include <Python.h>
+#include <cstdlib>
+#include <cstdio>
+#include <sstream>
+
+#include <agg_basics.h> // agg:int8u
+
+// Include our own excerpts from the Tcl / Tk headers
+#include "_tkmini.h"
+
+#if defined(_MSC_VER)
+# define IMG_FORMAT "%d %d %Iu"
+#else
+# define IMG_FORMAT "%d %d %zu"
+#endif
+#define BBOX_FORMAT "%f %f %f %f"
+
+typedef struct
+{
+ PyObject_HEAD
+ Tcl_Interp *interp;
+} TkappObject;
+
+// Global vars for Tcl / Tk functions. We load these symbols from the tkinter
+// extension module or loaded Tcl / Tk libraries at run-time.
+static Tcl_CreateCommand_t TCL_CREATE_COMMAND;
+static Tcl_AppendResult_t TCL_APPEND_RESULT;
+static Tk_MainWindow_t TK_MAIN_WINDOW;
+static Tk_FindPhoto_t TK_FIND_PHOTO;
+static Tk_PhotoPutBlock_NoComposite_t TK_PHOTO_PUT_BLOCK_NO_COMPOSITE;
+static Tk_PhotoBlank_t TK_PHOTO_BLANK;
+
+static int PyAggImagePhoto(ClientData clientdata, Tcl_Interp *interp, int
+ argc, char **argv)
+{
+ Tk_PhotoHandle photo;
+ Tk_PhotoImageBlock block;
+
+ // vars for blitting
+
+ size_t pdata;
+ int wdata, hdata, bbox_parse;
+ float x1, x2, y1, y2;
+ bool has_bbox;
+ agg::int8u *destbuffer, *buffer;
+ int destx, desty, destwidth, destheight, deststride;
+
+ long mode;
+ long nval;
+ if (TK_MAIN_WINDOW(interp) == NULL) {
+ // Will throw a _tkinter.TclError with "this isn't a Tk application"
+ return TCL_ERROR;
+ }
+
+ if (argc != 5) {
+ TCL_APPEND_RESULT(interp, "usage: ", argv[0], " destPhoto srcImage", (char *)NULL);
+ return TCL_ERROR;
+ }
+
+ /* get Tcl PhotoImage handle */
+ photo = TK_FIND_PHOTO(interp, argv[1]);
+ if (photo == NULL) {
+ TCL_APPEND_RESULT(interp, "destination photo must exist", (char *)NULL);
+ return TCL_ERROR;
+ }
+ /* get buffer from str which is "height width ptr" */
+ if (sscanf(argv[2], IMG_FORMAT, &hdata, &wdata, &pdata) != 3) {
+ TCL_APPEND_RESULT(interp,
+ "error reading data, expected height width ptr",
+ (char *)NULL);
+ return TCL_ERROR;
+ }
+ buffer = (agg::int8u*)pdata;
+
+ /* get array mode (0=mono, 1=rgb, 2=rgba) */
+ mode = atol(argv[3]);
+ if ((mode != 0) && (mode != 1) && (mode != 2)) {
+ TCL_APPEND_RESULT(interp, "illegal image mode", (char *)NULL);
+ return TCL_ERROR;
+ }
+
+ /* check for bbox/blitting */
+ bbox_parse = sscanf(argv[4], BBOX_FORMAT, &x1, &x2, &y1, &y2);
+ if (bbox_parse == 4) {
+ has_bbox = true;
+ }
+ else if ((bbox_parse == 1) && (x1 == 0)){
+ has_bbox = false;
+ } else {
+ TCL_APPEND_RESULT(interp, "illegal bbox", (char *)NULL);
+ return TCL_ERROR;
+ }
+
+ if (has_bbox) {
+ int srcstride = wdata * 4;
+ destx = (int)x1;
+ desty = (int)(hdata - y2);
+ destwidth = (int)(x2 - x1);
+ destheight = (int)(y2 - y1);
+ deststride = 4 * destwidth;
+
+ destbuffer = new agg::int8u[deststride * destheight];
+ if (destbuffer == NULL) {
+ TCL_APPEND_RESULT(interp, "could not allocate memory", (char *)NULL);
+ return TCL_ERROR;
+ }
+
+ for (int i = 0; i < destheight; ++i) {
+ memcpy(destbuffer + (deststride * i),
+ &buffer[(i + desty) * srcstride + (destx * 4)],
+ deststride);
+ }
+ } else {
+ destbuffer = NULL;
+ destx = desty = destwidth = destheight = deststride = 0;
+ }
+
+ /* setup tkblock */
+ block.pixelSize = 1;
+ if (mode == 0) {
+ block.offset[0] = block.offset[1] = block.offset[2] = 0;
+ nval = 1;
+ } else {
+ block.offset[0] = 0;
+ block.offset[1] = 1;
+ block.offset[2] = 2;
+ if (mode == 1) {
+ block.offset[3] = 0;
+ block.pixelSize = 3;
+ nval = 3;
+ } else {
+ block.offset[3] = 3;
+ block.pixelSize = 4;
+ nval = 4;
+ }
+ }
+
+ if (has_bbox) {
+ block.width = destwidth;
+ block.height = destheight;
+ block.pitch = deststride;
+ block.pixelPtr = destbuffer;
+
+ TK_PHOTO_PUT_BLOCK_NO_COMPOSITE(photo, &block, destx, desty,
+ destwidth, destheight);
+ delete[] destbuffer;
+
+ } else {
+ block.width = wdata;
+ block.height = hdata;
+ block.pitch = (int)block.width * nval;
+ block.pixelPtr = buffer;
+
+ /* Clear current contents */
+ TK_PHOTO_BLANK(photo);
+ /* Copy opaque block to photo image, and leave the rest to TK */
+ TK_PHOTO_PUT_BLOCK_NO_COMPOSITE(photo, &block, 0, 0, block.width,
+ block.height);
+ }
+
+ return TCL_OK;
+}
+
+static PyObject *_tkinit(PyObject *self, PyObject *args)
+{
+ Tcl_Interp *interp;
+ TkappObject *app;
+
+ PyObject *arg;
+ int is_interp;
+ if (!PyArg_ParseTuple(args, "Oi", &arg, &is_interp)) {
+ return NULL;
+ }
+
+ if (is_interp) {
+ interp = (Tcl_Interp *)PyLong_AsVoidPtr(arg);
+ } else {
+ /* Do it the hard way. This will break if the TkappObject
+ layout changes */
+ app = (TkappObject *)arg;
+ interp = app->interp;
+ }
+
+ /* This will bomb if interp is invalid... */
+
+ TCL_CREATE_COMMAND(interp,
+ "PyAggImagePhoto",
+ (Tcl_CmdProc *)PyAggImagePhoto,
+ (ClientData)0,
+ (Tcl_CmdDeleteProc *)NULL);
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyMethodDef functions[] = {
+ /* Tkinter interface stuff */
+ { "tkinit", (PyCFunction)_tkinit, 1 },
+ { NULL, NULL } /* sentinel */
+};
+
+// Functions to fill global TCL / Tk function pointers by dynamic loading
+#if defined(_WIN32) || defined(__WIN32__) || defined(WIN32)
+
+/*
+ * On Windows, we can't load the tkinter module to get the TCL or Tk symbols,
+ * because Windows does not load symbols into the library name-space of
+ * importing modules. So, knowing that tkinter has already been imported by
+ * Python, we scan all modules in the running process for the TCL and Tk
+ * function names.
+ */
+#include <windows.h>
+#define PSAPI_VERSION 1
+#include <psapi.h>
+// Must be linked with 'psapi' library
+
+FARPROC _dfunc(HMODULE lib_handle, const char *func_name)
+{
+ // Load function `func_name` from `lib_handle`.
+ // Set Python exception if we can't find `func_name` in `lib_handle`.
+ // Returns function pointer or NULL if not present.
+
+ char message[100];
+
+ FARPROC func = GetProcAddress(lib_handle, func_name);
+ if (func == NULL) {
+ sprintf(message, "Cannot load function %s", func_name);
+ PyErr_SetString(PyExc_RuntimeError, message);
+ }
+ return func;
+}
+
+int get_tcl(HMODULE hMod)
+{
+ // Try to fill TCL global vars with function pointers. Return 0 for no
+ // functions found, 1 for all functions found, -1 for some but not all
+ // functions found.
+ TCL_CREATE_COMMAND = (Tcl_CreateCommand_t)
+ GetProcAddress(hMod, "Tcl_CreateCommand");
+ if (TCL_CREATE_COMMAND == NULL) { // Maybe not TCL module
+ return 0;
+ }
+ TCL_APPEND_RESULT = (Tcl_AppendResult_t) _dfunc(hMod,
+ "Tcl_AppendResult");
+ return (TCL_APPEND_RESULT == NULL) ? -1 : 1;
+}
+
+int get_tk(HMODULE hMod)
+{
+ // Try to fill Tk global vars with function pointers. Return 0 for no
+ // functions found, 1 for all functions found, -1 for some but not all
+ // functions found.
+ TK_MAIN_WINDOW = (Tk_MainWindow_t)
+ GetProcAddress(hMod, "Tk_MainWindow");
+ if (TK_MAIN_WINDOW == NULL) { // Maybe not Tk module
+ return 0;
+ }
+ return ( // -1 if any remaining symbols are NULL
+ ((TK_FIND_PHOTO = (Tk_FindPhoto_t)
+ _dfunc(hMod, "Tk_FindPhoto")) == NULL) ||
+ ((TK_PHOTO_PUT_BLOCK_NO_COMPOSITE = (Tk_PhotoPutBlock_NoComposite_t)
+ _dfunc(hMod, "Tk_PhotoPutBlock_NoComposite")) == NULL) ||
+ ((TK_PHOTO_BLANK = (Tk_PhotoBlank_t)
+ _dfunc(hMod, "Tk_PhotoBlank")) == NULL))
+ ? -1 : 1;
+}
+
+int load_tkinter_funcs(void)
+{
+ // Load TCL and Tk functions by searching all modules in current process.
+ // Return 0 for success, non-zero for failure.
+
+ HMODULE hMods[1024];
+ HANDLE hProcess;
+ DWORD cbNeeded;
+ unsigned int i;
+ int found_tcl = 0;
+ int found_tk = 0;
+
+ // Returns pseudo-handle that does not need to be closed
+ hProcess = GetCurrentProcess();
+
+ // Iterate through modules in this process looking for TCL / Tk names
+ if (EnumProcessModules(hProcess, hMods, sizeof(hMods), &cbNeeded)) {
+ for (i = 0; i < (cbNeeded / sizeof(HMODULE)); i++) {
+ if (!found_tcl) {
+ found_tcl = get_tcl(hMods[i]);
+ if (found_tcl == -1) {
+ return 1;
+ }
+ }
+ if (!found_tk) {
+ found_tk = get_tk(hMods[i]);
+ if (found_tk == -1) {
+ return 1;
+ }
+ }
+ if (found_tcl && found_tk) {
+ return 0;
+ }
+ }
+ }
+
+ if (found_tcl == 0) {
+ PyErr_SetString(PyExc_RuntimeError, "Could not find TCL routines");
+ } else {
+ PyErr_SetString(PyExc_RuntimeError, "Could not find Tk routines");
+ }
+ return 1;
+}
+
+#else // not Windows
+
+/*
+ * On Unix, we can get the TCL and Tk symbols from the tkinter module, because
+ * tkinter uses these symbols, and the symbols are therefore visible in the
+ * tkinter dynamic library (module).
+ */
+#if PY_MAJOR_VERSION >= 3
+#define TKINTER_PKG "tkinter"
+#define TKINTER_MOD "_tkinter"
+// From module __file__ attribute to char *string for dlopen.
+char *fname2char(PyObject *fname)
+{
+ PyObject* bytes;
+ bytes = PyUnicode_EncodeFSDefault(fname);
+ if (bytes == NULL) {
+ return NULL;
+ }
+ return PyBytes_AsString(bytes);
+}
+#else
+#define TKINTER_PKG "Tkinter"
+#define TKINTER_MOD "tkinter"
+// From module __file__ attribute to char *string for dlopen
+#define fname2char(s) (PyString_AsString(s))
+#endif
+
+#include <dlfcn.h>
+
+void *_dfunc(void *lib_handle, const char *func_name)
+{
+ // Load function `func_name` from `lib_handle`.
+ // Set Python exception if we can't find `func_name` in `lib_handle`.
+ // Returns function pointer or NULL if not present.
+
+ void* func;
+ // Reset errors.
+ dlerror();
+ func = dlsym(lib_handle, func_name);
+ if (func == NULL) {
+ const char *error = dlerror();
+ PyErr_SetString(PyExc_RuntimeError, error);
+ }
+ return func;
+}
+
+int _func_loader(void *lib)
+{
+ // Fill global function pointers from dynamic lib.
+ // Return 1 if any pointer is NULL, 0 otherwise.
+ return (
+ ((TCL_CREATE_COMMAND = (Tcl_CreateCommand_t)
+ _dfunc(lib, "Tcl_CreateCommand")) == NULL) ||
+ ((TCL_APPEND_RESULT = (Tcl_AppendResult_t)
+ _dfunc(lib, "Tcl_AppendResult")) == NULL) ||
+ ((TK_MAIN_WINDOW = (Tk_MainWindow_t)
+ _dfunc(lib, "Tk_MainWindow")) == NULL) ||
+ ((TK_FIND_PHOTO = (Tk_FindPhoto_t)
+ _dfunc(lib, "Tk_FindPhoto")) == NULL) ||
+ ((TK_PHOTO_PUT_BLOCK_NO_COMPOSITE = (Tk_PhotoPutBlock_NoComposite_t)
+ _dfunc(lib, "Tk_PhotoPutBlock_NoComposite")) == NULL) ||
+ ((TK_PHOTO_BLANK = (Tk_PhotoBlank_t)
+ _dfunc(lib, "Tk_PhotoBlank")) == NULL));
+}
+
+int load_tkinter_funcs(void)
+{
+ // Load tkinter global funcs from tkinter compiled module.
+ // Return 0 for success, non-zero for failure.
+ int ret = -1;
+ void *main_program, *tkinter_lib;
+ char *tkinter_libname;
+ PyObject *pModule = NULL, *pSubmodule = NULL, *pString = NULL;
+
+ // Try loading from the main program namespace first
+ main_program = dlopen(NULL, RTLD_LAZY);
+ if (_func_loader(main_program) == 0) {
+ return 0;
+ }
+ // Clear exception triggered when we didn't find symbols above.
+ PyErr_Clear();
+
+ // Now try finding the tkinter compiled module
+ pModule = PyImport_ImportModule(TKINTER_PKG);
+ if (pModule == NULL) {
+ goto exit;
+ }
+ pSubmodule = PyObject_GetAttrString(pModule, TKINTER_MOD);
+ if (pSubmodule == NULL) {
+ goto exit;
+ }
+ pString = PyObject_GetAttrString(pSubmodule, "__file__");
+ if (pString == NULL) {
+ goto exit;
+ }
+ tkinter_libname = fname2char(pString);
+ if (tkinter_libname == NULL) {
+ goto exit;
+ }
+ tkinter_lib = dlopen(tkinter_libname, RTLD_LAZY);
+ if (tkinter_lib == NULL) {
+ /* Perhaps it is a cffi module, like in PyPy? */
+ pString = PyObject_GetAttrString(pSubmodule, "tklib_cffi");
+ if (pString == NULL) {
+ goto fail;
+ }
+ pString = PyObject_GetAttrString(pString, "__file__");
+ if (pString == NULL) {
+ goto fail;
+ }
+ tkinter_libname = fname2char(pString);
+ if (tkinter_libname == NULL) {
+ goto fail;
+ }
+ tkinter_lib = dlopen(tkinter_libname, RTLD_LAZY);
+ }
+ if (tkinter_lib == NULL) {
+ goto fail;
+ }
+ ret = _func_loader(tkinter_lib);
+ // dlclose is probably safe because tkinter has already been imported.
+ dlclose(tkinter_lib);
+ goto exit;
+fail:
+ PyErr_SetString(PyExc_RuntimeError,
+ "Cannot dlopen tkinter module file");
+exit:
+ Py_XDECREF(pModule);
+ Py_XDECREF(pSubmodule);
+ Py_XDECREF(pString);
+ return ret;
+}
+#endif // end not Windows
+
+#if PY_MAJOR_VERSION >= 3
+static PyModuleDef _tkagg_module = { PyModuleDef_HEAD_INIT, "_tkagg", "", -1, functions,
+ NULL, NULL, NULL, NULL };
+
+PyMODINIT_FUNC PyInit__tkagg(void)
+{
+ PyObject *m;
+
+ m = PyModule_Create(&_tkagg_module);
+
+ return (load_tkinter_funcs() == 0) ? m : NULL;
+}
+#else
+PyMODINIT_FUNC init_tkagg(void)
+{
+ Py_InitModule("_tkagg", functions);
+
+ load_tkinter_funcs();
+}
+#endif
diff --git a/contrib/python/matplotlib/py2/src/_tkmini.h b/contrib/python/matplotlib/py2/src/_tkmini.h
new file mode 100644
index 00000000000..9b730b6c8c1
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_tkmini.h
@@ -0,0 +1,128 @@
+/* Small excerpts from the Tcl / Tk 8.6 headers
+ *
+ * License terms copied from:
+ * http://www.tcl.tk/software/tcltk/license.html
+ * as of 20 May 2016.
+ *
+ * Copyright (c) 1987-1994 The Regents of the University of California.
+ * Copyright (c) 1993-1996 Lucent Technologies.
+ * Copyright (c) 1994-1998 Sun Microsystems, Inc.
+ * Copyright (c) 1998-2000 by Scriptics Corporation.
+ * Copyright (c) 2002 by Kevin B. Kenny. All rights reserved.
+ *
+ * This software is copyrighted by the Regents of the University
+ * of California, Sun Microsystems, Inc., Scriptics Corporation,
+ * and other parties. The following terms apply to all files
+ * associated with the software unless explicitly disclaimed in
+ * individual files.
+ *
+ * The authors hereby grant permission to use, copy, modify,
+ * distribute, and license this software and its documentation
+ * for any purpose, provided that existing copyright notices are
+ * retained in all copies and that this notice is included
+ * verbatim in any distributions. No written agreement, license,
+ * or royalty fee is required for any of the authorized uses.
+ * Modifications to this software may be copyrighted by their
+ * authors and need not follow the licensing terms described
+ * here, provided that the new terms are clearly indicated on
+ * the first page of each file where they apply.
+ *
+ * IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO
+ * ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS
+ * SOFTWARE, ITS DOCUMENTATION, OR ANY DERIVATIVES THEREOF, EVEN
+ * IF THE AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE IS PROVIDED ON
+ * AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO
+ * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
+ * ENHANCEMENTS, OR MODIFICATIONS.
+ *
+ * GOVERNMENT USE: If you are acquiring this software on behalf
+ * of the U.S. government, the Government shall have only
+ * "Restricted Rights" in the software and related documentation
+ * as defined in the Federal Acquisition Regulations (FARs) in
+ * Clause 52.227.19 (c) (2). If you are acquiring the software
+ * on behalf of the Department of Defense, the software shall be
+ * classified as "Commercial Computer Software" and the
+ * Government shall have only "Restricted Rights" as defined in
+ * Clause 252.227-7013 (c) (1) of DFARs. Notwithstanding the
+ * foregoing, the authors grant the U.S. Government and others
+ * acting in its behalf permission to use and distribute the
+ * software in accordance with the terms specified in this
+ * license
+ */
+
+/*
+ * Unless otherwise noted, these definitions are stable from Tcl / Tk 8.5
+ * through Tcl / Tk master as of 21 May 2016
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Tcl header excerpts */
+#define TCL_OK 0
+#define TCL_ERROR 1
+
+/*
+ * Users of Tcl >= 8.6 are encouraged to treat Tcl_Interp as an opaque
+ * pointer. The following definition results when TCL_NO_DEPRECATED is defined.
+ */
+typedef struct Tcl_Interp Tcl_Interp;
+
+typedef struct Tcl_Command_ *Tcl_Command;
+typedef void *ClientData;
+
+typedef int (Tcl_CmdProc) (ClientData clientData, Tcl_Interp
+ *interp, int argc, const char *argv[]);
+typedef void (Tcl_CmdDeleteProc) (ClientData clientData);
+
+/* Typedefs derived from function signatures in Tcl header */
+/* Tcl_CreateCommand */
+typedef Tcl_Command (*Tcl_CreateCommand_t)(Tcl_Interp *interp,
+ const char *cmdName, Tcl_CmdProc *proc,
+ ClientData clientData,
+ Tcl_CmdDeleteProc *deleteProc);
+/* Tcl_AppendResult */
+typedef void (*Tcl_AppendResult_t) (Tcl_Interp *interp, ...);
+
+/* Tk header excerpts */
+typedef struct Tk_Window_ *Tk_Window;
+
+typedef void *Tk_PhotoHandle;
+
+typedef struct Tk_PhotoImageBlock
+{
+ unsigned char *pixelPtr;
+ int width;
+ int height;
+ int pitch;
+ int pixelSize;
+ int offset[4];
+} Tk_PhotoImageBlock;
+
+/* Typedefs derived from function signatures in Tk header */
+/* Tk_MainWindow */
+typedef Tk_Window (*Tk_MainWindow_t) (Tcl_Interp *interp);
+typedef Tk_PhotoHandle (*Tk_FindPhoto_t) (Tcl_Interp *interp, const char
+ *imageName);
+/* Tk_PhotoPutBlock_NoComposite typedef */
+typedef void (*Tk_PhotoPutBlock_NoComposite_t) (Tk_PhotoHandle handle,
+ Tk_PhotoImageBlock *blockPtr, int x, int y,
+ int width, int height);
+/* Tk_PhotoBlank */
+typedef void (*Tk_PhotoBlank_t) (Tk_PhotoHandle handle);
+
+/*
+ * end block for C++
+ */
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/contrib/python/matplotlib/py2/src/_ttconv.cpp b/contrib/python/matplotlib/py2/src/_ttconv.cpp
new file mode 100644
index 00000000000..e0aa4611d28
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/_ttconv.cpp
@@ -0,0 +1,307 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+/*
+ _ttconv.c
+
+ Python wrapper for TrueType conversion library in ../ttconv.
+ */
+
+#include "mplutils.h"
+
+#include <Python.h>
+#include "ttconv/pprdrv.h"
+#include "py_exceptions.h"
+#include <vector>
+#include <cassert>
+
+/**
+ * An implementation of TTStreamWriter that writes to a Python
+ * file-like object.
+ */
+class PythonFileWriter : public TTStreamWriter
+{
+ PyObject *_write_method;
+
+ public:
+ PythonFileWriter()
+ {
+ _write_method = NULL;
+ }
+
+ ~PythonFileWriter()
+ {
+ Py_XDECREF(_write_method);
+ }
+
+ void set(PyObject *write_method)
+ {
+ Py_XDECREF(_write_method);
+ _write_method = write_method;
+ Py_XINCREF(_write_method);
+ }
+
+ virtual void write(const char *a)
+ {
+ PyObject *result = NULL;
+ if (_write_method) {
+ PyObject *decoded = NULL;
+ decoded = PyUnicode_DecodeLatin1(a, strlen(a), "");
+ if (decoded == NULL) {
+ throw py::exception();
+ }
+ result = PyObject_CallFunction(_write_method, (char *)"O", decoded);
+ Py_DECREF(decoded);
+ if (!result) {
+ throw py::exception();
+ }
+ Py_DECREF(result);
+ }
+ }
+};
+
+int fileobject_to_PythonFileWriter(PyObject *object, void *address)
+{
+ PythonFileWriter *file_writer = (PythonFileWriter *)address;
+
+ PyObject *write_method = PyObject_GetAttrString(object, "write");
+ if (write_method == NULL || !PyCallable_Check(write_method)) {
+ PyErr_SetString(PyExc_TypeError, "Expected a file-like object with a write method.");
+ return 0;
+ }
+
+ file_writer->set(write_method);
+ Py_DECREF(write_method);
+
+ return 1;
+}
+
+int pyiterable_to_vector_int(PyObject *object, void *address)
+{
+ std::vector<int> *result = (std::vector<int> *)address;
+
+ PyObject *iterator = PyObject_GetIter(object);
+ if (!iterator) {
+ return 0;
+ }
+
+ PyObject *item;
+ while ((item = PyIter_Next(iterator))) {
+#if PY3K
+ long value = PyLong_AsLong(item);
+#else
+ long value = PyInt_AsLong(item);
+#endif
+ Py_DECREF(item);
+ if (value == -1 && PyErr_Occurred()) {
+ return 0;
+ }
+ result->push_back((int)value);
+ }
+
+ Py_DECREF(iterator);
+
+ return 1;
+}
+
+static PyObject *convert_ttf_to_ps(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ const char *filename;
+ PythonFileWriter output;
+ int fonttype;
+ std::vector<int> glyph_ids;
+
+ static const char *kwlist[] = { "filename", "output", "fonttype", "glyph_ids", NULL };
+ if (!PyArg_ParseTupleAndKeywords(args,
+ kwds,
+#if PY_MAJOR_VERSION == 3
+ "yO&i|O&:convert_ttf_to_ps",
+#else
+ "sO&i|O&:convert_ttf_to_ps",
+#endif
+ (char **)kwlist,
+ &filename,
+ fileobject_to_PythonFileWriter,
+ &output,
+ &fonttype,
+ pyiterable_to_vector_int,
+ &glyph_ids)) {
+ return NULL;
+ }
+
+ if (fonttype != 3 && fonttype != 42) {
+ PyErr_SetString(PyExc_ValueError,
+ "fonttype must be either 3 (raw Postscript) or 42 "
+ "(embedded Truetype)");
+ return NULL;
+ }
+
+ try
+ {
+ insert_ttfont(filename, output, (font_type_enum)fonttype, glyph_ids);
+ }
+ catch (TTException &e)
+ {
+ PyErr_SetString(PyExc_RuntimeError, e.getMessage());
+ return NULL;
+ }
+ catch (const py::exception &)
+ {
+ return NULL;
+ }
+ catch (...)
+ {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return NULL;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+class PythonDictionaryCallback : public TTDictionaryCallback
+{
+ PyObject *_dict;
+
+ public:
+ PythonDictionaryCallback(PyObject *dict)
+ {
+ _dict = dict;
+ }
+
+ virtual void add_pair(const char *a, const char *b)
+ {
+ assert(a != NULL);
+ assert(b != NULL);
+ PyObject *value = PyBytes_FromString(b);
+ if (!value) {
+ throw py::exception();
+ }
+ if (PyDict_SetItemString(_dict, a, value)) {
+ Py_DECREF(value);
+ throw py::exception();
+ }
+ Py_DECREF(value);
+ }
+};
+
+static PyObject *py_get_pdf_charprocs(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ const char *filename;
+ std::vector<int> glyph_ids;
+ PyObject *result;
+
+ static const char *kwlist[] = { "filename", "glyph_ids", NULL };
+ if (!PyArg_ParseTupleAndKeywords(args,
+ kwds,
+#if PY_MAJOR_VERSION == 3
+ "y|O&:get_pdf_charprocs",
+#else
+ "s|O&:get_pdf_charprocs",
+#endif
+ (char **)kwlist,
+ &filename,
+ pyiterable_to_vector_int,
+ &glyph_ids)) {
+ return NULL;
+ }
+
+ result = PyDict_New();
+ if (!result) {
+ return NULL;
+ }
+
+ PythonDictionaryCallback dict(result);
+
+ try
+ {
+ ::get_pdf_charprocs(filename, glyph_ids, dict);
+ }
+ catch (TTException &e)
+ {
+ Py_DECREF(result);
+ PyErr_SetString(PyExc_RuntimeError, e.getMessage());
+ return NULL;
+ }
+ catch (const py::exception &)
+ {
+ Py_DECREF(result);
+ return NULL;
+ }
+ catch (...)
+ {
+ Py_DECREF(result);
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return NULL;
+ }
+
+ return result;
+}
+
+static PyMethodDef ttconv_methods[] =
+{
+ {
+ "convert_ttf_to_ps", (PyCFunction)convert_ttf_to_ps, METH_VARARGS | METH_KEYWORDS,
+ "convert_ttf_to_ps(filename, output, fonttype, glyph_ids)\n"
+ "\n"
+ "Converts the Truetype font into a Type 3 or Type 42 Postscript font, "
+ "optionally subsetting the font to only the desired set of characters.\n"
+ "\n"
+ "filename is the path to a TTF font file.\n"
+ "output is a Python file-like object with a write method that the Postscript "
+ "font data will be written to.\n"
+ "fonttype may be either 3 or 42. Type 3 is a \"raw Postscript\" font. "
+ "Type 42 is an embedded Truetype font. Glyph subsetting is not supported "
+ "for Type 42 fonts.\n"
+ "glyph_ids (optional) is a list of glyph ids (integers) to keep when "
+ "subsetting to a Type 3 font. If glyph_ids is not provided or is None, "
+ "then all glyphs will be included. If any of the glyphs specified are "
+ "composite glyphs, then the component glyphs will also be included."
+ },
+ {
+ "get_pdf_charprocs", (PyCFunction)py_get_pdf_charprocs, METH_VARARGS | METH_KEYWORDS,
+ "get_pdf_charprocs(filename, glyph_ids)\n"
+ "\n"
+ "Given a Truetype font file, returns a dictionary containing the PDF Type 3\n"
+ "representation of its paths. Useful for subsetting a Truetype font inside\n"
+ "of a PDF file.\n"
+ "\n"
+ "filename is the path to a TTF font file.\n"
+ "glyph_ids is a list of the numeric glyph ids to include.\n"
+ "The return value is a dictionary where the keys are glyph names and\n"
+ "the values are the stream content needed to render that glyph. This\n"
+ "is useful to generate the CharProcs dictionary in a PDF Type 3 font.\n"
+ },
+ {0, 0, 0, 0} /* Sentinel */
+};
+
+static const char *module_docstring =
+ "Module to handle converting and subsetting TrueType "
+ "fonts to Postscript Type 3, Postscript Type 42 and "
+    "PDF Type 3 fonts.";
+
+#if PY3K
+static PyModuleDef ttconv_module = {
+ PyModuleDef_HEAD_INIT,
+ "ttconv",
+ module_docstring,
+ -1,
+ ttconv_methods,
+ NULL, NULL, NULL, NULL
+};
+
+PyMODINIT_FUNC
+PyInit_ttconv(void)
+{
+ PyObject* m;
+
+ m = PyModule_Create(&ttconv_module);
+
+ return m;
+}
+#else
+PyMODINIT_FUNC
+initttconv(void)
+{
+ Py_InitModule3("ttconv", ttconv_methods, module_docstring);
+}
+#endif
diff --git a/contrib/python/matplotlib/py2/src/agg_workaround.h b/contrib/python/matplotlib/py2/src/agg_workaround.h
new file mode 100644
index 00000000000..bfadf39284d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/agg_workaround.h
@@ -0,0 +1,85 @@
+#ifndef __AGG_WORKAROUND_H__
+#define __AGG_WORKAROUND_H__
+
+#include "agg_pixfmt_rgba.h"
+
+/**********************************************************************
+ WORKAROUND: This class is to workaround a bug in Agg SVN where the
+ blending of RGBA32 pixels does not preserve enough precision
+*/
+
+template<class ColorT, class Order>
+struct fixed_blender_rgba_pre : agg::conv_rgba_pre<ColorT, Order>
+{
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+ enum base_scale_e
+ {
+ base_shift = color_type::base_shift,
+ base_mask = color_type::base_mask
+ };
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb,
+ value_type alpha, agg::cover_type cover)
+ {
+ blend_pix(p,
+ color_type::mult_cover(cr, cover),
+ color_type::mult_cover(cg, cover),
+ color_type::mult_cover(cb, cover),
+ color_type::mult_cover(alpha, cover));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb,
+ value_type alpha)
+ {
+ alpha = base_mask - alpha;
+ p[Order::R] = (value_type)(((p[Order::R] * alpha) >> base_shift) + cr);
+ p[Order::G] = (value_type)(((p[Order::G] * alpha) >> base_shift) + cg);
+ p[Order::B] = (value_type)(((p[Order::B] * alpha) >> base_shift) + cb);
+ p[Order::A] = (value_type)(base_mask - ((alpha * (base_mask - p[Order::A])) >> base_shift));
+ }
+};
+
+
+template<class ColorT, class Order>
+struct fixed_blender_rgba_plain : agg::conv_rgba_plain<ColorT, Order>
+{
+ typedef ColorT color_type;
+ typedef Order order_type;
+ typedef typename color_type::value_type value_type;
+ typedef typename color_type::calc_type calc_type;
+ typedef typename color_type::long_type long_type;
+ enum base_scale_e { base_shift = color_type::base_shift };
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha, agg::cover_type cover)
+ {
+ blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
+ }
+
+ //--------------------------------------------------------------------
+ static AGG_INLINE void blend_pix(value_type* p,
+ value_type cr, value_type cg, value_type cb, value_type alpha)
+ {
+ if(alpha == 0) return;
+ calc_type a = p[Order::A];
+ calc_type r = p[Order::R] * a;
+ calc_type g = p[Order::G] * a;
+ calc_type b = p[Order::B] * a;
+ a = ((alpha + a) << base_shift) - alpha * a;
+ p[Order::A] = (value_type)(a >> base_shift);
+ p[Order::R] = (value_type)((((cr << base_shift) - r) * alpha + (r << base_shift)) / a);
+ p[Order::G] = (value_type)((((cg << base_shift) - g) * alpha + (g << base_shift)) / a);
+ p[Order::B] = (value_type)((((cb << base_shift) - b) * alpha + (b << base_shift)) / a);
+ }
+};
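+
+/* Illustrative sketch (not part of this header) of how these blenders are meant
+   to be used: they replace agg's stock RGBA blenders when instantiating a pixel
+   format, roughly as follows (the typedef names other than the agg ones are
+   placeholders):
+
+       typedef fixed_blender_rgba_plain<agg::rgba8, agg::order_rgba> blender_rgba32_plain;
+       typedef agg::pixfmt_alpha_blend_rgba<blender_rgba32_plain,
+                                            agg::rendering_buffer> pixfmt_rgba32_plain;
+
+   Both structs only override blend_pix, keeping whatever else they inherit from
+   agg's conv_rgba_* helpers. */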
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/array.h b/contrib/python/matplotlib/py2/src/array.h
new file mode 100644
index 00000000000..8056366a1c9
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/array.h
@@ -0,0 +1,80 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+/* Utilities to create scalars and empty arrays that behave like the
+ Numpy array wrappers in numpy_cpp.h */
+
+#ifndef _SCALAR_H_
+#define _SCALAR_H_
+
+namespace array
+{
+
+template <typename T, int ND>
+class scalar
+{
+ public:
+ T m_value;
+
+ scalar(const T value) : m_value(value)
+ {
+ }
+
+ T &operator()(int i, int j = 0, int k = 0)
+ {
+ return m_value;
+ }
+
+ const T &operator()(int i, int j = 0, int k = 0) const
+ {
+ return m_value;
+ }
+
+ int dim(size_t i)
+ {
+ return 1;
+ }
+
+ size_t size()
+ {
+ return 1;
+ }
+};
+
+template <typename T>
+class empty
+{
+ public:
+ typedef empty<T> sub_t;
+
+ empty()
+ {
+ }
+
+ T &operator()(int i, int j = 0, int k = 0)
+ {
+ throw std::runtime_error("Accessed empty array");
+ }
+
+ const T &operator()(int i, int j = 0, int k = 0) const
+ {
+ throw std::runtime_error("Accessed empty array");
+ }
+
+ sub_t operator[](int i) const
+ {
+ return empty<T>();
+ }
+
+ int dim(size_t i) const
+ {
+ return 0;
+ }
+
+ size_t size() const
+ {
+ return 0;
+ }
+};
+}
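+
+/* Illustrative sketch (not part of this header): the classes above are intended
+   as drop-in stand-ins for the numpy_cpp.h array views inside templated code,
+   so the same routine can consume a real 2-D view, a broadcast scalar, or an
+   empty placeholder.  The function and variable names below are placeholders:
+
+       template <typename Values>
+       double sum_first_column(Values &values, int rows)
+       {
+           double total = 0.0;
+           for (int i = 0; i < rows; ++i) {
+               total += values(i, 0);   // real view: element (i, 0)
+           }
+           return total;
+       }
+
+       array::scalar<double, 2> half(0.5);
+       sum_first_column(half, 10);      // every half(i, 0) yields 0.5 -> 5.0
+*/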
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/file_compat.h b/contrib/python/matplotlib/py2/src/file_compat.h
new file mode 100644
index 00000000000..691133dcbb7
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/file_compat.h
@@ -0,0 +1,240 @@
+#ifndef __FILE_COMPAT_H__
+#define __FILE_COMPAT_H__
+
+#include <Python.h>
+#include <stdio.h>
+#include "numpy/npy_common.h"
+#include "numpy/ndarrayobject.h"
+#include "mplutils.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400)
+ #include <io.h>
+ #define mpl_fseek _fseeki64
+ #define mpl_ftell _ftelli64
+ #define mpl_lseek _lseeki64
+ #define mpl_off_t npy_int64
+
+ #if NPY_SIZEOF_INT == 8
+ #define MPL_OFF_T_PYFMT "i"
+ #elif NPY_SIZEOF_LONG == 8
+ #define MPL_OFF_T_PYFMT "l"
+ #elif NPY_SIZEOF_LONGLONG == 8
+ #define MPL_OFF_T_PYFMT "L"
+ #else
+ #error Unsupported size for type off_t
+ #endif
+#else
+ #define mpl_fseek fseek
+ #define mpl_ftell ftell
+ #define mpl_lseek lseek
+ #define mpl_off_t off_t
+
+ #if NPY_SIZEOF_INT == NPY_SIZEOF_SHORT
+ #define MPL_OFF_T_PYFMT "h"
+ #elif NPY_SIZEOF_INT == NPY_SIZEOF_INT
+ #define MPL_OFF_T_PYFMT "i"
+ #elif NPY_SIZEOF_INT == NPY_SIZEOF_LONG
+ #define MPL_OFF_T_PYFMT "l"
+ #elif NPY_SIZEOF_INT == NPY_SIZEOF_LONGLONG
+ #define MPL_OFF_T_PYFMT "L"
+ #else
+ #error Unsupported size for type off_t
+ #endif
+#endif
+
+/*
+ * PyFile_* compatibility
+ */
+#if PY3K | defined(PYPY_VERSION)
+
+/*
+ * Get a FILE* handle to the file represented by the Python object
+ */
+static NPY_INLINE FILE *mpl_PyFile_Dup(PyObject *file, char *mode, mpl_off_t *orig_pos)
+{
+ int fd, fd2;
+ PyObject *ret, *os;
+ mpl_off_t pos;
+ FILE *handle;
+
+ if (mode[0] != 'r') {
+ /* Flush first to ensure things end up in the file in the correct order */
+ ret = PyObject_CallMethod(file, (char *)"flush", (char *)"");
+ if (ret == NULL) {
+ return NULL;
+ }
+ Py_DECREF(ret);
+ }
+
+ fd = PyObject_AsFileDescriptor(file);
+ if (fd == -1) {
+ return NULL;
+ }
+
+ /* The handle needs to be dup'd because we have to call fclose
+ at the end */
+ os = PyImport_ImportModule("os");
+ if (os == NULL) {
+ return NULL;
+ }
+ ret = PyObject_CallMethod(os, (char *)"dup", (char *)"i", fd);
+ Py_DECREF(os);
+ if (ret == NULL) {
+ return NULL;
+ }
+ fd2 = PyNumber_AsSsize_t(ret, NULL);
+ Py_DECREF(ret);
+
+/* Convert to FILE* handle */
+#ifdef _WIN32
+ handle = _fdopen(fd2, mode);
+#else
+ handle = fdopen(fd2, mode);
+#endif
+    if (handle == NULL) {
+        PyErr_SetString(PyExc_IOError, "Getting a FILE* from a Python file object failed");
+        return NULL;
+    }
+
+ /* Record the original raw file handle position */
+ *orig_pos = mpl_ftell(handle);
+ if (*orig_pos == -1) {
+ // handle is a stream, so we don't have to worry about this
+ return handle;
+ }
+
+ /* Seek raw handle to the Python-side position */
+ ret = PyObject_CallMethod(file, (char *)"tell", (char *)"");
+ if (ret == NULL) {
+ fclose(handle);
+ return NULL;
+ }
+ pos = PyNumber_AsSsize_t(ret, PyExc_OverflowError);
+ Py_DECREF(ret);
+ if (PyErr_Occurred()) {
+ fclose(handle);
+ return NULL;
+ }
+ if (mpl_fseek(handle, pos, SEEK_SET) == -1) {
+ PyErr_SetString(PyExc_IOError, "seeking file failed");
+ return NULL;
+ }
+ return handle;
+}
+
+/*
+ * Close the dup-ed file handle, and seek the Python one to the current position
+ */
+static NPY_INLINE int mpl_PyFile_DupClose(PyObject *file, FILE *handle, mpl_off_t orig_pos)
+{
+ PyObject *exc_type = NULL, *exc_value = NULL, *exc_tb = NULL;
+ PyErr_Fetch(&exc_type, &exc_value, &exc_tb);
+
+ int fd;
+ PyObject *ret;
+ mpl_off_t position;
+
+ position = mpl_ftell(handle);
+
+ /* Close the FILE* handle */
+ fclose(handle);
+
+ /* Restore original file handle position, in order to not confuse
+ Python-side data structures. Note that this would fail if an exception
+ is currently set, which can happen as this function is called in cleanup
+ code, so we need to carefully fetch and restore the exception state. */
+ fd = PyObject_AsFileDescriptor(file);
+ if (fd == -1) {
+ goto fail;
+ }
+ if (mpl_lseek(fd, orig_pos, SEEK_SET) != -1) {
+ if (position == -1) {
+ PyErr_SetString(PyExc_IOError, "obtaining file position failed");
+ goto fail;
+ }
+
+ /* Seek Python-side handle to the FILE* handle position */
+ ret = PyObject_CallMethod(file, (char *)"seek", (char *)(MPL_OFF_T_PYFMT "i"), position, 0);
+ if (ret == NULL) {
+ goto fail;
+ }
+ Py_DECREF(ret);
+ }
+ PyErr_Restore(exc_type, exc_value, exc_tb);
+ return 0;
+fail:
+ Py_XDECREF(exc_type);
+ Py_XDECREF(exc_value);
+ Py_XDECREF(exc_tb);
+ return -1;
+}
+
+static NPY_INLINE int mpl_PyFile_Check(PyObject *file)
+{
+ int fd;
+ fd = PyObject_AsFileDescriptor(file);
+ if (fd == -1) {
+ PyErr_Clear();
+ return 0;
+ }
+ return 1;
+}
+
+#else
+
+static NPY_INLINE FILE *mpl_PyFile_Dup(PyObject *file, const char *mode, mpl_off_t *orig_pos)
+{
+ return PyFile_AsFile(file);
+}
+
+static NPY_INLINE int mpl_PyFile_DupClose(PyObject *file, FILE *handle, mpl_off_t orig_pos)
+{
+ // deliberately nothing
+ return 0;
+}
+
+static NPY_INLINE int mpl_PyFile_Check(PyObject *file)
+{
+ return PyFile_Check(file);
+}
+
+#endif
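+
+/* Illustrative sketch (not part of this header) of the intended calling pattern,
+   assuming `pyfile` is a Python file object handed in by the caller:
+
+       mpl_off_t orig_pos = 0;
+       FILE *fp = mpl_PyFile_Dup(pyfile, (char *)"rb", &orig_pos);
+       if (fp == NULL) {
+           return NULL;                 // propagate the error
+       }
+       // ... read from fp with fread/mpl_fseek ...
+       if (mpl_PyFile_DupClose(pyfile, fp, orig_pos) != 0) {
+           return NULL;                 // could not restore the Python-side handle
+       }
+*/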
+
+static NPY_INLINE PyObject *mpl_PyFile_OpenFile(PyObject *filename, const char *mode)
+{
+ PyObject *open;
+ open = PyDict_GetItemString(PyEval_GetBuiltins(), "open");
+ if (open == NULL) {
+ return NULL;
+ }
+ return PyObject_CallFunction(open, (char *)"Os", filename, mode);
+}
+
+static NPY_INLINE int mpl_PyFile_CloseFile(PyObject *file)
+{
+ PyObject *type, *value, *tb;
+ PyErr_Fetch(&type, &value, &tb);
+
+ PyObject *ret;
+
+ ret = PyObject_CallMethod(file, (char *)"close", NULL);
+ if (ret == NULL) {
+ goto fail;
+ }
+ Py_DECREF(ret);
+ PyErr_Restore(type, value, tb);
+ return 0;
+fail:
+ Py_XDECREF(type);
+ Py_XDECREF(value);
+ Py_XDECREF(tb);
+ return -1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ifndef __FILE_COMPAT_H__ */
diff --git a/contrib/python/matplotlib/py2/src/ft2font.cpp b/contrib/python/matplotlib/py2/src/ft2font.cpp
new file mode 100644
index 00000000000..4b46ec823ec
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/ft2font.cpp
@@ -0,0 +1,808 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+#define NO_IMPORT_ARRAY
+
+#include <algorithm>
+#include <stdexcept>
+#include <string>
+
+#include "ft2font.h"
+#include "mplutils.h"
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338328
+#endif
+
+/**
+ To improve the hinting of the fonts, this code uses a hack
+ presented here:
+
+ http://antigrain.com/research/font_rasterization/index.html
+
+ The idea is to limit the effect of hinting in the x-direction, while
+ preserving hinting in the y-direction. Since freetype does not
+ support this directly, the dpi in the x-direction is set higher than
+ in the y-direction, which affects the hinting grid. Then, a global
+ transform is placed on the font to shrink it back to the desired
+ size. While it is a bit surprising that the dpi setting affects
+ hinting, whereas the global transform does not, this is documented
+ behavior of FreeType, and therefore hopefully unlikely to change.
+ The FreeType 2 tutorial says:
+
+ NOTE: The transformation is applied to every glyph that is
+ loaded through FT_Load_Glyph and is completely independent of
+ any hinting process. This means that you won't get the same
+ results if you load a glyph at the size of 24 pixels, or a glyph
+ at the size at 12 pixels scaled by 2 through a transform,
+ because the hints will have been computed differently (except
+ you have disabled hints).
+ */
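+
+/* Illustrative sketch (not part of the build) of the trick described above, using
+   the default hinting_factor of 8 passed in by the Python wrapper:
+
+       FT_Set_Char_Size(face, 12 * 64, 0, 72 * 8, 72);   // hint on an 8x wider grid
+       FT_Matrix m = { 65536 / 8, 0, 0, 65536 };         // then shrink x back by 8
+       FT_Set_Transform(face, &m, 0);                    // the transform bypasses hinting
+*/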
+
+FT_Library _ft2Library;
+
+FT2Image::FT2Image() : m_dirty(true), m_buffer(NULL), m_width(0), m_height(0)
+{
+}
+
+FT2Image::FT2Image(unsigned long width, unsigned long height)
+ : m_dirty(true), m_buffer(NULL), m_width(0), m_height(0)
+{
+ resize(width, height);
+}
+
+FT2Image::~FT2Image()
+{
+ delete[] m_buffer;
+}
+
+void FT2Image::resize(long width, long height)
+{
+ if (width <= 0) {
+ width = 1;
+ }
+ if (height <= 0) {
+ height = 1;
+ }
+ size_t numBytes = width * height;
+
+ if ((unsigned long)width != m_width || (unsigned long)height != m_height) {
+ if (numBytes > m_width * m_height) {
+ delete[] m_buffer;
+ m_buffer = NULL;
+ m_buffer = new unsigned char[numBytes];
+ }
+
+ m_width = (unsigned long)width;
+ m_height = (unsigned long)height;
+ }
+
+ if (numBytes && m_buffer) {
+ memset(m_buffer, 0, numBytes);
+ }
+
+ m_dirty = true;
+}
+
+void FT2Image::draw_bitmap(FT_Bitmap *bitmap, FT_Int x, FT_Int y)
+{
+ FT_Int image_width = (FT_Int)m_width;
+ FT_Int image_height = (FT_Int)m_height;
+ FT_Int char_width = bitmap->width;
+ FT_Int char_height = bitmap->rows;
+
+ FT_Int x1 = CLAMP(x, 0, image_width);
+ FT_Int y1 = CLAMP(y, 0, image_height);
+ FT_Int x2 = CLAMP(x + char_width, 0, image_width);
+ FT_Int y2 = CLAMP(y + char_height, 0, image_height);
+
+ FT_Int x_start = MAX(0, -x);
+ FT_Int y_offset = y1 - MAX(0, -y);
+
+ if (bitmap->pixel_mode == FT_PIXEL_MODE_GRAY) {
+ for (FT_Int i = y1; i < y2; ++i) {
+ unsigned char *dst = m_buffer + (i * image_width + x1);
+ unsigned char *src = bitmap->buffer + (((i - y_offset) * bitmap->pitch) + x_start);
+ for (FT_Int j = x1; j < x2; ++j, ++dst, ++src)
+ *dst |= *src;
+ }
+ } else if (bitmap->pixel_mode == FT_PIXEL_MODE_MONO) {
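+        // Monochrome bitmaps store 1 bit per pixel, most significant bit first
+        // within each byte, hence the (x >> 3) byte index and (7 - (x & 0x7)) bit test.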
+ for (FT_Int i = y1; i < y2; ++i) {
+ unsigned char *dst = m_buffer + (i * image_width + x1);
+ unsigned char *src = bitmap->buffer + ((i - y_offset) * bitmap->pitch);
+ for (FT_Int j = x1; j < x2; ++j, ++dst) {
+ int x = (j - x1 + x_start);
+ int val = *(src + (x >> 3)) & (1 << (7 - (x & 0x7)));
+ *dst = val ? 255 : *dst;
+ }
+ }
+ } else {
+ throw std::runtime_error("Unknown pixel mode");
+ }
+
+ m_dirty = true;
+}
+
+void FT2Image::draw_rect(unsigned long x0, unsigned long y0, unsigned long x1, unsigned long y1)
+{
+ if (x0 > m_width || x1 > m_width || y0 > m_height || y1 > m_height) {
+ throw std::runtime_error("Rect coords outside image bounds");
+ }
+
+ size_t top = y0 * m_width;
+ size_t bottom = y1 * m_width;
+ for (size_t i = x0; i < x1 + 1; ++i) {
+ m_buffer[i + top] = 255;
+ m_buffer[i + bottom] = 255;
+ }
+
+ for (size_t j = y0 + 1; j < y1; ++j) {
+ m_buffer[x0 + j * m_width] = 255;
+ m_buffer[x1 + j * m_width] = 255;
+ }
+
+ m_dirty = true;
+}
+
+void
+FT2Image::draw_rect_filled(unsigned long x0, unsigned long y0, unsigned long x1, unsigned long y1)
+{
+ x0 = std::min(x0, m_width);
+ y0 = std::min(y0, m_height);
+ x1 = std::min(x1 + 1, m_width);
+ y1 = std::min(y1 + 1, m_height);
+
+ for (size_t j = y0; j < y1; j++) {
+ for (size_t i = x0; i < x1; i++) {
+ m_buffer[i + j * m_width] = 255;
+ }
+ }
+
+ m_dirty = true;
+}
+
+inline double conv(long v)
+{
+ return double(v) / 64.0;
+}
+
+int FT2Font::get_path_count()
+{
+ // get the glyph as a path, a list of (COMMAND, *args) as described in matplotlib.path
+ // this code is from agg's decompose_ft_outline with minor modifications
+
+ if (!face->glyph) {
+ throw std::runtime_error("No glyph loaded");
+ }
+
+ FT_Outline &outline = face->glyph->outline;
+
+ FT_Vector v_last;
+ FT_Vector v_control;
+ FT_Vector v_start;
+
+ FT_Vector *point;
+ FT_Vector *limit;
+ char *tags;
+
+ int n; // index of contour in outline
+ int first; // index of first point in contour
+ char tag; // current point's state
+ int count;
+
+ count = 0;
+ first = 0;
+ for (n = 0; n < outline.n_contours; n++) {
+ int last; // index of last point in contour
+ bool starts_with_last;
+
+ last = outline.contours[n];
+ limit = outline.points + last;
+
+ v_start = outline.points[first];
+ v_last = outline.points[last];
+
+ v_control = v_start;
+
+ point = outline.points + first;
+ tags = outline.tags + first;
+ tag = FT_CURVE_TAG(tags[0]);
+
+ // A contour cannot start with a cubic control point!
+ if (tag == FT_CURVE_TAG_CUBIC) {
+ throw std::runtime_error("A contour cannot start with a cubic control point");
+ } else if (tag == FT_CURVE_TAG_CONIC) {
+ starts_with_last = true;
+ } else {
+ starts_with_last = false;
+ }
+
+ count++;
+
+ while (point < limit) {
+ if (!starts_with_last) {
+ point++;
+ tags++;
+ }
+ starts_with_last = false;
+
+ tag = FT_CURVE_TAG(tags[0]);
+ switch (tag) {
+ case FT_CURVE_TAG_ON: // emit a single line_to
+ {
+ count++;
+ continue;
+ }
+
+ case FT_CURVE_TAG_CONIC: // consume conic arcs
+ {
+ Count_Do_Conic:
+ if (point < limit) {
+ point++;
+ tags++;
+ tag = FT_CURVE_TAG(tags[0]);
+
+ if (tag == FT_CURVE_TAG_ON) {
+ count += 2;
+ continue;
+ }
+
+ if (tag != FT_CURVE_TAG_CONIC) {
+ throw std::runtime_error("Invalid font");
+ }
+
+ count += 2;
+
+ goto Count_Do_Conic;
+ }
+
+ count += 2;
+
+ goto Count_Close;
+ }
+
+ default: // FT_CURVE_TAG_CUBIC
+ {
+ if (point + 1 > limit || FT_CURVE_TAG(tags[1]) != FT_CURVE_TAG_CUBIC) {
+ throw std::runtime_error("Invalid font");
+ }
+
+ point += 2;
+ tags += 2;
+
+ if (point <= limit) {
+ count += 3;
+ continue;
+ }
+
+ count += 3;
+
+ goto Count_Close;
+ }
+ }
+ }
+
+ Count_Close:
+ count++;
+ first = last + 1;
+ }
+
+ return count;
+}
+
+void FT2Font::get_path(double *outpoints, unsigned char *outcodes)
+{
+ FT_Outline &outline = face->glyph->outline;
+ bool flip_y = false; // todo, pass me as kwarg
+
+ FT_Vector v_last;
+ FT_Vector v_control;
+ FT_Vector v_start;
+
+ FT_Vector *point;
+ FT_Vector *limit;
+ char *tags;
+
+ int n; // index of contour in outline
+ int first; // index of first point in contour
+ char tag; // current point's state
+
+ first = 0;
+ for (n = 0; n < outline.n_contours; n++) {
+ int last; // index of last point in contour
+ bool starts_with_last;
+
+ last = outline.contours[n];
+ limit = outline.points + last;
+
+ v_start = outline.points[first];
+ v_last = outline.points[last];
+
+ v_control = v_start;
+
+ point = outline.points + first;
+ tags = outline.tags + first;
+ tag = FT_CURVE_TAG(tags[0]);
+
+ double x, y;
+ if (tag != FT_CURVE_TAG_ON) {
+ x = conv(v_last.x);
+ y = flip_y ? -conv(v_last.y) : conv(v_last.y);
+ starts_with_last = true;
+ } else {
+ x = conv(v_start.x);
+ y = flip_y ? -conv(v_start.y) : conv(v_start.y);
+ starts_with_last = false;
+ }
+
+ *(outpoints++) = x;
+ *(outpoints++) = y;
+ *(outcodes++) = MOVETO;
+
+ while (point < limit) {
+ if (!starts_with_last) {
+ point++;
+ tags++;
+ }
+ starts_with_last = false;
+
+ tag = FT_CURVE_TAG(tags[0]);
+ switch (tag) {
+ case FT_CURVE_TAG_ON: // emit a single line_to
+ {
+ double x = conv(point->x);
+ double y = flip_y ? -conv(point->y) : conv(point->y);
+ *(outpoints++) = x;
+ *(outpoints++) = y;
+ *(outcodes++) = LINETO;
+ continue;
+ }
+
+ case FT_CURVE_TAG_CONIC: // consume conic arcs
+ {
+ v_control.x = point->x;
+ v_control.y = point->y;
+
+ Do_Conic:
+ if (point < limit) {
+ FT_Vector vec;
+ FT_Vector v_middle;
+
+ point++;
+ tags++;
+ tag = FT_CURVE_TAG(tags[0]);
+
+ vec.x = point->x;
+ vec.y = point->y;
+
+ if (tag == FT_CURVE_TAG_ON) {
+ double xctl = conv(v_control.x);
+ double yctl = flip_y ? -conv(v_control.y) : conv(v_control.y);
+ double xto = conv(vec.x);
+ double yto = flip_y ? -conv(vec.y) : conv(vec.y);
+ *(outpoints++) = xctl;
+ *(outpoints++) = yctl;
+ *(outpoints++) = xto;
+ *(outpoints++) = yto;
+ *(outcodes++) = CURVE3;
+ *(outcodes++) = CURVE3;
+ continue;
+ }
+
+ v_middle.x = (v_control.x + vec.x) / 2;
+ v_middle.y = (v_control.y + vec.y) / 2;
+
+ double xctl = conv(v_control.x);
+ double yctl = flip_y ? -conv(v_control.y) : conv(v_control.y);
+ double xto = conv(v_middle.x);
+ double yto = flip_y ? -conv(v_middle.y) : conv(v_middle.y);
+ *(outpoints++) = xctl;
+ *(outpoints++) = yctl;
+ *(outpoints++) = xto;
+ *(outpoints++) = yto;
+ *(outcodes++) = CURVE3;
+ *(outcodes++) = CURVE3;
+
+ v_control = vec;
+ goto Do_Conic;
+ }
+ double xctl = conv(v_control.x);
+ double yctl = flip_y ? -conv(v_control.y) : conv(v_control.y);
+ double xto = conv(v_start.x);
+ double yto = flip_y ? -conv(v_start.y) : conv(v_start.y);
+
+ *(outpoints++) = xctl;
+ *(outpoints++) = yctl;
+ *(outpoints++) = xto;
+ *(outpoints++) = yto;
+ *(outcodes++) = CURVE3;
+ *(outcodes++) = CURVE3;
+
+ goto Close;
+ }
+
+ default: // FT_CURVE_TAG_CUBIC
+ {
+ FT_Vector vec1, vec2;
+
+ vec1.x = point[0].x;
+ vec1.y = point[0].y;
+ vec2.x = point[1].x;
+ vec2.y = point[1].y;
+
+ point += 2;
+ tags += 2;
+
+ if (point <= limit) {
+ FT_Vector vec;
+
+ vec.x = point->x;
+ vec.y = point->y;
+
+ double xctl1 = conv(vec1.x);
+ double yctl1 = flip_y ? -conv(vec1.y) : conv(vec1.y);
+ double xctl2 = conv(vec2.x);
+ double yctl2 = flip_y ? -conv(vec2.y) : conv(vec2.y);
+ double xto = conv(vec.x);
+ double yto = flip_y ? -conv(vec.y) : conv(vec.y);
+
+ (*outpoints++) = xctl1;
+ (*outpoints++) = yctl1;
+ (*outpoints++) = xctl2;
+ (*outpoints++) = yctl2;
+ (*outpoints++) = xto;
+ (*outpoints++) = yto;
+ (*outcodes++) = CURVE4;
+ (*outcodes++) = CURVE4;
+ (*outcodes++) = CURVE4;
+ continue;
+ }
+
+ double xctl1 = conv(vec1.x);
+ double yctl1 = flip_y ? -conv(vec1.y) : conv(vec1.y);
+ double xctl2 = conv(vec2.x);
+ double yctl2 = flip_y ? -conv(vec2.y) : conv(vec2.y);
+ double xto = conv(v_start.x);
+ double yto = flip_y ? -conv(v_start.y) : conv(v_start.y);
+ (*outpoints++) = xctl1;
+ (*outpoints++) = yctl1;
+ (*outpoints++) = xctl2;
+ (*outpoints++) = yctl2;
+ (*outpoints++) = xto;
+ (*outpoints++) = yto;
+ (*outcodes++) = CURVE4;
+ (*outcodes++) = CURVE4;
+ (*outcodes++) = CURVE4;
+
+ goto Close;
+ }
+ }
+ }
+
+ Close:
+ (*outpoints++) = 0.0;
+ (*outpoints++) = 0.0;
+ (*outcodes++) = ENDPOLY;
+ first = last + 1;
+ }
+}
+
+FT2Font::FT2Font(FT_Open_Args &open_args, long hinting_factor_) : image(), face(NULL)
+{
+ clear();
+
+ int error = FT_Open_Face(_ft2Library, &open_args, 0, &face);
+
+ if (error == FT_Err_Unknown_File_Format) {
+ throw std::runtime_error("Can not load face. Unknown file format.");
+ } else if (error == FT_Err_Cannot_Open_Resource) {
+ throw std::runtime_error("Can not load face. Can not open resource.");
+ } else if (error == FT_Err_Invalid_File_Format) {
+ throw std::runtime_error("Can not load face. Invalid file format.");
+ } else if (error) {
+ throw std::runtime_error("Can not load face.");
+ }
+
+ // set a default fontsize 12 pt at 72dpi
+ hinting_factor = hinting_factor_;
+
+ error = FT_Set_Char_Size(face, 12 * 64, 0, 72 * (unsigned int)hinting_factor, 72);
+ if (error) {
+ FT_Done_Face(face);
+ throw std::runtime_error("Could not set the fontsize");
+ }
+
+ if (open_args.stream != NULL) {
+ face->face_flags |= FT_FACE_FLAG_EXTERNAL_STREAM;
+ }
+
+ FT_Matrix transform = { 65536 / hinting_factor, 0, 0, 65536 };
+ FT_Set_Transform(face, &transform, 0);
+}
+
+FT2Font::~FT2Font()
+{
+ for (size_t i = 0; i < glyphs.size(); i++) {
+ FT_Done_Glyph(glyphs[i]);
+ }
+
+ if (face) {
+ FT_Done_Face(face);
+ }
+}
+
+void FT2Font::clear()
+{
+ angle = 0.0;
+
+ pen.x = 0;
+ pen.y = 0;
+
+ for (size_t i = 0; i < glyphs.size(); i++) {
+ FT_Done_Glyph(glyphs[i]);
+ }
+
+ glyphs.clear();
+}
+
+void FT2Font::set_size(double ptsize, double dpi)
+{
+ int error = FT_Set_Char_Size(
+ face, (long)(ptsize * 64), 0, (unsigned int)(dpi * hinting_factor), (unsigned int)dpi);
+ FT_Matrix transform = { 65536 / hinting_factor, 0, 0, 65536 };
+ FT_Set_Transform(face, &transform, 0);
+
+ if (error) {
+ throw std::runtime_error("Could not set the fontsize");
+ }
+}
+
+void FT2Font::set_charmap(int i)
+{
+ if (i >= face->num_charmaps) {
+ throw std::runtime_error("i exceeds the available number of char maps");
+ }
+ FT_CharMap charmap = face->charmaps[i];
+ if (FT_Set_Charmap(face, charmap)) {
+ throw std::runtime_error("Could not set the charmap");
+ }
+}
+
+void FT2Font::select_charmap(unsigned long i)
+{
+ if (FT_Select_Charmap(face, (FT_Encoding)i)) {
+ throw std::runtime_error("Could not set the charmap");
+ }
+}
+
+int FT2Font::get_kerning(FT_UInt left, FT_UInt right, FT_UInt mode)
+{
+ if (!FT_HAS_KERNING(face)) {
+ return 0;
+ }
+ FT_Vector delta;
+
+ if (!FT_Get_Kerning(face, left, right, mode, &delta)) {
+ return (int)(delta.x) / (hinting_factor << 6);
+ } else {
+ return 0;
+ }
+}
+
+void FT2Font::set_text(
+ size_t N, uint32_t *codepoints, double angle, FT_Int32 flags, std::vector<double> &xys)
+{
+ angle = angle / 360.0 * 2 * M_PI;
+
+ // this computes width and height in subpixels so we have to divide by 64
+ matrix.xx = (FT_Fixed)(cos(angle) * 0x10000L);
+ matrix.xy = (FT_Fixed)(-sin(angle) * 0x10000L);
+ matrix.yx = (FT_Fixed)(sin(angle) * 0x10000L);
+ matrix.yy = (FT_Fixed)(cos(angle) * 0x10000L);
+
+ FT_Bool use_kerning = FT_HAS_KERNING(face);
+ FT_UInt previous = 0;
+
+ clear();
+
+ bbox.xMin = bbox.yMin = 32000;
+ bbox.xMax = bbox.yMax = -32000;
+
+ for (unsigned int n = 0; n < N; n++) {
+ std::string thischar("?");
+ FT_UInt glyph_index;
+ FT_BBox glyph_bbox;
+ FT_Pos last_advance;
+
+ glyph_index = FT_Get_Char_Index(face, codepoints[n]);
+
+ // retrieve kerning distance and move pen position
+ if (use_kerning && previous && glyph_index) {
+ FT_Vector delta;
+ FT_Get_Kerning(face, previous, glyph_index, FT_KERNING_DEFAULT, &delta);
+ pen.x += (delta.x << 10) / (hinting_factor << 16);
+ }
+ error = FT_Load_Glyph(face, glyph_index, flags);
+ if (error) {
+ throw std::runtime_error("could not load glyph");
+ }
+ // ignore errors, jump to next glyph
+
+ // extract glyph image and store it in our table
+
+ FT_Glyph thisGlyph;
+ error = FT_Get_Glyph(face->glyph, &thisGlyph);
+
+ if (error) {
+ throw std::runtime_error("could not get glyph");
+ }
+ // ignore errors, jump to next glyph
+
+ last_advance = face->glyph->advance.x;
+ FT_Glyph_Transform(thisGlyph, 0, &pen);
+ FT_Glyph_Transform(thisGlyph, &matrix, 0);
+ xys.push_back(pen.x);
+ xys.push_back(pen.y);
+
+ FT_Glyph_Get_CBox(thisGlyph, ft_glyph_bbox_subpixels, &glyph_bbox);
+
+ bbox.xMin = std::min(bbox.xMin, glyph_bbox.xMin);
+ bbox.xMax = std::max(bbox.xMax, glyph_bbox.xMax);
+ bbox.yMin = std::min(bbox.yMin, glyph_bbox.yMin);
+ bbox.yMax = std::max(bbox.yMax, glyph_bbox.yMax);
+
+ pen.x += last_advance;
+
+ previous = glyph_index;
+ glyphs.push_back(thisGlyph);
+ }
+
+ FT_Vector_Transform(&pen, &matrix);
+ advance = pen.x;
+
+ if (bbox.xMin > bbox.xMax) {
+ bbox.xMin = bbox.yMin = bbox.xMax = bbox.yMax = 0;
+ }
+}
+
+void FT2Font::load_char(long charcode, FT_Int32 flags)
+{
+ int error = FT_Load_Char(face, (unsigned long)charcode, flags);
+
+ if (error) {
+ throw std::runtime_error("Could not load charcode");
+ }
+
+ FT_Glyph thisGlyph;
+ error = FT_Get_Glyph(face->glyph, &thisGlyph);
+
+ if (error) {
+ throw std::runtime_error("Could not get glyph");
+ }
+
+ glyphs.push_back(thisGlyph);
+}
+
+void FT2Font::load_glyph(FT_UInt glyph_index, FT_Int32 flags)
+{
+ int error = FT_Load_Glyph(face, glyph_index, flags);
+
+ if (error) {
+ throw std::runtime_error("Could not load glyph");
+ }
+
+ FT_Glyph thisGlyph;
+ error = FT_Get_Glyph(face->glyph, &thisGlyph);
+
+ if (error) {
+ throw std::runtime_error("Could not load glyph");
+ }
+
+ glyphs.push_back(thisGlyph);
+}
+
+void FT2Font::get_width_height(long *width, long *height)
+{
+ *width = advance;
+ *height = bbox.yMax - bbox.yMin;
+}
+
+long FT2Font::get_descent()
+{
+ return -bbox.yMin;
+}
+
+void FT2Font::get_bitmap_offset(long *x, long *y)
+{
+ *x = bbox.xMin;
+ *y = 0;
+}
+
+void FT2Font::draw_glyphs_to_bitmap(bool antialiased)
+{
+ size_t width = (bbox.xMax - bbox.xMin) / 64 + 2;
+ size_t height = (bbox.yMax - bbox.yMin) / 64 + 2;
+
+ image.resize(width, height);
+
+ for (size_t n = 0; n < glyphs.size(); n++) {
+ error = FT_Glyph_To_Bitmap(
+ &glyphs[n], antialiased ? FT_RENDER_MODE_NORMAL : FT_RENDER_MODE_MONO, 0, 1);
+ if (error) {
+ throw std::runtime_error("Could not convert glyph to bitmap");
+ }
+
+ FT_BitmapGlyph bitmap = (FT_BitmapGlyph)glyphs[n];
+ // now, draw to our target surface (convert position)
+
+ // bitmap left and top in pixel, string bbox in subpixel
+ FT_Int x = (FT_Int)(bitmap->left - (bbox.xMin / 64.));
+ FT_Int y = (FT_Int)((bbox.yMax / 64.) - bitmap->top + 1);
+
+ image.draw_bitmap(&bitmap->bitmap, x, y);
+ }
+}
+
+void FT2Font::get_xys(bool antialiased, std::vector<double> &xys)
+{
+ for (size_t n = 0; n < glyphs.size(); n++) {
+
+ error = FT_Glyph_To_Bitmap(
+ &glyphs[n], antialiased ? FT_RENDER_MODE_NORMAL : FT_RENDER_MODE_MONO, 0, 1);
+ if (error) {
+ throw std::runtime_error("Could not convert glyph to bitmap");
+ }
+
+ FT_BitmapGlyph bitmap = (FT_BitmapGlyph)glyphs[n];
+
+ // bitmap left and top in pixel, string bbox in subpixel
+ FT_Int x = (FT_Int)(bitmap->left - bbox.xMin / 64.);
+ FT_Int y = (FT_Int)(bbox.yMax / 64. - bitmap->top + 1);
+ // make sure the index is non-neg
+ x = x < 0 ? 0 : x;
+ y = y < 0 ? 0 : y;
+ xys.push_back(x);
+ xys.push_back(y);
+ }
+}
+
+void FT2Font::draw_glyph_to_bitmap(FT2Image &im, int x, int y, size_t glyphInd, bool antialiased)
+{
+ FT_Vector sub_offset;
+ sub_offset.x = 0; // int((xd - (double)x) * 64.0);
+ sub_offset.y = 0; // int((yd - (double)y) * 64.0);
+
+ if (glyphInd >= glyphs.size()) {
+ throw std::runtime_error("glyph num is out of range");
+ }
+
+ error = FT_Glyph_To_Bitmap(&glyphs[glyphInd],
+ antialiased ? FT_RENDER_MODE_NORMAL : FT_RENDER_MODE_MONO,
+ &sub_offset, // additional translation
+ 1 // destroy image
+ );
+ if (error) {
+ throw std::runtime_error("Could not convert glyph to bitmap");
+ }
+
+ FT_BitmapGlyph bitmap = (FT_BitmapGlyph)glyphs[glyphInd];
+
+ im.draw_bitmap(&bitmap->bitmap, x + bitmap->left, y);
+}
+
+void FT2Font::get_glyph_name(unsigned int glyph_number, char *buffer)
+{
+ if (!FT_HAS_GLYPH_NAMES(face)) {
+ /* Note that this generated name must match the name that
+ is generated by ttconv in ttfont_CharStrings_getname. */
+ PyOS_snprintf(buffer, 128, "uni%08x", glyph_number);
+ } else {
+ if (FT_Get_Glyph_Name(face, glyph_number, buffer, 128)) {
+ throw std::runtime_error("Could not get glyph names.");
+ }
+ }
+}
+
+long FT2Font::get_name_index(char *name)
+{
+ return FT_Get_Name_Index(face, (FT_String *)name);
+}
diff --git a/contrib/python/matplotlib/py2/src/ft2font.h b/contrib/python/matplotlib/py2/src/ft2font.h
new file mode 100644
index 00000000000..c60d5432cff
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/ft2font.h
@@ -0,0 +1,139 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+/* A python interface to FreeType */
+#ifndef _FT2FONT_H
+#define _FT2FONT_H
+#include <vector>
+#include <stdint.h>
+
+extern "C" {
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_GLYPH_H
+#include FT_SFNT_NAMES_H
+#include FT_TYPE1_TABLES_H
+#include FT_TRUETYPE_TABLES_H
+}
+
+/*
+ By definition, an FT_FIXED value packs two 16-bit values into a single long.
+ */
+#define FIXED_MAJOR(val) (long)((val & 0xffff000) >> 16)
+#define FIXED_MINOR(val) (long)(val & 0xffff)
+
+// the FreeType string rendered into a width, height buffer
+class FT2Image
+{
+ public:
+ FT2Image();
+ FT2Image(unsigned long width, unsigned long height);
+ virtual ~FT2Image();
+
+ void resize(long width, long height);
+ void draw_bitmap(FT_Bitmap *bitmap, FT_Int x, FT_Int y);
+ void write_bitmap(FILE *fp) const;
+ void draw_rect(unsigned long x0, unsigned long y0, unsigned long x1, unsigned long y1);
+ void draw_rect_filled(unsigned long x0, unsigned long y0, unsigned long x1, unsigned long y1);
+
+ unsigned char *get_buffer()
+ {
+ return m_buffer;
+ }
+ unsigned long get_width()
+ {
+ return m_width;
+ }
+ unsigned long get_height()
+ {
+ return m_height;
+ }
+
+ private:
+ bool m_dirty;
+ unsigned char *m_buffer;
+ unsigned long m_width;
+ unsigned long m_height;
+
+ // prevent copying
+ FT2Image(const FT2Image &);
+ FT2Image &operator=(const FT2Image &);
+};
+
+extern FT_Library _ft2Library;
+
+class FT2Font
+{
+
+ public:
+ FT2Font(FT_Open_Args &open_args, long hinting_factor);
+ virtual ~FT2Font();
+ void clear();
+ void set_size(double ptsize, double dpi);
+ void set_charmap(int i);
+ void select_charmap(unsigned long i);
+ void set_text(
+ size_t N, uint32_t *codepoints, double angle, FT_Int32 flags, std::vector<double> &xys);
+ int get_kerning(FT_UInt left, FT_UInt right, FT_UInt mode);
+ void load_char(long charcode, FT_Int32 flags);
+ void load_glyph(FT_UInt glyph_index, FT_Int32 flags);
+ void get_width_height(long *width, long *height);
+ void get_bitmap_offset(long *x, long *y);
+ long get_descent();
+ // TODO: Since we know the size of the array upfront, we probably don't
+ // need to dynamically allocate like this
+ void get_xys(bool antialiased, std::vector<double> &xys);
+ void draw_glyphs_to_bitmap(bool antialiased);
+ void draw_glyph_to_bitmap(FT2Image &im, int x, int y, size_t glyphInd, bool antialiased);
+ void get_glyph_name(unsigned int glyph_number, char *buffer);
+ long get_name_index(char *name);
+ int get_path_count();
+ void get_path(double *outpoints, unsigned char *outcodes);
+
+ FT_Face &get_face()
+ {
+ return face;
+ }
+ FT2Image &get_image()
+ {
+ return image;
+ }
+ FT_Glyph &get_last_glyph()
+ {
+ return glyphs.back();
+ }
+ size_t get_last_glyph_index()
+ {
+ return glyphs.size() - 1;
+ }
+ size_t get_num_glyphs()
+ {
+ return glyphs.size();
+ }
+ long get_hinting_factor()
+ {
+ return hinting_factor;
+ }
+
+ private:
+ FT2Image image;
+ FT_Face face;
+ FT_Matrix matrix; /* transformation matrix */
+ FT_Vector pen; /* untransformed origin */
+ FT_Error error;
+ std::vector<FT_Glyph> glyphs;
+ std::vector<FT_Vector> pos;
+ FT_BBox bbox;
+ FT_Pos advance;
+ double angle;
+ double ptsize;
+ double dpi;
+ long hinting_factor;
+
+ void set_scalable_attributes();
+
+ // prevent copying
+ FT2Font(const FT2Font &);
+ FT2Font &operator=(const FT2Font &);
+};
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/ft2font_wrapper.cpp b/contrib/python/matplotlib/py2/src/ft2font_wrapper.cpp
new file mode 100644
index 00000000000..49c33b79435
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/ft2font_wrapper.cpp
@@ -0,0 +1,1805 @@
+#include "mplutils.h"
+#include "ft2font.h"
+#include "file_compat.h"
+#include "py_exceptions.h"
+#include "numpy_cpp.h"
+
+// From Python
+#include <structmember.h>
+
+#define STRINGIFY(s) XSTRINGIFY(s)
+#define XSTRINGIFY(s) #s
+
+static PyObject *convert_xys_to_array(std::vector<double> &xys)
+{
+ npy_intp dims[] = {(npy_intp)xys.size() / 2, 2 };
+ if (dims[0] > 0) {
+ return PyArray_SimpleNewFromData(2, dims, NPY_DOUBLE, &xys[0]);
+ } else {
+ return PyArray_SimpleNew(2, dims, NPY_DOUBLE);
+ }
+}
+
+/**********************************************************************
+ * FT2Image
+ * */
+
+typedef struct
+{
+ PyObject_HEAD
+ FT2Image *x;
+ Py_ssize_t shape[2];
+ Py_ssize_t strides[2];
+ Py_ssize_t suboffsets[2];
+} PyFT2Image;
+
+static PyObject *PyFT2Image_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ PyFT2Image *self;
+ self = (PyFT2Image *)type->tp_alloc(type, 0);
+ self->x = NULL;
+ return (PyObject *)self;
+}
+
+static int PyFT2Image_init(PyFT2Image *self, PyObject *args, PyObject *kwds)
+{
+ double width;
+ double height;
+
+ if (!PyArg_ParseTuple(args, "dd:FT2Image", &width, &height)) {
+ return -1;
+ }
+
+ CALL_CPP_INIT("FT2Image", (self->x = new FT2Image(width, height)));
+
+ return 0;
+}
+
+static void PyFT2Image_dealloc(PyFT2Image *self)
+{
+ delete self->x;
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+const char *PyFT2Image_draw_rect__doc__ =
+ "draw_rect(x0, y0, x1, y1)\n"
+ "\n"
+ "Draw a rect to the image.\n"
+ "\n";
+
+static PyObject *PyFT2Image_draw_rect(PyFT2Image *self, PyObject *args, PyObject *kwds)
+{
+ double x0, y0, x1, y1;
+
+ if (!PyArg_ParseTuple(args, "dddd:draw_rect", &x0, &y0, &x1, &y1)) {
+ return NULL;
+ }
+
+ CALL_CPP("draw_rect", (self->x->draw_rect(x0, y0, x1, y1)));
+
+ Py_RETURN_NONE;
+}
+
+const char *PyFT2Image_draw_rect_filled__doc__ =
+ "draw_rect_filled(x0, y0, x1, y1)\n"
+ "\n"
+ "Draw a filled rect to the image.\n"
+ "\n";
+
+static PyObject *PyFT2Image_draw_rect_filled(PyFT2Image *self, PyObject *args, PyObject *kwds)
+{
+ double x0, y0, x1, y1;
+
+ if (!PyArg_ParseTuple(args, "dddd:draw_rect_filled", &x0, &y0, &x1, &y1)) {
+ return NULL;
+ }
+
+ CALL_CPP("draw_rect_filled", (self->x->draw_rect_filled(x0, y0, x1, y1)));
+
+ Py_RETURN_NONE;
+}
+
+const char *PyFT2Image_as_str__doc__ =
+ "s = image.as_str()\n"
+ "\n"
+ "Return the image buffer as a string\n"
+ "\n";
+
+static PyObject *PyFT2Image_as_str(PyFT2Image *self, PyObject *args, PyObject *kwds)
+{
+ // TODO: Use a buffer to avoid the copy
+ return PyBytes_FromStringAndSize((const char *)self->x->get_buffer(),
+ self->x->get_width() * self->x->get_height());
+}
+
+const char *PyFT2Image_as_rgba_str__doc__ =
+ "s = image.as_rgba_str()\n"
+ "\n"
+ "Return the image buffer as a RGBA string\n"
+ "\n";
+
+static PyObject *PyFT2Image_as_rgba_str(PyFT2Image *self, PyObject *args, PyObject *kwds)
+{
+ npy_intp dims[] = {(npy_intp)self->x->get_height(), (npy_intp)self->x->get_width(), 4 };
+ numpy::array_view<unsigned char, 3> result(dims);
+
+ unsigned char *src = self->x->get_buffer();
+ unsigned char *end = src + (self->x->get_width() * self->x->get_height());
+ unsigned char *dst = result.data();
+
+ while (src != end) {
+ *dst++ = 0;
+ *dst++ = 0;
+ *dst++ = 0;
+ *dst++ = *src++;
+ }
+
+ return result.pyobj();
+}
+
+const char *PyFT2Image_as_array__doc__ =
+ "x = image.as_array()\n"
+ "\n"
+ "Return the image buffer as a width x height numpy array of ubyte \n"
+ "\n";
+
+static PyObject *PyFT2Image_as_array(PyFT2Image *self, PyObject *args, PyObject *kwds)
+{
+ npy_intp dims[] = {(npy_intp)self->x->get_height(), (npy_intp)self->x->get_width() };
+ return PyArray_SimpleNewFromData(2, dims, NPY_UBYTE, self->x->get_buffer());
+}
+
+static PyObject *PyFT2Image_get_width(PyFT2Image *self, PyObject *args, PyObject *kwds)
+{
+ return PyLong_FromLong(self->x->get_width());
+}
+
+static PyObject *PyFT2Image_get_height(PyFT2Image *self, PyObject *args, PyObject *kwds)
+{
+ return PyLong_FromLong(self->x->get_height());
+}
+
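+// Buffer-protocol hook: exposes the rendered image as a writable, C-contiguous,
+// 2-D uint8 buffer of shape (height, width).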
+static int PyFT2Image_get_buffer(PyFT2Image *self, Py_buffer *buf, int flags)
+{
+ FT2Image *im = self->x;
+
+ Py_INCREF(self);
+ buf->obj = (PyObject *)self;
+ buf->buf = im->get_buffer();
+ buf->len = im->get_width() * im->get_height();
+ buf->readonly = 0;
+ buf->format = (char *)"B";
+ buf->ndim = 2;
+ self->shape[0] = im->get_height();
+ self->shape[1] = im->get_width();
+ buf->shape = self->shape;
+ self->strides[0] = im->get_width();
+ self->strides[1] = 1;
+ buf->strides = self->strides;
+ buf->suboffsets = NULL;
+ buf->itemsize = 1;
+ buf->internal = NULL;
+
+ return 1;
+}
+
+static PyTypeObject PyFT2ImageType;
+
+static PyTypeObject *PyFT2Image_init_type(PyObject *m, PyTypeObject *type)
+{
+ static PyMethodDef methods[] = {
+ {"draw_rect", (PyCFunction)PyFT2Image_draw_rect, METH_VARARGS, PyFT2Image_draw_rect__doc__},
+ {"draw_rect_filled", (PyCFunction)PyFT2Image_draw_rect_filled, METH_VARARGS, PyFT2Image_draw_rect_filled__doc__},
+ {"as_str", (PyCFunction)PyFT2Image_as_str, METH_NOARGS, PyFT2Image_as_str__doc__},
+ {"as_rgba_str", (PyCFunction)PyFT2Image_as_rgba_str, METH_NOARGS, PyFT2Image_as_rgba_str__doc__},
+ {"as_array", (PyCFunction)PyFT2Image_as_array, METH_NOARGS, PyFT2Image_as_array__doc__},
+ {"get_width", (PyCFunction)PyFT2Image_get_width, METH_NOARGS, NULL},
+ {"get_height", (PyCFunction)PyFT2Image_get_height, METH_NOARGS, NULL},
+ {NULL}
+ };
+
+ static PyBufferProcs buffer_procs;
+ memset(&buffer_procs, 0, sizeof(PyBufferProcs));
+ buffer_procs.bf_getbuffer = (getbufferproc)PyFT2Image_get_buffer;
+
+ memset(type, 0, sizeof(PyTypeObject));
+ type->tp_name = "matplotlib.ft2font.FT2Image";
+ type->tp_basicsize = sizeof(PyFT2Image);
+ type->tp_dealloc = (destructor)PyFT2Image_dealloc;
+ type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_NEWBUFFER;
+ type->tp_methods = methods;
+ type->tp_new = PyFT2Image_new;
+ type->tp_init = (initproc)PyFT2Image_init;
+ type->tp_as_buffer = &buffer_procs;
+
+ if (PyType_Ready(type) < 0) {
+ return NULL;
+ }
+
+ if (PyModule_AddObject(m, "FT2Image", (PyObject *)type)) {
+ return NULL;
+ }
+
+ return type;
+}
+
+/**********************************************************************
+ * Glyph
+ * */
+
+typedef struct
+{
+ PyObject_HEAD
+ size_t glyphInd;
+ long width;
+ long height;
+ long horiBearingX;
+ long horiBearingY;
+ long horiAdvance;
+ long linearHoriAdvance;
+ long vertBearingX;
+ long vertBearingY;
+ long vertAdvance;
+ FT_BBox bbox;
+} PyGlyph;
+
+static PyTypeObject PyGlyphType;
+
+static PyObject *
+PyGlyph_new(const FT_Face &face, const FT_Glyph &glyph, size_t ind, long hinting_factor)
+{
+ PyGlyph *self;
+ self = (PyGlyph *)PyGlyphType.tp_alloc(&PyGlyphType, 0);
+
+ self->glyphInd = ind;
+
+ FT_Glyph_Get_CBox(glyph, ft_glyph_bbox_subpixels, &self->bbox);
+
+ self->width = face->glyph->metrics.width / hinting_factor;
+ self->height = face->glyph->metrics.height;
+ self->horiBearingX = face->glyph->metrics.horiBearingX / hinting_factor;
+ self->horiBearingY = face->glyph->metrics.horiBearingY;
+ self->horiAdvance = face->glyph->metrics.horiAdvance;
+ self->linearHoriAdvance = face->glyph->linearHoriAdvance / hinting_factor;
+ self->vertBearingX = face->glyph->metrics.vertBearingX;
+ self->vertBearingY = face->glyph->metrics.vertBearingY;
+ self->vertAdvance = face->glyph->metrics.vertAdvance;
+
+ return (PyObject *)self;
+}
+
+static void PyGlyph_dealloc(PyGlyph *self)
+{
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+static PyObject *PyGlyph_get_bbox(PyGlyph *self, void *closure)
+{
+ return Py_BuildValue(
+ "iiii", self->bbox.xMin, self->bbox.yMin, self->bbox.xMax, self->bbox.yMax);
+}
+
+static PyTypeObject *PyGlyph_init_type(PyObject *m, PyTypeObject *type)
+{
+ static PyMemberDef members[] = {
+ {(char *)"width", T_LONG, offsetof(PyGlyph, width), READONLY, (char *)""},
+ {(char *)"height", T_LONG, offsetof(PyGlyph, height), READONLY, (char *)""},
+ {(char *)"horiBearingX", T_LONG, offsetof(PyGlyph, horiBearingX), READONLY, (char *)""},
+ {(char *)"horiBearingY", T_LONG, offsetof(PyGlyph, horiBearingY), READONLY, (char *)""},
+ {(char *)"horiAdvance", T_LONG, offsetof(PyGlyph, horiAdvance), READONLY, (char *)""},
+ {(char *)"linearHoriAdvance", T_LONG, offsetof(PyGlyph, linearHoriAdvance), READONLY, (char *)""},
+ {(char *)"vertBearingX", T_LONG, offsetof(PyGlyph, vertBearingX), READONLY, (char *)""},
+ {(char *)"vertBearingY", T_LONG, offsetof(PyGlyph, vertBearingY), READONLY, (char *)""},
+ {(char *)"vertAdvance", T_LONG, offsetof(PyGlyph, vertAdvance), READONLY, (char *)""},
+ {NULL}
+ };
+
+ static PyGetSetDef getset[] = {
+ {(char *)"bbox", (getter)PyGlyph_get_bbox, NULL, NULL, NULL},
+ {NULL}
+ };
+
+ memset(type, 0, sizeof(PyTypeObject));
+ type->tp_name = "matplotlib.ft2font.Glyph";
+ type->tp_basicsize = sizeof(PyGlyph);
+ type->tp_dealloc = (destructor)PyGlyph_dealloc;
+ type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
+ type->tp_members = members;
+ type->tp_getset = getset;
+
+ if (PyType_Ready(type) < 0) {
+ return NULL;
+ }
+
+ /* Don't need to add to module, since you can't create glyphs
+ directly from Python */
+
+ return type;
+}
+
+/**********************************************************************
+ * FT2Font
+ * */
+
+typedef struct
+{
+ PyObject_HEAD
+ FT2Font *x;
+ PyObject *fname;
+ PyObject *py_file;
+ FILE *fp;
+ int close_file;
+ mpl_off_t offset;
+ FT_StreamRec stream;
+ FT_Byte *mem;
+ size_t mem_size;
+ Py_ssize_t shape[2];
+ Py_ssize_t strides[2];
+ Py_ssize_t suboffsets[2];
+} PyFT2Font;
+
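+// FreeType stream read callback.  FreeType may call this with count == 0 to
+// request a pure seek, in which case returning 0 signals success.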
+static unsigned long read_from_file_callback(FT_Stream stream,
+ unsigned long offset,
+ unsigned char *buffer,
+ unsigned long count)
+{
+
+ PyFT2Font *def = (PyFT2Font *)stream->descriptor.pointer;
+
+ if (fseek(def->fp, offset, SEEK_SET) == -1) {
+ return 0;
+ }
+
+ if (count > 0) {
+ return fread(buffer, 1, count, def->fp);
+ }
+
+ return 0;
+}
+
+static void close_file_callback(FT_Stream stream)
+{
+ PyFT2Font *def = (PyFT2Font *)stream->descriptor.pointer;
+
+ if (mpl_PyFile_DupClose(def->py_file, def->fp, def->offset)) {
+ throw std::runtime_error("Couldn't close file");
+ }
+
+ if (def->close_file) {
+ mpl_PyFile_CloseFile(def->py_file);
+ }
+
+ Py_DECREF(def->py_file);
+ def->py_file = NULL;
+}
+
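+// Fill an FT_Open_Args either with a dup'd FILE* stream (when the argument is a
+// filename or a real file object) or with an in-memory copy of whatever the
+// object's read() method returns.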
+static int convert_open_args(PyFT2Font *self, PyObject *py_file_arg, FT_Open_Args *open_args)
+{
+ PyObject *py_file = NULL;
+ int close_file = 0;
+ FILE *fp;
+ PyObject *data = NULL;
+ char *data_ptr;
+ Py_ssize_t data_len;
+ long file_size;
+ FT_Byte *new_memory;
+ mpl_off_t offset = 0;
+
+ int result = 0;
+
+ memset((void *)open_args, 0, sizeof(FT_Open_Args));
+
+ if (PyBytes_Check(py_file_arg) || PyUnicode_Check(py_file_arg)) {
+ if ((py_file = mpl_PyFile_OpenFile(py_file_arg, (char *)"rb")) == NULL) {
+ goto exit;
+ }
+ close_file = 1;
+ } else {
+ Py_INCREF(py_file_arg);
+ py_file = py_file_arg;
+ }
+
+ if ((fp = mpl_PyFile_Dup(py_file, (char *)"rb", &offset))) {
+ Py_INCREF(py_file);
+ self->py_file = py_file;
+ self->close_file = close_file;
+ self->fp = fp;
+ self->offset = offset;
+ fseek(fp, 0, SEEK_END);
+ file_size = ftell(fp);
+ fseek(fp, 0, SEEK_SET);
+
+ self->stream.base = NULL;
+ self->stream.size = (unsigned long)file_size;
+ self->stream.pos = 0;
+ self->stream.descriptor.pointer = self;
+ self->stream.read = &read_from_file_callback;
+ self->stream.close = &close_file_callback;
+
+ open_args->flags = FT_OPEN_STREAM;
+ open_args->stream = &self->stream;
+ } else {
+ if (PyObject_HasAttrString(py_file_arg, "read") &&
+ (data = PyObject_CallMethod(py_file_arg, (char *)"read", (char *)""))) {
+ if (PyBytes_AsStringAndSize(data, &data_ptr, &data_len)) {
+ goto exit;
+ }
+
+ if (self->mem) {
+ free(self->mem);
+ }
+ self->mem = (FT_Byte *)malloc((self->mem_size + data_len) * sizeof(FT_Byte));
+ if (self->mem == NULL) {
+ goto exit;
+ }
+ new_memory = self->mem + self->mem_size;
+ self->mem_size += data_len;
+
+ memcpy(new_memory, data_ptr, data_len);
+ open_args->flags = FT_OPEN_MEMORY;
+ open_args->memory_base = new_memory;
+ open_args->memory_size = data_len;
+ open_args->stream = NULL;
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "First argument must be a path or file object reading bytes");
+ goto exit;
+ }
+ }
+
+ result = 1;
+
+exit:
+
+ Py_XDECREF(py_file);
+ Py_XDECREF(data);
+
+ return result;
+}
+
+static PyTypeObject PyFT2FontType;
+
+static PyObject *PyFT2Font_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ PyFT2Font *self;
+ self = (PyFT2Font *)type->tp_alloc(type, 0);
+ self->x = NULL;
+ self->fname = NULL;
+ self->py_file = NULL;
+ self->fp = NULL;
+ self->close_file = 0;
+ self->offset = 0;
+ memset(&self->stream, 0, sizeof(FT_StreamRec));
+ self->mem = 0;
+ self->mem_size = 0;
+ return (PyObject *)self;
+}
+
+const char *PyFT2Font_init__doc__ =
+ "FT2Font(ttffile)\n"
+ "\n"
+ "Create a new FT2Font object\n"
+ "The following global font attributes are defined:\n"
+ " num_faces number of faces in file\n"
+ " face_flags face flags (int type); see the ft2font constants\n"
+ " style_flags style flags (int type); see the ft2font constants\n"
+ " num_glyphs number of glyphs in the face\n"
+ " family_name face family name\n"
+ " style_name face style name\n"
+ " num_fixed_sizes number of bitmap in the face\n"
+ " scalable face is scalable\n"
+ "\n"
+ "The following are available, if scalable is true:\n"
+ " bbox face global bounding box (xmin, ymin, xmax, ymax)\n"
+ " units_per_EM number of font units covered by the EM\n"
+ " ascender ascender in 26.6 units\n"
+ " descender descender in 26.6 units\n"
+ " height height in 26.6 units; used to compute a default\n"
+ " line spacing (baseline-to-baseline distance)\n"
+ " max_advance_width maximum horizontal cursor advance for all glyphs\n"
+ " max_advance_height same for vertical layout\n"
+ " underline_position vertical position of the underline bar\n"
+ " underline_thickness vertical thickness of the underline\n"
+ " postscript_name PostScript name of the font\n";
+
+static void PyFT2Font_fail(PyFT2Font *self)
+{
+ free(self->mem);
+ self->mem = NULL;
+ Py_XDECREF(self->py_file);
+ self->py_file = NULL;
+}
+
+static int PyFT2Font_init(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *fname;
+ FT_Open_Args open_args;
+ long hinting_factor = 8;
+ const char *names[] = { "filename", "hinting_factor", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(
+ args, kwds, "O|l:FT2Font", (char **)names, &fname, &hinting_factor)) {
+ return -1;
+ }
+
+ if (!convert_open_args(self, fname, &open_args)) {
+ return -1;
+ }
+
+ CALL_CPP_FULL(
+ "FT2Font", (self->x = new FT2Font(open_args, hinting_factor)), PyFT2Font_fail(self), -1);
+
+ Py_INCREF(fname);
+ self->fname = fname;
+
+ return 0;
+}
+
+static void PyFT2Font_dealloc(PyFT2Font *self)
+{
+ delete self->x;
+ free(self->mem);
+ Py_XDECREF(self->py_file);
+ Py_XDECREF(self->fname);
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+const char *PyFT2Font_clear__doc__ =
+ "clear()\n"
+ "\n"
+ "Clear all the glyphs, reset for a new set_text";
+
+static PyObject *PyFT2Font_clear(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ CALL_CPP("clear", (self->x->clear()));
+
+ Py_RETURN_NONE;
+}
+
+const char *PyFT2Font_set_size__doc__ =
+ "set_size(ptsize, dpi)\n"
+ "\n"
+ "Set the point size and dpi of the text.\n";
+
+static PyObject *PyFT2Font_set_size(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ double ptsize;
+ double dpi;
+
+ if (!PyArg_ParseTuple(args, "dd:set_size", &ptsize, &dpi)) {
+ return NULL;
+ }
+
+ CALL_CPP("set_size", (self->x->set_size(ptsize, dpi)));
+
+ Py_RETURN_NONE;
+}
+
+const char *PyFT2Font_set_charmap__doc__ =
+ "set_charmap(i)\n"
+ "\n"
+ "Make the i-th charmap current\n";
+
+static PyObject *PyFT2Font_set_charmap(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ int i;
+
+ if (!PyArg_ParseTuple(args, "i:set_charmap", &i)) {
+ return NULL;
+ }
+
+ CALL_CPP("set_charmap", (self->x->set_charmap(i)));
+
+ Py_RETURN_NONE;
+}
+
+const char *PyFT2Font_select_charmap__doc__ =
+ "select_charmap(i)\n"
+ "\n"
+    "select charmap i, where i is one of the FT_Encoding numbers\n";
+
+static PyObject *PyFT2Font_select_charmap(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ unsigned long i;
+
+ if (!PyArg_ParseTuple(args, "k:select_charmap", &i)) {
+ return NULL;
+ }
+
+ CALL_CPP("select_charmap", self->x->select_charmap(i));
+
+ Py_RETURN_NONE;
+}
+
+const char *PyFT2Font_get_kerning__doc__ =
+ "dx = get_kerning(left, right, mode)\n"
+ "\n"
+    "Get the kerning between the left and right glyph indices\n"
+ "mode is a kerning mode constant\n"
+ " KERNING_DEFAULT - Return scaled and grid-fitted kerning distances\n"
+ " KERNING_UNFITTED - Return scaled but un-grid-fitted kerning distances\n"
+ " KERNING_UNSCALED - Return the kerning vector in original font units\n";
+
+static PyObject *PyFT2Font_get_kerning(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ FT_UInt left, right, mode;
+ int result;
+
+ if (!PyArg_ParseTuple(args, "III:get_kerning", &left, &right, &mode)) {
+ return NULL;
+ }
+
+ CALL_CPP("get_kerning", (result = self->x->get_kerning(left, right, mode)));
+
+ return PyLong_FromLong(result);
+}
+
+const char *PyFT2Font_set_text__doc__ =
+ "set_text(s, angle)\n"
+ "\n"
+ "Set the text string and angle.\n"
+ "You must call this before draw_glyphs_to_bitmap\n"
+ "A sequence of x,y positions is returned";
+
+static PyObject *PyFT2Font_set_text(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *textobj;
+ double angle = 0.0;
+ FT_Int32 flags = FT_LOAD_FORCE_AUTOHINT;
+ std::vector<double> xys;
+ const char *names[] = { "string", "angle", "flags", NULL };
+
+ /* This makes a technically incorrect assumption that FT_Int32 is
+ int. In theory it can also be long, if the size of int is less
+ than 32 bits. This is very unlikely on modern platforms. */
+ if (!PyArg_ParseTupleAndKeywords(
+ args, kwds, "O|di:set_text", (char **)names, &textobj, &angle, &flags)) {
+ return NULL;
+ }
+
+ std::vector<uint32_t> codepoints;
+ size_t size;
+
+ if (PyUnicode_Check(textobj)) {
+ size = PyUnicode_GET_SIZE(textobj);
+ codepoints.resize(size);
+ Py_UNICODE *unistr = PyUnicode_AsUnicode(textobj);
+ for (size_t i = 0; i < size; ++i) {
+ codepoints[i] = unistr[i];
+ }
+ } else if (PyBytes_Check(textobj)) {
+ size = PyBytes_Size(textobj);
+ codepoints.resize(size);
+ char *bytestr = PyBytes_AsString(textobj);
+ for (size_t i = 0; i < size; ++i) {
+ codepoints[i] = bytestr[i];
+ }
+ } else {
+ PyErr_SetString(PyExc_TypeError, "String must be unicode or bytes");
+ return NULL;
+ }
+
+ uint32_t* codepoints_array = NULL;
+ if (size > 0) {
+ codepoints_array = &codepoints[0];
+ }
+ CALL_CPP("set_text", self->x->set_text(size, codepoints_array, angle, flags, xys));
+
+ return convert_xys_to_array(xys);
+}
+
+const char *PyFT2Font_get_num_glyphs__doc__ =
+ "get_num_glyphs()\n"
+ "\n"
+ "Return the number of loaded glyphs\n";
+
+static PyObject *PyFT2Font_get_num_glyphs(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ return PyLong_FromLong(self->x->get_num_glyphs());
+}
+
+const char *PyFT2Font_load_char__doc__ =
+ "load_char(charcode, flags=LOAD_FORCE_AUTOHINT)\n"
+ "\n"
+ "Load character with charcode in current fontfile and set glyph.\n"
+ "The flags argument can be a bitwise-or of the LOAD_XXX constants.\n"
+ "Return value is a Glyph object, with attributes\n"
+ " width # glyph width\n"
+ " height # glyph height\n"
+ " bbox # the glyph bbox (xmin, ymin, xmax, ymax)\n"
+ " horiBearingX # left side bearing in horizontal layouts\n"
+ " horiBearingY # top side bearing in horizontal layouts\n"
+ " horiAdvance # advance width for horizontal layout\n"
+ " vertBearingX # left side bearing in vertical layouts\n"
+ " vertBearingY # top side bearing in vertical layouts\n"
+ " vertAdvance # advance height for vertical layout\n";
+
+static PyObject *PyFT2Font_load_char(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ long charcode;
+ FT_Int32 flags = FT_LOAD_FORCE_AUTOHINT;
+ const char *names[] = { "charcode", "flags", NULL };
+
+ /* This makes a technically incorrect assumption that FT_Int32 is
+ int. In theory it can also be long, if the size of int is less
+ than 32 bits. This is very unlikely on modern platforms. */
+ if (!PyArg_ParseTupleAndKeywords(
+ args, kwds, "l|i:load_char", (char **)names, &charcode, &flags)) {
+ return NULL;
+ }
+
+ CALL_CPP("load_char", (self->x->load_char(charcode, flags)));
+
+ return PyGlyph_new(self->x->get_face(),
+ self->x->get_last_glyph(),
+ self->x->get_last_glyph_index(),
+ self->x->get_hinting_factor());
+}
+
+const char *PyFT2Font_load_glyph__doc__ =
+ "load_glyph(glyphindex, flags=LOAD_FORCE_AUTOHINT)\n"
+ "\n"
+ "Load character with glyphindex in current fontfile and set glyph.\n"
+ "The flags argument can be a bitwise-or of the LOAD_XXX constants.\n"
+ "Return value is a Glyph object, with attributes\n"
+ " width # glyph width\n"
+ " height # glyph height\n"
+ " bbox # the glyph bbox (xmin, ymin, xmax, ymax)\n"
+ " horiBearingX # left side bearing in horizontal layouts\n"
+ " horiBearingY # top side bearing in horizontal layouts\n"
+ " horiAdvance # advance width for horizontal layout\n"
+ " vertBearingX # left side bearing in vertical layouts\n"
+ " vertBearingY # top side bearing in vertical layouts\n"
+ " vertAdvance # advance height for vertical layout\n";
+
+static PyObject *PyFT2Font_load_glyph(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ FT_UInt glyph_index;
+ FT_Int32 flags = FT_LOAD_FORCE_AUTOHINT;
+ const char *names[] = { "glyph_index", "flags", NULL };
+
+ /* This makes a technically incorrect assumption that FT_Int32 is
+ int. In theory it can also be long, if the size of int is less
+ than 32 bits. This is very unlikely on modern platforms. */
+ if (!PyArg_ParseTupleAndKeywords(
+ args, kwds, "I|i:load_glyph", (char **)names, &glyph_index, &flags)) {
+ return NULL;
+ }
+
+ CALL_CPP("load_glyph", (self->x->load_glyph(glyph_index, flags)));
+
+ return PyGlyph_new(self->x->get_face(),
+ self->x->get_last_glyph(),
+ self->x->get_last_glyph_index(),
+ self->x->get_hinting_factor());
+}
+
+const char *PyFT2Font_get_width_height__doc__ =
+ "w, h = get_width_height()\n"
+ "\n"
+ "Get the width and height in 26.6 subpixels of the current string set by set_text\n"
+ "The rotation of the string is accounted for. To get width and height\n"
+ "in pixels, divide these values by 64\n";
+
+static PyObject *PyFT2Font_get_width_height(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ long width, height;
+
+ CALL_CPP("get_width_height", (self->x->get_width_height(&width, &height)));
+
+ return Py_BuildValue("ll", width, height);
+}
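+
+/* Sketch of converting the 26.6 fixed-point result to pixels; angle is
+   assumed to default to 0 when omitted:
+
+       font.set_text('hello')
+       w26, h26 = font.get_width_height()
+       w_px, h_px = w26 / 64.0, h26 / 64.0
+*/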
+
+const char *PyFT2Font_get_bitmap_offset__doc__ =
+ "x, y = get_bitmap_offset()\n"
+ "\n"
+ "Get the offset in 26.6 subpixels for the bitmap if ink hangs left or below (0, 0).\n"
+ "Since matplotlib only supports left-to-right text, y is always 0.\n";
+
+static PyObject *PyFT2Font_get_bitmap_offset(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ long x, y;
+
+ CALL_CPP("get_bitmap_offset", (self->x->get_bitmap_offset(&x, &y)));
+
+ return Py_BuildValue("ll", x, y);
+}
+
+const char *PyFT2Font_get_descent__doc__ =
+ "d = get_descent()\n"
+ "\n"
+ "Get the descent of the current string set by set_text in 26.6 subpixels.\n"
+ "The rotation of the string is accounted for. To get the descent\n"
+ "in pixels, divide this value by 64.\n";
+
+static PyObject *PyFT2Font_get_descent(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ long descent;
+
+ CALL_CPP("get_descent", (descent = self->x->get_descent()));
+
+ return PyLong_FromLong(descent);
+}
+
+const char *PyFT2Font_draw_glyphs_to_bitmap__doc__ =
+ "draw_glyphs_to_bitmap()\n"
+ "\n"
+ "Draw the glyphs that were loaded by set_text to the bitmap\n"
+ "The bitmap size will be automatically set to include the glyphs\n";
+
+static PyObject *PyFT2Font_draw_glyphs_to_bitmap(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ int antialiased = 1;
+ const char *names[] = { "antialiased", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(
+ args, kwds, "|i:draw_glyphs_to_bitmap", (char **)names, &antialiased)) {
+ return NULL;
+ }
+
+ CALL_CPP("draw_glyphs_to_bitmap", (self->x->draw_glyphs_to_bitmap(antialiased)));
+
+ Py_RETURN_NONE;
+}
+
+const char *PyFT2Font_get_xys__doc__ =
+ "get_xys()\n"
+ "\n"
+ "Get the xy locations of the current glyphs\n";
+
+static PyObject *PyFT2Font_get_xys(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ int antialiased = 1;
+ std::vector<double> xys;
+ const char *names[] = { "antialiased", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|i:get_xys", (char **)names, &antialiased)) {
+ return NULL;
+ }
+
+ CALL_CPP("get_xys", (self->x->get_xys(antialiased, xys)));
+
+ return convert_xys_to_array(xys);
+}
+
+const char *PyFT2Font_draw_glyph_to_bitmap__doc__ =
+ "draw_glyph_to_bitmap(bitmap, x, y, glyph)\n"
+ "\n"
+ "Draw a single glyph to the bitmap at pixel locations x,y\n"
+ "Note it is your responsibility to set up the bitmap manually\n"
+ "with set_bitmap_size(w,h) before this call is made.\n"
+ "\n"
+ "If you want automatic layout, use set_text in combinations with\n"
+ "draw_glyphs_to_bitmap. This function is intended for people who\n"
+ "want to render individual glyphs at precise locations, eg, a\n"
+ "a glyph returned by load_char\n";
+
+static PyObject *PyFT2Font_draw_glyph_to_bitmap(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ PyFT2Image *image;
+ double xd, yd;
+ PyGlyph *glyph;
+ int antialiased = 1;
+ const char *names[] = { "image", "x", "y", "glyph", "antialiased", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(args,
+ kwds,
+ "O!ddO!|i:draw_glyph_to_bitmap",
+ (char **)names,
+ &PyFT2ImageType,
+ &image,
+ &xd,
+ &yd,
+ &PyGlyphType,
+ &glyph,
+ &antialiased)) {
+ return NULL;
+ }
+
+ CALL_CPP("draw_glyph_to_bitmap",
+ self->x->draw_glyph_to_bitmap(*(image->x), xd, yd, glyph->glyphInd, antialiased));
+
+ Py_RETURN_NONE;
+}
+
+const char *PyFT2Font_get_glyph_name__doc__ =
+ "get_glyph_name(index)\n"
+ "\n"
+ "Retrieves the ASCII name of a given glyph in a face.\n";
+
+static PyObject *PyFT2Font_get_glyph_name(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ unsigned int glyph_number;
+ char buffer[128];
+
+ if (!PyArg_ParseTuple(args, "I:get_glyph_name", &glyph_number)) {
+ return NULL;
+ }
+
+ CALL_CPP("get_glyph_name", (self->x->get_glyph_name(glyph_number, buffer)));
+
+ return PyUnicode_FromString(buffer);
+}
+
+const char *PyFT2Font_get_charmap__doc__ =
+ "get_charmap()\n"
+ "\n"
+ "Returns a dictionary that maps the character codes of the selected charmap\n"
+ "(Unicode by default) to their corresponding glyph indices.\n";
+
+static PyObject *PyFT2Font_get_charmap(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *charmap;
+
+ charmap = PyDict_New();
+ if (charmap == NULL) {
+ return NULL;
+ }
+
+ FT_UInt index;
+ FT_ULong code = FT_Get_First_Char(self->x->get_face(), &index);
+ while (index != 0) {
+ PyObject *key;
+ PyObject *val;
+
+ key = PyLong_FromLong(code);
+ if (key == NULL) {
+ Py_DECREF(charmap);
+ return NULL;
+ }
+
+ val = PyLong_FromLong(index);
+ if (val == NULL) {
+ Py_DECREF(key);
+ Py_DECREF(charmap);
+ return NULL;
+ }
+
+ if (PyDict_SetItem(charmap, key, val)) {
+ Py_DECREF(key);
+ Py_DECREF(val);
+ Py_DECREF(charmap);
+ return NULL;
+ }
+
+ Py_DECREF(key);
+ Py_DECREF(val);
+
+ code = FT_Get_Next_Char(self->x->get_face(), code, &index);
+ }
+
+ return charmap;
+}
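+
+/* Usage sketch: the returned dict maps code points of the selected charmap
+   to glyph indices, for example:
+
+       cmap = font.get_charmap()
+       glyph_index = cmap.get(ord('A'))           # None if 'A' is unmapped
+*/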
+
+
+const char *PyFT2Font_get_char_index__doc__ =
+ "get_char_index()\n"
+ "\n"
+ "Given a character code, returns a glyph index.\n";
+
+static PyObject *PyFT2Font_get_char_index(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ FT_UInt index;
+ FT_ULong ccode;
+
+ if (!PyArg_ParseTuple(args, "k:get_char_index", &ccode)) {
+ return NULL;
+ }
+
+ index = FT_Get_Char_Index(self->x->get_face(), ccode);
+
+ return PyLong_FromLong(index);
+}
+
+
+const char *PyFT2Font_get_sfnt__doc__ =
+ "get_sfnt(name)\n"
+ "\n"
+ "Get all values from the SFNT names table. Result is a dictionary whose"
+ "key is the platform-ID, ISO-encoding-scheme, language-code, and"
+ "description.\n";
+
+static PyObject *PyFT2Font_get_sfnt(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *names;
+
+ if (!(self->x->get_face()->face_flags & FT_FACE_FLAG_SFNT)) {
+ PyErr_SetString(PyExc_ValueError, "No SFNT name table");
+ return NULL;
+ }
+
+ size_t count = FT_Get_Sfnt_Name_Count(self->x->get_face());
+
+ names = PyDict_New();
+ if (names == NULL) {
+ return NULL;
+ }
+
+ for (FT_UInt j = 0; j < count; ++j) {
+ FT_SfntName sfnt;
+ FT_Error error = FT_Get_Sfnt_Name(self->x->get_face(), j, &sfnt);
+
+ if (error) {
+ Py_DECREF(names);
+ PyErr_SetString(PyExc_ValueError, "Could not get SFNT name");
+ return NULL;
+ }
+
+ PyObject *key = Py_BuildValue(
+ "iiii", sfnt.platform_id, sfnt.encoding_id, sfnt.language_id, sfnt.name_id);
+ if (key == NULL) {
+ Py_DECREF(names);
+ return NULL;
+ }
+
+ PyObject *val = PyBytes_FromStringAndSize((const char *)sfnt.string, sfnt.string_len);
+ if (val == NULL) {
+ Py_DECREF(key);
+ Py_DECREF(names);
+ return NULL;
+ }
+
+ if (PyDict_SetItem(names, key, val)) {
+ Py_DECREF(key);
+ Py_DECREF(val);
+ Py_DECREF(names);
+ return NULL;
+ }
+
+ Py_DECREF(key);
+ Py_DECREF(val);
+ }
+
+ return names;
+}
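+
+/* Usage sketch, assuming an SFNT font. Keys are (platform-ID, encoding-ID,
+   language-ID, name-ID) tuples and values are raw byte strings. Per the
+   OpenType naming conventions (assumed here), the family name lives under
+   Microsoft platform 3, Unicode BMP encoding 1, US English 0x409, name ID 1:
+
+       sfnt = font.get_sfnt()
+       family_raw = sfnt.get((3, 1, 0x409, 1))    # typically UTF-16-BE bytes
+*/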
+
+const char *PyFT2Font_get_name_index__doc__ =
+ "get_name_index(name)\n"
+ "\n"
+ "Returns the glyph index of a given glyph name.\n"
+ "The glyph index 0 means `undefined character code'.\n";
+
+static PyObject *PyFT2Font_get_name_index(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ char *glyphname;
+ long name_index;
+
+ if (!PyArg_ParseTuple(args, "es:get_name_index", "ascii", &glyphname)) {
+ return NULL;
+ }
+
+ CALL_CPP("get_name_index", name_index = self->x->get_name_index(glyphname));
+
+ PyMem_Free(glyphname);
+
+ return PyLong_FromLong(name_index);
+}
+
+const char *PyFT2Font_get_ps_font_info__doc__ =
+ "get_ps_font_info()\n"
+ "\n"
+ "Return the information in the PS Font Info structure.\n";
+
+static PyObject *PyFT2Font_get_ps_font_info(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ PS_FontInfoRec fontinfo;
+
+ FT_Error error = FT_Get_PS_Font_Info(self->x->get_face(), &fontinfo);
+ if (error) {
+ PyErr_SetString(PyExc_ValueError, "Could not get PS font info");
+ return NULL;
+ }
+
+ return Py_BuildValue("sssssliii",
+ fontinfo.version ? fontinfo.version : "",
+ fontinfo.notice ? fontinfo.notice : "",
+ fontinfo.full_name ? fontinfo.full_name : "",
+ fontinfo.family_name ? fontinfo.family_name : "",
+ fontinfo.weight ? fontinfo.weight : "",
+ fontinfo.italic_angle,
+ fontinfo.is_fixed_pitch,
+ fontinfo.underline_position,
+ fontinfo.underline_thickness);
+}
+
+const char *PyFT2Font_get_sfnt_table__doc__ =
+ "get_sfnt_table(name)\n"
+ "\n"
+ "Return one of the following SFNT tables: head, maxp, OS/2, hhea, "
+ "vhea, post, or pclt.\n";
+
+static PyObject *PyFT2Font_get_sfnt_table(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ char *tagname;
+
+ if (!PyArg_ParseTuple(args, "es:get_sfnt_table", "ascii", &tagname)) {
+ return NULL;
+ }
+
+ int tag;
+ const char *tags[] = { "head", "maxp", "OS/2", "hhea", "vhea", "post", "pclt", NULL };
+
+ for (tag = 0; tags[tag] != NULL; tag++) {
+ if (strncmp(tagname, tags[tag], 5) == 0) {
+ break;
+ }
+ }
+
+ PyMem_Free(tagname);
+
+ void *table = FT_Get_Sfnt_Table(self->x->get_face(), (FT_Sfnt_Tag)tag);
+ if (!table) {
+ Py_RETURN_NONE;
+ }
+
+ switch (tag) {
+ case 0: {
+ char head_dict[] =
+ "{s:(h,h), s:(h,h), s:l, s:l, s:i, s:i,"
+ "s:(l,l), s:(l,l), s:h, s:h, s:h, s:h, s:i, s:i, s:h, s:h, s:h}";
+ TT_Header *t = (TT_Header *)table;
+ return Py_BuildValue(head_dict,
+ "version",
+ FIXED_MAJOR(t->Table_Version),
+ FIXED_MINOR(t->Table_Version),
+ "fontRevision",
+ FIXED_MAJOR(t->Font_Revision),
+ FIXED_MINOR(t->Font_Revision),
+ "checkSumAdjustment",
+ t->CheckSum_Adjust,
+ "magicNumber",
+ t->Magic_Number,
+ "flags",
+ (unsigned)t->Flags,
+ "unitsPerEm",
+ (unsigned)t->Units_Per_EM,
+ "created",
+ t->Created[0],
+ t->Created[1],
+ "modified",
+ t->Modified[0],
+ t->Modified[1],
+ "xMin",
+ t->xMin,
+ "yMin",
+ t->yMin,
+ "xMax",
+ t->xMax,
+ "yMax",
+ t->yMax,
+ "macStyle",
+ (unsigned)t->Mac_Style,
+ "lowestRecPPEM",
+ (unsigned)t->Lowest_Rec_PPEM,
+ "fontDirectionHint",
+ t->Font_Direction,
+ "indexToLocFormat",
+ t->Index_To_Loc_Format,
+ "glyphDataFormat",
+ t->Glyph_Data_Format);
+ }
+ case 1: {
+ char maxp_dict[] =
+ "{s:(h,h), s:i, s:i, s:i, s:i, s:i, s:i,"
+ "s:i, s:i, s:i, s:i, s:i, s:i, s:i, s:i}";
+ TT_MaxProfile *t = (TT_MaxProfile *)table;
+ return Py_BuildValue(maxp_dict,
+ "version",
+ FIXED_MAJOR(t->version),
+ FIXED_MINOR(t->version),
+ "numGlyphs",
+ (unsigned)t->numGlyphs,
+ "maxPoints",
+ (unsigned)t->maxPoints,
+ "maxContours",
+ (unsigned)t->maxContours,
+ "maxComponentPoints",
+ (unsigned)t->maxCompositePoints,
+ "maxComponentContours",
+ (unsigned)t->maxCompositeContours,
+ "maxZones",
+ (unsigned)t->maxZones,
+ "maxTwilightPoints",
+ (unsigned)t->maxTwilightPoints,
+ "maxStorage",
+ (unsigned)t->maxStorage,
+ "maxFunctionDefs",
+ (unsigned)t->maxFunctionDefs,
+ "maxInstructionDefs",
+ (unsigned)t->maxInstructionDefs,
+ "maxStackElements",
+ (unsigned)t->maxStackElements,
+ "maxSizeOfInstructions",
+ (unsigned)t->maxSizeOfInstructions,
+ "maxComponentElements",
+ (unsigned)t->maxComponentElements,
+ "maxComponentDepth",
+ (unsigned)t->maxComponentDepth);
+ }
+ case 2: {
+#if PY3K
+ char os_2_dict[] =
+ "{s:h, s:h, s:h, s:h, s:h, s:h, s:h, s:h,"
+ "s:h, s:h, s:h, s:h, s:h, s:h, s:h, s:h, s:y#, s:(llll),"
+ "s:y#, s:h, s:h, s:h}";
+#else
+ char os_2_dict[] =
+ "{s:h, s:h, s:h, s:h, s:h, s:h, s:h, s:h,"
+ "s:h, s:h, s:h, s:h, s:h, s:h, s:h, s:h, s:s#, s:(llll),"
+ "s:s#, s:h, s:h, s:h}";
+#endif
+ TT_OS2 *t = (TT_OS2 *)table;
+ return Py_BuildValue(os_2_dict,
+ "version",
+ (unsigned)t->version,
+ "xAvgCharWidth",
+ t->xAvgCharWidth,
+ "usWeightClass",
+ (unsigned)t->usWeightClass,
+ "usWidthClass",
+ (unsigned)t->usWidthClass,
+ "fsType",
+ t->fsType,
+ "ySubscriptXSize",
+ t->ySubscriptXSize,
+ "ySubscriptYSize",
+ t->ySubscriptYSize,
+ "ySubscriptXOffset",
+ t->ySubscriptXOffset,
+ "ySubscriptYOffset",
+ t->ySubscriptYOffset,
+ "ySuperscriptXSize",
+ t->ySuperscriptXSize,
+ "ySuperscriptYSize",
+ t->ySuperscriptYSize,
+ "ySuperscriptXOffset",
+ t->ySuperscriptXOffset,
+ "ySuperscriptYOffset",
+ t->ySuperscriptYOffset,
+ "yStrikeoutSize",
+ t->yStrikeoutSize,
+ "yStrikeoutPosition",
+ t->yStrikeoutPosition,
+ "sFamilyClass",
+ t->sFamilyClass,
+ "panose",
+ t->panose,
+ 10,
+ "ulCharRange",
+ (unsigned long)t->ulUnicodeRange1,
+ (unsigned long)t->ulUnicodeRange2,
+ (unsigned long)t->ulUnicodeRange3,
+ (unsigned long)t->ulUnicodeRange4,
+ "achVendID",
+ t->achVendID,
+ 4,
+ "fsSelection",
+ (unsigned)t->fsSelection,
+ "fsFirstCharIndex",
+ (unsigned)t->usFirstCharIndex,
+ "fsLastCharIndex",
+ (unsigned)t->usLastCharIndex);
+ }
+ case 3: {
+ char hhea_dict[] =
+ "{s:(h,h), s:h, s:h, s:h, s:i, s:h, s:h, s:h,"
+ "s:h, s:h, s:h, s:h, s:i}";
+ TT_HoriHeader *t = (TT_HoriHeader *)table;
+ return Py_BuildValue(hhea_dict,
+ "version",
+ FIXED_MAJOR(t->Version),
+ FIXED_MINOR(t->Version),
+ "ascent",
+ t->Ascender,
+ "descent",
+ t->Descender,
+ "lineGap",
+ t->Line_Gap,
+ "advanceWidthMax",
+ (unsigned)t->advance_Width_Max,
+ "minLeftBearing",
+ t->min_Left_Side_Bearing,
+ "minRightBearing",
+ t->min_Right_Side_Bearing,
+ "xMaxExtent",
+ t->xMax_Extent,
+ "caretSlopeRise",
+ t->caret_Slope_Rise,
+ "caretSlopeRun",
+ t->caret_Slope_Run,
+ "caretOffset",
+ t->caret_Offset,
+ "metricDataFormat",
+ t->metric_Data_Format,
+ "numOfLongHorMetrics",
+ (unsigned)t->number_Of_HMetrics);
+ }
+ case 4: {
+ char vhea_dict[] =
+ "{s:(h,h), s:h, s:h, s:h, s:i, s:h, s:h, s:h,"
+ "s:h, s:h, s:h, s:h, s:i}";
+ TT_VertHeader *t = (TT_VertHeader *)table;
+ return Py_BuildValue(vhea_dict,
+ "version",
+ FIXED_MAJOR(t->Version),
+ FIXED_MINOR(t->Version),
+ "vertTypoAscender",
+ t->Ascender,
+ "vertTypoDescender",
+ t->Descender,
+ "vertTypoLineGap",
+ t->Line_Gap,
+ "advanceHeightMax",
+ (unsigned)t->advance_Height_Max,
+ "minTopSideBearing",
+ t->min_Top_Side_Bearing,
+ "minBottomSizeBearing",
+ t->min_Bottom_Side_Bearing,
+ "yMaxExtent",
+ t->yMax_Extent,
+ "caretSlopeRise",
+ t->caret_Slope_Rise,
+ "caretSlopeRun",
+ t->caret_Slope_Run,
+ "caretOffset",
+ t->caret_Offset,
+ "metricDataFormat",
+ t->metric_Data_Format,
+ "numOfLongVerMetrics",
+ (unsigned)t->number_Of_VMetrics);
+ }
+ case 5: {
+ char post_dict[] = "{s:(h,h), s:(h,h), s:h, s:h, s:k, s:k, s:k, s:k, s:k}";
+ TT_Postscript *t = (TT_Postscript *)table;
+ return Py_BuildValue(post_dict,
+ "format",
+ FIXED_MAJOR(t->FormatType),
+ FIXED_MINOR(t->FormatType),
+ "italicAngle",
+ FIXED_MAJOR(t->italicAngle),
+ FIXED_MINOR(t->italicAngle),
+ "underlinePosition",
+ t->underlinePosition,
+ "underlineThickness",
+ t->underlineThickness,
+ "isFixedPitch",
+ t->isFixedPitch,
+ "minMemType42",
+ t->minMemType42,
+ "maxMemType42",
+ t->maxMemType42,
+ "minMemType1",
+ t->minMemType1,
+ "maxMemType1",
+ t->maxMemType1);
+ }
+ case 6: {
+ #if PY3K
+ char pclt_dict[] =
+ "{s:(h,h), s:k, s:H, s:H, s:H, s:H, s:H, s:H, s:y, s:y, s:b, s:b, "
+ "s:b}";
+ #else
+ char pclt_dict[] =
+ "{s:(h,h), s:k, s:H, s:H, s:H, s:H, s:H, s:H, s:s, s:s, s:b, s:b, "
+ "s:b}";
+ #endif
+ TT_PCLT *t = (TT_PCLT *)table;
+ return Py_BuildValue(pclt_dict,
+ "version",
+ FIXED_MAJOR(t->Version),
+ FIXED_MINOR(t->Version),
+ "fontNumber",
+ t->FontNumber,
+ "pitch",
+ t->Pitch,
+ "xHeight",
+ t->xHeight,
+ "style",
+ t->Style,
+ "typeFamily",
+ t->TypeFamily,
+ "capHeight",
+ t->CapHeight,
+ "symbolSet",
+ t->SymbolSet,
+ "typeFace",
+ t->TypeFace,
+ "characterComplement",
+ t->CharacterComplement,
+ "strokeWeight",
+ t->StrokeWeight,
+ "widthType",
+ t->WidthType,
+ "serifStyle",
+ t->SerifStyle);
+ }
+ default:
+ Py_RETURN_NONE;
+ }
+}
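+
+/* Usage sketch: tables are returned as plain dicts, or None when the table
+   is absent from the font:
+
+       head = font.get_sfnt_table('head')
+       if head is not None:
+           units_per_em = head['unitsPerEm']
+*/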
+
+const char *PyFT2Font_get_path__doc__ =
+ "get_path()\n"
+ "\n"
+ "Get the path data from the currently loaded glyph as a tuple of vertices, "
+ "codes.\n";
+
+static PyObject *PyFT2Font_get_path(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ int count;
+
+ CALL_CPP("get_path", (count = self->x->get_path_count()));
+
+ npy_intp vertices_dims[2] = { count, 2 };
+ numpy::array_view<double, 2> vertices(vertices_dims);
+
+ npy_intp codes_dims[1] = { count };
+ numpy::array_view<unsigned char, 1> codes(codes_dims);
+
+ self->x->get_path(vertices.data(), codes.data());
+
+ return Py_BuildValue("NN", vertices.pyobj(), codes.pyobj());
+}
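+
+/* Usage sketch: the returned codes use the same values as the path-code
+   enum in mplutils.h (MOVETO=1, LINETO=2, CURVE3=3, CURVE4=4,
+   ENDPOLY=0x4f), which are assumed here to be compatible with
+   matplotlib.path.Path codes:
+
+       from matplotlib.path import Path
+       font.load_char(ord('g'))
+       verts, codes = font.get_path()
+       glyph_path = Path(verts, codes)
+*/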
+
+const char *PyFT2Font_get_image__doc__ =
+ "get_image()\n"
+ "\n"
+ "Returns the underlying image buffer for this font object.\n";
+
+static PyObject *PyFT2Font_get_image(PyFT2Font *self, PyObject *args, PyObject *kwds)
+{
+ FT2Image &im = self->x->get_image();
+ npy_intp dims[] = {(npy_intp)im.get_height(), (npy_intp)im.get_width() };
+ return PyArray_SimpleNewFromData(2, dims, NPY_UBYTE, im.get_buffer());
+}
+
+static PyObject *PyFT2Font_postscript_name(PyFT2Font *self, void *closure)
+{
+ const char *ps_name = FT_Get_Postscript_Name(self->x->get_face());
+ if (ps_name == NULL) {
+ ps_name = "UNAVAILABLE";
+ }
+
+ return PyUnicode_FromString(ps_name);
+}
+
+static PyObject *PyFT2Font_num_faces(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->num_faces);
+}
+
+static PyObject *PyFT2Font_family_name(PyFT2Font *self, void *closure)
+{
+ const char *name = self->x->get_face()->family_name;
+ if (name == NULL) {
+ name = "UNAVAILABLE";
+ }
+ return PyUnicode_FromString(name);
+}
+
+static PyObject *PyFT2Font_style_name(PyFT2Font *self, void *closure)
+{
+ const char *name = self->x->get_face()->style_name;
+ if (name == NULL) {
+ name = "UNAVAILABLE";
+ }
+ return PyUnicode_FromString(name);
+}
+
+static PyObject *PyFT2Font_face_flags(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->face_flags);
+}
+
+static PyObject *PyFT2Font_style_flags(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->style_flags);
+}
+
+static PyObject *PyFT2Font_num_glyphs(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->num_glyphs);
+}
+
+static PyObject *PyFT2Font_num_fixed_sizes(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->num_fixed_sizes);
+}
+
+static PyObject *PyFT2Font_num_charmaps(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->num_charmaps);
+}
+
+static PyObject *PyFT2Font_scalable(PyFT2Font *self, void *closure)
+{
+ if (FT_IS_SCALABLE(self->x->get_face())) {
+ Py_RETURN_TRUE;
+ }
+ Py_RETURN_FALSE;
+}
+
+static PyObject *PyFT2Font_units_per_EM(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->units_per_EM);
+}
+
+static PyObject *PyFT2Font_get_bbox(PyFT2Font *self, void *closure)
+{
+ FT_BBox *bbox = &(self->x->get_face()->bbox);
+
+ return Py_BuildValue("iiii", bbox->xMin, bbox->yMin, bbox->xMax, bbox->yMax);
+}
+
+static PyObject *PyFT2Font_ascender(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->ascender);
+}
+
+static PyObject *PyFT2Font_descender(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->descender);
+}
+
+static PyObject *PyFT2Font_height(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->height);
+}
+
+static PyObject *PyFT2Font_max_advance_width(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->max_advance_width);
+}
+
+static PyObject *PyFT2Font_max_advance_height(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->max_advance_height);
+}
+
+static PyObject *PyFT2Font_underline_position(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->underline_position);
+}
+
+static PyObject *PyFT2Font_underline_thickness(PyFT2Font *self, void *closure)
+{
+ return PyLong_FromLong(self->x->get_face()->underline_thickness);
+}
+
+static PyObject *PyFT2Font_fname(PyFT2Font *self, void *closure)
+{
+ if (self->fname) {
+ Py_INCREF(self->fname);
+ return self->fname;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static int PyFT2Font_get_buffer(PyFT2Font *self, Py_buffer *buf, int flags)
+{
+ FT2Image &im = self->x->get_image();
+
+ Py_INCREF(self);
+ buf->obj = (PyObject *)self;
+ buf->buf = im.get_buffer();
+ buf->len = im.get_width() * im.get_height();
+ buf->readonly = 0;
+ buf->format = (char *)"B";
+ buf->ndim = 2;
+ self->shape[0] = im.get_height();
+ self->shape[1] = im.get_width();
+ buf->shape = self->shape;
+ self->strides[0] = im.get_width();
+ self->strides[1] = 1;
+ buf->strides = self->strides;
+ buf->suboffsets = NULL;
+ buf->itemsize = 1;
+ buf->internal = NULL;
+
+ return 1;
+}
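+
+/* Because the type exposes the buffer protocol above, the rendered bitmap
+   can be viewed without a copy; a sketch, assuming numpy accepts the
+   (height, width) unsigned-byte buffer exported here:
+
+       import numpy as np
+       font.set_text('hi')
+       font.draw_glyphs_to_bitmap()
+       img = np.asarray(font)                     # 2D uint8 view
+*/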
+
+static PyTypeObject *PyFT2Font_init_type(PyObject *m, PyTypeObject *type)
+{
+ static PyGetSetDef getset[] = {
+ {(char *)"postscript_name", (getter)PyFT2Font_postscript_name, NULL, NULL, NULL},
+ {(char *)"num_faces", (getter)PyFT2Font_num_faces, NULL, NULL, NULL},
+ {(char *)"family_name", (getter)PyFT2Font_family_name, NULL, NULL, NULL},
+ {(char *)"style_name", (getter)PyFT2Font_style_name, NULL, NULL, NULL},
+ {(char *)"face_flags", (getter)PyFT2Font_face_flags, NULL, NULL, NULL},
+ {(char *)"style_flags", (getter)PyFT2Font_style_flags, NULL, NULL, NULL},
+ {(char *)"num_glyphs", (getter)PyFT2Font_num_glyphs, NULL, NULL, NULL},
+ {(char *)"num_fixed_sizes", (getter)PyFT2Font_num_fixed_sizes, NULL, NULL, NULL},
+ {(char *)"num_charmaps", (getter)PyFT2Font_num_charmaps, NULL, NULL, NULL},
+ {(char *)"scalable", (getter)PyFT2Font_scalable, NULL, NULL, NULL},
+ {(char *)"units_per_EM", (getter)PyFT2Font_units_per_EM, NULL, NULL, NULL},
+ {(char *)"bbox", (getter)PyFT2Font_get_bbox, NULL, NULL, NULL},
+ {(char *)"ascender", (getter)PyFT2Font_ascender, NULL, NULL, NULL},
+ {(char *)"descender", (getter)PyFT2Font_descender, NULL, NULL, NULL},
+ {(char *)"height", (getter)PyFT2Font_height, NULL, NULL, NULL},
+ {(char *)"max_advance_width", (getter)PyFT2Font_max_advance_width, NULL, NULL, NULL},
+ {(char *)"max_advance_height", (getter)PyFT2Font_max_advance_height, NULL, NULL, NULL},
+ {(char *)"underline_position", (getter)PyFT2Font_underline_position, NULL, NULL, NULL},
+ {(char *)"underline_thickness", (getter)PyFT2Font_underline_thickness, NULL, NULL, NULL},
+ {(char *)"fname", (getter)PyFT2Font_fname, NULL, NULL, NULL},
+ {NULL}
+ };
+
+ static PyMethodDef methods[] = {
+ {"clear", (PyCFunction)PyFT2Font_clear, METH_NOARGS, PyFT2Font_clear__doc__},
+ {"set_size", (PyCFunction)PyFT2Font_set_size, METH_VARARGS, PyFT2Font_set_size__doc__},
+ {"set_charmap", (PyCFunction)PyFT2Font_set_charmap, METH_VARARGS, PyFT2Font_set_charmap__doc__},
+ {"select_charmap", (PyCFunction)PyFT2Font_select_charmap, METH_VARARGS, PyFT2Font_select_charmap__doc__},
+ {"get_kerning", (PyCFunction)PyFT2Font_get_kerning, METH_VARARGS, PyFT2Font_get_kerning__doc__},
+ {"set_text", (PyCFunction)PyFT2Font_set_text, METH_VARARGS|METH_KEYWORDS, PyFT2Font_set_text__doc__},
+ {"get_num_glyphs", (PyCFunction)PyFT2Font_get_num_glyphs, METH_NOARGS, PyFT2Font_get_num_glyphs__doc__},
+ {"load_char", (PyCFunction)PyFT2Font_load_char, METH_VARARGS|METH_KEYWORDS, PyFT2Font_load_char__doc__},
+ {"load_glyph", (PyCFunction)PyFT2Font_load_glyph, METH_VARARGS|METH_KEYWORDS, PyFT2Font_load_glyph__doc__},
+ {"get_width_height", (PyCFunction)PyFT2Font_get_width_height, METH_NOARGS, PyFT2Font_get_width_height__doc__},
+ {"get_bitmap_offset", (PyCFunction)PyFT2Font_get_bitmap_offset, METH_NOARGS, PyFT2Font_get_bitmap_offset__doc__},
+ {"get_descent", (PyCFunction)PyFT2Font_get_descent, METH_NOARGS, PyFT2Font_get_descent__doc__},
+ {"draw_glyphs_to_bitmap", (PyCFunction)PyFT2Font_draw_glyphs_to_bitmap, METH_VARARGS|METH_KEYWORDS, PyFT2Font_draw_glyphs_to_bitmap__doc__},
+ {"get_xys", (PyCFunction)PyFT2Font_get_xys, METH_VARARGS|METH_KEYWORDS, PyFT2Font_get_xys__doc__},
+ {"draw_glyph_to_bitmap", (PyCFunction)PyFT2Font_draw_glyph_to_bitmap, METH_VARARGS|METH_KEYWORDS, PyFT2Font_draw_glyph_to_bitmap__doc__},
+ {"get_glyph_name", (PyCFunction)PyFT2Font_get_glyph_name, METH_VARARGS, PyFT2Font_get_glyph_name__doc__},
+ {"get_charmap", (PyCFunction)PyFT2Font_get_charmap, METH_NOARGS, PyFT2Font_get_charmap__doc__},
+ {"get_char_index", (PyCFunction)PyFT2Font_get_char_index, METH_VARARGS, PyFT2Font_get_char_index__doc__},
+ {"get_sfnt", (PyCFunction)PyFT2Font_get_sfnt, METH_NOARGS, PyFT2Font_get_sfnt__doc__},
+ {"get_name_index", (PyCFunction)PyFT2Font_get_name_index, METH_VARARGS, PyFT2Font_get_name_index__doc__},
+ {"get_ps_font_info", (PyCFunction)PyFT2Font_get_ps_font_info, METH_NOARGS, PyFT2Font_get_ps_font_info__doc__},
+ {"get_sfnt_table", (PyCFunction)PyFT2Font_get_sfnt_table, METH_VARARGS, PyFT2Font_get_sfnt_table__doc__},
+ {"get_path", (PyCFunction)PyFT2Font_get_path, METH_NOARGS, PyFT2Font_get_path__doc__},
+ {"get_image", (PyCFunction)PyFT2Font_get_image, METH_NOARGS, PyFT2Font_get_path__doc__},
+ {NULL}
+ };
+
+ static PyBufferProcs buffer_procs;
+ memset(&buffer_procs, 0, sizeof(PyBufferProcs));
+ buffer_procs.bf_getbuffer = (getbufferproc)PyFT2Font_get_buffer;
+
+ memset(type, 0, sizeof(PyTypeObject));
+ type->tp_name = "matplotlib.ft2font.FT2Font";
+ type->tp_doc = PyFT2Font_init__doc__;
+ type->tp_basicsize = sizeof(PyFT2Font);
+ type->tp_dealloc = (destructor)PyFT2Font_dealloc;
+ type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_NEWBUFFER;
+ type->tp_methods = methods;
+ type->tp_getset = getset;
+ type->tp_new = PyFT2Font_new;
+ type->tp_init = (initproc)PyFT2Font_init;
+ type->tp_as_buffer = &buffer_procs;
+
+ if (PyType_Ready(type) < 0) {
+ return NULL;
+ }
+
+ if (PyModule_AddObject(m, "FT2Font", (PyObject *)type)) {
+ return NULL;
+ }
+
+ return type;
+}
+
+extern "C" {
+
+#if PY3K
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "ft2font",
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+#define INITERROR return NULL
+
+PyMODINIT_FUNC PyInit_ft2font(void)
+
+#else
+#define INITERROR return
+
+PyMODINIT_FUNC initft2font(void)
+#endif
+
+{
+ PyObject *m;
+
+#if PY3K
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule3("ft2font", NULL, NULL);
+#endif
+
+ if (m == NULL) {
+ INITERROR;
+ }
+
+ if (!PyFT2Image_init_type(m, &PyFT2ImageType)) {
+ INITERROR;
+ }
+
+ if (!PyGlyph_init_type(m, &PyGlyphType)) {
+ INITERROR;
+ }
+
+ if (!PyFT2Font_init_type(m, &PyFT2FontType)) {
+ INITERROR;
+ }
+
+ PyObject *d = PyModule_GetDict(m);
+
+ if (add_dict_int(d, "SCALABLE", FT_FACE_FLAG_SCALABLE) ||
+ add_dict_int(d, "FIXED_SIZES", FT_FACE_FLAG_FIXED_SIZES) ||
+ add_dict_int(d, "FIXED_WIDTH", FT_FACE_FLAG_FIXED_WIDTH) ||
+ add_dict_int(d, "SFNT", FT_FACE_FLAG_SFNT) ||
+ add_dict_int(d, "HORIZONTAL", FT_FACE_FLAG_HORIZONTAL) ||
+ add_dict_int(d, "VERTICAL", FT_FACE_FLAG_VERTICAL) ||
+ add_dict_int(d, "KERNING", FT_FACE_FLAG_KERNING) ||
+ add_dict_int(d, "FAST_GLYPHS", FT_FACE_FLAG_FAST_GLYPHS) ||
+ add_dict_int(d, "MULTIPLE_MASTERS", FT_FACE_FLAG_MULTIPLE_MASTERS) ||
+ add_dict_int(d, "GLYPH_NAMES", FT_FACE_FLAG_GLYPH_NAMES) ||
+ add_dict_int(d, "EXTERNAL_STREAM", FT_FACE_FLAG_EXTERNAL_STREAM) ||
+ add_dict_int(d, "ITALIC", FT_STYLE_FLAG_ITALIC) ||
+ add_dict_int(d, "BOLD", FT_STYLE_FLAG_BOLD) ||
+ add_dict_int(d, "KERNING_DEFAULT", FT_KERNING_DEFAULT) ||
+ add_dict_int(d, "KERNING_UNFITTED", FT_KERNING_UNFITTED) ||
+ add_dict_int(d, "KERNING_UNSCALED", FT_KERNING_UNSCALED) ||
+ add_dict_int(d, "LOAD_DEFAULT", FT_LOAD_DEFAULT) ||
+ add_dict_int(d, "LOAD_NO_SCALE", FT_LOAD_NO_SCALE) ||
+ add_dict_int(d, "LOAD_NO_HINTING", FT_LOAD_NO_HINTING) ||
+ add_dict_int(d, "LOAD_RENDER", FT_LOAD_RENDER) ||
+ add_dict_int(d, "LOAD_NO_BITMAP", FT_LOAD_NO_BITMAP) ||
+ add_dict_int(d, "LOAD_VERTICAL_LAYOUT", FT_LOAD_VERTICAL_LAYOUT) ||
+ add_dict_int(d, "LOAD_FORCE_AUTOHINT", FT_LOAD_FORCE_AUTOHINT) ||
+ add_dict_int(d, "LOAD_CROP_BITMAP", FT_LOAD_CROP_BITMAP) ||
+ add_dict_int(d, "LOAD_PEDANTIC", FT_LOAD_PEDANTIC) ||
+ add_dict_int(d, "LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH", FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH) ||
+ add_dict_int(d, "LOAD_NO_RECURSE", FT_LOAD_NO_RECURSE) ||
+ add_dict_int(d, "LOAD_IGNORE_TRANSFORM", FT_LOAD_IGNORE_TRANSFORM) ||
+ add_dict_int(d, "LOAD_MONOCHROME", FT_LOAD_MONOCHROME) ||
+ add_dict_int(d, "LOAD_LINEAR_DESIGN", FT_LOAD_LINEAR_DESIGN) ||
+ add_dict_int(d, "LOAD_NO_AUTOHINT", (unsigned long)FT_LOAD_NO_AUTOHINT) ||
+ add_dict_int(d, "LOAD_TARGET_NORMAL", (unsigned long)FT_LOAD_TARGET_NORMAL) ||
+ add_dict_int(d, "LOAD_TARGET_LIGHT", (unsigned long)FT_LOAD_TARGET_LIGHT) ||
+ add_dict_int(d, "LOAD_TARGET_MONO", (unsigned long)FT_LOAD_TARGET_MONO) ||
+ add_dict_int(d, "LOAD_TARGET_LCD", (unsigned long)FT_LOAD_TARGET_LCD) ||
+ add_dict_int(d, "LOAD_TARGET_LCD_V", (unsigned long)FT_LOAD_TARGET_LCD_V)) {
+ INITERROR;
+ }
+
+ // initialize library
+ int error = FT_Init_FreeType(&_ft2Library);
+
+ if (error) {
+ PyErr_SetString(PyExc_RuntimeError, "Could not initialize the freetype2 library");
+ INITERROR;
+ }
+
+ {
+ FT_Int major, minor, patch;
+ char version_string[64];
+
+ FT_Library_Version(_ft2Library, &major, &minor, &patch);
+ sprintf(version_string, "%d.%d.%d", major, minor, patch);
+ if (PyModule_AddStringConstant(m, "__freetype_version__", version_string)) {
+ INITERROR;
+ }
+ }
+
+ if (PyModule_AddStringConstant(m, "__freetype_build_type__", STRINGIFY(FREETYPE_BUILD_TYPE))) {
+ INITERROR;
+ }
+
+ import_array();
+
+#if PY3K
+ return m;
+#endif
+}
+
+} // extern "C"
diff --git a/contrib/python/matplotlib/py2/src/mplutils.cpp b/contrib/python/matplotlib/py2/src/mplutils.cpp
new file mode 100644
index 00000000000..bc09db52aa9
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/mplutils.cpp
@@ -0,0 +1,21 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+#include "mplutils.h"
+
+int add_dict_int(PyObject *dict, const char *key, long val)
+{
+ PyObject *valobj;
+ valobj = PyLong_FromLong(val);
+ if (valobj == NULL) {
+ return 1;
+ }
+
+ if (PyDict_SetItemString(dict, (char *)key, valobj)) {
+ Py_DECREF(valobj);
+ return 1;
+ }
+
+ Py_DECREF(valobj);
+
+ return 0;
+}
diff --git a/contrib/python/matplotlib/py2/src/mplutils.h b/contrib/python/matplotlib/py2/src/mplutils.h
new file mode 100644
index 00000000000..140a8156347
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/mplutils.h
@@ -0,0 +1,72 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+/* Small utilities that are shared by most extension modules. */
+
+#ifndef _MPLUTILS_H
+#define _MPLUTILS_H
+
+#if defined(_MSC_VER) && _MSC_VER <= 1600
+typedef unsigned __int8 uint8_t;
+#else
+#include <stdint.h>
+#endif
+
+#ifdef _POSIX_C_SOURCE
+# undef _POSIX_C_SOURCE
+#endif
+#ifndef _AIX
+#ifdef _XOPEN_SOURCE
+# undef _XOPEN_SOURCE
+#endif
+#endif
+
+// Prevent multiple conflicting definitions of swab from stdlib.h and unistd.h
+#if defined(__sun) || defined(sun)
+#if defined(_XPG4)
+#undef _XPG4
+#endif
+#if defined(_XPG3)
+#undef _XPG3
+#endif
+#endif
+
+#include <Python.h>
+
+#if PY_MAJOR_VERSION >= 3
+#define PY3K 1
+#define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#else
+#define PY3K 0
+#endif
+
+#undef CLAMP
+#define CLAMP(x, low, high) (((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x)))
+
+#undef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
+inline double mpl_round(double v)
+{
+ return (double)(int)(v + ((v >= 0.0) ? 0.5 : -0.5));
+}
+
+enum {
+ STOP = 0,
+ MOVETO = 1,
+ LINETO = 2,
+ CURVE3 = 3,
+ CURVE4 = 4,
+ ENDPOLY = 0x4f
+};
+
+const size_t NUM_VERTICES[] = { 1, 1, 1, 2, 3, 1 };
+
+extern "C" int add_dict_int(PyObject *dict, const char *key, long val);
+
+#if defined(_MSC_VER) && (_MSC_VER < 1800)
+namespace std {
+ inline bool isfinite(double num) { return _finite(num); }
+}
+#endif
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/numpy_cpp.h b/contrib/python/matplotlib/py2/src/numpy_cpp.h
new file mode 100644
index 00000000000..03b4a695d18
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/numpy_cpp.h
@@ -0,0 +1,569 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+#ifndef _NUMPY_CPP_H_
+#define _NUMPY_CPP_H_
+
+/***************************************************************************
+ * This file is based on original work by Mark Wiebe, available at:
+ *
+ * http://github.com/mwiebe/numpy-cpp
+ *
+ * However, the needs of matplotlib wrappers, such as treating an
+ * empty array as having the correct dimensions, have made this rather
+ * matplotlib-specific, so it's no longer compatible with the
+ * original.
+ */
+
+#include "py_exceptions.h"
+
+#include <complex>
+
+#ifdef _POSIX_C_SOURCE
+# undef _POSIX_C_SOURCE
+#endif
+#ifndef _AIX
+#ifdef _XOPEN_SOURCE
+# undef _XOPEN_SOURCE
+#endif
+#endif
+
+// Prevent multiple conflicting definitions of swab from stdlib.h and unistd.h
+#if defined(__sun) || defined(sun)
+#if defined(_XPG4)
+#undef _XPG4
+#endif
+#if defined(_XPG3)
+#undef _XPG3
+#endif
+#endif
+
+#include <Python.h>
+#include <numpy/ndarrayobject.h>
+
+namespace numpy
+{
+
+// Type traits for the NumPy types
+template <typename T>
+struct type_num_of;
+
+/* Be careful with bool arrays as python has sizeof(npy_bool) == 1, but it is
+ * not always the case that sizeof(bool) == 1. Using the array_view_accessors
+ * is always fine regardless of sizeof(bool), so do this rather than using
+ * array.data() and pointer arithmetic which will not work correctly if
+ * sizeof(bool) != 1. */
+template <> struct type_num_of<bool>
+{
+ enum {
+ value = NPY_BOOL
+ };
+};
+template <>
+struct type_num_of<npy_byte>
+{
+ enum {
+ value = NPY_BYTE
+ };
+};
+template <>
+struct type_num_of<npy_ubyte>
+{
+ enum {
+ value = NPY_UBYTE
+ };
+};
+template <>
+struct type_num_of<npy_short>
+{
+ enum {
+ value = NPY_SHORT
+ };
+};
+template <>
+struct type_num_of<npy_ushort>
+{
+ enum {
+ value = NPY_USHORT
+ };
+};
+template <>
+struct type_num_of<npy_int>
+{
+ enum {
+ value = NPY_INT
+ };
+};
+template <>
+struct type_num_of<npy_uint>
+{
+ enum {
+ value = NPY_UINT
+ };
+};
+template <>
+struct type_num_of<npy_long>
+{
+ enum {
+ value = NPY_LONG
+ };
+};
+template <>
+struct type_num_of<npy_ulong>
+{
+ enum {
+ value = NPY_ULONG
+ };
+};
+template <>
+struct type_num_of<npy_longlong>
+{
+ enum {
+ value = NPY_LONGLONG
+ };
+};
+template <>
+struct type_num_of<npy_ulonglong>
+{
+ enum {
+ value = NPY_ULONGLONG
+ };
+};
+template <>
+struct type_num_of<npy_float>
+{
+ enum {
+ value = NPY_FLOAT
+ };
+};
+template <>
+struct type_num_of<npy_double>
+{
+ enum {
+ value = NPY_DOUBLE
+ };
+};
+#if NPY_LONGDOUBLE != NPY_DOUBLE
+template <>
+struct type_num_of<npy_longdouble>
+{
+ enum {
+ value = NPY_LONGDOUBLE
+ };
+};
+#endif
+template <>
+struct type_num_of<npy_cfloat>
+{
+ enum {
+ value = NPY_CFLOAT
+ };
+};
+template <>
+struct type_num_of<std::complex<npy_float> >
+{
+ enum {
+ value = NPY_CFLOAT
+ };
+};
+template <>
+struct type_num_of<npy_cdouble>
+{
+ enum {
+ value = NPY_CDOUBLE
+ };
+};
+template <>
+struct type_num_of<std::complex<npy_double> >
+{
+ enum {
+ value = NPY_CDOUBLE
+ };
+};
+#if NPY_CLONGDOUBLE != NPY_CDOUBLE
+template <>
+struct type_num_of<npy_clongdouble>
+{
+ enum {
+ value = NPY_CLONGDOUBLE
+ };
+};
+template <>
+struct type_num_of<std::complex<npy_longdouble> >
+{
+ enum {
+ value = NPY_CLONGDOUBLE
+ };
+};
+#endif
+template <>
+struct type_num_of<PyObject *>
+{
+ enum {
+ value = NPY_OBJECT
+ };
+};
+template <typename T>
+struct type_num_of<T &>
+{
+ enum {
+ value = type_num_of<T>::value
+ };
+};
+template <typename T>
+struct type_num_of<const T>
+{
+ enum {
+ value = type_num_of<T>::value
+ };
+};
+
+template <typename T>
+struct is_const
+{
+ enum {
+ value = false
+ };
+};
+template <typename T>
+struct is_const<const T>
+{
+ enum {
+ value = true
+ };
+};
+
+namespace detail
+{
+template <template <typename, int> class AV, typename T, int ND>
+class array_view_accessors;
+
+template <template <typename, int> class AV, typename T>
+class array_view_accessors<AV, T, 1>
+{
+ public:
+ typedef AV<T, 1> AVC;
+ typedef T sub_t;
+
+ T &operator()(npy_intp i)
+ {
+ AVC *self = static_cast<AVC *>(this);
+
+ return *reinterpret_cast<T *>(self->m_data + self->m_strides[0] * i);
+ }
+
+ const T &operator()(npy_intp i) const
+ {
+ const AVC *self = static_cast<const AVC *>(this);
+
+ return *reinterpret_cast<const T *>(self->m_data + self->m_strides[0] * i);
+ }
+
+ T &operator[](npy_intp i)
+ {
+ AVC *self = static_cast<AVC *>(this);
+
+ return *reinterpret_cast<T *>(self->m_data + self->m_strides[0] * i);
+ }
+
+ const T &operator[](npy_intp i) const
+ {
+ const AVC *self = static_cast<const AVC *>(this);
+
+ return *reinterpret_cast<const T *>(self->m_data + self->m_strides[0] * i);
+ }
+};
+
+template <template <typename, int> class AV, typename T>
+class array_view_accessors<AV, T, 2>
+{
+ public:
+ typedef AV<T, 2> AVC;
+ typedef AV<T, 1> sub_t;
+
+ T &operator()(npy_intp i, npy_intp j)
+ {
+ AVC *self = static_cast<AVC *>(this);
+
+ return *reinterpret_cast<T *>(self->m_data + self->m_strides[0] * i +
+ self->m_strides[1] * j);
+ }
+
+ const T &operator()(npy_intp i, npy_intp j) const
+ {
+ const AVC *self = static_cast<const AVC *>(this);
+
+ return *reinterpret_cast<const T *>(self->m_data + self->m_strides[0] * i +
+ self->m_strides[1] * j);
+ }
+
+ sub_t subarray(npy_intp i) const
+ {
+ const AVC *self = static_cast<const AVC *>(this);
+
+ return sub_t(self->m_arr,
+ self->m_data + self->m_strides[0] * i,
+ self->m_shape + 1,
+ self->m_strides + 1);
+ }
+};
+
+template <template <typename, int> class AV, typename T>
+class array_view_accessors<AV, T, 3>
+{
+ public:
+ typedef AV<T, 3> AVC;
+ typedef AV<T, 2> sub_t;
+
+ T &operator()(npy_intp i, npy_intp j, npy_intp k)
+ {
+ AVC *self = static_cast<AVC *>(this);
+
+ return *reinterpret_cast<T *>(self->m_data + self->m_strides[0] * i +
+ self->m_strides[1] * j + self->m_strides[2] * k);
+ }
+
+ const T &operator()(npy_intp i, npy_intp j, npy_intp k) const
+ {
+ const AVC *self = static_cast<const AVC *>(this);
+
+ return *reinterpret_cast<const T *>(self->m_data + self->m_strides[0] * i +
+ self->m_strides[1] * j + self->m_strides[2] * k);
+ }
+
+ sub_t subarray(npy_intp i) const
+ {
+ const AVC *self = static_cast<const AVC *>(this);
+
+ return sub_t(self->m_arr,
+ self->m_data + self->m_strides[0] * i,
+ self->m_shape + 1,
+ self->m_strides + 1);
+ }
+
+
+};
+
+// When adding instantiations of array_view_accessors, remember to add entries
+// to zeros[] below.
+
+}
+
+static npy_intp zeros[] = { 0, 0, 0 };
+
+template <typename T, int ND>
+class array_view : public detail::array_view_accessors<array_view, T, ND>
+{
+ friend class detail::array_view_accessors<numpy::array_view, T, ND>;
+
+ private:
+ // Copies of the array data
+ PyArrayObject *m_arr;
+ npy_intp *m_shape;
+ npy_intp *m_strides;
+ char *m_data;
+
+ public:
+ typedef T value_type;
+
+ enum {
+ ndim = ND
+ };
+
+ array_view() : m_arr(NULL), m_data(NULL)
+ {
+ m_shape = zeros;
+ m_strides = zeros;
+ }
+
+ array_view(PyObject *arr, bool contiguous = false) : m_arr(NULL), m_data(NULL)
+ {
+ if (!set(arr, contiguous)) {
+ throw py::exception();
+ }
+ }
+
+ array_view(const array_view &other) : m_arr(NULL), m_data(NULL)
+ {
+ m_arr = other.m_arr;
+ Py_XINCREF(m_arr);
+ m_data = other.m_data;
+ m_shape = other.m_shape;
+ m_strides = other.m_strides;
+ }
+
+ array_view(PyArrayObject *arr, char *data, npy_intp *shape, npy_intp *strides)
+ {
+ m_arr = arr;
+ Py_XINCREF(arr);
+ m_data = data;
+ m_shape = shape;
+ m_strides = strides;
+ }
+
+ array_view(npy_intp shape[ND]) : m_arr(NULL), m_shape(NULL), m_strides(NULL), m_data(NULL)
+ {
+ PyObject *arr = PyArray_SimpleNew(ND, shape, type_num_of<T>::value);
+ if (arr == NULL) {
+ throw py::exception();
+ }
+ if (!set(arr, true)) {
+ Py_DECREF(arr);
+ throw py::exception();
+ }
+ Py_DECREF(arr);
+ }
+
+ ~array_view()
+ {
+ Py_XDECREF(m_arr);
+ }
+
+ array_view& operator=(const array_view &other)
+ {
+ if (this != &other)
+ {
+ Py_XDECREF(m_arr);
+ m_arr = other.m_arr;
+ Py_XINCREF(m_arr);
+ m_data = other.m_data;
+ m_shape = other.m_shape;
+ m_strides = other.m_strides;
+ }
+ return *this;
+ }
+
+ int set(PyObject *arr, bool contiguous = false)
+ {
+ PyArrayObject *tmp;
+
+ if (arr == NULL || arr == Py_None) {
+ Py_XDECREF(m_arr);
+ m_arr = NULL;
+ m_data = NULL;
+ m_shape = zeros;
+ m_strides = zeros;
+ } else {
+ if (contiguous) {
+ tmp = (PyArrayObject *)PyArray_ContiguousFromAny(arr, type_num_of<T>::value, 0, ND);
+ } else {
+ tmp = (PyArrayObject *)PyArray_FromObject(arr, type_num_of<T>::value, 0, ND);
+ }
+ if (tmp == NULL) {
+ return 0;
+ }
+
+ if (PyArray_NDIM(tmp) == 0 || PyArray_DIM(tmp, 0) == 0) {
+ Py_XDECREF(m_arr);
+ m_arr = NULL;
+ m_data = NULL;
+ m_shape = zeros;
+ m_strides = zeros;
+ if (PyArray_NDIM(tmp) == 0 && ND == 0) {
+ m_arr = tmp;
+ return 1;
+ }
+ }
+ if (PyArray_NDIM(tmp) != ND) {
+ PyErr_Format(PyExc_ValueError,
+ "Expected %d-dimensional array, got %d",
+ ND,
+ PyArray_NDIM(tmp));
+ Py_DECREF(tmp);
+ return 0;
+ }
+
+ /* Copy some of the data to the view object for faster access */
+ Py_XDECREF(m_arr);
+ m_arr = tmp;
+ m_shape = PyArray_DIMS(m_arr);
+ m_strides = PyArray_STRIDES(m_arr);
+ m_data = (char *)PyArray_BYTES(tmp);
+ }
+
+ return 1;
+ }
+
+ npy_intp dim(size_t i) const
+ {
+ if (i >= ND) {
+ return 0;
+ }
+ return m_shape[i];
+ }
+
+ /*
+ In most cases, code should use size() instead of dim(0), since
+ size() == 0 when any dimension is 0.
+ */
+ size_t size() const
+ {
+ bool empty = (ND == 0);
+ for (size_t i = 0; i < ND; i++) {
+ if (m_shape[i] == 0) {
+ empty = true;
+ }
+ }
+ if (empty) {
+ return 0;
+ } else {
+ return (size_t)dim(0);
+ }
+ }
+
+ bool empty() const
+ {
+ return size() == 0;
+ }
+
+ // Do not use this for array_view<bool, ND>. See comment near top of file.
+ const T *data() const
+ {
+ return (const T *)m_data;
+ }
+
+ // Do not use this for array_view<bool, ND>. See comment near top of file.
+ T *data()
+ {
+ return (T *)m_data;
+ }
+
+ // Return a new reference.
+ PyObject *pyobj()
+ {
+ Py_XINCREF(m_arr);
+ return (PyObject *)m_arr;
+ }
+
+ // Steal a reference.
+ PyObject *pyobj_steal()
+ {
+ return (PyObject *)m_arr;
+ }
+
+ static int converter(PyObject *obj, void *arrp)
+ {
+ array_view<T, ND> *arr = (array_view<T, ND> *)arrp;
+
+ if (!arr->set(obj)) {
+ return 0;
+ }
+
+ return 1;
+ }
+
+ static int converter_contiguous(PyObject *obj, void *arrp)
+ {
+ array_view<T, ND> *arr = (array_view<T, ND> *)arrp;
+
+ if (!arr->set(obj, true)) {
+ return 0;
+ }
+
+ return 1;
+ }
+};
+
+} // namespace numpy
+
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/path_converters.h b/contrib/python/matplotlib/py2/src/path_converters.h
new file mode 100644
index 00000000000..db40c18d5ab
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/path_converters.h
@@ -0,0 +1,1011 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+#ifndef __PATH_CONVERTERS_H__
+#define __PATH_CONVERTERS_H__
+
+#include <cmath>
+#include <stdint.h>
+#include "agg_path_storage.h"
+#include "agg_clip_liang_barsky.h"
+#include "mplutils.h"
+#include "agg_conv_segmentator.h"
+
+/*
+ This file contains a number of vertex converters that modify
+ paths. They all work as iterators, where the output is generated
+ on-the-fly, and don't require a copy of the full data.
+
+ Each class represents a discrete step in a "path-cleansing" pipeline.
+ They are currently applied in the following order in the Agg backend:
+
+ 1. Affine transformation (implemented in Agg, not here)
+
+ 2. PathNanRemover: skips over segments containing non-finite numbers
+ by inserting MOVETO commands
+
+ 3. PathClipper: Clips line segments to a given rectangle. This is
+ helpful for data reduction, and also to avoid a limitation in
+ Agg where coordinates cannot be larger than 24-bit signed
+ integers.
+
+ 4. PathSnapper: Rounds the path to the nearest center-pixels.
+ This makes rectilinear curves look much better.
+
+ 5. PathSimplifier: Removes line segments from highly dense paths
+ that would not have an impact on their appearance. Speeds up
+ rendering and reduces file sizes.
+
+ 6. curve-to-line-segment conversion (implemented in Agg, not here)
+
+ 7. stroking (implemented in Agg, not here)
+ */
+
+/************************************************************
+ This is a base class for vertex converters that need to queue their
+ output. It is designed to be as fast as possible; the STL's queue is
+ more flexible, but slower for this purpose.
+ */
+template <int QueueSize>
+class EmbeddedQueue
+{
+ protected:
+ EmbeddedQueue() : m_queue_read(0), m_queue_write(0)
+ {
+ // empty
+ }
+
+ struct item
+ {
+ item()
+ {
+ }
+
+ inline void set(const unsigned cmd_, const double x_, const double y_)
+ {
+ cmd = cmd_;
+ x = x_;
+ y = y_;
+ }
+ unsigned cmd;
+ double x;
+ double y;
+ };
+ int m_queue_read;
+ int m_queue_write;
+ item m_queue[QueueSize];
+
+ inline void queue_push(const unsigned cmd, const double x, const double y)
+ {
+ m_queue[m_queue_write++].set(cmd, x, y);
+ }
+
+ inline bool queue_nonempty()
+ {
+ return m_queue_read < m_queue_write;
+ }
+
+ inline bool queue_pop(unsigned *cmd, double *x, double *y)
+ {
+ if (queue_nonempty()) {
+ const item &front = m_queue[m_queue_read++];
+ *cmd = front.cmd;
+ *x = front.x;
+ *y = front.y;
+
+ return true;
+ }
+
+ m_queue_read = 0;
+ m_queue_write = 0;
+
+ return false;
+ }
+
+ inline void queue_clear()
+ {
+ m_queue_read = 0;
+ m_queue_write = 0;
+ }
+};
+
+/* Defines when path segment types have more than one vertex */
+static const size_t num_extra_points_map[] =
+ {0, 0, 0, 1,
+ 2, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0
+ };
+
+/* An implementation of a simple linear congruential random number
+ generator. This is a "classic" and fast RNG which works fine for
+ our purposes of sketching lines, but should not be used for things
+ that matter, like crypto. We are implementing this ourselves
+ rather than using the C stdlib so that the seed state is not shared
+ with other third-party code. There are recent C++ options, but we
+ still require nothing later than C++98 for compatibility
+ reasons. */
+class RandomNumberGenerator
+{
+private:
+ /* These are the same constants from MS Visual C++, which
+ has the nice property that the modulus is 2^32, thus
+ saving an explicit modulo operation
+ */
+ static const uint32_t a = 214013;
+ static const uint32_t c = 2531011;
+ uint32_t m_seed;
+
+public:
+ RandomNumberGenerator() : m_seed(0) {}
+ RandomNumberGenerator(int seed) : m_seed(seed) {}
+
+ void seed(int seed)
+ {
+ m_seed = seed;
+ }
+
+ double get_double()
+ {
+ m_seed = (a * m_seed + c);
+ return (double)m_seed / (double)(1LL << 32);
+ }
+};
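+
+/* The recurrence above, written out for clarity (a sketch; the seed value
+   is arbitrary):
+
+       seed = 1
+       seed = (214013 * seed + 2531011) % 2**32   # uint32_t wrap-around
+       value = seed / 2**32                       # uniform in [0, 1)
+
+   Only the modest statistical quality needed for line sketching is intended. */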
+
+/*
+ PathNanRemover is a vertex converter that removes non-finite values
+ from the vertices list, and inserts MOVETO commands as necessary to
+ skip over them. If a curve segment contains at least one non-finite
+ value, the entire curve segment will be skipped.
+ */
+template <class VertexSource>
+class PathNanRemover : protected EmbeddedQueue<4>
+{
+ VertexSource *m_source;
+ bool m_remove_nans;
+ bool m_has_curves;
+
+ public:
+ /* has_curves should be true if the path contains bezier curve
+ segments, as this requires a slower algorithm to remove the
+ NaNs. When in doubt, set to true.
+ */
+ PathNanRemover(VertexSource &source, bool remove_nans, bool has_curves)
+ : m_source(&source), m_remove_nans(remove_nans), m_has_curves(has_curves)
+ {
+ // empty
+ }
+
+ inline void rewind(unsigned path_id)
+ {
+ queue_clear();
+ m_source->rewind(path_id);
+ }
+
+ inline unsigned vertex(double *x, double *y)
+ {
+ unsigned code;
+
+ if (!m_remove_nans) {
+ return m_source->vertex(x, y);
+ }
+
+ if (m_has_curves) {
+ /* This is the slow method for when there might be curves. */
+ if (queue_pop(&code, x, y)) {
+ return code;
+ }
+
+ bool needs_move_to = false;
+ while (true) {
+ /* The approach here is to push each full curve
+ segment into the queue. If any non-finite values
+ are found along the way, the queue is emptied, and
+ the next curve segment is handled. */
+ code = m_source->vertex(x, y);
+ if (code == agg::path_cmd_stop ||
+ code == (agg::path_cmd_end_poly | agg::path_flags_close)) {
+ return code;
+ }
+
+ if (needs_move_to) {
+ queue_push(agg::path_cmd_move_to, *x, *y);
+ }
+
+ size_t num_extra_points = num_extra_points_map[code & 0xF];
+ bool has_nan = (!(std::isfinite(*x) && std::isfinite(*y)));
+ queue_push(code, *x, *y);
+
+ /* Note: this test cannot be short-circuited, since we need to
+ advance through the entire curve no matter what */
+ for (size_t i = 0; i < num_extra_points; ++i) {
+ m_source->vertex(x, y);
+ has_nan = has_nan || !(std::isfinite(*x) && std::isfinite(*y));
+ queue_push(code, *x, *y);
+ }
+
+ if (!has_nan) {
+ break;
+ }
+
+ queue_clear();
+
+ /* If the last point is finite, we use that for the
+ moveto, otherwise, we'll use the first vertex of
+ the next curve. */
+ if (std::isfinite(*x) && std::isfinite(*y)) {
+ queue_push(agg::path_cmd_move_to, *x, *y);
+ needs_move_to = false;
+ } else {
+ needs_move_to = true;
+ }
+ }
+
+ if (queue_pop(&code, x, y)) {
+ return code;
+ } else {
+ return agg::path_cmd_stop;
+ }
+ } else // !m_has_curves
+ {
+ /* This is the fast path for when we know we have no curves */
+ code = m_source->vertex(x, y);
+
+ if (code == agg::path_cmd_stop ||
+ code == (agg::path_cmd_end_poly | agg::path_flags_close)) {
+ return code;
+ }
+
+ if (!(std::isfinite(*x) && std::isfinite(*y))) {
+ do {
+ code = m_source->vertex(x, y);
+ if (code == agg::path_cmd_stop ||
+ code == (agg::path_cmd_end_poly | agg::path_flags_close)) {
+ return code;
+ }
+ } while (!(std::isfinite(*x) && std::isfinite(*y)));
+ return agg::path_cmd_move_to;
+ }
+
+ return code;
+ }
+ }
+};
+
+/************************************************************
+ PathClipper uses the Liang-Barsky line clipping algorithm (as
+ implemented in Agg) to clip the path to a given rectangle. Lines
+ will never extend outside of the rectangle. Curve segments are not
+ clipped, but are always included in their entirety.
+ */
+template <class VertexSource>
+class PathClipper : public EmbeddedQueue<3>
+{
+ VertexSource *m_source;
+ bool m_do_clipping;
+ agg::rect_base<double> m_cliprect;
+ double m_lastX;
+ double m_lastY;
+ bool m_moveto;
+ double m_initX;
+ double m_initY;
+ bool m_has_init;
+
+ public:
+ PathClipper(VertexSource &source, bool do_clipping, double width, double height)
+ : m_source(&source),
+ m_do_clipping(do_clipping),
+ m_cliprect(-1.0, -1.0, width + 1.0, height + 1.0),
+ m_moveto(true),
+ m_has_init(false)
+ {
+ // empty
+ }
+
+ PathClipper(VertexSource &source, bool do_clipping, const agg::rect_base<double> &rect)
+ : m_source(&source),
+ m_do_clipping(do_clipping),
+ m_cliprect(rect),
+ m_moveto(true),
+ m_has_init(false)
+ {
+ m_cliprect.x1 -= 1.0;
+ m_cliprect.y1 -= 1.0;
+ m_cliprect.x2 += 1.0;
+ m_cliprect.y2 += 1.0;
+ }
+
+ inline void rewind(unsigned path_id)
+ {
+ m_has_init = false;
+ m_moveto = true;
+ m_source->rewind(path_id);
+ }
+
+ int draw_clipped_line(double x0, double y0, double x1, double y1)
+ {
+ unsigned moved = agg::clip_line_segment(&x0, &y0, &x1, &y1, m_cliprect);
+ // moved >= 4 - Fully clipped
+ // moved & 1 != 0 - First point has been moved
+ // moved & 2 != 0 - Second point has been moved
+ if (moved < 4) {
+ if (moved & 1 || m_moveto) {
+ queue_push(agg::path_cmd_move_to, x0, y0);
+ }
+ queue_push(agg::path_cmd_line_to, x1, y1);
+
+ m_moveto = false;
+ return 1;
+ }
+
+ return 0;
+ }
+
+ unsigned vertex(double *x, double *y)
+ {
+ unsigned code;
+ bool emit_moveto = false;
+
+ if (m_do_clipping) {
+ /* This is the slow path where we actually do clipping */
+
+ if (queue_pop(&code, x, y)) {
+ return code;
+ }
+
+ while ((code = m_source->vertex(x, y)) != agg::path_cmd_stop) {
+ emit_moveto = false;
+
+ switch (code) {
+ case (agg::path_cmd_end_poly | agg::path_flags_close):
+ if (m_has_init) {
+ draw_clipped_line(m_lastX, m_lastY, m_initX, m_initY);
+ }
+ queue_push(
+ agg::path_cmd_end_poly | agg::path_flags_close,
+ m_lastX, m_lastY);
+ goto exit_loop;
+
+ case agg::path_cmd_move_to:
+
+ // Was the last command a moveto (and have we seen at least
+ // one command)? If so, push it onto the queue, provided it
+ // lies within the clip box.
+ if (m_moveto && m_has_init &&
+ m_lastX >= m_cliprect.x1 &&
+ m_lastX <= m_cliprect.x2 &&
+ m_lastY >= m_cliprect.y1 &&
+ m_lastY <= m_cliprect.y2) {
+ // push the last moveto onto the queue
+ queue_push(agg::path_cmd_move_to, m_lastX, m_lastY);
+ // flag that we need to emit it
+ emit_moveto = true;
+ }
+ // update the internal state for this moveto
+ m_initX = m_lastX = *x;
+ m_initY = m_lastY = *y;
+ m_has_init = true;
+ m_moveto = true;
+ // if the last command was moveto exit the loop to emit the code
+ if (emit_moveto) {
+ goto exit_loop;
+ }
+ // else, break and get the next point
+ break;
+
+ case agg::path_cmd_line_to:
+ if (draw_clipped_line(m_lastX, m_lastY, *x, *y)) {
+ m_lastX = *x;
+ m_lastY = *y;
+ goto exit_loop;
+ }
+ m_lastX = *x;
+ m_lastY = *y;
+ break;
+
+ default:
+ if (m_moveto) {
+ queue_push(agg::path_cmd_move_to, m_lastX, m_lastY);
+ m_moveto = false;
+ }
+
+ queue_push(code, *x, *y);
+ m_lastX = *x;
+ m_lastY = *y;
+ goto exit_loop;
+ }
+ }
+
+ exit_loop:
+
+ if (queue_pop(&code, x, y)) {
+ return code;
+ }
+
+ if (m_moveto &&
+ m_lastX >= m_cliprect.x1 &&
+ m_lastX <= m_cliprect.x2 &&
+ m_lastY >= m_cliprect.y1 &&
+ m_lastY <= m_cliprect.y2) {
+ *x = m_lastX;
+ *y = m_lastY;
+ m_moveto = false;
+ return agg::path_cmd_move_to;
+ }
+
+ return agg::path_cmd_stop;
+ } else {
+ // If not doing any clipping, just pass along the vertices
+ // verbatim
+ return m_source->vertex(x, y);
+ }
+ }
+};
+
+/************************************************************
+ PathSnapper rounds vertices to their nearest center-pixels. This
+ makes rectilinear paths (rectangles, horizontal and vertical lines
+ etc.) look much cleaner.
+*/
+enum e_snap_mode {
+ SNAP_AUTO,
+ SNAP_FALSE,
+ SNAP_TRUE
+};
+
+template <class VertexSource>
+class PathSnapper
+{
+ private:
+ VertexSource *m_source;
+ bool m_snap;
+ double m_snap_value;
+
+ static bool should_snap(VertexSource &path, e_snap_mode snap_mode, unsigned total_vertices)
+ {
+ // If this contains only straight horizontal or vertical lines, it should be
+ // snapped to the nearest pixels
+ double x0 = 0, y0 = 0, x1 = 0, y1 = 0;
+ unsigned code;
+
+ switch (snap_mode) {
+ case SNAP_AUTO:
+ if (total_vertices > 1024) {
+ return false;
+ }
+
+ code = path.vertex(&x0, &y0);
+ if (code == agg::path_cmd_stop) {
+ return false;
+ }
+
+ while ((code = path.vertex(&x1, &y1)) != agg::path_cmd_stop) {
+ switch (code) {
+ case agg::path_cmd_curve3:
+ case agg::path_cmd_curve4:
+ return false;
+ case agg::path_cmd_line_to:
+ if (!(fabs(x0 - x1) < 1e-4 || fabs(y0 - y1) < 1e-4)) {
+ return false;
+ }
+ }
+ x0 = x1;
+ y0 = y1;
+ }
+
+ return true;
+ case SNAP_FALSE:
+ return false;
+ case SNAP_TRUE:
+ return true;
+ }
+
+ return false;
+ }
+
+ public:
+ /*
+ snap_mode should be one of:
+ - SNAP_AUTO: Examine the path to determine if it should be snapped
+ - SNAP_TRUE: Force snapping
+ - SNAP_FALSE: No snapping
+ */
+ PathSnapper(VertexSource &source,
+ e_snap_mode snap_mode,
+ unsigned total_vertices = 15,
+ double stroke_width = 0.0)
+ : m_source(&source)
+ {
+ m_snap = should_snap(source, snap_mode, total_vertices);
+
+ if (m_snap) {
+ int is_odd = (int)mpl_round(stroke_width) % 2;
+ m_snap_value = (is_odd) ? 0.5 : 0.0;
+ }
+
+ source.rewind(0);
+ }
+
+ inline void rewind(unsigned path_id)
+ {
+ m_source->rewind(path_id);
+ }
+
+ inline unsigned vertex(double *x, double *y)
+ {
+ unsigned code;
+ code = m_source->vertex(x, y);
+ if (m_snap && agg::is_vertex(code)) {
+ *x = floor(*x + 0.5) + m_snap_value;
+ *y = floor(*y + 0.5) + m_snap_value;
+ }
+ return code;
+ }
+
+ inline bool is_snapping()
+ {
+ return m_snap;
+ }
+};
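+
+/* Worked example of the snapping rule above: for an odd-width stroke
+   (e.g. 1 pixel), m_snap_value is 0.5, so a vertex at x = 10.3 becomes
+   floor(10.3 + 0.5) + 0.5 = 10.5, a pixel center, and the 1-pixel line
+   covers exactly one pixel column. For even or zero widths the offset is
+   0.0 and vertices land on pixel boundaries instead. */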
+
+/************************************************************
+ PathSimplifier reduces the number of vertices in a dense path without
+ changing its appearance.
+*/
+template <class VertexSource>
+class PathSimplifier : protected EmbeddedQueue<9>
+{
+ public:
+ /* Set simplify to true to perform simplification */
+ PathSimplifier(VertexSource &source, bool do_simplify, double simplify_threshold)
+ : m_source(&source),
+ m_simplify(do_simplify),
+ /* we square simplify_threshold so that we can compute
+ norms without doing the square root every step. */
+ m_simplify_threshold(simplify_threshold * simplify_threshold),
+
+ m_moveto(true),
+ m_after_moveto(false),
+ m_clipped(false),
+
+ // the x, y values from last iteration
+ m_lastx(0.0),
+ m_lasty(0.0),
+
+ // the dx, dy comprising the original vector, used in conjunction
+ // with m_currVecStart* to define the original vector.
+ m_origdx(0.0),
+ m_origdy(0.0),
+
+ // the squared norm of the original vector
+ m_origdNorm2(0.0),
+
+ // maximum squared norm of vector in forward (parallel) direction
+ m_dnorm2ForwardMax(0.0),
+ // maximum squared norm of vector in backward (anti-parallel) direction
+ m_dnorm2BackwardMax(0.0),
+
+ // was the last point the furthest from lastWritten in the
+ // forward (parallel) direction?
+ m_lastForwardMax(false),
+ // was the last point the furthest from lastWritten in the
+ // backward (anti-parallel) direction?
+ m_lastBackwardMax(false),
+
+ // added to queue when _push is called
+ m_nextX(0.0),
+ m_nextY(0.0),
+
+ // added to queue when _push is called if any backwards
+ // (anti-parallel) vectors were observed
+ m_nextBackwardX(0.0),
+ m_nextBackwardY(0.0),
+
+ // start of the current vector that is being simplified
+ m_currVecStartX(0.0),
+ m_currVecStartY(0.0)
+ {
+ // empty
+ }
+
+ inline void rewind(unsigned path_id)
+ {
+ queue_clear();
+ m_moveto = true;
+ m_source->rewind(path_id);
+ }
+
+ unsigned vertex(double *x, double *y)
+ {
+ unsigned cmd;
+
+ /* The simplification algorithm doesn't support curves or compound paths
+ so we just don't do it at all in that case... */
+ if (!m_simplify) {
+ return m_source->vertex(x, y);
+ }
+
+        /* Idea: we can skip drawing many lines by combining
+           sequential parallel lines into a single line instead of
+           redrawing lines over the same points. The loop below works
+           a bit like a state machine, where what it does depends on
+           what it did on the last iteration. To test whether
+           sequential lines are close to parallel, we calculate the
+           distance moved perpendicular to the last line. Once it
+           gets too big, the lines cannot be combined. */
+
+ /* This code was originally written by Allan Haldane and I
+ have modified to work in-place -- meaning not creating an
+ entirely new path list each time. In order to do that
+ without too much additional code complexity, it keeps a
+ small queue around so that multiple points can be emitted
+ in a single call, and those points will be popped from the
+ queue in subsequent calls. The following block will empty
+ the queue before proceeding to the main loop below.
+ -- Michael Droettboom */
+
+ /* This code was originally written by Allan Haldane and
+ updated by Michael Droettboom. I have modified it to
+ handle anti-parallel vectors. This is done essentially
+ the same way as parallel vectors, but requires a little
+ additional book-keeping to track whether or not we have
+ observed an anti-parallel vector during the current run.
+ -- Kevin Rose */
+
+ if (queue_pop(&cmd, x, y)) {
+ return cmd;
+ }
+
+ /* The main simplification loop. The point is to consume only
+ as many points as necessary until something has been added
+ to the outbound queue, not to run through the entire path
+ in one go. This eliminates the need to allocate and fill
+ an entire additional path array on each draw. */
+ while ((cmd = m_source->vertex(x, y)) != agg::path_cmd_stop) {
+ /* if we are starting a new path segment, move to the first point
+ + init */
+
+ if (m_moveto || cmd == agg::path_cmd_move_to) {
+ /* m_moveto check is not generally needed because
+ m_source generates an initial moveto; but it is
+ retained for safety in case circumstances arise
+ where this is not true. */
+ if (m_origdNorm2 != 0.0 && !m_after_moveto) {
+ /* m_origdNorm2 is nonzero only if we have a
+ vector; the m_after_moveto check ensures we
+ push this vector to the queue only once. */
+ _push(x, y);
+ }
+ m_after_moveto = true;
+ m_lastx = *x;
+ m_lasty = *y;
+ m_moveto = false;
+ m_origdNorm2 = 0.0;
+ m_dnorm2BackwardMax = 0.0;
+ m_clipped = true;
+ if (queue_nonempty()) {
+ /* If we did a push, empty the queue now. */
+ break;
+ }
+ continue;
+ }
+ m_after_moveto = false;
+
+            /* NOTE: We used to skip these very short segments, but if
+ you have a lot of them cumulatively, you can miss
+ maxima or minima in the data. */
+
+ /* Don't render line segments less than one pixel long */
+ /* if (fabs(*x - m_lastx) < 1.0 && fabs(*y - m_lasty) < 1.0) */
+ /* { */
+ /* continue; */
+ /* } */
+
+ /* if we have no orig vector, set it to this vector and
+ continue. this orig vector is the reference vector we
+ will build up the line to */
+ if (m_origdNorm2 == 0.0) {
+ if (m_clipped) {
+ queue_push(agg::path_cmd_move_to, m_lastx, m_lasty);
+ m_clipped = false;
+ }
+
+ m_origdx = *x - m_lastx;
+ m_origdy = *y - m_lasty;
+ m_origdNorm2 = m_origdx * m_origdx + m_origdy * m_origdy;
+
+ // set all the variables to reflect this new orig vector
+ m_dnorm2ForwardMax = m_origdNorm2;
+ m_dnorm2BackwardMax = 0.0;
+ m_lastForwardMax = true;
+ m_lastBackwardMax = false;
+
+ m_currVecStartX = m_lastx;
+ m_currVecStartY = m_lasty;
+ m_nextX = m_lastx = *x;
+ m_nextY = m_lasty = *y;
+ continue;
+ }
+
+ /* If got to here, then we have an orig vector and we just got
+ a vector in the sequence. */
+
+ /* Check that the perpendicular distance we have moved
+ from the last written point compared to the line we are
+ building is not too much. If o is the orig vector (we
+ are building on), and v is the vector from the last
+ written point to the current point, then the
+ perpendicular vector is p = v - (o.v)o/(o.o)
+ (here, a.b indicates the dot product of a and b). */
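+
+        /* A concrete example (illustrative only): with o = (2, 0) and
+           v = (3, 1), o.v = 6 and o.o = 4, so para = (6/4)(2, 0) = (3, 0)
+           and perp = v - para = (0, 1), giving p.p = 1. The new point is
+           merged only if 1 < m_simplify_threshold (the squared threshold). */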
+
+ /* get the v vector */
+ double totdx = *x - m_currVecStartX;
+ double totdy = *y - m_currVecStartY;
+
+ /* get the dot product o.v */
+ double totdot = m_origdx * totdx + m_origdy * totdy;
+
+ /* get the para vector ( = (o.v)o/(o.o)) */
+ double paradx = totdot * m_origdx / m_origdNorm2;
+ double parady = totdot * m_origdy / m_origdNorm2;
+
+ /* get the perp vector ( = v - para) */
+ double perpdx = totdx - paradx;
+ double perpdy = totdy - parady;
+
+ /* get the squared norm of perp vector ( = p.p) */
+ double perpdNorm2 = perpdx * perpdx + perpdy * perpdy;
+
+ /* If the perpendicular vector is less than
+ m_simplify_threshold pixels in size, then merge
+ current x,y with the current vector */
+ if (perpdNorm2 < m_simplify_threshold) {
+ /* check if the current vector is parallel or
+ anti-parallel to the orig vector. In either case,
+ test if it is the longest of the vectors
+ we are merging in that direction. If it is, then
+ update the current vector in that direction. */
+ double paradNorm2 = paradx * paradx + parady * parady;
+
+ m_lastForwardMax = false;
+ m_lastBackwardMax = false;
+ if (totdot > 0.0) {
+ if (paradNorm2 > m_dnorm2ForwardMax) {
+ m_lastForwardMax = true;
+ m_dnorm2ForwardMax = paradNorm2;
+ m_nextX = *x;
+ m_nextY = *y;
+ }
+ } else {
+ if (paradNorm2 > m_dnorm2BackwardMax) {
+ m_lastBackwardMax = true;
+ m_dnorm2BackwardMax = paradNorm2;
+ m_nextBackwardX = *x;
+ m_nextBackwardY = *y;
+ }
+ }
+
+ m_lastx = *x;
+ m_lasty = *y;
+ continue;
+ }
+
+ /* If we get here, then this vector was not similar enough to the
+ line we are building, so we need to draw that line and start the
+ next one. */
+
+ /* If the line needs to extend in the opposite direction from the
+           direction we are drawing in, move back so that we start drawing
+           from back there. */
+ _push(x, y);
+
+ break;
+ }
+
+ /* Fill the queue with the remaining vertices if we've finished the
+ path in the above loop. */
+ if (cmd == agg::path_cmd_stop) {
+ if (m_origdNorm2 != 0.0) {
+ queue_push((m_moveto || m_after_moveto) ? agg::path_cmd_move_to
+ : agg::path_cmd_line_to,
+ m_nextX,
+ m_nextY);
+ if (m_dnorm2BackwardMax > 0.0) {
+ queue_push((m_moveto || m_after_moveto) ? agg::path_cmd_move_to
+ : agg::path_cmd_line_to,
+ m_nextBackwardX,
+ m_nextBackwardY);
+ }
+ m_moveto = false;
+ }
+ queue_push((m_moveto || m_after_moveto) ? agg::path_cmd_move_to : agg::path_cmd_line_to,
+ m_lastx,
+ m_lasty);
+ m_moveto = false;
+ queue_push(agg::path_cmd_stop, 0.0, 0.0);
+ }
+
+ /* Return the first item in the queue, if any, otherwise
+ indicate that we're done. */
+ if (queue_pop(&cmd, x, y)) {
+ return cmd;
+ } else {
+ return agg::path_cmd_stop;
+ }
+ }
+
+ private:
+ VertexSource *m_source;
+ bool m_simplify;
+ double m_simplify_threshold;
+
+ bool m_moveto;
+ bool m_after_moveto;
+ bool m_clipped;
+ double m_lastx, m_lasty;
+
+ double m_origdx;
+ double m_origdy;
+ double m_origdNorm2;
+ double m_dnorm2ForwardMax;
+ double m_dnorm2BackwardMax;
+ bool m_lastForwardMax;
+ bool m_lastBackwardMax;
+ double m_nextX;
+ double m_nextY;
+ double m_nextBackwardX;
+ double m_nextBackwardY;
+ double m_currVecStartX;
+ double m_currVecStartY;
+
+ inline void _push(double *x, double *y)
+ {
+ bool needToPushBack = (m_dnorm2BackwardMax > 0.0);
+
+ /* If we observed any backward (anti-parallel) vectors, then
+ we need to push both forward and backward vectors. */
+ if (needToPushBack) {
+ /* If the last vector seen was the maximum in the forward direction,
+ then we need to push the forward after the backward. Otherwise,
+ the last vector seen was the maximum in the backward direction,
+ or somewhere in between, either way we are safe pushing forward
+ before backward. */
+ if (m_lastForwardMax) {
+ queue_push(agg::path_cmd_line_to, m_nextBackwardX, m_nextBackwardY);
+ queue_push(agg::path_cmd_line_to, m_nextX, m_nextY);
+ } else {
+ queue_push(agg::path_cmd_line_to, m_nextX, m_nextY);
+ queue_push(agg::path_cmd_line_to, m_nextBackwardX, m_nextBackwardY);
+ }
+ } else {
+ /* If we did not observe any backwards vectors, just push forward. */
+ queue_push(agg::path_cmd_line_to, m_nextX, m_nextY);
+ }
+
+ /* If we clipped some segments between this line and the next line
+ we are starting, we also need to move to the last point. */
+ if (m_clipped) {
+ queue_push(agg::path_cmd_move_to, m_lastx, m_lasty);
+ } else if ((!m_lastForwardMax) && (!m_lastBackwardMax)) {
+ /* If the last line was not the longest line, then move
+ back to the end point of the last line in the
+ sequence. Only do this if not clipped, since in that
+ case lastx,lasty is not part of the line just drawn. */
+
+ /* Would be move_to if not for the artifacts */
+ queue_push(agg::path_cmd_line_to, m_lastx, m_lasty);
+ }
+
+ /* Now reset all the variables to get ready for the next line */
+ m_origdx = *x - m_lastx;
+ m_origdy = *y - m_lasty;
+ m_origdNorm2 = m_origdx * m_origdx + m_origdy * m_origdy;
+
+ m_dnorm2ForwardMax = m_origdNorm2;
+ m_lastForwardMax = true;
+ m_currVecStartX = m_queue[m_queue_write - 1].x;
+ m_currVecStartY = m_queue[m_queue_write - 1].y;
+ m_lastx = m_nextX = *x;
+ m_lasty = m_nextY = *y;
+ m_dnorm2BackwardMax = 0.0;
+ m_lastBackwardMax = false;
+
+ m_clipped = false;
+ }
+};
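+
+/* A minimal usage sketch (not part of the original converter code): feed a
+   snapped path through the simplifier, taking the flag and threshold from
+   the source itself. `path` is a hypothetical py::PathIterator.
+
+       typedef PathSnapper<py::PathIterator> snapped_t;
+       typedef PathSimplifier<snapped_t> simplified_t;
+
+       snapped_t snapped(path, SNAP_AUTO, path.total_vertices());
+       simplified_t simplified(snapped, path.should_simplify(),
+                               path.simplify_threshold());
+
+       double x, y;
+       unsigned code;
+       while ((code = simplified.vertex(&x, &y)) != agg::path_cmd_stop) {
+           // the emitted stream draws the same shape with fewer vertices
+       }
+*/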
+
+template <class VertexSource>
+class Sketch
+{
+ public:
+ /*
+ scale: the scale of the wiggle perpendicular to the original
+ line (in pixels)
+
+ length: the base wavelength of the wiggle along the
+ original line (in pixels)
+
+       randomness: the factor by which the sketch length will
+       randomly shrink and expand.
+ */
+ Sketch(VertexSource &source, double scale, double length, double randomness)
+ : m_source(&source),
+ m_scale(scale),
+ m_length(length),
+ m_randomness(randomness),
+ m_segmented(source),
+ m_last_x(0.0),
+ m_last_y(0.0),
+ m_has_last(false),
+ m_p(0.0),
+ m_rand(0)
+ {
+ rewind(0);
+ }
+
+ unsigned vertex(double *x, double *y)
+ {
+ if (m_scale == 0.0) {
+ return m_source->vertex(x, y);
+ }
+
+ unsigned code = m_segmented.vertex(x, y);
+
+ if (code == agg::path_cmd_move_to) {
+ m_has_last = false;
+ m_p = 0.0;
+ }
+
+ if (m_has_last) {
+ // We want the "cursor" along the sine wave to move at a
+ // random rate.
+ double d_rand = m_rand.get_double();
+ double d_M_PI = 3.14159265358979323846;
+ m_p += pow(m_randomness, d_rand * 2.0 - 1.0);
+ double r = sin(m_p / (m_length / (d_M_PI * 2.0))) * m_scale;
+ double den = m_last_x - *x;
+ double num = m_last_y - *y;
+ double len = num * num + den * den;
+ m_last_x = *x;
+ m_last_y = *y;
+ if (len != 0) {
+ len = sqrt(len);
+ *x += r * num / len;
+ *y += r * -den / len;
+ }
+ } else {
+ m_last_x = *x;
+ m_last_y = *y;
+ }
+
+ m_has_last = true;
+
+ return code;
+ }
+
+ inline void rewind(unsigned path_id)
+ {
+ m_has_last = false;
+ m_p = 0.0;
+ if (m_scale != 0.0) {
+ m_rand.seed(0);
+ m_segmented.rewind(path_id);
+ } else {
+ m_source->rewind(path_id);
+ }
+ }
+
+ private:
+ VertexSource *m_source;
+ double m_scale;
+ double m_length;
+ double m_randomness;
+ agg::conv_segmentator<VertexSource> m_segmented;
+ double m_last_x;
+ double m_last_y;
+ bool m_has_last;
+ double m_p;
+ RandomNumberGenerator m_rand;
+};
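+
+/* A minimal usage sketch (not part of the original converter code): apply a
+   hand-drawn-style wiggle to a vertex source. The scale/length/randomness
+   values and the source `src` are hypothetical.
+
+       Sketch<py::PathIterator> wiggled(src, 1.0, 100.0, 2.0);
+       double x, y;
+       unsigned code;
+       while ((code = wiggled.vertex(&x, &y)) != agg::path_cmd_stop) {
+           // vertices are displaced perpendicular to the original segment
+       }
+*/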
+
+#endif // __PATH_CONVERTERS_H__
diff --git a/contrib/python/matplotlib/py2/src/py_adaptors.h b/contrib/python/matplotlib/py2/src/py_adaptors.h
new file mode 100644
index 00000000000..8eaa7ad6c71
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/py_adaptors.h
@@ -0,0 +1,251 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+#ifndef __PY_ADAPTORS_H__
+#define __PY_ADAPTORS_H__
+
+/***************************************************************************
+ * This module contains a number of C++ classes that adapt Python data
+ * structures to C++ and Agg-friendly interfaces.
+ */
+
+#include <Python.h>
+
+#include "numpy/arrayobject.h"
+
+#include "py_exceptions.h"
+
+extern "C" {
+int convert_path(PyObject *obj, void *pathp);
+}
+
+namespace py
+{
+
+/************************************************************
+ * py::PathIterator acts as a bridge between Numpy and Agg. Given a
+ * pair of Numpy arrays, vertices and codes, it iterates over
+ * those vertices and codes, using the standard Agg vertex source
+ * interface:
+ *
+ * unsigned vertex(double* x, double* y)
+ */
+class PathIterator
+{
+ /* We hold references to the Python objects, not just the
+ underlying data arrays, so that Python reference counting
+ can work.
+ */
+ PyArrayObject *m_vertices;
+ PyArrayObject *m_codes;
+
+ unsigned m_iterator;
+ unsigned m_total_vertices;
+
+ /* This class doesn't actually do any simplification, but we
+ store the value here, since it is obtained from the Python
+ object.
+ */
+ bool m_should_simplify;
+ double m_simplify_threshold;
+
+ public:
+ inline PathIterator()
+ : m_vertices(NULL),
+ m_codes(NULL),
+ m_iterator(0),
+ m_total_vertices(0),
+ m_should_simplify(false),
+ m_simplify_threshold(1.0 / 9.0)
+ {
+ }
+
+ inline PathIterator(PyObject *vertices,
+ PyObject *codes,
+ bool should_simplify,
+ double simplify_threshold)
+ : m_vertices(NULL), m_codes(NULL), m_iterator(0)
+ {
+ if (!set(vertices, codes, should_simplify, simplify_threshold))
+ throw py::exception();
+ }
+
+ inline PathIterator(PyObject *vertices, PyObject *codes)
+ : m_vertices(NULL), m_codes(NULL), m_iterator(0)
+ {
+ if (!set(vertices, codes))
+ throw py::exception();
+ }
+
+ inline PathIterator(const PathIterator &other)
+ {
+ Py_XINCREF(other.m_vertices);
+ m_vertices = other.m_vertices;
+
+ Py_XINCREF(other.m_codes);
+ m_codes = other.m_codes;
+
+ m_iterator = 0;
+ m_total_vertices = other.m_total_vertices;
+
+ m_should_simplify = other.m_should_simplify;
+ m_simplify_threshold = other.m_simplify_threshold;
+ }
+
+ ~PathIterator()
+ {
+ Py_XDECREF(m_vertices);
+ Py_XDECREF(m_codes);
+ }
+
+ inline int
+ set(PyObject *vertices, PyObject *codes, bool should_simplify, double simplify_threshold)
+ {
+ m_should_simplify = should_simplify;
+ m_simplify_threshold = simplify_threshold;
+
+ Py_XDECREF(m_vertices);
+ m_vertices = (PyArrayObject *)PyArray_FromObject(vertices, NPY_DOUBLE, 2, 2);
+
+ if (!m_vertices || PyArray_DIM(m_vertices, 1) != 2) {
+ PyErr_SetString(PyExc_ValueError, "Invalid vertices array");
+ return 0;
+ }
+
+ Py_XDECREF(m_codes);
+ m_codes = NULL;
+
+ if (codes != NULL && codes != Py_None) {
+ m_codes = (PyArrayObject *)PyArray_FromObject(codes, NPY_UINT8, 1, 1);
+
+ if (!m_codes || PyArray_DIM(m_codes, 0) != PyArray_DIM(m_vertices, 0)) {
+ PyErr_SetString(PyExc_ValueError, "Invalid codes array");
+ return 0;
+ }
+ }
+
+ m_total_vertices = (unsigned)PyArray_DIM(m_vertices, 0);
+ m_iterator = 0;
+
+ return 1;
+ }
+
+ inline int set(PyObject *vertices, PyObject *codes)
+ {
+ return set(vertices, codes, false, 0.0);
+ }
+
+ inline unsigned vertex(double *x, double *y)
+ {
+ if (m_iterator >= m_total_vertices) {
+ *x = 0.0;
+ *y = 0.0;
+ return agg::path_cmd_stop;
+ }
+
+ const size_t idx = m_iterator++;
+
+ char *pair = (char *)PyArray_GETPTR2(m_vertices, idx, 0);
+ *x = *(double *)pair;
+ *y = *(double *)(pair + PyArray_STRIDE(m_vertices, 1));
+
+ if (m_codes != NULL) {
+ return (unsigned)(*(char *)PyArray_GETPTR1(m_codes, idx));
+ } else {
+ return idx == 0 ? agg::path_cmd_move_to : agg::path_cmd_line_to;
+ }
+ }
+
+ inline void rewind(unsigned path_id)
+ {
+ m_iterator = path_id;
+ }
+
+ inline unsigned total_vertices() const
+ {
+ return m_total_vertices;
+ }
+
+ inline bool should_simplify() const
+ {
+ return m_should_simplify;
+ }
+
+ inline double simplify_threshold() const
+ {
+ return m_simplify_threshold;
+ }
+
+ inline bool has_curves() const
+ {
+ return m_codes != NULL;
+ }
+
+ inline void *get_id()
+ {
+ return (void *)m_vertices;
+ }
+};
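+
+/* A minimal usage sketch (not part of the original adaptor code): iterate a
+   Path's vertices through the Agg vertex-source interface. `vertices_obj`
+   and `codes_obj` are hypothetical Python objects (an Nx2 double array and
+   an N-element uint8 array); set() sets a Python error and the constructor
+   throws py::exception on failure.
+
+       py::PathIterator path(vertices_obj, codes_obj);
+       double x, y;
+       unsigned code;
+       while ((code = path.vertex(&x, &y)) != agg::path_cmd_stop) {
+           // without a codes array, the first vertex yields
+           // agg::path_cmd_move_to and the rest agg::path_cmd_line_to
+       }
+*/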
+
+class PathGenerator
+{
+ PyObject *m_paths;
+ Py_ssize_t m_npaths;
+
+ public:
+ typedef PathIterator path_iterator;
+
+ PathGenerator(PyObject *obj) : m_paths(NULL), m_npaths(0)
+ {
+ if (!set(obj)) {
+ throw py::exception();
+ }
+ }
+
+ ~PathGenerator()
+ {
+ Py_XDECREF(m_paths);
+ }
+
+ int set(PyObject *obj)
+ {
+ if (!PySequence_Check(obj)) {
+ return 0;
+ }
+
+ m_paths = obj;
+ Py_INCREF(m_paths);
+
+ m_npaths = PySequence_Size(m_paths);
+
+ return 1;
+ }
+
+ Py_ssize_t num_paths() const
+ {
+ return m_npaths;
+ }
+
+ Py_ssize_t size() const
+ {
+ return m_npaths;
+ }
+
+ path_iterator operator()(size_t i)
+ {
+ path_iterator path;
+ PyObject *item;
+
+ item = PySequence_GetItem(m_paths, i % m_npaths);
+ if (item == NULL) {
+ throw py::exception();
+ }
+ if (!convert_path(item, &path)) {
+ throw py::exception();
+ }
+ Py_DECREF(item);
+ return path;
+ }
+};
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/py_converters.cpp b/contrib/python/matplotlib/py2/src/py_converters.cpp
new file mode 100644
index 00000000000..c36fc59f59d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/py_converters.cpp
@@ -0,0 +1,619 @@
+#define NO_IMPORT_ARRAY
+
+#include "py_converters.h"
+#include "numpy_cpp.h"
+
+#include "agg_basics.h"
+#include "agg_color_rgba.h"
+#include "agg_math_stroke.h"
+
+extern "C" {
+
+static int convert_string_enum(PyObject *obj, const char *name, const char **names, int *values, int *result)
+{
+ PyObject *bytesobj;
+ char *str;
+
+ if (obj == NULL || obj == Py_None) {
+ return 1;
+ }
+
+ if (PyUnicode_Check(obj)) {
+ bytesobj = PyUnicode_AsASCIIString(obj);
+ if (bytesobj == NULL) {
+ return 0;
+ }
+ } else if (PyBytes_Check(obj)) {
+ Py_INCREF(obj);
+ bytesobj = obj;
+ } else {
+ PyErr_Format(PyExc_TypeError, "%s must be bytes or unicode", name);
+ return 0;
+ }
+
+ str = PyBytes_AsString(bytesobj);
+ if (str == NULL) {
+ Py_DECREF(bytesobj);
+ return 0;
+ }
+
+ for ( ; *names != NULL; names++, values++) {
+ if (strncmp(str, *names, 64) == 0) {
+ *result = *values;
+ Py_DECREF(bytesobj);
+ return 1;
+ }
+ }
+
+ PyErr_Format(PyExc_ValueError, "invalid %s value", name);
+ Py_DECREF(bytesobj);
+ return 0;
+}
+
+int convert_from_method(PyObject *obj, const char *name, converter func, void *p)
+{
+ PyObject *value;
+
+ value = PyObject_CallMethod(obj, (char *)name, NULL);
+ if (value == NULL) {
+ if (!PyObject_HasAttrString(obj, (char *)name)) {
+ PyErr_Clear();
+ return 1;
+ }
+ return 0;
+ }
+
+ if (!func(value, p)) {
+ Py_DECREF(value);
+ return 0;
+ }
+
+ Py_DECREF(value);
+ return 1;
+}
+
+int convert_from_attr(PyObject *obj, const char *name, converter func, void *p)
+{
+ PyObject *value;
+
+ value = PyObject_GetAttrString(obj, (char *)name);
+ if (value == NULL) {
+ if (!PyObject_HasAttrString(obj, (char *)name)) {
+ PyErr_Clear();
+ return 1;
+ }
+ return 0;
+ }
+
+ if (!func(value, p)) {
+ Py_DECREF(value);
+ return 0;
+ }
+
+ Py_DECREF(value);
+ return 1;
+}
+
+int convert_double(PyObject *obj, void *p)
+{
+ double *val = (double *)p;
+
+ *val = PyFloat_AsDouble(obj);
+ if (PyErr_Occurred()) {
+ return 0;
+ }
+
+ return 1;
+}
+
+int convert_bool(PyObject *obj, void *p)
+{
+ bool *val = (bool *)p;
+
+ *val = PyObject_IsTrue(obj);
+
+ return 1;
+}
+
+int convert_cap(PyObject *capobj, void *capp)
+{
+ const char *names[] = {"butt", "round", "projecting", NULL};
+ int values[] = {agg::butt_cap, agg::round_cap, agg::square_cap};
+ int result = agg::butt_cap;
+
+ if (!convert_string_enum(capobj, "capstyle", names, values, &result)) {
+ return 0;
+ }
+
+ *(agg::line_cap_e *)capp = (agg::line_cap_e)result;
+ return 1;
+}
+
+int convert_join(PyObject *joinobj, void *joinp)
+{
+ const char *names[] = {"miter", "round", "bevel", NULL};
+ int values[] = {agg::miter_join_revert, agg::round_join, agg::bevel_join};
+ int result = agg::miter_join_revert;
+
+ if (!convert_string_enum(joinobj, "joinstyle", names, values, &result)) {
+ return 0;
+ }
+
+ *(agg::line_join_e *)joinp = (agg::line_join_e)result;
+ return 1;
+}
+
+int convert_rect(PyObject *rectobj, void *rectp)
+{
+ agg::rect_d *rect = (agg::rect_d *)rectp;
+
+ if (rectobj == NULL || rectobj == Py_None) {
+ rect->x1 = 0.0;
+ rect->y1 = 0.0;
+ rect->x2 = 0.0;
+ rect->y2 = 0.0;
+ } else {
+ try
+ {
+ numpy::array_view<const double, 2> rect_arr(rectobj);
+
+ if (rect_arr.dim(0) != 2 || rect_arr.dim(1) != 2) {
+ PyErr_SetString(PyExc_ValueError, "Invalid bounding box");
+ return 0;
+ }
+
+ rect->x1 = rect_arr(0, 0);
+ rect->y1 = rect_arr(0, 1);
+ rect->x2 = rect_arr(1, 0);
+ rect->y2 = rect_arr(1, 1);
+ }
+ catch (py::exception &)
+ {
+ PyErr_Clear();
+
+ try
+ {
+ numpy::array_view<const double, 1> rect_arr(rectobj);
+
+ if (rect_arr.dim(0) != 4) {
+ PyErr_SetString(PyExc_ValueError, "Invalid bounding box");
+ return 0;
+ }
+
+ rect->x1 = rect_arr(0);
+ rect->y1 = rect_arr(1);
+ rect->x2 = rect_arr(2);
+ rect->y2 = rect_arr(3);
+ }
+ catch (py::exception &)
+ {
+ return 0;
+ }
+ }
+ }
+
+ return 1;
+}
+
+int convert_rgba(PyObject *rgbaobj, void *rgbap)
+{
+ agg::rgba *rgba = (agg::rgba *)rgbap;
+
+ if (rgbaobj == NULL || rgbaobj == Py_None) {
+ rgba->r = 0.0;
+ rgba->g = 0.0;
+ rgba->b = 0.0;
+ rgba->a = 0.0;
+ } else {
+ rgba->a = 1.0;
+ if (!PyArg_ParseTuple(
+ rgbaobj, "ddd|d:rgba", &(rgba->r), &(rgba->g), &(rgba->b), &(rgba->a))) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+int convert_dashes(PyObject *dashobj, void *dashesp)
+{
+ Dashes *dashes = (Dashes *)dashesp;
+
+    if (dashobj == NULL || dashobj == Py_None) {
+ return 1;
+ }
+
+ PyObject *dash_offset_obj = NULL;
+ double dash_offset = 0.0;
+ PyObject *dashes_seq = NULL;
+ Py_ssize_t nentries;
+
+ if (!PyArg_ParseTuple(dashobj, "OO:dashes", &dash_offset_obj, &dashes_seq)) {
+ return 0;
+ }
+
+ if (dash_offset_obj != Py_None) {
+ dash_offset = PyFloat_AsDouble(dash_offset_obj);
+ if (PyErr_Occurred()) {
+ return 0;
+ }
+ }
+
+ if (dashes_seq == Py_None) {
+ return 1;
+ }
+
+ if (!PySequence_Check(dashes_seq)) {
+ PyErr_SetString(PyExc_TypeError, "Invalid dashes sequence");
+ return 0;
+ }
+
+ nentries = PySequence_Size(dashes_seq);
+ if (nentries % 2 != 0) {
+ PyErr_Format(PyExc_ValueError, "dashes sequence must have an even number of elements");
+ return 0;
+ }
+
+ for (Py_ssize_t i = 0; i < nentries; ++i) {
+ PyObject *item;
+ double length;
+ double skip;
+
+ item = PySequence_GetItem(dashes_seq, i);
+ if (item == NULL) {
+ return 0;
+ }
+ length = PyFloat_AsDouble(item);
+ if (PyErr_Occurred()) {
+ Py_DECREF(item);
+ return 0;
+ }
+ Py_DECREF(item);
+
+ ++i;
+
+ item = PySequence_GetItem(dashes_seq, i);
+ if (item == NULL) {
+ return 0;
+ }
+ skip = PyFloat_AsDouble(item);
+ if (PyErr_Occurred()) {
+ Py_DECREF(item);
+ return 0;
+ }
+ Py_DECREF(item);
+
+ dashes->add_dash_pair(length, skip);
+ }
+
+ dashes->set_dash_offset(dash_offset);
+
+ return 1;
+}
+
+int convert_dashes_vector(PyObject *obj, void *dashesp)
+{
+ DashesVector *dashes = (DashesVector *)dashesp;
+
+ if (!PySequence_Check(obj)) {
+ return 0;
+ }
+
+ Py_ssize_t n = PySequence_Size(obj);
+
+ for (Py_ssize_t i = 0; i < n; ++i) {
+ PyObject *item;
+ Dashes subdashes;
+
+ item = PySequence_GetItem(obj, i);
+ if (item == NULL) {
+ return 0;
+ }
+
+ if (!convert_dashes(item, &subdashes)) {
+ Py_DECREF(item);
+ return 0;
+ }
+ Py_DECREF(item);
+
+ dashes->push_back(subdashes);
+ }
+
+ return 1;
+}
+
+int convert_trans_affine(PyObject *obj, void *transp)
+{
+ agg::trans_affine *trans = (agg::trans_affine *)transp;
+
+ /** If None assume identity transform. */
+ if (obj == NULL || obj == Py_None) {
+ return 1;
+ }
+
+ try
+ {
+ numpy::array_view<const double, 2> matrix(obj);
+
+ if (matrix.dim(0) == 3 && matrix.dim(1) == 3) {
+ trans->sx = matrix(0, 0);
+ trans->shx = matrix(0, 1);
+ trans->tx = matrix(0, 2);
+
+ trans->shy = matrix(1, 0);
+ trans->sy = matrix(1, 1);
+ trans->ty = matrix(1, 2);
+
+ return 1;
+ }
+ }
+ catch (py::exception &)
+ {
+ return 0;
+ }
+
+ PyErr_SetString(PyExc_ValueError, "Invalid affine transformation matrix");
+ return 0;
+}
+
+int convert_path(PyObject *obj, void *pathp)
+{
+ py::PathIterator *path = (py::PathIterator *)pathp;
+
+ PyObject *vertices_obj = NULL;
+ PyObject *codes_obj = NULL;
+ PyObject *should_simplify_obj = NULL;
+ PyObject *simplify_threshold_obj = NULL;
+ bool should_simplify;
+ double simplify_threshold;
+
+ int status = 0;
+
+ if (obj == NULL || obj == Py_None) {
+ return 1;
+ }
+
+ vertices_obj = PyObject_GetAttrString(obj, "vertices");
+ if (vertices_obj == NULL) {
+ goto exit;
+ }
+
+ codes_obj = PyObject_GetAttrString(obj, "codes");
+ if (codes_obj == NULL) {
+ goto exit;
+ }
+
+ should_simplify_obj = PyObject_GetAttrString(obj, "should_simplify");
+ if (should_simplify_obj == NULL) {
+ goto exit;
+ }
+ should_simplify = PyObject_IsTrue(should_simplify_obj);
+
+ simplify_threshold_obj = PyObject_GetAttrString(obj, "simplify_threshold");
+ if (simplify_threshold_obj == NULL) {
+ goto exit;
+ }
+ simplify_threshold = PyFloat_AsDouble(simplify_threshold_obj);
+ if (PyErr_Occurred()) {
+ goto exit;
+ }
+
+ if (!path->set(vertices_obj, codes_obj, should_simplify, simplify_threshold)) {
+ goto exit;
+ }
+
+ status = 1;
+
+exit:
+ Py_XDECREF(vertices_obj);
+ Py_XDECREF(codes_obj);
+ Py_XDECREF(should_simplify_obj);
+ Py_XDECREF(simplify_threshold_obj);
+
+ return status;
+}
+
+int convert_clippath(PyObject *clippath_tuple, void *clippathp)
+{
+ ClipPath *clippath = (ClipPath *)clippathp;
+ py::PathIterator path;
+ agg::trans_affine trans;
+
+ if (clippath_tuple != NULL && clippath_tuple != Py_None) {
+ if (!PyArg_ParseTuple(clippath_tuple,
+ "O&O&:clippath",
+ &convert_path,
+ &clippath->path,
+ &convert_trans_affine,
+ &clippath->trans)) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+int convert_snap(PyObject *obj, void *snapp)
+{
+ e_snap_mode *snap = (e_snap_mode *)snapp;
+
+ if (obj == NULL || obj == Py_None) {
+ *snap = SNAP_AUTO;
+ } else if (PyObject_IsTrue(obj)) {
+ *snap = SNAP_TRUE;
+ } else {
+ *snap = SNAP_FALSE;
+ }
+
+ return 1;
+}
+
+int convert_sketch_params(PyObject *obj, void *sketchp)
+{
+ SketchParams *sketch = (SketchParams *)sketchp;
+
+ if (obj == NULL || obj == Py_None) {
+ sketch->scale = 0.0;
+ } else if (!PyArg_ParseTuple(obj,
+ "ddd:sketch_params",
+ &sketch->scale,
+ &sketch->length,
+ &sketch->randomness)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+int convert_gcagg(PyObject *pygc, void *gcp)
+{
+ GCAgg *gc = (GCAgg *)gcp;
+
+ if (!(convert_from_attr(pygc, "_linewidth", &convert_double, &gc->linewidth) &&
+ convert_from_attr(pygc, "_alpha", &convert_double, &gc->alpha) &&
+ convert_from_attr(pygc, "_forced_alpha", &convert_bool, &gc->forced_alpha) &&
+ convert_from_attr(pygc, "_rgb", &convert_rgba, &gc->color) &&
+ convert_from_attr(pygc, "_antialiased", &convert_bool, &gc->isaa) &&
+ convert_from_attr(pygc, "_capstyle", &convert_cap, &gc->cap) &&
+ convert_from_attr(pygc, "_joinstyle", &convert_join, &gc->join) &&
+ convert_from_method(pygc, "get_dashes", &convert_dashes, &gc->dashes) &&
+ convert_from_attr(pygc, "_cliprect", &convert_rect, &gc->cliprect) &&
+ convert_from_method(pygc, "get_clip_path", &convert_clippath, &gc->clippath) &&
+ convert_from_method(pygc, "get_snap", &convert_snap, &gc->snap_mode) &&
+ convert_from_method(pygc, "get_hatch_path", &convert_path, &gc->hatchpath) &&
+ convert_from_method(pygc, "get_hatch_color", &convert_rgba, &gc->hatch_color) &&
+ convert_from_method(pygc, "get_hatch_linewidth", &convert_double, &gc->hatch_linewidth) &&
+ convert_from_method(pygc, "get_sketch_params", &convert_sketch_params, &gc->sketch))) {
+ return 0;
+ }
+
+ return 1;
+}
+
+int convert_offset_position(PyObject *obj, void *offsetp)
+{
+ e_offset_position *offset = (e_offset_position *)offsetp;
+ const char *names[] = {"data", NULL};
+ int values[] = {OFFSET_POSITION_DATA};
+ int result = (int)OFFSET_POSITION_FIGURE;
+
+ if (!convert_string_enum(obj, "offset_position", names, values, &result)) {
+ PyErr_Clear();
+ }
+
+ *offset = (e_offset_position)result;
+
+ return 1;
+}
+
+int convert_face(PyObject *color, GCAgg &gc, agg::rgba *rgba)
+{
+ if (!convert_rgba(color, rgba)) {
+ return 0;
+ }
+
+ if (color != NULL && color != Py_None) {
+ if (gc.forced_alpha || PySequence_Size(color) == 3) {
+ rgba->a = gc.alpha;
+ }
+ }
+
+ return 1;
+}
+
+int convert_points(PyObject *obj, void *pointsp)
+{
+ numpy::array_view<double, 2> *points = (numpy::array_view<double, 2> *)pointsp;
+
+ if (obj == NULL || obj == Py_None) {
+ return 1;
+ }
+
+ points->set(obj);
+
+ if (points->size() == 0) {
+ return 1;
+ }
+
+ if (points->dim(1) != 2) {
+ PyErr_Format(PyExc_ValueError,
+ "Points must be Nx2 array, got %" NPY_INTP_FMT "x%" NPY_INTP_FMT,
+ points->dim(0), points->dim(1));
+ return 0;
+ }
+
+ return 1;
+}
+
+int convert_transforms(PyObject *obj, void *transp)
+{
+ numpy::array_view<double, 3> *trans = (numpy::array_view<double, 3> *)transp;
+
+ if (obj == NULL || obj == Py_None) {
+ return 1;
+ }
+
+ trans->set(obj);
+
+ if (trans->size() == 0) {
+ return 1;
+ }
+
+ if (trans->dim(1) != 3 || trans->dim(2) != 3) {
+ PyErr_Format(PyExc_ValueError,
+ "Transforms must be Nx3x3 array, got %" NPY_INTP_FMT "x%" NPY_INTP_FMT "x%" NPY_INTP_FMT,
+ trans->dim(0), trans->dim(1), trans->dim(2));
+ return 0;
+ }
+
+ return 1;
+}
+
+int convert_bboxes(PyObject *obj, void *bboxp)
+{
+ numpy::array_view<double, 3> *bbox = (numpy::array_view<double, 3> *)bboxp;
+
+ if (obj == NULL || obj == Py_None) {
+ return 1;
+ }
+
+ bbox->set(obj);
+
+ if (bbox->size() == 0) {
+ return 1;
+ }
+
+ if (bbox->dim(1) != 2 || bbox->dim(2) != 2) {
+ PyErr_Format(PyExc_ValueError,
+ "Bbox array must be Nx2x2 array, got %" NPY_INTP_FMT "x%" NPY_INTP_FMT "x%" NPY_INTP_FMT,
+ bbox->dim(0), bbox->dim(1), bbox->dim(2));
+ return 0;
+ }
+
+ return 1;
+}
+
+int convert_colors(PyObject *obj, void *colorsp)
+{
+ numpy::array_view<double, 2> *colors = (numpy::array_view<double, 2> *)colorsp;
+
+ if (obj == NULL || obj == Py_None) {
+ return 1;
+ }
+
+ colors->set(obj);
+
+ if (colors->size() == 0) {
+ return 1;
+ }
+
+ if (colors->dim(1) != 4) {
+ PyErr_Format(PyExc_ValueError,
+ "Colors array must be Nx4 array, got %" NPY_INTP_FMT "x%" NPY_INTP_FMT,
+ colors->dim(0), colors->dim(1));
+ return 0;
+ }
+
+ return 1;
+}
+}
diff --git a/contrib/python/matplotlib/py2/src/py_converters.h b/contrib/python/matplotlib/py2/src/py_converters.h
new file mode 100644
index 00000000000..02d84affe85
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/py_converters.h
@@ -0,0 +1,49 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+#ifndef __PY_CONVERTERS_H__
+#define __PY_CONVERTERS_H__
+
+/***************************************************************************
+ * This module contains a number of conversion functions from Python types
+ * to C++ types. Most of them meet the Python "converter" signature:
+ *
+ * typedef int (*converter)(PyObject *, void *);
+ *
+ * and thus can be passed as conversion functions to PyArg_ParseTuple
+ * and friends.
+ */
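+
+/* A minimal usage sketch (not part of the original header): converters plug
+   into PyArg_ParseTuple via the "O&" format code. `args`, `linewidth` and
+   `rect` below are hypothetical.
+
+       double linewidth;
+       agg::rect_d rect;
+       if (!PyArg_ParseTuple(args, "O&O&", &convert_double, &linewidth,
+                             &convert_rect, &rect)) {
+           return NULL;
+       }
+*/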
+
+#include <Python.h>
+#include "numpy_cpp.h"
+#include "_backend_agg_basic_types.h"
+
+extern "C" {
+typedef int (*converter)(PyObject *, void *);
+
+int convert_from_attr(PyObject *obj, const char *name, converter func, void *p);
+int convert_from_method(PyObject *obj, const char *name, converter func, void *p);
+
+int convert_double(PyObject *obj, void *p);
+int convert_bool(PyObject *obj, void *p);
+int convert_cap(PyObject *capobj, void *capp);
+int convert_join(PyObject *joinobj, void *joinp);
+int convert_rect(PyObject *rectobj, void *rectp);
+int convert_rgba(PyObject *rgbaobj, void *rgbap);
+int convert_dashes(PyObject *dashobj, void *gcp);
+int convert_dashes_vector(PyObject *obj, void *dashesp);
+int convert_trans_affine(PyObject *obj, void *transp);
+int convert_path(PyObject *obj, void *pathp);
+int convert_clippath(PyObject *clippath_tuple, void *clippathp);
+int convert_snap(PyObject *obj, void *snapp);
+int convert_offset_position(PyObject *obj, void *offsetp);
+int convert_sketch_params(PyObject *obj, void *sketchp);
+int convert_gcagg(PyObject *pygc, void *gcp);
+int convert_points(PyObject *pygc, void *pointsp);
+int convert_transforms(PyObject *pygc, void *transp);
+int convert_bboxes(PyObject *pygc, void *bboxp);
+int convert_colors(PyObject *pygc, void *colorsp);
+
+int convert_face(PyObject *color, GCAgg &gc, agg::rgba *rgba);
+}
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/py_exceptions.h b/contrib/python/matplotlib/py2/src/py_exceptions.h
new file mode 100644
index 00000000000..1ee2d51903c
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/py_exceptions.h
@@ -0,0 +1,72 @@
+/* -*- mode: c++; c-basic-offset: 4 -*- */
+
+#ifndef __PY_EXCEPTIONS_H__
+#define __PY_EXCEPTIONS_H__
+
+#include <exception>
+#include <stdexcept>
+
+namespace py
+{
+class exception : public std::exception
+{
+ public:
+ const char *what() const throw()
+ {
+ return "python error has been set";
+ }
+};
+}
+
+#define CALL_CPP_FULL(name, a, cleanup, errorcode) \
+ try \
+ { \
+ a; \
+ } \
+ catch (const py::exception &) \
+ { \
+ { \
+ cleanup; \
+ } \
+ return (errorcode); \
+ } \
+ catch (const std::bad_alloc &) \
+ { \
+ PyErr_Format(PyExc_MemoryError, "In %s: Out of memory", (name)); \
+ { \
+ cleanup; \
+ } \
+ return (errorcode); \
+ } \
+ catch (const std::overflow_error &e) \
+ { \
+ PyErr_Format(PyExc_OverflowError, "In %s: %s", (name), e.what()); \
+ { \
+ cleanup; \
+ } \
+ return (errorcode); \
+ } \
+ catch (const std::runtime_error &e) \
+ { \
+ PyErr_Format(PyExc_RuntimeError, "In %s: %s", (name), e.what()); \
+ { \
+ cleanup; \
+ } \
+ return (errorcode); \
+ } \
+ catch (...) \
+ { \
+ PyErr_Format(PyExc_RuntimeError, "Unknown exception in %s", (name)); \
+ { \
+ cleanup; \
+ } \
+ return (errorcode); \
+ }
+
+#define CALL_CPP_CLEANUP(name, a, cleanup) CALL_CPP_FULL(name, a, cleanup, NULL)
+
+#define CALL_CPP(name, a) CALL_CPP_FULL(name, a, , NULL)
+
+#define CALL_CPP_INIT(name, a) CALL_CPP_FULL(name, a, , -1)
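+
+/* A minimal usage sketch (not part of the original header): inside a CPython
+   method that returns PyObject*, wrap a C++ call that may throw. `do_work`
+   is a hypothetical C++ function; on an exception the macro sets a Python
+   error and returns NULL from the enclosing function.
+
+       static PyObject *py_do_work(PyObject *self, PyObject *args)
+       {
+           CALL_CPP("do_work", do_work());
+           Py_RETURN_NONE;
+       }
+*/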
+
+#endif
diff --git a/contrib/python/matplotlib/py2/src/qhull_wrap.c b/contrib/python/matplotlib/py2/src/qhull_wrap.c
new file mode 100644
index 00000000000..9cbaf64f01d
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/qhull_wrap.c
@@ -0,0 +1,377 @@
+/*
+ * Wrapper module for libqhull, providing Delaunay triangulation.
+ *
+ * This module's methods should not be accessed directly. To obtain a Delaunay
+ * triangulation, construct an instance of the matplotlib.tri.Triangulation
+ * class without specifying a triangles array.
+ */
+#include "Python.h"
+#include "numpy/noprefix.h"
+#include "qhull_ra.h"
+#include <stdio.h>
+
+
+#if PY_MAJOR_VERSION >= 3
+#define PY3K 1
+#else
+#define PY3K 0
+#endif
+
+#ifndef MPL_DEVNULL
+#error "MPL_DEVNULL must be defined as the OS-equivalent of /dev/null"
+#endif
+
+#define STRINGIFY(x) STR(x)
+#define STR(x) #x
+
+static qhT qhData;
+static qhT* qh = &qhData;
+
+static const char* qhull_error_msg[6] = {
+ "", /* 0 = qh_ERRnone */
+ "input inconsistency", /* 1 = qh_ERRinput */
+ "singular input data", /* 2 = qh_ERRsingular */
+ "precision error", /* 3 = qh_ERRprec */
+ "insufficient memory", /* 4 = qh_ERRmem */
+ "internal error"}; /* 5 = qh_ERRqhull */
+
+
+/* Return the indices of the 3 vertices that comprise the specified facet (i.e.
+ * triangle). */
+static void
+get_facet_vertices(const facetT* facet, int indices[3])
+{
+ vertexT *vertex, **vertexp;
+ FOREACHvertex_(facet->vertices)
+ *indices++ = qh_pointid(qh, vertex->point);
+}
+
+/* Return the indices of the 3 triangles that are neighbors of the specified
+ * facet (triangle). */
+static void
+get_facet_neighbours(const facetT* facet, const int* tri_indices,
+ int indices[3])
+{
+ facetT *neighbor, **neighborp;
+ FOREACHneighbor_(facet)
+ *indices++ = (neighbor->upperdelaunay ? -1 : tri_indices[neighbor->id]);
+}
+
+/* Return 1 if the specified points arrays contain at least 3 unique points,
+ * or 0 otherwise. */
+static int
+at_least_3_unique_points(int npoints, const double* x, const double* y)
+{
+ int i;
+ const int unique1 = 0; /* First unique point has index 0. */
+ int unique2 = 0; /* Second unique point index is 0 until set. */
+
+ if (npoints < 3)
+ return 0;
+
+ for (i = 1; i < npoints; ++i) {
+ if (unique2 == 0) {
+ /* Looking for second unique point. */
+ if (x[i] != x[unique1] || y[i] != y[unique1])
+ unique2 = i;
+ }
+ else {
+ /* Looking for third unique point. */
+ if ( (x[i] != x[unique1] || y[i] != y[unique1]) &&
+ (x[i] != x[unique2] || y[i] != y[unique2]) ) {
+ /* 3 unique points found, with indices 0, unique2 and i. */
+ return 1;
+ }
+ }
+ }
+
+    /* Ran out of points before 3 unique points were found. */
+ return 0;
+}
+
+/* Delaunay implementation method. If hide_qhull_errors is 1 then qhull error
+ * messages are discarded; if it is 0 then they are written to stderr. */
+static PyObject*
+delaunay_impl(int npoints, const double* x, const double* y,
+ int hide_qhull_errors)
+{
+ coordT* points = NULL;
+ facetT* facet;
+ int i, ntri, max_facet_id;
+ FILE* error_file = NULL; /* qhull expects a FILE* to write errors to. */
+ int exitcode; /* Value returned from qh_new_qhull(). */
+ int* tri_indices = NULL; /* Maps qhull facet id to triangle index. */
+ int indices[3];
+ int curlong, totlong; /* Memory remaining after qh_memfreeshort. */
+ PyObject* tuple; /* Return tuple (triangles, neighbors). */
+ const int ndim = 2;
+ npy_intp dims[2];
+ PyArrayObject* triangles = NULL;
+ PyArrayObject* neighbors = NULL;
+ int* triangles_ptr;
+ int* neighbors_ptr;
+ double x_mean = 0.0;
+ double y_mean = 0.0;
+
+ QHULL_LIB_CHECK
+
+ /* Allocate points. */
+ points = (coordT*)malloc(npoints*ndim*sizeof(coordT));
+ if (points == NULL) {
+ PyErr_SetString(PyExc_MemoryError,
+ "Could not allocate points array in qhull.delaunay");
+ goto error_before_qhull;
+ }
+
+ /* Determine mean x, y coordinates. */
+ for (i = 0; i < npoints; ++i) {
+ x_mean += x[i];
+ y_mean += y[i];
+ }
+ x_mean /= npoints;
+ y_mean /= npoints;
+
+ /* Prepare points array to pass to qhull. */
+ for (i = 0; i < npoints; ++i) {
+ points[2*i ] = x[i] - x_mean;
+ points[2*i+1] = y[i] - y_mean;
+ }
+
+ /* qhull expects a FILE* to write errors to. */
+ if (hide_qhull_errors) {
+ /* qhull errors are ignored by writing to OS-equivalent of /dev/null.
+ * Rather than have OS-specific code here, instead it is determined by
+ * setupext.py and passed in via the macro MPL_DEVNULL. */
+ error_file = fopen(STRINGIFY(MPL_DEVNULL), "w");
+ if (error_file == NULL) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Could not open devnull in qhull.delaunay");
+ goto error_before_qhull;
+ }
+ }
+ else {
+ /* qhull errors written to stderr. */
+ error_file = stderr;
+ }
+
+ /* Perform Delaunay triangulation. */
+ exitcode = qh_new_qhull(qh, ndim, npoints, points, False,
+ "qhull d Qt Qbb Qc Qz", NULL, error_file);
+ if (exitcode != qh_ERRnone) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Error in qhull Delaunay triangulation calculation: %s (exitcode=%d)%s",
+ qhull_error_msg[exitcode], exitcode,
+ hide_qhull_errors ? "; use python verbose option (-v) to see original qhull error." : "");
+ goto error;
+ }
+
+ /* Split facets so that they only have 3 points each. */
+ qh_triangulate(qh);
+
+ /* Determine ntri and max_facet_id.
+ Note that libqhull uses macros to iterate through collections. */
+ ntri = 0;
+ FORALLfacets {
+ if (!facet->upperdelaunay)
+ ++ntri;
+ }
+
+ max_facet_id = qh->facet_id - 1;
+
+ /* Create array to map facet id to triangle index. */
+ tri_indices = (int*)malloc((max_facet_id+1)*sizeof(int));
+ if (tri_indices == NULL) {
+ PyErr_SetString(PyExc_MemoryError,
+ "Could not allocate triangle map in qhull.delaunay");
+ goto error;
+ }
+
+ /* Allocate python arrays to return. */
+ dims[0] = ntri;
+ dims[1] = 3;
+ triangles = (PyArrayObject*)PyArray_SimpleNew(ndim, dims, NPY_INT);
+ if (triangles == NULL) {
+ PyErr_SetString(PyExc_MemoryError,
+ "Could not allocate triangles array in qhull.delaunay");
+ goto error;
+ }
+
+ neighbors = (PyArrayObject*)PyArray_SimpleNew(ndim, dims, NPY_INT);
+ if (neighbors == NULL) {
+ PyErr_SetString(PyExc_MemoryError,
+ "Could not allocate neighbors array in qhull.delaunay");
+ goto error;
+ }
+
+ triangles_ptr = (int*)PyArray_DATA(triangles);
+ neighbors_ptr = (int*)PyArray_DATA(neighbors);
+
+ /* Determine triangles array and set tri_indices array. */
+ i = 0;
+ FORALLfacets {
+ if (!facet->upperdelaunay) {
+ tri_indices[facet->id] = i++;
+ get_facet_vertices(facet, indices);
+ *triangles_ptr++ = (facet->toporient ? indices[0] : indices[2]);
+ *triangles_ptr++ = indices[1];
+ *triangles_ptr++ = (facet->toporient ? indices[2] : indices[0]);
+ }
+ else
+ tri_indices[facet->id] = -1;
+ }
+
+ /* Determine neighbors array. */
+ FORALLfacets {
+ if (!facet->upperdelaunay) {
+ get_facet_neighbours(facet, tri_indices, indices);
+ *neighbors_ptr++ = (facet->toporient ? indices[2] : indices[0]);
+ *neighbors_ptr++ = (facet->toporient ? indices[0] : indices[2]);
+ *neighbors_ptr++ = indices[1];
+ }
+ }
+
+ /* Clean up. */
+ qh_freeqhull(qh, !qh_ALL);
+ qh_memfreeshort(qh, &curlong, &totlong);
+ if (curlong || totlong)
+ PyErr_WarnEx(PyExc_RuntimeWarning,
+ "Qhull could not free all allocated memory", 1);
+ if (hide_qhull_errors)
+ fclose(error_file);
+ free(tri_indices);
+ free(points);
+
+ tuple = PyTuple_New(2);
+ PyTuple_SetItem(tuple, 0, (PyObject*)triangles);
+ PyTuple_SetItem(tuple, 1, (PyObject*)neighbors);
+ return tuple;
+
+error:
+ /* Clean up. */
+ Py_XDECREF(triangles);
+ Py_XDECREF(neighbors);
+ qh_freeqhull(qh, !qh_ALL);
+ qh_memfreeshort(qh, &curlong, &totlong);
+ /* Don't bother checking curlong and totlong as raising error anyway. */
+ if (hide_qhull_errors)
+ fclose(error_file);
+ free(tri_indices);
+
+error_before_qhull:
+ free(points);
+
+ return NULL;
+}
+
+/* Process python arguments and call Delaunay implementation method. */
+static PyObject*
+delaunay(PyObject *self, PyObject *args)
+{
+ PyObject* xarg;
+ PyObject* yarg;
+ PyArrayObject* xarray;
+ PyArrayObject* yarray;
+ PyObject* ret;
+ int npoints;
+ const double* x;
+ const double* y;
+
+ if (!PyArg_ParseTuple(args, "OO", &xarg, &yarg)) {
+ PyErr_SetString(PyExc_ValueError, "expecting x and y arrays");
+ return NULL;
+ }
+
+ xarray = (PyArrayObject*)PyArray_ContiguousFromObject(xarg, NPY_DOUBLE,
+ 1, 1);
+ yarray = (PyArrayObject*)PyArray_ContiguousFromObject(yarg, NPY_DOUBLE,
+ 1, 1);
+ if (xarray == 0 || yarray == 0 ||
+ PyArray_DIM(xarray,0) != PyArray_DIM(yarray, 0)) {
+ Py_XDECREF(xarray);
+ Py_XDECREF(yarray);
+ PyErr_SetString(PyExc_ValueError,
+ "x and y must be 1D arrays of the same length");
+ return NULL;
+ }
+
+ npoints = PyArray_DIM(xarray, 0);
+
+ if (npoints < 3) {
+ Py_XDECREF(xarray);
+ Py_XDECREF(yarray);
+ PyErr_SetString(PyExc_ValueError,
+ "x and y arrays must have a length of at least 3");
+ return NULL;
+ }
+
+ x = (const double*)PyArray_DATA(xarray);
+ y = (const double*)PyArray_DATA(yarray);
+
+ if (!at_least_3_unique_points(npoints, x, y)) {
+ Py_XDECREF(xarray);
+ Py_XDECREF(yarray);
+ PyErr_SetString(PyExc_ValueError,
+ "x and y arrays must consist of at least 3 unique points");
+ return NULL;
+ }
+
+ ret = delaunay_impl(npoints, x, y, Py_VerboseFlag == 0);
+
+ Py_XDECREF(xarray);
+ Py_XDECREF(yarray);
+ return ret;
+}
+
+/* Return qhull version string for assistance in debugging. */
+static PyObject*
+version(void)
+{
+ return PyBytes_FromString(qh_version);
+}
+
+static PyMethodDef qhull_methods[] = {
+ {"delaunay", (PyCFunction)delaunay, METH_VARARGS, ""},
+ {"version", (PyCFunction)version, METH_NOARGS, ""},
+ {NULL, NULL, 0, NULL}
+};
+
+#if PY3K
+static struct PyModuleDef qhull_module = {
+ PyModuleDef_HEAD_INIT,
+ "qhull",
+ "Computing Delaunay triangulations.\n",
+ -1,
+ qhull_methods,
+ NULL, NULL, NULL, NULL
+};
+
+#define ERROR_RETURN return NULL
+
+PyMODINIT_FUNC
+PyInit__qhull(void)
+#else
+#define ERROR_RETURN return
+
+PyMODINIT_FUNC
+init_qhull(void)
+#endif
+{
+ PyObject* m;
+
+ #if PY3K
+ m = PyModule_Create(&qhull_module);
+ #else
+ m = Py_InitModule3("_qhull", qhull_methods,
+ "Computing Delaunay triangulations.\n");
+ #endif
+
+ if (m == NULL) {
+ ERROR_RETURN;
+ }
+
+ import_array();
+
+ #if PY3K
+ return m;
+ #endif
+}
diff --git a/contrib/python/matplotlib/py2/src/ya.make b/contrib/python/matplotlib/py2/src/ya.make
new file mode 100644
index 00000000000..544aba39961
--- /dev/null
+++ b/contrib/python/matplotlib/py2/src/ya.make
@@ -0,0 +1,66 @@
+PY2_LIBRARY()
+
+LICENSE(PSF-2.0)
+
+NO_COMPILER_WARNINGS()
+
+PEERDIR(
+ ADDINCL contrib/libs/freetype
+ ADDINCL contrib/libs/libpng
+ ADDINCL contrib/python/numpy
+ contrib/libs/qhull
+ contrib/python/matplotlib/py2/extern/agg24-svn
+ contrib/python/matplotlib/py2/extern/ttconv
+)
+
+ADDINCL(
+ contrib/libs/qhull/libqhull_r
+ contrib/python/matplotlib/py2
+ contrib/python/matplotlib/py2/extern
+ contrib/python/matplotlib/py2/extern/agg24-svn/include
+)
+
+CFLAGS(
+ -D_MULTIARRAYMODULE
+ -DFREETYPE_BUILD_TYPE=local
+ -DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION
+ -DMPL_DEVNULL=/dev/null
+)
+
+IF (OS_WINDOWS)
+ LDFLAGS(
+ Psapi.lib
+ )
+ENDIF()
+
+PY_REGISTER(
+ matplotlib._contour
+ matplotlib._image # peerdir agg24-svn
+ matplotlib._path # peerdir agg24-svn
+ matplotlib._png
+ matplotlib._qhull # peerdir libqhull
+ matplotlib.backends._backend_agg # peerdir agg24-svn
+ matplotlib.backends._tkagg
+ matplotlib.ft2font
+ matplotlib.ttconv # peerdir ttconv
+)
+
+SRCS(
+ _backend_agg.cpp
+ _backend_agg_wrapper.cpp
+ _contour.cpp
+ _contour_wrapper.cpp
+ _image.cpp
+ _image_wrapper.cpp
+ _path_wrapper.cpp
+ _png.cpp
+ _tkagg.cpp
+ _ttconv.cpp
+ ft2font.cpp
+ ft2font_wrapper.cpp
+ mplutils.cpp
+ py_converters.cpp
+ qhull_wrap.c
+)
+
+END()
diff --git a/contrib/python/matplotlib/py2/ya.make b/contrib/python/matplotlib/py2/ya.make
new file mode 100644
index 00000000000..7242059690a
--- /dev/null
+++ b/contrib/python/matplotlib/py2/ya.make
@@ -0,0 +1,243 @@
+PY2_LIBRARY()
+
+LICENSE(PSF-2.0)
+
+VERSION(2.2.4)
+
+PEERDIR(
+ contrib/deprecated/python/backports.functools-lru-cache
+ contrib/deprecated/python/functools32
+ contrib/python/mock
+ contrib/deprecated/python/subprocess32
+ contrib/python/cycler
+ contrib/python/python-dateutil
+ contrib/python/kiwisolver
+ contrib/python/matplotlib/py2/src
+ contrib/python/matplotlib/py2/matplotlib/tri
+ contrib/python/numpy
+ contrib/python/pyparsing
+ contrib/python/pytz
+ contrib/python/six
+)
+
+NO_CHECK_IMPORTS(
+ matplotlib.backends.*
+ matplotlib.sphinxext.*
+ matplotlib.testing.*
+ mpl_toolkits.*
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_LINT()
+
+FROM_SANDBOX(FILE 1119073729 OUT_NOAUTO mpl-data.tar.gz)
+
+RESOURCE(
+ mpl-data.tar.gz /mpl-data/mpl-data.tar.gz
+)
+
+PY_SRCS(
+ TOP_LEVEL
+ matplotlib/__init__.py
+ matplotlib/_animation_data.py
+ matplotlib/_cm.py
+ matplotlib/_cm_listed.py
+ matplotlib/_color_data.py
+ matplotlib/_constrained_layout.py
+ matplotlib/_layoutbox.py
+ matplotlib/_mathtext_data.py
+ matplotlib/_pylab_helpers.py
+ matplotlib/_version.py
+ matplotlib/afm.py
+ matplotlib/animation.py
+ matplotlib/artist.py
+ matplotlib/axes/__init__.py
+ matplotlib/axes/_axes.py
+ matplotlib/axes/_base.py
+ matplotlib/axes/_subplots.py
+ matplotlib/axis.py
+ matplotlib/backend_bases.py
+ matplotlib/backend_managers.py
+ matplotlib/backend_tools.py
+ matplotlib/backends/__init__.py
+ matplotlib/backends/_backend_tk.py
+ matplotlib/backends/_gtk3_compat.py
+ matplotlib/backends/backend_agg.py
+ matplotlib/backends/backend_cairo.py
+ matplotlib/backends/backend_gdk.py
+ matplotlib/backends/backend_gtk.py
+ matplotlib/backends/backend_gtk3.py
+ matplotlib/backends/backend_gtk3agg.py
+ matplotlib/backends/backend_gtk3cairo.py
+ matplotlib/backends/backend_gtkagg.py
+ matplotlib/backends/backend_gtkcairo.py
+ matplotlib/backends/backend_macosx.py
+ matplotlib/backends/backend_mixed.py
+ matplotlib/backends/backend_nbagg.py
+ matplotlib/backends/backend_pdf.py
+ matplotlib/backends/backend_pgf.py
+ matplotlib/backends/backend_ps.py
+ matplotlib/backends/backend_qt4.py
+ matplotlib/backends/backend_qt4agg.py
+ matplotlib/backends/backend_qt4cairo.py
+ matplotlib/backends/backend_qt5.py
+ matplotlib/backends/backend_qt5agg.py
+ matplotlib/backends/backend_qt5cairo.py
+ matplotlib/backends/backend_svg.py
+ matplotlib/backends/backend_template.py
+ matplotlib/backends/backend_tkagg.py
+ matplotlib/backends/backend_tkcairo.py
+ matplotlib/backends/backend_webagg.py
+ matplotlib/backends/backend_webagg_core.py
+ matplotlib/backends/backend_wx.py
+ matplotlib/backends/backend_wxagg.py
+ matplotlib/backends/backend_wxcairo.py
+ matplotlib/backends/qt_compat.py
+ matplotlib/backends/qt_editor/__init__.py
+ matplotlib/backends/qt_editor/figureoptions.py
+ matplotlib/backends/qt_editor/formlayout.py
+ matplotlib/backends/qt_editor/formsubplottool.py
+ matplotlib/backends/tkagg.py
+ matplotlib/backends/windowing.py
+ matplotlib/backends/wx_compat.py
+ matplotlib/bezier.py
+ matplotlib/blocking_input.py
+ matplotlib/category.py
+ matplotlib/cbook/__init__.py
+ matplotlib/cbook/_backports.py
+ matplotlib/cbook/deprecation.py
+ matplotlib/cm.py
+ matplotlib/collections.py
+ matplotlib/colorbar.py
+ matplotlib/colors.py
+ matplotlib/compat/__init__.py
+ matplotlib/compat/subprocess.py
+ matplotlib/container.py
+ matplotlib/contour.py
+ matplotlib/dates.py
+ matplotlib/docstring.py
+ matplotlib/dviread.py
+ matplotlib/figure.py
+ matplotlib/font_manager.py
+ matplotlib/fontconfig_pattern.py
+ matplotlib/gridspec.py
+ matplotlib/hatch.py
+ matplotlib/image.py
+ matplotlib/legend.py
+ matplotlib/legend_handler.py
+ matplotlib/lines.py
+ matplotlib/markers.py
+ matplotlib/mathtext.py
+ matplotlib/mlab.py
+ matplotlib/offsetbox.py
+ matplotlib/patches.py
+ matplotlib/path.py
+ matplotlib/patheffects.py
+ matplotlib/projections/__init__.py
+ matplotlib/projections/geo.py
+ matplotlib/projections/polar.py
+ matplotlib/pylab.py
+ matplotlib/pyplot.py
+ matplotlib/quiver.py
+ matplotlib/rcsetup.py
+ matplotlib/sankey.py
+ matplotlib/scale.py
+ matplotlib/sphinxext/__init__.py
+ matplotlib/sphinxext/mathmpl.py
+ matplotlib/sphinxext/only_directives.py
+ matplotlib/sphinxext/plot_directive.py
+ matplotlib/spines.py
+ matplotlib/stackplot.py
+ matplotlib/streamplot.py
+ matplotlib/style/__init__.py
+ matplotlib/style/core.py
+ matplotlib/table.py
+ matplotlib/testing/__init__.py
+ matplotlib/testing/_nose/__init__.py
+ matplotlib/testing/_nose/decorators.py
+ matplotlib/testing/_nose/exceptions.py
+ matplotlib/testing/_nose/plugins/__init__.py
+ matplotlib/testing/_nose/plugins/knownfailure.py
+ matplotlib/testing/_nose/plugins/performgc.py
+ matplotlib/testing/compare.py
+ matplotlib/testing/conftest.py
+ matplotlib/testing/decorators.py
+ matplotlib/testing/determinism.py
+ matplotlib/testing/disable_internet.py
+ matplotlib/testing/exceptions.py
+ matplotlib/testing/jpl_units/Duration.py
+ matplotlib/testing/jpl_units/Epoch.py
+ matplotlib/testing/jpl_units/EpochConverter.py
+ matplotlib/testing/jpl_units/StrConverter.py
+ matplotlib/testing/jpl_units/UnitDbl.py
+ matplotlib/testing/jpl_units/UnitDblConverter.py
+ matplotlib/testing/jpl_units/UnitDblFormatter.py
+ matplotlib/testing/jpl_units/__init__.py
+ matplotlib/testing/noseclasses.py
+ matplotlib/texmanager.py
+ matplotlib/text.py
+ matplotlib/textpath.py
+ matplotlib/ticker.py
+ matplotlib/tight_bbox.py
+ matplotlib/tight_layout.py
+ matplotlib/transforms.py
+ matplotlib/type1font.py
+ matplotlib/units.py
+ matplotlib/widgets.py
+ mpl_toolkits/__init__.py
+ mpl_toolkits/axes_grid/__init__.py
+ mpl_toolkits/axes_grid/anchored_artists.py
+ mpl_toolkits/axes_grid/angle_helper.py
+ mpl_toolkits/axes_grid/axes_divider.py
+ mpl_toolkits/axes_grid/axes_grid.py
+ mpl_toolkits/axes_grid/axes_rgb.py
+ mpl_toolkits/axes_grid/axes_size.py
+ mpl_toolkits/axes_grid/axis_artist.py
+ mpl_toolkits/axes_grid/axisline_style.py
+ mpl_toolkits/axes_grid/axislines.py
+ mpl_toolkits/axes_grid/clip_path.py
+ mpl_toolkits/axes_grid/colorbar.py
+ mpl_toolkits/axes_grid/floating_axes.py
+ mpl_toolkits/axes_grid/grid_finder.py
+ mpl_toolkits/axes_grid/grid_helper_curvelinear.py
+ mpl_toolkits/axes_grid/inset_locator.py
+ mpl_toolkits/axes_grid/parasite_axes.py
+ mpl_toolkits/axes_grid1/__init__.py
+ mpl_toolkits/axes_grid1/anchored_artists.py
+ mpl_toolkits/axes_grid1/axes_divider.py
+ mpl_toolkits/axes_grid1/axes_grid.py
+ mpl_toolkits/axes_grid1/axes_rgb.py
+ mpl_toolkits/axes_grid1/axes_size.py
+ mpl_toolkits/axes_grid1/colorbar.py
+ mpl_toolkits/axes_grid1/inset_locator.py
+ mpl_toolkits/axes_grid1/mpl_axes.py
+ mpl_toolkits/axes_grid1/parasite_axes.py
+ mpl_toolkits/axisartist/__init__.py
+ mpl_toolkits/axisartist/angle_helper.py
+ mpl_toolkits/axisartist/axes_divider.py
+ mpl_toolkits/axisartist/axes_grid.py
+ mpl_toolkits/axisartist/axes_rgb.py
+ mpl_toolkits/axisartist/axis_artist.py
+ mpl_toolkits/axisartist/axisline_style.py
+ mpl_toolkits/axisartist/axislines.py
+ mpl_toolkits/axisartist/clip_path.py
+ mpl_toolkits/axisartist/floating_axes.py
+ mpl_toolkits/axisartist/grid_finder.py
+ mpl_toolkits/axisartist/grid_helper_curvelinear.py
+ mpl_toolkits/axisartist/parasite_axes.py
+ mpl_toolkits/mplot3d/__init__.py
+ mpl_toolkits/mplot3d/art3d.py
+ mpl_toolkits/mplot3d/axes3d.py
+ mpl_toolkits/mplot3d/axis3d.py
+ mpl_toolkits/mplot3d/proj3d.py
+ pylab.py
+)
+
+END()
+
+RECURSE(
+ extern
+ matplotlib/tri
+ src
+)