ast_errors stringlengths 0 3.2k | d_id int64 44 121k | id int64 70 338k | n_whitespaces int64 3 14k | path stringlengths 8 134 | n_words int64 4 4.82k | n_identifiers int64 1 131 | random_cut stringlengths 16 15.8k | commit_message stringlengths 2 15.3k | fun_name stringlengths 1 84 | commit_id stringlengths 40 40 | repo stringlengths 3 28 | file_name stringlengths 5 79 | ast_levels int64 6 31 | nloc int64 1 548 | url stringlengths 31 59 | complexity int64 1 66 | token_counts int64 6 2.13k | n_ast_errors int64 0 28 | vocab_size int64 4 1.11k | n_ast_nodes int64 15 19.2k | language stringclasses 1
value | documentation dict | code stringlengths 101 62.2k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
82,113 | 277,639 | 441 | keras/layers/reshaping/reshape.py | 105 | 18 | def _fix_unknown_dimension(self, input_shape, output_shape):
output_shape = list(output_shape)
msg = (
"total size of new array must be unchanged, "
"input_shape = {}, output_shape = {}".format(
input_shape, output_shape
)
)
k... | reduce layers line-too-long | _fix_unknown_dimension | 8401e08334d4b1f102a6ee9479738bacfee0600c | keras | reshape.py | 17 | 28 | https://github.com/keras-team/keras.git | 8 | 128 | 0 | 65 | 212 | Python | {
"docstring": "Find and replace a missing dimension in an output shape.\n\n This is a near direct port of the internal Numpy function\n `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`\n\n Args:\n input_shape: Shape of array being reshaped\n output_shape: Desired... | def _fix_unknown_dimension(self, input_shape, output_shape):
output_shape = list(output_shape)
msg = (
"total size of new array must be unchanged, "
"input_shape = {}, output_shape = {}".format(
input_shape, output_shape
)
)
k... | |
38,437 | 159,898 | 52 | numpy/lib/tests/test_loadtxt.py | 23 | 15 | def test_converter_with_unicode_dtype():
txt = StringIO('abc,def\nrst,xyz')
conv = bytes.upper
res = np.loadtxt(
txt, dtype=np.dtype("U3"), converters=conv, delimiter=",")
expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']])
assert_equal(res, expec | TST,STY: Add small additional tests for converters/usecols
Also fix style a bit to silence linter (hopefully), removes some
black style, but I am not too opinionated about that :) | test_converter_with_unicode_dtype | 1e6b72b42292e62c1c86e4f77e30324e43aaa218 | numpy | test_loadtxt.py | 12 | 7 | https://github.com/numpy/numpy.git | 1 | 67 | 0 | 20 | 118 | Python | {
"docstring": "\n With the default 'bytes' encoding, tokens are encoded prior to being\n passed to the converter. This means that the output of the converter may\n be bytes instead of unicode as expected by `read_rows`.\n\n This test checks that outputs from the above scenario are properly decoded\n p... | def test_converter_with_unicode_dtype():
txt = StringIO('abc,def\nrst,xyz')
conv = bytes.upper
res = np.loadtxt(
txt, dtype=np.dtype("U3"), converters=conv, delimiter=",")
expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']])
assert_equal(res, expected)
| |
10,767 | 53,272 | 23 | src/prefect/orion/database/alembic_commands.py | 11 | 6 | def alembic_stamp(revision):
# lazy import for performance
import alembic.command
alemb | code review revisions pt3 | alembic_stamp | 36e7e0838aeaffc9492b330297e4905f3ab4b11f | prefect | alembic_commands.py | 9 | 3 | https://github.com/PrefectHQ/prefect.git | 1 | 24 | 0 | 10 | 42 | Python | {
"docstring": "\n Stamp the revision table with the given revision; don’t run any migrations\n\n Args:\n revision: The revision passed to `alembic stamp`.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 20,
"vocab_size": 18
} | def alembic_stamp(revision):
# lazy import for performance
import alembic.command
alembic.command.stamp(alembic_config(), revision=revision)
| |
27,261 | 122,886 | 182 | jax/_src/pjit.py | 98 | 17 | def unflatten_superdims(assignment):
def check(cond):
if cond: return
raise NotImplementedError("Failed to convert OpSharding into a ShardingSpec. "
"Please open a bug report!")
flat_assignment = np.asarray(assignment, dtype=np.int64)
check(flat_assignment[0] == 0)
dims ... | Move `pjit.py` to `jax/_src` in preparation for merging the `jit` and `pjit` frontend APIs
PiperOrigin-RevId: 495944279 | unflatten_superdims | 4b587fa1f0049db5366fd04812ab940d80a71a22 | jax | pjit.py | 11 | 16 | https://github.com/google/jax.git | 4 | 101 | 0 | 74 | 192 | Python | {
"docstring": "Unflatten a list of dimension sizes and their strides that generates assignment.\n\n If this function succeeds for a given ``assignment``, then the following property\n should be satisfied::\n\n dims_with_strides = unflatten_superdims(assignment)\n base_array = np.arange(map(fst, sorted(dims_w... | def unflatten_superdims(assignment):
def check(cond):
if cond: return
raise NotImplementedError("Failed to convert OpSharding into a ShardingSpec. "
"Please open a bug report!")
flat_assignment = np.asarray(assignment, dtype=np.int64)
check(flat_assignment[0] == 0)
dims ... | |
18,080 | 86,128 | 262 | tests/sentry/event_manager/test_event_manager.py | 50 | 21 | def test_perf_issue_no_associate_error_event(self):
self.project.update_option("sentry:performance_issue_creation_rate", 1.0)
with mock.patch("sentry_sdk.tracing.Span.containing_transaction"), self.feature(
{
"projects:performance-suspect-spans-ingestion": True,
... | chore(perf issues): Check group type before adding event (#39171)
Ensure the group type matches the kind of event before association, e.g.
don't put an error event on a performance issue and vice versa. | test_perf_issue_no_associate_error_event | bbd7137b3d379744265f46564d5878490046dd3b | sentry | test_event_manager.py | 12 | 19 | https://github.com/getsentry/sentry.git | 1 | 132 | 0 | 35 | 222 | Python | {
"docstring": "Test that you can't associate an error event with a performance issue",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def test_perf_issue_no_associate_error_event(self):
self.project.update_option("sentry:performance_issue_creation_rate", 1.0)
with mock.patch("sentry_sdk.tracing.Span.containing_transaction"), self.feature(
{
"projects:performance-suspect-spans-ingestion": True,
... | |
29,495 | 131,286 | 476 | python/ray/tests/test_autoscaler.py | 134 | 45 | def testNodeTerminatedDuringUpdate(self):
cluster_config = copy.deepcopy(MOCK_DEFAULT_CONFIG)
cluster_config["available_node_types"]["ray.worker.default"]["min_workers"] = 2
cluster_config["worker_start_ray_commands"] = ["ray_start_cmd"]
# Don't need the extra node type or a do... | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | testNodeTerminatedDuringUpdate | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | test_autoscaler.py | 11 | 89 | https://github.com/ray-project/ray.git | 3 | 545 | 0 | 99 | 392 | Python | {
"docstring": "\n Tests autoscaler handling a node getting terminated during an update\n triggered by the node missing a heartbeat.\n\n Extension of testRecoverUnhealthyWorkers.\n\n In this test, two nodes miss a heartbeat.\n One of them (node 0) is terminated during its recovery u... | def testNodeTerminatedDuringUpdate(self):
cluster_config = copy.deepcopy(MOCK_DEFAULT_CONFIG)
cluster_config["available_node_types"]["ray.worker.default"]["min_workers"] = 2
cluster_config["worker_start_ray_commands"] = ["ray_start_cmd"]
# Don't need the extra node type or a do... | |
84,626 | 284,059 | 529 | openbb_terminal/cryptocurrency/overview/overview_controller.py | 66 | 35 | def call_cr(self, other_args):
parser = argparse.ArgumentParser(
prog="cr",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=,
)
parser.add_argument(
"-t",
"--type",
d... | Replaces coingecko deprecated commands (#1650)
* removes cgproducts and cgplatforms and replaces with cr
* add ignore word
* added .openbb script
* reverted crypto change
* doc
* failing tests
* trying chart and fixed minh issues
* Create barh
* Fix ticker labels
* fix test
* loanscan mo... | call_cr | 670402396e7e25e95bd6497affb143565d9bd4ea | OpenBBTerminal | overview_controller.py | 13 | 49 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 4 | 196 | 0 | 56 | 346 | Python | {
"docstring": "Process cr commandDisplays crypto {borrow,supply} interest rates for cryptocurrencies across several platforms.\n You can select rate type with --type {borrow,supply}\n You can display only N number of platforms with --limit parameter.Cryptocurrencies to search interest r... | def call_cr(self, other_args):
parser = argparse.ArgumentParser(
prog="cr",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=,
)
parser.add_argument(
"-t",
"--type",
d... | |
24,479 | 111,750 | 74 | nni/retiarii/oneshot/pytorch/base_lightning.py | 20 | 6 | def export(self):
result = {}
for na | Lightning implementation for retiarii oneshot nas (#4479) | export | 8b2eb425274cdb4537fbce4a315aec12a378d6db | nni | base_lightning.py | 12 | 6 | https://github.com/microsoft/nni.git | 3 | 37 | 0 | 17 | 61 | Python | {
"docstring": "\n Export the NAS result, ideally the best choice of each nas_modules.\n You may implement an ``export`` method for your customized nas_module.\n\n Returns\n --------\n result : Dict[str, int]\n Keys are names of nas_modules, and values are the choice indi... | def export(self):
result = {}
for name, module in self.nas_modules:
if name not in result:
result[name] = module.export()
return result
| |
43,614 | 181,840 | 162 | tpot/base.py | 70 | 13 | def clean_pipeline_string(self, individual):
dirty_string = str(individual)
# There are many parameter prefixes in the pipeline strings, used solely for
# making the terminal name unique, eg. LinearSVC__.
parameter_prefixes = [
(m.start(), m.end()) for m in re.findit... | Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068. | clean_pipeline_string | 388616b6247ca4ea8de4e2f340d6206aee523541 | tpot | base.py | 12 | 9 | https://github.com/EpistasisLab/tpot.git | 3 | 74 | 0 | 55 | 120 | Python | {
"docstring": "Provide a string of the individual without the parameter prefixes.\n\n Parameters\n ----------\n individual: individual\n Individual which should be represented by a pretty string\n\n Returns\n -------\n A string like str(individual), but with param... | def clean_pipeline_string(self, individual):
dirty_string = str(individual)
# There are many parameter prefixes in the pipeline strings, used solely for
# making the terminal name unique, eg. LinearSVC__.
parameter_prefixes = [
(m.start(), m.end()) for m in re.findit... | |
49,151 | 199,118 | 448 | sympy/polys/matrices/linsolve.py | 129 | 33 | def _lin_eq2dict(a, symset):
if a in symset:
return S.Zero, {a: S.One}
elif a.is_Add:
terms_list = defaultdict(list)
coeff_list = []
for ai in a.args:
ci, ti = _lin_eq2dict(ai, symset)
coeff_list.append(ci)
for mij, cij in ti.items():
... | Revert "solve changes" | _lin_eq2dict | 5534ff6796b8d515192576f771af8488a838775c | sympy | linsolve.py | 16 | 38 | https://github.com/sympy/sympy.git | 14 | 252 | 0 | 62 | 402 | Python | {
"docstring": "Efficiently convert a linear equation to a dict of coefficients",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def _lin_eq2dict(a, symset):
if a in symset:
return S.Zero, {a: S.One}
elif a.is_Add:
terms_list = defaultdict(list)
coeff_list = []
for ai in a.args:
ci, ti = _lin_eq2dict(ai, symset)
coeff_list.append(ci)
for mij, cij in ti.items():
... | |
29,231 | 130,325 | 62 | python/ray/autoscaler/_private/_azure/node_provider.py | 12 | 6 | def internal_ip(self, node_id):
ip = (
self._get_cached_node(node_id=node_id)["internal_ip"]
or self._get_node(node_id=node_id)["internal_ip"]
)
return ip
| [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | internal_ip | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | node_provider.py | 12 | 6 | https://github.com/ray-project/ray.git | 2 | 37 | 0 | 11 | 63 | Python | {
"docstring": "Returns the internal ip (Ray ip) of the given node.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def internal_ip(self, node_id):
ip = (
self._get_cached_node(node_id=node_id)["internal_ip"]
or self._get_node(node_id=node_id)["internal_ip"]
)
return ip
| |
77,374 | 262,793 | 261 | PyInstaller/archive/writers.py | 49 | 30 | def _write_file(self, source, dest, type, compress=False):
start = self.lib.tell()
length = os.stat(source).st_size
with open(source, 'rb') as f:
| Fix file handle leaks.
This is mostly a refactoring of CArchiveWriter().add() which has gotten somewhat
tangled trying to apply various file modifications whilst simultaneously
juggling file streaming and optional zip compression. Since the modifications
are all done on Python source/byte code files which are small, s... | _write_file | 9541ad638f73c1442c35ea870ad9c6e4f8cd9b62 | pyinstaller | writers.py | 17 | 16 | https://github.com/pyinstaller/pyinstaller.git | 4 | 152 | 0 | 42 | 243 | Python | {
"docstring": "\n Stream copy a large file into the archive and update the table of contents.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 13
} | def _write_file(self, source, dest, type, compress=False):
start = self.lib.tell()
length = os.stat(source).st_size
with open(source, 'rb') as f:
if compress:
buffer = bytearray(16 * 1024)
compressor = zlib.compressobj(self.LEVEL)
... | |
41,755 | 176,189 | 517 | networkx/linalg/graphmatrix.py | 164 | 38 | def incidence_matrix(G, nodelist=None, edgelist=None, oriented=False, weight=None):
import scipy as sp
import scipy.sparse # call as sp.sparse
if nodelist is None:
nodelist = list(G)
if edgelist is None:
if G.is_multigraph():
edgelist = list(G.edges(keys=True))
... | Use scipy.sparse array datastructure (#5139)
* Step 1: use sparse arrays in nx.to_scipy_sparse_matrix.
Seems like a reasonable place to start.
nx.to_scipy_sparse_matrix is one of the primary interfaces to
scipy.sparse from within NetworkX.
* 1: Use np.outer instead of mult col/row vectors
Fix two instances ... | incidence_matrix | 5dfd57af2a141a013ae3753e160180b82bec9469 | networkx | graphmatrix.py | 18 | 44 | https://github.com/networkx/networkx.git | 11 | 290 | 0 | 103 | 463 | Python | {
"docstring": "Returns incidence matrix of G.\n\n The incidence matrix assigns each row to a node and each column to an edge.\n For a standard incidence matrix a 1 appears wherever a row's node is\n incident on the column's edge. For an oriented incidence matrix each\n edge is assigned an orientation (a... | def incidence_matrix(G, nodelist=None, edgelist=None, oriented=False, weight=None):
import scipy as sp
import scipy.sparse # call as sp.sparse
if nodelist is None:
nodelist = list(G)
if edgelist is None:
if G.is_multigraph():
edgelist = list(G.edges(keys=True))
... | |
82,375 | 278,117 | 677 | keras/feature_column/sequence_feature_column.py | 98 | 31 | def call(self, features, training=None):
if not isinstance(features, dict):
raise ValueError(
"We expected a dictionary here. Instead we got: ", features
)
if training is None:
training = backend.learning_phase()
transformation_cache =... | resolve line-too-long in feature_column | call | 6fafb567af4e4d9f42974d0b6c55b18bc03e17eb | keras | sequence_feature_column.py | 16 | 39 | https://github.com/keras-team/keras.git | 5 | 167 | 0 | 73 | 264 | Python | {
"docstring": "Returns sequence input corresponding to the `feature_columns`.\n\n Args:\n features: A dict mapping keys to tensors.\n training: Python boolean or None, indicating whether to the layer is\n being run in training mode. This argument is passed to the call\n ... | def call(self, features, training=None):
if not isinstance(features, dict):
raise ValueError(
"We expected a dictionary here. Instead we got: ", features
)
if training is None:
training = backend.learning_phase()
transformation_cache =... | |
56,094 | 220,706 | 43 | python3.10.4/Lib/asyncio/sslproto.py | 15 | 7 | def feed_eof(self):
self._incoming.write | add python 3.10.4 for windows | feed_eof | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | sslproto.py | 9 | 4 | https://github.com/XX-net/XX-Net.git | 2 | 36 | 0 | 12 | 62 | Python | {
"docstring": "Send a potentially \"ragged\" EOF.\n\n This method will raise an SSL_ERROR_EOF exception if the EOF is\n unexpected.\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 17,
"vocab_size": 17
} | def feed_eof(self):
self._incoming.write_eof()
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
| |
44,714 | 184,614 | 59 | src/textual/app.py | 16 | 6 | def screen(self) -> Screen:
try:
return | lots of docstrings | screen | b22436933acc0d7440ec300f971a249bd6105a5b | textual | app.py | 11 | 13 | https://github.com/Textualize/textual.git | 2 | 28 | 0 | 16 | 49 | Python | {
"docstring": "Get the current screen.\n\n Raises:\n ScreenStackError: If there are no screens on the stack.\n\n Returns:\n Screen: The currently active screen.\n ",
"language": "en",
"n_whitespaces": 63,
"n_words": 20,
"vocab_size": 18
} | def screen(self) -> Screen:
try:
return self._screen_stack[-1]
except IndexError:
raise ScreenStackError("No screens on stack") from None
| |
49,872 | 201,102 | 31 | tests/apps/tests.py | 6 | 7 | def test_empty_dunder_path_no_dunder_file(self):
with self.assertRaises(ImproperlyConfigured):
AppConfig("label", Stub(__path__=[] | Refs #33476 -- Reformatted code with Black. | test_empty_dunder_path_no_dunder_file | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 13 | 3 | https://github.com/django/django.git | 1 | 26 | 0 | 6 | 49 | Python | {
"docstring": "If the __path__ attr is empty and there is no __file__, raise.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def test_empty_dunder_path_no_dunder_file(self):
with self.assertRaises(ImproperlyConfigured):
AppConfig("label", Stub(__path__=[]))
| |
37,440 | 158,287 | 352 | d2l/tensorflow.py | 134 | 33 | def train_epoch_ch3(net, train_iter, loss, updater):
# Sum of training loss, sum of training accuracy, no. of examples
metric = Accumulator(3)
for X, y | [PaddlePaddle] Merge master into Paddle branch (#1186)
* change 15.2 title in chinese version (#1109)
change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘
* 修改部分语义表述 (#1105)
* Update r0.17.5 (#1120)
* Bump versions in installation
* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)
* line 313: "b... | train_epoch_ch3 | b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2 | d2l-zh | tensorflow.py | 15 | 19 | https://github.com/d2l-ai/d2l-zh.git | 5 | 207 | 0 | 98 | 324 | Python | {
"docstring": "The training loop defined in Chapter 3.\n\n Defined in :numref:`sec_softmax_scratch`",
"language": "en",
"n_whitespaces": 12,
"n_words": 10,
"vocab_size": 9
} | def train_epoch_ch3(net, train_iter, loss, updater):
# Sum of training loss, sum of training accuracy, no. of examples
metric = Accumulator(3)
for X, y in train_iter:
# Compute gradients and update parameters
with tf.GradientTape() as tape:
y_hat = net(X)
# Keras... | |
76,674 | 261,169 | 82 | sklearn/utils/discovery.py | 29 | 11 | def all_estimators(type_filter=None):
# lazy import to avoid circular imports from sklearn.base
from . import IS_PYPY
from ._testing import ignore_warnings
| MNT numpydoc validation for Displays (#21469)
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com> | all_estimators | b22f7fa552c03aa7f6b9b4d661470d0173f8db5d | scikit-learn | discovery.py | 7 | 67 | https://github.com/scikit-learn/scikit-learn.git | 23 | 361 | 0 | 23 | 62 | Python | {
"docstring": "Get a list of all estimators from `sklearn`.\n\n This function crawls the module and gets all classes that inherit\n from BaseEstimator. Classes that are defined in test-modules are not\n included.\n\n Parameters\n ----------\n type_filter : {\"classifier\", \"regressor\", \"cluster\... | def all_estimators(type_filter=None):
# lazy import to avoid circular imports from sklearn.base
from . import IS_PYPY
from ._testing import ignore_warnings
from ..base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
TransformerMixin,
ClusterMixin,
... | |
@image_comparison(['legend_various_labels'], remove_text=True) | 24,225 | 110,587 | 319 | lib/matplotlib/tests/test_legend.py | 109 | 39 | def test_legend_auto5():
fig, axs = plt.subplots(ncols=2, figsize=(9.6, 4.8))
leg_bboxes = []
for ax, loc in zip(axs.flat, ("center", "best")):
# An Ellipse patch at the top, a U-shaped Polygon patch at the
# bottom and a ring-like Wedge patch: the correct placement of
# the le... | ENH: rely on non-rectangular patch paths rather than bboxes for legend auto-placing (fix #9580) (#9598)
* use path rather than bbox for non rectangular patches
* Add tests
* Add a short breadcrumb note in api_changes | test_legend_auto5 | d8bb1a52316c38434e526412c27d9c4b01960084 | matplotlib | test_legend.py | 14 | 19 | https://github.com/matplotlib/matplotlib.git | 3 | 300 | 1 | 82 | 390 | Python | {
"docstring": "\n Check that the automatic placement handle a rather complex\n case with non rectangular patch. Related to issue #9580.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 18,
"vocab_size": 18
} | def test_legend_auto5():
fig, axs = plt.subplots(ncols=2, figsize=(9.6, 4.8))
leg_bboxes = []
for ax, loc in zip(axs.flat, ("center", "best")):
# An Ellipse patch at the top, a U-shaped Polygon patch at the
# bottom and a ring-like Wedge patch: the correct placement of
# the le... |
44,195 | 183,454 | 49 | src/textual/widgets/text_input.py | 13 | 8 | def _toggle_cursor_visible(self):
if time.monotonic() - self._last_keypress_time > self.cursor | Conditional blinking | _toggle_cursor_visible | d8179c70dc06e06b2f445fdfb47fb7012d4cb2ed | textual | text_input.py | 10 | 4 | https://github.com/Textualize/textual.git | 2 | 34 | 0 | 12 | 59 | Python | {
"docstring": "Manages the blinking of the cursor - ensuring blinking only starts when the\n user hasn't pressed a key in some time",
"language": "en",
"n_whitespaces": 27,
"n_words": 21,
"vocab_size": 18
} | def _toggle_cursor_visible(self):
if time.monotonic() - self._last_keypress_time > self.cursor_blink_period:
self._cursor_blink_visible = not self._cursor_blink_visible
self.refresh()
| |
19,571 | 98,450 | 651 | src/sentry/search/events/filter.py | 191 | 55 | def parse_semver(version, operator) -> Optional[SemverFilter]:
(operator, negated) = handle_operator_negation(operator)
try:
operator = OPERATOR_TO_DJANGO[operator]
except KeyError:
raise InvalidSearchQuery("Invalid operation 'IN' for semantic version filter.")
version = version if... | fix(events-search): Return helpful error message on semver filter (#33785)
'IN' type queries currently raise an unhandled KeyError, raising an
InvalidSearchQuery instead. | parse_semver | 4ffb52489e662029a08169351cd997d525977e88 | sentry | filter.py | 18 | 50 | https://github.com/getsentry/sentry.git | 14 | 224 | 0 | 132 | 498 | Python | {
"docstring": "\n Attempts to parse a release version using our semver syntax. version should be in\n format `<package_name>@<version>` or `<version>`, where package_name is a string and\n version is a version string matching semver format (https://semver.org/). We've\n slightly extended this format to a... | def parse_semver(version, operator) -> Optional[SemverFilter]:
(operator, negated) = handle_operator_negation(operator)
try:
operator = OPERATOR_TO_DJANGO[operator]
except KeyError:
raise InvalidSearchQuery("Invalid operation 'IN' for semantic version filter.")
version = version if... | |
15,757 | 71,810 | 79 | wagtail/admin/tests/test_account_management.py | 26 | 9 | def test_not_logged_in_gives_403_to_ajax_requests(self):
# Get dashboard
response = self.client.get(
reverse("wagtailadmin_home"), HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
# AJAX requests should be given a 403 error instead of being redirected
self.assertEqu... | Reformat with black | test_not_logged_in_gives_403_to_ajax_requests | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_account_management.py | 11 | 5 | https://github.com/wagtail/wagtail.git | 1 | 33 | 0 | 25 | 60 | Python | {
"docstring": "\n This tests that a not logged in user is given a 403 error on AJAX requests\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 16,
"vocab_size": 15
} | def test_not_logged_in_gives_403_to_ajax_requests(self):
# Get dashboard
response = self.client.get(
reverse("wagtailadmin_home"), HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
# AJAX requests should be given a 403 error instead of being redirected
self.assertEqu... | |
47,077 | 194,784 | 207 | parlai/utils/bpe.py | 62 | 18 | def bytes_to_unicode(self) -> Dict[int, str]:
bs: List[int] = (
list(range(ord("!"), ord("~") + 1))
+ list(range(ord("¡"), ord("¬") + 1))
+ list(range(ord("®"), ord("ÿ") + 1))
)
cs: List[int] = bs[:]
| autoformat (#4378) | bytes_to_unicode | 81f722d29045a7a5841d0931a082ded1d1f13863 | ParlAI | bpe.py | 17 | 25 | https://github.com/facebookresearch/ParlAI.git | 4 | 151 | 0 | 43 | 247 | Python | {
"docstring": "\n Returns list of utf-8 byte and a corresponding list of unicode strings.\n\n The reversible bpe codes work on unicode strings. This means you need a large #\n of unicode characters in your vocab if you want to avoid UNKs. When you're at\n something like a 10B token datase... | def bytes_to_unicode(self) -> Dict[int, str]:
bs: List[int] = (
list(range(ord("!"), ord("~") + 1))
+ list(range(ord("¡"), ord("¬") + 1))
+ list(range(ord("®"), ord("ÿ") + 1))
)
cs: List[int] = bs[:]
n = 0
for b in range(2 ** 8):
... | |
90,913 | 291,809 | 200 | tests/components/caldav/test_calendar.py | 64 | 10 | async def test_get_events_custom_calendars(hass, calendar, get_api_events):
config = dict(CALDAV_CONFIG)
config["custom_calendars"] = [
{"name": "Private", "calendar": "Private", "search": "This is a normal event"}
]
assert await async_setup_component(hass, "calendar", {"calend | Local calendar integration (#79601) | test_get_events_custom_calendars | 532ab12a48b6832180599088250fc23446a45d1e | core | test_calendar.py | 12 | 20 | https://github.com/home-assistant/core.git | 1 | 110 | 0 | 48 | 212 | Python | {
"docstring": "Test that only searched events are returned on API.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | async def test_get_events_custom_calendars(hass, calendar, get_api_events):
config = dict(CALDAV_CONFIG)
config["custom_calendars"] = [
{"name": "Private", "calendar": "Private", "search": "This is a normal event"}
]
assert await async_setup_component(hass, "calendar", {"calendar": config}... | |
24,940 | 113,538 | 54 | nni/mutable/symbol.py | 14 | 8 | def leaf_symbols(self) -> Iterable[Symbol]:
for arg in self.arguments:
if isinstanc | Mutable V3 (Stage 2) - Symbolic execution engine (#5195) | leaf_symbols | 8f454f3bf29e2c3cd0d359231a46edd8ee768d42 | nni | symbol.py | 12 | 10 | https://github.com/microsoft/nni.git | 3 | 33 | 0 | 14 | 54 | Python | {
"docstring": "\n Return a generator of all leaf symbols.\n\n Useful for when you want to inspect when the symbols come from.\n No deduplication even if the symbols has duplicates.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 27,
"vocab_size": 24
} | def leaf_symbols(self) -> Iterable[Symbol]:
for arg in self.arguments:
if isinstance(arg, SymbolicExpression):
yield from arg.leaf_symbols()
| |
@eval_app.command() | 31,241 | 137,777 | 175 | rllib/evaluate.py | 35 | 16 | def append_step(self, obs, action, next_obs, reward, terminated, truncated, info):
if self._outfile:
if self._save_info:
self._current_rollout.append(
[obs, action, next_obs, reward, terminated, truncated, info]
)
else:
... | [RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369) | append_step | 8e680c483ce326cefc62e44f68ab1a6948b1c3d2 | ray | evaluate.py | 13 | 11 | https://github.com/ray-project/ray.git | 3 | 79 | 1 | 22 | 120 | Python | {
"docstring": "Add a step to the current rollout, if we are saving them",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def append_step(self, obs, action, next_obs, reward, terminated, truncated, info):
if self._outfile:
if self._save_info:
self._current_rollout.append(
[obs, action, next_obs, reward, terminated, truncated, info]
)
else:
... |
50,188 | 202,956 | 122 | tests/prefetch_related/tests.py | 21 | 17 | def test_nested_prefetch_is_not_overwritten_by_related_object(self):
queryset = House.objects.only('name').prefetch_related(
Prefetch('rooms', queryset=Room.objects.prefetch_related(
Prefetch('house', queryset=House.objects.only('address')),
)),
)
... | Fixed #32511 -- Corrected handling prefetched nested reverse relationships.
When prefetching a set of child objects related to a set of parent
objects, we usually want to populate the relationship back from the
child to the parent to avoid a query when accessing that relationship
attribute. However, there's an edge ca... | test_nested_prefetch_is_not_overwritten_by_related_object | f5233dce309543c826224be9dfa9c9f4f855f73c | django | tests.py | 19 | 11 | https://github.com/django/django.git | 1 | 102 | 0 | 19 | 175 | Python | {
"docstring": "\n The prefetched relationship is used rather than populating the reverse\n relationship from the parent, when prefetching a set of child objects\n related to a set of parent objects and the child queryset itself\n specifies a prefetch back to the parent.\n ",
"lan... | def test_nested_prefetch_is_not_overwritten_by_related_object(self):
queryset = House.objects.only('name').prefetch_related(
Prefetch('rooms', queryset=Room.objects.prefetch_related(
Prefetch('house', queryset=House.objects.only('address')),
)),
)
... | |
49,612 | 200,382 | 147 | sympy/combinatorics/permutations.py | 51 | 13 | def apply(self, i):
r
i = _sympify(i)
if i.is_integer is False:
raise NotImplementedError("{} should be an integer.".format(i))
n = self.size
if (i < 0) == True or (i >= n) == True:
raise NotImplementedError(
"{} should be an integer betwe... | Fix various typos
Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet` | apply | 24f1e7730119fe958cc8e28411f790c9a5ec04eb | sympy | permutations.py | 12 | 46 | https://github.com/sympy/sympy.git | 5 | 90 | 0 | 41 | 144 | Python | {
"docstring": "Apply the permutation to an expression.\n\n Parameters\n ==========\n\n i : Expr\n It should be an integer between $0$ and $n-1$ where $n$\n is the size of the permutation.\n\n If it is a symbol or a symbolic expression that can\n have i... | def apply(self, i):
r
i = _sympify(i)
if i.is_integer is False:
raise NotImplementedError("{} should be an integer.".format(i))
n = self.size
if (i < 0) == True or (i >= n) == True:
raise NotImplementedError(
"{} should be an integer betwe... | |
23,670 | 109,611 | 355 | lib/matplotlib/collections.py | 112 | 27 | def _convert_mesh_to_triangles(self, coordinates):
if isinstance(coordinates, np.ma.MaskedArray):
p = coordinates.data
else:
p = coordinates
p_a = p[:-1, :-1]
p_b = p[:-1, 1:]
p_c = p[1:, 1:]
p_d = p[1:, :-1]
p_center = (p_a + p_ | Deprecate draw_gouraud_triangle (#23824)
* Deprecate draw_gouraud_triangle
* DOC: minor rewording
Co-authored-by: Elliott Sales de Andrade <quantum.analyst@gmail.com>
Co-authored-by: Thomas A Caswell <tcaswell@gmail.com>
Co-authored-by: Elliott Sales de Andrade <quantum.analyst@gmail.com> | _convert_mesh_to_triangles | 4a5d09cba5f4a20e14553cebd8f70c1f34d20d35 | matplotlib | collections.py | 12 | 29 | https://github.com/matplotlib/matplotlib.git | 2 | 273 | 0 | 56 | 390 | Python | {
"docstring": "\n Convert a given mesh into a sequence of triangles, each point\n with its own color. The result can be used to construct a call to\n `~.RendererBase.draw_gouraud_triangles`.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 26,
"vocab_size": 23
} | def _convert_mesh_to_triangles(self, coordinates):
if isinstance(coordinates, np.ma.MaskedArray):
p = coordinates.data
else:
p = coordinates
p_a = p[:-1, :-1]
p_b = p[:-1, 1:]
p_c = p[1:, 1:]
p_d = p[1:, :-1]
p_center = (p_a + p_b... | |
39,415 | 163,270 | 419 | pandas/core/series.py | 126 | 40 | def count(self, level=None):
if level is None:
return notna(self._values).sum().astype("int64")
else:
warnings.warn(
"Using the level keyword in DataFrame and Series aggregations is "
"deprecated and will be removed in a future version. Us... | TYP: Ignore numpy related issues (#45244) | count | d603d43df2057ecdf74010d9dadc735e37f8f7b5 | pandas | series.py | 14 | 28 | https://github.com/pandas-dev/pandas.git | 6 | 211 | 0 | 98 | 343 | Python | {
"docstring": "\n Return number of non-NA/null observations in the Series.\n\n Parameters\n ----------\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a smaller Series.\n\n ... | def count(self, level=None):
if level is None:
return notna(self._values).sum().astype("int64")
else:
warnings.warn(
"Using the level keyword in DataFrame and Series aggregations is "
"deprecated and will be removed in a future version. Us... | |
55,704 | 219,678 | 951 | python3.10.4/Lib/_pydecimal.py | 350 | 31 | def __format__(self, specifier, context=None, _localeconv=None):
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to ... | add python 3.10.4 for windows | __format__ | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _pydecimal.py | 16 | 49 | https://github.com/XX-net/XX-Net.git | 24 | 411 | 0 | 171 | 704 | Python | {
"docstring": "Format a Decimal instance according to the given specifier.\n\n The specifier should be a standard format specifier, with the\n form described in PEP 3101. Formatting types 'e', 'E', 'f',\n 'F', 'g', 'G', 'n' and '%' are supported. If the formatting\n type is omitted it d... | def __format__(self, specifier, context=None, _localeconv=None):
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to ... | |
75,368 | 258,694 | 30 | sklearn/tests/test_base.py | 15 | 13 | def test_feature_names_in():
pd = pytest.importorskip("pandas")
iris = datasets.load_iris()
X_np = iris.data
df = pd.DataFrame(X_np, columns=iris.feature_names)
| TST Better info when checking for no warnings in tests (#22362) | test_feature_names_in | 9f85c9d44965b764f40169ef2917e5f7a798684f | scikit-learn | test_base.py | 10 | 47 | https://github.com/scikit-learn/scikit-learn.git | 4 | 339 | 0 | 12 | 68 | Python | {
"docstring": "Check that feature_name_in are recorded by `_validate_data`",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def test_feature_names_in():
pd = pytest.importorskip("pandas")
iris = datasets.load_iris()
X_np = iris.data
df = pd.DataFrame(X_np, columns=iris.feature_names)
| |
69,642 | 241,658 | 200 | pytorch_lightning/trainer/connectors/accelerator_connector.py | 67 | 12 | def check_interactive_compatibility(self):
from pytorch_lightning.utilities import _IS_INTERACTIVE
if _IS_INTERACTIVE and self._strategy_type is not None and not self._strategy_type.is_interactive_compatible():
raise MisconfigurationException(
f"`Trainer(strategy={s... | Rename `_distrib_type` to `_strategy_type` (#11328)
Co-authored-by: Jirka Borovec <Borda@users.noreply.github.com> | check_interactive_compatibility | e15579a4f32ee3c08318a466583f4a0a8517d654 | lightning | accelerator_connector.py | 16 | 11 | https://github.com/Lightning-AI/lightning.git | 4 | 44 | 0 | 56 | 121 | Python | {
"docstring": "Raises a `MisconfigurationException` if the accelerator and/or plugin is not compatible with an\n interactive environment.",
"language": "en",
"n_whitespaces": 21,
"n_words": 15,
"vocab_size": 15
} | def check_interactive_compatibility(self):
from pytorch_lightning.utilities import _IS_INTERACTIVE
if _IS_INTERACTIVE and self._strategy_type is not None and not self._strategy_type.is_interactive_compatible():
raise MisconfigurationException(
f"`Trainer(strategy={s... | |
53,522 | 212,929 | 256 | PySimpleGUI.py | 76 | 10 | def theme_global(new_theme=None):
if new_theme is not None:
if new_theme not in theme_list():
popup_error_with_traceback('Cannot use custom themes with theme_global call',
'Your request to use theme {} cannot be performed.'.format(new_theme),
... | Better error checking/reporting in theme_global. NEW THEME DarkGrey15 | theme_global | dfad2e3b7671b7128895c8a0e29fff38d7efe6e9 | PySimpleGUI | PySimpleGUI.py | 14 | 13 | https://github.com/PySimpleGUI/PySimpleGUI.git | 3 | 71 | 0 | 59 | 125 | Python | {
"docstring": "\n Sets / Gets the global PySimpleGUI Theme. If none is specified then returns the global theme from user settings.\n Note the theme must be a standard, built-in PySimpleGUI theme... not a user-created theme.\n\n :param new_theme: the new theme name to use\n :type new_theme: (str)\n :... | def theme_global(new_theme=None):
if new_theme is not None:
if new_theme not in theme_list():
popup_error_with_traceback('Cannot use custom themes with theme_global call',
'Your request to use theme {} cannot be performed.'.format(new_theme),
... | |
57,796 | 226,116 | 141 | packages/python/chart-studio/chart_studio/plotly/chunked_requests/chunked_request.py | 45 | 11 | def close(self): | switch to black .22 | close | 43e3a4011080911901176aab919c0ecf5046ddd3 | plotly.py | chunked_request.py | 12 | 8 | https://github.com/plotly/plotly.py.git | 2 | 46 | 0 | 41 | 95 | Python | {
"docstring": "Close the connection to server.\n\n If available, return a http_client.HTTPResponse object.\n\n Closing the connection involves sending the\n Transfer-Encoding terminating bytes.\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 20,
"vocab_size": 17
} | def close(self):
self._reset_retries()
self._closed = True
# Chunked-encoded posts are terminated with '0\r\n\r\n'
# For some reason, either Python or node.js seems to
# require an extra \r\n.
try:
self._conn.send("\r\n0\r\n\r\n".encode("utf-8"))
... | |
54,636 | 216,555 | 381 | salt/modules/runit.py | 164 | 25 | def _get_svc_path(name="*", status=None):
# This is the core routine to work with services, called by many
# other functions of this module.
#
# The name of a service is the "apparent" folder's name that contains its
# "run" script. If its "folder" is a symlink, the service is an "alias" of
... | fix runit module failing on non-symlinked service | _get_svc_path | 5bf2904e7ac79d438ce03a673aa9a5c99f4e8e0f | salt | runit.py | 15 | 24 | https://github.com/saltstack/salt.git | 10 | 175 | 0 | 95 | 304 | Python | {
"docstring": "\n Return a list of paths to services with ``name`` that have the specified ``status``\n\n name\n a glob for service name. default is '*'\n\n status\n None : all services (no filter, default choice)\n 'DISABLED' : available service(s) that is not enabled\n 'E... | def _get_svc_path(name="*", status=None):
# This is the core routine to work with services, called by many
# other functions of this module.
#
# The name of a service is the "apparent" folder's name that contains its
# "run" script. If its "folder" is a symlink, the service is an "alias" of
... | |
@RunIf(min_gpus=2) | 69,688 | 241,761 | 70 | tests/checkpointing/test_torch_saving.py | 34 | 22 | def test_model_torch_save_ddp_cpu(tmpdir):
model = BoringModel()
num_epochs = 1
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", accelerator="cpu", devices=2, logger=False
)
temp_path = os.path.jo | Update `tests/checkpointing/*.py` to use `devices` instead of `gpus` or `ipus` (#11408)
Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com> | test_model_torch_save_ddp_cpu | d2d284fd6e3e8f53e9a44ab233771850af1e4dab | lightning | test_torch_saving.py | 10 | 10 | https://github.com/Lightning-AI/lightning.git | 1 | 78 | 1 | 30 | 139 | Python | {
"docstring": "Test to ensure torch save does not fail for model and trainer using cpu ddp.",
"language": "en",
"n_whitespaces": 14,
"n_words": 15,
"vocab_size": 15
} | def test_model_torch_save_ddp_cpu(tmpdir):
model = BoringModel()
num_epochs = 1
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", accelerator="cpu", devices=2, logger=False
)
temp_path = os.path.join(tmpdir, "temp.pt")
trainer.fit(model)
#... |
6,562 | 36,020 | 38 | src/transformers/onnx/config.py | 17 | 5 | def default_batch_size(self) -> int:
# Using 2 avoid ONNX making assumption about single sample batch
return On | Add ONNX export for ViT (#15658)
* Add ONNX support for ViT
* Refactor to use generic preprocessor
* Add vision dep to tests
* Extend ONNX slow tests to ViT
* Add dummy image generator
* Use model_type to determine modality
* Add deprecation warnings for tokenizer argument
* Add warning when overw... | default_batch_size | 50dd314d939a86f3a81e19af01459f449fbaeeca | transformers | config.py | 6 | 8 | https://github.com/huggingface/transformers.git | 1 | 12 | 0 | 17 | 23 | Python | {
"docstring": "\n The default batch size to use if no other indication\n\n Returns:\n Integer > 0\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 14,
"vocab_size": 14
} | def default_batch_size(self) -> int:
# Using 2 avoid ONNX making assumption about single sample batch
return OnnxConfig.default_fixed_batch
| |
42,870 | 178,942 | 156 | nuitka/utils/Signing.py | 32 | 16 | def addMacOSCodeSignature(filenames):
# Weak signing.
identity = getMacOSSigningIdentity()
command = [
"codesign",
"-s",
identity,
"--force",
"--deep",
"--preserve-metadata=entitlements",
]
assert type(filenames) is not str
command.extend(f... | macOS: Add support for specifying signing identity and access to protected resources. | addMacOSCodeSignature | 51ca460bd8c382cc165cbb1325e7cb65895d1a0b | Nuitka | Signing.py | 10 | 19 | https://github.com/Nuitka/Nuitka.git | 1 | 66 | 0 | 31 | 112 | Python | {
"docstring": "Remove the code signature from a filename.\n\n Args:\n filenames - The files to be signed.\n\n Returns:\n None\n\n Notes:\n This is macOS specific.\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 22,
"vocab_size": 22
} | def addMacOSCodeSignature(filenames):
# Weak signing.
identity = getMacOSSigningIdentity()
command = [
"codesign",
"-s",
identity,
"--force",
"--deep",
"--preserve-metadata=entitlements",
]
assert type(filenames) is not str
command.extend(f... | |
9,566 | 48,680 | 178 | rest_framework/views.py | 56 | 22 | def exception_handler(exc, context):
if isinstance(exc, Http | Preserve exception messages for wrapped Django exceptions (#8051)
* Preserve messages for wrapped Django exceptions
* Fix the test
* Update test_generics.py
* Update test_generics.py
Co-authored-by: Tom Christie <tom@tomchristie.com> | exception_handler | 56946fac8f29aa44ce84391f138d63c4c8a2a285 | django-rest-framework | views.py | 14 | 18 | https://github.com/encode/django-rest-framework.git | 7 | 152 | 0 | 39 | 246 | Python | {
"docstring": "\n Returns the response that should be used for any given exception.\n\n By default we handle the REST framework `APIException`, and also\n Django's built-in `Http404` and `PermissionDenied` exceptions.\n\n Any unhandled exceptions may return `None`, which will cause a 500 error\n to be... | def exception_handler(exc, context):
if isinstance(exc, Http404):
exc = exceptions.NotFound(*(exc.args))
elif isinstance(exc, PermissionDenied):
exc = exceptions.PermissionDenied(*(exc.args))
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, 'aut... | |
7,156 | 39,239 | 107 | recommenders/models/sar/sar_singlenode.py | 25 | 22 | def compute_cooccurrence_matrix(self, df):
u | Remove drop_duplicates() from SAR method fix #1464 (#1588)
* Remove drop_duplicates() from SAR method fix #1464
* flake is complaining
* Typos
* Define self.unity_user_affinity inside __init__()
* Remove drop_duplicates() from SAR method
* Remove duplicates in testing data
* Remove duplicates in test... | compute_cooccurrence_matrix | 96b5053fa688bec79a729f9ea238e5f916bced01 | recommenders | sar_singlenode.py | 15 | 10 | https://github.com/microsoft/recommenders.git | 1 | 101 | 0 | 21 | 153 | Python | {
"docstring": "Co-occurrence matrix.\n\n The co-occurrence matrix is defined as :math:`C = U^T * U`\n\n where U is the user_affinity matrix with 1's as values (instead of ratings).\n\n Args:\n df (pandas.DataFrame): DataFrame of users and items\n\n Returns:\n numpy.n... | def compute_cooccurrence_matrix(self, df):
user_item_hits = sparse.coo_matrix(
(np.repeat(1, df.shape[0]), (df[self.col_user_id], df[self.col_item_id])),
shape=(self.n_users, self.n_items),
).tocsr()
item_cooccurrence = user_item_hits.transpose().dot(user_item_... | |
3,452 | 20,609 | 615 | pipenv/patched/notpip/_vendor/pyparsing/helpers.py | 164 | 48 | def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
if isinstance(tagStr, str_type):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas, alphanums + "_-:")
if xml:
tagAttrVa... | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | _makeTags | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | helpers.py | 26 | 53 | https://github.com/pypa/pipenv.git | 3 | 365 | 0 | 96 | 627 | Python | {
"docstring": "Internal helper to construct opening and closing tag expressions, given a tag name",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 12
} | def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
if isinstance(tagStr, str_type):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas, alphanums + "_-:")
if xml:
tagAttrVa... | |
80,398 | 270,135 | 208 | keras/datasets/cifar100.py | 88 | 24 | def load_data(label_mode="fine"):
if label_mode not in ["fine", "coarse"]:
raise ValueError(
'`label_mode` must be one of `"fine"`, `"coarse"`. '
f"Received: label_mode= | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | load_data | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | cifar100.py | 12 | 24 | https://github.com/keras-team/keras.git | 3 | 185 | 0 | 64 | 303 | Python | {
"docstring": "Loads the CIFAR100 dataset.\n\n This is a dataset of 50,000 32x32 color training images and\n 10,000 test images, labeled over 100 fine-grained classes that are\n grouped into 20 coarse-grained classes. See more info at the\n [CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).\n... | def load_data(label_mode="fine"):
if label_mode not in ["fine", "coarse"]:
raise ValueError(
'`label_mode` must be one of `"fine"`, `"coarse"`. '
f"Received: label_mode={label_mode}."
)
dirname = "cifar-100-python"
origin = "https://www.cs.toronto.edu/~kriz/cifa... | |
8,993 | 46,789 | 194 | dev/breeze/src/airflow_breeze/utils/path_utils.py | 114 | 27 | def find_airflow_sources_root() -> Path:
default_airflow_sources_root = Path.cwd()
# Try to find airflow sources in current working dir
airflow_sources_root = search_upwards_for_airflow_sources_root(Path.cwd())
if not airflow_sources_root:
# Or if it fails, find it in parents of the directo... | Prepare Breeze2 for prime time :) (#22713)
This is a review and clean-up for all the parameters and
commands for Breeze2 in order to prepare it for being
used by the contribugors.
There are various small fixes here and there, removal
of duplicated code, refactoring and moving code around
as well as cleanup and ... | find_airflow_sources_root | 4ffd4f09532fceb67675fce4c1f5cd383eff992e | airflow | path_utils.py | 15 | 26 | https://github.com/apache/airflow.git | 3 | 79 | 0 | 71 | 267 | Python | {
"docstring": "\n Find the root of airflow sources. When Breeze is run from sources, it is easy, but this one also\n has to handle the case when Breeze is installed via `pipx` so it searches upwards of the current\n directory to find the right root of airflow directory.\n\n If not found, current director... | def find_airflow_sources_root() -> Path:
default_airflow_sources_root = Path.cwd()
# Try to find airflow sources in current working dir
airflow_sources_root = search_upwards_for_airflow_sources_root(Path.cwd())
if not airflow_sources_root:
# Or if it fails, find it in parents of the directo... | |
117,527 | 321,097 | 346 | qutebrowser/browser/network/pac.py | 94 | 19 | def _parse_proxy_entry(proxy_str):
config = [c.strip() for c in proxy_str.split(' ') if c]
if not config:
raise ParseProxyError("Empty proxy entry")
if config[0] == "DIRECT":
if len(config) != 1:
raise ParseProxyError("Invalid number of parameter... | Run scripts/dev/rewrite_enums.py | _parse_proxy_entry | 0877fb0d78635692e481c8bde224fac5ad0dd430 | qutebrowser | pac.py | 13 | 22 | https://github.com/qutebrowser/qutebrowser.git | 10 | 183 | 0 | 52 | 307 | Python | {
"docstring": "Parse one proxy string entry, as described in PAC specification.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def _parse_proxy_entry(proxy_str):
config = [c.strip() for c in proxy_str.split(' ') if c]
if not config:
raise ParseProxyError("Empty proxy entry")
if config[0] == "DIRECT":
if len(config) != 1:
raise ParseProxyError("Invalid number of parameter... | |
38,505 | 160,133 | 81 | numpy/f2py/tests/test_f2py2e.py | 37 | 19 | def test_gen_pyf(capfd, hello_world_f90, monkeypatch):
ipath = Path(hello_world_f90)
opath = Path(hello_world_f90).stem + ".pyf"
monkeypatch.setattr(sys, | TST: Initialize f2py2e tests of the F2PY CLI (#20668)
Increases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff.
More importantly, sets the groundwork for #20056, in that passing the same testsuite should ind... | test_gen_pyf | 729ad4f92420231e2a7009b3223c6c7620b8b808 | numpy | test_f2py2e.py | 13 | 9 | https://github.com/numpy/numpy.git | 1 | 77 | 0 | 34 | 147 | Python | {
"docstring": "Ensures that a signature file is generated via the CLI\n CLI :: -h\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 12
} | def test_gen_pyf(capfd, hello_world_f90, monkeypatch):
ipath = Path(hello_world_f90)
opath = Path(hello_world_f90).stem + ".pyf"
monkeypatch.setattr(sys, "argv", f'f2py -h {opath} {ipath}'.split())
with util.switchdir(ipath.parent):
f2pycli() # Generate wrappers
out, _ = capfd.rea... | |
12,237 | 60,671 | 100 | .venv/lib/python3.8/site-packages/pip/_internal/configuration.py | 34 | 7 | def _dictionary(self):
# type: () -> Dict[str, Any]
# NOTE: Dictionaries are not populated if not loaded. So, conditionals
# are not needed here.
retval = {}
for variant in OVERRIDE_ORDER: | upd; format | _dictionary | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | configuration.py | 11 | 5 | https://github.com/jindongwang/transferlearning.git | 2 | 28 | 0 | 28 | 50 | Python | {
"docstring": "A dictionary representing the loaded configuration.\n ",
"language": "en",
"n_whitespaces": 13,
"n_words": 6,
"vocab_size": 6
} | def _dictionary(self):
# type: () -> Dict[str, Any]
# NOTE: Dictionaries are not populated if not loaded. So, conditionals
# are not needed here.
retval = {}
for variant in OVERRIDE_ORDER:
retval.update(self._config[variant])
return retval
| |
51,204 | 205,770 | 99 | django/db/models/query.py | 12 | 10 | def using(self, alias):
return RawQuerySet(
self.raw_query,
model=self.model,
query=self.query.chain(using=alias),
params=self.params,
translations=self.translations,
using=alias,
)
| Refs #33476 -- Reformatted code with Black. | using | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | query.py | 11 | 9 | https://github.com/django/django.git | 1 | 51 | 0 | 12 | 75 | Python | {
"docstring": "Select the database this RawQuerySet should execute against.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def using(self, alias):
return RawQuerySet(
self.raw_query,
model=self.model,
query=self.query.chain(using=alias),
params=self.params,
translations=self.translations,
using=alias,
)
| |
81,394 | 275,492 | 39 | keras/optimizers/optimizer_v2/optimizer_v2.py | 15 | 6 | def get_weights(self):
params = self.we | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | get_weights | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | optimizer_v2.py | 7 | 3 | https://github.com/keras-team/keras.git | 1 | 18 | 0 | 15 | 33 | Python | {
"docstring": "Returns the current weights of the optimizer.\n\n The weights of an optimizer are its state (ie, variables).\n This function returns the weight values associated with this\n optimizer as a list of Numpy arrays. The first value is always the\n iterations count of the optimiz... | def get_weights(self):
params = self.weights
return backend.batch_get_value(params)
# TODO(tanzheny): Maybe share this logic with base_layer. | |
70,202 | 244,030 | 108 | mmdet/core/bbox/match_costs/match_cost.py | 44 | 14 | def _focal_loss_cost(self, cls_pred, gt_labels):
cls_pred = cls_pred.sigmoid()
neg_cost = -(1 - cls_pred + self.eps).log() * (
1 - self.alpha) * cls_pred.pow(self.gamma)
pos_cost = -(cls_pred + sel | [Feature] Add Maskformer to mmdet (#7212)
* first commit
* add README
* move model description from config to readme
add description for binary_input
add description for dice loss
add a independent panoptic gt processing function
add a independent panoptic gt processing function
remove compatibili... | _focal_loss_cost | cac356380d505bf15587f07c0529218cc36b9652 | mmdetection | match_cost.py | 14 | 8 | https://github.com/open-mmlab/mmdetection.git | 1 | 102 | 0 | 27 | 161 | Python | {
"docstring": "\n Args:\n cls_pred (Tensor): Predicted classification logits, shape\n (num_query, num_class).\n gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).\n\n Returns:\n torch.Tensor: cls_cost value with weight\n ",
"language": "e... | def _focal_loss_cost(self, cls_pred, gt_labels):
cls_pred = cls_pred.sigmoid()
neg_cost = -(1 - cls_pred + self.eps).log() * (
1 - self.alpha) * cls_pred.pow(self.gamma)
pos_cost = -(cls_pred + self.eps).log() * self.alpha * (
1 - cls_pred).pow(self.gamma)
... | |
50,958 | 204,887 | 47 | django/db/backends/base/operations.py | 15 | 5 | def date_extract_sql(self, lookup_type, field_name):
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a date_extract_sq | Refs #33476 -- Reformatted code with Black. | date_extract_sql | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | operations.py | 8 | 4 | https://github.com/django/django.git | 1 | 15 | 0 | 15 | 27 | Python | {
"docstring": "\n Given a lookup_type of 'year', 'month', or 'day', return the SQL that\n extracts a value from the given date field field_name.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 21,
"vocab_size": 19
} | def date_extract_sql(self, lookup_type, field_name):
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a date_extract_sql() method"
)
| |
72,349 | 248,557 | 714 | tests/rest/client/test_rooms.py | 227 | 36 | def test_threepid_invite_spamcheck(self) -> None:
# Mock a few functions to prevent the test from failing due to failing to talk to
# a remote IS. We keep the mock for make_and_store_3pid_invite around so we
# can check its call_count later on during the test.
make_invite_mock =... | Uniformize spam-checker API, part 4: port other spam-checker callbacks to return `Union[Allow, Codes]`. (#12857)
Co-authored-by: Brendan Abolivier <babolivier@matrix.org> | test_threepid_invite_spamcheck | a164a46038b0e51142781619db0e6dec8e0c2aaa | synapse | test_rooms.py | 13 | 44 | https://github.com/matrix-org/synapse.git | 1 | 243 | 0 | 131 | 424 | Python | {
"docstring": "\n Test allowing/blocking threepid invites with a spam-check module.\n\n In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal[\"NOT_SPAM\"]]`.",
"language": "en",
"n_whitespaces": 38,
"n_words": 24,
"vocab_size": 23
} | def test_threepid_invite_spamcheck(self) -> None:
# Mock a few functions to prevent the test from failing due to failing to talk to
# a remote IS. We keep the mock for make_and_store_3pid_invite around so we
# can check its call_count later on during the test.
make_invite_mock =... | |
@pytest.mark.parametrize(
"constraint",
[
_ArrayLikes,
_Callables,
_InstancesOf,
_NoneConstraint,
_RandomStates,
_SparseMatrices,
],
) | 76,095 | 260,155 | 79 | sklearn/utils/tests/test_param_validation.py | 17 | 14 | def test_generate_invalid_param_val_all_valid(constraints):
with pytest.raises(NotImplementedError):
generate_invalid_param_val(constraints[0], constraints=constraints)
@pytest.mark.parametrize(
"constraint",
[
_ArrayLikes,
_Callables,
_InstancesOf,
_NoneConstr... | FIX Param validation: fix generating invalid param when 2 interval constraints (#23513)
Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> | test_generate_invalid_param_val_all_valid | 02cbe01e67165d7d38e5e441cfccd6b57b2207b6 | scikit-learn | test_param_validation.py | 10 | 3 | https://github.com/scikit-learn/scikit-learn.git | 1 | 25 | 1 | 17 | 78 | Python | {
"docstring": "Check that the function raises NotImplementedError when there's no invalid value\n for the constraint.\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 14,
"vocab_size": 13
} | def test_generate_invalid_param_val_all_valid(constraints):
with pytest.raises(NotImplementedError):
generate_invalid_param_val(constraints[0], constraints=constraints)
@pytest.mark.parametrize(
"constraint",
[
_ArrayLikes,
_Callables,
_InstancesOf,
_NoneConstr... |
34,607 | 149,953 | 163 | freqtrade/freqai/data_drawer.py | 47 | 16 | def load_drawer_from_disk(self):
exists = Path(se | rehaul of backend data management - increasing performance by holding history in memory, reducing load on the ratelimit by only pinging exchange once per candle. Improve code readability. | load_drawer_from_disk | 16b4a5b71ff140f5de31e5d5572f1f193457cf6b | freqtrade | data_drawer.py | 16 | 11 | https://github.com/freqtrade/freqtrade.git | 3 | 81 | 0 | 41 | 156 | Python | {
"docstring": "\n Locate and load a previously saved data drawer full of all pair model metadata in\n present model folder.\n :returns:\n exists: bool = whether or not the drawer was located\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 29,
"vocab_size": 27
} | def load_drawer_from_disk(self):
exists = Path(self.full_path / str('pair_dictionary.json')).resolve().exists()
if exists:
with open(self.full_path / str('pair_dictionary.json'), "r") as fp:
self.pair_dict = json.load(fp)
elif not self.follow_mode:
... | |
55,356 | 218,510 | 658 | python3.10.4/Lib/ipaddress.py | 157 | 16 | def address_exclude(self, other):
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
| add python 3.10.4 for windows | address_exclude | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | ipaddress.py | 15 | 32 | https://github.com/XX-net/XX-Net.git | 11 | 191 | 0 | 77 | 324 | Python | {
"docstring": "Remove an address from a larger block.\n\n For example:\n\n addr1 = ip_network('192.0.2.0/28')\n addr2 = ip_network('192.0.2.1/32')\n list(addr1.address_exclude(addr2)) =\n [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),\n ... | def address_exclude(self, other):
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)... | |
7,232 | 39,440 | 28 | recommenders/utils/python_utils.py | 12 | 11 | def mutual_information(cooccurrence):
with np.errstate(invalid="ignore", divide="ignore"):
result = np.log2(cooccurrence.shape[0] * lift(cooccurrence))
return np.array(result)
| Add new item similarity metrics for SAR (#1754)
* Add mutual information similarity in SAR
* Add lexicographers mutual information similarity for SAR
* Add cosine similarity for SAR
* Add inclusion index for SAR
* Typos
* Change SARSingleNode to SAR
* Convert item similarity matrix to np.array
* U... | mutual_information | 1d7341e93d1f03387699fb3c6ae0b6c0e464296f | recommenders | python_utils.py | 13 | 4 | https://github.com/microsoft/recommenders.git | 1 | 45 | 0 | 12 | 79 | Python | {
"docstring": "Helper method to calculate the Mutual Information of a matrix of\n co-occurrences.\n\n Mutual information is a measurement of the amount of information\n explained by the i-th j-th item column vector.\n\n Args:\n cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences o... | def mutual_information(cooccurrence):
with np.errstate(invalid="ignore", divide="ignore"):
result = np.log2(cooccurrence.shape[0] * lift(cooccurrence))
return np.array(result)
| |
3,612 | 20,908 | 72 | pipenv/patched/notpip/_vendor/typing_extensions.py | 49 | 5 | def _is_dunder(name):
return len(name) > 4 and name.startswith('__') and name.endswith('__ | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | _is_dunder | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | typing_extensions.py | 10 | 2 | https://github.com/pypa/pipenv.git | 3 | 27 | 0 | 44 | 53 | Python | {
"docstring": "Returns True if name is a __dunder_variable_name__.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def _is_dunder(name):
return len(name) > 4 and name.startswith('__') and name.endswith('__')
# Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
# checks, argument expansion etc. are done on the _subs_tre. As a result we
# can't provide a get_type_hints function tha... | |
75,949 | 259,851 | 615 | sklearn/neighbors/_kde.py | 133 | 36 | def fit(self, X, y=None, sample_weight=None):
algorithm = self._choose_algorithm(self.algorithm, self.metric)
if isinstance(self.bandwidth, str):
methods_supported = ("scott", "silvermann")
if self.bandwidth not in methods_supported:
raise ValueError(
... | FEA Added Kernel Density bandwidth estimation and test (#22993)
Co-authored-by: STOJANOVIC Jovan <jovan.stojanovic@inria.fr>
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> | fit | dedaa8f25f136e954941d15151bbbc88150789fc | scikit-learn | _kde.py | 19 | 42 | https://github.com/scikit-learn/scikit-learn.git | 8 | 278 | 0 | 89 | 454 | Python | {
"docstring": "Fit the Kernel Density model on the data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : None\n Ignored. Thi... | def fit(self, X, y=None, sample_weight=None):
algorithm = self._choose_algorithm(self.algorithm, self.metric)
if isinstance(self.bandwidth, str):
methods_supported = ("scott", "silvermann")
if self.bandwidth not in methods_supported:
raise ValueError(
... | |
70,117 | 243,767 | 61 | src/PIL/ImageMorph.py | 18 | 9 | def get_on_pixels(self, image):
| Improve exception traceback readability | get_on_pixels | 2ae55ccbdad9c842929fb238ea1eb81d1f999024 | Pillow | ImageMorph.py | 9 | 5 | https://github.com/python-pillow/Pillow.git | 2 | 34 | 0 | 18 | 60 | Python | {
"docstring": "Get a list of all turned on pixels in a binary image\n\n Returns a list of tuples of (x,y) coordinates\n of all matching pixels. See :ref:`coordinate-system`.",
"language": "en",
"n_whitespaces": 39,
"n_words": 26,
"vocab_size": 19
} | def get_on_pixels(self, image):
if image.mode != "L":
msg = "Image mode must be L"
raise ValueError(msg)
return _imagingmorph.get_on_pixels(image.im.id)
| |
48,420 | 197,273 | 109 | sympy/parsing/ast_parser.py | 21 | 14 | def visit_Num(self, node):
if isinstance(node.n, int):
return fix_missing_locations(Call(func=Name('Integer', Load( | Inserted the `visit_Num` function back in.
This was required to keep SymPy compatible with Python 3.7. | visit_Num | e95d725680aab772037848628471a31f03a13901 | sympy | ast_parser.py | 17 | 8 | https://github.com/sympy/sympy.git | 3 | 86 | 0 | 15 | 136 | Python | {
"docstring": "This function exists for backwards compatibility with Python 3.7.\n It should be removed when SymPy removes support for Python 3.7.",
"language": "en",
"n_whitespaces": 29,
"n_words": 20,
"vocab_size": 17
} | def visit_Num(self, node):
if isinstance(node.n, int):
return fix_missing_locations(Call(func=Name('Integer', Load()),
args=[node], keywords=[]))
elif isinstance(node.n, float):
return fix_missing_locations(Call(func=Name('Float', Load()),
... | |
56,343 | 221,318 | 42 | python3.10.4/Lib/cgitb.py | 11 | 8 | def enable(display=1, logdir=None, context=5, format="html"):
sys.excepthook = Hook(display=display, logdir=logdir,
context=context, format=format | add python 3.10.4 for windows | enable | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | cgitb.py | 9 | 3 | https://github.com/XX-net/XX-Net.git | 1 | 42 | 0 | 11 | 64 | Python | {
"docstring": "Install an exception handler that formats tracebacks as HTML.\n\n The optional argument 'display' can be set to 0 to suppress sending the\n traceback to the browser, and 'logdir' can be set to a directory to cause\n tracebacks to be written to files there.",
"language": "en",
"n_whitespac... | def enable(display=1, logdir=None, context=5, format="html"):
sys.excepthook = Hook(display=display, logdir=logdir,
context=context, format=format)
| |
13,455 | 63,660 | 63 | .venv/lib/python3.8/site-packages/pip/_vendor/requests/utils.py | 27 | 7 | def parse_list_header(value):
result = []
for item in | upd; format | parse_list_header | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | utils.py | 15 | 7 | https://github.com/jindongwang/transferlearning.git | 3 | 54 | 0 | 23 | 91 | Python | {
"docstring": "Parse lists as described by RFC 2068 Section 2.\n\n In particular, parse comma-separated lists where the elements of\n the list may include quoted-strings. A quoted-string could\n contain a comma. A non-quoted string could have quotes in the\n middle. Quotes are removed automatically af... | def parse_list_header(value):
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission). | |
105,628 | 306,845 | 46 | homeassistant/components/apple_tv/media_player.py | 14 | 8 | def media_series_title(self) -> str | None:
if self._playing and self._is_feature_available(FeatureName.SeriesName):
return self | Improve type hints in apple_tv media player (#77940) | media_series_title | 5276d849ec497ccd0cecf3cb6a8dacae4fa6f845 | core | media_player.py | 9 | 5 | https://github.com/home-assistant/core.git | 3 | 32 | 0 | 13 | 53 | Python | {
"docstring": "Title of series of current playing media, TV show only.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def media_series_title(self) -> str | None:
if self._playing and self._is_feature_available(FeatureName.SeriesName):
return self._playing.series_name
return None
| |
57,244 | 224,209 | 221 | mkdocs/commands/build.py | 116 | 18 | def _build_template(name, template, files, config, nav):
# Run `pre_template` plugin events.
template = config['plugins'].run_event(
'pre_template', template, template_name=name, config=config
)
if utils.is_error_template(name):
# Force absolute URLs in the nav of error pages and ... | Format code with `black -l100 --skip-string-normalization` | _build_template | dca7cbb43fcd6ea7c677c98ba585395b070d387b | mkdocs | build.py | 14 | 15 | https://github.com/mkdocs/mkdocs.git | 3 | 134 | 0 | 73 | 221 | Python | {
"docstring": "\n Return rendered output for given template as a string.\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 9,
"vocab_size": 9
} | def _build_template(name, template, files, config, nav):
# Run `pre_template` plugin events.
template = config['plugins'].run_event(
'pre_template', template, template_name=name, config=config
)
if utils.is_error_template(name):
# Force absolute URLs in the nav of error pages and ... | |
48,951 | 198,467 | 375 | sympy/core/basic.py | 88 | 18 | def matches(self, expr, repl_dict=None, old=False):
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if repl_dict is None:
repl_dict = {}
else:
repl_dict = repl_dict.copy()
if self == expr:
| Code cleanup | matches | 9d58006fc0a23afcba38f641c9472917c436428a | sympy | basic.py | 16 | 26 | https://github.com/sympy/sympy.git | 10 | 164 | 0 | 52 | 260 | Python | {
"docstring": "\n Helper method for match() that looks for a match between Wild symbols\n in self and expressions in expr.\n\n Examples\n ========\n\n >>> from sympy import symbols, Wild, Basic\n >>> a, b, c = symbols('a b c')\n >>> x = Wild('x')\n >>> Basic(a ... | def matches(self, expr, repl_dict=None, old=False):
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if repl_dict is None:
repl_dict = {}
else:
repl_dict = repl_dict.copy()
if self == expr:
return... | |
30,006 | 133,393 | 376 | python/ray/util/sgd/torch/worker_group.py | 85 | 26 | def _create_placement_group(self, num_workers):
pg = get_current_placement_group()
if pg is None:
bundle = {"CPU": self._num_cpus_per_worker, "GPU": int(self._use_gpu)}
bundles = [bundle] * num_workers
pg = ray.util.placement_group(bundles, strategy="SPREAD")... | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | _create_placement_group | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | worker_group.py | 16 | 21 | https://github.com/ray-project/ray.git | 3 | 121 | 0 | 67 | 212 | Python | {
"docstring": "Creates a placement group for the workers.\n\n If this worker is already in a placement group then a new one will\n not be created. This is primarily for when Tune is the upstream and\n will allocate resources for SGD workers.\n\n If this worker is not in a placement group,... | def _create_placement_group(self, num_workers):
pg = get_current_placement_group()
if pg is None:
bundle = {"CPU": self._num_cpus_per_worker, "GPU": int(self._use_gpu)}
bundles = [bundle] * num_workers
pg = ray.util.placement_group(bundles, strategy="SPREAD")... | |
14,679 | 67,953 | 30 | erpnext/stock/report/warehouse_wise_item_balance_age_and_value/warehouse_wise_item_balance_age_and_value.py | 48 | 16 | def get_warehouse_list(filters):
from frappe.core.doctype.user_permission.user_permission import get_permitted_documents
condition = ""
user_permitted_warehouse = get_permitted_documents("Warehouse")
value = ()
if user_permitted_warehouse:
condition = "and name in %s"
value = set( | style: format code with black | get_warehouse_list | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | warehouse_wise_item_balance_age_and_value.py | 12 | 20 | https://github.com/frappe/erpnext.git | 4 | 87 | 0 | 33 | 149 | Python | {
"docstring": "select name\n\t\tfrom `tabWarehouse` where is_group = 0\n\t\t{condition}",
"language": "en",
"n_whitespaces": 6,
"n_words": 9,
"vocab_size": 9
} | def get_warehouse_list(filters):
from frappe.core.doctype.user_permission.user_permission import get_permitted_documents
condition = ""
user_permitted_warehouse = get_permitted_documents("Warehouse")
value = ()
if user_permitted_warehouse:
condition = "and name in %s"
value = set(user_permitted_warehouse)
el... | |
8,684 | 45,743 | 94 | airflow/models/mappedoperator.py | 30 | 10 | def unmap(self) -> "BaseOperator":
dag = self.dag
if not dag:
| More explicit mapped argument validation (#21933)
* More explicit mapped argument validation
Instead of always using MagicMock to validate mapped arguments, this
implements a more sophisticated protocol that allows an operator to
implement a 'validate_mapped_arguments' to provide custom validation
logic. If an o... | unmap | b65e52205a7045eb08d471289b85abda587442b7 | airflow | mappedoperator.py | 10 | 9 | https://github.com/apache/airflow.git | 3 | 57 | 0 | 24 | 101 | Python | {
"docstring": "Get the \"normal\" Operator after applying the current mapping.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | def unmap(self) -> "BaseOperator":
dag = self.dag
if not dag:
raise RuntimeError("Cannot unmap a task without a DAG")
dag._remove_task(self.task_id)
if isinstance(self.operator_class, str):
raise RuntimeError("Cannot unmap a deserialized operator")
... | |
121,034 | 337,338 | 35 | src/accelerate/test_utils/testing.py | 12 | 5 | def require_tensorflow(test_case):
if not is_tensorflow_available():
return unittest.skip( | Add logging capabilities (#293)
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
- Added experiment tracking API, and support for Weights and Biases, TensorBoard, and CometML + Tests
- Added `tensorflow` to a new dependency list to be used during tests
- Added three new functions in `Acc... | require_tensorflow | 5668270de74a09e5bff15891054f73ddbb1176ac | accelerate | testing.py | 11 | 5 | https://github.com/huggingface/accelerate.git | 2 | 26 | 0 | 11 | 49 | Python | {
"docstring": "\n Decorator marking a test that requires TensorFlow installed. These tests are skipped when TensorFlow isn't\n installed\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 16,
"vocab_size": 15
} | def require_tensorflow(test_case):
if not is_tensorflow_available():
return unittest.skip("test requires TensorFlow")(test_case)
else:
return test_case
| |
56,433 | 221,571 | 595 | python3.10.4/Lib/concurrent/futures/_base.py | 125 | 36 | def as_completed(fs, timeout=None):
if timeout is not None:
end_time = timeout + time.monotonic()
fs = set(fs)
total_futures = len(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
... | add python 3.10.4 for windows | as_completed | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _base.py | 20 | 36 | https://github.com/XX-net/XX-Net.git | 9 | 212 | 0 | 81 | 365 | Python | {
"docstring": "An iterator over the given futures that yields each as it completes.\n\n Args:\n fs: The sequence of Futures (possibly created by different Executors) to\n iterate over.\n timeout: The maximum number of seconds to wait. If None, then there\n is no limit on the wa... | def as_completed(fs, timeout=None):
if timeout is not None:
end_time = timeout + time.monotonic()
fs = set(fs)
total_futures = len(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
... | |
35,150 | 151,842 | 139 | freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 27 | 15 | def pack_env_dict(self) -> Dict[str, Any]:
env_info = {"window_size": self.CONV_WIDTH,
"reward_kwargs": self.reward_params,
"config": self.config,
"live": self.live}
if self.data_provider:
env_info["fee"] = self.data_pr | use a dictionary to make code more readable | pack_env_dict | 7b4abd5ef50f3c6f84c6604fc1f79ff4b92c2575 | freqtrade | BaseReinforcementLearningModel.py | 15 | 12 | https://github.com/freqtrade/freqtrade.git | 2 | 74 | 0 | 25 | 122 | Python | {
"docstring": "\n Create dictionary of environment arguments\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 5,
"vocab_size": 5
} | def pack_env_dict(self) -> Dict[str, Any]:
env_info = {"window_size": self.CONV_WIDTH,
"reward_kwargs": self.reward_params,
"config": self.config,
"live": self.live}
if self.data_provider:
env_info["fee"] = self.data_provid... | |
31,780 | 139,820 | 734 | rllib/evaluation/rollout_worker.py | 184 | 41 | def sample(self) -> SampleBatchType:
if self.fake_sampler and self.last_batch is not None:
return self.last_batch
elif self.input_reader is None:
raise ValueError(
"RolloutWorker has no `input_reader` object! "
"Cannot call `sample()`. Yo... | [RLlib] Agents to algos: DQN w/o Apex and R2D2, DDPG/TD3, SAC, SlateQ, QMIX, PG, Bandits (#24896) | sample | 3815e52a61b6afe44b883d7d745fa00b599f66ca | ray | rollout_worker.py | 13 | 66 | https://github.com/ray-project/ray.git | 17 | 284 | 0 | 119 | 481 | Python | {
"docstring": "Returns a batch of experience sampled from this worker.\n\n This method must be implemented by subclasses.\n\n Returns:\n A columnar batch of experiences (e.g., tensors).\n\n Examples:\n >>> import gym\n >>> from ray.rllib.evaluation.rollout_worker... | def sample(self) -> SampleBatchType:
if self.fake_sampler and self.last_batch is not None:
return self.last_batch
elif self.input_reader is None:
raise ValueError(
"RolloutWorker has no `input_reader` object! "
"Cannot call `sample()`. Yo... | |
51,828 | 206,994 | 152 | tests/admin_changelist/tests.py | 47 | 26 | def test_no_duplicates_for_non_unique_related_object_in_list_filter(self):
parent = Parent.objects.create(name="Mary")
# Two children with the same name
Child.objects.create(parent=parent, name="Daniel")
Child.objects.create(parent=parent, name="Daniel")
m = ParentAdmin... | Refs #33476 -- Reformatted code with Black. | test_no_duplicates_for_non_unique_related_object_in_list_filter | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 12 | 12 | https://github.com/django/django.git | 1 | 136 | 0 | 38 | 229 | Python | {
"docstring": "\n Regressions tests for #15819: If a field listed in list_filters is a\n non-unique related object, results shouldn't appear more than once.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 21,
"vocab_size": 20
} | def test_no_duplicates_for_non_unique_related_object_in_list_filter(self):
parent = Parent.objects.create(name="Mary")
# Two children with the same name
Child.objects.create(parent=parent, name="Daniel")
Child.objects.create(parent=parent, name="Daniel")
m = ParentAdmin... | |
5,158 | 28,140 | 48 | saleor/plugins/webhook/utils.py | 11 | 10 | def get_current_tax_app() -> Optional[App]:
return (
App.objects.order_by("pk")
.for_event_type(WebhookEventSyncType.CHECKOUT_CALCULATE_TAXES)
| Add support for calculating taxes in Saleor Apps (#9526)
* Squash previouse PR in taxes by Sync webhooks
* Adjust incoming communication form tax app in order calculation
* Change return type for base_checkout_total to Money
* Fix cratign order lines for checkout lines
* Remove not needed args
* Fix ord... | get_current_tax_app | 3e06a6462559498c6ad09c0591e648a7943ac0c6 | saleor | utils.py | 15 | 8 | https://github.com/saleor/saleor.git | 1 | 39 | 0 | 11 | 67 | Python | {
"docstring": "Return currently used tax app or None, if there aren't any.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def get_current_tax_app() -> Optional[App]:
return (
App.objects.order_by("pk")
.for_event_type(WebhookEventSyncType.CHECKOUT_CALCULATE_TAXES)
.for_event_type(WebhookEventSyncType.ORDER_CALCULATE_TAXES)
.last()
)
| |
73,173 | 249,857 | 315 | tests/util/caches/test_deferred_cache.py | 140 | 20 | def test_callbacks(self) -> None:
cache: DeferredCache[str, int] = DeferredCache("test")
callbacks = set()
# start with an entry, with a callba | Add missing type hints to test.util.caches (#14529) | test_callbacks | 4ae967cf6308e80b03da749f0cbaed36988e235e | synapse | test_deferred_cache.py | 13 | 16 | https://github.com/matrix-org/synapse.git | 1 | 171 | 0 | 100 | 300 | Python | {
"docstring": "Invalidation callbacks are called at the right time",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def test_callbacks(self) -> None:
cache: DeferredCache[str, int] = DeferredCache("test")
callbacks = set()
# start with an entry, with a callback
cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill"))
# now replace that entry with a pending result
o... | |
50,845 | 204,710 | 68 | django/core/management/sql.py | 18 | 11 | def sql_flush(style, connection, reset_sequences=True, allow_cascade=False):
tables = connection.in | Refs #33476 -- Reformatted code with Black. | sql_flush | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | sql.py | 9 | 10 | https://github.com/django/django.git | 1 | 52 | 0 | 17 | 76 | Python | {
"docstring": "\n Return a list of the SQL statements used to flush the database.\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 12,
"vocab_size": 11
} | def sql_flush(style, connection, reset_sequences=True, allow_cascade=False):
tables = connection.introspection.django_table_names(
only_existing=True, include_views=False
)
return connection.ops.sql_flush(
style,
tables,
reset_sequences=reset_sequences,
allow_cas... | |
69,920 | 242,777 | 316 | src/PIL/ImageFilter.py | 88 | 18 | def generate(cls, size, callback, channels=3, target_mode=None):
size_1d, size_2d, size_3d = cls._check_size(size)
if channels not in (3, 4):
raise ValueError("Only 3 or 4 output channels are supported")
table = [0] * (size_1d * size_2d * size_3d * channels)
idx_out... | Variable in function should be snake_case | generate | d3c9a6504e84f87379554b6b671a1fb6c66a449e | Pillow | ImageFilter.py | 17 | 20 | https://github.com/python-pillow/Pillow.git | 5 | 151 | 0 | 61 | 222 | Python | {
"docstring": "Generates new LUT using provided callback.\n\n :param size: Size of the table. Passed to the constructor.\n :param callback: Function with three parameters which correspond\n three color channels. Will be called ``size**3``\n times with val... | def generate(cls, size, callback, channels=3, target_mode=None):
size_1d, size_2d, size_3d = cls._check_size(size)
if channels not in (3, 4):
raise ValueError("Only 3 or 4 output channels are supported")
table = [0] * (size_1d * size_2d * size_3d * channels)
idx_out... | |
50,951 | 204,878 | 46 | django/db/backends/base/operations.py | 14 | 4 | def regex_lookup(self, lookup_type):
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a regex_lookup() method"
| Refs #33476 -- Reformatted code with Black. | regex_lookup | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | operations.py | 8 | 4 | https://github.com/django/django.git | 1 | 13 | 0 | 14 | 25 | Python | {
"docstring": "\n Return the string to use in a query when performing regular expression\n lookups (using \"regex\" or \"iregex\"). It should contain a '%s'\n placeholder for the column being searched against.\n\n If the feature is not supported (or part of it is not supported), raise\n ... | def regex_lookup(self, lookup_type):
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a regex_lookup() method"
)
| |
14,712 | 68,062 | 35 | erpnext/telephony/doctype/call_log/call_log.py | 55 | 32 | def link_existing_conversations(doc, state):
if doc.doctype != "Contact":
return
try:
numbers = [d.phone for d in doc.phone_nos]
for number in numbers:
number = strip_number(number)
if not number:
continue
logs = frappe.db.sql_list(
,
dict(phone_number="%{}".format(number), docname=doc.n... | style: format code with black | link_existing_conversations | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | call_log.py | 18 | 33 | https://github.com/frappe/erpnext.git | 7 | 142 | 0 | 46 | 232 | Python | {
"docstring": "\n\tCalled from hooks on creation of Contact or Lead to link all the existing conversations.\n\t\n\t\t\t\tSELECT cl.name FROM `tabCall Log` cl\n\t\t\t\tLEFT JOIN `tabDynamic Link` dl\n\t\t\t\tON cl.name = dl.parent\n\t\t\t\tWHERE (cl.`from` like %(phone_number)s or cl.`to` like %(phone_number)s)\n\t\t... | def link_existing_conversations(doc, state):
if doc.doctype != "Contact":
return
try:
numbers = [d.phone for d in doc.phone_nos]
for number in numbers:
number = strip_number(number)
if not number:
continue
logs = frappe.db.sql_list(
,
dict(phone_number="%{}".format(number), docname=doc.n... | |
89,517 | 290,401 | 125 | homeassistant/components/media_player/__init__.py | 31 | 12 | async def async_volume_up(self) -> None:
if hasattr(self, "volume_up"):
await | Update mypy to 0.990 (#81783)
* Update mypy to 0.990
* Remove type ignore - overriding attr with property (13475)
* Remove type ignores - hasattr (13544)
* Adjust type ignore - assignment (13549)
* New error code - type-abstract (13785)
* Disable annotation-unchecked (13851) | async_volume_up | 0c8eeaa6436b04ba6da46bccab8b11523f314d9b | core | __init__.py | 14 | 14 | https://github.com/home-assistant/core.git | 5 | 70 | 0 | 26 | 112 | Python | {
"docstring": "Turn volume up for media player.\n\n This method is a coroutine.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 11,
"vocab_size": 11
} | async def async_volume_up(self) -> None:
if hasattr(self, "volume_up"):
await self.hass.async_add_executor_job(self.volume_up)
return
if (
self.volume_level is not None
and self.volume_level < 1
and self.supported_features & MediaPlay... | |
39,981 | 167,374 | 62 | pandas/io/pytables.py | 16 | 6 | def infer_axes(self) -> bool:
s = self.storable
if s is None:
return False
self.get_attrs()
return True
| TYP: some return annotations in pytables.py (#47512) | infer_axes | 7d2f9b8d59908fbf57c6453bc41891efbfe981a6 | pandas | pytables.py | 7 | 10 | https://github.com/pandas-dev/pandas.git | 2 | 27 | 0 | 14 | 47 | Python | {
"docstring": "\n infer the axes of my storer\n return a boolean indicating if we have a valid storer or not\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 16
} | def infer_axes(self) -> bool:
s = self.storable
if s is None:
return False
self.get_attrs()
return True
| |
35,351 | 153,296 | 130 | modin/core/dataframe/pandas/dataframe/dataframe.py | 36 | 9 | def _validate_set_axis(self, new_labels, old_labels):
new_labels = ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
f"Length mismatch: Expected axis has {old_len} elements, "
... | REFACTOR-#3900: add flake8-no-implicit-concat plugin and refactor flake8 error codes (#3901)
Signed-off-by: jeffreykennethli <jkli@ponder.io> | _validate_set_axis | e5e9634357e60925a5a70e56a1d4882d269f533a | modin | dataframe.py | 12 | 10 | https://github.com/modin-project/modin.git | 2 | 43 | 0 | 32 | 77 | Python | {
"docstring": "\n Validate the possibility of replacement of old labels with the new labels.\n\n Parameters\n ----------\n new_labels : list-like\n The labels to replace with.\n old_labels : list-like\n The labels to replace.\n\n Returns\n ------... | def _validate_set_axis(self, new_labels, old_labels):
new_labels = ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
f"Length mismatch: Expected axis has {old_len} elements, "
... | |
18,225 | 87,117 | 55 | src/sentry/snuba/discover.py | 32 | 8 | def transform_data(result, translated_columns, query_builder) -> EventsResponse:
final_result: EventsResponse = {"data": result["data"], "meta": result["meta"]}
for col in final_result["meta"]:
# Translate back column names that were converted to snuba format
col["name"] = translated_column... | feat(discover): Only transform when ordering project (#39468)
- This updates the querybuilder with a orderby resolver so we can
implement more custom orderbys(orderbies?) in the future
- This changes the project field to just select the project_id only,
which results in needing a new post-processing capability to t... | transform_data | bf416f7ad23d7537a84c9727cfe1c0a7effd27bb | sentry | discover.py | 12 | 13 | https://github.com/getsentry/sentry.git | 3 | 80 | 0 | 31 | 102 | Python | {
"docstring": "\n Transform internal names back to the public schema ones.\n\n When getting timeseries results via rollup, this function will\n zerofill the output results.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 22,
"vocab_size": 21
} | def transform_data(result, translated_columns, query_builder) -> EventsResponse:
final_result: EventsResponse = {"data": result["data"], "meta": result["meta"]}
for col in final_result["meta"]:
# Translate back column names that were converted to snuba format
col["name"] = translated_column... | |
48,627 | 197,550 | 81 | sympy/plotting/plot.py | 43 | 17 | def plot_contour(*args, show=True, **kwargs):
args = list(map(sympify, args))
plot_expr = check_arguments(args, 1, 2)
series = [ContourSeries(*arg) for arg | Improve documentation | plot_contour | eb20cbe9b89917786a10d50b785b4f21230f04be | sympy | plot.py | 10 | 10 | https://github.com/sympy/sympy.git | 4 | 86 | 0 | 36 | 138 | Python | {
"docstring": "\n Draws contour plot of a function\n\n Usage\n =====\n\n Single plot\n\n ``plot_contour(expr, range_x, range_y, **kwargs)``\n\n If the ranges are not specified, then a default range of (-10, 10) is used.\n\n Multiple plot with the same range.\n\n ``plot_contour(expr1, expr2, r... | def plot_contour(*args, show=True, **kwargs):
args = list(map(sympify, args))
plot_expr = check_arguments(args, 1, 2)
series = [ContourSeries(*arg) for arg in plot_expr]
plot_contours = Plot(*series, **kwargs)
if len(plot_expr[0].free_symbols) > 2:
raise ValueError('Contour Plot cannot... | |
56,263 | 221,193 | 73 | python3.10.4/Lib/bz2.py | 31 | 5 | def peek(self, n=0):
self._check_can_read()
# Relies on the u | add python 3.10.4 for windows | peek | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | bz2.py | 8 | 3 | https://github.com/XX-net/XX-Net.git | 1 | 24 | 0 | 26 | 44 | Python | {
"docstring": "Return buffered data without advancing the file position.\n\n Always returns at least one byte of data, unless at EOF.\n The exact number of bytes returned is unspecified.\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 27,
"vocab_size": 25
} | def peek(self, n=0):
self._check_can_read()
# Relies on the undocumented fact that BufferedReader.peek()
# always returns at least one byte (except at EOF), independent
# of the value of n
return self._buffer.peek(n)
| |
20,701 | 101,282 | 55 | lib/training/cache.py | 12 | 5 | def cache_full(self) -> bool:
if self._cache_ | Data Augmentation update (#1263)
- lib.detected_face
- Subclass Masks for Landmark based masks
- Add training mask propery + methods to DetectedFace
- lib.training_training
- subclass TrainingDataGenerator for training and preview data
- Split cache into own module
- Reduce thread count to ... | cache_full | 2beceffad9b15c1fd78f06b9b272563321c5a41e | faceswap | cache.py | 9 | 7 | https://github.com/deepfakes/faceswap.git | 2 | 35 | 0 | 10 | 64 | Python | {
"docstring": "bool: ``True`` if the cache has been fully populated. ``False`` if there are items still\n to be cached. ",
"language": "en",
"n_whitespaces": 25,
"n_words": 18,
"vocab_size": 17
} | def cache_full(self) -> bool:
if self._cache_info["cache_full"]:
return self._cache_info["cache_full"]
with self._lock:
return self._cache_info["cache_full"]
| |
78,245 | 265,914 | 102 | netbox/netbox/views/generic/base.py | 29 | 8 | def get_queryset(self, request):
if self.queryset is None:
raise ImproperlyConfigured(
f"{self.__class__.__name__} does not define a queryset. Set queryset on the class or "
f"override its get_queryset() method."
| Closes #10739: Introduce get_queryset() method on generic views | get_queryset | b2e2e3be35f3922ecee945b97279c50725c0b7fa | netbox | base.py | 14 | 7 | https://github.com/netbox-community/netbox.git | 2 | 31 | 0 | 29 | 63 | Python | {
"docstring": "\n Return the base queryset for the view. By default, this returns self.queryset.all().\n\n Args:\n request: The current request\n ",
"language": "en",
"n_whitespaces": 50,
"n_words": 17,
"vocab_size": 16
} | def get_queryset(self, request):
if self.queryset is None:
raise ImproperlyConfigured(
f"{self.__class__.__name__} does not define a queryset. Set queryset on the class or "
f"override its get_queryset() method."
)
return self.queryset.all... | |
118,085 | 322,190 | 117 | paddlenlp/taskflow/knowledge_mining.py | 46 | 9 | def _preprocess(self, inputs):
inputs = self._check_input_text(inputs)
self._max_cls_len = 5
num_workers = self.kwargs[
'num_workers'] if 'num_workers' in self.kwargs else 0
lazy_load = self.kwargs[
'lazy_load'] if 'lazy_load' in self.kwargs else False
... | Update neural search readme and Add Paddle Serving Support (#1558)
* add recall inference similarity
* update examples
* updatea readme
* update dir name
* update neural search readme
* update milvus readme
* update domain adaptive pretraining readme
* fix the mistakes
* update readme
* add ... | _preprocess | 621357338437ee420eabbbf5ab19065bc85e73a5 | PaddleNLP | knowledge_mining.py | 10 | 26 | https://github.com/PaddlePaddle/PaddleNLP.git | 3 | 168 | 0 | 33 | 115 | Python | {
"docstring": "\n Create the dataset and dataloader for the predict.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 7
} | def _preprocess(self, inputs):
inputs = self._check_input_text(inputs)
self._max_cls_len = 5
num_workers = self.kwargs[
'num_workers'] if 'num_workers' in self.kwargs else 0
lazy_load = self.kwargs[
'lazy_load'] if 'lazy_load' in self.kwargs else False
... | |
54,151 | 215,757 | 257 | salt/modules/consul.py | 86 | 16 | def session_destroy(consul_url=None, token=None, session=None, **kwargs):
ret = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error("No Consul URL found.")
ret["me | [merge jam] Master port 49261 - consul modules (#58101)
* add consul states and acl function present/absent
* add consul to states doc index
* refact/fix consul states
* fix doc, fix states
* fix name parameter for acl_changes
* fixing pylint errors
* small changes after review by @rallytime
* fix... | session_destroy | fb825aa760fa0585a2c8fdafc6e62be8aec8cecf | salt | consul.py | 12 | 29 | https://github.com/saltstack/salt.git | 6 | 160 | 0 | 56 | 283 | Python | {
"docstring": "\n Destroy session\n\n :param consul_url: The Consul server URL.\n :param session: The ID of the session to destroy.\n :param dc: By default, the datacenter of the agent is queried;\n however, the dc can be provided using the \"dc\" parameter.\n :return: Boolean & message ... | def session_destroy(consul_url=None, token=None, session=None, **kwargs):
ret = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error("No Consul URL found.")
ret["message"] = "No Consul URL found."
ret["res"] = False
re... | |
5,242 | 29,623 | 31 | saleor/graphql/product/mutations/collection/collection_update.py | 10 | 10 | def post_save_action(cls, info, instance, cleaned_input):
manager = load_plugin_manager(info.context | Split product types and mutations (#11259)
* Split product types file
* Split product/mutations/products.py file | post_save_action | 74d1c8d8504dbdd339865ff97ca4ac9bd30a8faf | saleor | collection_update.py | 9 | 3 | https://github.com/saleor/saleor.git | 1 | 30 | 0 | 10 | 47 | Python | {
"docstring": "Override this method with `pass` to avoid triggering product webhook.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def post_save_action(cls, info, instance, cleaned_input):
manager = load_plugin_manager(info.context)
cls.call_event(manager.collection_updated, instance)
| |
81,600 | 276,242 | 91 | keras/saving/saving_utils.py | 47 | 6 | def _deserialize_metric(metric_config):
from keras import (
metrics as metrics_module,
) # pylint:disable=g-import-not-at-top
if metric_config in ["accuracy", "acc", "crossentropy", "ce"]:
# Do not deserialize accuracy and cross-entropy strings as we have special
# case handli... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _deserialize_metric | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | saving_utils.py | 8 | 7 | https://github.com/keras-team/keras.git | 2 | 37 | 0 | 41 | 68 | Python | {
"docstring": "Deserialize metrics, leaving special strings untouched.",
"language": "en",
"n_whitespaces": 5,
"n_words": 6,
"vocab_size": 6
} | def _deserialize_metric(metric_config):
from keras import (
metrics as metrics_module,
) # pylint:disable=g-import-not-at-top
if metric_config in ["accuracy", "acc", "crossentropy", "ce"]:
# Do not deserialize accuracy and cross-entropy strings as we have special
# case handli... | |
@keras_export("keras.activations.swish")
@tf.__internal__.dispatch.add_dispatch_support | 80,026 | 269,312 | 10 | keras/activations.py | 6 | 8 | def softsign(x):
return tf.math.softsign(x)
@keras_export("keras.activations.swish")
@tf.__internal__.dispatch.add_dispatch_support | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | softsign | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | activations.py | 8 | 2 | https://github.com/keras-team/keras.git | 1 | 15 | 1 | 6 | 50 | Python | {
"docstring": "Softsign activation function, `softsign(x) = x / (abs(x) + 1)`.\n\n Example Usage:\n\n >>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32)\n >>> b = tf.keras.activations.softsign(a)\n >>> b.numpy()\n array([-0.5, 0. , 0.5], dtype=float32)\n\n Args:\n x: Input tensor.\n... | def softsign(x):
return tf.math.softsign(x)
@keras_export("keras.activations.swish")
@tf.__internal__.dispatch.add_dispatch_support |
34,894 | 150,952 | 708 | freqtrade/freqai/data_kitchen.py | 145 | 59 | def compute_inlier_metric(self) -> None:
import scipy.stats as ss
nmb_previous_points = self.data['InlierMetric_nmb_points']
weibull_percentile = self.data['InlierMetric_weib_perc']
train_ft_df = self.data_dictionary['train_features']
train_ft_df_reindexed = train... | Add inlier metric computation | compute_inlier_metric | d3cb211283ced68d082cfdbdac12f3d2ab90d63b | freqtrade | data_kitchen.py | 16 | 64 | https://github.com/freqtrade/freqtrade.git | 4 | 417 | 0 | 98 | 653 | Python | {
"docstring": "\n \n Compute inlier metric from backwards distance distributions. \n This metric defines how well features from a timepoint fit \n into previous timepoints.\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 20,
"vocab_size": 18
} | def compute_inlier_metric(self) -> None:
import scipy.stats as ss
nmb_previous_points = self.data['InlierMetric_nmb_points']
weibull_percentile = self.data['InlierMetric_weib_perc']
train_ft_df = self.data_dictionary['train_features']
train_ft_df_reindexed = train... | |
55,942 | 220,224 | 70 | python3.10.4/Lib/ast.py | 16 | 7 | def items_view(self, traverser, items):
if len(items) == 1:
traverser(items[0])
self.write(",")
else:
self.interleave(lambda: self.write(", "), tra | add python 3.10.4 for windows | items_view | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | ast.py | 14 | 6 | https://github.com/XX-net/XX-Net.git | 2 | 50 | 0 | 15 | 84 | Python | {
"docstring": "Traverse and separate the given *items* with a comma and append it to\n the buffer. If *items* is a single item sequence, a trailing comma\n will be added.",
"language": "en",
"n_whitespaces": 41,
"n_words": 28,
"vocab_size": 22
def items_view(self, traverser, items):
    """Emit *items* via *traverser*, comma-separated, into the buffer.

    A single-item sequence gets a trailing comma appended (the Python
    one-tuple convention); longer sequences are joined with ", " via
    self.interleave and get no trailing comma.
    """
    if len(items) != 1:
        # General case: comma-separate every element, no trailing comma.
        self.interleave(lambda: self.write(", "), traverser, items)
    else:
        # One-element sequence: keep the trailing comma.
        traverser(items[0])
        self.write(",")
| |
72,195 | 248,296 | 62 | synapse/metrics/jemalloc.py | 19 | 9 | def refresh_stats(self) -> None:
try:
self._mallctl("epoch", read=False, write=1)
except Exception as e:
logger.warning("Failed to reload jemalloc stats: %s | Add config flags to allow for cache auto-tuning (#12701) | refresh_stats | cde8af9a495cbc7f3d0207e3f17c37eddaee34e1 | synapse | jemalloc.py | 11 | 9 | https://github.com/matrix-org/synapse.git | 2 | 37 | 0 | 19 | 65 | Python | {
"docstring": "Request that jemalloc updates its internal statistics. This needs to\n be called before querying for stats, otherwise it will return stale\n values.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 22,
"vocab_size": 22
def refresh_stats(self) -> None:
    """Ask jemalloc to refresh its internal statistics.

    Writing 1 to the "epoch" mallctl makes jemalloc snapshot fresh stats;
    without this, subsequent stat reads return stale values. Failures are
    logged as a warning rather than raised — stats refresh is best-effort.
    """
    try:
        self._mallctl("epoch", read=False, write=1)
    except Exception as exc:
        logger.warning("Failed to reload jemalloc stats: %s", exc)
| |
41,791 | 176,247 | 77 | networkx/tests/test_convert_numpy.py | 43 | 18 | def test_to_numpy_array_multiweight_reduction(func, expected):
G = nx.MultiDiGraph()
weights = [-1, 2, 10.0]
for w in weights:
G.add_edge(0, 1, weight=w)
A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float)
| Refactor `to_numpy_array` with advanced indexing (#5250)
* WIP: try approach based on advanced indexing.
* WIP: Fix some tests and support multigraphs.
* Rm test for limiting reductions to nanfunctions.
* Catch edgeless graph cornercase.
* Cleanups.
* Update networkx/convert_matrix.py
Comments from r... | test_to_numpy_array_multiweight_reduction | 0cc70051fa0a979b1f1eab4af5b6587a6ebf8334 | networkx | test_convert_numpy.py | 10 | 9 | https://github.com/networkx/networkx.git | 2 | 122 | 0 | 32 | 175 | Python | {
"docstring": "Test various functions for reducing multiedge weights.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def test_to_numpy_array_multiweight_reduction(func, expected):
G = nx.MultiDiGraph()
weights = [-1, 2, 10.0]
for w in weights:
G.add_edge(0, 1, weight=w)
A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float)
assert np.allclose(A, [[0, expected], [0, 0]])
# Undirected case
... | |
28,018 | 125,896 | 669 | rllib/connectors/tests/test_agent.py | 152 | 36 | def test_vr_connector_causal_slice(self):
view_rq_dict = {
"state": ViewRequirement("obs"),
# shift array should be [-2, -1, 0]
"prev_states": ViewRequirement("obs", shift="-2:0"),
# shift array should be [-4, -2, 0]
"prev_strided_states_even"... | [RLlib] Implemented ViewRequirementConnector (#26998) | test_vr_connector_causal_slice | 8ddcf89096e5631c6b6e0d04dc094b458a15c9f9 | ray | test_agent.py | 15 | 38 | https://github.com/ray-project/ray.git | 4 | 300 | 0 | 105 | 491 | Python | {
"docstring": "Test that the ViewRequirementConnector can handle slice shifts correctly.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_vr_connector_causal_slice(self):
view_rq_dict = {
"state": ViewRequirement("obs"),
# shift array should be [-2, -1, 0]
"prev_states": ViewRequirement("obs", shift="-2:0"),
# shift array should be [-4, -2, 0]
"prev_strided_states_even"... | |
44,269 | 183,607 | 68 | examples/calculator.py | 14 | 10 | def render(self) -> RenderableType:
return Padding(
Align.right(FigletText(self.value), vertical="middle"),
(0, 1),
style="white o | more docs | render | 6bfc26c1ec37262b9cd4bbab35d15907dc6742bf | textual | calculator.py | 12 | 7 | https://github.com/Textualize/textual.git | 1 | 38 | 0 | 14 | 62 | Python | {
"docstring": "Build a Rich renderable to render the calculator display.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
def render(self) -> RenderableType:
    """Build the Rich renderable for the calculator display.

    The current value is rendered as FIGlet text, right-aligned and
    vertically centred, then padded by one cell left/right with the
    display's white-on-dark-grey style.
    """
    figlet = FigletText(self.value)
    aligned = Align.right(figlet, vertical="middle")
    return Padding(aligned, (0, 1), style="white on rgb(51,51,51)")
| |
@keras_export("keras.__internal__.models.clone_and_build_model", v1=[]) | 82,432 | 278,255 | 253 | keras/models/cloning.py | 101 | 18 | def in_place_subclassed_model_state_restoration(model):
assert not model._is_graph_network
# Restore layers and build attributes
if (
hasattr(model, "_original_attributes_cache")
and model._original_attributes_cache is not None
):
# Models have sticky attribute assignment, s... | resolve line-too-long in models | in_place_subclassed_model_state_restoration | f0fc6f798937a7a5fdab469c0f16bdde7cfc4ccd | keras | cloning.py | 14 | 17 | https://github.com/keras-team/keras.git | 5 | 97 | 1 | 75 | 181 | Python | {
"docstring": "Restores the original state of a model after it was \"reset\".\n\n This undoes this action of `_in_place_subclassed_model_reset`, which is\n called in `clone_and_build_model` if `in_place_reset` is set to True.\n\n Args:\n model: Instance of a Keras model created via subclassing, on whic... | def in_place_subclassed_model_state_restoration(model):
assert not model._is_graph_network
# Restore layers and build attributes
if (
hasattr(model, "_original_attributes_cache")
and model._original_attributes_cache is not None
):
# Models have sticky attribute assignment, s... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.