Each record below carries these fields (column name and dtype, with the observed length or value range):

| column | type |
|---|---|
| ast_errors | string (lengths 0-3.2k) |
| d_id | int64 (44-121k) |
| id | int64 (70-338k) |
| n_whitespaces | int64 (3-14k) |
| path | string (lengths 8-134) |
| n_words | int64 (4-4.82k) |
| n_identifiers | int64 (1-131) |
| random_cut | string (lengths 16-15.8k) |
| commit_message | string (lengths 2-15.3k) |
| fun_name | string (lengths 1-84) |
| commit_id | string (lengths 40-40) |
| repo | string (lengths 3-28) |
| file_name | string (lengths 5-79) |
| ast_levels | int64 (6-31) |
| nloc | int64 (1-548) |
| url | string (lengths 31-59) |
| complexity | int64 (1-66) |
| token_counts | int64 (6-2.13k) |
| n_ast_errors | int64 (0-28) |
| vocab_size | int64 (4-1.11k) |
| n_ast_nodes | int64 (15-19.2k) |
| language | stringclasses (1 value) |
| documentation | dict |
| code | string (lengths 101-62.2k) |
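A minimal sketch of iterating records with this schema, assuming the rows are published as a Hugging Face dataset; the dataset ID below is a placeholder, not the real name:

```python
# "example/code-functions" is a hypothetical dataset ID -- substitute the real one.
from datasets import load_dataset

ds = load_dataset("example/code-functions", split="train")
for row in ds.select(range(3)):
    # each row carries the fields listed in the schema table above
    print(row["repo"], row["fun_name"], "complexity:", row["complexity"])
    print(row["documentation"]["docstring"][:80])
```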
48,786 | 198,085 | 618 | sympy/functions/elementary/trigonometric.py | 122 | 31 | def _pi_coeff(arg, cycles=1):
r
arg = sympify(arg)
if arg is pi:
return S.One
elif not arg:
return S.Zero
elif arg.is_Mul:
cx = arg.coeff(pi)
if cx:
c, x = cx.as_coeff_Mul() # pi is not included as coeff
if c.is_Float:
# recast... | replace S.Pi with pi; cache InverseTrigonometric tables | _pi_coeff | dcb6e3c69f4e47f2fdb10a2ef0ede2cc6c8f2e06 | sympy | trigonometric.py | 24 | 72 | https://github.com/sympy/sympy.git | 13 | 201 | 0 | 70 | 327 | Python | {
"docstring": "\n When arg is a Number times $\\pi$ (e.g. $3\\pi/2$) then return the Number\n normalized to be in the range $[0, 2]$, else `None`.\n\n When an even multiple of $\\pi$ is encountered, if it is multiplying\n something with known parity then the multiple is returned as 0 otherwise\n as 2.... | def _pi_coeff(arg, cycles=1):
r
arg = sympify(arg)
if arg is pi:
return S.One
elif not arg:
return S.Zero
elif arg.is_Mul:
cx = arg.coeff(pi)
if cx:
c, x = cx.as_coeff_Mul() # pi is not included as coeff
if c.is_Float:
# recast... | |
async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]:
"""Verify that the still image is valid before we create an entity."""
fmt = None
if not (url := info.get(CONF_STILL_IMAGE_URL)):
return {}, None
if not isinstance(url, template_helper.Template) and url:
url = c... | 93,650 | 294,616 | 139 | homeassistant/components/generic/config_flow.py | 63 | 27 | async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]:
fmt = None
if not (url := info.get(CONF_STILL_IMAGE_URL)):
return {}, None
if not isinstance(url, template_helper.Template) and url:
url = cv.template(url)
url.hass = hass
try:
url = url.asy... | Generic IP Camera configflow 2 (#52360)
Co-authored-by: J. Nick Koston <nick@koston.org> | async_test_still | c1a2be72fc8b76b55cfde1823c5688100e397369 | core | config_flow.py | 11 | 40 | https://github.com/home-assistant/core.git | 8 | 253 | 1 | 50 | 208 | Python | {
"docstring": "Verify that the still image is valid before we create an entity.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]:
fmt = None
if not (url := info.get(CONF_STILL_IMAGE_URL)):
return {}, None
if not isinstance(url, template_helper.Template) and url:
url = cv.template(url)
url.hass = hass
try:
url = url.asy... |
55,723 | 219,698 | 30 | python3.10.4/Lib/_pydecimal.py | 9 | 6 | def normalize(self, a):
a = _convert_other(a, raiseit=True)
return a.norm | add python 3.10.4 for windows | normalize | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _pydecimal.py | 9 | 3 | https://github.com/XX-net/XX-Net.git | 1 | 27 | 0 | 9 | 44 | Python | {
"docstring": "normalize reduces an operand to its simplest form.\n\n Essentially a plus operation with all trailing zeros removed from the\n result.\n\n >>> ExtendedContext.normalize(Decimal('2.1'))\n Decimal('2.1')\n >>> ExtendedContext.normalize(Decimal('-2.0'))\n Decimal... | def normalize(self, a):
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
| |
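The behaviour this record's docstring describes can be reproduced with the public `decimal` module (which `_pydecimal` implements in pure Python):

```python
from decimal import Decimal, ExtendedContext

# normalize() is a plus operation with all trailing zeros removed
print(ExtendedContext.normalize(Decimal("2.100")))  # 2.1
print(ExtendedContext.normalize(Decimal("-2.0")))   # -2
```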
25,698 | 116,214 | 74 | mindsdb/integrations/handlers/druid_handler/druid_handler.py | 24 | 10 | def get_tables(self) -> StatusResponse:
query =
result = self.native_query(query)
df = result.data_frame
| implemented the get_tables() and get_columns() methods | get_tables | 9a0e918bba3439959112a7fd8e5210276b5ac255 | mindsdb | druid_handler.py | 12 | 15 | https://github.com/mindsdb/mindsdb.git | 1 | 55 | 0 | 17 | 103 | Python | {
"docstring": "\n Return list of entities that will be accessible as tables.\n Returns:\n HandlerResponse\n \n SELECT *\n FROM INFORMATION_SCHEMA.TABLES\n ",
"language": "en",
"n_whitespaces": 79,
"n_words": 16,
"vocab_size": 16
} | def get_tables(self) -> StatusResponse:
query =
result = self.native_query(query)
df = result.data_frame
        df = df[['TABLE_NAME', 'TABLE_TYPE']]
result.data_frame = df.rename(columns={'TABLE_NAME': 'table_name', 'TABLE_TYPE': 'table_type'})
return result
| |
@require_torch
@require_vision | 6,261 | 34,338 | 487 | tests/test_feature_extraction_vilt.py | 131 | 27 | def get_expected_values(self, image_inputs, batched=False):
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
else:
h, w = image.shape[1], image.shape[2]
scale = self.size / min(w,... | Add ViLT (#14895)
* First commit
* Add conversion script
* Make conversion script work for base model
* More improvements
* Update conversion script, works for vqa
* Add indexing argument to meshgrid
* Make conversion script work for ViltForPreTraining
* Add ViltForPreTraining to docs
* Fix dev... | get_expected_values | ac227093e41cecb07c7e0f2fc9a504850907bd06 | transformers | test_feature_extraction_vilt.py | 15 | 30 | https://github.com/huggingface/transformers.git | 6 | 249 | 1 | 69 | 409 | Python | {
"docstring": "\n This function computes the expected height and width when providing images to ViltFeatureExtractor,\n assuming do_resize is set to True with a scalar size and size_divisor.\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 25,
"vocab_size": 23
} | def get_expected_values(self, image_inputs, batched=False):
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
else:
h, w = image.shape[1], image.shape[2]
scale = self.size / min(w,... |
70,355 | 244,366 | 358 | mmdet/models/detectors/base.py | 90 | 22 | def preprocss_testing_data(self, data):
num_augs = len(data[0]['img'])
batch_size = len(data)
aug_batch_imgs = []
aug_batch_data_samples = []
# adjust `images` and `data_samples` to a list of list
        # outer list is test-time augmentation and inner list
# ... | Simplify api of one-stage detector | preprocss_testing_data | 9c5b3331ac8edbfa328922fbab45c382380da540 | mmdetection | base.py | 14 | 20 | https://github.com/open-mmlab/mmdetection.git | 5 | 164 | 0 | 61 | 265 | Python | {
"docstring": " Process input data during training and testing phases.\n Args:\n data (list[dict]): The data to be processed, which\n comes from dataloader. The list indicate the batch dimension.\n Each dict contains these keys:\n\n - `img` (list[Tensor]... | def preprocss_testing_data(self, data):
num_augs = len(data[0]['img'])
batch_size = len(data)
aug_batch_imgs = []
aug_batch_data_samples = []
# adjust `images` and `data_samples` to a list of list
        # outer list is test-time augmentation and inner list
# ... | |
27,573 | 124,307 | 22 | python/ray/widgets/render.py | 8 | 7 | def list_templates() -> List[pathlib.Path]:
return (pathlib.Path(__file__).parent / "templates").glob("*.html.j2")
| [Core] Add HTML reprs for `ClientContext` and `WorkerContext` (#25730) | list_templates | ea47d97a548504bdb6ff1afdb1021b0bc54d5dfa | ray | render.py | 12 | 8 | https://github.com/ray-project/ray.git | 1 | 30 | 0 | 8 | 54 | Python | {
"docstring": "List the available HTML templates.\n\n Returns:\n List[pathlib.Path]: A list of files with .html.j2 extensions inside\n ./templates/\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 16,
"vocab_size": 16
} | def list_templates() -> List[pathlib.Path]:
return (pathlib.Path(__file__).parent / "templates").glob("*.html.j2")
| |
78,599 | 266,796 | 21 | test/lib/ansible_test/_internal/python_requirements.py | 15 | 6 | def usable_pip_file(path): # type: (t.Optional[str | ansible-test - Code cleanup and refactoring. (#77169)
* Remove unnecessary PyCharm ignores.
* Ignore intentional undefined attribute usage.
* Add missing type hints. Fix existing type hints.
* Fix docstrings and comments.
* Use function to register completion handler.
* Pass strings to display functions.
* Fix C... | usable_pip_file | a06fa496d3f837cca3c437ab6e9858525633d147 | ansible | python_requirements.py | 11 | 2 | https://github.com/ansible/ansible.git | 3 | 32 | 0 | 13 | 56 | Python | {
"docstring": "Return True if the specified pip file is usable, otherwise False.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def usable_pip_file(path): # type: (t.Optional[str]) -> bool
return bool(path) and os.path.exists(path) and bool(os.path.getsize(path))
# Cryptography
| |
@keras_export('keras.metrics.categorical_accuracy')
@tf.__internal__.dispatch.add_dispatch_support | 79,764 | 268,903 | 26 | keras/metrics/metrics.py | 23 | 16 | def binary_accuracy(y_true, y_pred, threshold=0.5):
y_pred = tf.convert_to_tensor(y_pred)
threshold = tf.cast(threshold, y_pred.dtype)
y_pred = tf.cast(y_pred > threshol | reverting binary accuracy to original | binary_accuracy | 8bb1b365ca6bb21b32a1ee1654eecb02570970ac | keras | metrics.py | 9 | 5 | https://github.com/keras-team/keras.git | 1 | 67 | 1 | 19 | 123 | Python | {
"docstring": "Calculates how often predictions match binary labels.\n\n Standalone usage:\n >>> y_true = [[1], [1], [0], [0]]\n >>> y_pred = [[1], [1], [0], [0]]\n >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)\n >>> assert m.shape == (4,)\n >>> m.numpy()\n array([1., 1., 1., 1.], dtype=float32)\n\... | def binary_accuracy(y_true, y_pred, threshold=0.5):
y_pred = tf.convert_to_tensor(y_pred)
threshold = tf.cast(threshold, y_pred.dtype)
y_pred = tf.cast(y_pred > threshold, y_pred.dtype)
return backend.mean(tf.equal(y_true, y_pred), axis=-1)
@keras_export('keras.metrics.categorical_accuracy')
@tf.__internal... |
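The metric in this record reduces to threshold-then-compare; the same computation restated with NumPy (an illustrative sketch, not the Keras implementation):

```python
import numpy as np

def binary_accuracy_np(y_true, y_pred, threshold=0.5):
    # threshold the predictions, then average elementwise agreement over the last axis
    y_hat = (np.asarray(y_pred) > threshold).astype(np.float64)
    return np.mean(np.equal(np.asarray(y_true), y_hat), axis=-1)

print(binary_accuracy_np([[1.0], [0.0]], [[0.9], [0.2]]))  # [1. 1.]
```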
267 | 2,224 | 100 | packages/syft/src/syft/core/node/common/node_service/oblv/oblv_messages.py | 13 | 9 | def _object2proto(self) -> SyftOblvClient_PB:
return SyftOblvClient_PB(
token=self.token,
oblivious_user_id=self.oblivious_user_id,
cookies=self.cookies,
h | Changes for publishing data to enclave | _object2proto | fd3b9772cb97127f9f356c1e854dc3b4a436402d | PySyft | oblv_messages.py | 9 | 20 | https://github.com/OpenMined/PySyft.git | 1 | 48 | 0 | 13 | 71 | Python | {
"docstring": "Returns a protobuf serialization of self.\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n :return: returns a protobuf object\n ... | def _object2proto(self) -> SyftOblvClient_PB:
return SyftOblvClient_PB(
token=self.token,
oblivious_user_id=self.oblivious_user_id,
cookies=self.cookies,
headers=self.headers,
timeout=self.timeout,
verify_ssl=self.verify_ssl,
... | |
17,696 | 83,633 | 189 | zerver/tests/test_digest.py | 50 | 28 | def test_bulk_handle_digest_email_skips_deactivated_users(self) -> None:
realm = get_realm("zulip")
hamlet = self.example_user("hamlet")
user_ids = list(
UserProfile.objects.filter(is_bot=False, realm=realm).values_list("id", flat=True)
)
do_deactivate_us | digest: Don't send emails to deactivated users, even if queued. | test_bulk_handle_digest_email_skips_deactivated_users | fcf82bf0477d7b5c6fe6d26f2458a5acef43dae2 | zulip | test_digest.py | 13 | 21 | https://github.com/zulip/zulip.git | 4 | 129 | 0 | 41 | 213 | Python | {
"docstring": "\n A user id may be added to the queue before the user is deactivated. In such a case,\n the function responsible for sending the email should correctly skip them.\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 29,
"vocab_size": 25
} | def test_bulk_handle_digest_email_skips_deactivated_users(self) -> None:
realm = get_realm("zulip")
hamlet = self.example_user("hamlet")
user_ids = list(
UserProfile.objects.filter(is_bot=False, realm=realm).values_list("id", flat=True)
)
do_deactivate_user(... | |
43,388 | 181,599 | 263 | tests/driver_tests.py | 62 | 15 | def test_driver_4():
args_list = [
'tests/tests.csv',
'-is', ',',
'-target', 'class',
'-g', '1',
'-p', '2',
'-cv', '3',
'-s', '42',
'-config', 'TPOT light',
'-v', '3'
... | Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068. | test_driver_4 | 388616b6247ca4ea8de4e2f340d6206aee523541 | tpot | driver_tests.py | 17 | 23 | https://github.com/EpistasisLab/tpot.git | 2 | 123 | 0 | 51 | 229 | Python | {
"docstring": "Assert that the tpot_driver() in TPOT driver outputs normal result with verbosity = 3.",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 14
} | def test_driver_4():
args_list = [
'tests/tests.csv',
'-is', ',',
'-target', 'class',
'-g', '1',
'-p', '2',
'-cv', '3',
'-s', '42',
'-config', 'TPOT light',
'-v', '3'
... | |
73,181 | 249,884 | 150 | tests/handlers/test_sso.py | 55 | 18 | async def test_set_avatar(self) -> None:
handler = self.hs.get_sso_handler()
# Create a new user to set avatar for
reg_handler = s | Add support for handling avatar with SSO login (#13917)
This commit adds support for handling a provided avatar picture URL
when logging in via SSO.
Signed-off-by: Ashish Kumar <ashfame@users.noreply.github.com>
Fixes #9357. | test_set_avatar | 09de2aecb05cb46e0513396e2675b24c8beedb68 | synapse | test_sso.py | 12 | 11 | https://github.com/matrix-org/synapse.git | 1 | 92 | 0 | 45 | 158 | Python | {
"docstring": "Tests successfully setting the avatar of a newly created user",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | async def test_set_avatar(self) -> None:
handler = self.hs.get_sso_handler()
# Create a new user to set avatar for
reg_handler = self.hs.get_registration_handler()
user_id = self.get_success(reg_handler.register_user(approved=True))
self.assertTrue(
self.ge... | |
27,945 | 125,674 | 130 | python/ray/tune/examples/wandb_example.py | 28 | 14 | def tune_decorated(api_key_file):
tuner = tune.Tuner(
decorated_train_function,
tune_config=tune.TuneConfig(
metric="loss",
mode="min", | [air/tuner/docs] Update docs for Tuner() API 2a: Tune examples (non-docs) (#26931)
Splitting up #26884: This PR includes changes to use Tuner() instead of tune.run() for all examples included in python/ray/tune/examples
Signed-off-by: xwjiang2010 <xwjiang2010@gmail.com>
Signed-off-by: Kai Fricke <kai@anyscale.com>... | tune_decorated | 8d7b865614f3635def12c42b653f8acd8b4ae56a | ray | wandb_example.py | 14 | 14 | https://github.com/ray-project/ray.git | 1 | 87 | 0 | 28 | 136 | Python | {
"docstring": "Example for using the @wandb_mixin decorator with the function API",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def tune_decorated(api_key_file):
tuner = tune.Tuner(
decorated_train_function,
tune_config=tune.TuneConfig(
metric="loss",
mode="min",
),
param_space={
"mean": tune.grid_search([1, 2, 3, 4, 5]),
"sd": tune.uniform(0.2, 0.8),
... | |
12,200 | 60,540 | 45 | .venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py | 16 | 8 | def format_usage(self, usage):
# type: (str) -> str
msg = "\nUsage: {}\n".format(self.indent_lines(text | upd; format | format_usage | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | parser.py | 12 | 3 | https://github.com/jindongwang/transferlearning.git | 1 | 30 | 0 | 15 | 56 | Python | {
"docstring": "\n Ensure there is only one newline between usage and the first heading\n if there is no description.\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 17,
"vocab_size": 15
} | def format_usage(self, usage):
# type: (str) -> str
msg = "\nUsage: {}\n".format(self.indent_lines(textwrap.dedent(usage), " "))
return msg
| |
48,817 | 198,195 | 112 | sympy/matrices/expressions/matexpr.py | 35 | 16 | def from_index_summation(expr, first_index=None, last_index=None, dimensions=None):
r
from sympy.tensor.array.expressions.from_indexed_to_array import convert_index | Rename files for array expression conversions in order to avoid naming conflicts in TAB-completion of the corresponding functions | from_index_summation | a69c49bec6caf2cb460dc4eedf0fec184db92f0e | sympy | matexpr.py | 9 | 50 | https://github.com/sympy/sympy.git | 3 | 86 | 0 | 28 | 126 | Python | {
"docstring": "\n Parse expression of matrices with explicitly summed indices into a\n matrix expression without indices, if possible.\n\n This transformation expressed in mathematical notation:\n\n `\\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \\Longrightarrow \\mathbf{A}\\cdot \\mathbf{B}`\n\n ... | def from_index_summation(expr, first_index=None, last_index=None, dimensions=None):
r
from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array
from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix
first_indices = []
... | |
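The transformation the docstring names, Sum over j of A[i, j]*B[j, k] becoming A*B, can be exercised the way SymPy's own doctest does (a short sketch):

```python
from sympy import MatrixExpr, MatrixSymbol, Sum
from sympy.abc import i, j, k, N

A = MatrixSymbol("A", N, N)
B = MatrixSymbol("B", N, N)
# an explicitly summed index expression...
expr = Sum(A[i, j] * B[j, k], (j, 0, N - 1))
# ...parsed back into an index-free matrix product
print(MatrixExpr.from_index_summation(expr))  # A*B
```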
25,253 | 114,699 | 46 | mindsdb/integrations/mysql_handler/mysql_handler.py | 18 | 6 | def get_views(self):
| Update mysql handler | get_views | 5c2ce68a8eb8b992ab841db3d3a6b4694ecd244b | mindsdb | mysql_handler.py | 9 | 4 | https://github.com/mindsdb/mindsdb.git | 1 | 20 | 0 | 16 | 43 | Python | {
"docstring": "\n Get more information about specific database views\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | def get_views(self):
q = f"SHOW FULL TABLES IN {self.database} WHERE TABLE_TYPE LIKE 'VIEW';"
result = self.native_query(q)
return result
| |
30,118 | 133,814 | 93 | rllib/agents/qmix/qmix_policy.py | 55 | 18 | def _mac(model, obs, h):
B, n_agents = obs.size(0), obs.size(1)
if not isinstance(obs, dict):
obs = {"obs": obs}
obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()}
h_flat = [s.reshape([B * n_agents, -1]) for s in h]
q_flat, h_flat = model(obs_agents_as_batches, h_fl... | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | _mac | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | qmix_policy.py | 11 | 10 | https://github.com/ray-project/ray.git | 5 | 130 | 0 | 41 | 198 | Python | {
"docstring": "Forward pass of the multi-agent controller.\n\n Args:\n model: TorchModelV2 class\n obs: Tensor of shape [B, n_agents, obs_size]\n h: List of tensors of shape [B, n_agents, h_size]\n\n Returns:\n q_vals: Tensor of shape [B, n_agents, n_actions]\n h: Tensor of s... | def _mac(model, obs, h):
B, n_agents = obs.size(0), obs.size(1)
if not isinstance(obs, dict):
obs = {"obs": obs}
obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()}
h_flat = [s.reshape([B * n_agents, -1]) for s in h]
q_flat, h_flat = model(obs_agents_as_batches, h_fl... | |
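The shape bookkeeping in this helper, folding the agent axis into the batch axis, in isolation (sizes assumed for illustration):

```python
import torch

B, n_agents, obs_size = 4, 3, 8
obs = torch.randn(B, n_agents, obs_size)
flat = obs.reshape(B * n_agents, obs_size)  # agents folded into the batch dimension
print(flat.shape)  # torch.Size([12, 8])
```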
19,794 | 100,294 | 66 | tools/alignments/jobs.py | 31 | 12 | def _get_count(self):
has_meta = all(val is not None for val in self._alignments.video_meta_data.values())
retval = len(self._alignments.video_meta_data["pts_time"]) if has_meta else None
logger.debug("Frame count from alignments file: (has | alignments tool - Don't re-analyze video if metadata in alignments | _get_count | 30872ef265c0fc29465f4c3a0778d0049f8c3897 | faceswap | jobs.py | 13 | 5 | https://github.com/deepfakes/faceswap.git | 3 | 56 | 0 | 27 | 91 | Python | {
"docstring": " If the alignments file has been run through the manual tool, then it will hold video\n meta information, meaning that the count of frames in the alignment file can be relied\n on to be accurate.\n\n Returns\n -------\n int or ``None``\n For video input which ... | def _get_count(self):
has_meta = all(val is not None for val in self._alignments.video_meta_data.values())
retval = len(self._alignments.video_meta_data["pts_time"]) if has_meta else None
logger.debug("Frame count from alignments file: (has_meta: %s, %s", has_meta, retval)
retur... | |
43,447 | 181,659 | 38 | tests/one_hot_encoder_tests.py | 8 | 6 | def test_sparse1_with_non_sparse_components():
fit_then_transform(
| Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068. | test_sparse1_with_non_sparse_components | 388616b6247ca4ea8de4e2f340d6206aee523541 | tpot | one_hot_encoder_tests.py | 9 | 6 | https://github.com/EpistasisLab/tpot.git | 1 | 23 | 0 | 8 | 38 | Python | {
"docstring": "Test fit_transform a sparse matrix with specifying categorical_features.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def test_sparse1_with_non_sparse_components():
fit_then_transform(
sparse1_paratial_1h.todense(),
sparse1,
categorical_features=[True, False]
)
| |
38,532 | 160,160 | 78 | numpy/f2py/tests/test_f2py2e.py | 41 | 21 | def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):
MNAME = "hi"
foutl = get_io_paths(hello_world_f90, mname=MNAME)
ipath = foutl.f90inp
monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split())
with util.switchdir(ipath.parent):
f2pycli()
# Always generate C modu... | TST: Initialize f2py2e tests of the F2PY CLI (#20668)
Increases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff.
More importantly, sets the groundwork for #20056, in that passing the same testsuite should ind... | test_mod_gen_f77 | 729ad4f92420231e2a7009b3223c6c7620b8b808 | numpy | test_f2py2e.py | 11 | 9 | https://github.com/numpy/numpy.git | 1 | 74 | 0 | 37 | 134 | Python | {
"docstring": "Checks the generation of files based on a module name\n CLI :: -m\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 13
} | def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):
MNAME = "hi"
foutl = get_io_paths(hello_world_f90, mname=MNAME)
ipath = foutl.f90inp
monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split())
with util.switchdir(ipath.parent):
f2pycli()
# Always generate C modu... | |
78,577 | 266,774 | 217 | test/lib/ansible_test/_internal/delegation.py | 51 | 31 | def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None
assert isinstance(args, EnvironmentConfig)
with delegation_context(args, host_state):
if isinstance(args, TestConfig):
args.metadata.ci_provider = get_ci_provider().co... | ansible-test - Code cleanup and refactoring. (#77169)
* Remove unnecessary PyCharm ignores.
* Ignore intentional undefined attribute usage.
* Add missing type hints. Fix existing type hints.
* Fix docstrings and comments.
* Use function to register completion handler.
* Pass strings to display functions.
* Fix C... | delegate | a06fa496d3f837cca3c437ab6e9858525633d147 | ansible | delegation.py | 17 | 15 | https://github.com/ansible/ansible.git | 3 | 146 | 0 | 39 | 232 | Python | {
"docstring": "Delegate execution of ansible-test to another environment.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None
assert isinstance(args, EnvironmentConfig)
with delegation_context(args, host_state):
if isinstance(args, TestConfig):
args.metadata.ci_provider = get_ci_provider().co... | |
6,819 | 37,514 | 40 | src/transformers/testing_utils.py | 21 | 9 | def require_torch_non_multi_gpu(test_case):
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_cas | Update all require decorators to use skipUnless when possible (#16999) | require_torch_non_multi_gpu | 57e6464ac9a31156f1c93e59107323e6ec01309e | transformers | testing_utils.py | 12 | 5 | https://github.com/huggingface/transformers.git | 2 | 44 | 0 | 19 | 79 | Python | {
"docstring": "\n Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 13,
"vocab_size": 13
} | def require_torch_non_multi_gpu(test_case):
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)
| |
53,412 | 212,800 | 91 | PySimpleGUI.py | 27 | 9 | def _ReturnKeyHandler(self, event):
# if the element is disabled, ignore the | If an element is disabled, then don't generate events for it (specifically for Input element in this case) | _ReturnKeyHandler | 47047700dd76c40c4471635a7de5f770d5c23c02 | PySimpleGUI | PySimpleGUI.py | 9 | 7 | https://github.com/PySimpleGUI/PySimpleGUI.git | 3 | 38 | 0 | 21 | 65 | Python | {
"docstring": "\n Internal callback for the ENTER / RETURN key. Results in calling the ButtonCallBack for element that has the return key bound to it, just as if button was clicked.\n\n :param event:\n :type event:\n\n ",
"language": "en",
"n_whitespaces": 62,
"n_words": 33,
"voca... | def _ReturnKeyHandler(self, event):
# if the element is disabled, ignore the event
if self.Disabled:
return
MyForm = self.ParentForm
button_element = self._FindReturnKeyBoundButton(MyForm)
if button_element is not None:
button_element.ButtonCallB... | |
@pytest.fixture | 39,556 | 164,367 | 32 | pandas/tests/frame/conftest.py | 17 | 8 | def uint64_frame():
return DataFrame(
{ | ⬆️ UPGRADE: Autoupdate pre-commit config (#45752)
Co-authored-by: MarcoGorelli <MarcoGorelli@users.noreply.github.com> | uint64_frame | 419331c598a097896edae40bc0687e4127f97b6b | pandas | conftest.py | 12 | 4 | https://github.com/pandas-dev/pandas.git | 1 | 45 | 1 | 15 | 80 | Python | {
"docstring": "\n Fixture for DataFrame with uint64 values\n\n Columns are ['A', 'B']\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 10,
"vocab_size": 10
} | def uint64_frame():
return DataFrame(
{"A": np.arange(3), "B": [2**63, 2**63 + 5, 2**63 + 10]}, dtype=np.uint64
)
@pytest.fixture |
49,839 | 200,995 | 48 | tests/annotations/tests.py | 9 | 12 | def test_null_annotation(self):
| Refs #33476 -- Reformatted code with Black. | test_null_annotation | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 16 | 5 | https://github.com/django/django.git | 1 | 39 | 0 | 9 | 66 | Python | {
"docstring": "\n Annotating None onto a model round-trips\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 6,
"vocab_size": 6
} | def test_null_annotation(self):
book = Book.objects.annotate(
no_value=Value(None, output_field=IntegerField())
).first()
self.assertIsNone(book.no_value)
| |
41,731 | 176,161 | 233 | networkx/generators/small.py | 51 | 5 | def icosahedral_graph(create_using=None):
description = [
"adjacencylist",
"Platonic Icosahedral Graph",
12,
[
[2, 6, 8, 9, 12],
[3, 6, 7, 9],
[4, 7, 9, 10],
[5, 7, 10, 11],
[ | Docstrings for the small.py module (#5240)
* added description for the first 5 small graphs
* modified descriptions based on comment and added description for two more functions
* added doctrings to all the functions
* Minor touchups.
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> | icosahedral_graph | dec723f072eb997a497a159dbe8674cd39999ee9 | networkx | small.py | 9 | 22 | https://github.com/networkx/networkx.git | 1 | 117 | 0 | 37 | 150 | Python | {
"docstring": "\n Returns the Platonic Icosahedral graph.\n\n The icosahedral graph has 12 nodes and 30 edges. It is a Platonic graph\n whose nodes have the connectivity of the icosahedron. It is undirected,\n regular and Hamiltonian [1]_.\n\n Parameters\n ----------\n create_using : NetworkX gr... | def icosahedral_graph(create_using=None):
description = [
"adjacencylist",
"Platonic Icosahedral Graph",
12,
[
[2, 6, 8, 9, 12],
[3, 6, 7, 9],
[4, 7, 9, 10],
[5, 7, 10, 11],
[6, 7, 11, 12],
[7, 12],
... | |
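The docstring's claims (12 nodes, 30 edges, regular) are easy to check against the released generator:

```python
import networkx as nx

G = nx.icosahedral_graph()
print(G.number_of_nodes(), G.number_of_edges())  # 12 30
print(all(d == 5 for _, d in G.degree()))        # True: the graph is 5-regular
```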
118,342 | 323,029 | 274 | examples/biomedical/cblue/train_spo.py | 93 | 44 | def evaluate(model, criterion, metric, data_loader):
model.eval()
metric.reset()
losses = []
for batch in tqdm(data_loader):
input_ids, token_type_ids, position_ids, masks, ent_label, spo_label = batch
max_batch_len = input_ids.shape[-1]
ent_mask = paddle.unsqueeze(masks, ax... | [ehealth] fix problems for dynamic2static | evaluate | aa82dc06668ddca275e3a350d4c2793e4961086c | PaddleNLP | train_spo.py | 12 | 26 | https://github.com/PaddlePaddle/PaddleNLP.git | 2 | 256 | 0 | 69 | 389 | Python | {
"docstring": "\n Given a dataset, it evals model and compute the metric.\n Args:\n model(obj:`paddle.nn.Layer`): A model to classify texts.\n dataloader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches.\n criterion(`paddle.nn.functional`): It can compute the loss.\n... | def evaluate(model, criterion, metric, data_loader):
model.eval()
metric.reset()
losses = []
for batch in tqdm(data_loader):
input_ids, token_type_ids, position_ids, masks, ent_label, spo_label = batch
max_batch_len = input_ids.shape[-1]
ent_mask = paddle.unsqueeze(masks, ax... | |
51,174 | 205,717 | 66 | django/db/models/options.py | 16 | 6 | def get_fields(self, include_parents=True, include_hidden=False):
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(
| Refs #33476 -- Reformatted code with Black. | get_fields | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | options.py | 8 | 6 | https://github.com/django/django.git | 2 | 35 | 0 | 15 | 55 | Python | {
"docstring": "\n Return a list of fields associated to the model. By default, include\n forward and reverse fields, fields derived from inheritance, but not\n hidden fields. The returned fields can be changed using the parameters:\n\n - include_parents: include fields derived from inheri... | def get_fields(self, include_parents=True, include_hidden=False):
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(
include_parents=include_parents, include_hidden=include_hidden
)
| |
50,690 | 204,309 | 119 | django/contrib/sessions/backends/file.py | 48 | 13 | def _key_to_file(self, session_key=None):
if session_key is None:
session_key | Refs #33476 -- Reformatted code with Black. | _key_to_file | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | file.py | 10 | 6 | https://github.com/django/django.git | 3 | 56 | 0 | 41 | 96 | Python | {
"docstring": "\n Get the file associated with this session key.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | def _key_to_file(self, session_key=None):
if session_key is None:
session_key = self._get_or_create_session_key()
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
... | |
12,436 | 61,197 | 200 | .venv/lib/python3.8/site-packages/pip/_internal/utils/hashes.py | 47 | 20 | def check_against_chunks(self, chunks):
# type: (Iterator[bytes]) -> None
gots = {}
for hash_name in self._allowed.keys():
try:
gots[hash_name] = hashlib.new(hash_name)
except (ValueError, TypeError):
raise Installati | upd; format | check_against_chunks | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | hashes.py | 14 | 14 | https://github.com/jindongwang/transferlearning.git | 7 | 101 | 0 | 38 | 167 | Python | {
"docstring": "Check good hashes against ones built from iterable of chunks of\n data.\n\n Raise HashMismatch if none match.\n\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 17,
"vocab_size": 16
} | def check_against_chunks(self, chunks):
# type: (Iterator[bytes]) -> None
gots = {}
for hash_name in self._allowed.keys():
try:
gots[hash_name] = hashlib.new(hash_name)
except (ValueError, TypeError):
raise InstallationError(f"Unkn... | |
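The chunk-wise hashing pattern this method relies on, restated with plain `hashlib` (a self-contained sketch, not pip's `Hashes` class):

```python
import hashlib

def sha256_of_chunks(chunks):
    h = hashlib.sha256()
    for chunk in chunks:  # feed the digest incrementally, one chunk at a time
        h.update(chunk)
    return h.hexdigest()

assert sha256_of_chunks([b"ab", b"c"]) == hashlib.sha256(b"abc").hexdigest()
```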
70,252 | 244,125 | 169 | mmdet/models/losses/cross_entropy_loss.py | 61 | 19 | def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(
valid_mask & (labels < label_channels), as_tuple=False)
if inds.nume... | [Fix] Fix reduction=mean in CELoss. (#7449)
* [Fix] Fix ignore in CELoss.
* add ut
* fix and add comments
* add avg_non_ignore option
* bce avg
* fix lint | _expand_onehot_labels | 3b2e9655631a2edd28bb94c640bd6a74c0bfad55 | mmdetection | cross_entropy_loss.py | 14 | 15 | https://github.com/open-mmlab/mmdetection.git | 3 | 147 | 0 | 42 | 223 | Python | {
"docstring": "Expand onehot labels to match the size of prediction.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(
valid_mask & (labels < label_channels), as_tuple=False)
if inds.nume... | |
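The expansion described in the docstring, integer labels to one-hot rows with invalid labels left all-zero, shown standalone; using `-100` for `ignore_index` is an assumption for illustration:

```python
import torch
import torch.nn.functional as F

labels = torch.tensor([0, 2, -100])  # -100 plays the ignore_index role here
valid = (labels >= 0) & (labels != -100)
onehot = torch.zeros(len(labels), 3)
onehot[valid] = F.one_hot(labels[valid], num_classes=3).float()
print(onehot)  # the ignored row stays all-zero
```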
80,852 | 271,751 | 227 | keras/engine/training_test.py | 55 | 24 | def test_sequence_input_types(self, input_type):
if not tf.executing_eagerly():
self.skipTest("Improved checking is only present in data_adapter.")
xy_function, x_function = self._make_sequence_input_functions(
input_type
)
fit_kwargs, evaluate_kwargs, p... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | test_sequence_input_types | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | training_test.py | 10 | 20 | https://github.com/keras-team/keras.git | 3 | 144 | 0 | 44 | 239 | Python | {
"docstring": "Ensure that namedtuples and tuples are plumbed identically.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def test_sequence_input_types(self, input_type):
if not tf.executing_eagerly():
self.skipTest("Improved checking is only present in data_adapter.")
xy_function, x_function = self._make_sequence_input_functions(
input_type
)
fit_kwargs, evaluate_kwargs, p... | |
@frappe.whitelist() | 14,704 | 68,000 | 70 | erpnext/stock/utils.py | 100 | 25 | def get_stock_value_on(warehouse=None, posting_date=None, item_code=None):
if not posting_date:
posting_date = nowdate()
values, condition = [posting_date], ""
if warehouse:
lft, rgt, is_group = frappe.db.get_value("Warehouse", w | style: format code with black | get_stock_value_on | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | utils.py | 13 | 35 | https://github.com/frappe/erpnext.git | 7 | 172 | 1 | 75 | 294 | Python | {
"docstring": "\n\t\tSELECT item_code, stock_value, name, warehouse\n\t\tFROM `tabStock Ledger Entry` sle\n\t\tWHERE posting_date <= %s {0}\n\t\t\tand is_cancelled = 0\n\t\tORDER BY timestamp(posting_date, posting_time) DESC, creation DESC\n\t",
"language": "en",
"n_whitespaces": 21,
"n_words": 26,
"vocab_si... | def get_stock_value_on(warehouse=None, posting_date=None, item_code=None):
if not posting_date:
posting_date = nowdate()
values, condition = [posting_date], ""
if warehouse:
lft, rgt, is_group = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt", "is_group"])
if is_group:
values.extend([lft, rgt]... |
53,802 | 215,084 | 25 | salt/modules/aixpkg.py | 13 | 6 | def _is_installed_rpm(name):
log.debug(f"_is_installed_rpm '{name}'")
cmd = ["/usr/bin/rpm", "-q", name]
return __sal | Working tests for install | _is_installed_rpm | f1c37893caf90738288e789c3233ab934630254f | salt | aixpkg.py | 9 | 4 | https://github.com/saltstack/salt.git | 1 | 32 | 0 | 13 | 62 | Python | {
"docstring": "\n Returns True if the rpm package is installed. Otherwise returns False.\n ",
"language": "en",
"n_whitespaces": 18,
"n_words": 11,
"vocab_size": 11
} | def _is_installed_rpm(name):
log.debug(f"_is_installed_rpm '{name}'")
cmd = ["/usr/bin/rpm", "-q", name]
return __salt__["cmd.retcode"](cmd) == 0
| |
34,219 | 148,283 | 35 | python/ray/_private/thirdparty/pathspec/util.py | 45 | 14 | def iter_tree_files(root, on_error=None, follow_links=None):
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow... | [Bugfix] fix invalid excluding of Black (#24042)
- We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options
- Recover the files in `python/ray/_private/thirdparty` which has been form... | iter_tree_files | 0e6c042e29cbbe429d81c9c1af3c75c261f00980 | ray | util.py | 12 | 8 | https://github.com/ray-project/ray.git | 6 | 81 | 0 | 36 | 136 | Python | {
"docstring": "\n\tWalks the specified directory for all files.\n\n\t*root* (:class:`str`) is the root directory to search for files.\n\n\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n\toptionally is the error handler for file-system exceptions. It will be\n\tcalled with the exception (:exc:`OSEr... | def iter_tree_files(root, on_error=None, follow_links=None):
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow... | |
12,362 | 60,970 | 240 | .venv/lib/python3.8/site-packages/pip/_internal/req/constructors.py | 100 | 16 | def _get_url_from_path(path, name):
# type: (str, str) -> Optional[str]
if _looks_like_path(name) and os.path.isdir(path):
if is_installable_dir(path):
return path_to_url(path)
raise InstallationError(
f"Directory {name!r} is not installable. Neither 'setup.py' "
... | upd; format | _get_url_from_path | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | constructors.py | 12 | 21 | https://github.com/jindongwang/transferlearning.git | 8 | 108 | 0 | 73 | 191 | Python | {
"docstring": "\n First, it checks whether a provided path is an installable directory\n (e.g. it has a setup.py). If it is, returns the path.\n\n If false, check if the path is an archive file (such as a .whl).\n The function checks if the path is a file. If false, if the path has\n an @, it will tre... | def _get_url_from_path(path, name):
# type: (str, str) -> Optional[str]
if _looks_like_path(name) and os.path.isdir(path):
if is_installable_dir(path):
return path_to_url(path)
raise InstallationError(
f"Directory {name!r} is not installable. Neither 'setup.py' "
... | |
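The path-to-URL direction of this logic has a standard-library counterpart (a sketch of the idea, not pip's internal `path_to_url`):

```python
from pathlib import Path

# an absolute filesystem path rendered as a file:// URL
print(Path("/tmp/pkg").as_uri())  # file:///tmp/pkg
```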
53,808 | 215,091 | 228 | tests/pytests/unit/modules/test_aixpkg.py | 61 | 18 | def test_install_fileset_with_bff_extension():
installp_call = MagicMock(return_value={"retcode": 0, "stdout": ""})
fileset_pkg_name = (
"/cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff"
)
list_pkgs_mock = MagicMock(
side_effect=[{"bos.rte.printers": "7.1.6.0"},... | Working tests for install | test_install_fileset_with_bff_extension | f1c37893caf90738288e789c3233ab934630254f | salt | test_aixpkg.py | 16 | 21 | https://github.com/saltstack/salt.git | 1 | 137 | 0 | 49 | 248 | Python | {
"docstring": "\n Test install of fileset with bff extension\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | def test_install_fileset_with_bff_extension():
installp_call = MagicMock(return_value={"retcode": 0, "stdout": ""})
fileset_pkg_name = (
"/cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff"
)
list_pkgs_mock = MagicMock(
side_effect=[{"bos.rte.printers": "7.1.6.0"},... | |
14,123 | 66,174 | 4 | erpnext/hr/doctype/leave_block_list/leave_block_list.py | 9 | 7 | def is_user_in_allow_list(block_list):
return frappe.ses | style: format code with black | is_user_in_allow_list | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | leave_block_list.py | 9 | 6 | https://github.com/frappe/erpnext.git | 1 | 23 | 0 | 9 | 37 | Python | {
"docstring": "select allow_user\n\t\tfrom `tabLeave Block List Allow` where parent=%s",
"language": "en",
"n_whitespaces": 7,
"n_words": 9,
"vocab_size": 9
} | def is_user_in_allow_list(block_list):
return frappe.session.user in frappe.db.sql_list(
,
block_list,
)
| |
54,495 | 216,276 | 91 | tests/pytests/functional/transport/server/test_req_channel.py | 34 | 15 | def test_normalization(push_channel):
types = {
"list": list,
}
msgs = [
{"list": tuple([1, 2, 3])},
]
for msg in msgs:
ret = push_channel.send(msg, timeout=5, tries=1)
| Fix minion unit tests, specifically .../tests/pytests/test_minion.py | test_normalization | 3c7e1ec1f08abd7cd1ba78ad7880acb6ba6fdce7 | salt | test_req_channel.py | 12 | 11 | https://github.com/saltstack/salt.git | 3 | 78 | 0 | 30 | 124 | Python | {
"docstring": "\n Since we use msgpack, we need to test that list types are converted to lists\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 15,
"vocab_size": 13
} | def test_normalization(push_channel):
types = {
"list": list,
}
msgs = [
{"list": tuple([1, 2, 3])},
]
for msg in msgs:
ret = push_channel.send(msg, timeout=5, tries=1)
for key, value in ret["load"].items():
assert types[key] == type(value)
| |
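The behaviour this test pins down, msgpack turning tuples into lists on a round trip, is visible with msgpack alone:

```python
import msgpack

packed = msgpack.packb({"list": (1, 2, 3)})
# the tuple comes back as a plain list after the round trip
print(msgpack.unpackb(packed))  # {'list': [1, 2, 3]}
```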
40,230 | 168,206 | 219 | pandas/core/arrays/datetimes.py | 73 | 24 | def to_perioddelta(self, freq) -> TimedeltaArray:
        # Deprecation GH#34853
warnings.warn(
"to_perioddelta is deprecated and will be removed in a "
"future version. "
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
FutureWarning,
... | PERF cache find_stack_level (#48023)
cache stacklevel | to_perioddelta | 2f8d0a36703e81e4dca52ca9fe4f58c910c1b304 | pandas | datetimes.py | 12 | 27 | https://github.com/pandas-dev/pandas.git | 2 | 87 | 0 | 63 | 152 | Python | {
"docstring": "\n Calculate deltas between self values and self converted to Periods at a freq.\n\n Used for vectorized offsets.\n\n Parameters\n ----------\n freq : Period frequency\n\n Returns\n -------\n TimedeltaArray/Index\n ",
"language": "en",
... | def to_perioddelta(self, freq) -> TimedeltaArray:
        # Deprecation GH#34853
warnings.warn(
"to_perioddelta is deprecated and will be removed in a "
"future version. "
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
FutureWarning,
... | |
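The replacement the deprecation message recommends, spelled out (a small sketch; the index values are arbitrary):

```python
import pandas as pd

dtindex = pd.date_range("2021-01-01 06:00", periods=3, freq="12H")
# offsets of each timestamp within its "D" period, per the message above
deltas = dtindex - dtindex.to_period("D").to_timestamp()
print(deltas)  # TimedeltaIndex: 6h, 18h, 6h within each day
```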
4,407 | 22,678 | 59 | linear-algebra-python/src/lib.py | 16 | 6 | def set(self, components):
if len(components) > 0:
| refactor: clean code
Signed-off-by: slowy07 <slowy.arfy@gmail.com> | set | f0af0c43340763724f139fa68aa1e5a9ffe458b4 | Python | lib.py | 11 | 5 | https://github.com/geekcomputers/Python.git | 2 | 28 | 0 | 16 | 50 | Python | {
"docstring": "\n input: new components\n changes the components of the vector.\n replace the components with newer one.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 15,
"vocab_size": 11
} | def set(self, components):
if len(components) > 0:
self.__components = components
else:
raise Exception("please give any vector")
| |
24,742 | 112,742 | 123 | nni/algorithms/compression/v2/pytorch/pruning/tools/base.py | 43 | 24 | def get_best_result(self) -> Optional[Tuple[Union[int, str], Module, Dict[str, Dict[str, Tensor]], Optional[float], List[Dict]]]:
if self._best_task_id is not None:
compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth'))
compact_model_masks = torch.load(... | [Compression] fix typehints (#4800) | get_best_result | cbac2c5c0f7606aca8ccf08fbd418ffe3adfe427 | nni | base.py | 15 | 15 | https://github.com/microsoft/nni.git | 2 | 128 | 0 | 35 | 199 | Python | {
"docstring": "\n Returns\n -------\n Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]]\n If self._best_task_id is not None,\n return best task id, best compact model, masks on the compact model, score, config list used in this task.\n ",
... | def get_best_result(self) -> Optional[Tuple[Union[int, str], Module, Dict[str, Dict[str, Tensor]], Optional[float], List[Dict]]]:
if self._best_task_id is not None:
compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth'))
compact_model_masks = torch.load(... | |
70,113 | 243,757 | 75 | src/PIL/ImageFont.py | 21 | 9 | def set_variation_by_axes(self, axes):
try:
self.font.setvaraxes( | Improve exception traceback readability | set_variation_by_axes | 2ae55ccbdad9c842929fb238ea1eb81d1f999024 | Pillow | ImageFont.py | 10 | 6 | https://github.com/python-pillow/Pillow.git | 2 | 33 | 0 | 21 | 58 | Python | {
"docstring": "\n :param axes: A list of values for each axis.\n :exception OSError: If the font is not a variation font.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 19
} | def set_variation_by_axes(self, axes):
try:
self.font.setvaraxes(axes)
except AttributeError as e:
msg = "FreeType 2.9.1 or greater is required"
raise NotImplementedError(msg) from e
| |
56,214 | 221,111 | 57 | python3.10.4/Lib/bdb.py | 14 | 9 | def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
| add python 3.10.4 for windows | dispatch_line | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | bdb.py | 9 | 5 | https://github.com/XX-net/XX-Net.git | 4 | 40 | 0 | 13 | 66 | Python | {
"docstring": "Invoke user function and return trace function for line event.\n\n If the debugger stops on the current line, invoke\n self.user_line(). Raise BdbQuit if self.quitting is set.\n Return self.trace_dispatch to continue tracing in this scope.\n ",
"language": "en",
"n_whit... | def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
| |
31,440 | 138,497 | 110 | python/ray/data/impl/plan.py | 36 | 7 | def clear(self) -> None:
self._in_blocks.clear()
self._snapshot_blocks = None
self._snapshot_stats = None
# | [Datasets] [Out-of-Band Serialization: 2/3] Refactor `ExecutionPlan` to maintain complete lineage and eagerly unlink block references. (#23931)
This PR refactors ExecutionPlan to maintain complete stage lineage, even for eagerly computed datasets, while ensuring that block references are unlinked as early as possible ... | clear | 9ee24530abf1b5e3239869b5257dd7b678337b90 | ray | plan.py | 9 | 11 | https://github.com/ray-project/ray.git | 1 | 44 | 0 | 28 | 76 | Python | {
"docstring": "Clear all cached block references of this plan, including input blocks.\n\n This will render the plan un-executable unless the root is a LazyBlockList.",
"language": "en",
"n_whitespaces": 29,
"n_words": 23,
"vocab_size": 22
} | def clear(self) -> None:
self._in_blocks.clear()
self._snapshot_blocks = None
self._snapshot_stats = None
# We're erasing the snapshot, so put all stages into the "after snapshot"
# bucket.
self._stages_after_snapshot = (
self._stages_before_snapshot ... | |
76,190 | 260,331 | 446 | sklearn/cluster/_birch.py | 131 | 25 | def _global_clustering(self, X=None):
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_la | MAINT validate parameters in Birch (#23593)
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
Co-authored-by: jeremiedbb <jeremiedbb@yahoo.fr> | _global_clustering | 24c2448cc7687fbacbc3a9af13f47a935dfcbeeb | scikit-learn | _birch.py | 15 | 23 | https://github.com/scikit-learn/scikit-learn.git | 8 | 151 | 0 | 88 | 249 | Python | {
"docstring": "\n Global clustering for the subclusters obtained after fitting\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | def _global_clustering(self, X=None):
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, Integ... | |
@pytest.mark.parametrize('field_name', ['extra__test__custom_field', 'custom_field'])
@mock.patch('airflow.utils.module_loading.import_string')
@mock.patch('airflow.providers_manager.ProvidersManager.hooks', new_callable=PropertyMock) | 9,302 | 47,930 | 65 | tests/www/views/test_views_connection.py | 41 | 23 | def test_prefill_form_backcompat(extras, expected):
mock_form = mock.Mock()
mock_form.data = {"conn_id": "test", "extra": json.dumps(extras), "conn_type": "test"}
cmv = ConnectionModelView()
cmv.extra_fields = ['extra__test__my_param']
# this is set by `lazy_add_provider_discovered_options_to_... | Enable use of custom conn extra fields without prefix (#22607)
Previously, connection "extra" fields which were added as custom fields in the
webserver connection form had to be named with prefix `extra__<conn_type>__`.
This was because custom fields are registered globally on the connection view model,
so the pref... | test_prefill_form_backcompat | 1dfae80412377eef0a38637535d6a1d3393cc4fe | airflow | test_views_connection.py | 10 | 8 | https://github.com/apache/airflow.git | 1 | 77 | 1 | 37 | 197 | Python | {
"docstring": "\n When populating custom fields in the connection form we should first check for the non-prefixed\n value (since prefixes in extra are deprecated) and then fallback to the prefixed value.\n\n Either way, the field is known internally to the model view as the prefixed value.\n ",
"langua... | def test_prefill_form_backcompat(extras, expected):
mock_form = mock.Mock()
mock_form.data = {"conn_id": "test", "extra": json.dumps(extras), "conn_type": "test"}
cmv = ConnectionModelView()
cmv.extra_fields = ['extra__test__my_param']
# this is set by `lazy_add_provider_discovered_options_to_... |
18,967 | 93,030 | 313 | src/sentry/search/utils.py | 98 | 24 | def tokenize_query(query):
result = defaultdict(list)
query_params = defaultdict(list)
tokens = split_query_into_tokens(query)
for token in tokens:
if token.upper() in ["OR", "AND"] or token.strip("()") == "":
continue
state = "query"
for idx, char in enumerate(... | ref: replace legacy compat.map with list comprehensions (#36372) | tokenize_query | 522d6f27c28dc5fd4d996ed605865c42fbda0da8 | sentry | utils.py | 16 | 25 | https://github.com/getsentry/sentry.git | 13 | 185 | 0 | 63 | 322 | Python | {
"docstring": "\n Tokenizes a standard Sentry search query.\n\n Example:\n >>> query = 'is:resolved foo bar tag:value'\n >>> tokenize_query(query)\n {\n 'is': ['resolved'],\n 'query': ['foo', 'bar'],\n 'tag': ['value'],\n }\n\n Has a companion implementation in static/app/ut... | def tokenize_query(query):
result = defaultdict(list)
query_params = defaultdict(list)
tokens = split_query_into_tokens(query)
for token in tokens:
if token.upper() in ["OR", "AND"] or token.strip("()") == "":
continue
state = "query"
for idx, char in enumerate(... | |
@keras_export("keras.models.load_model") | 83,344 | 280,311 | 410 | keras/saving/saving_api.py | 110 | 21 | def save_model(model, filepath, overwrite=True, save_format=None, **kwargs):
save_format = get_save_format(filepath, save_format)
if save_format not in ("keras", "tf", "h5", "keras_v3"):
raise ValueError(
"Unknown `save_format` argument. Expecte | Prepare public API surface for v3 saving.
PiperOrigin-RevId: 484397600 | save_model | c9068087d9142bab573e0c300bf9874a957accff | keras | saving_api.py | 18 | 33 | https://github.com/keras-team/keras.git | 10 | 144 | 1 | 80 | 273 | Python | {
"docstring": "Saves a model as a TensorFlow SavedModel or HDF5 file.\n\n See the [Serialization and Saving guide](\n https://keras.io/guides/serialization_and_saving/) for details.\n\n Args:\n model: Keras model instance to be saved.\n filepath: `str` or `pathlib.Path` object. Path where ... | def save_model(model, filepath, overwrite=True, save_format=None, **kwargs):
save_format = get_save_format(filepath, save_format)
if save_format not in ("keras", "tf", "h5", "keras_v3"):
raise ValueError(
"Unknown `save_format` argument. Expected one of "
"'keras', 'tf', or ... |
@pytest.mark.slow | 42,077 | 176,745 | 193 | networkx/algorithms/tree/tests/test_mst.py | 78 | 22 | def test_random_spanning_tree_additive_small():
pytest.importorskip("numpy")
edges = {
(0, 1): 1,
(0, 2): 1,
(0, 5): 3,
(1, 2): 2,
(1, 4): 3,
(2, 3): 3,
(5, 3): 4,
(5, 4): 5,
(4, 3): 4,
}
# Build the graph
G = nx.Graph()
... | Moved random_spanning_tree to public API (#5656)
Adds two new functions random_spanning_tree and
total_spanning_tree_weight to public networkx API, accessible
from the main namespace.
These functions had previously been defined, tested, and used internally
in the TSP package, but have now been added to the publi... | test_random_spanning_tree_additive_small | 99d31932bd7388aadfa54305c116ca0c9261a67e | networkx | test_mst.py | 12 | 23 | https://github.com/networkx/networkx.git | 2 | 201 | 1 | 57 | 295 | Python | {
"docstring": "\n Sample a single spanning tree from the additive method.\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 9,
"vocab_size": 9
} | def test_random_spanning_tree_additive_small():
pytest.importorskip("numpy")
edges = {
(0, 1): 1,
(0, 2): 1,
(0, 5): 3,
(1, 2): 2,
(1, 4): 3,
(2, 3): 3,
(5, 3): 4,
(5, 4): 5,
(4, 3): 4,
}
# Build the graph
G = nx.Graph()
... |
46,922 | 192,979 | 133 | references/optical_flow/utils.py | 86 | 24 | def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400):
if gamma > 1:
raise ValueError(f"Gamma should be < 1, got {gamma}.")
    # exclude invalid pixels and extremely large displacements
flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()
valid_flow_mask = valid_flow_mask... | Upgrade usort to `1.0.2` and black to 22.3.0 (#5106)
* upgrade usort to
* Also update black
* Actually use 1.0.2
* Apply pre-commit
Co-authored-by: Nicolas Hug <contact@nicolas-hug.com> | sequence_loss | 6ca9c76adb6daf2695d603ad623a9cf1c4f4806f | vision | utils.py | 12 | 13 | https://github.com/pytorch/vision.git | 2 | 157 | 0 | 65 | 244 | Python | {
"docstring": "Loss function defined over sequence of flow predictions",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400):
if gamma > 1:
raise ValueError(f"Gamma should be < 1, got {gamma}.")
    # exclude invalid pixels and extremely large displacements
flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()
valid_flow_mask = valid_flow_mask... | |
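The body is cut off before the per-prediction loop; assuming the usual exponentially decaying weighting over a sequence of refinements (the visible code only validates `gamma`), the weights look like:

```python
# weight_i = gamma ** (num_predictions - i - 1): later refinements count more
gamma, num_predictions = 0.8, 4
weights = [gamma ** (num_predictions - i - 1) for i in range(num_predictions)]
print(weights)  # [0.512..., 0.640..., 0.8, 1.0]
```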
6,324 | 34,756 | 347 | src/transformers/modeling_tf_utils.py | 108 | 13 | def booleans_processing(config, **kwargs):
final_booleans = {}
if tf.executing_eagerly():
final_booleans["output_attentions"] = (
kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
)
final_booleans["output_hidden_states"... | Misfiring tf warnings (#15442)
* Fix spurious warning in TF TokenClassification models
* Fixing one last spurious warning
* Removing outdated warning altogether | booleans_processing | 09f9d07271297e97f5a0495fcf7e9cc107fedbdd | transformers | modeling_tf_utils.py | 15 | 38 | https://github.com/huggingface/transformers.git | 13 | 247 | 0 | 51 | 322 | Python | {
"docstring": "\n Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or\n graph)\n\n Args:\n config ([`PretrainedConfig`]):\n The config of the running model.\n **kwargs:\n The boolean parameters\n\n Returns:\... | def booleans_processing(config, **kwargs):
final_booleans = {}
if tf.executing_eagerly():
final_booleans["output_attentions"] = (
kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
)
final_booleans["output_hidden_states"... | |
9,045 | 46,950 | 259 | tests/jobs/test_scheduler_job.py | 84 | 33 | def test_dagrun_root_fail_unfinished(self):
# TODO: this should live in test_dagrun.py
# Run both the failed and successful tasks
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_ | Fixed backfill interference with scheduler (#22701)
Co-authored-by: Dmirty Suvorov <dmitry.suvorov@scribd.com> | test_dagrun_root_fail_unfinished | 9769a65c20f6028d640061efacbc5bfeb5ebaf3d | airflow | test_scheduler_job.py | 11 | 17 | https://github.com/apache/airflow.git | 1 | 128 | 0 | 70 | 215 | Python | {
"docstring": "\n DagRuns with one unfinished and one failed root task -> RUNNING\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 10
} | def test_dagrun_root_fail_unfinished(self):
# TODO: this should live in test_dagrun.py
# Run both the failed and successful tasks
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dr = dag.create_dagrun(
run_type=DagRunType.... | |
40,287 | 168,328 | 1,553 | pandas/plotting/_core.py | 266 | 38 | def _get_call_args(backend_name, data, args, kwargs):
if isinstance(data, ABCSeries):
arg_def = [
("kind", "line"),
("ax", None),
("figsize", None),
("use_index", True),
("title", None),
("grid",... | DEPR: `sort_columns` in `plot` (#47563) (#48073) | _get_call_args | 8b72297c8799725e98cb2c6aee664325b752194f | pandas | _core.py | 16 | 97 | https://github.com/pandas-dev/pandas.git | 9 | 570 | 0 | 142 | 924 | Python | {
"docstring": "\n This function makes calls to this accessor `__call__` method compatible\n with the previous `SeriesPlotMethods.__call__` and\n `DataFramePlotMethods.__call__`. Those had slightly different\n signatures, since `DataFramePlotMethods` accepted `x` and `y`\n parameter... | def _get_call_args(backend_name, data, args, kwargs):
if isinstance(data, ABCSeries):
arg_def = [
("kind", "line"),
("ax", None),
("figsize", None),
("use_index", True),
("title", None),
("grid",... | |
50,048 | 202,094 | 40 | tests/cache/tests_async.py | 12 | 6 | async def test_ahas_key(self):
await cache.aset("hello1", "goodbye1")
self.assertIs(await cache.ahas_key("hello1"), False)
self.assertIs(await cache.ahas_key("goodbye1"), False)
| Refs #33476 -- Reformatted code with Black. | test_ahas_key | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests_async.py | 11 | 4 | https://github.com/django/django.git | 1 | 43 | 0 | 10 | 80 | Python | {
"docstring": "ahas_key() doesn't ever return True for the dummy cache backend.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | async def test_ahas_key(self):
await cache.aset("hello1", "goodbye1")
self.assertIs(await cache.ahas_key("hello1"), False)
self.assertIs(await cache.ahas_key("goodbye1"), False)
| |
96,070 | 297,101 | 44 | homeassistant/components/gree/climate.py | 12 | 8 | def min_temp(self) -> float:
if self.temperature_unit == UnitOfTemperature.CELSIUS:
return TEMP_MIN
return TEMP_MIN_F
| Use UnitOfTemperature in climate entities [g-l] (#83127)
* Use UnitOfTemperature in climate entities [g-l]
* Adjust gree
* Adjust honeywell | min_temp | 68e454712dae5b65599ef12a025bc4446f7e3e6e | core | climate.py | 7 | 5 | https://github.com/home-assistant/core.git | 2 | 21 | 0 | 11 | 36 | Python | {
"docstring": "Return the minimum temperature supported by the device.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 7
} | def min_temp(self) -> float:
if self.temperature_unit == UnitOfTemperature.CELSIUS:
return TEMP_MIN
return TEMP_MIN_F
| |
47,462 | 195,878 | 111 | sympy/core/expr.py | 49 | 11 | def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0):
if x and x not in self.free_symbols:
return self
if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):
return self.series(x, x0, n, dir, cdir=cdir)
else... | Fixed failing doctest | nseries | 46ba104ee0f9cb35b54c2f5f5591cfabb26d0301 | sympy | expr.py | 11 | 7 | https://github.com/sympy/sympy.git | 6 | 91 | 0 | 40 | 135 | Python | {
"docstring": "\n Wrapper to _eval_nseries if assumptions allow, else to series.\n\n If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is\n called. This calculates \"n\" terms in the innermost expressions and\n then builds up the final series just by \"cross-multiplying\... | def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0):
if x and x not in self.free_symbols:
return self
if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):
return self.series(x, x0, n, dir, cdir=cdir)
else... | |
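A small usage sketch of the behaviour the docstring describes, using the standard SymPy API:

```python
from sympy import Symbol, sin, exp

x = Symbol('x')
# With x given, x0=0 and dir='+', nseries delegates to _eval_nseries and
# expands the innermost expressions to n terms first.
print(sin(x).nseries(x, n=6))       # x - x**3/6 + x**5/120 + O(x**6)
print(exp(sin(x)).nseries(x, n=4))  # 1 + x + x**2/2 + O(x**4)
```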
45,750 | 187,229 | 46 | tests/test_api_validate.py | 15 | 14 | def test_getitem_error(self, exception):
container = self.Container(exception("failure"))
with pytest.raises(validate.ValidationError) as cm: | plugin.api.validate: rewrite tests
Completely rewrite tests using pytest, with full coverage | test_getitem_error | d09112ab1f6db6aa605650fe1ff6a3028344f90d | streamlink | test_api_validate.py | 13 | 10 | https://github.com/streamlink/streamlink.git | 1 | 55 | 0 | 15 | 96 | Python | {
"docstring": "\n ValidationError(GetItemSchema):\n Could not get key 'foo' from object Container\n Context:\n failure\n ",
"language": "en",
"n_whitespaces": 71,
"n_words": 11,
"vocab_size": 11
} | def test_getitem_error(self, exception):
container = self.Container(exception("failure"))
with pytest.raises(validate.ValidationError) as cm:
validate.validate(validate.get("foo", default="default"), container)
assert_validationerror(cm.value, )
| |
70,935 | 245,986 | 24 | mmdet/models/task_modules/prior_generators/point_generator.py | 10 | 8 | def num_base_priors(self) -> List[int]:
| [Doc]: Add typehint and update docstring for task modules (#9468)
* part 11
* part 11
* part 11
* part 11 | num_base_priors | 92e2eb355bc964af5e798281bcb4cb9179fdaecc | mmdetection | point_generator.py | 12 | 4 | https://github.com/open-mmlab/mmdetection.git | 2 | 27 | 0 | 10 | 44 | Python | {
"docstring": "list[int]: The number of priors (points) at a point\n on the feature grid",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 13
} | def num_base_priors(self) -> List[int]:
return [1 for _ in range(len(self.strides))]
| |
51,961 | 207,420 | 243 | tests/admin_utils/test_logentry.py | 43 | 24 | def test_logentry_change_message_localized_datetime_input(self):
post_data = {
"site": self.site.pk,
"title": "Changed",
"hist": "Some content",
"created_0": "12/03/2008",
"created_1": "11:54",
}
with translation.override("fr")... | Refs #33476 -- Reformatted code with Black. | test_logentry_change_message_localized_datetime_input | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | test_logentry.py | 16 | 20 | https://github.com/django/django.git | 1 | 113 | 0 | 39 | 206 | Python | {
"docstring": "\n Localized date/time inputs shouldn't affect changed form data detection.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def test_logentry_change_message_localized_datetime_input(self):
post_data = {
"site": self.site.pk,
"title": "Changed",
"hist": "Some content",
"created_0": "12/03/2008",
"created_1": "11:54",
}
with translation.override("fr")... | |
else: | 77,523 | 263,954 | 97 | PyInstaller/depend/bytecode.py | 66 | 5 | def _cleanup_code(code):
return code # Nothing to do here
# language=PythonVerboseRegExp
_call_function_bytecode = bytecode_regex(
rb
)
else:
# Starting with python 3.11, the bytecode is peppered with CACHE instructions (which dis module conveniently hides
# unless show_caches=True... | depend: adjust bytecode scanner for python 3.11
Python 3.11 removed CALL_FUNCTION and CALL_METHOD opcodes, replacing
them with PRECALL + CALL. For our purposes, it should be enough to
match PRECALL only (as both opcodes have same parameter, i.e., the
argument count).
In addition, the bytecode is now peppered with CAC... | _cleanup_code | 8ee5afa1ea56906b30ba2ea4578082c61a1f94e2 | pyinstaller | bytecode.py | 7 | 2 | https://github.com/pyinstaller/pyinstaller.git | 1 | 7 | 1 | 52 | 32 | Python | {
"docstring": "\n # Matches `global_function('some', 'constant', 'arguments')`.\n\n # Load the global function. In code with >256 of names, this may require extended name references.\n ((?:`EXTENDED_ARG`.)*\n (?:`LOAD_NAME`|`LOAD_GLOBAL`|`LOAD_FAST`).)\n\n # For foo.bar.whizz(), t... | def _cleanup_code(code):
return code # Nothing to do here
# language=PythonVerboseRegExp
_call_function_bytecode = bytecode_regex(
rb
)
else:
# Starting with python 3.11, the bytecode is peppered with CACHE instructions (which dis module conveniently hides
# unless show_caches=True... |
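The CACHE instructions mentioned in the comment are easy to inspect with the standard `dis` module on Python 3.11+:

```python
import dis
import sys

def f():
    return len("abc")

if sys.version_info >= (3, 11):
    # CACHE entries are hidden by default; show_caches=True reveals them.
    dis.dis(f, show_caches=True)
else:
    dis.dis(f)
```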
3,858 | 21,469 | 104 | pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py | 26 | 9 | def filemode(mode):
perm = []
for table in filemode_table | Vendor in pip 22.1.2 | filemode | c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | pipenv | tarfile.py | 13 | 10 | https://github.com/pypa/pipenv.git | 4 | 51 | 0 | 24 | 89 | Python | {
"docstring": "Convert a file's mode to a string of the form\n -rwxrwxrwx.\n Used by TarFile.list()\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 13
} | def filemode(mode):
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
| |
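The standard library ships the same conversion as `stat.filemode`, which also consumes the file-type bits for the first character:

```python
import stat

print(stat.filemode(0o100644))  # -rw-r--r--  (regular file)
print(stat.filemode(0o040755))  # drwxr-xr-x  (directory)
```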
1,612 | 9,415 | 35 | reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py | 25 | 12 | def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'):
r
k = _setup_kernel(k) * gain
| initialize ostec | filter_2d | 7375ee364e0df2a417f92593e09557f1b2a3575a | insightface | upfirdn_2d.py | 11 | 21 | https://github.com/deepinsight/insightface.git | 1 | 54 | 0 | 23 | 105 | Python | {
"docstring": "Filter a batch of 2D images with the given FIR filter.\n\n Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`\n and filters each image with the given filter. The filter is normalized so that\n if the input pixels are constant, they will be scaled by the specified `gain... | def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'):
r
k = _setup_kernel(k) * gain
p = k.shape[0] - 1
return _simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)
#----------------------------------------------------------------------------
| |
14,198 | 66,513 | 17 | erpnext/patches/v10_0/set_currency_in_pricing_rule.py | 26 | 13 | def execute():
frappe.reload_doctype("Pricing Rule")
currency = frappe.db.get_default("currency")
for doc in frappe.get_all("Pricing Rule", fields=["company", "name"]):
if doc.company:
currenc | style: format code with black | execute | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | set_currency_in_pricing_rule.py | 13 | 9 | https://github.com/frappe/erpnext.git | 3 | 73 | 0 | 24 | 126 | Python | {
"docstring": "update `tabPricing Rule` set currency = %s where name = %s",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 9
} | def execute():
frappe.reload_doctype("Pricing Rule")
currency = frappe.db.get_default("currency")
for doc in frappe.get_all("Pricing Rule", fields=["company", "name"]):
if doc.company:
currency = frappe.get_cached_value("Company", doc.company, "default_currency")
frappe.db.sql(
, (currency, doc.name)
)... | |
10,437 | 51,941 | 341 | modules/image/Image_editing/super_resolution/swinir_l_real_sr_x4/swinir.py | 131 | 27 | def forward(self, x, mask=None):
B_, N, C = x.shape
qkv = self.qkv(x).reshape((B_, N, 3, self.num_heads, C // self.num_heads)).transpose((2, 0, 3, 1, 4))
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q... | add swinir_l_real_sr_x4 (#2076)
* git add swinir_l_real_sr_x4
* fix typo
* fix typo
Co-authored-by: chenjian <chenjian26@baidu.com> | forward | 2e373966a7fd3119c205350fb14d0b7bfe74185d | PaddleHub | swinir.py | 13 | 23 | https://github.com/PaddlePaddle/PaddleHub.git | 2 | 288 | 0 | 80 | 490 | Python | {
"docstring": "\n Args:\n x: input features with shape of (num_windows*B, N, C)\n mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n ",
"language": "en",
"n_whitespaces": 58,
"n_words": 21,
"vocab_size": 18
} | def forward(self, x, mask=None):
B_, N, C = x.shape
qkv = self.qkv(x).reshape((B_, N, 3, self.num_heads, C // self.num_heads)).transpose((2, 0, 3, 1, 4))
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q... | |
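The row's Paddle code is standard multi-head window attention; a PyTorch restatement of the same shape gymnastics (the explicit `qkv_weight` standing in for the module's `self.qkv` layer is an assumption):

```python
import torch

def window_attention_sketch(x, qkv_weight, num_heads, scale):
    # x: (num_windows * B, N, C); qkv_weight: (3 * C, C), hypothetical.
    B_, N, C = x.shape
    qkv = (x @ qkv_weight.T).reshape(B_, N, 3, num_heads, C // num_heads)
    qkv = qkv.permute(2, 0, 3, 1, 4)          # (3, B_, heads, N, head_dim)
    q, k, v = qkv[0], qkv[1], qkv[2]
    attn = (q * scale) @ k.transpose(-2, -1)  # (B_, heads, N, N)
    attn = attn.softmax(dim=-1)
    return (attn @ v).transpose(1, 2).reshape(B_, N, C)
```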
72,338 | 248,546 | 780 | tests/test_event_auth.py | 154 | 17 | def test_join_rules_invite(self):
creator = "@creator:example.com"
pleb = "@joiner:example.com"
auth_events = {
("m.room.create", ""): _create_event(RoomVersions.V6, creator),
("m.room.member", creator): _join_event(RoomVersions.V6, creator),
("m.roo... | EventAuthTestCase: build events for the right room version
In practice, when we run the auth rules, all of the events have the right room
version. Let's stop building Room V1 events for these tests and use the right
version. | test_join_rules_invite | 2959184a42398277ff916206235b844a8f7be5d7 | synapse | test_event_auth.py | 12 | 56 | https://github.com/matrix-org/synapse.git | 1 | 325 | 0 | 69 | 518 | Python | {
"docstring": "\n Test joining an invite only room.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 6,
"vocab_size": 6
} | def test_join_rules_invite(self):
creator = "@creator:example.com"
pleb = "@joiner:example.com"
auth_events = {
("m.room.create", ""): _create_event(RoomVersions.V6, creator),
("m.room.member", creator): _join_event(RoomVersions.V6, creator),
("m.roo... | |
47,065 | 194,765 | 103 | parlai/crowdsourcing/tasks/pairwise_per_turn_eval/worlds.py | 53 | 11 | def validate_onboarding(data):
logging.info(f"Validating onboarding data {data}")
messages = data['outputs']['messages']
if len(messages) == 0:
return False
status_message = messages[-2]
if status_message is None:
return False
submitted_data = status_message.get('data')
... | Create Per-Turn Evaluation Folder in ParlAI (#4323)
* Auto fixes
* Remove worker blocklists
* Add __init__.py
* Add __init__.py
* Lint fix
* Rename task configs
* More lint error fixes
* Update Per Turn Eval README with link to paper
* Add configs to example
* Remove commented out lines
*... | validate_onboarding | 2d062907bcf416150e36879a2246218babad28b1 | ParlAI | worlds.py | 9 | 13 | https://github.com/facebookresearch/ParlAI.git | 4 | 73 | 0 | 37 | 132 | Python | {
"docstring": "\n Check the contents of the data to ensure they are valid.\n ",
"language": "en",
"n_whitespaces": 18,
"n_words": 11,
"vocab_size": 10
} | def validate_onboarding(data):
logging.info(f"Validating onboarding data {data}")
messages = data['outputs']['messages']
if len(messages) == 0:
return False
status_message = messages[-2]
if status_message is None:
return False
submitted_data = status_message.get('data')
... | |
99,723 | 300,869 | 58 | tests/helpers/test_event.py | 23 | 8 | async def test_async_track_entity_registry_updated_event_with_empty_list(hass):
unsub_single = async_track_entity_registry_updated_event(
hass, [], ha.callback(lambda event: None)
)
unsub_single2 = async_track_entity_registry_updated_event(
hass, [], ha.callback(lamb | Clean up accessing event helpers via hass (#72011) | test_async_track_entity_registry_updated_event_with_empty_list | 8f4caf414124f380a8f5e1d54aedb54a8f6c5c05 | core | test_event.py | 12 | 9 | https://github.com/home-assistant/core.git | 1 | 50 | 0 | 15 | 84 | Python | {
"docstring": "Test async_track_entity_registry_updated_event passing an empty list of entities.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | async def test_async_track_entity_registry_updated_event_with_empty_list(hass):
unsub_single = async_track_entity_registry_updated_event(
hass, [], ha.callback(lambda event: None)
)
unsub_single2 = async_track_entity_registry_updated_event(
hass, [], ha.callback(lambda event: None)
... | |
29,220 | 130,296 | 484 | python/ray/_private/tls_utils.py | 167 | 67 | def generate_self_signed_tls_certs():
try:
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.... | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | generate_self_signed_tls_certs | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | tls_utils.py | 25 | 49 | https://github.com/ray-project/ray.git | 2 | 324 | 0 | 132 | 522 | Python | {
"docstring": "Create self-signed key/cert pair for testing.\n\n This method requires the library ``cryptography`` be installed.\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 14,
"vocab_size": 14
} | def generate_self_signed_tls_certs():
try:
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.... | |
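A compact sketch of what the helper builds with the `cryptography` package; the common name and one-day validity window are illustrative:

```python
import datetime
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "localhost")])
cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)  # self-signed: issuer == subject
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(datetime.datetime.utcnow())
    .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=1))
    .sign(key, hashes.SHA256())
)
cert_pem = cert.public_bytes(serialization.Encoding.PEM)
key_pem = key.private_bytes(
    serialization.Encoding.PEM,
    serialization.PrivateFormat.TraditionalOpenSSL,
    serialization.NoEncryption(),
)
```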
84,835 | 284,585 | 60 | openbb_terminal/stocks/government/gov_controller.py | 25 | 10 | def print_help(self):
has_ticker_start = "[unvl]" if not self.ticker else ""
has_ticker_end = "[/unvl]" if not self.ticker else ""
| Bounty Hunter mood: 11 bugs fixed (#1853)
* fix #1850
* fix #1831
* add extra check to Reddit API keys
* ignore warning message to update praw api
* improve OpenBB links
* fix quick performance only on stocks class because I'm James bitch
* fix quick performance only on stocks class because I'm James... | print_help | a6f7e111e68346aeab315985b3704c2710693b38 | OpenBBTerminal | gov_controller.py | 10 | 24 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 3 | 42 | 0 | 18 | 96 | Python | {
"docstring": "Print help\n[src][QuiverQuant][/src]\n\n[info]Explore:[/info][cmds]\n lasttrades last trades\n topbuys show most purchased stocks\n topsells show most sold stocks\n lastcontracts show last government contracts given out\n qtrcontracts quarterly government c... | def print_help(self):
has_ticker_start = "[unvl]" if not self.ticker else ""
has_ticker_end = "[/unvl]" if not self.ticker else ""
help_text = f
console.print(text=help_text, menu="Stocks - Government")
| |
14,639 | 67,844 | 38 | erpnext/stock/reorder_item.py | 60 | 18 | def get_item_warehouse_projected_qty(items_to_consider):
item_warehouse_projected_qty = {}
for item_code, warehouse, projected_qty in frappe.db.sql(
.format(
", ".join(["%s"] * le | style: format code with black | get_item_warehouse_projected_qty | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | reorder_item.py | 16 | 24 | https://github.com/frappe/erpnext.git | 6 | 166 | 0 | 44 | 265 | Python | {
"docstring": "select item_code, warehouse, projected_qty\n\t\tfrom tabBin where item_code in ({0})\n\t\t\tand (warehouse != \"\" and warehouse is not null)",
"language": "en",
"n_whitespaces": 16,
"n_words": 19,
"vocab_size": 18
} | def get_item_warehouse_projected_qty(items_to_consider):
item_warehouse_projected_qty = {}
for item_code, warehouse, projected_qty in frappe.db.sql(
.format(
", ".join(["%s"] * len(items_to_consider))
),
items_to_consider,
):
if item_code not in item_warehouse_projected_qty:
item_warehouse_projected_... | |
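The `", ".join(["%s"] * len(...))` idiom in the query builds one placeholder per item so the whole list can be passed as bound parameters; a standalone illustration (item codes hypothetical):

```python
items = ["ITEM-001", "ITEM-002", "ITEM-003"]
placeholders = ", ".join(["%s"] * len(items))
sql = (
    "select item_code, warehouse, projected_qty "
    f"from tabBin where item_code in ({placeholders})"
)
# e.g. frappe.db.sql(sql, items) -- values are bound, not string-interpolated
```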
3,959 | 21,619 | 225 | pipenv/patched/notpip/_vendor/typing_extensions.py | 132 | 27 | def _collect_type_vars(types, typevar_types=None):
if typevar_types is None:
typevar_types = typing.TypeVar
tvars = []
for t in types:
if (
isinstance(t, typevar_types) and
t not in tvars and
not _is_unpack(t)
):
tvars.append(t)
... | Vendor in pip 22.1.2 | _collect_type_vars | c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | pipenv | typing_extensions.py | 14 | 14 | https://github.com/pypa/pipenv.git | 9 | 86 | 0 | 89 | 293 | Python | {
"docstring": "Collect all type variable contained in types in order of\n first appearance (lexicographic order). For example::\n\n _collect_type_vars((T, List[S, T])) == (T, S)\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 22,
"vocab_size": 21
} | def _collect_type_vars(types, typevar_types=None):
if typevar_types is None:
typevar_types = typing.TypeVar
tvars = []
for t in types:
if (
isinstance(t, typevar_types) and
t not in tvars and
not _is_unpack(t)
):
tvars.append(t)
... | |
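A simplified, runnable version of the collection rule; the real helper also handles `Unpack` and custom typevar types, so this sketch keeps only the first-appearance ordering and deduplication the docstring describes:

```python
import typing
from typing import List, TypeVar

T = TypeVar("T")
S = TypeVar("S")

def collect_type_vars(types):
    # First appearance wins; duplicates are dropped.
    tvars = []
    for t in types:
        if isinstance(t, typing.TypeVar) and t not in tvars:
            tvars.append(t)
        for tv in getattr(t, "__parameters__", ()):
            if tv not in tvars:
                tvars.append(tv)
    return tuple(tvars)

assert collect_type_vars((T, List[S])) == (T, S)
assert collect_type_vars((List[T], T, S)) == (T, S)
```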
39,794 | 166,238 | 46 | pandas/core/exchange/from_dataframe.py | 20 | 10 | def from_dataframe(df, allow_copy=True):
if isinstance(df, pd.DataFrame):
return df
if not hasattr(df, "__dataframe__"):
| ENH: Implement DataFrame interchange protocol (#46141) | from_dataframe | 90140f055892a46f473bd26affab88a7f171e394 | pandas | from_dataframe.py | 10 | 6 | https://github.com/pandas-dev/pandas.git | 3 | 48 | 0 | 17 | 81 | Python | {
"docstring": "\n Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol.\n\n Parameters\n ----------\n df : DataFrameXchg\n Object supporting the exchange protocol, i.e. `__dataframe__` method.\n allow_copy : bool, default: True\n Whether to allow copying the me... | def from_dataframe(df, allow_copy=True):
if isinstance(df, pd.DataFrame):
return df
if not hasattr(df, "__dataframe__"):
raise ValueError("`df` does not support __dataframe__")
return _from_dataframe(df.__dataframe__(allow_copy=allow_copy))
| |
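A usage sketch of the public entry point, exposed as `pandas.api.interchange.from_dataframe` from pandas 1.5 on:

```python
import pandas as pd
from pandas.api.interchange import from_dataframe

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
# A plain DataFrame is returned as-is; any other object only needs to
# expose the interchange protocol's __dataframe__ method.
roundtripped = from_dataframe(df)
```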
12,522 | 61,340 | 85 | .venv/lib/python3.8/site-packages/pip/_internal/utils/wheel.py | 39 | 14 | def parse_wheel(wheel_zip, name):
# type: (ZipFile, str) -> Tuple[str, Message]
try:
info_dir = wheel_dist_info_dir(wheel_zip, name)
metadata = wheel | upd; format | parse_wheel | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | wheel.py | 14 | 9 | https://github.com/jindongwang/transferlearning.git | 2 | 62 | 0 | 35 | 103 | Python | {
"docstring": "Extract information from the provided wheel, ensuring it meets basic\n standards.\n\n Returns the name of the .dist-info directory and the parsed WHEEL metadata.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 23,
"vocab_size": 20
} | def parse_wheel(wheel_zip, name):
# type: (ZipFile, str) -> Tuple[str, Message]
try:
info_dir = wheel_dist_info_dir(wheel_zip, name)
metadata = wheel_metadata(wheel_zip, info_dir)
version = wheel_version(metadata)
except UnsupportedWheel as e:
raise UnsupportedWheel("{} ... | |
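A rough standalone sketch of the two helper steps (`wheel_dist_info_dir` plus `wheel_metadata`) using only the standard library; the function name and error handling are illustrative, not pip's API:

```python
import zipfile
from email.parser import Parser

def read_wheel_metadata(wheel_path):
    with zipfile.ZipFile(wheel_path) as zf:
        info_dirs = {n.split("/")[0] for n in zf.namelist()
                     if n.split("/")[0].endswith(".dist-info")}
        if len(info_dirs) != 1:
            raise ValueError(f"expected one .dist-info dir, got {info_dirs!r}")
        (info_dir,) = info_dirs
        # WHEEL is an RFC 822-style message, e.g. "Wheel-Version: 1.0".
        wheel_text = zf.read(f"{info_dir}/WHEEL").decode("utf-8")
    return info_dir, Parser().parsestr(wheel_text)

# info_dir, msg = read_wheel_metadata("pkg-1.0-py3-none-any.whl")
# msg["Wheel-Version"]  -> "1.0"
```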
6,207 | 34,177 | 114 | utils/style_doc.py | 70 | 17 | def style_docstrings_in_code(code, max_len=119):
# fmt: off
splits = code.split('\"\"\"')
splits = [
(s if i % 2 == 0 or _re_doc_ignore.search(splits[i - 1]) is not None else style_docstring(s, max_len=max_len))
for i, s in enumerate(splits)
]
black_errors = "\n\n".join([s[1] fo... | Copies and docstring styling (#15202)
* Style docstrings when making/checking copies
* Polish | style_docstrings_in_code | 1144d336b689d1710534b245697e41be7a168075 | transformers | style_doc.py | 15 | 10 | https://github.com/huggingface/transformers.git | 9 | 131 | 0 | 48 | 212 | Python | {
"docstring": "\n Style all docstrings in some code.\n\n Args:\n code (`str`): The code in which we want to style the docstrings.\n max_len (`int`): The maximum number of characters per line.\n\n Returns:\n `Tuple[str, str]`: A tuple with the clean code and the black errors (if any)\n ... | def style_docstrings_in_code(code, max_len=119):
# fmt: off
splits = code.split('\"\"\"')
splits = [
(s if i % 2 == 0 or _re_doc_ignore.search(splits[i - 1]) is not None else style_docstring(s, max_len=max_len))
for i, s in enumerate(splits)
]
black_errors = "\n\n".join([s[1] fo... | |
34,179 | 148,116 | 208 | python/ray/_private/utils.py | 73 | 12 | def check_version_info(cluster_metadata):
cluster_version_info = (
cluster_metadata["ray_version"],
cluster_metadata["python_version"],
)
version_info = compute_version_info()
if version_info != cluster_version_info:
node_ip_address = ray._private.services.get_node_ip_addres... | [Core] Add a utility to check GCS / Ray cluster health (#23382)
* Provide a utility to ping a Ray cluster and verify it has the same Ray version. This is useful to check if a Ray cluster is available at a given address, without connecting to the cluster with the more heavyweight ray.init(). This utility is integrated ... | check_version_info | d5d2ef424965b2cfdc62a97674dbca7abe3af34b | ray | utils.py | 22 | 17 | https://github.com/ray-project/ray.git | 2 | 90 | 0 | 39 | 176 | Python | {
"docstring": "Check if the Python and Ray versions stored in GCS matches this process.\n Args:\n cluster_metadata: Ray cluster metadata from GCS.\n\n Raises:\n Exception: An exception is raised if there is a version mismatch.\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 32,
... | def check_version_info(cluster_metadata):
cluster_version_info = (
cluster_metadata["ray_version"],
cluster_metadata["python_version"],
)
version_info = compute_version_info()
if version_info != cluster_version_info:
node_ip_address = ray._private.services.get_node_ip_addres... | |
3,481 | 20,690 | 45 | pipenv/patched/notpip/_vendor/rich/__init__.py | 19 | 4 | def get_console() -> "Console":
| check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | get_console | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | __init__.py | 10 | 12 | https://github.com/pypa/pipenv.git | 2 | 26 | 0 | 16 | 50 | Python | {
"docstring": "Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,\n and hasn't been explicitly given one.\n\n Returns:\n Console: A console instance.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 25,
"vocab_size": 23
} | def get_console() -> "Console":
global _console
if _console is None:
from .console import Console
_console = Console()
return _console
| |
12,300 | 60,852 | 111 | .venv/lib/python3.8/site-packages/pip/_internal/models/link.py | 52 | 7 | def is_hash_allowed(self, hashes):
# type: (Optional[Hashes]) -> bool
if hashes is None or not self.has_hash:
return False
# Assert non-None so mypy knows self.hash_name and self.hash are str.
assert self.hash_name is not None
assert self.h | upd; format | is_hash_allowed | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | link.py | 9 | 6 | https://github.com/jindongwang/transferlearning.git | 3 | 49 | 0 | 40 | 79 | Python | {
"docstring": "\n Return True if the link has a hash and it is allowed.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 12
} | def is_hash_allowed(self, hashes):
# type: (Optional[Hashes]) -> bool
if hashes is None or not self.has_hash:
return False
# Assert non-None so mypy knows self.hash_name and self.hash are str.
assert self.hash_name is not None
assert self.hash is not None
... | |
76,126 | 260,218 | 110 | sklearn/metrics/_ranking.py | 60 | 28 | def coverage_error(y_true, y_score, *, sample_weight=None):
y_true = check_array(y_true, ensure_2d=True)
y_score = check_array(y_score, ensure_2d=True)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true, input_name="y_true")
if y_type != "multilabel-indicato... | FIX Ensure correct sklearn.metrics.coverage_error error message for 1D array (#23548)
* Change input array to ensure_2d=True
* Reshape input list to 2D if metric is coverage_error
* Add test for error message with 1D array on coverage_error
* Modify 1D error message test
* Use parametrize to test different 1d arra... | coverage_error | e98e0353787f87ce10d6d47e643bbefe9b6a8ddd | scikit-learn | _ranking.py | 12 | 14 | https://github.com/scikit-learn/scikit-learn.git | 3 | 153 | 0 | 46 | 244 | Python | {
"docstring": "Coverage error measure.\n\n Compute how far we need to go through the ranked scores to cover all\n true labels. The best value is equal to the average number\n of labels in ``y_true`` per sample.\n\n Ties in ``y_scores`` are broken by giving maximal rank that would have\n been assigned ... | def coverage_error(y_true, y_score, *, sample_weight=None):
y_true = check_array(y_true, ensure_2d=True)
y_score = check_array(y_score, ensure_2d=True)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true, input_name="y_true")
if y_type != "multilabel-indicato... | |
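The classic two-sample example from the scikit-learn documentation makes the metric concrete:

```python
import numpy as np
from sklearn.metrics import coverage_error

y_true = np.array([[1, 0, 0], [0, 0, 1]])
y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
# Sample 1: the true label's score ranks 2nd; sample 2: it ranks 3rd.
print(coverage_error(y_true, y_score))  # (2 + 3) / 2 = 2.5
```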
69,654 | 241,685 | 20 | pytorch_lightning/callbacks/progress/base.py | 6 | 6 | def total_predict_batches(self) -> int:
return sum(self.trainer.num_predict_batches)
| Integrate progress tracking into the progress bar (#11213) | total_predict_batches | 8a549a550cb10189ff1db382f546a40cd1c6c5b3 | lightning | base.py | 9 | 7 | https://github.com/Lightning-AI/lightning.git | 1 | 17 | 0 | 6 | 30 | Python | {
"docstring": "The total number of prediction batches, which may change from epoch to epoch.\n\n Use this to set the total number of iterations in the progress bar. Can return ``inf`` if the predict dataloader\n is of infinite size.\n ",
"language": "en",
"n_whitespaces": 58,
"n_words": 37... | def total_predict_batches(self) -> int:
return sum(self.trainer.num_predict_batches)
| |
21,975 | 104,793 | 35 | src/datasets/dataset_dict.py | 14 | 10 | def shape(self) -> Dict[str, Tuple[int]]:
self._check_values_type()
return {k: datas | Add code examples for DatasetDict (#4245)
* 📝 add code examples for DatasetDict
* 🖍 apply quentin review | shape | 1904d0c0a3a96330d9b870cdca3e9a3a137f2977 | datasets | dataset_dict.py | 9 | 14 | https://github.com/huggingface/datasets.git | 2 | 39 | 0 | 14 | 62 | Python | {
"docstring": "Shape of each split of the dataset (number of columns, number of rows).\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\")\n >>> ds.shape\n {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)... | def shape(self) -> Dict[str, Tuple[int]]:
self._check_values_type()
return {k: dataset.shape for k, dataset in self.items()}
| |
6,560 | 36,018 | 71 | src/transformers/onnx/config.py | 17 | 8 | def is_torch_support_available(self) -> bool:
if is_torch_available():
from transformers.file_utils import t | Add ONNX export for ViT (#15658)
* Add ONNX support for ViT
* Refactor to use generic preprocessor
* Add vision dep to tests
* Extend ONNX slow tests to ViT
* Add dummy image generator
* Use model_type to determine modality
* Add deprecation warnings for tokenizer argument
* Add warning when overw... | is_torch_support_available | 50dd314d939a86f3a81e19af01459f449fbaeeca | transformers | config.py | 9 | 12 | https://github.com/huggingface/transformers.git | 2 | 29 | 0 | 15 | 50 | Python | {
"docstring": "\n The minimum PyTorch version required to export the model.\n\n Returns:\n `bool`: Whether the installed version of PyTorch is compatible with the model.\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 22,
"vocab_size": 17
} | def is_torch_support_available(self) -> bool:
if is_torch_available():
from transformers.file_utils import torch_version
return torch_version >= self.torch_onnx_minimum_version
else:
return False
| |
589 | 3,887 | 106 | airbyte-integrations/connectors/source-orb/source_orb/source.py | 31 | 20 | def check_connection(self, logger, config) -> Tuple[bool, any]:
auth_header = TokenAuthenticator(token=config["api_key"]).get_auth_header()
ping_url = ORB_API_BASE_URL + "ping"
ping_response = requests.get(ping_url, headers= | 🎉 New Source: Orb (#9985)
* V1 of source_orb connector
* add boostrap.md file
* add clause on Pagination to bootstrap.md
* add SUMMARY documentation
* add lookback_window_days connector parameter
* Add support for start_date parameter
* Add ability to transform record in order to un-nest IDs
* Ad... | check_connection | 1e0ac30ebdcfce55a5644bcd486044da45c93dd6 | airbyte | source.py | 13 | 13 | https://github.com/airbytehq/airbyte.git | 2 | 69 | 0 | 28 | 114 | Python | {
"docstring": "\n Makes a request to the /ping endpoint, which validates that the authentication credentials are appropriate.\n API Docs: https://docs.withorb.com/reference/ping\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 17
} | def check_connection(self, logger, config) -> Tuple[bool, any]:
auth_header = TokenAuthenticator(token=config["api_key"]).get_auth_header()
ping_url = ORB_API_BASE_URL + "ping"
ping_response = requests.get(ping_url, headers=auth_header)
try:
ping_response.raise_for_s... | |
16,416 | 75,517 | 555 | wagtail/search/backends/database/postgres/postgres.py | 112 | 37 | def add_items_upsert(self, content_type_pk, indexers):
compiler = InsertQuery(IndexEntry).get_compiler(connection=self.connection)
title_sql = []
autocomplete_sql | Reformat with black | add_items_upsert | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | postgres.py | 13 | 47 | https://github.com/wagtail/wagtail.git | 3 | 260 | 0 | 67 | 417 | Python | {
"docstring": "\n INSERT INTO %s (content_type_id, object_id, title, autocomplete, body, title_norm)\n (VALUES %s)\n ON CONFLICT (content_type_id, object_id)\n DO UPDATE SET title = EXCLUDED.title,\n title_norm = 1.0,\n ... | def add_items_upsert(self, content_type_pk, indexers):
compiler = InsertQuery(IndexEntry).get_compiler(connection=self.connection)
title_sql = []
autocomplete_sql = []
body_sql = []
data_params = []
for indexer in indexers:
data_params.extend((content_type_pk... | |
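The SQL shape in the row's docstring is a plain Postgres upsert; a generic illustration with hypothetical table and column names:

```python
upsert_sql = """
INSERT INTO index_entry (content_type_id, object_id, title, body)
VALUES (%s, %s, %s, %s)
ON CONFLICT (content_type_id, object_id)
DO UPDATE SET title = EXCLUDED.title,
              body  = EXCLUDED.body
"""
# e.g. cursor.execute(upsert_sql, (content_type_pk, object_id, title, body))
```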
19,097 | 94,500 | 216 | tests/sentry/sentry_metrics/test_all_indexers.py | 108 | 27 | def test_already_created_plus_written_results(indexer, indexer_cache) -> None:
org_id = 1234
raw_indexer = indexer
indexer = CachingIndexer(indexer_cache, indexer)
v0 = raw_indexer.record(use_case_id, org_id, "v1.2.0")
v1 = raw_indexer.record(use_case_id, org_id, "v1.2.1")
v2 = raw_indexe... | ref(metrics): Split caching out of indexers, random test refactoring [sns-1606] (#37714) | test_already_created_plus_written_results | 7bbb85a0d95d23620228a02bb4401fc09658f5f1 | sentry | test_all_indexers.py | 13 | 32 | https://github.com/getsentry/sentry.git | 3 | 257 | 0 | 62 | 411 | Python | {
"docstring": "\n Test that we correctly combine db read results with db write results\n for the same organization.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 16,
"vocab_size": 14
} | def test_already_created_plus_written_results(indexer, indexer_cache) -> None:
org_id = 1234
raw_indexer = indexer
indexer = CachingIndexer(indexer_cache, indexer)
v0 = raw_indexer.record(use_case_id, org_id, "v1.2.0")
v1 = raw_indexer.record(use_case_id, org_id, "v1.2.1")
v2 = raw_indexe... | |
55,634 | 219,596 | 740 | python3.10.4/Lib/_osx_support.py | 268 | 32 | def compiler_fixup(compiler_so, cc_args):
stripArch = stripSysroot = False
compiler_so = list(compiler_so)
if not _supports_universal_builds():
# OSX before 10.4.0, these don't support -arch and -isysroot at
# all.
stripArch = stripSysroot = True
else:
stripArch = ... | add python 3.10.4 for windows | compiler_fixup | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _osx_support.py | 16 | 49 | https://github.com/XX-net/XX-Net.git | 29 | 357 | 0 | 135 | 613 | Python | {
"docstring": "\n This function will strip '-isysroot PATH' and '-arch ARCH' from the\n compile flags if the user has specified one them in extra_compile_flags.\n\n This is needed because '-arch ARCH' adds another architecture to the\n build, without a way to remove an architecture. Furthermore GCC will\... | def compiler_fixup(compiler_so, cc_args):
stripArch = stripSysroot = False
compiler_so = list(compiler_so)
if not _supports_universal_builds():
# OSX before 10.4.0, these don't support -arch and -isysroot at
# all.
stripArch = stripSysroot = True
else:
stripArch = ... | |
16,243 | 74,277 | 889 | wagtail/core/tests/test_page_model.py | 197 | 22 | def test_copy_page_with_excluded_parental_and_child_relations(self):
try:
# modify excluded fields for this test
EventPage.exclude_fields_in_copy = [
"advert_placements",
"categories",
"signup_link",
]
# s... | Reformat with black | test_copy_page_with_excluded_parental_and_child_relations | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_page_model.py | 14 | 45 | https://github.com/wagtail/wagtail.git | 2 | 190 | 0 | 107 | 337 | Python | {
"docstring": "Test that a page will be copied with parental and child relations removed if excluded.",
"language": "en",
"n_whitespaces": 14,
"n_words": 15,
"vocab_size": 15
} | def test_copy_page_with_excluded_parental_and_child_relations(self):
try:
# modify excluded fields for this test
EventPage.exclude_fields_in_copy = [
"advert_placements",
"categories",
"signup_link",
]
# s... | |
52,056 | 207,694 | 32 | tests/admin_views/tests.py | 11 | 7 | def test_app_model_in_list_body_class(self):
| Refs #33476 -- Reformatted code with Black. | test_app_model_in_list_body_class | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 11 | 3 | https://github.com/django/django.git | 1 | 27 | 0 | 11 | 50 | Python | {
"docstring": "\n Ensure app and model tag are correctly read by change_list template\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 11
} | def test_app_model_in_list_body_class(self):
response = self.client.get(reverse("admin:admin_views_section_changelist"))
self.assertContains(response, '<body class=" app-admin_views model-section ')
| |
3,214 | 20,068 | 20 | pipenv/patched/notpip/_vendor/distro.py | 11 | 3 | def version_parts(best=False):
# type: (bool) -> Tuple[s | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | version_parts | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | distro.py | 7 | 2 | https://github.com/pypa/pipenv.git | 1 | 15 | 0 | 11 | 28 | Python | {
"docstring": "\n Return the version of the current OS distribution as a tuple\n ``(major, minor, build_number)`` with items as follows:\n\n * ``major``: The result of :func:`distro.major_version`.\n\n * ``minor``: The result of :func:`distro.minor_version`.\n\n * ``build_number``: The result of :f... | def version_parts(best=False):
# type: (bool) -> Tuple[str, str, str]
return _distro.version_parts(best)
| |
25,267 | 114,734 | 182 | mindsdb/integrations/mssql_handler/mssql_handler.py | 42 | 11 | def check_status(self):
status = {
'success': False
}
try:
con = s | Add sql server handler | check_status | cf75c4186e1caa36b18c9ddffce98da94b9904e6 | mindsdb | mssql_handler.py | 13 | 12 | https://github.com/mindsdb/mindsdb.git | 2 | 56 | 0 | 36 | 116 | Python | {
"docstring": "\n Check the connection of the SQL Server database\n :return: success status and error message if error occurs\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 17,
"vocab_size": 15
} | def check_status(self):
status = {
'success': False
}
try:
con = self.__connect()
with closing(con) as con:
#TODO: best way to check con.connected ?
status['success'] = True
except Exception as e:
... | |
@keras_export("keras.models.model_from_yaml") | 81,500 | 275,885 | 96 | keras/saving/model_config.py | 37 | 10 | def model_from_config(config, custom_objects=None):
if isinstance(config, list):
raise TypeError(
"`model_from_config` expects a dictionary, not a list. "
f"Received: co | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | model_from_config | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | model_config.py | 12 | 9 | https://github.com/keras-team/keras.git | 2 | 41 | 1 | 35 | 85 | Python | {
"docstring": "Instantiates a Keras model from its config.\n\n Usage:\n ```\n # for a Functional API model\n tf.keras.Model().from_config(model.get_config())\n\n # for a Sequential model\n tf.keras.Sequential().from_config(model.get_config())\n ```\n\n Args:\n config: Configuration dic... | def model_from_config(config, custom_objects=None):
if isinstance(config, list):
raise TypeError(
"`model_from_config` expects a dictionary, not a list. "
f"Received: config={config}. Did you meant to use "
"`Sequential.from_config(config)`?"
)
from keras... |
16,242 | 74,269 | 41 | wagtail/core/tests/test_page_model.py | 9 | 12 | def test_golden_path(self):
with self.assertNumQueries(0):
result = self.page.cached_ | Reformat with black | test_golden_path | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_page_model.py | 12 | 4 | https://github.com/wagtail/wagtail.git | 1 | 42 | 0 | 9 | 71 | Python | {
"docstring": "\n The return value should match the value you'd get\n if fetching the ContentType from the database,\n and shouldn't trigger any database queries when\n the ContentType is already in memory.\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 29,
"vocab_s... | def test_golden_path(self):
with self.assertNumQueries(0):
result = self.page.cached_content_type
self.assertEqual(result, ContentType.objects.get(id=self.page.content_type_id))
| |
18,484 | 88,971 | 44 | src/sentry/lang/javascript/processor.py | 22 | 12 | def fold_function_name(function_name):
parts = function_name.split(".")
if len(parts) == 1:
retu | ref(processor): Fold occurences of property names in function_name (#41697)
Fold multiple consecutive occurrences of the same property name into a
single group, excluding the last component.
```
foo | foo
foo.foo | foo.foo
foo.foo.foo | {foo#2}.foo
bar.foo.foo | bar.foo.foo
bar.foo.foo.foo | bar.{foo#2}.foo
... | fold_function_name | 8078d89b46841c7f7a57cc49a4b9cafb42b12ce0 | sentry | processor.py | 10 | 8 | https://github.com/getsentry/sentry.git | 3 | 53 | 0 | 20 | 82 | Python | {
"docstring": "\n Fold multiple consecutive occurences of the same property name into a single group, excluding the last component.\n\n foo | foo\n foo.foo | foo.foo\n foo.foo.foo | {foo#2}.foo\n bar.foo.foo | bar.foo.foo\n bar.foo.foo.foo | bar.{foo#2}.foo\n bar.foo.foo.onError | bar.{foo#2}.on... | def fold_function_name(function_name):
parts = function_name.split(".")
if len(parts) == 1:
return function_name
tail = parts.pop()
grouped = [list(g) for _, g in groupby(parts)]
| |
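A complete version consistent with the docstring's examples, reconstructed around the row's `groupby` line; the `{name#count}` formatting is inferred from the expected outputs:

```python
from itertools import groupby

def fold_function_name(function_name):
    parts = function_name.split(".")
    if len(parts) == 1:
        return function_name
    tail = parts.pop()  # the last component is never folded
    grouped = [list(g) for _, g in groupby(parts)]

    def fmt(group):
        return group[0] if len(group) == 1 else f"{{{group[0]}#{len(group)}}}"

    return ".".join(fmt(g) for g in grouped) + "." + tail

assert fold_function_name("foo") == "foo"
assert fold_function_name("foo.foo.foo") == "{foo#2}.foo"
assert fold_function_name("bar.foo.foo.foo") == "bar.{foo#2}.foo"
```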
49,675 | 200,507 | 238 | sympy/integrals/transforms.py | 73 | 28 | def _laplace_rule_exp(f, t, s, doit=True, **hints):
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
z = Wild('z')
k, func = f.as_independent(t, as_Add=False)
ma1 = func.match(exp(y)*z)
if ma1:
ma2 = ma1[y].collect(t).match(a*t)
if ma2:
d... | Fixed Issue #24294 | _laplace_rule_exp | 807f499971f9c298bc6bacbb08bcb19299fbb42c | sympy | transforms.py | 14 | 20 | https://github.com/sympy/sympy.git | 4 | 178 | 0 | 59 | 283 | Python | {
"docstring": "\n This internal helper function tries to transform a product containing the\n `exp` function and returns `None` if it cannot do it.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 21,
"vocab_size": 20
} | def _laplace_rule_exp(f, t, s, doit=True, **hints):
hints.pop('simplify', True)
a = Wild('a', exclude=[t])
y = Wild('y')
z = Wild('z')
k, func = f.as_independent(t, as_Add=False)
ma1 = func.match(exp(y)*z)
if ma1:
ma2 = ma1[y].collect(t).match(a*t)
if ma2:
d... | |
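The `Wild` matching that drives the rule, shown in isolation; the match results in the comments are the expected outcomes, not verified output:

```python
from sympy import Symbol, Wild, exp

t = Symbol('t')
a = Wild('a', exclude=[t])   # coefficient must not contain t
y, z = Wild('y'), Wild('z')

expr = exp(3 * t) * t
m = expr.match(exp(y) * z)         # expected: {y_: 3*t, z_: t}
m2 = m[y].collect(t).match(a * t)  # expected: {a_: 3}
```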
12,099 | 60,370 | 68 | code/deep/BJMMD/caffe/scripts/cpp_lint.py | 37 | 9 | def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
for i in xrange(startpos, len(line)):
if line[i] == startchar:
depth += 1
elif line[i] == endchar:
depth -= 1
if depth == 0:
return (i + 1, 0)
return (-1, depth)
| Balanced joint maximum mean discrepancy for deep transfer learning | FindEndOfExpressionInLine | cc4d0564756ca067516f71718a3d135996525909 | transferlearning | cpp_lint.py | 14 | 9 | https://github.com/jindongwang/transferlearning.git | 5 | 69 | 0 | 29 | 103 | Python | {
"docstring": "Find the position just after the matching endchar.\n\n Args:\n line: a CleansedLines line.\n startpos: start searching at this position.\n depth: nesting level at startpos.\n startchar: expression opening character.\n endchar: expression closing character.\n\n Returns:\n On finding... | def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
for i in xrange(startpos, len(line)):
if line[i] == startchar:
depth += 1
elif line[i] == endchar:
depth -= 1
if depth == 0:
return (i + 1, 0)
return (-1, depth)
| |
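A Python 3 restatement of the helper (`xrange` → `range`), with a quick check of the "position just after the matching endchar" contract:

```python
def find_end_of_expression_in_line(line, startpos, depth, startchar, endchar):
    for i in range(startpos, len(line)):
        if line[i] == startchar:
            depth += 1
        elif line[i] == endchar:
            depth -= 1
            if depth == 0:
                return (i + 1, 0)  # position just past the match
    return (-1, depth)  # not found on this line; carry depth forward

assert find_end_of_expression_in_line("f(a, (b))x", 1, 0, "(", ")") == (9, 0)
```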
76,472 | 260,762 | 40 | sklearn/neighbors/tests/test_nca.py | 22 | 14 | def test_toy_example_collapse_points():
rng = np.random.RandomState(42)
input_dim = 5
two_points = rng.randn(2, input_dim)
| MAINT Parameters validation for NeighborhoodComponentsAnalysis (#24195)
Co-authored-by: jeremie du boisberranger <jeremiedbb@yahoo.fr> | test_toy_example_collapse_points | d7c978b764c6aafb65cc28757baf3f64da2cae34 | scikit-learn | test_nca.py | 13 | 15 | https://github.com/scikit-learn/scikit-learn.git | 1 | 132 | 0 | 18 | 99 | Python | {
"docstring": "Test on a toy example of three points that should collapse\n\n We build a simple example: two points from the same class and a point from\n a different class in the middle of them. On this simple example, the new\n (transformed) points should all collapse into one single point. Indeed, the\n ... | def test_toy_example_collapse_points():
rng = np.random.RandomState(42)
input_dim = 5
two_points = rng.randn(2, input_dim)
X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]])
y = [0, 0, 1]
| |
19,027 | 93,936 | 264 | tests/sentry/sentry_metrics/test_batch.py | 31 | 9 | def _get_string_indexer_log_records(caplog):
return [
(
rec.message,
{
k: v
for k, v in rec.__dict__.items()
if k
| ref(metrics_indexer): Improve typing, introduce more dataclasses, fix org_id namespacing bug in metadata [INGEST-1380] (#37170) | _get_string_indexer_log_records | f31b57cbc5ec359c8ef9c6459d3d9d8ffcd6e8d9 | sentry | test_batch.py | 12 | 19 | https://github.com/getsentry/sentry.git | 4 | 54 | 0 | 24 | 88 | Python | {
"docstring": "\n Get all log records and relevant extra arguments for easy snapshotting.\n ",
"language": "en",
"n_whitespaces": 18,
"n_words": 11,
"vocab_size": 11
} | def _get_string_indexer_log_records(caplog):
return [
(
rec.message,
{
k: v
for k, v in rec.__dict__.items()
if k
in (
"string_type",
"is_global_quota",
"n... | |
93,990 | 294,963 | 71 | tests/components/subaru/test_config_flow.py | 23 | 16 | async def test_registered_pin_required(hass, user_form):
with patch(MOCK_API_CONNECT, return_value=True), patch(
MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock
) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=True):
mock_device_registered.return_value = True
... | Add 2FA support for Subaru integration setup (#68753)
* Add 2FA support for Subaru integration setup
* Update config flow to abort with 2FA request fail | test_registered_pin_required | ab0abdc988ac101217ba043909c4be8b33101ab3 | core | test_config_flow.py | 12 | 8 | https://github.com/home-assistant/core.git | 1 | 61 | 0 | 22 | 100 | Python | {
"docstring": "Test if the device is already registered and PIN required.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | async def test_registered_pin_required(hass, user_form):
with patch(MOCK_API_CONNECT, return_value=True), patch(
MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock
) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=True):
mock_device_registered.return_value = True
... | |
73,197 | 249,920 | 674 | tests/replication/test_pusher_shard.py | 133 | 22 | def test_send_push_multiple_workers(self):
http_client_mock1 = Mock(spec_set=["post_json_get_json"])
http_client_mock1.post_jso | Modernize unit tests configuration settings for workers. (#14568)
Use the newer foo_instances configuration instead of the
deprecated flags to enable specific features (e.g. start_pushers). | test_send_push_multiple_workers | 854a6884d81c95297bf93badcddc00a4cab93418 | synapse | test_pusher_shard.py | 13 | 55 | https://github.com/matrix-org/synapse.git | 1 | 278 | 0 | 71 | 473 | Python | {
"docstring": "Test that registration works when using sharded pusher workers.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_send_push_multiple_workers(self):
http_client_mock1 = Mock(spec_set=["post_json_get_json"])
http_client_mock1.post_json_get_json.side_effect = (
lambda *_, **__: defer.succeed({})
)
self.make_worker_hs(
"synapse.app.generic_worker",
... |