ast_errors stringlengths 0 3.2k | d_id int64 44 121k | id int64 70 338k | n_whitespaces int64 3 14k | path stringlengths 8 134 | n_words int64 4 4.82k | n_identifiers int64 1 131 | random_cut stringlengths 16 15.8k | commit_message stringlengths 2 15.3k | fun_name stringlengths 1 84 | commit_id stringlengths 40 40 | repo stringlengths 3 28 | file_name stringlengths 5 79 | ast_levels int64 6 31 | nloc int64 1 548 | url stringlengths 31 59 | complexity int64 1 66 | token_counts int64 6 2.13k | n_ast_errors int64 0 28 | vocab_size int64 4 1.11k | n_ast_nodes int64 15 19.2k | language stringclasses 1
value | documentation dict | code stringlengths 101 62.2k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
72,982 | 249,542 | 132 | tests/storage/test_event_federation.py | 27 | 8 | def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo:
room_id = "!backfill-room-test:some-host"
depth_map: Dict[str, int] = { | Only try to backfill event if we haven't tried before recently (#13635)
Only try to backfill event if we haven't tried before recently (exponential backoff). No need to keep trying the same backfill point that fails over and over.
Fix https://github.com/matrix-org/synapse/issues/13622
Fix https://github.com/matrix... | _setup_room_for_insertion_backfill_tests | ac1a31740b6d0dfda4d57a25762aaddfde981caf | synapse | test_event_federation.py | 9 | 27 | https://github.com/matrix-org/synapse.git | 1 | 81 | 0 | 26 | 88 | Python | {
"docstring": "\n Sets up a room with various insertion event backward extremities to test\n backfill functions against.\n\n Returns:\n _BackfillSetupInfo including the `room_id` to test against and\n `depth_map` of events in the room\n ",
"language": "en",
"n_wh... | def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo:
room_id = "!backfill-room-test:some-host"
depth_map: Dict[str, int] = {
"1": 1,
"2": 2,
"insertion_eventA": 3,
"3": 4,
"insertion_eventB": 5,
"4": 6... | |
28,435 | 127,405 | 196 | python/ray/serve/experimental/gradio_visualize_graph.py | 59 | 13 | def postprocessing(data):
if type_to_string(type(data)) == "torch.Tensor":
try:
import torch
from torchvision import transforms
# By default Torch tensors are displayed as images. To display them as JSON,
# the user can simply convert them to numpy arra... | [serve] Add additional features to DAG visualization with Gradio (#28246) | postprocessing | 203253321d34543aa25483803ebc21e3903679b6 | ray | gradio_visualize_graph.py | 13 | 13 | https://github.com/ray-project/ray.git | 3 | 55 | 0 | 50 | 101 | Python | {
"docstring": "Add support for types that are not supported by Gradio.\n\n Some data types like PyTorch tensors, cannot be processed and displayed through\n Gradio. Thus we extend support to these data types by transforming them into a form\n that Gradio can process and display.\n ",
"language": "en",
... | def postprocessing(data):
if type_to_string(type(data)) == "torch.Tensor":
try:
import torch
from torchvision import transforms
# By default Torch tensors are displayed as images. To display them as JSON,
# the user can simply convert them to numpy arra... | |
14,186 | 66,430 | 9 | erpnext/manufacturing/doctype/work_order/test_work_order.py | 18 | 10 | def get_scrap_item_details(bom_no):
scrap_items = {}
for item in frappe.db.sql(
,
bom_no,
as_dict=1,
):
scrap_items[item.item_code] = item.stock_qty
return scrap_items
| style: format code with black | get_scrap_item_details | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | test_work_order.py | 10 | 10 | https://github.com/frappe/erpnext.git | 2 | 40 | 0 | 16 | 62 | Python | {
"docstring": "select item_code, stock_qty from `tabBOM Scrap Item`\n\t\twhere parent = %s",
"language": "en",
"n_whitespaces": 9,
"n_words": 11,
"vocab_size": 11
} | def get_scrap_item_details(bom_no):
scrap_items = {}
for item in frappe.db.sql(
,
bom_no,
as_dict=1,
):
scrap_items[item.item_code] = item.stock_qty
return scrap_items
| |
9,020 | 46,854 | 23 | airflow/models/dag.py | 9 | 11 | def get_is_active(self, session=NEW_SESSION) -> Optional[None]:
| API: Fix deprecation warning due to using query.value (#22775)
When using sqlalchemy 1.4, there's a deprecation warning at the task logging:
SADeprecationWarning: Query.value() is deprecated and will be removed
in a future release. Please use Query.with_entities() in combination
with Query.scalar() (deprecated s... | get_is_active | 921ccedf7f90f15e8d18c27a77b29d232be3c8cb | airflow | dag.py | 12 | 3 | https://github.com/apache/airflow.git | 1 | 39 | 0 | 9 | 63 | Python | {
"docstring": "Returns a boolean indicating whether this DAG is active",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def get_is_active(self, session=NEW_SESSION) -> Optional[None]:
return session.query(DagModel.is_active).filter(DagModel.dag_id == self.dag_id).scalar()
| |
57,470 | 225,565 | 99 | mkdocs/structure/pages.py | 36 | 17 | def is_homepage(self) -> bool:
return self.is_top_level and self.is_index and self.file.url in ('.', './', 'index.html')
previous_page: Optional[Page]
next_page: Optional[Page]
parent: Optional[Section]
children: None = None
is_section: bool = False
... | Relative links end with slash even for homepage links (#3022)
Fixes #3015 | is_homepage | 32359f3e93f5ca7778b9f7c3d6d92f49a629c84c | mkdocs | pages.py | 9 | 3 | https://github.com/mkdocs/mkdocs.git | 3 | 30 | 0 | 27 | 143 | Python | {
"docstring": "Evaluates to `True` for the homepage of the site and `False` for all other pages.The [page][mkdocs.structure.pages.Page] object for the previous page or `None`.\n The value will be `None` if the current page is the first item in the site navigation\n or if the current page is not included in the... | def is_homepage(self) -> bool:
return self.is_top_level and self.is_index and self.file.url in ('.', './', 'index.html')
previous_page: Optional[Page]
next_page: Optional[Page]
parent: Optional[Section]
children: None = None
is_section: bool = False
... | |
19,835 | 100,340 | 774 | lib/gui/utils.py | 154 | 31 | def _filetypes(self):
all_files = ("All files", "*.*")
filetypes = dict(
default=(all_files,),
alignments=[("Faceswap Alignments", "*.fsa"), all_files],
config_project=[("Faceswap Project files", "*.fsw"), all_files],
config_task=[("Faceswap Task ... | Update code to support Tensorflow versions up to 2.8 (#1213)
* Update maximum tf version in setup + requirements
* - bump max version of tf version in launcher
- standardise tf version check
* update keras get_custom_objects for tf>2.6
* bugfix: force black text in GUI file dialogs (linux)
* dssim loss -... | _filetypes | c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf | faceswap | utils.py | 18 | 40 | https://github.com/deepfakes/faceswap.git | 8 | 337 | 0 | 116 | 586 | Python | {
"docstring": " dict: The accepted extensions for each file type for opening/saving ",
"language": "en",
"n_whitespaces": 11,
"n_words": 10,
"vocab_size": 9
} | def _filetypes(self):
all_files = ("All files", "*.*")
filetypes = dict(
default=(all_files,),
alignments=[("Faceswap Alignments", "*.fsa"), all_files],
config_project=[("Faceswap Project files", "*.fsw"), all_files],
config_task=[("Faceswap Task ... | |
56,035 | 220,528 | 118 | python3.10.4/Lib/asyncio/futures.py | 32 | 12 | def result(self):
if self._state == _CANCELLED:
exc = self._make_cancelled_error()
raise exc
if self._state != _FINISHED:
raise exceptions.InvalidStateError('Result is not ready.')
| add python 3.10.4 for windows | result | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | futures.py | 10 | 10 | https://github.com/XX-net/XX-Net.git | 4 | 57 | 0 | 22 | 95 | Python | {
"docstring": "Return the result this future represents.\n\n If the future has been cancelled, raises CancelledError. If the\n future's result isn't yet available, raises InvalidStateError. If\n the future is done and has an exception set, this exception is raised.\n ",
"language": "e... | def result(self):
if self._state == _CANCELLED:
exc = self._make_cancelled_error()
raise exc
if self._state != _FINISHED:
raise exceptions.InvalidStateError('Result is not ready.')
self.__log_traceback = False
if self._exception is not None:
... | |
22,075 | 105,108 | 19 | src/datasets/formatting/dataset_wrappers/torch_iterable_dataset.py | 10 | 5 | def _set_fsspec_for_multiprocess() -> None:
fsspec.asyn.iothread[0] = None
fsspec.asyn. | Support DataLoader with num_workers > 0 in streaming mode (#4375)
* make TorchIterableDataset work in parallel
- make it picklable
- paralellize over the shards when num_workers is passed
* start writing some tests
* fix streaming extension and fsspec issues in subprocesses
* fix some tests
* fix more te... | _set_fsspec_for_multiprocess | ab7d3045ac9154e9c1c2602d0869130defdc6dc7 | datasets | torch_iterable_dataset.py | 9 | 9 | https://github.com/huggingface/datasets.git | 1 | 27 | 0 | 8 | 45 | Python | {
"docstring": "\n Clear reference to the loop and thread.\n This is necessary otherwise HTTPFileSystem hangs in the ML training loop.\n Only required for fsspec >= 0.9.0\n See https://github.com/fsspec/gcsfs/issues/379\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 26,
"vocab_size": 25... | def _set_fsspec_for_multiprocess() -> None:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
| |
76,501 | 260,798 | 137 | sklearn/utils/extmath.py | 47 | 19 | def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(
np.isclose(
| DOC ensure sklearn/utils/extmath/stable_cumsum passes numpydoc (#24348) | stable_cumsum | 45756377c748d84aa52f66950b8d9eeefc31456c | scikit-learn | extmath.py | 13 | 14 | https://github.com/scikit-learn/scikit-learn.git | 2 | 108 | 0 | 40 | 157 | Python | {
"docstring": "Use high precision for cumsum and check that final value matches sum.\n\n Warns if the final cumulative sum does not match the sum (up to the chosen\n tolerance).\n\n Parameters\n ----------\n arr : array-like\n To be cumulatively summed as flat.\n axis : int, default=None\n ... | def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(
np.isclose(
out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True
)
):
... | |
7,960 | 43,407 | 751 | airflow/www/views.py | 208 | 57 | def confirm(self):
args = request.args
dag_id = args.get('dag_id')
task_id = args.get('task_id')
dag_run_id = args.get('dag_run_id')
state = args.get('state')
origin = args.get('origin')
if 'map_index' not in args:
map_indexes: Optional[List[... | Upgrade FAB to 4.1.1 (#24399)
* Upgrade FAB to 4.1.1
The Flask Application Builder have been updated recently to
support a number of newer dependencies. This PR is the
attempt to migrate FAB to newer version.
This includes:
* update setup.py and setup.cfg upper and lower bounds to
account for proper vers... | confirm | e2f19505bf3622935480e80bee55bf5b6d80097b | airflow | views.py | 13 | 61 | https://github.com/apache/airflow.git | 12 | 430 | 0 | 129 | 729 | Python | {
"docstring": "Show confirmation page for marking tasks as success or failed.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def confirm(self):
args = request.args
dag_id = args.get('dag_id')
task_id = args.get('task_id')
dag_run_id = args.get('dag_run_id')
state = args.get('state')
origin = args.get('origin')
if 'map_index' not in args:
map_indexes: Optional[List[... | |
24,583 | 112,125 | 54 | nni/retiarii/oneshot/pytorch/base_lightning.py | 15 | 10 | def resample(self) -> Dict[str, Any]:
| Valuechoice oneshot lightning (#4602) | resample | 14d2966b9e91ae16dcc39de8f41017a75cec8ff9 | nni | base_lightning.py | 12 | 13 | https://github.com/microsoft/nni.git | 2 | 39 | 0 | 14 | 63 | Python | {
"docstring": "Trigger the resample for each ``nas_module``.\n Sometimes (e.g., in differentiable cases), it does nothing.\n\n Returns\n -------\n dict\n Sampled architecture.\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 19,
"vocab_size": 19
} | def resample(self) -> Dict[str, Any]:
result = {}
for module in self.nas_modules:
result.update(module.resample(memo=result))
return result
| |
24,402 | 111,419 | 28 | spacy/tests/doc/test_json_doc_conversion.py | 9 | 11 | def test_json_to_doc_attribute_consistency(doc):
doc_json = doc.to_json()
doc_json["tokens"][1].pop("morph")
with pytest.raises(ValueError):
Doc(doc.vocab).from_js | Add Doc.from_json() (#10688)
* Implement Doc.from_json: rough draft.
* Implement Doc.from_json: first draft with tests.
* Implement Doc.from_json: added documentation on website for Doc.to_json(), Doc.from_json().
* Implement Doc.from_json: formatting changes.
* Implement Doc.to_json(): reverting unrelated... | test_json_to_doc_attribute_consistency | 8387ce4c01db48d92ac5638e18316c0f1fc8861e | spaCy | test_json_doc_conversion.py | 12 | 5 | https://github.com/explosion/spaCy.git | 1 | 44 | 0 | 9 | 80 | Python | {
"docstring": "Test that Doc.from_json() raises an exception if tokens don't all have the same set of properties.",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 16
} | def test_json_to_doc_attribute_consistency(doc):
doc_json = doc.to_json()
doc_json["tokens"][1].pop("morph")
with pytest.raises(ValueError):
Doc(doc.vocab).from_json(doc_json)
| |
52,092 | 207,767 | 114 | tests/admin_views/tests.py | 37 | 8 | def test_index_css_classes(self):
| Refs #33476 -- Reformatted code with Black. | test_index_css_classes | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 13 | 9 | https://github.com/django/django.git | 1 | 87 | 0 | 19 | 156 | Python | {
"docstring": "\n CSS class names are used for each app and model on the admin index\n pages (#17050).\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 16
} | def test_index_css_classes(self):
# General index page
response = self.client.get(reverse("admin:index"))
self.assertContains(response, '<div class="app-admin_views module')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr clas... | |
51,985 | 207,495 | 36 | tests/admin_views/test_actions.py | 8 | 9 | def test_action_column_class(self):
response = self.client.get(reverse("admin:admin_views_subscriber_changelist"))
self.assertIsNotNone(response.context["act | Refs #33476 -- Reformatted code with Black. | test_action_column_class | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | test_actions.py | 11 | 4 | https://github.com/django/django.git | 1 | 38 | 0 | 8 | 69 | Python | {
"docstring": "The checkbox column class is present in the response.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_action_column_class(self):
response = self.client.get(reverse("admin:admin_views_subscriber_changelist"))
self.assertIsNotNone(response.context["action_form"])
self.assertContains(response, "action-checkbox-column")
| |
17,168 | 81,176 | 22 | awx/conf/settings.py | 8 | 7 | def hashkey(cls, *args, **kwargs):
return cachetools.keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)
| add lock to cachetools usage
* We observed daphne giving tracebacks when accessing logging settings.
Originally, configure tower in tower settings was no a suspect because
daphne is not multi-process. We've had issues with configure tower in
tower settings and multi-process before. We later learned that Daphne
... | hashkey | 21972c91dd2b52cd206bf71ea038ab0e1f478b32 | awx | settings.py | 10 | 2 | https://github.com/ansible/awx.git | 1 | 28 | 0 | 7 | 52 | Python | {
"docstring": "\n Usage of @cachetools.cached has changed to @cachetools.cachedmethod\n The previous cachetools decorator called the hash function and passed in (self, key).\n The new cachtools decorator calls the hash function with just (key).\n Ideally, we would continue to pass self, h... | def hashkey(cls, *args, **kwargs):
return cachetools.keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)
| |
24,457 | 111,625 | 102 | nni/experiment/config/base.py | 31 | 17 | def load(cls, path):
with open(path) as yaml_file:
data = yaml.safe_load(yaml_file)
if not isinstance(data, dict):
raise TypeError(f'Conent of config file {path} is not a dict/object')
utils.set_base_path(Path(path).parent)
config = cls(**data)
ut... | Some string changes around experiment module (#4442) | load | 3f6a8274a97bf003b5eadc05faa324162b7f4123 | nni | base.py | 11 | 9 | https://github.com/microsoft/nni.git | 2 | 64 | 0 | 27 | 114 | Python | {
"docstring": "\n Load a YAML config file from file system.\n\n Since YAML is a superset of JSON, it can also load JSON files.\n\n This method raises exception if:\n\n - The file is not available\n - The file content is not valid YAML\n - Top level value of the YAML is not o... | def load(cls, path):
with open(path) as yaml_file:
data = yaml.safe_load(yaml_file)
if not isinstance(data, dict):
raise TypeError(f'Conent of config file {path} is not a dict/object')
utils.set_base_path(Path(path).parent)
config = cls(**data)
ut... | |
@frappe.whitelist() | 14,111 | 66,151 | 22 | erpnext/hr/doctype/leave_allocation/leave_allocation.py | 34 | 14 | def get_leave_allocation_for_period(employee, leave_type, from_date, to_date):
leave_allocated = 0
leave_allocations = frappe.db.sql(
,
{"from_date": from_date, "to_date": to_date, "employee": employee, "leave_type": leave_type},
as_dict=1,
)
if leave_allocations:
for leave_alloc in leave_allocations:
l... | style: format code with black | get_leave_allocation_for_period | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | leave_allocation.py | 11 | 19 | https://github.com/frappe/erpnext.git | 3 | 62 | 1 | 29 | 109 | Python | {
"docstring": "\n\t\tselect employee, leave_type, from_date, to_date, total_leaves_allocated\n\t\tfrom `tabLeave Allocation`\n\t\twhere employee=%(employee)s and leave_type=%(leave_type)s\n\t\t\tand docstatus=1\n\t\t\tand (from_date between %(from_date)s and %(to_date)s\n\t\t\t\tor to_date between %(from_date)s and ... | def get_leave_allocation_for_period(employee, leave_type, from_date, to_date):
leave_allocated = 0
leave_allocations = frappe.db.sql(
,
{"from_date": from_date, "to_date": to_date, "employee": employee, "leave_type": leave_type},
as_dict=1,
)
if leave_allocations:
for leave_alloc in leave_allocations:
l... |
51,957 | 207,414 | 397 | tests/admin_utils/test_logentry.py | 92 | 34 | def test_proxy_model_content_type_is_used_for_log_entries(self):
proxy_content_type = ContentType.objects.get_for_model(
ArticleProxy, for_concrete_model=False
)
post_data = {
"site": self.site.pk,
"title": "Foo",
"hist": "Bar",
... | Refs #33476 -- Reformatted code with Black. | test_proxy_model_content_type_is_used_for_log_entries | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | test_logentry.py | 11 | 36 | https://github.com/django/django.git | 1 | 251 | 0 | 62 | 424 | Python | {
"docstring": "\n Log entries for proxy models should have the proxy model's contenttype\n (#21084).\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 12,
"vocab_size": 11
} | def test_proxy_model_content_type_is_used_for_log_entries(self):
proxy_content_type = ContentType.objects.get_for_model(
ArticleProxy, for_concrete_model=False
)
post_data = {
"site": self.site.pk,
"title": "Foo",
"hist": "Bar",
... | |
36,605 | 156,222 | 156 | dask/utils.py | 42 | 12 | def typename(typ, short=False) -> str:
if not isinstance(typ, type):
return typename(type(typ))
try:
if not typ.__module__ or typ.__module__ == "builtins":
return typ. | Add mild typing to common utils functions (#8848) | typename | 261bf174931580230717abca93fe172e166cc1e8 | dask | utils.py | 16 | 28 | https://github.com/dask/dask.git | 6 | 88 | 0 | 29 | 150 | Python | {
"docstring": "\n Return the name of a type\n\n Examples\n --------\n >>> typename(int)\n 'int'\n\n >>> from dask.core import literal\n >>> typename(literal)\n 'dask.core.literal'\n >>> typename(literal, short=True)\n 'dask.literal'\n ",
"language": "en",
"n_whitespaces": 57,
"... | def typename(typ, short=False) -> str:
if not isinstance(typ, type):
return typename(type(typ))
try:
if not typ.__module__ or typ.__module__ == "builtins":
return typ.__name__
else:
if short:
module, *_ = typ.__module__.split(".")
... | |
50,801 | 204,587 | 396 | django/core/management/__init__.py | 114 | 24 | def fetch_command(self, subcommand):
# Get commands outside of try block to prevent swallowing exceptions
commands = get_commands()
try:
app_name = commands[subcommand]
except KeyError:
if os.environ.get("DJANGO_SETTINGS_MODULE"):
# If `su... | Refs #33476 -- Reformatted code with Black. | fetch_command | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | __init__.py | 14 | 20 | https://github.com/django/django.git | 6 | 126 | 0 | 89 | 223 | Python | {
"docstring": "\n Try to fetch the given subcommand, printing a message with the\n appropriate command called from the command line (usually\n \"django-admin\" or \"manage.py\") if it can't be found.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 27,
"vocab_size": 24
} | def fetch_command(self, subcommand):
# Get commands outside of try block to prevent swallowing exceptions
commands = get_commands()
try:
app_name = commands[subcommand]
except KeyError:
if os.environ.get("DJANGO_SETTINGS_MODULE"):
# If `su... | |
42,643 | 178,270 | 382 | label_studio/core/storage.py | 63 | 28 | def url(self, name):
name = self._normalize_name(clean_name(name))
blob = self.bucket.blob(name)
blob_params = self.get_object_parameters(name)
no_signed_url = (
blob_params.get('acl', self.default_acl) == 'publicRead' or not self.querystring_auth)
if not se... | fix: DEV-3911: Move persistent storages to OS (#3377)
* fix: DEV-3911: Move persistent storages to OS
* Fix
* Add deps
* Back header
* Move DownloadStorageData handler
* Update all urls json
* Fix import
* add nginx config
* Fix GSC storage
Co-authored-by: Sergei Ivashchenko <triklozoid@gmai... | url | 92314e4a9c431c407533e4a064481acf3c5983ab | label-studio | storage.py | 16 | 29 | https://github.com/heartexlabs/label-studio.git | 6 | 164 | 0 | 41 | 266 | Python | {
"docstring": "\n Return public url or a signed url for the Blob.\n This DOES NOT check for existance of Blob - that makes codes too slow\n for many use cases.\n Overridden to force the use of the IAM signBlob API.\n See https://github.com/googleapis/python-storage/blob/51907411277... | def url(self, name):
name = self._normalize_name(clean_name(name))
blob = self.bucket.blob(name)
blob_params = self.get_object_parameters(name)
no_signed_url = (
blob_params.get('acl', self.default_acl) == 'publicRead' or not self.querystring_auth)
if not se... | |
76,591 | 260,960 | 242 | sklearn/utils/validation.py | 104 | 23 | def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):
if isclass(estimator):
raise TypeError("{} is a class, not an instance.".format(estimator))
if msg is None:
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate... | DOC Ensures that check_is_fitted passes numpydoc validation (#24454) | check_is_fitted | b850a9417d4777931e2894fd8155b73dc87973b9 | scikit-learn | validation.py | 16 | 22 | https://github.com/scikit-learn/scikit-learn.git | 12 | 170 | 0 | 69 | 284 | Python | {
"docstring": "Perform is_fitted validation for estimator.\n\n Checks if the estimator is fitted by verifying the presence of\n fitted attributes (ending with a trailing underscore) and otherwise\n raises a NotFittedError with the given message.\n\n If an estimator does not set any attributes with a trai... | def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):
if isclass(estimator):
raise TypeError("{} is a class, not an instance.".format(estimator))
if msg is None:
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate... | |
120,634 | 334,468 | 261 | models/vision/glide/modeling_glide.py | 113 | 37 | def p_mean_variance(self, model, x, t, transformer_out, clip_denoised=True, model_kwargs=None):
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, t, transformer_out)
assert model_output.shape == (B,... | Classifier-free guidance scheduler + GLIDe pipeline | p_mean_variance | 1e21f061601dda0aa9740e88bfce68bf4aac4acd | diffusers | modeling_glide.py | 12 | 19 | https://github.com/huggingface/diffusers.git | 3 | 243 | 0 | 77 | 356 | Python | {
"docstring": "\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D ... | def p_mean_variance(self, model, x, t, transformer_out, clip_denoised=True, model_kwargs=None):
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, t, transformer_out)
assert model_output.shape == (B,... | |
76,372 | 260,606 | 24 | sklearn/utils/tests/test_estimator_html_repr.py | 12 | 9 | def test_invalid_parameters_in_stacking():
stacker = StackingClassifier(estimators | FIX Show a HTML repr for meta-estimatosr with invalid parameters (#24015)
Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com> | test_invalid_parameters_in_stacking | 84c6421a9067de7d1b54b7a6d8e21ce38e1f0eca | scikit-learn | test_estimator_html_repr.py | 10 | 4 | https://github.com/scikit-learn/scikit-learn.git | 1 | 32 | 0 | 10 | 56 | Python | {
"docstring": "Invalidate stacking configuration uses default repr.\n\n Non-regression test for #24009.\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 10,
"vocab_size": 10
} | def test_invalid_parameters_in_stacking():
stacker = StackingClassifier(estimators=[])
html_output = estimator_html_repr(stacker)
assert html.escape(str(stacker)) in html_output
| |
14,485 | 67,302 | 44 | erpnext/regional/south_africa/setup.py | 59 | 13 | def add_permissions():
for doctype in ("South Africa VAT Settings", "South Africa VAT Account"):
add_permission(doctype, "All", 0)
for role in ("Accounts Manager", "Accounts User", "System Manager"):
add_permission(doctype, role, 0)
update_permission_property(doctype, role, 0, "write", 1)
update_permiss... | style: format code with black | add_permissions | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | setup.py | 19 | 15 | https://github.com/frappe/erpnext.git | 4 | 128 | 0 | 46 | 215 | Python | {
"docstring": "Add Permissions for South Africa VAT Settings and South Africa VAT Account\n\tand VAT Audit Report",
"language": "en",
"n_whitespaces": 14,
"n_words": 16,
"vocab_size": 11
} | def add_permissions():
for doctype in ("South Africa VAT Settings", "South Africa VAT Account"):
add_permission(doctype, "All", 0)
for role in ("Accounts Manager", "Accounts User", "System Manager"):
add_permission(doctype, role, 0)
update_permission_property(doctype, role, 0, "write", 1)
update_permiss... | |
50,370 | 203,426 | 44 | django/contrib/admin/options.py | 16 | 10 | def has_delete_permission(self, request, obj=None):
opts = self.opts
codename = get_permission_codename("delete", opts)
retu | Refs #33476 -- Reformatted code with Black. | has_delete_permission | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | options.py | 10 | 4 | https://github.com/django/django.git | 1 | 42 | 0 | 15 | 69 | Python | {
"docstring": "\n Return True if the given request has permission to change the given\n Django model instance, the default implementation doesn't examine the\n `obj` parameter.\n\n Can be overridden by the user in subclasses. In such case it should\n return True if the given reques... | def has_delete_permission(self, request, obj=None):
opts = self.opts
codename = get_permission_codename("delete", opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
| |
23,829 | 109,922 | 75 | lib/mpl_toolkits/mplot3d/art3d.py | 26 | 17 | def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
zs = cbook._to_unmasked_float_array(zs).ravel()
zs = np.broadcast_to(zs, len(xs))
self._ve | Improve mpl_toolkit documentation | set_3d_properties | df6f95703b60348e01603f98a439b133da2938a0 | matplotlib | art3d.py | 10 | 7 | https://github.com/matplotlib/matplotlib.git | 1 | 72 | 0 | 20 | 116 | Python | {
"docstring": "\n Set the *z* position and direction of the line.\n\n Parameters\n ----------\n zs : float or array of floats\n The location along the *zdir* axis in 3D space to position the\n line.\n zdir : {'x', 'y', 'z'}\n Plane to plot line orth... | def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
zs = cbook._to_unmasked_float_array(zs).ravel()
zs = np.broadcast_to(zs, len(xs))
self._verts3d = juggle_axes(xs, ys, zs, zdir)
self.stale = True
| |
21,305 | 101,926 | 29 | lib/gui/project.py | 8 | 5 | def clear_tasks(self):
logger.debug("Clearing stored tasks")
self._tasks = | Typing - lib.gui.display_command | clear_tasks | dab823a3eb7a5257cb1e0818ee10ed234d3de97f | faceswap | project.py | 8 | 3 | https://github.com/deepfakes/faceswap.git | 1 | 18 | 0 | 8 | 35 | Python | {
"docstring": " Clears all of the stored tasks.\n\n This is required when loading a task stored in a legacy project file, and is only to be\n called by :class:`Project` when a project has been loaded which is in fact a task.\n ",
"language": "en",
"n_whitespaces": 61,
"n_words": 39,
"voc... | def clear_tasks(self):
logger.debug("Clearing stored tasks")
self._tasks = {}
| |
14,117 | 66,160 | 28 | erpnext/hr/doctype/leave_application/leave_application.py | 39 | 15 | def add_department_leaves(events, start, end, employee, company):
department = frappe.db.get_value("Emplo | style: format code with black | add_department_leaves | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | leave_application.py | 10 | 11 | https://github.com/frappe/erpnext.git | 2 | 71 | 0 | 33 | 113 | Python | {
"docstring": "select name from tabEmployee where department=%s\n\t\tand company=%s",
"language": "en",
"n_whitespaces": 6,
"n_words": 8,
"vocab_size": 8
} | def add_department_leaves(events, start, end, employee, company):
department = frappe.db.get_value("Employee", employee, "department")
if not department:
return
# department leaves
department_employees = frappe.db.sql_list(
,
(department, company),
)
filter_conditions = ' and employee in ("%s")' % '", "'... | |
29,216 | 130,291 | 58 | python/ray/_private/thirdparty/pathspec/util.py | 19 | 9 | def is_file(self, follow_links=None):
if follow_links is None:
follow_links = True
node_stat = self._stat if follow_links else self._lstat | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | is_file | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | util.py | 8 | 5 | https://github.com/ray-project/ray.git | 3 | 38 | 0 | 15 | 61 | Python | {
"docstring": "\n Get whether the entry is a regular file.\n\n *follow_links* (:class:`bool` or :data:`None`) is whether to follow\n symbolic links. If this is :data:`True`, a symlink to a regular file\n will result in :data:`True`. Default is :data:`None` for :data:`True`.\n\n Ret... | def is_file(self, follow_links=None):
if follow_links is None:
follow_links = True
node_stat = self._stat if follow_links else self._lstat
return stat.S_ISREG(node_stat.st_mode)
| |
51,888 | 207,170 | 119 | tests/admin_inlines/tests.py | 29 | 7 | def test_tabular_model_form_meta_readonly_field(self):
response = self.client.get(reverse("admin:admin_inlines_someparentmodel_add"))
self.assertCont | Refs #33476 -- Reformatted code with Black. | test_tabular_model_form_meta_readonly_field | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 11 | 10 | https://github.com/django/django.git | 1 | 39 | 0 | 24 | 75 | Python | {
"docstring": "\n Tabular inlines use ModelForm.Meta.help_texts and labels for read-only\n fields.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 9,
"vocab_size": 9
} | def test_tabular_model_form_meta_readonly_field(self):
response = self.client.get(reverse("admin:admin_inlines_someparentmodel_add"))
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height=... | |
17,033 | 80,220 | 483 | wagtail/snippets/tests/test_locking.py | 121 | 18 | def test_edit_get_unlocked_no_lock_permission(self):
# Use edit permission only
self.set_permissions(["change"])
# Get the edit page
response = self.client.get(self.get_url("edit"))
html = response.content.decode()
lock_url = self.get_url("lock")
# Shou... | Add tests for locking snippets | test_edit_get_unlocked_no_lock_permission | 10dbbddaf35607e4257f50dd960520a1268dd225 | wagtail | test_locking.py | 11 | 34 | https://github.com/wagtail/wagtail.git | 1 | 123 | 0 | 70 | 225 | Python | {
"docstring": "A user cannot lock an object without the lock permission.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def test_edit_get_unlocked_no_lock_permission(self):
# Use edit permission only
self.set_permissions(["change"])
# Get the edit page
response = self.client.get(self.get_url("edit"))
html = response.content.decode()
lock_url = self.get_url("lock")
# Shou... | |
19,047 | 94,186 | 751 | src/sentry/models/counter.py | 184 | 23 | def increment_project_counter(project, delta=1, using="default"):
if delta <= 0:
raise ValueError("There is only one way, and that's up.")
sample_rate = options.get("store.projectcounter-modern-upsert-sample-rate")
modern_upsert = sample_rate and random.random() <= sample_rate
# To preve... | fix(counter): Fix minor linting violation (#37392) | increment_project_counter | 7f0e298ca45cd41f0e6df3968a6c0c2923a7b831 | sentry | counter.py | 16 | 39 | https://github.com/getsentry/sentry.git | 7 | 179 | 0 | 127 | 312 | Python | {
"docstring": "This method primarily exists so that south code can use it.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def increment_project_counter(project, delta=1, using="default"):
if delta <= 0:
raise ValueError("There is only one way, and that's up.")
sample_rate = options.get("store.projectcounter-modern-upsert-sample-rate")
modern_upsert = sample_rate and random.random() <= sample_rate
# To preve... | |
73,160 | 249,830 | 21 | tests/storage/test_id_generators.py | 7 | 4 | def test_multiple_gen_nexts_closed_in_different_order(self) -> None:
| Reintroduce #14376, with bugfix for monoliths (#14468)
* Add tests for StreamIdGenerator
* Drive-by: annotate all defs
* Revert "Revert "Remove slaved id tracker (#14376)" (#14463)"
This reverts commit d63814fd736fed5d3d45ff3af5e6d3bfae50c439, which in
turn reverted 36097e88c4da51fce6556a58c49bd675f4cf20ab. This re... | test_multiple_gen_nexts_closed_in_different_order | 115f0eb2334b13665e5c112bd87f95ea393c9047 | synapse | test_id_generators.py | 8 | 6 | https://github.com/matrix-org/synapse.git | 1 | 26 | 0 | 7 | 28 | Python | {
"docstring": "Check that we handle overlapping calls to gen_next, even when their IDs\n created and persisted in different orders.",
"language": "en",
"n_whitespaces": 24,
"n_words": 18,
"vocab_size": 18
} | def test_multiple_gen_nexts_closed_in_different_order(self) -> None:
id_gen = self._create_id_generator()
| |
11,712 | 57,811 | 49 | src/prefect/cli/deployment.py | 34 | 18 | def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
return dumper.represent_scalar("tag:yaml.org,2002:str", data)
yaml.add_representer(str, str_presen | Working YAML generation with lots of bells and whistles | str_presenter | 36d9870433a22fff3944fa07f8e2feeb1b622bd9 | prefect | deployment.py | 11 | 4 | https://github.com/PrefectHQ/prefect.git | 2 | 42 | 0 | 30 | 135 | Python | {
"docstring": "\n configures yaml for dumping multiline strings\n Ref: https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data\n ",
"language": "en",
"n_whitespaces": 18,
"n_words": 8,
"vocab_size": 8
} | def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
return dumper.represent_scalar("tag:yaml.org,2002:str", data)
yaml.add_representer(str, str_presenter)
yaml.representer.SafeRepres... | |
5,201 | 29,120 | 123 | saleor/core/auth_backend.py | 44 | 16 | def _get_permissions(self, user_obj, obj, from_name):
if not user_obj.is_active or user_obj.is_anonymous or obj is not None:
return set()
perm_cache_name = "_effective_permissions_cache"
if not getattr(user_obj, perm_cache_name, None):
perms = getattr(self, f"_g... | Replace Interpolation With Fstring (#11016)
* Replace Interpolation With Fstring
* Fix out of bound lines.
* Revert to lazy formatting for log messages. Also fix failing flake8.
* Fix minor code smells and typo.
* Make street_address to one line.
* Fix test cases.
* Fix lints. | _get_permissions | 92a0c6c9f4324aa8f65a9b3e3a319604660a92a8 | saleor | auth_backend.py | 13 | 9 | https://github.com/saleor/saleor.git | 6 | 95 | 0 | 34 | 163 | Python | {
"docstring": "Return the permissions of `user_obj` from `from_name`.\n\n `from_name` can be either \"group\" or \"user\" to return permissions from\n `_get_group_permissions` or `_get_user_permissions` respectively.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 22,
"vocab_size"... | def _get_permissions(self, user_obj, obj, from_name):
if not user_obj.is_active or user_obj.is_anonymous or obj is not None:
return set()
perm_cache_name = "_effective_permissions_cache"
if not getattr(user_obj, perm_cache_name, None):
perms = getattr(self, f"_g... | |
81,576 | 276,145 | 91 | keras/saving/saved_model/saved_model_test.py | 28 | 12 | def test_trainable_layers(self):
mo | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | test_trainable_layers | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | saved_model_test.py | 10 | 8 | https://github.com/keras-team/keras.git | 1 | 80 | 0 | 24 | 130 | Python | {
"docstring": "Tests that trainable status of individual layers is preserved.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_trainable_layers(self):
model = model = self._get_model()
# Set the last layer to *not* be trainable.
model.layers[-1].trainable = False
self._train_model(model, use_dataset=True)
loaded = self._save_and_load(model)
self._test_evaluation(model, loaded)
... | |
35,236 | 153,052 | 239 | modin/core/dataframe/pandas/dataframe/dataframe.py | 57 | 16 | def _reorder_labels(self, row_positions=None, col_positions=None):
if row_positions is not None:
ordered_rows = self._partition_mgr_cls.map_axis_partitions(
0, self._partitions, lambda df: df.iloc[row_positions]
)
row_idx = self.index[row_positions]
... | REFACTOR-#2656: Update modin to fit algebra (code only) (#3717)
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Co-authored-by: Vasily Litvinov <vasilij.n.litvinov@intel.com>
Co-authored-by: Alexey Prutskov <alexey.prutskov@intel.com>
Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com... | _reorder_labels | 58bbcc37477866d19c8b092a0e1974a4f0baa586 | modin | dataframe.py | 13 | 18 | https://github.com/modin-project/modin.git | 3 | 123 | 0 | 36 | 187 | Python | {
"docstring": "\n Reorder the column and or rows in this DataFrame.\n\n Parameters\n ----------\n row_positions : list of int, optional\n The ordered list of new row orders such that each position within the list\n indicates the new position.\n col_positions :... | def _reorder_labels(self, row_positions=None, col_positions=None):
if row_positions is not None:
ordered_rows = self._partition_mgr_cls.map_axis_partitions(
0, self._partitions, lambda df: df.iloc[row_positions]
)
row_idx = self.index[row_positions]
... | |
14,998 | 69,225 | 14 | erpnext/assets/doctype/asset_capitalization/test_asset_capitalization.py | 27 | 13 | def get_actual_sle_dict(name):
sles = frappe.db.sql(
,
name,
as_dict=1,
)
sle_dict = {}
for d in sles:
sle_dict[(d.item_code, d.warehouse)] = {
"actual_qty": d.actual_qty, | feat: Asset Capitalization
- manual selection of entry type
- GLE cleanup with smaller functions
- GLE considering periodical inventory
- test cases | get_actual_sle_dict | 58d430fe3ee62e93ad8d16a08bb42156a25b7d41 | erpnext | test_asset_capitalization.py | 11 | 22 | https://github.com/frappe/erpnext.git | 2 | 60 | 0 | 24 | 94 | Python | {
"docstring": "\n\t\tselect\n\t\t\titem_code, warehouse,\n\t\t\tsum(actual_qty) as actual_qty,\n\t\t\tsum(stock_value_difference) as stock_value_difference\n\t\tfrom `tabStock Ledger Entry`\n\t\twhere voucher_type = 'Asset Capitalization' and voucher_no = %s\n\t\tgroup by item_code, warehouse\n\t\thaving actual_qty ... | def get_actual_sle_dict(name):
sles = frappe.db.sql(
,
name,
as_dict=1,
)
sle_dict = {}
for d in sles:
sle_dict[(d.item_code, d.warehouse)] = {
"actual_qty": d.actual_qty,
"stock_value_difference": d.stock_value_difference,
}
return sle_dict
| |
113,486 | 314,885 | 125 | homeassistant/config_entries.py | 30 | 11 | async def _async_process_on_unload(self) -> None:
| Track tasks adding entities (#73828)
* Track tasks adding entities
* Update homeassistant/config_entries.py
* fix cast tests
Co-authored-by: J. Nick Koston <nick@koston.org> | _async_process_on_unload | 00810235c92b492a966c6021021d49360ffb3cdd | core | config_entries.py | 13 | 10 | https://github.com/home-assistant/core.git | 7 | 71 | 0 | 25 | 120 | Python | {
"docstring": "Process the on_unload callbacks and wait for pending tasks.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | async def _async_process_on_unload(self) -> None:
if self._on_unload is not None:
while self._on_unload:
self._on_unload.pop()()
while self._pending_tasks:
pending = [task for task in self._pending_tasks if not task.done()]
self._pending_task... | |
36,013 | 154,490 | 22 | modin/core/execution/dask/implementations/pandas_on_dask/partitioning/partition.py | 13 | 7 | def apply_func(partition, func, *args, **kwargs):
| FIX-#4597: Refactor Partition handling of func, args, kwargs (#4715)
Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: Jonathan Shi <jhshi@ponder.io> | apply_func | d6d503ac7c3028d871c34d9e99e925ddb0746df6 | modin | partition.py | 9 | 3 | https://github.com/modin-project/modin.git | 1 | 32 | 0 | 12 | 51 | Python | {
"docstring": "\n Execute a function on the partition in a worker process.\n\n Parameters\n ----------\n partition : pandas.DataFrame\n A pandas DataFrame the function needs to be executed on.\n func : callable\n The function to perform.\n *args : list\n Positional arguments to... | def apply_func(partition, func, *args, **kwargs):
result = func(partition, *args, **kwargs)
return result, get_ip()
| |
54,603 | 216,481 | 186 | salt/client/mixins.py | 53 | 22 | def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True):
if daemonize and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize() | Implement ``__getstate__`` and ``__setstate__`` instead of using ``classmethod``
Signed-off-by: Pedro Algarvio <palgarvio@vmware.com> | _proc_function_remote | c78f1ee4f49df35ab04e921a45de0878716d8bf5 | salt | mixins.py | 11 | 12 | https://github.com/saltstack/salt.git | 4 | 105 | 0 | 47 | 175 | Python | {
"docstring": "\n Run this method in a multiprocess target to execute the function on the\n master and fire the return data on the event bus\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 23,
"vocab_size": 19
} | def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True):
if daemonize and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize()
... | |
36,536 | 156,073 | 23 | dask/array/utils.py | 14 | 9 | def array_safe(a, like, **kwargs):
| absolufy-imports - No relative - PEP8 (#8796)
Conversation in https://github.com/dask/distributed/issues/5889 | array_safe | cccb9d8d8e33a891396b1275c2448c352ef40c27 | dask | utils.py | 8 | 3 | https://github.com/dask/dask.git | 1 | 35 | 0 | 13 | 51 | Python | {
"docstring": "\n If `a` is `dask.array`, return `dask.array.asarray(a, **kwargs)`,\n otherwise return `np.asarray(a, like=like, **kwargs)`, dispatching\n the call to the library that implements the like array. Note that\n when `a` is a `dask.Array` backed by `cupy.ndarray` but `like`\n isn't, this fu... | def array_safe(a, like, **kwargs):
from dask.array.routines import array
return _array_like_safe(np.array, array, a, like, **kwargs)
| |
25,668 | 116,102 | 73 | mindsdb/integrations/handlers/elasticsearch_handler/elasticsearch_handler.py | 23 | 12 | def get_tables(self) -> StatusResponse:
query =
result = self.native_query(query)
df = result.data_frame
df = df.drop(['type', 'type'], axis=1)
result.data_frame = df.rename(columns={'name': 'table_name'})
return result
| implemented the get_tables() and get_columns() methods | get_tables | c8accc16e3c56d0e7d2a0b63c63a956849da57da | mindsdb | elasticsearch_handler.py | 12 | 14 | https://github.com/mindsdb/mindsdb.git | 1 | 58 | 0 | 16 | 103 | Python | {
"docstring": "\n Return list of entities that will be accessible as tables.\n Returns:\n HandlerResponse\n \n SHOW TABLES;\n ",
"language": "en",
"n_whitespaces": 66,
"n_words": 14,
"vocab_size": 14
} | def get_tables(self) -> StatusResponse:
query =
result = self.native_query(query)
df = result.data_frame
df = df.drop(['type', 'type'], axis=1)
result.data_frame = df.rename(columns={'name': 'table_name'})
return result
| |
74,596 | 254,376 | 75 | d2l/jax.py | 33 | 19 | def accuracy(self, params, X, Y, averaged=True):
Y_hat = self.apply(params, X)
Y_hat = d2l.reshap | JAX: Add section classification.md (#2293) | accuracy | f348aecdade3cdec4f93b72da548c7394ecb42ce | d2l-en | jax.py | 12 | 6 | https://github.com/d2l-ai/d2l-en.git | 2 | 101 | 0 | 28 | 150 | Python | {
"docstring": "Compute the number of correct predictions.\n \n Defined in :numref:`sec_classification`",
"language": "en",
"n_whitespaces": 19,
"n_words": 9,
"vocab_size": 9
} | def accuracy(self, params, X, Y, averaged=True):
Y_hat = self.apply(params, X)
Y_hat = d2l.reshape(Y_hat, (-1, Y_hat.shape[-1]))
preds = d2l.astype(d2l.argmax(Y_hat, axis=1), Y.dtype)
compare = d2l.astype(preds == d2l.reshape(Y, -1), d2l.float32)
return d2l.reduce_mean(c... | |
40,848 | 173,483 | 380 | cps/tasks/metadata_backup.py | 92 | 24 | def open_metadata(self, book, custom_columns):
if config.config_use_google_drive:
if not gdriveutils.is_gdrive_ready():
raise Exception('Google Drive is configured but not ready')
web_content_link = gdriveutils.get_metadata_backup_via_gdrive(book.path)
if not... | Backup metadata 3rd step | open_metadata | 26be5ee2372b08c2f906661283a12e84d6c181f8 | calibre-web | metadata_backup.py | 15 | 37 | https://github.com/janeczku/calibre-web.git | 7 | 121 | 0 | 68 | 209 | Python | {
"docstring": "namespaces = {'dc': PURL_NAMESPACE, 'opf': OPF_NAMESPACE}\n test = etree.parse(book_metadata_filepath)\n root = test.getroot()\n for i in root.iter():\n self.log.info(i)\n title = root.find(\"dc:metadata\", namespaces)\n pass\n ... | def open_metadata(self, book, custom_columns):
if config.config_use_google_drive:
if not gdriveutils.is_gdrive_ready():
raise Exception('Google Drive is configured but not ready')
web_content_link = gdriveutils.get_metadata_backup_via_gdrive(book.path)
if not... | |
@pytest.mark.skipif(not can_import_module("tkinter"), reason="tkinter cannot be imported.") | 77,352 | 262,765 | 38 | tests/functional/test_libraries.py | 39 | 8 | def test_gevent_monkey(pyi_builder):
pyi_builder.test_source()
# The tkinter | tests: gevent tests: remove no-op excludes
The `gevent` tests seem to be attempting to exclude several packages.
As per comment in 416e1a0e83bf5a4924cc50d2befa2bb622b55107, this
was introduced in an attempt to break the following Windows-specific
import chain: setuptools.msvc -> numpy -> numpy.testing -> pytest ->
pyg... | test_gevent_monkey | 93ad16d5c970f70f843a5eda8b177f681743005b | pyinstaller | test_libraries.py | 10 | 5 | https://github.com/pyinstaller/pyinstaller.git | 1 | 11 | 1 | 36 | 54 | Python | {
"docstring": "\n from gevent.monkey import patch_all\n patch_all()\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 5,
"vocab_size": 5
} | def test_gevent_monkey(pyi_builder):
pyi_builder.test_source()
# The tkinter module may be available for import, but not actually importable due to missing shared libraries.
# Therefore, we need to use `can_import_module`-based skip decorator instead of `@importorskip`.
@pytest.mark.skipif(not can_import_module("... |
12,446 | 61,221 | 32 | .venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py | 20 | 7 | def split_auth_netloc_from_url(url):
# type: (str) -> Tuple[str, str, Tuple[str, | upd; format | split_auth_netloc_from_url | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | misc.py | 8 | 3 | https://github.com/jindongwang/transferlearning.git | 1 | 26 | 0 | 18 | 42 | Python | {
"docstring": "\n Parse a url into separate netloc, auth, and url with no auth.\n\n Returns: (url_without_auth, netloc, (username, password))\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 17,
"vocab_size": 15
} | def split_auth_netloc_from_url(url):
# type: (str) -> Tuple[str, str, Tuple[str, str]]
url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc)
return url_without_auth, netloc, auth
| |
46,153 | 189,647 | 19 | tests/test_text_mobject.py | 10 | 7 | def test_non_str_color():
text = Text("test_color_inheritance", color=Color("blue"))
markup_text = MarkupText("test_color_inheritance", color=Color("blue"))
| :class:`~.MathTex`, :class:`~.Tex`, :class:`~.Text` and :class:`~.MarkupText` inherit color from their parent mobjects. (#2467)
* comment out color-related things from tex_mob
* add change to svg_mobject
* MarkupText handles colour internally
* MarkupText handles colour internally
* make coordinate_system.... | test_non_str_color | 2275ec5916de0ad3bedbc276da09fc3bfbae4d5e | manim | test_text_mobject.py | 12 | 3 | https://github.com/ManimCommunity/manim.git | 1 | 31 | 0 | 8 | 60 | Python | {
"docstring": "Test that the Text and MarkupText can accept non_str color values\n i.e. colour.Color(red).",
"language": "en",
"n_whitespaces": 15,
"n_words": 13,
"vocab_size": 13
} | def test_non_str_color():
text = Text("test_color_inheritance", color=Color("blue"))
markup_text = MarkupText("test_color_inheritance", color=Color("blue"))
| |
103,899 | 305,107 | 49 | tests/components/zha/test_config_flow.py | 20 | 16 | async def test_strategy_no_network_settings(pick_radio, mock_app, hass):
mock_app.load_network_info = MagicMock(side_effect=NetworkNotFormed())
result, port = await pick_radio(RadioType.ezsp)
assert (
config_flow.FORMATION_REUSE_SETTINGS
not in result["data_schema"].schema["next_step_i... | ZHA backup/restore config flow (#77044) | test_strategy_no_network_settings | f78b39bdbfbe151e8bab72610b6fe03afc8c0747 | core | test_config_flow.py | 12 | 7 | https://github.com/home-assistant/core.git | 1 | 52 | 0 | 19 | 87 | Python | {
"docstring": "Test formation strategy when no network settings are present.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | async def test_strategy_no_network_settings(pick_radio, mock_app, hass):
mock_app.load_network_info = MagicMock(side_effect=NetworkNotFormed())
result, port = await pick_radio(RadioType.ezsp)
assert (
config_flow.FORMATION_REUSE_SETTINGS
not in result["data_schema"].schema["next_step_i... | |
23,059 | 108,099 | 282 | lib/matplotlib/mlab.py | 121 | 15 | def detrend(x, key=None, axis=None):
if key is None or key in ['constant', 'mean', 'default']:
return detrend(x, key=detrend_mean, axis=axis)
elif key == 'linear':
return detrend(x, key=detrend_linear, axis=axis)
elif key == 'none':
return detrend(x, key=detrend_none, axis=axis)... | Improve mlab documentation (and example) | detrend | 17b3c44f67f779e7d103381878f08c548c2c8495 | matplotlib | mlab.py | 14 | 21 | https://github.com/matplotlib/matplotlib.git | 13 | 180 | 0 | 82 | 295 | Python | {
"docstring": "\n Return *x* with its trend removed.\n\n Parameters\n ----------\n x : array or sequence\n Array or sequence containing the data.\n\n key : {'default', 'constant', 'mean', 'linear', 'none'} or function\n The detrending algorithm to use. 'default', 'mean', and 'constant' a... | def detrend(x, key=None, axis=None):
if key is None or key in ['constant', 'mean', 'default']:
return detrend(x, key=detrend_mean, axis=axis)
elif key == 'linear':
return detrend(x, key=detrend_linear, axis=axis)
elif key == 'none':
return detrend(x, key=detrend_none, axis=axis)... | |
@register.filter(is_safe=True)
@stringfilter | 15,653 | 71,270 | 26 | wagtail/admin/templatetags/wagtailadmin_tags.py | 12 | 10 | def has_unrendered_errors(bound_field):
return bound_field.errors and not hasattr(
bound_field.field.widget, "render_with_errors"
)
| Reformat with black | has_unrendered_errors | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | wagtailadmin_tags.py | 11 | 4 | https://github.com/wagtail/wagtail.git | 2 | 22 | 1 | 12 | 57 | Python | {
"docstring": "\n Return true if this field has errors that were not accounted for by render_with_errors, because\n the widget does not support the render_with_errors method\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 23,
"vocab_size": 21
} | def has_unrendered_errors(bound_field):
return bound_field.errors and not hasattr(
bound_field.field.widget, "render_with_errors"
)
@register.filter(is_safe=True)
@stringfilter |
17,448 | 82,589 | 189 | cms/tests/test_admin.py | 36 | 9 | def test_raw_id_threshold_page_permission_inline_admin(self):
with self.settings(CMS_RAW_ID_USERS=1):
with self.assertNumQueries(1):
self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, [])
# Create users to check if threshold is honored
self._get_guys(... | perf: Don't count users when CMS_RAW_ID_USERS=True (#7414)
* perf: Don't count users when CMS_RAW_ID_USERS=True
When using CMS_RAW_ID_USERS=True on a Postgres database with many users,
counting the users is slow and will always yield the same result.
Only count users when using an integer value as a threshold and re... | test_raw_id_threshold_page_permission_inline_admin | 7ca1b613d8573dff70e45dd54229b0032c3e8ca7 | django-cms | test_admin.py | 13 | 14 | https://github.com/django-cms/django-cms.git | 1 | 129 | 0 | 21 | 229 | Python | {
"docstring": "\n Only count users when using an integer value as threshold for\n CMS_RAW_ID_USERS.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 12,
"vocab_size": 12
} | def test_raw_id_threshold_page_permission_inline_admin(self):
with self.settings(CMS_RAW_ID_USERS=1):
with self.assertNumQueries(1):
self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, [])
# Create users to check if threshold is honored
self._get_guys(... | |
56,240 | 221,145 | 65 | python3.10.4/Lib/bdb.py | 22 | 8 | def get_breaks(self, filename, lineno):
filename = self | add python 3.10.4 for windows | get_breaks | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | bdb.py | 11 | 5 | https://github.com/XX-net/XX-Net.git | 4 | 47 | 0 | 18 | 69 | Python | {
"docstring": "Return all breakpoints for filename:lineno.\n\n If no breakpoints are set, return an empty list.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 14,
"vocab_size": 13
} | def get_breaks(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename] and \
Breakpoint.bplist[filename, lineno] or []
| |
73,002 | 249,580 | 156 | tests/storage/test_registration.py | 26 | 14 | def test_override(self) -> None:
self.get_success(
self.store.register_user(
self.user_id,
self.pwhash,
approved=True,
)
)
user = self.get_success(self.store.get_user_by_id(self.user_id))
self.assertIsNotNo... | Allow admins to require a manual approval process before new accounts can be used (using MSC3866) (#13556) | test_override | be76cd8200b18f3c68b895f85ac7ef5b0ddc2466 | synapse | test_registration.py | 11 | 17 | https://github.com/matrix-org/synapse.git | 1 | 94 | 0 | 23 | 150 | Python | {
"docstring": "Tests that if we require approval for new accounts, but we explicitly say the\n new user should be considered approved, they're marked as approved.\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 24,
"vocab_size": 22
} | def test_override(self) -> None:
self.get_success(
self.store.register_user(
self.user_id,
self.pwhash,
approved=True,
)
)
user = self.get_success(self.store.get_user_by_id(self.user_id))
self.assertIsNotNo... | |
37,370 | 158,197 | 64 | d2l/mxnet.py | 31 | 7 | def tokenize(lines, token='word'):
if token == 'word':
return [line.spl | [PaddlePaddle] Merge master into Paddle branch (#1186)
* change 15.2 title in chinese version (#1109)
change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘
* 修改部分语义表述 (#1105)
* Update r0.17.5 (#1120)
* Bump versions in installation
* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)
* line 313: "b... | tokenize | b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2 | d2l-zh | mxnet.py | 12 | 7 | https://github.com/d2l-ai/d2l-zh.git | 5 | 51 | 0 | 23 | 90 | Python | {
"docstring": "Split text lines into word or character tokens.\n\n Defined in :numref:`sec_text_preprocessing`",
"language": "en",
"n_whitespaces": 13,
"n_words": 11,
"vocab_size": 11
} | def tokenize(lines, token='word'):
if token == 'word':
return [line.split() for line in lines]
elif token == 'char':
return [list(line) for line in lines]
else:
print('ERROR: unknown token type: ' + token)
| |
@frappe.whitelist() | 14,507 | 67,369 | 158 | erpnext/selling/doctype/sales_order/sales_order.py | 252 | 68 | def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None):
if not selected_items:
return
if isinstance(selected_items, str):
selected_items = json.loads(selected_items)
def set_missing_values(source, target):
target.supplier = supplier
target.apply_discount_on = ""
... | style: format code with black | make_purchase_order_for_default_supplier | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | sales_order.py | 19 | 66 | https://github.com/frappe/erpnext.git | 11 | 297 | 1 | 168 | 886 | Python | {
"docstring": "Creates Purchase Order for each Supplier. Returns a list of doc objects.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None):
if not selected_items:
return
if isinstance(selected_items, str):
selected_items = json.loads(selected_items)
def set_missing_values(source, target):
target.supplier = supplier
target.apply_discount_on = ""
... |
50,262 | 203,228 | 256 | django/db/migrations/utils.py | 70 | 11 | def resolve_relation(model, app_label=None, model_name=None):
if isinstance(model, str):
if model == RECURSIVE_RELATIONSHIP_CONSTANT:
| Refs #33476 -- Refactored problematic code before reformatting by Black.
In these cases Black produces unexpected results, e.g.
def make_random_password(
self,
length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789',
):
or
cursor.execute("""
SELECT ...
""",
... | resolve_relation | c5cd8783825b5f6384417dac5f3889b4210b7d08 | django | utils.py | 15 | 18 | https://github.com/django/django.git | 7 | 101 | 0 | 42 | 169 | Python | {
"docstring": "\n Turn a model class or model reference string and return a model tuple.\n\n app_label and model_name are used to resolve the scope of recursive and\n unscoped model relationship.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 28,
"vocab_size": 22
} | def resolve_relation(model, app_label=None, model_name=None):
if isinstance(model, str):
if model == RECURSIVE_RELATIONSHIP_CONSTANT:
if app_label is None or model_name is None:
raise TypeError(
'app_label and model_name must be provided to resolve '
... | |
5,750 | 31,459 | 1,803 | src/transformers/modeling_tf_utils.py | 479 | 49 | def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
missing_layers = []
unexpected_layers = []
mismatched_layers = []
# Read the H5 file
with h5py.File(resolved_archive_file, "r") as sharded_checkpoint_file:
# Retrieve the name of each layer ... | TF Sharded (#17713)
* initial commit
* update modeeling tf utils
* quality
* clean and update args
* update
* remove potential bug
* code quality
* update
* update max shard
* update tests for sharding from pretrained
* fix remaining test
* make style
* h5py if tf available
* upd... | load_tf_weights | 7cced021fa8ddc59f0f77384300760d34545394e | transformers | modeling_tf_utils.py | 27 | 54 | https://github.com/huggingface/transformers.git | 13 | 415 | 0 | 200 | 705 | Python | {
"docstring": "\n Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and\n shapes.\n\n Args:\n model (`tf.keras.models.Model`):\n The model to load the weights into.\n resolved_archive_file (`str`):\n The location o... | def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
missing_layers = []
unexpected_layers = []
mismatched_layers = []
# Read the H5 file
with h5py.File(resolved_archive_file, "r") as sharded_checkpoint_file:
# Retrieve the name of each layer ... | |
51,287 | 205,929 | 104 | django/forms/boundfield.py | 29 | 14 | def css_classes(self, extra_classes=None):
if hasattr(extra_classes, "split"):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and h | Refs #33476 -- Reformatted code with Black. | css_classes | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | boundfield.py | 11 | 9 | https://github.com/django/django.git | 7 | 91 | 0 | 23 | 153 | Python | {
"docstring": "\n Return a string of space-separated CSS classes for this field.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def css_classes(self, extra_classes=None):
if hasattr(extra_classes, "split"):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, "error_css_class"):
extra_classes.add(self.form.error_css_class)
... | |
20,039 | 100,575 | 65 | lib/gpu_stats/nvidia.py | 22 | 11 | def _get_free_vram(self) -> List[float]:
vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).free / (1024 * 1024)
for handle in self._handles]
self._log("debug", f"GPU VRAM free: {vram}")
return vram
| Refactor lib.gpu_stats (#1218)
* inital gpu_stats refactor
* Add dummy CPU Backend
* Update Sphinx documentation | _get_free_vram | bdbbad4d310fb606b6f412aa81e9f57ccd994e97 | faceswap | nvidia.py | 11 | 14 | https://github.com/deepfakes/faceswap.git | 2 | 46 | 0 | 21 | 79 | Python | {
"docstring": " Obtain the amount of VRAM that is available, in Megabytes, for each connected Nvidia\n GPU.\n\n Returns\n -------\n list\n List of `float`s containing the amount of VRAM available, in Megabytes, for each\n connected GPU as corresponding to the value... | def _get_free_vram(self) -> List[float]:
vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).free / (1024 * 1024)
for handle in self._handles]
self._log("debug", f"GPU VRAM free: {vram}")
return vram
| |
@frappe.whitelist() | 14,404 | 66,996 | 50 | erpnext/projects/doctype/task/task.py | 69 | 25 | def get_project(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries imp | style: format code with black | get_project | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | task.py | 12 | 23 | https://github.com/frappe/erpnext.git | 3 | 119 | 1 | 52 | 210 | Python | {
"docstring": " select name {search_columns} from `tabProject`\n\t\twhere %(key)s like %(txt)s\n\t\t\t%(mcond)s\n\t\t\t{search_condition}\n\t\torder by name\n\t\tlimit %(start)s, %(page_len)s",
"language": "en",
"n_whitespaces": 12,
"n_words": 17,
"vocab_size": 16
} | def get_project(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
meta = frappe.get_meta(doctype)
searchfields = meta.get_search_fields()
search_columns = ", " + ", ".join(searchfields) if searchfields else ""
search_cond = " or " + " or ".join(field + " ... |
81,352 | 275,258 | 289 | keras/optimizers/optimizer_experimental/nadam.py | 59 | 18 | def build(self, var_list):
super().build(var_list)
if getattr(self, "_built", False):
return
self._built = True
self._momentums = []
self._velocities = []
self._u_product = tf.Variable(1.0, dtype=var_list[0].dtype)
# Keep a counter on how many... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | build | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | nadam.py | 13 | 20 | https://github.com/keras-team/keras.git | 3 | 113 | 0 | 48 | 182 | Python | {
"docstring": "Initialize optimizer variables.\n\n Nadam optimizer has 2 types of variables: momentums and velocities.\n\n Args:\n var_list: list of model variables to build Nadam variables on.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 24,
"vocab_size": 20
} | def build(self, var_list):
super().build(var_list)
if getattr(self, "_built", False):
return
self._built = True
self._momentums = []
self._velocities = []
self._u_product = tf.Variable(1.0, dtype=var_list[0].dtype)
# Keep a counter on how many... | |
12,762 | 61,938 | 372 | .venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py | 49 | 18 | def list_distinfo_files(self, absolute=False):
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = li... | upd; format | list_distinfo_files | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | database.py | 19 | 17 | https://github.com/jindongwang/transferlearning.git | 7 | 118 | 0 | 34 | 200 | Python | {
"docstring": "\n Iterates over the ``installed-files.txt`` entries and returns paths for\n each line if the path is pointing to a file located in the\n ``.egg-info`` directory or one of its subdirectories.\n\n :parameter absolute: If *absolute* is ``True``, each returned path is\n ... | def list_distinfo_files(self, absolute=False):
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = li... | |
2,610 | 13,348 | 217 | jina/parsers/orchestrate/base.py | 68 | 15 | def mixin_scalable_deployment_parser(parser):
gp = mixin_base_deployment_parser(parser, title='Scalable Deployment')
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help=,
)
gp.add_argument(
'--shards',
type=int,
defaul... | refactor: remove unnecessary parser args (#5328)
* refactor: refactor deployment mixin and remove polling and shards for gateway
* chore: rename executor to pod and move native and array type to worker args
* refactor: make exit-on-exceptions just a worker arg
* style: fix overload and cli autocomplete
* c... | mixin_scalable_deployment_parser | bd8003508da0b35713361484f5801ebc818bd0c3 | jina | base.py | 10 | 37 | https://github.com/jina-ai/jina.git | 1 | 97 | 0 | 52 | 162 | Python | {
"docstring": "Mixing in arguments required by a scalable deployment into the given parser.\n The deployment is scalable and can have shards, replicas and polling\n :param parser: the parser instance to which we add arguments\n \n The polling strategy of the Deployment and its endpoints (when `shards>1`)... | def mixin_scalable_deployment_parser(parser):
gp = mixin_base_deployment_parser(parser, title='Scalable Deployment')
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help=,
)
gp.add_argument(
'--shards',
type=int,
defaul... | |
15,840 | 72,114 | 135 | wagtail/admin/tests/test_privacy.py | 35 | 13 | def test_explorer_list_private(self):
response = self.client.get(
reverse("wagtailadmin_explore", args=(self.private_page.id,))
)
| Reformat with black | test_explorer_list_private | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_privacy.py | 14 | 10 | https://github.com/wagtail/wagtail.git | 1 | 53 | 0 | 30 | 88 | Python | {
"docstring": "\n This tests that there is a padlock displayed\n next to the private child page in the private pages explorer listing\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 20,
"vocab_size": 18
} | def test_explorer_list_private(self):
response = self.client.get(
reverse("wagtailadmin_explore", args=(self.private_page.id,))
)
# Check the response
self.assertEqual(response.status_code, 200)
# Must have one privacy icon (next to the private child page)
... | |
3,847 | 21,454 | 596 | pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py | 112 | 24 | def next(self):
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
... | Vendor in pip 22.1.2 | next | c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | pipenv | tarfile.py | 17 | 37 | https://github.com/pypa/pipenv.git | 14 | 211 | 0 | 58 | 353 | Python | {
"docstring": "Return the next member of the archive as a TarInfo object, when\n TarFile is opened for reading. Return None if there is no more\n available.\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 25,
"vocab_size": 22
} | def next(self):
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
... | |
76,839 | 261,495 | 287 | sklearn/linear_model/_logistic.py | 67 | 19 | def predict_proba(self, X):
check_is_fitted(self)
ovr = self.multi_class in ["ovr", "warn"] or (
self.multi_class == "auto"
and (
self.classes_.size <= 2
or self.solver in ("liblinear", "newton-cholesky")
)
)
i... | ENH add newton-cholesky solver to LogisticRegression (#24767) | predict_proba | bb080aa690364d84d11232c73dc8db2f0dde3578 | scikit-learn | _logistic.py | 14 | 18 | https://github.com/scikit-learn/scikit-learn.git | 6 | 105 | 0 | 51 | 177 | Python | {
"docstring": "\n Probability estimates.\n\n The returned estimates for all classes are ordered by the\n label of classes.\n\n For a multi_class problem, if multi_class is set to be \"multinomial\"\n the softmax function is used to find the predicted probability of\n each cl... | def predict_proba(self, X):
check_is_fitted(self)
ovr = self.multi_class in ["ovr", "warn"] or (
self.multi_class == "auto"
and (
self.classes_.size <= 2
or self.solver in ("liblinear", "newton-cholesky")
)
)
i... | |
@keras_export("keras.activations.softplus")
@tf.__internal__.dispatch.add_dispatch_support | 80,020 | 269,306 | 10 | keras/activations.py | 6 | 8 | def selu(x):
return tf.nn.selu(x)
@keras_export("keras.activations.softplus")
@tf | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | selu | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | activations.py | 8 | 2 | https://github.com/keras-team/keras.git | 1 | 15 | 1 | 6 | 50 | Python | {
"docstring": "Scaled Exponential Linear Unit (SELU).\n\n The Scaled Exponential Linear Unit (SELU) activation function is defined as:\n\n - `if x > 0: return scale * x`\n - `if x < 0: return scale * alpha * (exp(x) - 1)`\n\n where `alpha` and `scale` are pre-defined constants\n (`alpha=1.67326324` an... | def selu(x):
return tf.nn.selu(x)
@keras_export("keras.activations.softplus")
@tf.__internal__.dispatch.add_dispatch_support |
3,348 | 20,369 | 155 | pipenv/patched/notpip/_vendor/pygments/formatters/latex.py | 41 | 16 | def _find_safe_escape_tokens(self, text):
for i, t, v in self._filter_to(
self.lang.get_tokens_unprocessed(text),
lambda t: t in Token.Comment or t in Token.String
):
if t is None:
for i2, t2, v | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | _find_safe_escape_tokens | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | latex.py | 14 | 10 | https://github.com/pypa/pipenv.git | 5 | 79 | 0 | 29 | 121 | Python | {
"docstring": " find escape tokens that are not in strings or comments ",
"language": "en",
"n_whitespaces": 11,
"n_words": 10,
"vocab_size": 10
} | def _find_safe_escape_tokens(self, text):
for i, t, v in self._filter_to(
self.lang.get_tokens_unprocessed(text),
lambda t: t in Token.Comment or t in Token.String
):
if t is None:
for i2, t2, v2 in self._find_escape_tokens(v):
... | |
117,264 | 320,669 | 59 | tests/end2end/features/test_downloads_bdd.py | 31 | 13 | def set_up_fileselector(quteproc, py_proc, kind, files, output_type):
cmd, args = py_proc(r)
args += files.split(' ')
if output_type == "a temporary file":
args += ['--file={}']
fileselect_cmd = json.dumps([cmd, *args])
quteproc.set_setting('fileselect.handler', 'external')
quteproc... | test(downloads) wip test for external fileselect | set_up_fileselector | 36563450763868f12a2481ca636efccb2c7a43cc | qutebrowser | test_downloads_bdd.py | 10 | 25 | https://github.com/qutebrowser/qutebrowser.git | 2 | 71 | 0 | 27 | 125 | Python | {
"docstring": "Set up fileselect.xxx.command to select the file(s).\n import os\n import sys\n tmp_file = None\n for i, arg in enumerate(sys.argv):\n if arg.startswith('--file='):\n tmp_file = arg[len('--file='):]\n sys.argv.pop(i)\n ... | def set_up_fileselector(quteproc, py_proc, kind, files, output_type):
cmd, args = py_proc(r)
args += files.split(' ')
if output_type == "a temporary file":
args += ['--file={}']
fileselect_cmd = json.dumps([cmd, *args])
quteproc.set_setting('fileselect.handler', 'external')
quteproc... | |
10,948 | 53,896 | 31 | tests/test_task_runners.py | 15 | 7 | def task_runner(request):
if not hasattr(r | Add service marks to task runner tests | task_runner | dc0f9feb764c72620a68ca139eb56e43f6e5f068 | prefect | test_task_runners.py | 10 | 4 | https://github.com/PrefectHQ/prefect.git | 2 | 33 | 0 | 15 | 60 | Python | {
"docstring": "\n An indirect fixture that expects to receive a pytest fixture that yields a task\n runner.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 15,
"vocab_size": 12
} | def task_runner(request):
if not hasattr(request.param, "_pytestfixturefunction"):
raise TypeError("Received invalid `task_runner` parameter. Expected fixture.")
yield request.getfixturevalue(request.param.__name__)
| |
17,588 | 83,054 | 224 | zerver/tests/test_push_notifications.py | 47 | 18 | def test_get_apns_context(self) -> None:
import zerver.lib.push_notifications
zerver.lib.push_notifications.get_apns_context.cache_clear()
try:
with self.settings(APNS_CERT_FILE="/foo.pem"), mock.patch("aioapns.APNs") as mock_apns:
apns_context = get_apns_co... | test_push_notifications: Close event loops.
Fixes “ResourceWarning: unclosed event loop <_UnixSelectorEventLoop
running=False closed=False debug=False>”.
Signed-off-by: Anders Kaseorg <anders@zulip.com> | test_get_apns_context | 9e70a47f93ad422cadc9d26c656cc8c02e08805e | zulip | test_push_notifications.py | 15 | 17 | https://github.com/zulip/zulip.git | 3 | 92 | 0 | 40 | 161 | Python | {
"docstring": "This test is pretty hacky, and needs to carefully reset the state\n it modifies in order to avoid leaking state that can lead to\n nondeterministic results for other tests.\n ",
"language": "en",
"n_whitespaces": 50,
"n_words": 29,
"vocab_size": 26
} | def test_get_apns_context(self) -> None:
import zerver.lib.push_notifications
zerver.lib.push_notifications.get_apns_context.cache_clear()
try:
with self.settings(APNS_CERT_FILE="/foo.pem"), mock.patch("aioapns.APNs") as mock_apns:
apns_context = get_apns_co... | |
20,791 | 101,376 | 727 | scripts/convert.py | 230 | 23 | def _validate(self) -> None:
if (self._args.writer == "ffmpeg" and
not self._images.is_video and
self._args.reference_video is None):
raise FaceswapError("Output as video selected, but using frames as input. You must "
"provide... | Bugfix: convert - Gif Writer
- Fix non-launch error on Gif Writer
- convert plugins - linting
- convert/fs_media/preview/queue_manager - typing
- Change convert items from dict to Dataclass | _validate | 1022651eb8a7741014f5d2ec7cbfe882120dfa5f | faceswap | convert.py | 14 | 49 | https://github.com/deepfakes/faceswap.git | 15 | 224 | 0 | 125 | 423 | Python | {
"docstring": " Validate the Command Line Options.\n\n Ensure that certain cli selections are valid and won't result in an error. Checks:\n * If frames have been passed in with video output, ensure user supplies reference\n video.\n * If \"on-the-fly\" and a Neural Network mas... | def _validate(self) -> None:
if (self._args.writer == "ffmpeg" and
not self._images.is_video and
self._args.reference_video is None):
raise FaceswapError("Output as video selected, but using frames as input. You must "
"provide... | |
18,646 | 90,218 | 246 | src/sentry/api/base.py | 52 | 18 | def get_authenticators(self) -> List[BaseAuthentication]:
| ref(hybrid-cloud): Additional test annotations: auth_index (#42425)
Extends the hybrid cloud auth service to be usable in many more places (
TY @corps)
Annotate 30+ more api endpoint tests
Co-authored-by: Mike Ihbe <mike.ihbe@sentry.io>
Co-authored-by: Zachary Collins <zachary.collins@sentry.io>
Co-authored-by: Z... | get_authenticators | 17644550024d6a2eb01356ee48ec0d3ef95c043d | sentry | base.py | 16 | 21 | https://github.com/getsentry/sentry.git | 6 | 113 | 0 | 40 | 189 | Python | {
"docstring": "\n Instantiates and returns the list of authenticators that this view can use.\n Aggregates together authenticators that can be supported using HybridCloud.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 21,
"vocab_size": 18
} | def get_authenticators(self) -> List[BaseAuthentication]:
# TODO: Increase test coverage and get this working for monolith mode.
if SiloMode.get_current_mode() == SiloMode.MONOLITH:
return super().get_authenticators()
last_api_authenticator = ApiAuthentication([])
... | |
42,229 | 177,017 | 77 | networkx/algorithms/tests/test_lowest_common_ancestors.py | 21 | 7 | def test_naive_lowest_common_ancestor2(self):
G = nx.DiGraph()
G.add_edge(0, 1)
G.add_edge(2, 0)
G.add_edge(2, 3)
G.add_edge(4, 0)
G.add_edge(5, 2)
assert naive_lca(G, 1, 3) == 2
| Naive lowest common ancestor implementation (#5736)
* Add naive lca methods
* Naive algorithm implementation for LCA
* Modify naive lca functions
* Correct parameters of nx.ancestors
* Update lowest_common_ancestors.py
* Parametrize tests
* Apply suggestions from code review
Co-authored-by: Dan Sc... | test_naive_lowest_common_ancestor2 | b2f91c34a23058dd70b41784af0d87890216026a | networkx | test_lowest_common_ancestors.py | 8 | 8 | https://github.com/networkx/networkx.git | 1 | 64 | 0 | 18 | 100 | Python | {
"docstring": "Test that the one-pair function works for issue #4942.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_naive_lowest_common_ancestor2(self):
G = nx.DiGraph()
G.add_edge(0, 1)
G.add_edge(2, 0)
G.add_edge(2, 3)
G.add_edge(4, 0)
G.add_edge(5, 2)
assert naive_lca(G, 1, 3) == 2
| |
51,111 | 205,388 | 105 | django/db/migrations/utils.py | 29 | 13 | def get_references(state, model_tuple, field_tuple=()):
for state_model_tuple, model_state in state.models.items():
for name, field in model_state.fields.items():
reference = field_references(
state_model_tuple, field, model_tuple, *field_tuple
)
if r... | Refs #33476 -- Reformatted code with Black. | get_references | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | utils.py | 13 | 15 | https://github.com/django/django.git | 4 | 63 | 0 | 22 | 96 | Python | {
"docstring": "\n Generator of (model_state, name, field, reference) referencing\n provided context.\n\n If field_tuple is provided only references to this particular field of\n model_tuple will be generated.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 24,
"vocab_size": 22
} | def get_references(state, model_tuple, field_tuple=()):
for state_model_tuple, model_state in state.models.items():
for name, field in model_state.fields.items():
reference = field_references(
state_model_tuple, field, model_tuple, *field_tuple
)
if r... | |
93,241 | 294,202 | 429 | tests/components/alexa/test_smart_home.py | 181 | 13 | async def test_media_player_eq_bands_not_supported(hass):
device = (
"media_player.test_bands",
"on",
{
"friendly_name": "Test media player",
"supported_features": SUPPORT_SELECT_SOUND_MODE,
"sound_mode": "tv",
"sound_mode_list": ["movie",... | Exclude hidden entities from alexa (#68555) | test_media_player_eq_bands_not_supported | dc8e87a6f70439f9830d93d03c53d6ff098a4861 | core | test_smart_home.py | 12 | 53 | https://github.com/home-assistant/core.git | 1 | 339 | 0 | 72 | 643 | Python | {
"docstring": "Test EqualizerController bands directive not supported.",
"language": "en",
"n_whitespaces": 5,
"n_words": 6,
"vocab_size": 6
} | async def test_media_player_eq_bands_not_supported(hass):
device = (
"media_player.test_bands",
"on",
{
"friendly_name": "Test media player",
"supported_features": SUPPORT_SELECT_SOUND_MODE,
"sound_mode": "tv",
"sound_mode_list": ["movie",... | |
@derived_from(np.linalg) | 36,455 | 155,724 | 219 | dask/array/linalg.py | 118 | 45 | def lstsq(a, b):
q, r = qr(a)
x = solve_triangular(r, q.T.conj().dot(b))
residuals = b - a.dot(x)
residuals = abs(residuals**2).sum(axis=0, keepdims=b.ndim == 1)
token = tokenize(a, b)
# r must be a triangular with single block
# rank
rname = "lstsq-rank-" + token
rdsk = {(rn... | Update `pre-commit` version (#8691) | lstsq | 510bbc380531cbf56a409f1ae68e6fd84a9599e6 | dask | linalg.py | 14 | 22 | https://github.com/dask/dask.git | 1 | 280 | 1 | 85 | 425 | Python | {
"docstring": "\n Return the least-squares solution to a linear matrix equation using\n QR decomposition.\n\n Solves the equation `a x = b` by computing a vector `x` that\n minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may\n be under-, well-, or over- determined (i.e., the number of\... | def lstsq(a, b):
q, r = qr(a)
x = solve_triangular(r, q.T.conj().dot(b))
residuals = b - a.dot(x)
residuals = abs(residuals**2).sum(axis=0, keepdims=b.ndim == 1)
token = tokenize(a, b)
# r must be a triangular with single block
# rank
rname = "lstsq-rank-" + token
rdsk = {(rn... |
78,652 | 266,908 | 36 | test/lib/ansible_test/_internal/docker_util.py | 23 | 9 | def docker_environment(): # type: () -> t.Dict[str, str]
env = common_environment()
env.update(dict((key, os.environ[key] | Support podman-remote in ansible-test (#75753) | docker_environment | 7cb581ed2cb1d4591d094df37a40c9155ea446da | ansible | docker_util.py | 14 | 4 | https://github.com/ansible/ansible.git | 4 | 50 | 0 | 22 | 86 | Python | {
"docstring": "Return a dictionary of docker related environment variables found in the current environment.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def docker_environment(): # type: () -> t.Dict[str, str]
env = common_environment()
env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_') or key.startswith('CONTAINER_')))
return env
| |
23,463 | 109,177 | 64 | lib/matplotlib/tests/test_colorbar.py | 31 | 20 | def test_remove_from_figure(use_gridspec):
fig, ax = plt.subplots()
sc = ax.scatter([1, 2], [3, 4])
| warning when scatter plot color settings discarded (#23516)
* Warning when scatter plot color settings discarded
* Update lib/matplotlib/axes/_axes.py
Co-authored-by: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com>
* Wrapped 23516-MS.rst lines at 80 characters
* Fixed tests to look for proper warn... | test_remove_from_figure | 5d3124dbc826a019bb55b4229312a033912331ff | matplotlib | test_colorbar.py | 11 | 11 | https://github.com/matplotlib/matplotlib.git | 1 | 107 | 0 | 25 | 175 | Python | {
"docstring": "\n Test `remove` with the specified ``use_gridspec`` setting\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | def test_remove_from_figure(use_gridspec):
fig, ax = plt.subplots()
sc = ax.scatter([1, 2], [3, 4])
sc.set_array(np.array([5, 6]))
pre_position = ax.get_position()
cb = fig.colorbar(sc, use_gridspec=use_gridspec)
fig.subplots_adjust()
cb.remove()
fig.subplots_adjust()
post_posit... | |
48,962 | 198,499 | 68 | sympy/multipledispatch/utils.py | 24 | 7 | def groupby(func, seq):
d = {}
for item in seq:
key = func(item)
if key not in d:
d[key] = []
| Code cleanup | groupby | 9d58006fc0a23afcba38f641c9472917c436428a | sympy | utils.py | 11 | 8 | https://github.com/sympy/sympy.git | 3 | 47 | 0 | 19 | 76 | Python | {
"docstring": " Group a collection by a key function\n\n >>> from sympy.multipledispatch.utils import groupby\n >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']\n >>> groupby(len, names) # doctest: +SKIP\n {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}\n\n >>> ise... | def groupby(func, seq):
d = {}
for item in seq:
key = func(item)
if key not in d:
d[key] = []
d[key].append(item)
return d
| |
33,324 | 144,863 | 82 | python/ray/data/dataset.py | 21 | 14 | def input_files(self) -> List[str]:
metadata = self._plan.execute().get_metadata()
files = set()
for m in metadata:
for f in m.input_files:
file | Lay the groundwork for lazy dataset optimization (no behavior changes) (#22233)
This PR refactors Dataset execution to enable lazy mode in the future, which can reduce memory usage in large-scale ingest pipelines. There should be no behavior changes in this PR. Many of the optimizations are also punted for future work... | input_files | 35a157948efa7ba1adf1d1507c2af1d6d84a7db7 | ray | dataset.py | 11 | 15 | https://github.com/ray-project/ray.git | 3 | 52 | 0 | 18 | 86 | Python | {
"docstring": "Return the list of input files for the dataset.\n\n Time complexity: O(num input files)\n\n Returns:\n The list of input files used to create the dataset, or an empty\n list if the input files is not known.\n ",
"language": "en",
"n_whitespaces": 79,
"n... | def input_files(self) -> List[str]:
metadata = self._plan.execute().get_metadata()
files = set()
for m in metadata:
for f in m.input_files:
files.add(f)
return list(files)
| |
29,926 | 133,019 | 33 | python/ray/util/collective/collective_group/nccl_util.py | 17 | 5 | def get_nccl_reduce_op(reduce_op):
if reduce_op not in NCCL_REDUCE_OP_MAP:
| [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | get_nccl_reduce_op | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | nccl_util.py | 12 | 4 | https://github.com/ray-project/ray.git | 2 | 27 | 0 | 16 | 47 | Python | {
"docstring": "Map the reduce op to NCCL reduce op type.\n\n Args:\n reduce_op (ReduceOp): ReduceOp Enum (SUM/PRODUCT/MIN/MAX).\n Returns:\n (nccl.ncclRedOp_t): the mapped NCCL reduce op.\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 22,
"vocab_size": 17
} | def get_nccl_reduce_op(reduce_op):
if reduce_op not in NCCL_REDUCE_OP_MAP:
raise RuntimeError("NCCL does not support reduce op: '{}'.".format(reduce_op))
return NCCL_REDUCE_OP_MAP[reduce_op]
| |
20,644 | 101,224 | 32 | lib/align/detected_face.py | 11 | 4 | def aligned(self) -> AlignedFace:
assert self._aligned is not None
return self._aligned
| lib.align updates:
- alignments.py
- Add typed dicts for imported alignments
- Explicitly check for presence of thumb value in alignments dict
- linting
- detected_face.py
- Typing
- Linting
- Legacy support for pre-aligned face
- Update dependencies to new property names | aligned | 5e73437be47f2410439a3c6716de96354e6a0c94 | faceswap | detected_face.py | 7 | 4 | https://github.com/deepfakes/faceswap.git | 1 | 19 | 0 | 10 | 32 | Python | {
"docstring": " The aligned face connected to this detected face. ",
"language": "en",
"n_whitespaces": 9,
"n_words": 8,
"vocab_size": 8
} | def aligned(self) -> AlignedFace:
assert self._aligned is not None
return self._aligned
| |
8,592 | 45,465 | 159 | airflow/migrations/versions/64a7d6477aae_fix_description_field_in_connection_to_.py | 47 | 14 | def upgrade():
conn = op.get_bind()
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
| Autogenerate migration reference doc (#21601)
* document airflow version in each alembic migration module and use this to autogen the doc
* update each migration module to have the same description used in migration ref (so it can be used in autogen) | upgrade | 69f6f9e01b6df76c3c8fa266d460324163957887 | airflow | 64a7d6477aae_fix_description_field_in_connection_to_.py | 14 | 14 | https://github.com/apache/airflow.git | 3 | 95 | 0 | 41 | 162 | Python | {
"docstring": "Apply Fix description field in ``connection`` to be ``text``",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def upgrade():
conn = op.get_bind()
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
'connection',
'description',
existing_type=sa.String(length=50... | |
12,085 | 60,322 | 121 | code/deep/BJMMD/caffe/python/caffe/test/test_net.py | 43 | 16 | def test_memory(self):
params = sum(map(list, six.itervalues(self.net.params)), [])
blobs = self.net.blobs.values()
del self.net
# now sum every | Balanced joint maximum mean discrepancy for deep transfer learning | test_memory | cc4d0564756ca067516f71718a3d135996525909 | transferlearning | test_net.py | 14 | 9 | https://github.com/jindongwang/transferlearning.git | 3 | 91 | 0 | 35 | 148 | Python | {
"docstring": "Check that holding onto blob data beyond the life of a Net is OK",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 14
} | def test_memory(self):
params = sum(map(list, six.itervalues(self.net.params)), [])
blobs = self.net.blobs.values()
del self.net
# now sum everything (forcing all memory to be read)
total = 0
for p in params:
total += p.data.sum() + p.diff.sum()
... | |
11,587 | 56,927 | 23 | tests/test_flows.py | 9 | 4 | async def test_timeout_stops_execution_in_sync_subflows(self, tmp_path):
canary_file = tmp_path / "canary"
| Ensure flows are called in an interruptible thread (PrefectHQ/orion#2174)
* Ensure flows are called in an interruptible thread
* Set higher runtime limit in `test_timeout_stops_execution_in_sync_subflows` | test_timeout_stops_execution_in_sync_subflows | 336eca7839fccbcbdb77179f352f926da8b1fa15 | prefect | test_flows.py | 8 | 14 | https://github.com/PrefectHQ/prefect.git | 1 | 72 | 0 | 9 | 26 | Python | {
"docstring": "\n Sync flow runs can be cancelled after a timeout once a task is called\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 13
} | async def test_timeout_stops_execution_in_sync_subflows(self, tmp_path):
canary_file = tmp_path / "canary"
| |
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs | 13,970 | 65,649 | 59 | erpnext/controllers/queries.py | 77 | 21 | def get_income_account(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
# income account can be any Credit account,
# but can also be a Asset account with account_type='Income Account' in special circumstances.
# Hence the first condition is an "OR"
if n... | style: format code with black | get_income_account | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | queries.py | 12 | 19 | https://github.com/frappe/erpnext.git | 3 | 94 | 1 | 66 | 177 | Python | {
"docstring": "select tabAccount.name from `tabAccount`\n\t\t\twhere (tabAccount.report_type = \"Profit and Loss\"\n\t\t\t\t\tor tabAccount.account_type in (\"Income Account\", \"Temporary\"))\n\t\t\t\tand tabAccount.is_group=0\n\t\t\t\tand tabAccount.`{key}` LIKE %(txt)s\n\t\t\t\t{condition} {match_condition}\n\t\t... | def get_income_account(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
# income account can be any Credit account,
# but can also be a Asset account with account_type='Income Account' in special circumstances.
# Hence the first condition is an "OR"
if n... |
52,796 | 209,810 | 168 | scapy/arch/windows/__init__.py | 56 | 14 | def win_find_exe(filename, installsubdir=None, env="ProgramFiles"):
# type: (str, Optional[Any], | [Hinty] Core typing: windows (#3684)
* Core typing: windows
Co-authored-by: Pierre <pierre@droids-corp.org> | win_find_exe | a2b7a28faff1db058dd22ce097a268e0ad5d1d33 | scapy | __init__.py | 21 | 13 | https://github.com/secdev/scapy.git | 6 | 93 | 0 | 44 | 156 | Python | {
"docstring": "Find executable in current dir, system path or in the\n given ProgramFiles subdir, and retuen its absolute path.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 18,
"vocab_size": 17
} | def win_find_exe(filename, installsubdir=None, env="ProgramFiles"):
# type: (str, Optional[Any], str) -> str
fns = [filename] if filename.endswith(".exe") else [filename + ".exe", filename] # noqa: E501
for fn in fns:
try:
if installsubdir is None:
path = _where(fn)... | |
16,245 | 74,294 | 85 | wagtail/core/tests/test_page_model.py | 14 | 9 | def test_custom_page_queryset(self):
self.assertIs(type(CustomManagerPage.objects.all()), CustomPageQuerySet)
| Reformat with black | test_custom_page_queryset | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_page_model.py | 14 | 9 | https://github.com/wagtail/wagtail.git | 1 | 82 | 0 | 10 | 135 | Python | {
"docstring": "\n Managers that are constructed from a custom PageQuerySet\n (via PageManager.from_queryset(CustomPageQuerySet)) should return\n querysets of that type\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 16,
"vocab_size": 15
} | def test_custom_page_queryset(self):
self.assertIs(type(CustomManagerPage.objects.all()), CustomPageQuerySet)
self.assertIs(type(CustomManagerPage.objects.about_spam()), CustomPageQuerySet)
self.assertIs(
type(CustomManagerPage.objects.all().about_spam()), CustomPageQuerySet... | |
3,197 | 20,048 | 31 | pipenv/patched/notpip/_vendor/distro.py | 10 | 3 | def distro_release_info(self):
# type: () -> Dict[s | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | distro_release_info | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | distro.py | 6 | 2 | https://github.com/pypa/pipenv.git | 1 | 10 | 0 | 10 | 20 | Python | {
"docstring": "\n Return a dictionary containing key-value pairs for the information\n items from the distro release file data source of the OS\n distribution.\n\n For details, see :func:`distro.distro_release_info`.\n ",
"language": "en",
"n_whitespaces": 61,
"n_words": 25,
def distro_release_info(self):
    # type: () -> Dict[str, str]
    """Return the key-value pairs read from the distro release file.

    The dictionary is pre-parsed and cached on the instance; this is a
    plain accessor. See :func:`distro.distro_release_info` for details.
    """
    info = self._distro_release_info
    return info
| |
3,297 | 20,247 | 20 | pipenv/patched/notpip/_vendor/platformdirs/windows.py | 6 | 7 | def user_documents_dir(self) -> str:
return os.path.normpath(get | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | user_documents_dir | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | windows.py | 10 | 5 | https://github.com/pypa/pipenv.git | 1 | 20 | 0 | 6 | 38 | Python | {
"docstring": "\n :return: documents directory tied to the user e.g. ``%USERPROFILE%\\\\Documents``\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
def user_documents_dir(self) -> str:
    """
    :return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents``
    """
    # Resolve the Windows known folder first, then normalize separators.
    folder = get_win_folder("CSIDL_PERSONAL")
    return os.path.normpath(folder)
| |
32,637 | 142,266 | 384 | python/ray/data/_internal/push_based_shuffle.py | 69 | 12 | def round_robin_reduce_idx_iterator(self):
idx = 0
round_idx = 0
while idx < self.output_num_blocks:
for merge_idx in range(self.num_merge_tasks_per_round):
if merge_idx < self._partitions_with_extra_task:
reduce_idx = merge_idx * (self.me... | [dataset] Pipeline task submission during reduce stage in push-based shuffle (#25795)
Reduce stage in push-based shuffle fails to complete at 100k output partitions or more. This is likely because of driver or raylet load from having too many tasks in flight at once.
We can fix this from ray core too, but for now, ... | round_robin_reduce_idx_iterator | 93aae48b80db80f7e9f922eaabedead0d15ee01c | ray | push_based_shuffle.py | 17 | 21 | https://github.com/ray-project/ray.git | 5 | 103 | 0 | 33 | 168 | Python | {
"docstring": "\n When there are multiple nodes, merge tasks are spread throughout the\n cluster to improve load-balancing. Each merge task produces outputs for\n a contiguous partition of reduce tasks. This method creates an iterator\n that returns reduce task indices round-robin across ... | def round_robin_reduce_idx_iterator(self):
idx = 0
round_idx = 0
while idx < self.output_num_blocks:
for merge_idx in range(self.num_merge_tasks_per_round):
if merge_idx < self._partitions_with_extra_task:
reduce_idx = merge_idx * (self.me... | |
35,261 | 153,106 | 64 | modin/pandas/groupby.py | 21 | 6 | def _check_index_name(self, result):
if self._by is not None:
# pandas does not n | FIX-#3197: do not pass lambdas to the backend in GroupBy (#3373)
Signed-off-by: Dmitry Chigarev <dmitry.chigarev@intel.com> | _check_index_name | 1e65a4afd191cf61ba05b80545d23f9b88962f41 | modin | groupby.py | 10 | 4 | https://github.com/modin-project/modin.git | 2 | 26 | 0 | 20 | 44 | Python | {
"docstring": "\n Check the result of groupby aggregation on the need of resetting index name.\n\n Parameters\n ----------\n result : DataFrame\n Group by aggregation result.\n\n Returns\n -------\n DataFrame\n ",
"language": "en",
"n_whitespaces... | def _check_index_name(self, result):
if self._by is not None:
# pandas does not name the index for this case
result._query_compiler.set_index_name(None)
return result
| |
53,127 | 211,652 | 210 | ppdet/modeling/rbox_utils.py | 136 | 22 | def check_points_in_rotated_boxes(points, boxes):
# [B, N, 5] -> [B, N, 4, 2]
corners = box2corners(boxes)
# [1, L, 2] -> [1, 1, L, 2]
points = points.unsqueeze(0)
# [B, N, 4, 2] -> [B, N, 1, 2]
a, b, c, d = corners.split(4, axis=2)
ab = b - a
ad = d - a
# [B, N, L, 2]
ap = ... | add ppyoloe_r (#7105)
* add ppyoloe_r
* modify code of ops.py
* add ppyoloe_r docs and modify rotate docs
* modify docs and refine connfigs
* fix some problems
* refine docs, add nms_rotated ext_op and fix some problems
* add image and inference_benchmark.py
* modify docs
* fix some problems
... | check_points_in_rotated_boxes | c6c10032924aaf4eb1646a4fd593c17a7e2ecb3b | PaddleDetection | rbox_utils.py | 11 | 14 | https://github.com/PaddlePaddle/PaddleDetection.git | 1 | 142 | 0 | 58 | 229 | Python | {
"docstring": "Check whether point is in rotated boxes\n\n Args:\n points (tensor): (1, L, 2) anchor points\n boxes (tensor): [B, N, 5] gt_bboxes\n eps (float): default 1e-9\n \n Returns:\n is_in_box (tensor): (B, N, L)\n\n ",
"language": "en",
"n_whitespaces": 72,
"n_wo... | def check_points_in_rotated_boxes(points, boxes):
# [B, N, 5] -> [B, N, 4, 2]
corners = box2corners(boxes)
# [1, L, 2] -> [1, 1, L, 2]
points = points.unsqueeze(0)
# [B, N, 4, 2] -> [B, N, 1, 2]
a, b, c, d = corners.split(4, axis=2)
ab = b - a
ad = d - a
# [B, N, L, 2]
ap = ... | |
78,474 | 266,557 | 208 | lib/ansible/modules/git.py | 102 | 30 | def write_ssh_wrapper(module):
try:
# make sure we | Bypass fragile git ssh wrapper (#73404)
git module now uses env vars exclusively
- updated docs to clarify usage
- now env vars append instead of overwrite to allow existing custom setups to keep working
fixes #38104, #64673, #64674
- added note for hostkeychecking more securely
fixes #69846
- ... | write_ssh_wrapper | b493c590bcee9b64e8ae02c17d4fde2331e0598b | ansible | git.py | 15 | 18 | https://github.com/ansible/ansible.git | 3 | 154 | 0 | 83 | 265 | Python | {
"docstring": "\n This writes an shell wrapper for ssh options to be used with git\n this is only relevant for older versions of gitthat cannot\n handle the options themselves. Returns path to the script\n #!/bin/sh\n%s $GIT_SSH_OPTS\n",
"language": "en",
"n_whitespaces": 58,
"n_words":... | def write_ssh_wrapper(module):
try:
# make sure we have full permission to the module_dir, which
# may not be the case if we're sudo'ing to a non-root user
if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK):
fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/... | |
70,092 | 243,723 | 441 | src/PIL/Image.py | 157 | 21 | def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):
if not isinstance(source, (list, tuple)):
msg = "Source must be a tuple"
raise ValueError(msg)
if not isinstance(dest, (list, tuple)):
msg = "Destination must be a tuple"
raise ValueError... | Improve exception traceback readability | alpha_composite | 2ae55ccbdad9c842929fb238ea1eb81d1f999024 | Pillow | Image.py | 11 | 41 | https://github.com/python-pillow/Pillow.git | 9 | 226 | 0 | 80 | 362 | Python | {
"docstring": "'In-place' analog of Image.alpha_composite. Composites an image\n onto this image.\n\n :param im: image to composite over this one\n :param dest: Optional 2 tuple (left, top) specifying the upper\n left corner in this (destination) image.\n :param source: Optional ... | def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):
if not isinstance(source, (list, tuple)):
msg = "Source must be a tuple"
raise ValueError(msg)
if not isinstance(dest, (list, tuple)):
msg = "Destination must be a tuple"
raise ValueError... | |
16,685 | 77,647 | 112 | wagtail/contrib/forms/tests/test_models.py | 28 | 14 | def test_form_field_clean_name_override(self):
field = ExtendedFormField.objects.create(
page=self.form_page,
sort_order=1, | form builder - allow clean_name generation to be overridden
- adds a new class method to AbstractFormField `get_field_clean_name`
- resolves #6903 | test_form_field_clean_name_override | fd5218220e4ccc7697ee18f57356810560e5e718 | wagtail | test_models.py | 10 | 9 | https://github.com/wagtail/wagtail.git | 1 | 47 | 0 | 27 | 78 | Python | {
"docstring": "\n Creating a new field should use the overridden method\n See ExtendedFormField get_field_clean_name method\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 13,
"vocab_size": 12
} | def test_form_field_clean_name_override(self):
field = ExtendedFormField.objects.create(
page=self.form_page,
sort_order=1,
label="quanti ge·là·to?",
field_type="number", # only number fields will add the ID as a prefix to the clean_name
req... | |
@_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC) | 26,695 | 119,830 | 63 | jax/_src/numpy/polynomial.py | 52 | 20 | def polyder(p, m=1):
_check_arraylike("polyder", p)
m = core.concrete_or_error(operator.index, m, "'m' argument of jnp.polyder")
p, = _promote_dtypes_inexact(p)
if m < 0:
raise ValueError("Order of derivative must be positive")
if m == 0:
return p
coeff = (arange(le | lax_numpy: move poly functions into numpy.polynomial | polyder | 603bb3c5ca288674579211e64fa47c6b2b0fb7a6 | jax | polynomial.py | 16 | 10 | https://github.com/google/jax.git | 3 | 104 | 1 | 40 | 190 | Python | {
"docstring": "\\\nSetting trim_leading_zeros=True makes the output match that of numpy.\nBut prevents the function from being able to be used in compiled code.\n",
"language": "en",
"n_whitespaces": 20,
"n_words": 23,
"vocab_size": 22
} | def polyder(p, m=1):
_check_arraylike("polyder", p)
m = core.concrete_or_error(operator.index, m, "'m' argument of jnp.polyder")
p, = _promote_dtypes_inexact(p)
if m < 0:
raise ValueError("Order of derivative must be positive")
if m == 0:
return p
coeff = (arange(len(p), m, -1)[np.newaxis, :] - 1 - ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.