| column | dtype | range |
|---|---|---|
| ast_errors | string | lengths 0–3.2k |
| d_id | int64 | 44–121k |
| id | int64 | 70–338k |
| n_whitespaces | int64 | 3–14k |
| path | string | lengths 8–134 |
| n_words | int64 | 4–4.82k |
| n_identifiers | int64 | 1–131 |
| random_cut | string | lengths 16–15.8k |
| commit_message | string | lengths 2–15.3k |
| fun_name | string | lengths 1–84 |
| commit_id | string | lengths 40–40 |
| repo | string | lengths 3–28 |
| file_name | string | lengths 5–79 |
| ast_levels | int64 | 6–31 |
| nloc | int64 | 1–548 |
| url | string | lengths 31–59 |
| complexity | int64 | 1–66 |
| token_counts | int64 | 6–2.13k |
| n_ast_errors | int64 | 0–28 |
| vocab_size | int64 | 4–1.11k |
| n_ast_nodes | int64 | 15–19.2k |
| language | string | 1 class |
| value | — | — |
| documentation | dict | — |
| code | string | lengths 101–62.2k |

Each record below lists these fields in this order, `|`-separated; multi-line cells (code, commit messages, docstring dicts) span several lines, and `...` marks cell content truncated by the preview.
5,241 | 29,606 | 140 | saleor/plugins/base_plugin.py | 39 | 10 | def _clean_configuration_value(cls, item_type, new_value):
| Fix plugin configuration (#11278)
* Fix updating plugin configuration
* Fix failing tax migration | _clean_configuration_value | eac1ae9cf107b8b0189b8b21ff6668c4131c6a00 | saleor | base_plugin.py | 11 | 10 | https://github.com/saleor/saleor.git | 5 | 48 | 0 | 31 | 80 | Python | {
"docstring": "Clean the value that is saved in plugin configuration.\n\n Change the string provided as boolean into the bool value.\n Return None for Output type, as it's read only field.\n ",
"language": "en",
"n_whitespaces": 50,
"n_words": 29,
"vocab_size": 26
} | def _clean_configuration_value(cls, item_type, new_value):
if (
item_type == ConfigurationTypeField.BOOLEAN
and new_value
and not isinstance(new_value, bool)
):
new_value = new_value.lower() == "true"
if item_type == ConfigurationTypeField... | |
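The docstring and code in this record show the whole pattern: coerce the string `"true"`/`"false"` that a form submits into a real boolean. A standalone sketch of that idea (the `BOOLEAN` constant stands in for Saleor's `ConfigurationTypeField.BOOLEAN`):

```python
# Sketch only: BOOLEAN stands in for Saleor's ConfigurationTypeField.BOOLEAN.
BOOLEAN = "boolean"

def clean_configuration_value(item_type, new_value):
    # Coerce form-submitted strings like "True"/"false" into real booleans.
    if item_type == BOOLEAN and new_value and not isinstance(new_value, bool):
        new_value = new_value.lower() == "true"
    return new_value

assert clean_configuration_value(BOOLEAN, "True") is True
assert clean_configuration_value(BOOLEAN, "false") is False
```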
12,025 | 60,232 | 62 | code/deep/BJMMD/caffe/python/caffe/coord_map.py | 47 | 3 | def coord_map_from_to(top_from, top_to):
# We need to find a common ancestor of top_from and top_to.
# We'll assume that all ancestors are equivalent here (otherwise the graph
# is an inconsistent state (which we could improve this to check for) | Balanced joint maximum mean discrepancy for deep transfer learning | coord_map_from_to | cc4d0564756ca067516f71718a3d135996525909 | transferlearning | coord_map.py | 6 | 28 | https://github.com/jindongwang/transferlearning.git | 8 | 177 | 0 | 42 | 19 | Python | {
"docstring": "\n Determine the coordinate mapping betweeen a top (from) and a top (to).\n Walk the graph to find a common ancestor while composing the coord maps for\n from and to until they meet. As a last step the from map is inverted.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 41,
... | def coord_map_from_to(top_from, top_to):
# We need to find a common ancestor of top_from and top_to.
# We'll assume that all ancestors are equivalent here (otherwise the graph
# is an inconsistent state (which we could improve this to check for)).
# For now use a brute-force algorithm.
| |
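The `coord_map_from_to` docstring above describes composing coordinate maps along the graph and inverting the `from` map at the end. As a generic illustration of the underlying arithmetic (not Caffe's exact data layout), 1-D affine maps `y = a*x + b` compose and invert like this:

```python
def compose(outer, inner):
    # Apply `inner` first, then `outer`: a1*(a2*x + b2) + b1.
    a1, b1 = outer
    a2, b2 = inner
    return a1 * a2, a1 * b2 + b1

def inverse(m):
    # Solve y = a*x + b for x: x = (1/a)*y - b/a.
    a, b = m
    return 1.0 / a, -b / a

downsample = (2.0, 0.5)  # e.g. a stride-2 layer with a half-pixel shift
assert compose(inverse(downsample), downsample) == (1.0, 0.0)
```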
13,985 | 65,678 | 19 | erpnext/controllers/stock_controller.py | 31 | 16 | def get_conditions_to_validate_future_sle(sl_entries):
warehouse_items_map = {}
for entry in sl_entries:
if entry.warehouse not in warehouse_ite | style: format code with black | get_conditions_to_validate_future_sle | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | stock_controller.py | 16 | 13 | https://github.com/frappe/erpnext.git | 4 | 69 | 0 | 25 | 155 | Python | {
"docstring": "warehouse = {frappe.db.escape(warehouse)}\n\t\t\t\tand item_code in ({', '.join(frappe.db.escape(item) for item in items)})",
"language": "en",
"n_whitespaces": 10,
"n_words": 12,
"vocab_size": 11
} | def get_conditions_to_validate_future_sle(sl_entries):
warehouse_items_map = {}
for entry in sl_entries:
if entry.warehouse not in warehouse_items_map:
warehouse_items_map[entry.warehouse] = set()
warehouse_items_map[entry.warehouse].add(entry.item_code)
or_conditions = []
for warehouse, items in warehouse... | |
77,180 | 262,317 | 324 | TTS/tts/datasets/__init__.py | 118 | 23 | def split_dataset(items, eval_split_max_size=None, eval_split_size=0.01):
speakers = [item["speaker_name"] for item in items]
is_multi_speaker = len(set(speakers)) > 1
if eval_split_size > 1:
eval_split_size = int(eval_split_size)
else:
if eval_split_max_size:
eval_split... | Make style and lint | split_dataset | 1425a023fe4bc6bda8578295aeeeb02af78cc082 | TTS | __init__.py | 18 | 30 | https://github.com/coqui-ai/TTS.git | 8 | 219 | 0 | 82 | 347 | Python | {
"docstring": "Split a dataset into train and eval. Consider speaker distribution in multi-speaker training.\n\n Args:\n <<<<<<< HEAD\n items (List[List]):\n A list of samples. Each sample is a list of `[audio_path, text, speaker_id]`.\n\n eval_split_max_size (int):\n ... | def split_dataset(items, eval_split_max_size=None, eval_split_size=0.01):
speakers = [item["speaker_name"] for item in items]
is_multi_speaker = len(set(speakers)) > 1
if eval_split_size > 1:
eval_split_size = int(eval_split_size)
else:
if eval_split_max_size:
eval_split... | |
71,979 | 247,891 | 273 | tests/rest/admin/test_media.py | 67 | 21 | def test_quarantine_media(self) -> None:
media_info = self.get_success(self.store.get_local_media(self.media_id))
assert media_info is not None
self.assertFalse(media_info["quarantined_by"])
# quarantining
channel = self.make_request(
"POST",
se... | Add type hints for `tests/unittest.py`. (#12347)
In particular, add type hints for get_success and friends, which are then helpful in a bunch of places. | test_quarantine_media | f0b03186d96305fd44d74a89bf4230beec0c5c31 | synapse | test_media.py | 11 | 27 | https://github.com/matrix-org/synapse.git | 1 | 215 | 0 | 33 | 340 | Python | {
"docstring": "\n Tests that quarantining and remove from quarantine a media is successfully\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 11
} | def test_quarantine_media(self) -> None:
media_info = self.get_success(self.store.get_local_media(self.media_id))
assert media_info is not None
self.assertFalse(media_info["quarantined_by"])
# quarantining
channel = self.make_request(
"POST",
se... | |
41,794 | 176,254 | 804 | networkx/algorithms/community/modularity_max.py | 250 | 29 | def naive_greedy_modularity_communities(G, resolution=1, weight=None):
r
# First create one community for each node
communities = list(frozenset([u]) for u in G.nodes())
# Track merges
merges = []
# Greedily merge communities until no improvement is possible
old_modularity = None
new_mod... | Add weights to karate club graph (#5285)
Add weights to the karate_club_graph.
Modifies `non_randomness` and `naive_greedy_modularity_communities` to
accept a `weight` parameter and modifies tests that use the kcg accordingly
Co-authored-by: Kevin Berry <kevin.berry@worthix.com>
Co-authored-by: Dan Schult <dschu... | naive_greedy_modularity_communities | 290ebce534b84f9db20ec58b98cbb170e65a0ba1 | networkx | modularity_max.py | 19 | 80 | https://github.com/networkx/networkx.git | 16 | 301 | 0 | 136 | 472 | Python | {
"docstring": "Find communities in G using greedy modularity maximization.\n\n This implementation is O(n^4), much slower than alternatives, but it is\n provided as an easy-to-understand reference implementation.\n\n Greedy modularity maximization begins with each node in its own community\n and joins th... | def naive_greedy_modularity_communities(G, resolution=1, weight=None):
r
# First create one community for each node
communities = list(frozenset([u]) for u in G.nodes())
# Track merges
merges = []
# Greedily merge communities until no improvement is possible
old_modularity = None
new_mod... | |
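`naive_greedy_modularity_communities` is public NetworkX API, so the behavior in this record's docstring can be exercised directly; the exact partition printed may vary across NetworkX versions:

```python
import networkx as nx
from networkx.algorithms.community import naive_greedy_modularity_communities

G = nx.karate_club_graph()  # ships with edge weights, per the commit above
communities = naive_greedy_modularity_communities(G, weight="weight")
print([sorted(c) for c in communities])
```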
7,717 | 42,747 | 871 | airflow/providers/microsoft/psrp/hooks/psrp.py | 167 | 43 | def invoke(self) -> Generator[PowerShell, None, None]:
logger = copy(self.log)
logger.setLevel(self._logging_level)
local_context = self._conn is None
if local_context:
self.__enter__()
try:
assert self._conn is not None
ps = PowerShel... | Ensure @contextmanager decorates generator func (#23103) | invoke | e58985598f202395098e15b686aec33645a906ff | airflow | psrp.py | 19 | 45 | https://github.com/apache/airflow.git | 11 | 264 | 0 | 116 | 420 | Python | {
"docstring": "\n Context manager that yields a PowerShell object to which commands can be\n added. Upon exit, the commands will be invoked.\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 20,
"vocab_size": 18
} | def invoke(self) -> Generator[PowerShell, None, None]:
logger = copy(self.log)
logger.setLevel(self._logging_level)
local_context = self._conn is None
if local_context:
self.__enter__()
try:
assert self._conn is not None
ps = PowerShel... | |
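A hedged usage sketch for the context manager this docstring describes: the connection id is hypothetical, `PsrpHook` is the hook class in this module, and `add_script` is a method of pypsrp's `PowerShell` object:

```python
from airflow.providers.microsoft.psrp.hooks.psrp import PsrpHook

hook = PsrpHook(psrp_conn_id="my_psrp_conn")  # hypothetical connection id
with hook.invoke() as ps:
    ps.add_script("Get-ChildItem C:\\Temp")
# On exit the queued commands are invoked, per the docstring above.
```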
117,210 | 320,536 | 38 | src/documents/tests/test_task_signals.py | 10 | 12 | def util_call_before_task_publish_handler(self, headers_to_use, body_to_use):
self.assertEqual(PaperlessTask.objects.all().count(), 0)
before_task_publish_handler(headers=headers_to_use, body=body_to_use)
| Switches task serialization over to pickle format | util_call_before_task_publish_handler | 97d6503fefc5737028637c39a2c1f33dd1e12904 | paperless-ngx | test_task_signals.py | 12 | 4 | https://github.com/paperless-ngx/paperless-ngx.git | 1 | 56 | 0 | 9 | 90 | Python | {
"docstring": "\n Simple utility to call the pre-run handle and ensure it created a single task\n instance\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 15
} | def util_call_before_task_publish_handler(self, headers_to_use, body_to_use):
self.assertEqual(PaperlessTask.objects.all().count(), 0)
before_task_publish_handler(headers=headers_to_use, body=body_to_use)
self.assertEqual(PaperlessTask.objects.all().count(), 1)
| |
15,807 | 71,963 | 84 | wagtail/admin/tests/test_edit_handlers.py | 21 | 15 | def test_form(self):
form = self.EventPageForm(instance=self.event_page)
self.assertIn("comments", form.formsets)
comments_formset = form.formsets["comments"]
self.assertEqual(len(comments_formset.forms), 1)
self.asse | Reformat with black | test_form | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_edit_handlers.py | 10 | 9 | https://github.com/wagtail/wagtail.git | 1 | 109 | 0 | 18 | 174 | Python | {
"docstring": "\n Check that the form has the comments/replies formsets, and that the\n user has been set on each CommentForm/CommentReplyForm subclass\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 15
} | def test_form(self):
form = self.EventPageForm(instance=self.event_page)
self.assertIn("comments", form.formsets)
comments_formset = form.formsets["comments"]
self.assertEqual(len(comments_formset.forms), 1)
self.assertEqual(comments_formset.forms[0].user, self.comment... | |
7,629 | 42,569 | 180 | nltk/corpus/reader/wordnet.py | 38 | 15 | def _doc(self, doc_type, default, lang="eng"):
corpus = self._wordnet_corpus_reader
if lang not in corpus.langs():
return None
elif lang == "eng":
return default
else:
corpus._load_lang_data(lang)
of = corpus.ss2of(self)
... | Fix wordnet's all_synsets() function (#3078)
* Fix all_synsets() function
* Add simple regression tests for #3077
* Add suggestions by @tomaarsen
Co-authored-by: Tom Aarsen <Cubiegamedev@gmail.com> | _doc | 3ca43e26efd7d5aa37b3cd79446258d8bfa79561 | nltk | wordnet.py | 14 | 14 | https://github.com/nltk/nltk.git | 4 | 94 | 0 | 27 | 151 | Python | {
"docstring": "Helper method for Synset.definition and Synset.examples",
"language": "en",
"n_whitespaces": 5,
"n_words": 6,
"vocab_size": 6
} | def _doc(self, doc_type, default, lang="eng"):
corpus = self._wordnet_corpus_reader
if lang not in corpus.langs():
return None
elif lang == "eng":
return default
else:
corpus._load_lang_data(lang)
of = corpus.ss2of(self)
... | |
39,392 | 163,184 | 221 | pandas/core/arrays/categorical.py | 73 | 30 | def map(self, mapper):
new_categories = self.categories.map(mapper)
try:
return self.from_codes(
self._codes.copy(), categories=new_categories, ordered=self.ordered
)
except ValueE | DOC: Improve doc summaries in series.rst (#45237) | map | 521259299f7829da667ba39302ec77acedde9e5e | pandas | categorical.py | 15 | 10 | https://github.com/pandas-dev/pandas.git | 3 | 85 | 0 | 57 | 216 | Python | {
"docstring": "\n Map categories using an input mapping or function.\n\n Maps the categories to new categories. If the mapping correspondence is\n one-to-one the result is a :class:`~pandas.Categorical` which has the\n same order property as the original, otherwise a :class:`~pandas.Index... | def map(self, mapper):
new_categories = self.categories.map(mapper)
try:
return self.from_codes(
self._codes.copy(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -... | |
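`Categorical.map` is public pandas API, so the one-to-one vs. collapsing behavior described in that docstring is easy to show:

```python
import pandas as pd

cat = pd.Categorical(["a", "b", "c"])
print(cat.map(lambda x: x.upper()))  # stays Categorical: mapping is one-to-one
print(cat.map(lambda x: 1))          # categories collide, so an Index comes back
```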
36,295 | 155,204 | 91 | modin/experimental/core/execution/unidist/implementations/pandas_on_unidist/io/io.py | 26 | 11 | def to_pickle_distributed(cls, qc, **kwargs):
if not (
isinstance(kwargs["filepath_or_buffer"], str)
and "*" in kwargs["filepath_or_buffer"]
) or not isinstance(qc, PandasQueryCompiler):
warnings.warn("Defaulting to Modin core implementation")
ret... | FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059)
Signed-off-by: Igoshev, Iaroslav <iaroslav.igoshev@intel.com> | to_pickle_distributed | 193505fdf0c984743397ba3df56262f30aee13a8 | modin | io.py | 13 | 12 | https://github.com/modin-project/modin.git | 4 | 93 | 0 | 25 | 95 | Python | {
"docstring": "\n When `*` in the filename all partitions are written to their own separate file.\n\n The filenames is determined as follows:\n - if `*` in the filename then it will be replaced by the increasing sequence 0, 1, 2, …\n - if `*` is not the filename, then will be used default... | def to_pickle_distributed(cls, qc, **kwargs):
if not (
isinstance(kwargs["filepath_or_buffer"], str)
and "*" in kwargs["filepath_or_buffer"]
) or not isinstance(qc, PandasQueryCompiler):
warnings.warn("Defaulting to Modin core implementation")
ret... | |
51,080 | 205,304 | 42 | django/db/migrations/loader.py | 10 | 8 | def project_state(self, nodes=None, at_end=True):
return self.graph.make_state(
nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps
)
| Refs #33476 -- Reformatted code with Black. | project_state | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | loader.py | 9 | 4 | https://github.com/django/django.git | 1 | 35 | 0 | 10 | 53 | Python | {
"docstring": "\n Return a ProjectState object representing the most recent state\n that the loaded migrations represent.\n\n See graph.make_state() for the meaning of \"nodes\" and \"at_end\".\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 23,
"vocab_size": 21
} | def project_state(self, nodes=None, at_end=True):
return self.graph.make_state(
nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps
)
| |
39,282 | 162,744 | 375 | research/neo_peq/legacy_frequency_response.py | 125 | 26 | def center(self, frequency=1000):
equal_energy_fr = self.__class__(name='equal_energy', frequency=self.frequency.copy(), raw=self.raw.copy())
equal_energy_fr.interpolate()
interpolator = InterpolatedUnivariateSpline(np.log10(equal_energy_fr.frequency), equal_energy_fr.raw, k=1)
... | Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. Bug fixes. | center | 9120cdffe618c6c2ff16fe6a311b6a1367efdbc8 | AutoEq | legacy_frequency_response.py | 15 | 22 | https://github.com/jaakkopasanen/AutoEq.git | 7 | 225 | 0 | 87 | 353 | Python | {
"docstring": "Removed bias from frequency response.\n\n Args:\n frequency: Frequency which is set to 0 dB. If this is a list with two values then an average between the two\n frequencies is set to 0 dB.\n\n Returns:\n Gain shifted\n ",
"language": "... | def center(self, frequency=1000):
equal_energy_fr = self.__class__(name='equal_energy', frequency=self.frequency.copy(), raw=self.raw.copy())
equal_energy_fr.interpolate()
interpolator = InterpolatedUnivariateSpline(np.log10(equal_energy_fr.frequency), equal_energy_fr.raw, k=1)
... | |
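The `center` method above interpolates the response on a log-frequency axis and subtracts the gain at the target frequency; a toy version of that arithmetic with made-up data:

```python
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

freq = np.array([20.0, 200.0, 1000.0, 20000.0])  # Hz (illustrative)
raw = np.array([3.0, 4.0, 2.0, 5.0])             # dB (illustrative)

interpolator = InterpolatedUnivariateSpline(np.log10(freq), raw, k=1)
centered = raw - interpolator(np.log10(1000.0))  # gain at 1 kHz becomes 0 dB
```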
51,569 | 206,570 | 98 | django/utils/cache.py | 51 | 9 | def _i18n_cache_key_suffix(request, cache_key):
if settings.USE_I18N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active lan | Refs #33476 -- Reformatted code with Black. | _i18n_cache_key_suffix | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | cache.py | 13 | 6 | https://github.com/django/django.git | 3 | 41 | 0 | 38 | 76 | Python | {
"docstring": "If necessary, add the current locale or time zone to the cache key.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 12
} | def _i18n_cache_key_suffix(request, cache_key):
if settings.USE_I18N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += ".%s" %... | |
arff_file = BytesIO(
textwrap.dedent(
""" | 76,098 | 260,158 | 37 | sklearn/datasets/tests/test_arff_parser.py | 9 | 9 | def test_pandas_arff_parser_strip_double_quotes(parser_func):
pd = | FIX make pandas and liac arff parser quoting behaviour closer (#23497)
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
Co-authored-by: Loïc Estève <loic.esteve@ymail.com> | test_pandas_arff_parser_strip_double_quotes | 8515b486810e844bc7f5f1a4fb2227405d46871e | scikit-learn | test_arff_parser.py | 9 | 54 | https://github.com/scikit-learn/scikit-learn.git | 1 | 186 | 1 | 8 | 39 | Python | {
"docstring": "Check that we properly strip double quotes from the data.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def test_pandas_arff_parser_strip_double_quotes(parser_func):
pd = pytest.importorskip("pandas")
arff_file = BytesIO(
textwrap.dedent(
|
3,369 | 20,440 | 1,512 | pipenv/patched/notpip/_vendor/pygments/lexer.py | 193 | 30 | def get_tokens_unprocessed(self, text=None, context=None):
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = c... | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | get_tokens_unprocessed | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | lexer.py | 24 | 56 | https://github.com/pypa/pipenv.git | 20 | 373 | 0 | 108 | 609 | Python | {
"docstring": "\n Split ``text`` into (tokentype, text) pairs.\n If ``context`` is given, use this lexer context instead.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 15
} | def get_tokens_unprocessed(self, text=None, context=None):
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = c... | |
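`get_tokens_unprocessed` is public Pygments API; note that despite the docstring's "(tokentype, text) pairs" wording, it yields `(index, tokentype, value)` triples, where `index` is the character offset:

```python
from pygments.lexers import PythonLexer

for index, tokentype, value in PythonLexer().get_tokens_unprocessed("x = 1\n"):
    print(index, tokentype, repr(value))
```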
30,625 | 135,458 | 230 | rllib/core/rl_module/torch/tests/test_torch_marl_module.py | 67 | 16 | def get_policy_data_from_agent_data(agent_data, policy_map_fn):
policy_data = {}
for agent_id, data in agent_data.items():
policy_id = policy_map_fn(agent_id)
policy_data.setdefault(policy_id, {})
policy_data[policy_id].setdefault("agent_id", [])
if data["obs"].ndim == 1:
... | [RLlib] MARLModule, RLModule PR 4/N (N=4) (#29449)
Signed-off-by: Kourosh Hakhamaneshi <kourosh@anyscale.com> | get_policy_data_from_agent_data | 30058267363b8de16b809c987bb1f7d7befad24d | ray | test_torch_marl_module.py | 16 | 21 | https://github.com/ray-project/ray.git | 8 | 182 | 0 | 47 | 291 | Python | {
"docstring": "Utility function to get policy data from agent data and policy map function.\n\n It also keeps track of agent_id for each row so that we can retreive the agent\n level information after the forward pass.\n\n Returns:\n dict of module_id to module data\n ",
"language": "en",
"n_w... | def get_policy_data_from_agent_data(agent_data, policy_map_fn):
policy_data = {}
for agent_id, data in agent_data.items():
policy_id = policy_map_fn(agent_id)
policy_data.setdefault(policy_id, {})
policy_data[policy_id].setdefault("agent_id", [])
if data["obs"].ndim == 1:
... | |
44,390 | 183,911 | 73 | src/textual/widgets/_data_table.py | 23 | 13 | def _update_dimensions(self) -> None:
total_width = sum(column.width for column in self.columns)
s | docstring name change | _update_dimensions | c3dcc529b3aa0b168728b3315cfe973218d09685 | textual | _data_table.py | 12 | 7 | https://github.com/Textualize/textual.git | 3 | 50 | 0 | 22 | 78 | Python | {
"docstring": "Called to recalculate the virtual (scrollable) size.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def _update_dimensions(self) -> None:
total_width = sum(column.width for column in self.columns)
self.virtual_size = Size(
total_width,
len(self._y_offsets) + (self.header_height if self.show_header else 0),
)
| |
50,666 | 204,168 | 49 | django/contrib/messages/storage/base.py | 17 | 7 | def _store(self, messages, response, *args, **kwargs):
raise NotImplementedError(
"subclasses of BaseStorage mu | Refs #33476 -- Reformatted code with Black. | _store | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | base.py | 8 | 4 | https://github.com/django/django.git | 1 | 21 | 0 | 17 | 35 | Python | {
"docstring": "\n Store a list of messages and return a list of any messages which could\n not be stored.\n\n One type of object must be able to be stored, ``Message``.\n\n **This method must be implemented by a subclass.**\n ",
"language": "en",
"n_whitespaces": 72,
"n_words... | def _store(self, messages, response, *args, **kwargs):
raise NotImplementedError(
"subclasses of BaseStorage must provide a _store() method"
)
| |
72,812 | 249,309 | 86 | tests/rest/admin/test_event_reports.py | 18 | 13 | def test_from_is_negative(self) -> None:
channel = self.make_request(
"GET",
self.url + "?from=-5",
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.jso | Use literals in place of `HTTPStatus` constants in tests (#13488)
* Use literals in place of `HTTPStatus` constants in tests
* newsfile
* code style
* code style | test_from_is_negative | 2281427175e4c93a30c39607fb4ac23c2a1f399f | synapse | test_event_reports.py | 10 | 11 | https://github.com/matrix-org/synapse.git | 1 | 60 | 0 | 18 | 97 | Python | {
"docstring": "\n Testing that a negative from parameter returns a 400\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 8
} | def test_from_is_negative(self) -> None:
channel = self.make_request(
"GET",
self.url + "?from=-5",
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.... | |
45,340 | 186,112 | 8 | tests/test_binding_inheritance.py | 5 | 1 | async def test_focused_child_widget_no_inherit_empty_bindings_with_movement_bindings_on_screen() -> None:
| Add test for focused widget, no inherit, empty BINDINGS
Testing the overlap between #1343 and #1351. | test_focused_child_widget_no_inherit_empty_bindings_with_movement_bindings_on_screen | e8c87ced33ccac893121e3cc0fb1097b0d8da035 | textual | test_binding_inheritance.py | 6 | 5 | https://github.com/Textualize/textual.git | 2 | 53 | 0 | 5 | 16 | Python | {
"docstring": "A focused child widget, that doesn't inherit bindings and sets BINDINGS empty, with movement bindings in the screen, should trigger screen actions.",
"language": "en",
"n_whitespaces": 21,
"n_words": 22,
"vocab_size": 21
} | async def test_focused_child_widget_no_inherit_empty_bindings_with_movement_bindings_on_screen() -> None:
| |
55,486 | 218,798 | 1,039 | python3.10.4/Lib/lib2to3/pgen2/parse.py | 220 | 28 | def addtoken(self, type, value, context):
# Map from token to label
ilabel = self.classify(type, value, context)
# Loop until the token is shifted; may raise exceptions
while True:
dfa, state, node = self.stack[-1]
states, first = dfa
arcs = s... | add python 3.10.4 for windows | addtoken | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | parse.py | 19 | 33 | https://github.com/XX-net/XX-Net.git | 10 | 232 | 0 | 123 | 365 | Python | {
"docstring": "Add a token; return True iff this is the end of the program.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 12
} | def addtoken(self, type, value, context):
# Map from token to label
ilabel = self.classify(type, value, context)
# Loop until the token is shifted; may raise exceptions
while True:
dfa, state, node = self.stack[-1]
states, first = dfa
arcs = s... | |
8,055 | 43,771 | 464 | airflow/settings.py | 115 | 28 | def import_local_settings():
try:
import airflow_local_settings
if hasattr(airflow_local_settings, "__all__"):
for i in airflow_local_settings.__all__:
globals()[i] = getattr(airflow_local_settings, i)
else:
for k, v in airflow_local_settings.__d... | Speed up creation of DagRun for large DAGs (5k+ tasks) by 25-130% (#20722)
* Speed up creation of DagRun for large DAGs (5k+ tasks) by 15-40%
This uses the "bulk" operation API of SQLAlchemy to get a big speed
up. Due to the `task_instance_mutation_hook` we still need to keep
actual TaskInstance objects around.
... | import_local_settings | f2039b4c9e15b514661d4facbd710791fe0a2ef4 | airflow | settings.py | 18 | 34 | https://github.com/apache/airflow.git | 11 | 191 | 0 | 83 | 336 | Python | {
"docstring": "Import airflow_local_settings.py files to allow overriding any configs in settings.py file",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def import_local_settings():
try:
import airflow_local_settings
if hasattr(airflow_local_settings, "__all__"):
for i in airflow_local_settings.__all__:
globals()[i] = getattr(airflow_local_settings, i)
else:
for k, v in airflow_local_settings.__d... | |
47,444 | 195,857 | 190 | sympy/functions/elementary/complexes.py | 75 | 13 | def unpolarify(eq, subs=None, exponents_only=False):
if isinstance(eq, bool):
return eq
eq = sympify(eq)
if subs is not None:
return unpolarify(eq.subs(subs))
changed = True
pause = False
if exponents_only:
pause = True
while changed:
changed = False
... | Improved documentation formatting | unpolarify | cda8dfe6f45dc5ed394c2f5cda706cd6c729f713 | sympy | complexes.py | 11 | 19 | https://github.com/sympy/sympy.git | 7 | 116 | 0 | 46 | 184 | Python | {
"docstring": "\n If `p` denotes the projection from the Riemann surface of the logarithm to\n the complex line, return a simplified version `eq'` of `eq` such that\n `p(eq') = p(eq)`.\n Also apply the substitution subs in the end. (This is a convenience, since\n ``unpolarify``, in a certain sense, un... | def unpolarify(eq, subs=None, exponents_only=False):
if isinstance(eq, bool):
return eq
eq = sympify(eq)
if subs is not None:
return unpolarify(eq.subs(subs))
changed = True
pause = False
if exponents_only:
pause = True
while changed:
changed = False
... | |
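`unpolarify` and `exp_polar` are public SymPy API, so the projection described in that docstring can be demonstrated:

```python
from sympy import I, exp_polar, pi, unpolarify

print(unpolarify(exp_polar(2*I*pi)))  # 1: a full polar turn projects to 1
print(unpolarify(exp_polar(I*pi)))    # -1
```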
21,170 | 101,766 | 34 | plugins/extract/_base.py | 9 | 4 | def check_and_raise_error(self) -> None:
for thread in self._th | Extract: Typing and standardization | check_and_raise_error | 765e385177bda9b9e99951492ef33b34b4e4773e | faceswap | _base.py | 9 | 7 | https://github.com/deepfakes/faceswap.git | 2 | 20 | 0 | 9 | 35 | Python | {
"docstring": " Check all threads for errors\n\n Exposed for :mod:`~plugins.extract.pipeline` to check plugin's threads for errors\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 10
} | def check_and_raise_error(self) -> None:
for thread in self._threads:
thread.check_and_raise_error()
| |
92,777 | 293,721 | 54 | homeassistant/components/recorder/pool.py | 15 | 9 | def recorder_or_dbworker(self) -> bool:
thread_name = threading.current_thread().name
return bool(
thread_name == "Recorder" or thread_name.startswith(DB_WORKER_PREFIX)
)
| Use a dedicated executor pool for database operations (#68105)
Co-authored-by: Erik Montnemery <erik@montnemery.com>
Co-authored-by: Franck Nijhof <git@frenck.dev> | recorder_or_dbworker | bc862e97ed68cce8c437327651f85892787e755e | core | pool.py | 10 | 6 | https://github.com/home-assistant/core.git | 2 | 31 | 0 | 14 | 55 | Python | {
"docstring": "Check if the thread is a recorder or dbworker thread.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def recorder_or_dbworker(self) -> bool:
thread_name = threading.current_thread().name
return bool(
thread_name == "Recorder" or thread_name.startswith(DB_WORKER_PREFIX)
)
| |
120,843 | 335,973 | 112 | scripts/convert_ldm_original_checkpoint_to_diffusers.py | 44 | 9 | def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
mapping = []
for old_item in old_list:
new_item = old_item.replace('in_layers.0', 'norm1')
| LDM conversion script (#92)
Conversion script
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> | renew_resnet_paths | 87060e6a9c7754b648e621175b4d73161e82906e | diffusers | convert_ldm_original_checkpoint_to_diffusers.py | 12 | 12 | https://github.com/huggingface/diffusers.git | 2 | 105 | 0 | 30 | 189 | Python | {
"docstring": "\n Updates paths inside resnets to the new naming scheme (local renaming)\n ",
"language": "en",
"n_whitespaces": 18,
"n_words": 11,
"vocab_size": 11
} | def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
mapping = []
for old_item in old_list:
new_item = old_item.replace('in_layers.0', 'norm1')
new_item = new_item.replace('in_layers.2', 'conv1')
new_item = new_item.replace('out_layers.0', 'norm2')
new_item = new_it... | |
82,808 | 278,998 | 113 | keras/utils/metrics_utils.py | 42 | 13 | def _assert_splits_match(nested_splits_lists):
error_msg = (
"Inputs must have identical ragged splits. "
f"Input received: {nested_splits_lists}"
)
for splits_list in nested_splits_lists:
if len(splits_list) != len(nested_splits_lists[0]):
rais | Remove pylint comments.
PiperOrigin-RevId: 452353044 | _assert_splits_match | 3613c3defc39c236fb1592c4f7ba1a9cc887343a | keras | metrics_utils.py | 11 | 13 | https://github.com/keras-team/keras.git | 5 | 78 | 0 | 37 | 124 | Python | {
"docstring": "Checks that the given splits lists are identical.\n\n Performs static tests to ensure that the given splits lists are identical,\n and returns a list of control dependency op tensors that check that they are\n fully identical.\n\n Args:\n nested_splits_lists: A list of nested_splits_l... | def _assert_splits_match(nested_splits_lists):
error_msg = (
"Inputs must have identical ragged splits. "
f"Input received: {nested_splits_lists}"
)
for splits_list in nested_splits_lists:
if len(splits_list) != len(nested_splits_lists[0]):
raise ValueError(error_msg... | |
21,280 | 101,898 | 137 | lib/gui/display_command.py | 38 | 16 | def _iteration_limit_callback(self, *args) -> None:
try:
limit = self.vars["display_iterations"].get()
except tk.TclError:
| Typing - lib.gui.display_command | _iteration_limit_callback | dab823a3eb7a5257cb1e0818ee10ed234d3de97f | faceswap | display_command.py | 12 | 11 | https://github.com/deepfakes/faceswap.git | 3 | 62 | 0 | 36 | 105 | Python | {
"docstring": " Limit the amount of data displayed in the live graph on a iteration slider\n variable change. ",
"language": "en",
"n_whitespaces": 24,
"n_words": 16,
"vocab_size": 15
} | def _iteration_limit_callback(self, *args) -> None:
try:
limit = self.vars["display_iterations"].get()
except tk.TclError:
# Don't update when there is no value in the variable
return
logger.debug("Updating graph iteration limit: (new_value: %s, args:... | |
9,138 | 47,512 | 290 | tests/jobs/test_scheduler_job.py | 88 | 43 | def test_queued_dagruns_stops_creating_when_max_active_is_reached(self, dag_maker):
with dag_maker(max_active_runs=10) as dag:
EmptyOperator(task_id='mytask')
session = settings.Session()
self.scheduler_job = SchedulerJob(subdir=os.devnull)
self.scheduler_job.execut... | Replace usage of `DummyOperator` with `EmptyOperator` (#22974)
* Replace usage of `DummyOperator` with `EmptyOperator` | test_queued_dagruns_stops_creating_when_max_active_is_reached | 49e336ae0302b386a2f47269a6d13988382d975f | airflow | test_scheduler_job.py | 13 | 26 | https://github.com/apache/airflow.git | 4 | 278 | 0 | 48 | 448 | Python | {
"docstring": "This tests that queued dagruns stops creating once max_active_runs is reached",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def test_queued_dagruns_stops_creating_when_max_active_is_reached(self, dag_maker):
with dag_maker(max_active_runs=10) as dag:
EmptyOperator(task_id='mytask')
session = settings.Session()
self.scheduler_job = SchedulerJob(subdir=os.devnull)
self.scheduler_job.execut... | |
44,334 | 183,781 | 32 | tests/test_xterm_parser.py | 17 | 7 | def test_escape_sequence_resulting_in_multiple_keypresses(parser):
events = list(parser.feed("\x1b[2;4~"))
assert len(events) == 2
assert events[0].key == "escape"
| Backtracking unknown escape sequences, various tests for XTermParser | test_escape_sequence_resulting_in_multiple_keypresses | bfb962bacf274373e5706090cd854b6aa0857270 | textual | test_xterm_parser.py | 11 | 5 | https://github.com/Textualize/textual.git | 1 | 42 | 0 | 13 | 75 | Python | {
"docstring": "Some sequences are interpreted as more than 1 keypress",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_escape_sequence_resulting_in_multiple_keypresses(parser):
events = list(parser.feed("\x1b[2;4~"))
assert len(events) == 2
assert events[0].key == "escape"
assert events[1].key == "shift+insert"
| |
51,057 | 205,271 | 516 | django/db/migrations/autodetector.py | 85 | 33 | def generate_altered_options(self):
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys,
self.kept_unmanaged_keys,
# unmanaged converted to managed
self.old_unmanaged_keys & self.new_model_keys,
# managed converted to unmanaged
... | Refs #33476 -- Reformatted code with Black. | generate_altered_options | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | autodetector.py | 14 | 31 | https://github.com/django/django.git | 7 | 165 | 0 | 52 | 248 | Python | {
"docstring": "\n Work out if any non-schema-affecting options have changed and make an\n operation to represent them in state changes (in case Python code in\n migrations needs them).\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 26,
"vocab_size": 25
} | def generate_altered_options(self):
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys,
self.kept_unmanaged_keys,
# unmanaged converted to managed
self.old_unmanaged_keys & self.new_model_keys,
# managed converted to unmanaged
... | |
50,315 | 203,341 | 250 | django/contrib/admin/checks.py | 42 | 16 | def _check_readonly_fields(self, obj):
if obj.readonly_fields == ():
return []
elif not isinstance(obj.readonly_fields, (list, tuple)):
return must_be(
"a list o | Refs #33476 -- Reformatted code with Black. | _check_readonly_fields | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | checks.py | 16 | 16 | https://github.com/django/django.git | 4 | 85 | 0 | 37 | 137 | Python | {
"docstring": "Check that readonly_fields refers to proper attribute or field.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _check_readonly_fields(self, obj):
if obj.readonly_fields == ():
return []
elif not isinstance(obj.readonly_fields, (list, tuple)):
return must_be(
"a list or tuple", option="readonly_fields", obj=obj, id="admin.E034"
)
else:
... | |
@register.simple_tag(takes_context=True) | 15,652 | 71,268 | 139 | wagtail/admin/templatetags/wagtailadmin_tags.py | 61 | 16 | def querystring(context, **kwargs):
request = context["request"]
querydict = request.GET.copy()
# Can't do querydict.update(kwargs), because QueryDict.update() appends to
# the list of values, instead of replacing the values.
for key, value in kwar | Reformat with black | querystring | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | wagtailadmin_tags.py | 13 | 9 | https://github.com/wagtail/wagtail.git | 3 | 67 | 1 | 46 | 132 | Python | {
"docstring": "\n Print out the current querystring. Any keyword arguments to this template\n tag will be added to the querystring before it is printed out.\n\n <a href=\"/page/{% querystring key='value' %}\">\n\n Will result in something like:\n\n <a href=\"/page/?foo=bar&key=value\">\n ",... | def querystring(context, **kwargs):
request = context["request"]
querydict = request.GET.copy()
# Can't do querydict.update(kwargs), because QueryDict.update() appends to
# the list of values, instead of replacing the values.
for key, value in kwargs.items():
if value is None:
... |
38,133 | 159,111 | 234 | rasa/graph_components/validators/finetuning_validator.py | 66 | 26 | def _get_fingerprint_of_schema_without_irrelevant_keys(self) -> Text:
graph_schema = self._execution_context.graph_schema
schema_as_dict = graph_schema.as_dict()
for node_name, node_dict in schema_as_dict["nodes"].items():
config_copy = copy.deepcopy(node_dict["config"])
... | Update dependencies in 3.0 to align with rasa-sdk (#10667)
* align dependencies
* use black 21.7b0
* apply black and docstring reformatting
* add changelog | _get_fingerprint_of_schema_without_irrelevant_keys | 36eb9c9a5fcca2160e54a6cde5076c93db5bd70b | rasa | finetuning_validator.py | 13 | 23 | https://github.com/RasaHQ/rasa.git | 5 | 129 | 0 | 52 | 217 | Python | {
"docstring": "Returns a fingerprint of the given schema with certain items removed.\n\n These items include specifications that do not influence actual training\n results such as \"eager\" mode. The only configuration (in your config) that is\n allowed to change is the number of `epochs`.\n\n ... | def _get_fingerprint_of_schema_without_irrelevant_keys(self) -> Text:
graph_schema = self._execution_context.graph_schema
schema_as_dict = graph_schema.as_dict()
for node_name, node_dict in schema_as_dict["nodes"].items():
config_copy = copy.deepcopy(node_dict["config"])
... | |
77,454 | 263,829 | 18 | PyInstaller/utils/hooks/gi.py | 9 | 6 | def get_gi_typelibs(module, version):
module_info = GiModuleInfo(module, version)
return module_info.collect_ | hooks: refactor GObject introspection (gi) hooks
The modules imported from gi.repository are marked as runtime
modules by their corresponding pre-safe-import-module hooks.
Therefore, their standard hooks are always loaded and executed,
regardless of whether the module is actually importable or not.
In PyInstaller v5, ... | get_gi_typelibs | 684bfac8adcf254fec5777f212c13eb62181f900 | pyinstaller | gi.py | 8 | 3 | https://github.com/pyinstaller/pyinstaller.git | 1 | 22 | 0 | 9 | 37 | Python | {
"docstring": "\n Return a tuple of (binaries, datas, hiddenimports) to be used by PyGObject related hooks. Searches for and adds\n dependencies recursively.\n\n :param module: GI module name, as passed to 'gi.require_version()'\n :param version: GI module version, as passed to 'gi.require_version()'\n ... | def get_gi_typelibs(module, version):
module_info = GiModuleInfo(module, version)
return module_info.collect_typelib_data()
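The helper's docstring above says it returns `(binaries, datas, hiddenimports)`; a hook that consumes it might look like this (module/version values are illustrative, and the import path follows the record's file path):

```python
# hook-gi.repository.Gtk.py (illustrative)
from PyInstaller.utils.hooks.gi import get_gi_typelibs

binaries, datas, hiddenimports = get_gi_typelibs("Gtk", "3.0")
```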
| |
7,785 | 43,002 | 21 | airflow/www/security.py | 7 | 7 | def _sync_dag_view_permissions(self, dag_id, access_control):
dag | Fix permission issue for dag that has dot in name (#23510)
How we determine if a DAG is a subdag in airflow.security.permissions.resource_name_for_dag is not right.
If a dag_id contains a dot, the permission is not recorded correctly.
The current solution makes a query every time we check for permission for dags t... | _sync_dag_view_permissions | cc35fcaf89eeff3d89e18088c2e68f01f8baad56 | airflow | security.py | 8 | 26 | https://github.com/apache/airflow.git | 7 | 116 | 0 | 7 | 30 | Python | {
"docstring": "\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. {'can_read'})\n ",
"la... | def _sync_dag_view_permissions(self, dag_id, access_control):
dag_resource_name = permissions.resource_name_for_dag(dag_id)
| |
40,788 | 172,199 | 39 | pandas/tests/util/test_assert_series_equal.py | 20 | 16 | def test_series_equal_datetime_values_mismatch(rtol):
msg =
s1 = Series(pd.date_range("2018-01-01", periods=3, freq="D"))
s2 = Series(pd.date_range("2019-02-02", periods=3, freq="D"))
with pytest.raises(AssertionError, match=msg):
tm.a | ENH: Include column for ea comparison in asserters (#50323)
* ENH: Include column for ea comparison in asserters
* Add gh ref
* Fix test
* Add gh ref
* Split tests | test_series_equal_datetime_values_mismatch | 07b363ea8eee184df30b54bfae9acd04511e1cda | pandas | test_assert_series_equal.py | 12 | 11 | https://github.com/pandas-dev/pandas.git | 1 | 70 | 0 | 16 | 131 | Python | {
"docstring": "Series are different\n\nSeries values are different \\\\(100.0 %\\\\)\n\\\\[index\\\\]: \\\\[0, 1, 2\\\\]\n\\\\[left\\\\]: \\\\[1514764800000000000, 1514851200000000000, 1514937600000000000\\\\]\n\\\\[right\\\\]: \\\\[1549065600000000000, 1549152000000000000, 1549238400000000000\\\\]",
"language": ... | def test_series_equal_datetime_values_mismatch(rtol):
msg =
s1 = Series(pd.date_range("2018-01-01", periods=3, freq="D"))
s2 = Series(pd.date_range("2019-02-02", periods=3, freq="D"))
with pytest.raises(AssertionError, match=msg):
tm.assert_series_equal(s1, s2, rtol=rtol)
| |
54,344 | 216,038 | 132 | tests/pytests/functional/pillar/test_gpg.py | 73 | 16 | def test_decrypt_pillar_invalid_renderer(salt_master, grains, pillar_homedir):
opts = salt_master.config.copy()
opts["decrypt_pillar"] = [{"secrets:vault": "gpg"}]
opts["dec | Add tests for gpg decryption failure option
Test that:
1. Pillar registers an error when `gpg_decrypt_must_succeed` is `True` and decryption fails
2. The GPG renderer fails silently when `gpg_decrypt_must_succeed` is `False`
Also mock `__opts__["gpg_decrypt_must_succeed"]` for gpg renderer unit pytests. | test_decrypt_pillar_invalid_renderer | b856d3225ef1003cbe94499dc8bd82efffabb661 | salt | test_gpg.py | 10 | 17 | https://github.com/saltstack/salt.git | 1 | 185 | 0 | 56 | 346 | Python | {
"docstring": "\n Test decryption using a renderer which is not permitted. It should\n fail, leaving the encrypted keys intact, and add an error to the pillar\n dictionary.\n\n decrypt_pillar_default: foo\n decrypt_pillar_renderers:\n - foo\n - bar\n decrypt_pillar:\n ... | def test_decrypt_pillar_invalid_renderer(salt_master, grains, pillar_homedir):
opts = salt_master.config.copy()
opts["decrypt_pillar"] = [{"secrets:vault": "gpg"}]
opts["decrypt_pillar_default"] = "foo"
opts["decrypt_pillar_renderers"] = ["foo", "bar"]
pillar_obj = salt.pillar.Pillar(opts, grai... | |
@DeveloperAPI | 27,515 | 124,104 | 49 | python/ray/tune/trainable/session.py | 14 | 9 | def get_trial_name():
warnings.warn(
_deprecation_msg,
DeprecationWarning,
)
_session = get_session()
if _session:
return _session.trial_name
| [air] update documentation to use `session.report` (#26051)
Update documentation to use `session.report`.
Next steps:
1. Update our internal caller to use `session.report`. Most importantly, CheckpointManager and DataParallelTrainer.
2. Update `get_trial_resources` to use PGF notions to incorporate the requiremen... | get_trial_name | ac831fded416381ad3c7fe2ba135eaa4aaab5879 | ray | session.py | 8 | 8 | https://github.com/ray-project/ray.git | 2 | 26 | 1 | 14 | 49 | Python | {
"docstring": "Trial name for the corresponding trial.\n\n For function API use only.\n ",
"language": "en",
"n_whitespaces": 17,
"n_words": 11,
"vocab_size": 11
} | def get_trial_name():
warnings.warn(
_deprecation_msg,
DeprecationWarning,
)
_session = get_session()
if _session:
return _session.trial_name
@DeveloperAPI |
36,580 | 156,139 | 48 | dask/utils.py | 23 | 14 | def get_scheduler_lock(collection=None, scheduler=None):
from dask import multiprocessing
from dask.base import get_scheduler
actual_get = get_scheduler(collections=[collection], scheduler=scheduler)
if actual_get == multiprocessing.get:
return multiprocessing.get_context().Manager(). | absolufy-imports - No relative - PEP8 (#8796)
Conversation in https://github.com/dask/distributed/issues/5889 | get_scheduler_lock | cccb9d8d8e33a891396b1275c2448c352ef40c27 | dask | utils.py | 13 | 7 | https://github.com/dask/dask.git | 2 | 61 | 0 | 19 | 100 | Python | {
"docstring": "Get an instance of the appropriate lock for a certain situation based on\n scheduler used.",
"language": "en",
"n_whitespaces": 17,
"n_words": 15,
"vocab_size": 15
} | def get_scheduler_lock(collection=None, scheduler=None):
from dask import multiprocessing
from dask.base import get_scheduler
actual_get = get_scheduler(collections=[collection], scheduler=scheduler)
if actual_get == multiprocessing.get:
return multiprocessing.get_context().Manager().Lock... | |
80,868 | 271,851 | 150 | keras/engine/training_utils_v1.py | 58 | 16 | def verify_dataset_shuffled(x):
assert isinstance(x, tf.data.Dataset)
graph_def = get_dataset_graph_def(x)
for node in graph_def.node:
if node.op.startswith("ShuffleDataset"):
return True
# Also check graph_def.library.function for ds.interleave or ds.flat_map
for function i... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | verify_dataset_shuffled | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | training_utils_v1.py | 12 | 15 | https://github.com/keras-team/keras.git | 6 | 79 | 0 | 45 | 134 | Python | {
"docstring": "Verifies that the dataset is shuffled.\n\n Args:\n x: Dataset passed as an input to the model.\n\n Returns:\n boolean, whether the input dataset is shuffled or not.\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 26,
"vocab_size": 21
} | def verify_dataset_shuffled(x):
assert isinstance(x, tf.data.Dataset)
graph_def = get_dataset_graph_def(x)
for node in graph_def.node:
if node.op.startswith("ShuffleDataset"):
return True
# Also check graph_def.library.function for ds.interleave or ds.flat_map
for function i... | |
@log_start_end(log=logger) | 84,165 | 282,485 | 39 | gamestonk_terminal/cryptocurrency/due_diligence/binance_model.py | 18 | 12 | def get_binance_available_quotes_for_each_coin() -> dict:
| Global plot styles (#1228)
* Add default stylesheets
* Add terminal style helper class and global style initialization in cfg
* Style comments and docstrings
* Load rich terminal theme from config file
* Add application chart styles to candle charts
* Add todos
* Remove explicit color setting for som... | get_binance_available_quotes_for_each_coin | e1b6022b9cf156ffc0697d0d25a5ed2772ea8d68 | OpenBBTerminal | binance_model.py | 12 | 15 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 2 | 40 | 1 | 16 | 82 | Python | {
"docstring": "Helper methods that for every coin available on Binance add all quote assets. [Source: Binance]\n\n Returns\n -------\n dict:\n All quote assets for given coin\n {'ETH' : ['BTC', 'USDT' ...], 'UNI' : ['ETH', 'BTC','BUSD', ...]\n\n ",
"language": "en",
"n_whitespaces": 60,... | def get_binance_available_quotes_for_each_coin() -> dict:
trading_pairs = _get_trading_pairs()
results = defaultdict(list)
for pair in trading_pairs:
results[pair["baseAsset"]].append(pair["quoteAsset"])
return results
@log_start_end(log=logger) |
14,688 | 67,965 | 54 | erpnext/stock/stock_ledger.py | 73 | 17 | def update_qty_in_future_sle(args, allow_negative_stock=False):
datetime_limit_condition = ""
qty_shift = args.actual_qty
# find difference/shift in qty caused by stock reconciliation
if args.voucher_type == "Stock Reconciliation":
qty_shift = get_stock_reco_qty_shift(args)
# find the next nearest stock reco... | style: format code with black | update_qty_in_future_sle | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | stock_ledger.py | 10 | 31 | https://github.com/frappe/erpnext.git | 3 | 80 | 0 | 59 | 136 | Python | {
"docstring": "Recalculate Qty after Transaction in future SLEs based on current SLE.\n\t\tupdate `tabStock Ledger Entry`\n\t\tset qty_after_transaction = qty_after_transaction + {qty_shift}\n\t\twhere\n\t\t\titem_code = %(item_code)s\n\t\t\tand warehouse = %(warehouse)s\n\t\t\tand voucher_no != %(voucher_no)s\n\t\t... | def update_qty_in_future_sle(args, allow_negative_stock=False):
datetime_limit_condition = ""
qty_shift = args.actual_qty
# find difference/shift in qty caused by stock reconciliation
if args.voucher_type == "Stock Reconciliation":
qty_shift = get_stock_reco_qty_shift(args)
# find the next nearest stock reco... | |
50,273 | 203,245 | 80 | django/templatetags/tz.py | 40 | 9 | def get_current_timezone_tag(parser, token):
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
a | Refs #33476 -- Refactored problematic code before reformatting by Black.
In these cases Black produces unexpected results, e.g.
def make_random_password(
self,
length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789',
):
or
cursor.execute("""
SELECT ...
""",
... | get_current_timezone_tag | c5cd8783825b5f6384417dac5f3889b4210b7d08 | django | tz.py | 11 | 7 | https://github.com/django/django.git | 3 | 47 | 0 | 38 | 81 | Python | {
"docstring": "\n Store the name of the current time zone in the context.\n\n Usage::\n\n {% get_current_timezone as TIME_ZONE %}\n\n This will fetch the currently active time zone and put its name\n into the ``TIME_ZONE`` context variable.\n ",
"language": "en",
"n_whitespaces": 57,
"n_w... | def get_current_timezone_tag(parser, token):
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError(
"'get_current_timezone' requires 'as variable'... | |
@register_agent("fake_report") | 47,106 | 194,834 | 365 | tests/test_train_model.py | 55 | 25 | def test_save_multiple_world_logs_mutator(self):
with testing_utils.tempdir() as tmpdir:
log_report = os.path.join(tmpdir, 'world_logs.jsonl')
multitask = 'integration_tests:mutators=flatt | Fixes train_model worldlogging for multitask with mutators. (#4414)
* Fixes train_model worldlogging for multitask with mutators.
* Fix bug in train_model when evaltask doesn't match task. | test_save_multiple_world_logs_mutator | d6773a0b4acf1027dc9b68342a1d84344f1a0d95 | ParlAI | test_train_model.py | 14 | 21 | https://github.com/facebookresearch/ParlAI.git | 2 | 113 | 1 | 47 | 207 | Python | {
"docstring": "\n Test that we can save multiple world_logs from train model on multiple tasks\n with mutators present.\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 15
} | def test_save_multiple_world_logs_mutator(self):
with testing_utils.tempdir() as tmpdir:
log_report = os.path.join(tmpdir, 'world_logs.jsonl')
multitask = 'integration_tests:mutators=flatten,integration_tests:ReverseTeacher:mutator=reverse'
valid, test = testing_util... |
32,293 | 141,204 | 151 | python/ray/tune/tests/test_trial_relative_logdir.py | 36 | 19 | def testDotsInLogdir(self):
local_dir_path = Path("/tmp/test_ | [tune] Relative logdir paths in trials for ExperimentAnalysis in remote buckets (#25063)
When running an experiment for example in the cloud and syncing to a bucket the logdir path in the trials will be changed when working with the checkpoints in the bucket. There are some workarounds, but the easier solution is to a... | testDotsInLogdir | 2a5d322e705df080e9254c9c9a3e187c1ea41c4e | ray | test_trial_relative_logdir.py | 14 | 13 | https://github.com/ray-project/ray.git | 4 | 100 | 0 | 22 | 179 | Python | {
"docstring": "This should result in errors as dots in paths are not allowed.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def testDotsInLogdir(self):
local_dir_path = Path("/tmp/test_rel_dots")
local_dir = str(local_dir_path)
if local_dir_path.exists():
local_dir = tempfile.mkdtemp(prefix=str(local_dir_path) + "_")
trial = Trial(trainable_name="rel_logdir", local_dir=local_dir)
... | |
@override_settings(WAGTAILIMAGES_IMAGE_MODEL="tests.CustomImage") | 16,361 | 75,124 | 155 | wagtail/images/tests/test_admin_views.py | 40 | 25 | def test_delete_post(self):
# Send request
response = self.client.post(
reverse("wagtailimages:delete_multiple", args=(self.ima | Reformat with black | test_delete_post | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_admin_views.py | 14 | 12 | https://github.com/wagtail/wagtail.git | 1 | 128 | 1 | 33 | 232 | Python | {
"docstring": "\n This tests that a POST request to the delete view deletes the image\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 12
} | def test_delete_post(self):
# Send request
response = self.client.post(
reverse("wagtailimages:delete_multiple", args=(self.image.id,))
)
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "applica... |
46,030 | 189,389 | 329 | tests/utils/GraphicalUnitTester.py | 106 | 31 | def _show_diff_helper(self, frame_data, expected_frame_data):
import matplotlib.gridspec as gridspec # type: ignore
import matplotlib.pyplot as plt
gs = gridspec.GridSpec(2, 2)
fig = plt.figure()
fig.suptitle(f"Test for {str(self.scene | Added MyPy Support (#1972)
* MyPy Support
* MyPy Hook
* Removing MyPy Hook
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Delete __init__.pyi
* Delete color.pyi
* Update .mypy.ini
Co-authored-by: Christopher Besch <christopher.besch@gmx.... | _show_diff_helper | c4217731e08470d5a56cf02cf76cae01c03fb78f | manim | GraphicalUnitTester.py | 14 | 28 | https://github.com/ManimCommunity/manim.git | 1 | 240 | 0 | 69 | 407 | Python | {
"docstring": "Will visually display with matplotlib differences between frame generated and the one expected.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def _show_diff_helper(self, frame_data, expected_frame_data):
import matplotlib.gridspec as gridspec # type: ignore
import matplotlib.pyplot as plt
gs = gridspec.GridSpec(2, 2)
fig = plt.figure()
fig.suptitle(f"Test for {str(self.scene).replace('Test', '')}", fontsize=... | |
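The diff helper above lays out its panels with `GridSpec`; a plausible skeleton of a 2×2 arrangement, minus the test-specific data (the exact panel placement is an assumption):

```python
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt

gs = gridspec.GridSpec(2, 2)
fig = plt.figure()
ax_frame = fig.add_subplot(gs[0, 0])     # frame generated
ax_expected = fig.add_subplot(gs[0, 1])  # frame expected
ax_diff = fig.add_subplot(gs[1, :])      # difference, spanning the bottom row
```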
17,326 | 82,188 | 84 | awx/main/scheduler/task_manager_models.py | 30 | 8 | def consume_capacity(self, task):
if self.is_container_gr | Add max concurrent jobs and max forks per ig
The intention of this feature is primarily to provide some notion of max
capacity of container groups, but the logic I've left generic. Default
is 0, which will be interpereted as no maximum number of jobs or forks.
Includes refactor of variable and method names for clarit... | consume_capacity | 86856f242aec6051c1cace683fe1761c0775babb | awx | task_manager_models.py | 11 | 6 | https://github.com/ansible/awx.git | 2 | 32 | 0 | 28 | 56 | Python | {
"docstring": "We only consume capacity on an instance group level if it is a container group. Otherwise we consume capacity on an instance level.",
"language": "en",
"n_whitespaces": 22,
"n_words": 23,
"vocab_size": 18
} | def consume_capacity(self, task):
if self.is_container_group:
self.container_group_jobs += 1
self.container_group_consumed_forks += task.task_impact
else:
raise RuntimeError("We only track capacity for container groups at the instance group level. Otherwise, ... | |
31,216 | 137,681 | 25 | python/ray/util/spark/utils.py | 9 | 8 | def get_avail_mem_per_ray_worker_node(spark, object_store_memory_per_node):
num_cpus_per_spark_task = int(
spark.sparkContext.getConf().get("spark.task.cpus", "1")
)
| Ray on spark implementation (#28771)
REP: ray-project/enhancements#14 | get_avail_mem_per_ray_worker_node | e76ccee69aaa7583be1a9d81cf7b2aa72cf25647 | ray | utils.py | 13 | 20 | https://github.com/ray-project/ray.git | 2 | 83 | 0 | 9 | 49 | Python | {
"docstring": "\n Return the available heap memory and object store memory for each ray worker.\n NB: We have one ray node per spark task.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 22,
"vocab_size": 20
} | def get_avail_mem_per_ray_worker_node(spark, object_store_memory_per_node):
num_cpus_per_spark_task = int(
spark.sparkContext.getConf().get("spark.task.cpus", "1")
)
| |
3,751 | 21,285 | 214 | pipenv/patched/notpip/_internal/metadata/importlib/_dists.py | 44 | 10 | def _iter_egg_info_dependencies(self) -> Iterable[str]:
for entry i | Vendor in pip 22.1.2 | _iter_egg_info_dependencies | c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | pipenv | _dists.py | 16 | 26 | https://github.com/pypa/pipenv.git | 7 | 69 | 0 | 30 | 161 | Python | {
"docstring": "Get distribution dependencies from the egg-info directory.\n\n To ease parsing, this converts a legacy dependency entry into a PEP 508\n requirement string. Like ``_iter_requires_txt_entries()``, there is code\n in ``importlib.metadata`` that does mostly the same, but not do exact... | def _iter_egg_info_dependencies(self) -> Iterable[str]:
for entry in self._iter_requires_txt_entries():
if entry.extra and entry.marker:
marker = f'({entry.marker}) and extra == "{safe_extra(entry.extra)}"'
elif entry.extra:
marker = f'extra == "{... | |
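
The docstring in the row above describes turning a legacy `requires.txt` entry into a PEP 508 requirement string. A minimal standalone sketch of that marker composition (the helper name is hypothetical; the real code also normalizes the extra via `safe_extra`):

```python
from typing import Optional

def legacy_entry_to_pep508(req: str, extra: Optional[str], marker: Optional[str]) -> str:
    # Mirror the branching above: an extra and a marker are AND-ed together,
    # a bare extra becomes an `extra == "..."` clause, and a bare marker
    # passes through unchanged.
    if extra and marker:
        cond = f'({marker}) and extra == "{extra}"'
    elif extra:
        cond = f'extra == "{extra}"'
    else:
        cond = marker or ""
    return f"{req} ; {cond}" if cond else req

print(legacy_entry_to_pep508("requests", "socks", 'python_version < "3.10"'))
# requests ; (python_version < "3.10") and extra == "socks"
```
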
55,071 | 218,009 | 29 | python3.10.4/Lib/imp.py | 9 | 7 | def cache_from_source(path, debug_override=None):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return util.cache_from_source(path, debug_override)
| add python 3.10.4 for windows | cache_from_source | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | imp.py | 10 | 4 | https://github.com/XX-net/XX-Net.git | 1 | 32 | 0 | 9 | 57 | Python | {
"docstring": "**DEPRECATED**\n\n Given the path to a .py file, return the path to its .pyc file.\n\n The .py file does not need to exist; this simply returns the path to the\n .pyc file calculated as if the .py file were imported.\n\n If debug_override is not None, then it must be a boolean and is used ... | def cache_from_source(path, debug_override=None):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return util.cache_from_source(path, debug_override)
| |
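
The deprecated `imp.cache_from_source` shown above simply delegates to `importlib.util.cache_from_source`, which can be called directly. The path here is an arbitrary example and, per the docstring, need not exist:

```python
import importlib.util

pyc = importlib.util.cache_from_source("pkg/module.py")
print(pyc)  # e.g. pkg/__pycache__/module.cpython-310.pyc (tag depends on the interpreter)
```
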
54,998 | 217,895 | 287 | python3.10.4/Lib/http/server.py | 112 | 19 | def _url_collapse_path(path):
# Query componen | add python 3.10.4 for windows | _url_collapse_path | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | server.py | 14 | 25 | https://github.com/XX-net/XX-Net.git | 10 | 151 | 0 | 71 | 281 | Python | {
"docstring": "\n Given a URL path, remove extra '/'s and '.' path elements and collapse\n any '..' references and returns a collapsed path.\n\n Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.\n The utility of this function is limited to is_cgi method and helps\n preventing s... | def _url_collapse_path(path):
# Query component should not be involved.
path, _, query = path.partition('?')
path = urllib.parse.unquote(path)
# Similar to os.path.split(os.path.normpath(path)) but specific to URL
# path semantics rather than local operating system semantics.
path_parts = ... | |
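
The collapsing behaviour this docstring describes can be approximated with `posixpath.normpath`. A hedged sketch, not the stdlib's exact logic (the real function has extra CGI-specific handling and rejects paths that escape the root):

```python
import posixpath
from urllib.parse import unquote

def collapse_url_path(path: str) -> str:
    # Drop the query component, unquote, then resolve '.' and '..'
    # segments with POSIX (URL-like) semantics.
    path, _, _query = path.partition("?")
    collapsed = posixpath.normpath(unquote(path))
    # normpath strips a trailing slash, but directory-ness matters for URLs.
    if path.endswith("/") and not collapsed.endswith("/"):
        collapsed += "/"
    return collapsed

print(collapse_url_path("/a/b/../c/./d?x=1"))  # /a/c/d
```
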
40,402 | 169,224 | 72 | pandas/core/arrays/sparse/accessor.py | 26 | 16 | def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False):
from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo
A, rows, columns = sparse_series_to_coo(
self._parent, row_levels, column_levels, sort_labels=sort_labels
)
retu... | TYP: type all arguments with bool default values (#48624)
* TYP: type all arguments with bool default values
* bool_t
* ignore type error in pandas/core/arrays/sparse/accessor.py | to_coo | 5c66e65d7b9fef47ccb585ce2fd0b3ea18dc82ea | pandas | accessor.py | 9 | 80 | https://github.com/pandas-dev/pandas.git | 1 | 64 | 0 | 22 | 89 | Python | {
"docstring": "\n Create a scipy.sparse.coo_matrix from a Series with MultiIndex.\n\n Use row_levels and column_levels to determine the row and column\n coordinates respectively. row_levels and column_levels are the names\n (labels) or numbers of the levels. {row_levels, column_levels} mu... | def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False):
from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo
A, rows, columns = sparse_series_to_coo(
self._parent, row_levels, column_levels, sort_labels=sort_labels
)
retu... | |
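
Usage of the public accessor this method backs, adapted from the pandas documentation (requires scipy):

```python
import numpy as np
import pandas as pd

s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
s.index = pd.MultiIndex.from_tuples(
    [(1, 2, "a", 0), (1, 2, "a", 1), (1, 1, "b", 0),
     (1, 1, "b", 1), (2, 1, "b", 0), (2, 1, "b", 1)],
    names=["A", "B", "C", "D"],
)
ss = s.astype("Sparse")

# Rows come from levels A and B, columns from levels C and D.
A, rows, columns = ss.sparse.to_coo(
    row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
)
print(A.todense())
```
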
47,255 | 195,349 | 171 | projects/bb3/agents/r2c2_bb3_agent.py | 28 | 9 | def _get_memory_heuristic_values(self) -> Dict[str, Union[str, float, bool]]:
return {
'ignore_in_session_memories': self.opt.get(
'ignore_in_session_memories_mkm', False
),
'memory_overlap_threshold': self.opt.get('memory_overlap_threshold', 0.0),
... | [BB3] Memory Heuristics (#4770)
* memory heuristics
* small changes
* address comments
* fix config
* reqs | _get_memory_heuristic_values | 58b6977a9cb45a91d78aabdc3c5538f873829a9f | ParlAI | r2c2_bb3_agent.py | 10 | 16 | https://github.com/facebookresearch/ParlAI.git | 1 | 79 | 0 | 24 | 123 | Python | {
"docstring": "\n Extract heuristics from self.opt.\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 4,
"vocab_size": 4
} | def _get_memory_heuristic_values(self) -> Dict[str, Union[str, float, bool]]:
return {
'ignore_in_session_memories': self.opt.get(
'ignore_in_session_memories_mkm', False
),
'memory_overlap_threshold': self.opt.get('memory_overlap_threshold', 0.0),
... | |
78,293 | 266,099 | 13 | netbox/extras/templatetags/plugins.py | 7 | 4 | def plugin_list_buttons(context, model):
return _ | 4751 Enable plugins to inject content within object list views (#10901)
* 4751 add plugin buttons to list templates
* 4751 add plugin buttons to list templates
* 4751 add documentation
* 4751 fix object reference
* 4751 update docs | plugin_list_buttons | 27bf7b4a9add27b4f3f8b0f4fd5dfc4cfe74a65b | netbox | plugins.py | 8 | 2 | https://github.com/netbox-community/netbox.git | 1 | 17 | 0 | 7 | 29 | Python | {
"docstring": "\n Render all list buttons registered by plugins\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | def plugin_list_buttons(context, model):
return _get_registered_content(model, 'list_buttons', context)
| |
10,052 | 50,215 | 157 | modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/droppath.py | 73 | 16 | def drop_path(self, inputs):
# if prob is 0 or eval mode, return original input
if self.drop_prob == 0. or not self.training:
return inputs
| add disco_diffusion_ernievil_base | drop_path | ffcde21305c61d950a9f93e57e6180c9a9665b87 | PaddleHub | droppath.py | 11 | 10 | https://github.com/PaddlePaddle/PaddleHub.git | 3 | 101 | 0 | 48 | 162 | Python | {
"docstring": "drop path op\n Args:\n input: tensor with arbitrary shape\n drop_prob: float number of drop path probability, default: 0.0\n training: bool, if current mode is training, default: False\n Returns:\n output: output tensor after drop path\n ... | def drop_path(self, inputs):
# if prob is 0 or eval mode, return original input
if self.drop_prob == 0. or not self.training:
return inputs
keep_prob = 1 - self.drop_prob
keep_prob = paddle.to_tensor(keep_prob, dtype='float32')
shape = (inputs.shape[0], ) + (... | |
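
The docstring describes stochastic depth ("drop path"): whole samples are zeroed during training and the survivors rescaled. A framework-agnostic NumPy sketch of the same rule (not the Paddle code above):

```python
import numpy as np

def drop_path(x, drop_prob=0.0, training=True, rng=None):
    # Identity at inference time or when the drop probability is zero.
    if drop_prob == 0.0 or not training:
        return x
    rng = rng or np.random.default_rng()
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast across the remaining dims,
    # so entire residual paths are dropped rather than single elements.
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = np.floor(keep_prob + rng.random(shape))
    # Rescale survivors so the expected activation is unchanged.
    return x / keep_prob * mask
```
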
7,473 | 42,069 | 17 | seaborn/rcmod.py | 8 | 8 | def set_style(style=None, rc=None):
style_object = axes_style | Convert docs to pydata-sphinx-theme and add new material (#2842)
* Do basic conversion of site to pydata_sphinx_theme
* Remove some pae structure customizations we no longer need
* Add some custom CSS
* Tweak a few more colors
* Remove vestigial div closing tag
* Reorganize release notes into hierarchic... | set_style | 34662f4be5c364e7518f9c1118c9b362038ee5dd | seaborn | rcmod.py | 8 | 3 | https://github.com/mwaskom/seaborn.git | 1 | 28 | 0 | 8 | 46 | Python | {
"docstring": "\n Set the parameters that control the general style of the plots.\n\n The style parameters control properties like the color of the background and\n whether a grid is enabled by default. This is accomplished using the\n matplotlib rcParams system.\n\n The options are illustrated in the... | def set_style(style=None, rc=None):
style_object = axes_style(style, rc)
mpl.rcParams.update(style_object)
| |
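
A typical call of the public API this row documents; `whitegrid` is one of seaborn's built-in style names, and the `rc` dict overrides individual style parameters on top of it:

```python
import seaborn as sns
import matplotlib.pyplot as plt

sns.set_style("whitegrid", rc={"grid.linestyle": "--"})
sns.histplot([1, 2, 2, 3, 3, 3])
plt.show()
```
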
6,252 | 34,302 | 247 | src/transformers/models/vilt/feature_extraction_vilt.py | 97 | 23 | def _resize(self, image, shorter=800, longer=1333, size_divisor=32, resample=Image.BICUBIC):
if not isinstance(image, Image.Image):
image = self.to_pil_image(image)
w, h = image.size
min_size = shorter
max_size = longer
| Add ViLT (#14895)
* First commit
* Add conversion script
* Make conversion script work for base model
* More improvements
* Update conversion script, works for vqa
* Add indexing argument to meshgrid
* Make conversion script work for ViltForPreTraining
* Add ViltForPreTraining to docs
* Fix dev... | _resize | ac227093e41cecb07c7e0f2fc9a504850907bd06 | transformers | feature_extraction_vilt.py | 11 | 18 | https://github.com/huggingface/transformers.git | 4 | 169 | 0 | 52 | 266 | Python | {
"docstring": "\n Resizes the shorter edge of `image` to `shorter` and limits the longer edge to under `longer`, while preserving\n the aspect ratio. Also makes sure that both the height and width can be divided by `size_divisor`.\n\n Based on original implementation:\n https://github.com... | def _resize(self, image, shorter=800, longer=1333, size_divisor=32, resample=Image.BICUBIC):
if not isinstance(image, Image.Image):
image = self.to_pil_image(image)
w, h = image.size
min_size = shorter
max_size = longer
scale = min_size / min(w, h)
i... | |
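
The sizing rule the docstring describes, as a standalone sketch (the actual feature extractor may round slightly differently): scale the short side to `shorter`, cap the scale so the long side stays within `longer`, then floor both sides to a multiple of `size_divisor`:

```python
def resize_shape(w, h, shorter=800, longer=1333, size_divisor=32):
    scale = shorter / min(w, h)
    if max(w, h) * scale > longer:
        scale = longer / max(w, h)  # cap: keep the long side <= longer
    new_w, new_h = int(w * scale), int(h * scale)
    # Floor to a multiple of size_divisor so patches tile exactly.
    new_w -= new_w % size_divisor
    new_h -= new_h % size_divisor
    return new_w, new_h

print(resize_shape(640, 480))  # (1056, 800): short side at 800, both divisible by 32
```
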
31,708 | 139,470 | 39 | rllib/policy/dynamic_tf_policy_v2.py | 11 | 9 | def extra_action_out_fn(self) -> Dict[str, TensorType]:
extra_action_fetches = super().extra_action_out_fn()
extra_action_fetches.update(self._policy_extra_action_fetches)
return extra_action_fetches
| [RLlib] Introduce new policy base classes. (#24742) | extra_action_out_fn | bc3a1d35cf6e9a5fd7eef908a8e76aefb80ce6a9 | ray | dynamic_tf_policy_v2.py | 10 | 10 | https://github.com/ray-project/ray.git | 1 | 32 | 0 | 10 | 54 | Python | {
"docstring": "Extra values to fetch and return from compute_actions().\n\n Returns:\n Dict[str, TensorType]: An extra fetch-dict to be passed to and\n returned from the compute_actions() call.\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 24,
"vocab_size": 2... | def extra_action_out_fn(self) -> Dict[str, TensorType]:
extra_action_fetches = super().extra_action_out_fn()
extra_action_fetches.update(self._policy_extra_action_fetches)
return extra_action_fetches
| |
22,019 | 104,904 | 53 | src/datasets/builder.py | 14 | 13 | def get_all_exported_dataset_infos(cls) -> dict:
dset_infos_file_path = os.path.join(cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)
if os.path.exists(dset_infos_file_path):
return DatasetInfosDict.from_directory(cls.get_imported_module_dir() | Add API code examples for Builder classes (#4313)
* 📝 add examples for builder classes
* 📝 apply quentin review | get_all_exported_dataset_infos | d1d4f1065fd4ab91b2c8682643dbd12f86d66fcd | datasets | builder.py | 11 | 16 | https://github.com/huggingface/datasets.git | 2 | 50 | 0 | 13 | 96 | Python | {
"docstring": "Empty dict if doesn't exist\n\n Example:\n\n ```py\n >>> from datasets import load_dataset_builder\n >>> ds_builder = load_dataset_builder('rotten_tomatoes')\n >>> ds_builder.get_all_exported_dataset_infos()\n {'default': DatasetInfo(description=\"Movie Review... | def get_all_exported_dataset_infos(cls) -> dict:
dset_infos_file_path = os.path.join(cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)
if os.path.exists(dset_infos_file_path):
return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
return {}
| |
118,370 | 323,124 | 174 | paddlenlp/trainer/trainer_args.py | 44 | 20 | def to_sanitized_dict(self) -> Dict[str, Any]:
d = self.to_dict()
d = {
** d, ** {
"train_batch_size": self.train_batch_size,
"eval_batch_size": self.eval_batch_size
}
}
valid_types = [bool, int, | [Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)
* add some datasets for finetune.
* support fine tune for all tastks.
* add trainer prototype.
* init verison for paddlenlp trainer.
* refine trainer.
* update for some details.
* support multi-card... | to_sanitized_dict | 44a290e94d1becd1f09fddc3d873f9e19c9d6919 | PaddleNLP | trainer_args.py | 11 | 17 | https://github.com/PaddlePaddle/PaddleNLP.git | 3 | 88 | 0 | 33 | 138 | Python | {
"docstring": "\n Sanitized serialization to use with TensorBoard’s hparams\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | def to_sanitized_dict(self) -> Dict[str, Any]:
d = self.to_dict()
d = {
** d, ** {
"train_batch_size": self.train_batch_size,
"eval_batch_size": self.eval_batch_size
}
}
valid_types = [bool, int, float, str]
valid_... | |
56,252 | 221,182 | 27 | python3.10.4/Lib/bz2.py | 6 | 5 | def readinto(self, b):
self._check_can_read()
| add python 3.10.4 for windows | readinto | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | bz2.py | 8 | 3 | https://github.com/XX-net/XX-Net.git | 1 | 22 | 0 | 6 | 38 | Python | {
"docstring": "Read bytes into b.\n\n Returns the number of bytes read (0 for EOF).\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 13,
"vocab_size": 12
} | def readinto(self, b):
self._check_can_read()
return self._buffer.readinto(b)
| |
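
Reading into a caller-supplied buffer avoids an allocation per read. A self-contained example against an in-memory bz2 stream:

```python
import bz2
import io

payload = bz2.compress(b"hello world")
with bz2.BZ2File(io.BytesIO(payload), "rb") as f:
    buf = bytearray(5)
    n = f.readinto(buf)      # fills buf in place
    print(n, bytes(buf))     # 5 b'hello'
```
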
50,997 | 205,032 | 557 | django/db/backends/oracle/base.py | 126 | 18 | def _output_type_handler(cursor, name, defaultType, length, precision, scale):
if defaultType == Database.NUMBER:
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point.
# This will normally be an intege... | Refs #33476 -- Reformatted code with Black. | _output_type_handler | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | base.py | 13 | 19 | https://github.com/django/django.git | 5 | 90 | 0 | 77 | 147 | Python | {
"docstring": "\n Called for each db column fetched from cursors. Return numbers as the\n appropriate Python type.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 15
} | def _output_type_handler(cursor, name, defaultType, length, precision, scale):
if defaultType == Database.NUMBER:
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point.
# This will normally be an intege... | |
21,771 | 104,101 | 365 | src/datasets/features/features.py | 121 | 21 | def decode_nested_example(schema, obj):
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
return {
k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj)
}
elif isinstance(schema, (list,... | Add Arrow type casting to struct for Image and Audio + Support nested casting (#3575)
* add storage cast
* implement dict cast for image
* factorize extension type creation for audio and image + implement type cast for thos custom types
* fix tests
* style
* [big] allow extension array in nested arrays
... | decode_nested_example | 6ca96c707502e0689f9b58d94f46d871fa5a3c9c | datasets | features.py | 18 | 25 | https://github.com/huggingface/datasets.git | 15 | 207 | 0 | 79 | 310 | Python | {
"docstring": "Decode a nested example.\n This is used since some features (in particular Audio and Image) have some logic during decoding.\n\n To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.\n ... | def decode_nested_example(schema, obj):
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
return {
k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj)
}
elif isinstance(schema, (list,... | |
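
A stripped-down sketch of the recursion pattern this row shows (not the `datasets` API itself): dicts recurse key-wise, lists recurse element-wise with their single sub-schema, and leaves decode only if the feature defines a decoder:

```python
def decode_nested(schema, obj):
    if isinstance(schema, dict):
        return {k: decode_nested(sub, obj[k]) for k, sub in schema.items()}
    if isinstance(schema, (list, tuple)):
        if obj is None:
            return None
        return [decode_nested(schema[0], item) for item in obj]
    # Leaf: apply the feature's decoder when present (e.g. Audio, Image).
    decode = getattr(schema, "decode_example", None)
    return decode(obj) if decode is not None and obj is not None else obj
```
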
41,541 | 175,013 | 14 | src/pip/_internal/utils/virtualenv.py | 8 | 4 | def running_under_virtualenv() -> bool:
return _running_und | Name virtualenv<20 as "legacy"
Well they are. At least not "regular" anymore. | running_under_virtualenv | 5ded5474ac9b323496506e6391e8d8c2c888d7f1 | pip | virtualenv.py | 8 | 3 | https://github.com/pypa/pip.git | 2 | 15 | 0 | 8 | 29 | Python | {
"docstring": "True if we're running inside a virtual environment, False otherwise.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def running_under_virtualenv() -> bool:
return _running_under_venv() or _running_under_legacy_virtualenv()
| |
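
Plausible implementations of the two helpers this one-liner combines, matching how pip distinguishes PEP 405 venvs from pre-20 ("legacy") virtualenv; a hedged reconstruction, not the vendored source:

```python
import sys

def _running_under_venv() -> bool:
    # PEP 405 venvs keep the original interpreter prefix in sys.base_prefix.
    return sys.prefix != getattr(sys, "base_prefix", sys.prefix)

def _running_under_legacy_virtualenv() -> bool:
    # virtualenv < 20 patched in sys.real_prefix instead.
    return hasattr(sys, "real_prefix")

print(_running_under_venv() or _running_under_legacy_virtualenv())
```
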
70,351 | 244,362 | 398 | mmdet/models/dense_heads/base_dense_head.py | 97 | 23 | def forward_train(self, x, data_samples, proposal_cfg=None, **kwargs):
img_metas = [data_sample['meta'] for data_sample in data_samples]
outs = self(x)
gt_bboxes = [
data_sample.gt_instances.bboxes for data_sample in data_samples
]
if hasattr(data_samples[0]... | Simplify api of one-stage detector | forward_train | 9c5b3331ac8edbfa328922fbab45c382380da540 | mmdetection | base_dense_head.py | 12 | 30 | https://github.com/open-mmlab/mmdetection.git | 9 | 178 | 0 | 51 | 277 | Python | {
"docstring": "\n Args:\n x (list[Tensor]): Features from FPN.\n data_samples (list[:obj:`GeneralData`]): Each item contains\n the meta information of each image and corresponding\n annotations.\n proposal_cfg (mmcv.Config): Test / postprocessing ... | def forward_train(self, x, data_samples, proposal_cfg=None, **kwargs):
img_metas = [data_sample['meta'] for data_sample in data_samples]
outs = self(x)
gt_bboxes = [
data_sample.gt_instances.bboxes for data_sample in data_samples
]
if hasattr(data_samples[0]... | |
78,277 | 266,040 | 54 | netbox/netbox/models/features.py | 11 | 7 | def cf(self):
return {
cf.name: cf.deserialize(self.custom_field_data.get(cf.name | Closes #10052: The cf attribute now returns deserialized custom field data | cf | ea6d86e6c4bb6037465410db6205a7471bc81a6c | netbox | features.py | 12 | 5 | https://github.com/netbox-community/netbox.git | 2 | 34 | 0 | 11 | 55 | Python | {
"docstring": "\n Return a dictionary mapping each custom field for this instance to its deserialized value.\n\n ```python\n >>> tenant = Tenant.objects.first()\n >>> tenant.cf\n {'primary_site': <Site: DM-NYC>, 'cust_id': 'DMI01', 'is_active': True}\n ```\n ",
"lan... | def cf(self):
return {
cf.name: cf.deserialize(self.custom_field_data.get(cf.name))
for cf in self.custom_fields
}
| |
42,292 | 177,153 | 30 | networkx/drawing/tests/test_layout.py | 9 | 10 | def test_arf_layout_negative_a_check(self):
G = self.Gs
pytest.raises(ValueError, nx.arf_layout, G=G, a=- | Arf layout (#5910)
* added arf_layout
* reference to docstring and comparison to spring layout
* rebase to origin main
* black re-format
* Left aligned docstring text
* Cleaned up computation and update variables to new docstring
* Updated naming tests. Added input check on arf_layout parameter `a`
... | test_arf_layout_negative_a_check | 88245f69f89dbee75cef67bdf35bbfb986a42d52 | networkx | test_layout.py | 9 | 3 | https://github.com/networkx/networkx.git | 1 | 30 | 0 | 9 | 48 | Python | {
"docstring": "\n Checks input parameters correctly raises errors. For example, `a` should be larger than 1\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 14,
"vocab_size": 14
} | def test_arf_layout_negative_a_check(self):
G = self.Gs
pytest.raises(ValueError, nx.arf_layout, G=G, a=-1)
| |
91,386 | 292,291 | 18 | tests/components/device_tracker/test_config_entry.py | 9 | 5 | async def test_connected_device_registered(hass):
registry = mock_registry(hass)
dispatches = []
| Ensure dhcp can still discover new devices from device trackers (#66822)
Co-authored-by: Martin Hjelmare <marhje52@gmail.com> | test_connected_device_registered | a18d4c51ff3ab9afd13ee08fe8c65e2f9b77f3b1 | core | test_config_entry.py | 8 | 50 | https://github.com/home-assistant/core.git | 1 | 204 | 0 | 8 | 31 | Python | {
"docstring": "Test dispatch on connected device being registered.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | async def test_connected_device_registered(hass):
registry = mock_registry(hass)
dispatches = []
| |
@RunIf(min_gpus=2, skip_windows=True, fairscale=True) | 69,605 | 241,580 | 72 | tests/strategies/test_sharded_strategy.py | 39 | 28 | def test_ddp_sharded_strategy_checkpoint_multi_gpu(tmpdir):
model = BoringModel()
trainer = Trainer(gpus=2, strategy="ddp_sharded_spawn", fast_dev_run=True)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, "model.pt")
trainer.save_checkpoint(checkpoint_path) | Rename training plugin test files & names to strategy (#11303) | test_ddp_sharded_strategy_checkpoint_multi_gpu | 650c710efacd633fa283955145342bb64063c883 | lightning | test_sharded_strategy.py | 12 | 9 | https://github.com/Lightning-AI/lightning.git | 2 | 93 | 1 | 35 | 177 | Python | {
"docstring": "Test to ensure that checkpoint is saved correctly when using multiple GPUs.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def test_ddp_sharded_strategy_checkpoint_multi_gpu(tmpdir):
model = BoringModel()
trainer = Trainer(gpus=2, strategy="ddp_sharded_spawn", fast_dev_run=True)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, "model.pt")
trainer.save_checkpoint(checkpoint_path)
saved_model = BoringM... |
29,408 | 130,870 | 522 | python/ray/serve/controller.py | 85 | 38 | def autoscale(self) -> None:
for deployment_name, (
deployment_info,
route_prefix,
) in self.list_deployments().items():
deployment_config = deployment_info.deployment_config
autoscaling_policy = depl | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | autoscale | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | controller.py | 15 | 36 | https://github.com/ray-project/ray.git | 6 | 180 | 0 | 56 | 284 | Python | {
"docstring": "Updates autoscaling deployments with calculated num_replicas.",
"language": "en",
"n_whitespaces": 5,
"n_words": 6,
"vocab_size": 6
} | def autoscale(self) -> None:
for deployment_name, (
deployment_info,
route_prefix,
) in self.list_deployments().items():
deployment_config = deployment_info.deployment_config
autoscaling_policy = deployment_info.autoscaling_policy
if ... | |
52,855 | 210,095 | 18 | ppdet/utils/checkpoint.py | 9 | 7 | def match_state_dict(model_state_dict, weight_state_dict):
| Add PP-YOLOv3 code (#5281)
* [ppyolov3] add ppyolov3 base code
* add ppyolov3 s/m/x
* modify ema
* modify code to convert onnx successfully
* support arbitrary shape
* update config to use amp default
* refine ppyolo_head code
* modify reparameter code
* refine act layer
* adapter pico_head ... | match_state_dict | ef83ab8a3f7814e9886a7a22c8dcc55f506b6081 | PaddleDetection | checkpoint.py | 10 | 46 | https://github.com/PaddlePaddle/PaddleDetection.git | 11 | 305 | 0 | 8 | 49 | Python | {
"docstring": "\n Match between the model state dict and pretrained weight state dict.\n Return the matched state dict.\n\n The method supposes that all the names in pretrained weight state dict are\n subclass of the names in models`, if the prefix 'backbone.' in pretrained weight\n keys is stripped. ... | def match_state_dict(model_state_dict, weight_state_dict):
model_keys = sorted(model_state_dict.keys())
weight_keys = sorted(weight_state_dict.keys())
| |
41,908 | 176,447 | 200 | networkx/algorithms/approximation/connectivity.py | 74 | 23 | def local_node_connectivity(G, source, target, cutoff=None):
if target == source:
raise nx.NetworkXError("source and target have to be different nodes.")
# Maximum possible node independent paths
if G.is_directed():
possible = min(G.out_degree(source), G.in_degree(target))
else:
... | Minor improvements from general code readthrough (#5414)
* Add deprecated directive to reversed docstring.
* Add missing dep directives to shpfiles.
* Remove defn of INF sentinel.
* typo.
* str -> comment in forloop.
* STY: appropriate casing for var name. | local_node_connectivity | cc1db275efc709cb964ce88abbfa877798d58c10 | networkx | connectivity.py | 13 | 21 | https://github.com/networkx/networkx.git | 7 | 143 | 0 | 56 | 232 | Python | {
"docstring": "Compute node connectivity between source and target.\n\n Pairwise or local node connectivity between two distinct and nonadjacent\n nodes is the minimum number of nodes that must be removed (minimum\n separating cutset) to disconnect them. By Menger's theorem, this is equal\n to the number... | def local_node_connectivity(G, source, target, cutoff=None):
if target == source:
raise nx.NetworkXError("source and target have to be different nodes.")
# Maximum possible node independent paths
if G.is_directed():
possible = min(G.out_degree(source), G.in_degree(target))
else:
... | |
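
Calling the approximation (it must be imported from the approximation package explicitly); the routine returns a fast estimate that never exceeds the true connectivity:

```python
import networkx as nx
from networkx.algorithms import approximation as approx

G = nx.petersen_graph()
k = approx.local_node_connectivity(G, 0, 2)
print(k)  # typically 3 for the 3-connected Petersen graph
```
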
35,618 | 153,802 | 1,145 | modin/core/dataframe/pandas/dataframe/dataframe.py | 304 | 48 | def _copartition(self, axis, other, how, sort, force_repartition=False):
if isinstance(other, type(self)):
other = [other]
self_index = self.axes[axis]
others_index = [o.axes[axis] for o in other]
joined_index, make_reindexer = self._join_index_objects(
... | PERF-#4493: Use partition size caches more in Modin dataframe. (#4495)
Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com>
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: mvashishtha <mahesh@ponder.io> | _copartition | cca9468648521e9317de1cb69cf8e6b1d5292d21 | modin | dataframe.py | 16 | 68 | https://github.com/modin-project/modin.git | 21 | 462 | 0 | 163 | 694 | Python | {
"docstring": "\n Copartition two Modin DataFrames.\n\n Perform aligning of partitions, index and partition blocks.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to copartition along (0 - rows, 1 - columns).\n other : PandasDataframe\n Other Modin ... | def _copartition(self, axis, other, how, sort, force_repartition=False):
if isinstance(other, type(self)):
other = [other]
self_index = self.axes[axis]
others_index = [o.axes[axis] for o in other]
joined_index, make_reindexer = self._join_index_objects(
... | |
40,414 | 169,303 | 45 | pandas/core/indexes/multi.py | 18 | 4 | def size(self) -> int:
# override Index.size to avoid materializing _values
return len(self)
# ------------------------------------------- | PERF: MultiIndex.size (#48723)
* add MultiIndex.size
* whatsnew | size | 2fbdd1eb4ef73a470f3db60cbf38a7d9f6c3ffe1 | pandas | multi.py | 7 | 5 | https://github.com/pandas-dev/pandas.git | 1 | 13 | 0 | 16 | 27 | Python | {
"docstring": "\n Return the number of elements in the underlying data.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 8
} | def size(self) -> int:
# override Index.size to avoid materializing _values
return len(self)
# --------------------------------------------------------------------
# Levels Methods
| |
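
The point of the override is visible from the invariant it preserves: `size` is just `len(self)`, so no index tuples are materialized:

```python
import pandas as pd

mi = pd.MultiIndex.from_product([range(1000), ["a", "b"]])
assert mi.size == len(mi) == 2000  # computed without building 2000 tuples
```
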
35,248 | 153,079 | 937 | modin/experimental/core/execution/native/implementations/omnisci_on_native/dataframe/dataframe.py | 278 | 49 | def groupby_agg(self, by, axis, agg, groupby_args, **kwargs):
# Currently we only expect 'by' to be a projection of the same frame.
# If 'by' holds a list of columns/series, then we create such projection
# to re-use code.
if not isinstance(by, DFAlgQueryCompiler):
i... | REFACTOR-#2656: Update modin to fit algebra (code only) (#3717)
Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru>
Co-authored-by: Vasily Litvinov <vasilij.n.litvinov@intel.com>
Co-authored-by: Alexey Prutskov <alexey.prutskov@intel.com>
Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com... | groupby_agg | 58bbcc37477866d19c8b092a0e1974a4f0baa586 | modin | dataframe.py | 17 | 105 | https://github.com/modin-project/modin.git | 34 | 774 | 0 | 161 | 546 | Python | {
"docstring": "\n Groupby with aggregation operation.\n\n Parameters\n ----------\n by : DFAlgQueryCompiler or list-like of str\n Grouping keys.\n axis : {0, 1}\n Only rows groupby is supported, so should be 0.\n agg : str or dict\n Aggregate... | def groupby_agg(self, by, axis, agg, groupby_args, **kwargs):
# Currently we only expect 'by' to be a projection of the same frame.
# If 'by' holds a list of columns/series, then we create such projection
# to re-use code.
if not isinstance(by, DFAlgQueryCompiler):
i... | |
18,931 | 92,539 | 214 | src/sentry/snuba/tasks.py | 48 | 29 | def delete_subscription_from_snuba(query_subscription_id, **kwargs):
try:
subscription = QuerySubscription.objects.get(id=query_subscription_id)
except QuerySubscription.DoesNotExist:
metrics.incr("snuba.subscriptions.delete.subscription_does_not_exist")
return
if subscription.... | feat(mep): Restructure how we determine entity subscription for alerts (#36605)
Previously we mapped a specific `EntityKey` to all `EntitySubscription` classes. As part of
introducing metric based performance alerts, we want to have the `EntitySubscription` determine the
specific entity that the subscription will ru... | delete_subscription_from_snuba | 06885ee7284a274d02a9dc1f6a0348c8edc07184 | sentry | tasks.py | 12 | 26 | https://github.com/getsentry/sentry.git | 5 | 142 | 0 | 40 | 227 | Python | {
"docstring": "\n Task to delete a corresponding subscription in Snuba from a `QuerySubscription` in\n Sentry.\n If the local subscription is marked for deletion (as opposed to disabled),\n then we delete the local subscription once we've successfully removed from Snuba.\n ",
"language": "en",
"n_... | def delete_subscription_from_snuba(query_subscription_id, **kwargs):
try:
subscription = QuerySubscription.objects.get(id=query_subscription_id)
except QuerySubscription.DoesNotExist:
metrics.incr("snuba.subscriptions.delete.subscription_does_not_exist")
return
if subscription.... | |
@pytest.mark.parametrize('count, expected', [(1, 100), (3, 300),
(5, 500), (7, 500)]) | 117,443 | 320,931 | 89 | tests/unit/mainwindow/test_messageview.py | 26 | 18 | def test_changing_timer_with_messages_shown(qtbot, view, config_stub):
config_stub.val.messages.timeout = 900000 # 15s
view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))
with qtbot.wait_signal(view._clear_timer.timeout):
config_stub.val.messages.timeout = 100
@pytest... | Add a MessageInfo data class
Preparation for #7246 | test_changing_timer_with_messages_shown | 5616a99eff34f7074641d1391ed77d6b4b743529 | qutebrowser | test_messageview.py | 11 | 5 | https://github.com/qutebrowser/qutebrowser.git | 1 | 57 | 1 | 24 | 143 | Python | {
"docstring": "When we change messages.timeout, the timer should be restarted.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_changing_timer_with_messages_shown(qtbot, view, config_stub):
config_stub.val.messages.timeout = 900000 # 15s
view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))
with qtbot.wait_signal(view._clear_timer.timeout):
config_stub.val.messages.timeout = 100
@pytest... |
14,108 | 66,132 | 62 | erpnext/hr/doctype/interview/interview.py | 96 | 31 | def get_events(start, end, filters=None):
from frappe.desk.calendar import get_event_conditions
events = []
event_color = {
"Pending": "#fff4f0",
"Under Review": "#d3e8fc",
"Cleared": "#eaf5ed",
"Rejected": "#fce7e7",
}
conditions = get_event_conditions("Interview", filters)
interviews = frappe.db.s... | style: format code with black | get_events | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | interview.py | 16 | 45 | https://github.com/frappe/erpnext.git | 7 | 216 | 0 | 75 | 373 | Python | {
"docstring": "Returns events for Gantt / Calendar view rendering.\n\n\t:param start: Start date-time.\n\t:param end: End date-time.\n\t:param filters: Filters (JSON).\n\t\n\t\t\tSELECT DISTINCT\n\t\t\t\t`tabInterview`.name, `tabInterview`.job_applicant, `tabInterview`.interview_round,\n\t\t\t\t`tabInterview`.schedu... | def get_events(start, end, filters=None):
from frappe.desk.calendar import get_event_conditions
events = []
event_color = {
"Pending": "#fff4f0",
"Under Review": "#d3e8fc",
"Cleared": "#eaf5ed",
"Rejected": "#fce7e7",
}
conditions = get_event_conditions("Interview", filters)
interviews = frappe.db.s... | |
31,364 | 138,228 | 263 | python/ray/tune/tests/test_experiment.py | 33 | 12 | def testFuncTrainableCheckpointConfigValidation(self):
with self.assertRaises(ValueError):
Experiment(
name="foo",
run="f1", # Will point to a wrapped function trainable
checkpoint_c | [Tune] Fix CheckpointConfig validation for function trainables (#31255)
This fixes an issue where a ValueError wasn't being properly raised when passing in a function trainable and setting `checkpoint_at_end=True` or `checkpoint_frequency > 0`. Previously, the error was only raised for function trainables of the form ... | testFuncTrainableCheckpointConfigValidation | 51b56ad0118ed3f4341410e8c75625d1ca8cd757 | ray | test_experiment.py | 13 | 19 | https://github.com/ray-project/ray.git | 1 | 93 | 0 | 21 | 161 | Python | {
"docstring": "Raise an error when trying to specify checkpoint_at_end/checkpoint_frequency\n with a function trainable.",
"language": "en",
"n_whitespaces": 18,
"n_words": 12,
"vocab_size": 12
} | def testFuncTrainableCheckpointConfigValidation(self):
with self.assertRaises(ValueError):
Experiment(
name="foo",
run="f1", # Will point to a wrapped function trainable
checkpoint_config=CheckpointConfig(checkpoint_at_end=True),
... | |
1,631 | 9,551 | 220 | reconstruction/ostec/utils/align2stylegan.py | 102 | 23 | def create_perspective_transform(src, dst, round=False, splat_args=False):
try:
transform_matrix = create_perspective_transform_matrix(src, dst)
error = None
except np.linalg.LinAlgError as e:
transform_matrix = np.identity(3, dtype=np.float)
error = "invalid input quads (%s... | initialize ostec | create_perspective_transform | 7375ee364e0df2a417f92593e09557f1b2a3575a | insightface | align2stylegan.py | 13 | 26 | https://github.com/deepinsight/insightface.git | 5 | 144 | 0 | 67 | 254 | Python | {
"docstring": " Returns a function which will transform points in quadrilateral\n ``src`` to the corresponding points on quadrilateral ``dst``::\n\n >>> transform = create_perspective_transform(\n ... [(0, 0), (10, 0), (10, 10), (0, 10)],\n ... [(50, 50), (100, 50), (1... | def create_perspective_transform(src, dst, round=False, splat_args=False):
try:
transform_matrix = create_perspective_transform_matrix(src, dst)
error = None
except np.linalg.LinAlgError as e:
transform_matrix = np.identity(3, dtype=np.float)
error = "invalid input quads (%s... | |
117,539 | 321,111 | 111 | qutebrowser/browser/qtnetworkdownloads.py | 29 | 19 | def get(self, url, cache=True, **kwargs):
if not url.isValid():
urlutils.invalid_url_error(url, "start download")
return None
req = QNetworkRequest(url)
user_agent = websettings.user_agent(url)
req.setHeader(QNetworkRequest.KnownHeaders.UserAgentHeader, ... | Run scripts/dev/rewrite_enums.py | get | 0877fb0d78635692e481c8bde224fac5ad0dd430 | qutebrowser | qtnetworkdownloads.py | 11 | 10 | https://github.com/qutebrowser/qutebrowser.git | 3 | 85 | 0 | 25 | 136 | Python | {
"docstring": "Start a download with a link URL.\n\n Args:\n url: The URL to get, as QUrl\n cache: If set to False, don't cache the response.\n **kwargs: passed to get_request().\n\n Return:\n The created DownloadItem.\n ",
"language": "en",
"n_whi... | def get(self, url, cache=True, **kwargs):
if not url.isValid():
urlutils.invalid_url_error(url, "start download")
return None
req = QNetworkRequest(url)
user_agent = websettings.user_agent(url)
req.setHeader(QNetworkRequest.KnownHeaders.UserAgentHeader, ... | |
71,160 | 246,332 | 518 | tests/federation/test_federation_server.py | 106 | 31 | def test_send_join_partial_state(self):
joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME
join_result = self._make_join(joining_user)
join_event_dict = join_result["event"]
add_hashes_and_signatures(
KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION],
join_ev... | Implement MSC3706: partial state in `/send_join` response (#11967)
* Make `get_auth_chain_ids` return a Set
It has a set internally, and a set is often useful where it gets used, so let's
avoid converting to an intermediate list.
* Minor refactors in `on_send_join_request`
A little bit of non-functional grou... | test_send_join_partial_state | 63c46349c41aa967e64a5a4042ef5177f934be47 | synapse | test_federation_server.py | 13 | 41 | https://github.com/matrix-org/synapse.git | 3 | 215 | 0 | 74 | 360 | Python | {
"docstring": "When MSC3706 support is enabled, /send_join should return partial state",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def test_send_join_partial_state(self):
joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME
join_result = self._make_join(joining_user)
join_event_dict = join_result["event"]
add_hashes_and_signatures(
KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION],
join_ev... | |
56,111 | 220,753 | 267 | python3.10.4/Lib/asyncio/streams.py | 87 | 10 | async def drain(self):
if self._reader is not None:
exc = self._reader.exception()
if exc is not None:
raise exc
if self._transport.is_closing():
# Wait for protocol.connection_lost() call
# Raise connection closing error if any,
... | add python 3.10.4 for windows | drain | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | streams.py | 11 | 8 | https://github.com/XX-net/XX-Net.git | 4 | 53 | 0 | 60 | 100 | Python | {
"docstring": "Flush the write buffer.\n\n The intended use is to write\n\n w.write(data)\n await w.drain()\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 13,
"vocab_size": 12
} | async def drain(self):
if self._reader is not None:
exc = self._reader.exception()
if exc is not None:
raise exc
if self._transport.is_closing():
# Wait for protocol.connection_lost() call
# Raise connection closing error if any,
... | |
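
The write/drain pairing the docstring prescribes, in context; a minimal sketch of the sending side of an asyncio stream:

```python
import asyncio

async def send_lines(writer: asyncio.StreamWriter, lines) -> None:
    for line in lines:                # lines: iterable of bytes
        writer.write(line + b"\n")    # only buffers the data
        await writer.drain()          # applies flow control, surfaces errors
    writer.close()
    await writer.wait_closed()
```
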
52,191 | 208,066 | 48 | celery/canvas.py | 16 | 9 | def on_chord_header_start(self, chord, **header) -> dict:
if not isinstance(chord.tasks, group):
chord.tasks = group(c | Canvas Header Stamping (#7384)
* Strip down the header-stamping PR to the basics.
* Serialize groups.
* Add groups to result backend meta data.
* Fix spelling mistake.
* Revert changes to canvas.py
* Revert changes to app/base.py
* Add stamping implementation to canvas.py
* Send task to AMQP with ... | on_chord_header_start | 1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc | celery | canvas.py | 11 | 12 | https://github.com/celery/celery.git | 2 | 46 | 0 | 15 | 73 | Python | {
"docstring": "Method that is called on сhord header stamping start.\n\n Arguments:\n chord (chord): chord that is stamped.\n headers (Dict): Partial headers that could be merged with existing headers.\n Returns:\n Dict: headers to update.\n ",
"language"... | def on_chord_header_start(self, chord, **header) -> dict:
if not isinstance(chord.tasks, group):
chord.tasks = group(chord.tasks)
return self.on_group_start(chord.tasks, **header)
| |
34,932 | 151,043 | 436 | freqtrade/freqai/data_drawer.py | 105 | 17 | def load_historic_predictions_from_disk(self):
exists = self.historic_predictions_path.is_file()
if exists:
try:
with open(self.historic_predictions_path, "rb") as fp:
self.historic_predictions = cloudpickle.load(fp)
logger.info(
... | backup historical predictions pickle and load the backup in case of corruption | load_historic_predictions_from_disk | ec76214d023a6c53ffab0af8d43bc5b72b1d66af | freqtrade | data_drawer.py | 16 | 25 | https://github.com/freqtrade/freqtrade.git | 4 | 112 | 0 | 79 | 222 | Python | {
"docstring": "\n Locate and load a previously saved historic predictions.\n :return: bool - whether or not the drawer was located\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 18
} | def load_historic_predictions_from_disk(self):
exists = self.historic_predictions_path.is_file()
if exists:
try:
with open(self.historic_predictions_path, "rb") as fp:
self.historic_predictions = cloudpickle.load(fp)
logger.info(
... | |
43,713 | 181,992 | 52 | tests/test_css_parse.py | 17 | 14 | def test_background(self):
css =
stylesheet = Stylesheet()
stylesheet.parse(css)
styles = stylesheet.rules[0].styles
| Namespacing parsing tests into classes | test_background | 1103844708c7f3a3bd1fc33cae56eb59209ef6c0 | textual | test_css_parse.py | 10 | 9 | https://github.com/Textualize/textual.git | 1 | 48 | 0 | 15 | 79 | Python | {
"docstring": "#some-widget {\n text: on red;\n }\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 6,
"vocab_size": 6
} | def test_background(self):
css =
stylesheet = Stylesheet()
stylesheet.parse(css)
styles = stylesheet.rules[0].styles
assert styles.text_background == Color("red", type=ColorType.STANDARD, number=1)
| |
103,210 | 304,403 | 489 | homeassistant/components/dte_energy_bridge/sensor.py | 146 | 24 | def update(self) -> None:
try:
response = requests.get(self._url, timeout=5)
except (requests.exceptions.RequestException, ValueError):
_LOGGER.warning(
"Could not update status for DTE Energy Bridge (%s)", self._attr_name
)
return... | Improve entity type hints [d] (#77031) | update | bf7239c25db06f1377a895244a906b43242c9963 | core | sensor.py | 11 | 29 | https://github.com/home-assistant/core.git | 6 | 141 | 0 | 99 | 234 | Python | {
"docstring": "Get the energy usage data from the DTE energy bridge.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 8
} | def update(self) -> None:
try:
response = requests.get(self._url, timeout=5)
except (requests.exceptions.RequestException, ValueError):
_LOGGER.warning(
"Could not update status for DTE Energy Bridge (%s)", self._attr_name
)
return... | |
36,576 | 156,131 | 238 | dask/order.py | 78 | 20 | def ndependencies(dependencies, dependents):
num_needed = {}
result = {}
for k, v in dependencies.items():
num_needed[k] = len(v)
if not v:
result[k] = 1
num_dependencies = num_needed.copy()
current = []
current_pop = current.pop
cur | absolufy-imports - No relative - PEP8 (#8796)
Conversation in https://github.com/dask/distributed/issues/5889 | ndependencies | cccb9d8d8e33a891396b1275c2448c352ef40c27 | dask | order.py | 13 | 24 | https://github.com/dask/dask.git | 10 | 155 | 0 | 45 | 244 | Python | {
"docstring": "Number of total data elements on which this key depends\n\n For each key we return the number of tasks that must be run for us to run\n this task.\n\n Examples\n --------\n >>> inc = lambda x: x + 1\n >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}\n >>> dependencies, depende... | def ndependencies(dependencies, dependents):
num_needed = {}
result = {}
for k, v in dependencies.items():
num_needed[k] = len(v)
if not v:
result[k] = 1
num_dependencies = num_needed.copy()
current = []
current_pop = current.pop
current_append = current.app... | |
39,982 | 167,375 | 181 | pandas/io/pytables.py | 59 | 13 | def validate_attr(self, append) -> None:
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if existing_fields is not None and existing_fields != list(self.values):
raise ValueError("appended items do not match existing items in table!")
... | TYP: some return annotations in pytables.py (#47512) | validate_attr | 7d2f9b8d59908fbf57c6453bc41891efbfe981a6 | pandas | pytables.py | 12 | 11 | https://github.com/pandas-dev/pandas.git | 6 | 78 | 0 | 34 | 124 | Python | {
"docstring": "validate that we have the same order as the existing & same dtype",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 11
} | def validate_attr(self, append) -> None:
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if existing_fields is not None and existing_fields != list(self.values):
raise ValueError("appended items do not match existing items in table!")
... | |
Final = _FinalForm('Final',
doc="""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error ... | 3,619 | 20,928 | 101 | pipenv/patched/notpip/_vendor/typing_extensions.py | 18 | 28 | def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only single type')
return typing._GenericAlias(self, (item,))
Final = _FinalForm('Final',
d | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | __getitem__ | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | typing_extensions.py | 11 | 4 | https://github.com/pypa/pipenv.git | 1 | 30 | 3 | 17 | 103 | Python | {
"docstring": "A special typing construct to indicate that a name\n cannot be re-assigned or overridden in a subclass.\n For example:\n\n MAX_SIZE: Final = 9000\n MAX_SIZE += 1 # Error reported by type checker",
"langu... | def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only single type')
return typing._GenericAlias(self, (item,))
Final = _FinalForm('Final',
doc= |
81,863 | 277,091 | 76 | keras/utils/tf_utils.py | 36 | 9 | def type_spec_from_value(value):
if is_extension_type(value):
return value._type_spec # pylint: disable=protected-access
# Get a TensorSpec for array-like data without
# converting the data to a Tensor
if hasattr(value, "shape") and hasattr(value, "dtype"):
return tf.TensorSpec(val... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | type_spec_from_value | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | tf_utils.py | 10 | 7 | https://github.com/keras-team/keras.git | 4 | 53 | 0 | 28 | 92 | Python | {
"docstring": "Grab type_spec without converting array-likes to tensors.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def type_spec_from_value(value):
if is_extension_type(value):
return value._type_spec # pylint: disable=protected-access
# Get a TensorSpec for array-like data without
# converting the data to a Tensor
if hasattr(value, "shape") and hasattr(value, "dtype"):
return tf.TensorSpec(val... | |
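
The array-like branch in action: anything exposing `.shape` and `.dtype` yields a spec without a Tensor conversion:

```python
import numpy as np
import tensorflow as tf

arr = np.zeros((2, 3), dtype=np.float32)
spec = tf.TensorSpec(arr.shape, arr.dtype)
print(spec)  # TensorSpec(shape=(2, 3), dtype=tf.float32, name=None)
```
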
53,367 | 212,726 | 141 | DemoPrograms/Demo_User_Settings_Class.py | 49 | 19 | def make_window():
sg.theme(settings.get('-theme-', 'DarkBlue2')) # set the theme
layout = [[sg.Text('Settings Window')],
[sg.Input(settings.get('-input-', ''), k='-IN-')],
[sg.Listbox(sg.theme_list(), default_values=[settings['-theme-'],], size=(15, 10), k='-LISTBOX-')],
... | Catching up on the many many demo programs that were not checked in.... | make_window | cfe2c96a1fa6fc721c998179298a7d430ccbaefd | PySimpleGUI | Demo_User_Settings_Class.py | 14 | 10 | https://github.com/PySimpleGUI/PySimpleGUI.git | 1 | 181 | 0 | 46 | 304 | Python | {
"docstring": "\n Creates a new window. The default values for some elements are pulled directly from the\n \"User Settings\" without the use of temp variables.\n\n Some get_entry calls don't have a default value, such as theme, because there was an initial call\n that would have set the default value i... | def make_window():
sg.theme(settings.get('-theme-', 'DarkBlue2')) # set the theme
layout = [[sg.Text('Settings Window')],
[sg.Input(settings.get('-input-', ''), k='-IN-')],
[sg.Listbox(sg.theme_list(), default_values=[settings['-theme-'],], size=(15, 10), k='-LISTBOX-')],
... | |
72,922 | 249,450 | 52 | synapse/metrics/common_usage_metrics.py | 13 | 7 | async def _collect(self) -> CommonUsageMetrics:
dau_count = await self._store.count_daily_users()
return CommonUsageMetrics(
daily_active_users=dau_count,
)
| Share some metrics between the Prometheus exporter and the phone home stats (#13671) | _collect | 898fef2789c9b1a20ef53c7d588f536f51f0fe2f | synapse | common_usage_metrics.py | 10 | 8 | https://github.com/matrix-org/synapse.git | 1 | 26 | 0 | 13 | 46 | Python | {
"docstring": "Collect the common metrics and either create the CommonUsageMetrics object to\n use if it doesn't exist yet, or update it.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 20,
"vocab_size": 19
} | async def _collect(self) -> CommonUsageMetrics:
dau_count = await self._store.count_daily_users()
return CommonUsageMetrics(
daily_active_users=dau_count,
)
| |
42,702 | 178,456 | 260 | nuitka/plugins/Plugins.py | 65 | 12 | def getPreprocessorSymbols(cls):
if cls.preprocessor_symbols is None:
cls.preprocessor_symbols = OrderedDict()
for plugin in getActivePlugins():
value = plugin.getPreprocessorSymbols()
if value is not None:
assert type(value... | Minor cleanups
* Typos and minor problems only | getPreprocessorSymbols | 11b0190a5e2d77098b16ff01ae8597428e055f53 | Nuitka | Plugins.py | 16 | 10 | https://github.com/Nuitka/Nuitka.git | 5 | 75 | 0 | 47 | 124 | Python | {
"docstring": "Let plugins provide C defines to be used in compilation.\n\n Notes:\n The plugins can each contribute, but are hopefully using\n a namespace for their defines.\n\n Returns:\n OrderedDict(), where None value indicates no define value,\n i.e. \"-... | def getPreprocessorSymbols(cls):
if cls.preprocessor_symbols is None:
cls.preprocessor_symbols = OrderedDict()
for plugin in getActivePlugins():
value = plugin.getPreprocessorSymbols()
if value is not None:
assert type(value... | |
23,267 | 108,585 | 117 | lib/matplotlib/text.py | 38 | 12 | def _check_xy(self, renderer=None):
if renderer is None:
renderer = self.figure._get_renderer()
b = self.get_annotation_clip()
if b or (b is None and self.xycoords == "data"):
| MNT: make renderer always optional | _check_xy | 24b16804731d3a724e4ec0984da140b1a6b05c66 | matplotlib | text.py | 11 | 8 | https://github.com/matplotlib/matplotlib.git | 5 | 65 | 0 | 29 | 109 | Python | {
"docstring": "Check whether the annotation at *xy_pixel* should be drawn.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _check_xy(self, renderer=None):
if renderer is None:
renderer = self.figure._get_renderer()
b = self.get_annotation_clip()
if b or (b is None and self.xycoords == "data"):
# check if self.xy is inside the axes.
xy_pixel = self._get_position_xy(ren... | |
78,660 | 266,931 | 41 | lib/ansible/plugins/connection/__init__.py | 20 | 7 | def _split_ssh_args(argstring):
# In Python3, shlex.split doesn't work on a byte string.
return [to_text(x.strip()) for x in shlex.split(argstring) i | Remove more Python 2.x compatibility code from controller. (#77320) | _split_ssh_args | 4baf18c573c17cf9cd5716b28dbf38a32b57aaff | ansible | __init__.py | 10 | 2 | https://github.com/ansible/ansible.git | 3 | 32 | 0 | 20 | 55 | Python | {
"docstring": "\n Takes a string like '-o Foo=1 -o Bar=\"foo bar\"' and returns a\n list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to\n the argument list. The list will not contain any empty elements.\n ",
"language": "en",
"n_whitespaces": 63,
"n_words": 34,
"vocab_s... | def _split_ssh_args(argstring):
# In Python3, shlex.split doesn't work on a byte string.
return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()]
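
The behaviour described in the docstring, shown directly with `shlex` (quotes are stripped, empty elements dropped):

```python
import shlex

argstring = '-o Foo=1 -o Bar="foo bar"'
print([x.strip() for x in shlex.split(argstring) if x.strip()])
# ['-o', 'Foo=1', '-o', 'Bar=foo bar']
```
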
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.