Dataset preview — one row per mined Python function. Column schema (name · dtype · observed range):

| column | dtype | observed values |
|---|---|---|
| ast_errors | string | lengths 0–3.2k |
| d_id | int64 | 44–121k |
| id | int64 | 70–338k |
| n_whitespaces | int64 | 3–14k |
| path | string | lengths 8–134 |
| n_words | int64 | 4–4.82k |
| n_identifiers | int64 | 1–131 |
| random_cut | string | lengths 16–15.8k |
| commit_message | string | lengths 2–15.3k |
| fun_name | string | lengths 1–84 |
| commit_id | string | lengths 40–40 |
| repo | string | lengths 3–28 |
| file_name | string | lengths 5–79 |
| ast_levels | int64 | 6–31 |
| nloc | int64 | 1–548 |
| url | string | lengths 31–59 |
| complexity | int64 | 1–66 |
| token_counts | int64 | 6–2.13k |
| n_ast_errors | int64 | 0–28 |
| vocab_size | int64 | 4–1.11k |
| n_ast_nodes | int64 | 15–19.2k |
| language | string | 1 class (Python) |
| documentation | dict | keys: docstring, language, n_whitespaces, n_words, vocab_size |
| code | string | lengths 101–62.2k |

Sample rows follow. Long fields are truncated in the source; truncation points are kept verbatim (`...`/`…`). The `random_cut` field holds a shorter cut of the same snippet as `code`, so each row below shows the snippet once.
---
**`find_asteroidal_triple`** — repo `networkx` · file `networkx/algorithms/asteroidal.py` · commit `cc1db275efc709cb964ce88abbfa877798d58c10` · https://github.com/networkx/networkx.git
Stats: d_id 41,909 · id 176,448 · n_whitespaces 294 · n_words 105 · n_identifiers 21 · ast_levels 15 · nloc 61 · complexity 7 · token_counts 169 · n_ast_errors 1 · vocab_size 80 · n_ast_nodes 280 · language Python
ast_errors: `@not_implemented_for("directed")` `@not_implemented_for("multigraph")`
Commit message: "Minor improvements from general code readthrough (#5414): add deprecated directive to reversed docstring; add missing dep directives to shpfiles; remove defn of INF sentinel; typo; str -> comment in forloop; STY: appropriate casing for var name."
Docstring: "Find an asteroidal triple in the given graph. An asteroidal triple is a triple of non-adjacent vertices such that there exists a path between any two of them which avoids the closed neighborhood of the third. It checks all independent triples of vertices and whether they are an a…" (truncated)
Code (truncated; the `r` stub is what remains of the raw docstring prefix):

```python
def find_asteroidal_triple(G):
    r
    V = set(G.nodes)
    if len(V) < 6:
        # An asteroidal triple cannot exist in a graph with 5 or less vertices.
        return None
    component_structure = create_component_structure(G)
    E_complement = set(nx.complement(G).edges)
    for e in E_complement:
        u ...
```
---
**`get_leave_entries`** — repo `erpnext` · file `erpnext/hr/doctype/leave_application/leave_application.py` · commit `494bd9ef78313436f0424b918f200dab8fc7c20b` · https://github.com/frappe/erpnext.git
Stats: d_id 14,114 · id 66,155 · n_whitespaces 12 · n_words 19 · n_identifiers 10 · ast_levels 10 · nloc 18 · complexity 1 · token_counts 44 · n_ast_errors 1 · vocab_size 18 · n_ast_nodes 83 · language Python
ast_errors: `@frappe.whitelist()`
Commit message: "style: format code with black"
Docstring: "Returns leave entries between from_date and to_date. SELECT employee, leave_type, from_date, to_date, leaves, transaction_name, transaction_type, holiday_list, is_carry_forward, is_expired FROM `tabLeave Ledger Entry` WHERE employee=%(employee)s AND leave_type=%(leave_type…" (truncated)
Code (the bare `,` is where the SQL docstring literal was stripped):

```python
def get_leave_entries(employee, leave_type, from_date, to_date):
    return frappe.db.sql(
        ,
        {"from_date": from_date, "to_date": to_date, "employee": employee, "leave_type": leave_type},
        as_dict=1,
    )

@frappe.whitelist()
```
---
**`addslashes`** — repo `django` · file `django/template/defaultfilters.py` · commit `9c19aff7c7561e3a82978a272ecdaad40dda5c00` · https://github.com/django/django.git
Stats: d_id 51,427 · id 206,236 · n_whitespaces 13 · n_words 9 · n_identifiers 7 · ast_levels 12 · nloc 2 · complexity 1 · token_counts 29 · n_ast_errors 1 · vocab_size 9 · n_ast_nodes 81 · language Python
ast_errors: `@register.filter(is_safe=True)` `@stringfilter`
Commit message: "Refs #33476 -- Reformatted code with Black."
Docstring: "Add slashes before quotes. Useful for escaping strings in CSV, for example. Less useful for escaping JavaScript; use the ``escapejs`` filter instead." (en · ws 35 · words 22 · vocab 19)
Code:

```python
def addslashes(value):
    return value.replace("\\", "\\\\").replace('"', '\\"').replace("'", "\\'")

@register.filter(is_safe=True)
@stringfilter
```
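The filter body is a pure string transform, so its behavior is easy to check standalone. A minimal sketch (function body copied from the row above; the sample input is invented):

```python
def addslashes(value):
    # Escape backslashes first so the quote escapes added next
    # are not themselves double-escaped.
    return value.replace("\\", "\\\\").replace('"', '\\"').replace("'", "\\'")

print(addslashes('He said "it\'s fine"'))  # -> He said \"it\'s fine\"
```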
---
**`_get_raw_from_config`** — repo `modin` · file `modin/config/envvars.py` · commit `02363589aa5105e091fa3d790b29cddf94cc8118` · https://github.com/modin-project/modin.git
Stats: d_id 35,765 · id 154,083 · n_whitespaces 48 · n_words 16 · n_identifiers 7 · ast_levels 10 · nloc 19 · complexity 2 · token_counts 29 · n_ast_errors 0 · vocab_size 16 · n_ast_nodes 50 · language Python
Commit message: "REFACTOR-#4629: Add type annotations to `modin/config` (#4685). Signed-off-by: Karthik Velayutham <vkarthik@ponder.io>"
Docstring: "Read the value from environment variable. Returns: str — Config raw value. Raises: TypeError if `varname` is None; KeyError if value is absent." (en; stats truncated)
Code:

```python
def _get_raw_from_config(cls) -> str:
    if cls.varname is None:
        raise TypeError("varname should not be None")
    return os.environ[cls.varname]
```
---
**`is_monotonic_decreasing`** — repo `modin` · file `modin/core/storage_formats/base/query_compiler.py` · commit `57e29bc5d82348006c5170ef9ac0a9eedcd9acf9` · https://github.com/modin-project/modin.git
Stats: d_id 35,636 · id 153,821 · n_whitespaces 18 · n_words 4 · n_identifiers 6 · ast_levels 10 · nloc 2 · complexity 1 · token_counts 20 · n_ast_errors 0 · vocab_size 4 · n_ast_nodes 35 · language Python
Commit message: "REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514). Co-authored-by: Rehan Sohail Durrani <rdurrani@berkeley.edu>; Signed-off-by: jeffreykennethli <jkli@ponder.io>"
Docstring: "Return boolean if values in the object are monotonically decreasing. Returns: bool" (en · ws 49 · words 13 · vocab 13)
Code:

```python
def is_monotonic_decreasing(self):
    return SeriesDefault.register(pandas.Series.is_monotonic_decreasing)(self)
```
---
**`get`** — repo `sentry` · file `src/sentry/buffer/redis.py` · commit `09726d7fc95e53bb516e328fc1811fc9a0704cac` · https://github.com/getsentry/sentry.git
Stats: d_id 19,281 · id 96,147 · n_whitespaces 119 · n_words 41 · n_identifiers 19 · ast_levels 12 · nloc 10 · complexity 4 · token_counts 93 · n_ast_errors 0 · vocab_size 35 · n_ast_nodes 146 · language Python
Commit message: "fix(post_process): Fetch buffered `times_seen` values and add them to `Group.times_seen` (#31624). In `post_process_group` we process issue alert rules and also ignored groups. Both of these can have conditions that read from the `times_seen` value on the `Group`. The problem here is that updates to `times_seen` ar…" (truncated)
Docstring: "Fetches buffered values for a model/filter. Passed columns must be integer columns." (en · ws 27 · words 12 · vocab 12)
Code:

```python
def get(self, model, columns, filters):
    key = self._make_key(model, filters)
    conn = self.cluster.get_local_client_for_key(key)
    pipe = conn.pipeline()
    for col in columns:
        pipe.hget(key, f"i+{col}")
    results = pipe.execute()
    return {
        col: (...
```
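The row's `get` batches all `HGET`s into one round trip via a redis-py pipeline. A minimal standalone sketch of that pattern (key and column names are invented; assumes a local Redis server and the `redis` package):

```python
import redis

r = redis.Redis()
r.hset("buffer:group:1", mapping={"i+times_seen": 3, "i+events": 7})

pipe = r.pipeline()
columns = ("times_seen", "events")
for col in columns:
    pipe.hget("buffer:group:1", f"i+{col}")  # queued locally, not sent yet
results = pipe.execute()  # one network round trip for all HGETs
print({col: int(val) for col, val in zip(columns, results)})
```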
---
**`test_non_ascii_subscription_for_principal`** — repo `zulip` · file `zerver/tests/test_subs.py` · commit `4b9770e270823b7ed2bbbeda0e4450f0ba6a288b` · https://github.com/zulip/zulip.git
Stats: d_id 17,671 · id 83,385 · n_whitespaces 52 · n_words 13 · n_identifiers 8 · ast_levels 10 · nloc 9 · complexity 1 · token_counts 37 · n_ast_errors 0 · vocab_size 13 · n_ast_nodes 67 · language Python
Commit message: "stream_settings: Show stream privacy & description in stream events. Provide stream privacy and description in stream notification events when stream is created. In function \"send_messages_for_new_subscribers\" for when stream is created, put policy name and description of the stream. Fixes #21004"
Docstring: "You can subscribe other people to streams even if they containing non-ASCII characters." (en · ws 35 · words 13 · vocab 13)
Code:

```python
def test_non_ascii_subscription_for_principal(self) -> None:
    iago = self.example_user("iago")
    self.assert_adding_subscriptions_for_principal(
        iago.id, get_realm("zulip"), ["hümbüǵ"], policy_name="Public"
    )
```
---
**`add_provs`** — repo `nltk` · file `nltk/corpus/reader/wordnet.py` · commit `8ffd0d8190552d45f8b92e18da3fc41639e5185d` · https://github.com/nltk/nltk.git
Stats: d_id 7,546 · id 42,453 · n_whitespaces 210 · n_words 54 · n_identifiers 16 · ast_levels 14 · nloc 10 · complexity 4 · token_counts 84 · n_ast_errors 0 · vocab_size 41 · n_ast_nodes 150 · language Python
Commit message: "Initialize empty provenance for default English"
Docstring: "Add languages from Multilingual Wordnet to the provenance dictionary" (en · ws 8 · words 9 · vocab 9)
Code:

```python
def add_provs(self, reader):
    fileids = reader.fileids()
    for fileid in fileids:
        prov, langfile = os.path.split(fileid)
        file_name, file_extension = os.path.splitext(langfile)
        if file_extension == ".tab":
            lang = file_name.split("-")[-1]
            ...
```
---
**`test_thread_with_bundled_aggregations_for_latest`** — repo `synapse` · file `tests/rest/client/test_relations.py` · commit `75dff3dc980974960f55fa21fc8e672201f63045` · https://github.com/matrix-org/synapse.git
Stats: d_id 72,127 · id 248,149 · n_whitespaces 72 · n_words 19 · n_identifiers 10 · ast_levels 9 · nloc 12 · complexity 1 · token_counts 68 · n_ast_errors 0 · vocab_size 16 · n_ast_nodes 93 · language Python
Commit message: "Include bundled aggregations for the latest event in a thread. (#12273). The `latest_event` field of the bundled aggregations for `m.thread` relations did not include bundled aggregations itself. This resulted in clients needing to immediately request the event from the server (and thus making it useless that the la…" (truncated)
Docstring: "Bundled aggregations should get applied to the latest thread event." (en · ws 25 · words 10 · vocab 10)
Code:

```python
def test_thread_with_bundled_aggregations_for_latest(self) -> None:
    self._send_relation(RelationTypes.THREAD, "m.room.test")
    channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
    thread_2 = channel.json_body["event_id"]
    self._send_relation(
        RelationType...
```
---
**`from_environment`** — repo `prefect` · file `src/prefect/blocks/kubernetes.py` · commit `574d10ff7612661b37801c811862f18998521d58` · https://github.com/PrefectHQ/prefect.git
Stats: d_id 11,592 · id 56,933 · n_whitespaces 18 · n_words 4 · n_identifiers 5 · ast_levels 8 · nloc 2 · complexity 1 · token_counts 15 · n_ast_errors 0 · vocab_size 4 · n_ast_nodes 27 · language Python
Commit message: "organizational changes for the KubernetesClusterConfig and add from_environment classmethod"
Docstring: "Factory method to produce an instance of this class using the default kube config location" (en · ws 30 · words 15 · vocab 15)
Code:

```python
def from_environment(cls):
    return cls.from_file(path=KUBE_CONFIG_DEFAULT_LOCATION)
```
---
**`test_thumbnail_repeated_thumbnail`** — repo `synapse` · file `tests/rest/media/v1/test_media_storage.py` · commit `32c828d0f760492711a98b11376e229d795fd1b3` · https://github.com/matrix-org/synapse.git
Stats: d_id 71,705 · id 247,511 · n_whitespaces 586 · n_words 112 · n_identifiers 33 · ast_levels 11 · nloc 49 · complexity 4 · token_counts 263 · n_ast_errors 0 · vocab_size 68 · n_ast_nodes 414 · language Python
Commit message: "Add type hints to `tests/rest`. (#12208). Co-authored-by: Patrick Cloke <clokep@users.noreply.github.com>"
Docstring: "Test that fetching the same thumbnail works, and deleting the on disk thumbnail regenerates it." (en · ws 29 · words 15 · vocab 13)
Code:

```python
def test_thumbnail_repeated_thumbnail(self) -> None:
    self._test_thumbnail(
        "scale", self.test_image.expected_scaled, self.test_image.expected_found
    )
    if not self.test_image.expected_found:
        return
    # Fetching again should work, without re-requesting the i...
```
---
**`update`** — repo `core` (Home Assistant) · file `homeassistant/components/wake_on_lan/switch.py` · commit `a6b6949793e2571bf46cdca2e541ddf64cb1fc71` · https://github.com/home-assistant/core.git
Stats: d_id 105,431 · id 306,647 · n_whitespaces 124 · n_words 23 · n_identifiers 14 · ast_levels 10 · nloc 12 · complexity 1 · token_counts 61 · n_ast_errors 0 · vocab_size 21 · n_ast_nodes 100 · language Python
Commit message: "Improve entity type hints [w] (#77886)"
Docstring: "Check if device is on and update the state. Only called if assumed state is false." (en · ws 15 · words 16 · vocab 14)
Code:

```python
def update(self) -> None:
    ping_cmd = [
        "ping",
        "-c",
        "1",
        "-W",
        str(DEFAULT_PING_TIMEOUT),
        str(self._host),
    ]
    status = sp.call(ping_cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
    self._state = not bool(status)
```
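The `update` above shells out to `ping` and maps the exit status to a boolean. A standalone sketch of the same check (host and timeout are placeholders; `-c`/`-W` are the Linux flags, which differ on macOS and Windows):

```python
import subprocess as sp

def host_is_up(host: str, timeout_s: int = 3) -> bool:
    cmd = ["ping", "-c", "1", "-W", str(timeout_s), host]
    # Exit status 0 means at least one echo reply came back.
    return sp.call(cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL) == 0

print(host_is_up("127.0.0.1"))
```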
---
**`get_transactions`** — repo `OpenBBTerminal` · file `openbb_terminal/portfolio/portfolio_model.py` · commit `8e9e6bd57f4bc5d57ccedfacccda6342d5881266` · https://github.com/OpenBB-finance/OpenBBTerminal.git
Stats: d_id 85,861 · id 286,538 · n_whitespaces 299 · n_words 33 · n_identifiers 13 · ast_levels 11 · nloc 22 · complexity 1 · token_counts 87 · n_ast_errors 0 · vocab_size 28 · n_ast_nodes 157 · language Python
Commit message: "Incorporate portfolio class into SDK (#3401): create functions to interact with portfolio; fix some docstrings; view docstrings; make portfolio loading available in sdk; reorder some methods; fix bug; update controller; update website; remove import; change input name; regenera…" (truncated)
Docstring: "Get formatted transactions. Returns: pd.DataFrame — formatted transactions" (en · ws 40 · words 8 · vocab 6)
Code:

```python
def get_transactions(self):
    df = self.__transactions[
        [
            "Date",
            "Type",
            "Ticker",
            "Side",
            "Price",
            "Quantity",
            "Fees",
            "Investment",
            "Currency",
            ...
```
---
**`test_pick_colors`** — repo `zulip` · file `zerver/tests/test_subs.py` · commit `dd1c9c45c778dc5280c2b02c3b9fb327d2507cc1` · https://github.com/zulip/zulip.git
Stats: d_id 17,589 · id 83,055 · n_whitespaces 832 · n_words 157 · n_identifiers 16 · ast_levels 10 · nloc 70 · complexity 1 · token_counts 315 · n_ast_errors 0 · vocab_size 106 · n_ast_nodes 520 · language Python
Commit message: "stream colors: Try harder to avoid collisions. We now use recipient_id % 24 for new stream colors when users have already used all 24 of our canned colors. This fix doesn't address the scenario that somebody dislikes one of our current canned colors, so if a user continually changes canned color N to some other color…" (truncated)
Docstring: "If we are assigning colors to a user with 24+ streams, we have to start re-using old colors. Our algorithm basically uses recipient_id % 24, so the following code reflects the worse case scenario that our new streams have recipient ids spaced out by exact multiple…" (truncated)
Code:

```python
def test_pick_colors(self) -> None:
    used_colors: Set[str] = set()
    color_map: Dict[int, str] = {}
    recipient_ids = list(range(30))
    user_color_map = pick_colors(used_colors, color_map, recipient_ids)
    self.assertEqual(
        user_color_map,
        {
            0: "#76...
```
---
**`test_import`** — repo `netbox` · file `netbox/extras/tests/test_customfields.py` · commit `7421e5f7d7e579ed1a0acf840c39ae61fd851504` · https://github.com/netbox-community/netbox.git
Stats: d_id 77,645 · id 264,221 · n_whitespaces 449 · n_words 167 · n_identifiers 25 · ast_levels 12 · nloc 35 · complexity 2 · token_counts 501 · n_ast_errors 0 · vocab_size 128 · n_ast_nodes 888 · language Python
Commit message: "Fixes #8317: Fix CSV import of multi-select custom field values"
Docstring: "Import a Site in CSV format, including a value for each CustomField." (en · ws 27 · words 12 · vocab 11)
Code:

```python
def test_import(self):
    data = (
        ('name', 'slug', 'status', 'cf_text', 'cf_longtext', 'cf_integer', 'cf_boolean', 'cf_date', 'cf_url', 'cf_json', 'cf_select', 'cf_multiselect'),
        ('Site 1', 'site-1', 'active', 'ABC', 'Foo', '123', 'True', '2020-01-01', 'http://example.com/1', '{"f...
```
---
**`build_pattern`** — repo `XX-Net` (vendored CPython) · file `python3.10.4/Lib/lib2to3/fixes/fix_renames.py` · commit `8198943edd73a363c266633e1aa5b2a9e9c9f526` · https://github.com/XX-net/XX-Net.git
Stats: d_id 55,450 · id 218,720 · n_whitespaces 122 · n_words 37 · n_identifiers 9 · ast_levels 12 · nloc 11 · complexity 3 · token_counts 60 · n_ast_errors 0 · vocab_size 24 · n_ast_nodes 104 · language Python
Commit message: "add python 3.10.4 for windows"
Docstring (a lib2to3 pattern template): "import_name< 'import' (module=%r | dotted_as_names< any* module=%r any* >) > import_from< 'from' module_name=%r 'import' ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >…" (truncated)
Code (the bare `yield %` lines are where the pattern-string literals were stripped):

```python
def build_pattern():
    #bare = set()
    for module, replace in list(MAPPING.items()):
        for old_attr, new_attr in list(replace.items()):
            LOOKUP[(module, old_attr)] = new_attr
            #bare.add(module)
            #bare.add(old_attr)
            #yield % (module, module)
            yield % (m...
```
---
**`async_refresh_sensor`** — repo `core` (Home Assistant) · file `homeassistant/components/plex/sensor.py` · commit `474844744bdd2b0dcba46b82d9d3fcd8e3dbad24` · https://github.com/home-assistant/core.git
Stats: d_id 104,662 · id 305,878 · n_whitespaces 195 · n_words 42 · n_identifiers 18 · ast_levels 12 · nloc 16 · complexity 3 · token_counts 78 · n_ast_errors 0 · vocab_size 33 · n_ast_nodes 132 · language Python
Commit message: "Improve entity type hints [p] (#77871)"
Docstring: "Update state and attributes for the library sensor." (en · ws 7 · words 8 · vocab 8)
Code:

```python
async def async_refresh_sensor(self) -> None:
    _LOGGER.debug("Refreshing library sensor for '%s'", self.name)
    try:
        await self.hass.async_add_executor_job(self._update_state_and_attrs)
        self._attr_available = True
    except NotFound:
        self._attr_available = ...
```
---
**`__hash__`** — repo `transferlearning` (vendored distlib) · file `.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py` · commit `f638f5d0e6c8ebed0e69a6584bc7f003ec646580` · https://github.com/jindongwang/transferlearning.git
Stats: d_id 12,765 · id 61,941 · n_whitespaces 22 · n_words 8 · n_identifiers 6 · ast_levels 10 · nloc 2 · complexity 1 · token_counts 27 · n_ast_errors 0 · vocab_size 7 · n_ast_nodes 46 · language Python
Commit message: "upd; format"
Docstring: "Compute hash in a way which matches the equality test." (en · ws 25 · words 10 · vocab 10)
Code:

```python
def __hash__(self):
    return hash(self.name) + hash(self.version) + hash(self.source_url)
```
---
**`_min_nodes`** — repo `faceswap` · file `plugins/train/model/phaze_a.py` · commit `a99049711f289b435e710d5b15f9c0e45c4251c3` · https://github.com/deepfakes/faceswap.git
Stats: d_id 20,199 · id 100,746 · n_whitespaces 68 · n_words 22 · n_identifiers 7 · ast_levels 12 · nloc 9 · complexity 2 · token_counts 52 · n_ast_errors 0 · vocab_size 18 · n_ast_nodes 91 · language Python
Commit message: "Model updates: increase model summary width; Phaze A updates (update some min/max values; add Decoder Filter Slope Mode; add additional arguments for Upsampling2D; adjust upsampling method for multiple upsamples in FC layers; typing)"
Docstring: "int: The number of nodes for the first Dense. For non g-block layers this will be the given minimum filters multiplied by the dimensions squared. For g-block layers, this is the given value" (en · ws 48 · words 33 · vocab 26)
Code:

```python
def _min_nodes(self) -> int:
    if self._side == "gblock":
        return self._config["fc_gblock_min_nodes"]
    retval = self._scale_filters(self._config["fc_min_filters"])
    retval = int(retval * self._config["fc_dimensions"] ** 2)
    return retval
```
---
**`_get`** — repo `modin` · file `modin/config/envvars.py` · commit `c51ab405efec920dbb4baa2e2389409df04e8d43` · https://github.com/modin-project/modin.git
Stats: d_id 36,242 · id 155,114 · n_whitespaces 75 · n_words 22 · n_identifiers 14 · ast_levels 12 · nloc 15 · complexity 2 · token_counts 55 · n_ast_errors 0 · vocab_size 19 · n_ast_nodes 95 · language Python
Commit message: "FIX-#5187: Fixed RecursionError in OmnisciLaunchParameters.get() (#5199). Signed-off-by: Andrey Pavlenko <andrey.a.pavlenko@gmail.com>"
Docstring: "Get the resulted command-line options. Returns: dict — Decoded and verified config value." (en · ws 60 · words 13 · vocab 13)
Code:

```python
def _get(cls) -> dict:
    custom_parameters = super().get()
    result = cls.default.copy()
    result.update(
        {key.replace("-", "_"): value for key, value in custom_parameters.items()}
    )
    return result
```
---
**`add`** — repo `XX-Net` (vendored CPython) · file `python3.10.4/Lib/graphlib.py` · commit `8198943edd73a363c266633e1aa5b2a9e9c9f526` · https://github.com/XX-net/XX-Net.git
Stats: d_id 54,846 · id 217,597 · n_whitespaces 129 · n_words 47 · n_identifiers 14 · ast_levels 10 · nloc 8 · complexity 3 · token_counts 61 · n_ast_errors 0 · vocab_size 39 · n_ast_nodes 102 · language Python
Commit message: "add python 3.10.4 for windows"
Docstring: "Add a new node and its predecessors to the graph. Both the *node* and all elements in *predecessors* must be hashable. If called multiple times with the same node argument, the set of dependencies will be the union of all dependencies passed in. It is possible…" (truncated)
Code:

```python
def add(self, node, *predecessors):
    if self._ready_nodes is not None:
        raise ValueError("Nodes cannot be added after a call to prepare()")
    # Create the node -> predecessor edges
    nodeinfo = self._get_nodeinfo(node)
    nodeinfo.npredecessors += len(predecessors)
    ...
```
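This row shows the internals of the standard library's `graphlib.TopologicalSorter` (Python 3.9+). Its public use looks like this:

```python
from graphlib import TopologicalSorter

ts = TopologicalSorter()
ts.add("b", "a")        # "b" depends on "a"
ts.add("c", "a", "b")   # "c" depends on "a" and "b"
print(list(ts.static_order()))  # ['a', 'b', 'c']
```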
---
**`test_stream_slices_with_state_and_slices`** — repo `airbyte` · file `airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_insight_streams.py` · commit `a3aae8017a0a40ff2006e2567f71dccb04c997a5` · https://github.com/airbytehq/airbyte.git
Stats: d_id 577 · id 3,840 · n_whitespaces 214 · n_words 87 · n_identifiers 32 · ast_levels 18 · nloc 17 · complexity 1 · token_counts 244 · n_ast_errors 0 · vocab_size 62 · n_ast_nodes 386 · language Python
Commit message: "🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805): Facebook Marketing performance improvement; add comments and little refactoring; fix integration tests with the new config; improve job status handling, limit concurrency to 10; fix campaign jobs, refactor manager; big refactori…" (truncated)
Docstring: "Stream will use cursor_value from state, but will skip saved slices" (en · ws 10 · words 11 · vocab 10)
Code:

```python
def test_stream_slices_with_state_and_slices(self, api, async_manager_mock, start_date):
    end_date = start_date + duration(days=10)
    cursor_value = start_date + duration(days=5)
    state = {
        AdsInsights.cursor_field: cursor_value.date().isoformat(),
        "slices": [(cursor...
```
---
**`_test_readonly_foreignkey_links`** — repo `django` · file `tests/admin_views/tests.py` · commit `9c19aff7c7561e3a82978a272ecdaad40dda5c00` · https://github.com/django/django.git
Stats: d_id 52,095 · id 207,776 · n_whitespaces 434 · n_words 92 · n_identifiers 30 · ast_levels 13 · nloc 35 · complexity 1 · token_counts 181 · n_ast_errors 0 · vocab_size 58 · n_ast_nodes 299 · language Python
Commit message: "Refs #33476 -- Reformatted code with Black."
Docstring: "ForeignKey readonly fields render as links if the target model is registered in admin." (en · ws 36 · words 14 · vocab 14)
Code:

```python
def _test_readonly_foreignkey_links(self, admin_site):
    chapter = Chapter.objects.create(
        title="Chapter 1",
        content="content",
        book=Book.objects.create(name="Book 1"),
    )
    language = Language.objects.create(iso="_40", name="Test")
    obj = ReadOnly...
```
---
**`execute`** — repo `erpnext` · file `erpnext/patches/v12_0/fix_percent_complete_for_projects.py` · commit `494bd9ef78313436f0424b918f200dab8fc7c20b` · https://github.com/frappe/erpnext.git
Stats: d_id 14,247 · id 66,616 · n_whitespaces 38 · n_words 51 · n_identifiers 16 · ast_levels 16 · nloc 14 · complexity 6 · token_counts 132 · n_ast_errors 0 · vocab_size 40 · n_ast_nodes 217 · language Python
Commit message: "style: format code with black"
Docstring (the stripped SQL literal): "select count(name) from tabTask where project=%s and status in ('Cancelled', 'Completed')" (en · ws 9 · words 11 · vocab 11)
Code (the bare `,` is where the SQL docstring literal was stripped):

```python
def execute():
    for project in frappe.get_all("Project", fields=["name", "percent_complete_method"]):
        total = frappe.db.count("Task", dict(project=project.name))
        if project.percent_complete_method == "Task Completion" and total > 0:
            completed = frappe.db.sql(
                ,
                project.name,
            )[0][0]
            percent_complet...
```
---
**`mathematica`** — repo `sympy` · file `sympy/parsing/mathematica.py` · commit `35a158ece2bec4d77d78a193fcafa4dd5fd5f691` · https://github.com/sympy/sympy.git
Stats: d_id 48,268 · id 196,972 · n_whitespaces 114 · n_words 37 · n_identifiers 14 · ast_levels 13 · nloc 11 · complexity 2 · token_counts 62 · n_ast_errors 0 · vocab_size 34 · n_ast_nodes 105 · language Python
Commit message: "Support parsing functions and some more Mathematica nodes. Commented Mathematica code is now parsed correctly."
Docstring: "Translate a string containing a Wolfram Mathematica expression to a SymPy expression. If the translator is unable to find a suitable SymPy expression, the ``FullForm`` of the Mathematica expression will be output, using SymPy ``Function`` objects as nodes of the syntax tree…" (truncated)
Code:

```python
def mathematica(s, additional_translations=None):
    parser = MathematicaParser(additional_translations)
    if additional_translations is not None:
        SymPyDeprecationWarning(
            feature="additional_translations parameter for the Mathematica parser",
            last_supported_version="1.9",
            ...
```
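For context, the translator lives under `sympy.parsing.mathematica`; in releases after this commit the `additional_translations` path shown above was deprecated, and `parse_mathematica` became the preferred entry point. A usage sketch (assuming a reasonably recent SymPy; older releases expose the same behavior as `mathematica(...)`):

```python
from sympy.parsing.mathematica import parse_mathematica
from sympy import simplify

expr = parse_mathematica("Sin[x]^2 + Cos[x]^2")
print(simplify(expr))  # 1
```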
---
**`update_company_current_month_sales`** — repo `erpnext` · file `erpnext/setup/doctype/company/company.py` · commit `494bd9ef78313436f0424b918f200dab8fc7c20b` · https://github.com/frappe/erpnext.git
Stats: d_id 14,535 · id 67,468 · n_whitespaces 18 · n_words 28 · n_identifiers 15 · ast_levels 14 · nloc 22 · complexity 2 · token_counts 80 · n_ast_errors 0 · vocab_size 25 · n_ast_nodes 129 · language Python
Commit message: "style: format code with black"
Docstring (the stripped SQL literal): "SELECT SUM(base_grand_total) AS total, DATE_FORMAT(`posting_date`, '%m-%Y') AS month_year FROM `tabSales Invoice` WHERE DATE_FORMAT(`posting_date`, '%m-%Y') = '{current_month_year}' AND docstatus = 1 AND company = {company} GROUP BY …" (truncated)
Code (`.format(` is called on the stripped SQL docstring literal):

```python
def update_company_current_month_sales(company):
    current_month_year = formatdate(today(), "MM-yyyy")
    results = frappe.db.sql(
        .format(
            current_month_year=current_month_year, company=frappe.db.escape(company)
        ),
        as_dict=True,
    )
    monthly_total = results[0]["total"] if len(results) > 0 else 0
    frappe.db.set...
```
---
**`metadata_version`** — repo `transferlearning` (vendored pip) · file `.venv/lib/python3.8/site-packages/pip/_internal/metadata/base.py` · commit `f638f5d0e6c8ebed0e69a6584bc7f003ec646580` · https://github.com/jindongwang/transferlearning.git
Stats: d_id 12,290 · id 60,778 · n_whitespaces 30 · n_words 9 · n_identifiers 3 · ast_levels 7 · nloc 2 · complexity 1 · token_counts 10 · n_ast_errors 0 · vocab_size 9 · n_ast_nodes 21 · language Python
Commit message: "upd; format"
Docstring: "Value of \"Metadata-Version:\" in the distribution, if available." (en · ws 7 · words 8 · vocab 8)
Code:

```python
def metadata_version(self):
    # type: () -> Optional[str]
    raise NotImplementedError()
```
---
**`call_ca`** — repo `OpenBBTerminal` · file `gamestonk_terminal/stocks/options/screener_controller.py` · commit `006b3570b795215a17c64841110b649b03db9a98` · https://github.com/OpenBB-finance/OpenBBTerminal.git
Stats: d_id 83,683 · id 281,280 · n_whitespaces 92 · n_words 23 · n_identifiers 10 · ast_levels 13 · nloc 7 · complexity 2 · token_counts 42 · n_ast_errors 0 · vocab_size 22 · n_ast_nodes 74 · language Python
Commit message: "Baseclass (#1141): a working decorator; basic intro; added more; refactor; refactor; cleaned code; simplified function (thanks Chavi); small change; updating tests: fix issue with mock; fix remaining mocks after merging; black; cleaned up; …" (truncated)
Docstring: "Call the comparison analysis menu with selected tickers" (en · ws 7 · words 8 · vocab 8)
Code:

```python
def call_ca(self, _):
    if self.screen_tickers:
        self.queue = ca_controller.ComparisonAnalysisController(
            self.screen_tickers, self.queue
        ).menu(custom_path_menu_above="/stocks/")
    else:
        print("Some tickers must be screened first through one of th...
```
---
**`delete_tasks_predictions`** — repo `label-studio` · file `label_studio/data_manager/actions/basic.py` · commit `1c4328c5a8b10ee20ac4328ce30612d106350699` · https://github.com/heartexlabs/label-studio.git
Stats: d_id 42,460 · id 177,607 · n_whitespaces 573 · n_words 191 · n_identifiers 26 · ast_levels 11 · nloc 7 · complexity 1 · token_counts 76 · n_ast_errors 0 · vocab_size 100 · n_ast_nodes 406 · language Python
Commit message: "feat: DEV-1205: Add task.updated_at column (#1784): update task.updated_at on annotation update; fix set updated_at on annotation delete; set update_at for every dm action; stop changing updated_at on actions; update experimental.py. Co-authored-by: Max Tk…" (truncated)
Docstring: "Delete all predictions by tasks ids. :param project: project instance. :param queryset: filtered tasks db queryset" (en · ws 26 · words 16 · vocab 14)
Code:

```python
def delete_tasks_predictions(project, queryset, **kwargs):
    task_ids = queryset.values_list('id', flat=True)
    predictions = Prediction.objects.filter(task__id__in=task_ids)
    count = predictions.count()
    predictions.delete()
    queryset.update(updated_at=datetime.now())
    return {'processed_items': c...
```
---
**`clean_up`** — repo `nni` · file `nni/compression/pytorch/base/scheduler.py` · commit `d68c786ff81bad19c04619d6a999ff34aaa724e7` · https://github.com/microsoft/nni.git
Stats: d_id 24,962 · id 113,577 · n_whitespaces 226 · n_words 42 · n_identifiers 11 · ast_levels 17 · nloc 12 · complexity 5 · token_counts 87 · n_ast_errors 0 · vocab_size 36 · n_ast_nodes 141 · language Python
Commit message: "[Compression] remove pruning v1 & refactor directory (#5228)"
Docstring: "Counter of referenced file paths subtract 1. If the counter reach 0, then delete the file." (en · ws 31 · words 16 · vocab 15)
Code:

```python
def clean_up(self):
    if not self._cleaned:
        for ref in self.referenced_paths():
            self._reference_counter[ref] -= 1
            if self._reference_counter[ref] <= 0:
                os.remove(ref)
                if self._reference_counter[ref] < 0:
                    ...
```
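The docstring describes classic reference counting over checkpoint files: the last holder to release a path deletes it. A minimal standalone sketch of the same idea (class and method names are invented, not NNI's API):

```python
import os
from collections import Counter

class FileRefs:
    def __init__(self):
        self._refs = Counter()

    def acquire(self, path: str) -> None:
        self._refs[path] += 1

    def release(self, path: str) -> None:
        # When the count drops to zero, no task needs the file any more.
        self._refs[path] -= 1
        if self._refs[path] <= 0 and os.path.exists(path):
            os.remove(path)
```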
---
**`load_backend`** — repo `django` · file `django/db/utils.py` · commit `9c19aff7c7561e3a82978a272ecdaad40dda5c00` · https://github.com/django/django.git
Stats: d_id 51,277 · id 205,911 · n_whitespaces 380 · n_words 136 · n_identifiers 22 · ast_levels 17 · nloc 23 · complexity 8 · token_counts 119 · n_ast_errors 0 · vocab_size 100 · n_ast_nodes 211 · language Python
Commit message: "Refs #33476 -- Reformatted code with Black."
Docstring: "Return a database backend's \"base\" module given a fully qualified database backend name, or raise an error if it doesn't exist." (en · ws 31 · words 21 · vocab 19)
Code:

```python
def load_backend(backend_name):
    # This backend was renamed in Django 1.9.
    if backend_name == "django.db.backends.postgresql_psycopg2":
        backend_name = "django.db.backends.postgresql"
    try:
        return import_module("%s.base" % backend_name)
    except ImportError as e_user:
        # The data...
```
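The pattern above — resolving a dotted backend path to its `base` submodule at runtime — is plain `importlib`. A sketch (assumes Django is installed so the example path resolves):

```python
from importlib import import_module

def load_base_module(backend_name: str):
    # Same idiom as load_backend: append ".base" and import dynamically.
    return import_module("%s.base" % backend_name)

base = load_base_module("django.db.backends.sqlite3")
print(base.DatabaseWrapper)  # the backend's connection class
```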
---
**`explode_superdims`** — repo `jax` · file `jax/_src/pjit.py` · commit `4b587fa1f0049db5366fd04812ab940d80a71a22` · https://github.com/google/jax.git
Stats: d_id 27,260 · id 122,870 · n_whitespaces 124 · n_words 69 · n_identifiers 14 · ast_levels 12 · nloc 18 · complexity 4 · token_counts 118 · n_ast_errors 0 · vocab_size 40 · n_ast_nodes 186 · language Python
Commit message: "Move `pjit.py` to `jax/_src` in preparation for merging the `jit` and `pjit` frontend APIs. PiperOrigin-RevId: 495944279"
Docstring: "Explode superdims to fit a known shape. The unflattening process might mistakenly generate too few too large dimensions. For example, ``unflatten_superdims(np.arange(n))`` always returns ``[(n, 1)]``. This function takes a list of such contiguous super-dimensions and splits them into sma…" (truncated)
Code:

```python
def explode_superdims(sizes, dims):
    strides_to_sizes = {stride: size for size, stride in zip(sizes, strides_for_sizes(sizes))}
    dims = list(reversed(dims))
    final_dims = []
    for size, stride in dims:
        target_size = strides_to_sizes[stride]
        new_dims = []
        while size > target_size:
            assert target_s...
```
---
**`path_weight`** — repo `networkx` · file `networkx/classes/function.py` · commit `eb22e121816896ec0664c41a0232e2f80a259b96` · https://github.com/networkx/networkx.git
Stats: d_id 41,886 · id 176,421 · n_whitespaces 100 · n_words 39 · n_identifiers 17 · ast_levels 17 · nloc 11 · complexity 5 · token_counts 94 · n_ast_errors 0 · vocab_size 30 · n_ast_nodes 148 · language Python
Commit message: "Correct typo in docstring (int -> float) (#5398). This is based on https://stackoverflow.com/q/71494698/10693596"
Docstring: "Returns total cost associated with specified path and weight. Parameters: G : graph — A NetworkX graph; path : list — A list of node labels which defines the path to traverse; weight : string — A string indicating which edge attribute to use fo…" (truncated)
Code:

```python
def path_weight(G, path, weight):
    multigraph = G.is_multigraph()
    cost = 0
    if not nx.is_path(G, path):
        raise nx.NetworkXNoPath("path does not exist")
    for node, nbr in nx.utils.pairwise(path):
        if multigraph:
            cost += min(v[weight] for v in G[node][nbr].values())
        el...
```
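`path_weight` is a public NetworkX helper (available since 2.6). A usage sketch:

```python
import networkx as nx

G = nx.Graph()
G.add_edge("a", "b", weight=2.5)
G.add_edge("b", "c", weight=1.0)
print(nx.path_weight(G, ["a", "b", "c"], weight="weight"))  # 3.5
```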
---
**`_run_sql`** — repo `ipython` · file `IPython/core/history.py` · commit `dc5bcc1c50892a5128fcf128af28887226144927` · https://github.com/ipython/ipython.git
Stats: d_id 52,477 · id 208,718 · n_whitespaces 224 · n_words 96 · n_identifiers 18 · ast_levels 12 · nloc 15 · complexity 8 · token_counts 118 · n_ast_errors 0 · vocab_size 68 · n_ast_nodes 188 · language Python
Commit message: "This fixed the mixing of multiple history seen in #13631. It forces get_tail to put the current session last in the returned results."
Docstring: "Prepares and runs an SQL query for the history database. Parameters: sql : str — Any filtering expressions to go after SELECT ... FROM ...; params : tuple — Parameters passed to the SQL query (to replace \"?\"); raw, output :…" (truncated)
Code:

```python
def _run_sql(self, sql, params, raw=True, output=False, latest=False):
    toget = 'source_raw' if raw else 'source'
    sqlfrom = "history"
    if output:
        sqlfrom = "history LEFT JOIN output_history USING (session, line)"
        toget = "history.%s, output_history.output" % toget
    ...
```
---
**`__add__`** — repo `ray` (vendored pathspec) · file `python/ray/_private/thirdparty/pathspec/pathspec.py` · commit `7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065` · https://github.com/ray-project/ray.git
Stats: d_id 29,172 · id 130,247 · n_whitespaces 56 · n_words 13 · n_identifiers 7 · ast_levels 11 · nloc 5 · complexity 2 · token_counts 31 · n_ast_errors 0 · vocab_size 12 · n_ast_nodes 51 · language Python
Commit message: "[CI] Format Python code with Black (#21975). See #21316 and #21311 for the motivation behind these changes."
Docstring: "Combines the :attr:`Pathspec.patterns` patterns from two :class:`PathSpec` instances." (en · ws 30 · words 8 · vocab 8)
Code:

```python
def __add__(self, other):
    if isinstance(other, PathSpec):
        return PathSpec(self.patterns + other.patterns)
    else:
        return NotImplemented
```
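Ray vendors the `pathspec` package here, so the same `__add__` is exercised through upstream `pathspec`. A usage sketch against the upstream library:

```python
import pathspec

ignore = pathspec.PathSpec.from_lines("gitwildmatch", ["*.pyc"])
extra = pathspec.PathSpec.from_lines("gitwildmatch", ["build/"])
combined = ignore + extra  # PathSpec.__add__ concatenates the two pattern lists
print(combined.match_file("build/x.pyc"))  # True
```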
---
**`address`** — repo `ray` · file `python/ray/node.py` · commit `70db5c5592d94b611fee0a334414f1f4f5cc151a` · https://github.com/ray-project/ray.git
Stats: d_id 28,871 · id 129,004 · n_whitespaces 40 · n_words 8 · n_identifiers 5 · ast_levels 8 · nloc 4 · complexity 2 · token_counts 19 · n_ast_errors 0 · vocab_size 7 · n_ast_nodes 34 · language Python
Commit message: "[GCS][Bootstrap n/n] Do not start Redis in GCS bootstrapping mode (#21232). After this change in GCS bootstrapping mode, Redis no longer starts and `address` is treated as the GCS address of the Ray cluster. Co-authored-by: Yi Cheng <chengyidna@gmail.com>; Co-authored-by: Yi Cheng <74173148+iycheng@users.noreply.git…" (truncated)
Docstring: "Get the address for bootstrapping, e.g. the address to pass to `ray start` or `ray.int()` to start worker nodes, that has been converted to ip:port format." (en · ws 47 · words 26 · vocab 21)
Code:

```python
def address(self):
    if use_gcs_for_bootstrap():
        return self._gcs_address
    return self._redis_address
```
---
**`similarity`** — repo `PaddleHub` · file `modules/text/language_model/simnet_bow/module.py` · commit `8468e1ac6cfe165aa1e3cf4f77ab6fb66ce98614` · https://github.com/PaddlePaddle/PaddleHub.git
Stats: d_id 9,863 · id 49,675 · n_whitespaces 619 · n_words 149 · n_identifiers 51 · ast_levels 15 · nloc 42 · complexity 6 · token_counts 363 · n_ast_errors 0 · vocab_size 106 · n_ast_nodes 590 · language Python
Commit message: "Remove fluid api in modules and pkg. (#1906)"
Docstring: "Get the sentiment prediction results results with the texts as input. Args: texts(list): the input texts to be predicted which the first element is text_1(list) and the second element is text_2(list), such as [['这道题很难'], ['这道题不简单']]…" (truncated; the Chinese strings are sample inputs from the dataset row, kept verbatim)
Code:

```python
def similarity(self, texts=[], data={}, use_gpu=False, batch_size=1):
    if use_gpu:
        try:
            _places = os.environ["CUDA_VISIBLE_DEVICES"]
            int(_places[0])
        except:
            raise RuntimeError(
                "Environment Variable CUDA_VISIBLE_...
```
---
**`cli_collect_weights`** — repo `ludwig` · file `ludwig/collect.py` · commit `6ee67ef2d2098d236e06d1d7672d92fc192c55b0` · https://github.com/ludwig-ai/ludwig.git
Stats: d_id 1,484 · id 8,699 · n_whitespaces 267 · n_words 113 · n_identifiers 32 · ast_levels 11 · nloc 29 · complexity 3 · token_counts 202 · n_ast_errors 0 · vocab_size 88 · n_ast_nodes 365 · language Python
Commit message: "[Annotations] Logging Level Registry (#2814): add DeveloperAPI annotations to some utils; [pre-commit.ci] auto fixes from pre-commit.com hooks; remove annotations for private methods; Co-authored-by: pre-commit-ci[b…" (truncated)
Docstring: "Command Line Interface to collecting the weights for the model. --m: Input model that is necessary to collect to the tensors, this is a required *option*; --t: Tensors to collect; --od: Output directory of the model, defaults to results; --v: Verbose: Defines the logging l…" (truncated)
Code:

```python
def cli_collect_weights(sys_argv):
    parser = argparse.ArgumentParser(
        description="This script loads a pretrained model " "and uses it collect weights.",
        prog="ludwig collect_weights",
        usage="%(prog)s [options]",
    )
    # ----------------
    # Model parameters
    # ----------------...
```
---
**`get_user_emails_from_group`** — repo `erpnext` · file `erpnext/hr/doctype/daily_work_summary/daily_work_summary.py` · commit `494bd9ef78313436f0424b918f200dab8fc7c20b` · https://github.com/frappe/erpnext.git
Stats: d_id 14,093 · id 66,051 · n_whitespaces 14 · n_words 20 · n_identifiers 9 · ast_levels 11 · nloc 6 · complexity 2 · token_counts 35 · n_ast_errors 0 · vocab_size 16 · n_ast_nodes 60 · language Python
Commit message: "style: format code with black"
Docstring: "Returns list of email of enabled users from the given group. :param group: Daily Work Summary Group `name`" (en · ws 16 · words 18 · vocab 17)
Code:

```python
def get_user_emails_from_group(group):
    group_doc = group
    if isinstance(group_doc, str):
        group_doc = frappe.get_doc("Daily Work Summary Group", group)
    emails = get_users_email(group_doc)
    return emails
```
---
**`adapt_unknown_value`** — repo `django` · file `django/db/backends/base/operations.py` · commit `9c19aff7c7561e3a82978a272ecdaad40dda5c00` · https://github.com/django/django.git
Stats: d_id 50,936 · id 204,859 · n_whitespaces 129 · n_words 31 · n_identifiers 13 · ast_levels 10 · nloc 11 · complexity 5 · token_counts 80 · n_ast_errors 0 · vocab_size 22 · n_ast_nodes 127 · language Python
Commit message: "Refs #33476 -- Reformatted code with Black."
Docstring: "Transform a value to something compatible with the backend driver. This method only depends on the type of the value. It's designed for cases where the target type isn't known, such as .raw() SQL queries. As a consequence it may not work perfectly in all circumst…" (truncated)
Code:

```python
def adapt_unknown_value(self, value):
    if isinstance(value, datetime.datetime):  # must be before date
        return self.adapt_datetimefield_value(value)
    elif isinstance(value, datetime.date):
        return self.adapt_datefield_value(value)
    elif isinstance(value, datetime.time...
```
---
**`test_get_settings_variable_assignment_request_context`** — repo `wagtail` · file `wagtail/contrib/settings/tests/test_templates.py` · commit `d10f15e55806c6944827d801cd9c2d53f5da4186` · https://github.com/wagtail/wagtail.git
Stats: d_id 16,049 · id 73,532 · n_whitespaces 190 · n_words 54 · n_identifiers 14 · ast_levels 11 · nloc 15 · complexity 1 · token_counts 74 · n_ast_errors 0 · vocab_size 35 · n_ast_nodes 137 · language Python
Commit message: "Reformat with black"
Docstring: "Check that assigning the setting to a context variable with {% get_settings as wagtail_settings %} works." (en · ws 38 · words 16 · vocab 16)
Code:

```python
def test_get_settings_variable_assignment_request_context(self):
    request = self.get_request(site=self.other_site)
    context = Context({"request": request})
    template = Template(
        "{% load wagtailsettings_tags %}"
        "{% get_settings as wagtail_settings %}"
        "...
```
---
**`get_dataset`** — repo `modin` · file `modin/core/io/column_stores/parquet_dispatcher.py` · commit `b240370bf83c88589d293b76b4a2409294e06f90` · https://github.com/modin-project/modin.git
Stats: d_id 35,894 · id 154,275 · n_whitespaces 416 · n_words 109 · n_identifiers 14 · ast_levels 16 · nloc 24 · complexity 6 · token_counts 103 · n_ast_errors 0 · vocab_size 82 · n_ast_nodes 193 · language Python
Commit message: "FEAT-#4733: Support fastparquet as engine for `read_parquet` (#4807). Signed-off-by: Karthik Velayutham <vkarthik@ponder.io>"
Docstring: "Retrieve Parquet engine specific Dataset implementation. Parameters: path : str, path object or file-like object — The filepath of the parquet file in local filesystem or hdfs; engine : str — Parquet library to use (only '…" (truncated)
Code:

```python
def get_dataset(cls, path, engine, storage_options):
    if engine == "auto":
        # We follow in concordance with pandas
        engine_classes = [PyArrowDataset, FastParquetDataset]
        error_msgs = ""
        for engine_class in engine_classes:
            try:
                ...
```
---
**`test_causal_lm_model_past_with_attn_mask`** — repo `transformers` · file `templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py` · commit `8635407bc724c45142c1f91dbc9ef3ea681e1a56` · https://github.com/huggingface/transformers.git
Stats: d_id 6,472 · id 35,529 · n_whitespaces 27 · n_words 6 · n_identifiers 6 · ast_levels 9 · nloc 3 · complexity 1 · token_counts 24 · n_ast_errors 0 · vocab_size 6 · n_ast_nodes 43 · language Python
Commit message: "Fix tf.concatenate + test past_key_values for TF models (#15774): fix wrong method name tf.concatenate; add tests related to causal LM / decoder; make style and quality; clean-up; fix TFBertModel's extended_attention_mask when past_key_values is provided; fix tests; fix copies; more tf.int…" (truncated)
Docstring: "Test the causal LM model with `past_key_values` and `attention_mask`" (en · ws 8 · words 9 · vocab 9)
Code:

```python
def test_causal_lm_model_past_with_attn_mask(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs)
```
---
**`_whatsnd`** — repo `XX-Net` (vendored CPython) · file `python3.10.4/Lib/email/mime/audio.py` · commit `8198943edd73a363c266633e1aa5b2a9e9c9f526` · https://github.com/XX-net/XX-Net.git
Stats: d_id 57,097 · id 223,838 · n_whitespaces 65 · n_words 25 · n_identifiers 11 · ast_levels 12 · nloc 8 · complexity 3 · token_counts 52 · n_ast_errors 0 · vocab_size 21 · n_ast_nodes 83 · language Python
Commit message: "add python 3.10.4 for windows"
Docstring: "Try to identify a sound file type. sndhdr.what() has a pretty cruddy interface, unfortunately. This is why we re-do it here. It would be easier to reverse engineer the Unix 'file' command and use the standard 'magic' file, as shipped with a modern Unix."
Code:

```python
def _whatsnd(data):
    hdr = data[:512]
    fakefile = BytesIO(hdr)
    for testfn in sndhdr.tests:
        res = testfn(hdr, fakefile)
        if res is not None:
            return _sndhdr_MIMEmap.get(res[0])
    return None
```
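The helper works around the stdlib `sndhdr` module (deprecated in Python 3.11, removed in 3.13). The public interface it re-implements looks like this:

```python
import sndhdr  # stdlib on Python <= 3.12

info = sndhdr.what("clip.wav")  # path is a placeholder; returns None if unrecognized
if info is not None:
    print(info.filetype, info.framerate, info.nchannels)
```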
---
**`test_protect_loop_debugger_sleep`** — repo `core` (Home Assistant) · file `tests/util/test_async.py` · commit `5a34feb7de440e0df748c9db500facc72a4c2646` · https://github.com/home-assistant/core.git
Stats: d_id 110,574 · id 311,921 · n_whitespaces 281 · n_words 37 · n_identifiers 13 · ast_levels 15 · nloc 24 · complexity 1 · token_counts 84 · n_ast_errors 0 · vocab_size 31 · n_ast_nodes 148 · language Python
Commit message: "Don't warn on time.sleep injected by the debugger (#65420)"
Docstring: "Test time.sleep injected by the debugger is not reported." (en · ws 8 · words 9 · vocab 9)
Code:

```python
async def test_protect_loop_debugger_sleep(caplog):
    block_async_io.enable()
    with patch(
        "homeassistant.util.async_.extract_stack",
        return_value=[
            Mock(
                filename="/home/paulus/homeassistant/.venv/blah/pydevd.py",
                lineno="23",
                line=...
```
---
**`update_worker_pea_args`** — repo `jina` · file `jina/peapods/pods/__init__.py` · commit `933415bfa1f9eb89f935037014dfed816eb9815d` · https://github.com/jina-ai/jina.git
Stats: d_id 1,761 · id 9,894 · n_whitespaces 19 · n_words 5 · n_identifiers 5 · ast_levels 9 · nloc 2 · complexity 1 · token_counts 21 · n_ast_errors 0 · vocab_size 5 · n_ast_nodes 38 · language Python
Commit message: "feat: star routing (#3900): feat(proto): adjust proto for star routing (#3844); feat(proto): generate proto files; feat(grpc): refactor grpclet interface (#3846); feat: refactor connection pool for star routing (#3872); feat(k8s): add more labels to k8s…" (truncated)
Docstring: "Update args of all its worker peas based on Pod args. Does not touch head and tail" (en · ws 17 · words 17 · vocab 17)
Code:

```python
def update_worker_pea_args(self):
    self.peas_args['peas'] = self._set_peas_args(self.args)
```
---
**`prevprime`** — repo `sympy` · file `sympy/ntheory/generate.py` · commit `e0dc14eca132f37c5f49369eb4051eae37c9b119` · https://github.com/sympy/sympy.git
Stats: d_id 48,297 · id 197,040 · n_whitespaces 261 · n_words 88 · n_identifiers 11 · ast_levels 11 · nloc 27 · complexity 10 · token_counts 154 · n_ast_errors 0 · vocab_size 45 · n_ast_nodes 248 · language Python
Commit message: "Refactored import ordering in functions"
Docstring: "Return the largest prime smaller than n. Notes: Potential primes are located at 6*j +/- 1. This property is used during searching. >>> from sympy import prevprime >>> [(i, prevprime(i)) for i in range(10, 15)] [(10, 7), (11, 7)…" (truncated)
Code:

```python
def prevprime(n):
    n = _as_int_ceiling(n)
    if n < 3:
        raise ValueError("no preceding primes")
    if n < 8:
        return {3: 2, 4: 3, 5: 3, 6: 5, 7: 5}[n]
    if n <= sieve._list[-1]:
        l, u = sieve.search(n)
        if l == u:
            return sieve[l-1]
        else:
            return sie...
```
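A usage sketch matching the doctest fragment in the row's docstring:

```python
from sympy import prevprime

print(prevprime(100))  # 97
print([(i, prevprime(i)) for i in range(10, 15)])
# [(10, 7), (11, 7), (12, 11), (13, 11), (14, 13)]
```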
---
**`test_duplicate_statistics_handle_integrity_error`** — repo `core` (Home Assistant) · file `tests/components/recorder/test_statistics.py` · commit `dd20a7ea62fc003748c5f0cf99be25c69c9b5a05` · https://github.com/home-assistant/core.git
Stats: d_id 106,525 · id 307,759 · n_whitespaces 447 · n_words 117 · n_identifiers 32 · ast_levels 14 · nloc 50 · complexity 1 · token_counts 224 · n_ast_errors 0 · vocab_size 79 · n_ast_nodes 387 · language Python
Commit message: "Display statistics in the source's unit (#78031)"
Docstring: "Test the recorder does not blow up if statistics is duplicated." (en · ws 10 · words 11 · vocab 11)
Code:

```python
def test_duplicate_statistics_handle_integrity_error(hass_recorder, caplog):
    hass = hass_recorder()
    wait_recording_done(hass)
    period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
    period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00"))
    external_energy_metadat...
```
---
**`testRequestResourcesRaceConditionWithResourceDemands`** — repo `ray` · file `python/ray/tests/test_resource_demand_scheduler.py` · commit `7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065` · https://github.com/ray-project/ray.git
Stats: d_id 29,587 · id 131,786 · n_whitespaces 799 · n_words 130 · n_identifiers 35 · ast_levels 14 · nloc 61 · complexity 3 · token_counts 310 · n_ast_errors 0 · vocab_size 78 · n_ast_nodes 521 · language Python
Commit message: "[CI] Format Python code with Black (#21975). See #21316 and #21311 for the motivation behind these changes."
Docstring: "Test request_resources() with resource_demands. Tests when request_resources() is called simultaneously with resource demands in multiple orders." (en · ws 37 · words 16 · vocab 14)
Code:

```python
def testRequestResourcesRaceConditionWithResourceDemands(self):
    config = copy.deepcopy(MULTI_WORKER_CLUSTER)
    config["available_node_types"].update(
        {
            "empty_node": {
                "node_config": {},
                "resources": {"CPU": 2, "GPU": 1},
                ...
```
---
**`test_sitemap_unpublished_titles`** — repo `django-cms` · file `cms/tests/test_sitemap.py` · commit `c1290c9ff89cb00caa5469129fd527e9d82cd820` · https://github.com/django-cms/django-cms.git
Stats: d_id 17,397 · id 82,431 · n_whitespaces 279 · n_words 56 · n_identifiers 30 · ast_levels 17 · nloc 21 · complexity 6 · token_counts 167 · n_ast_errors 0 · vocab_size 38 · n_ast_nodes 308 · language Python
Commit message: "ci: Added codespell (#7355). Co-authored-by: Christian Clauss <cclauss@me.com>; ci: codespell config taken from #7292"
Docstring: "Check that titles attached to unpublished pages are not in the urlset. As titles are 'published' depending on their attached page, we create a set of unpublished titles by checking titles attached to the draft and public version of each page"
Code:

```python
def test_sitemap_unpublished_titles(self):
    sitemap = CMSSitemap()
    locations = []
    urlset = sitemap.get_urls()
    unpublished_titles = set()
    for item in urlset:
        locations.append(item['location'])
    for page in Page.objects.drafts():
        if page.get_p...
```
---
**`mixin_gateway_parser`** — repo `jina` · file `jina/parsers/orchestrate/runtimes/remote.py` · commit `cdaf7f87ececf9e13b517379ca183b17f0d7b007` · https://github.com/jina-ai/jina.git
Stats: d_id 2,555 · id 13,120 · n_whitespaces 543 · n_words 160 · n_identifiers 22 · ast_levels 10 · nloc 87 · complexity 1 · token_counts 237 · n_ast_errors 0 · vocab_size 108 · n_ast_nodes 404 · language Python
Commit message: "feat: allow passing custom gateway in Flow (#5189)"
Docstring: "Add the options for remote expose at the Gateway. :param parser: the parser. The config of the gateway, it could be one of the followings: * the string literal of an Gateway class name * a Gateway YAML file (.yml, .yaml, .jaml) * a docker image (must start …" (truncated)
Code (the bare `help=,` is where the help-text docstring literal was stripped):

```python
def mixin_gateway_parser(parser):
    gp = add_arg_group(parser, title='Gateway')
    _add_host(gp)
    _add_proxy(gp)
    gp.add_argument(
        '--uses',
        type=str,
        default=None,
        # TODO: add Jina Hub Gateway
        help=,
    )
    gp.add_argument(
        '--uses-with',
        actio...
```
---
**`_format_changelog`** — repo `XX-Net` (vendored CPython) · file `python3.10.4/Lib/distutils/command/bdist_rpm.py` · commit `8198943edd73a363c266633e1aa5b2a9e9c9f526` · https://github.com/XX-net/XX-Net.git
Stats: d_id 56,687 · id 222,649 · n_whitespaces 211 · n_words 50 · n_identifiers 9 · ast_levels 14 · nloc 15 · complexity 6 · token_counts 95 · n_ast_errors 0 · vocab_size 40 · n_ast_nodes 165 · language Python
Commit message: "add python 3.10.4 for windows"
Docstring: "Format the changelog correctly and convert it to a list of strings" (en · ws 19 · words 12 · vocab 12)
Code:

```python
def _format_changelog(self, changelog):
    if not changelog:
        return changelog
    new_changelog = []
    for line in changelog.strip().split('\n'):
        line = line.strip()
        if line[0] == '*':
            new_changelog.extend(['', line])
        elif line[0] =...
```
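The row's snippet is cut mid-branch; in CPython's distutils the loop also keeps `-` lines as-is, indents everything else by two spaces, and finally strips the leading blank line. A standalone sketch of that logic (outside the command class):

```python
def format_changelog(changelog: str) -> list:
    if not changelog:
        return changelog
    new = []
    for line in changelog.strip().split("\n"):
        line = line.strip()
        if line[0] == "*":
            new.extend(["", line])   # blank line before each '*' entry
        elif line[0] == "-":
            new.append(line)
        else:
            new.append("  " + line)
    if not new[0]:
        del new[0]  # drop the blank inserted before the first entry
    return new

print(format_changelog("* Mon Jan 1 2020 Jane <j@x> 1.0-1\n- first fix\nextra note"))
```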
---
**`_get_time`** — repo `textual` · file `src/textual/_animator.py` · commit `15df75919744fbea824bbf029cfb56029a3d0dc8` · https://github.com/Textualize/textual.git
Stats: d_id 44,221 · id 183,499 · n_whitespaces 55 · n_words 27 · n_identifiers 5 · ast_levels 8 · nloc 3 · complexity 1 · token_counts 16 · n_ast_errors 0 · vocab_size 26 · n_ast_nodes 31 · language Python
Commit message: "[App] Finally, time mocking in tests seems to be working! 😅 I had to add a flag in the `_timer` module that allows us to completely disable the \"skip\" feature of Timers, though - but it shouldn't cause too much trouble 🤞"
Docstring: "Get the current wall clock time, via the internal Timer." (en · ws 9 · words 10 · vocab 9)
Code:

```python
def _get_time(self) -> float:
    # N.B. We could remove this method and always call `self._timer.get_time()` internally,
    # but it's handy to have in mocking situations
    return self._timer.get_time()
```
---
**`_generate`** — repo `tpot` · file `tpot/base.py` · commit `388616b6247ca4ea8de4e2f340d6206aee523541` · https://github.com/EpistasisLab/tpot.git
Stats: d_id 43,607 · id 181,829 · n_whitespaces 683 · n_words 131 · n_identifiers 34 · ast_levels 19 · nloc 35 · complexity 8 · token_counts 221 · n_ast_errors 0 · vocab_size 83 · n_ast_nodes 357 · language Python
Commit message: "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\". This reverts commit bd9629c40e01241766197119b581a99409b07068."
Docstring: "Generate a Tree as a list of lists. The tree is build from the root to the leaves, and it stop growing when the condition is fulfilled. Parameters: pset: PrimitiveSetTyped — Primitive set from which primitives are selected…" (truncated)
Code:

```python
def _generate(self, pset, min_, max_, condition, type_=None):
    if type_ is None:
        type_ = pset.ret
    expr = []
    height = np.random.randint(min_, max_)
    stack = [(0, type_)]
    while len(stack) != 0:
        depth, type_ = stack.pop()
        # We've added a t...
```
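The generator above is the DEAP-style stack-driven tree builder: pop a `(depth, type)` frame, emit a terminal when `condition` fires, otherwise emit a primitive and push its argument types. A minimal untyped sketch of the same control flow (names invented; binary nodes only):

```python
import random

def generate(min_, max_, condition, arity=2):
    height = random.randint(min_, max_)
    expr, stack = [], [0]
    while stack:
        depth = stack.pop()
        if condition(height, depth):
            expr.append(f"leaf@{depth}")   # terminal node
        else:
            expr.append(f"node@{depth}")   # primitive; expand its children
            stack.extend([depth + 1] * arity)
    return expr

print(generate(2, 3, lambda h, d: d >= h))  # stops growing at the sampled height
```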
@pytest.mark.parametrize("kwargs", [{"min_frequency": 21, "max_categories": 1}]) | 75,664 | 259,230 | 484 | sklearn/preprocessing/tests/test_encoders.py | 252 | 31 | def test_ohe_infrequent_multiple_categories_dtypes():
pd = pytest.importorskip("pandas")
X = pd.DataFrame(
{
"str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"],
"int": [5, 3, 0, 10, 10, 12, 0, 3, 5],
},
columns=["str", "int"],
)
ohe = OneHotEncode... | ENH Adds infrequent categories to OneHotEncoder (#16018)
* ENH Completely adds infrequent categories
* STY Linting
* STY Linting
* DOC Improves wording
* DOC Lint
* BUG Fixes
* CLN Address comments
* CLN Address comments
* DOC Uses math to description float min_frequency
* DOC Adds comment r... | test_ohe_infrequent_multiple_categories_dtypes | 7f0006c8aad1a09621ad19c3db19c3ff0555a183 | scikit-learn | test_encoders.py | 12 | 46 | https://github.com/scikit-learn/scikit-learn.git | 1 | 510 | 1 | 119 | 782 | Python | {
"docstring": "Test infrequent categories with a pandas dataframe with multiple dtypes.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def test_ohe_infrequent_multiple_categories_dtypes():
pd = pytest.importorskip("pandas")
X = pd.DataFrame(
{
"str": ["a", "f", "c", "f", "f", "a", "c", "b", "b"],
"int": [5, 3, 0, 10, 10, 12, 0, 3, 5],
},
columns=["str", "int"],
)
ohe = OneHotEncode... |
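The truncated test above exercises infrequent-category grouping; a minimal usage sketch of the feature itself (parameter names per scikit-learn >= 1.1, `sparse_output` per >= 1.2 — the data here is illustrative, not the test's):

import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["a"], ["a"], ["a"], ["b"], ["c"]])
enc = OneHotEncoder(min_frequency=2, handle_unknown="infrequent_if_exist",
                    sparse_output=False)
enc.fit(X)
print(enc.infrequent_categories_)   # [array(['b', 'c'], dtype=object)] -- grouped
print(enc.transform([["b"]]))       # [[0. 1.]] -- the shared "infrequent" column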
7,261 | 39,805 | 157 | dash/development/base_component.py | 32 | 15 | def _set_random_id(self):
if getattr(self, "persistence", False):
raise RuntimeError(
)
if "dash_snapshots" in sys.modules:
raise RuntimeError(
)
if not hasattr(self, "id"):
v = str(uuid.UUID(int=rd.ra... | error when autogenerated IDs are used with persistence or snapshots
also give set_random_id a leading underscore so it doesn't
need to become a reserved word (disallowed prop name) | _set_random_id | 41e322bd17bcbaa34e315b27b8f33f07e6671142 | dash | base_component.py | 16 | 26 | https://github.com/plotly/dash.git | 4 | 78 | 0 | 26 | 133 | Python | {
"docstring": "\n Attempting to use an auto-generated ID with the `persistence` prop.\n This is prohibited because persistence is tied to component IDs and\n auto-generated IDs can easily change.\n\n Please assign an explicit ID to this component.\n ... | def _set_random_id(self):
if getattr(self, "persistence", False):
raise RuntimeError(
)
if "dash_snapshots" in sys.modules:
raise RuntimeError(
)
if not hasattr(self, "id"):
v = str(uuid.UUID(int=rd.ra... | |
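The cut ends while building an ID via `uuid.UUID(int=rd.ra...)`. One way to mint such an auto-ID, assuming `rd` is a module-level `random.Random` instance (the exact dash call may differ):

import random
import uuid

rd = random.Random(0)   # assumption: a private, optionally seeded Random
auto_id = str(uuid.UUID(int=rd.getrandbits(128)))
print(auto_id)          # deterministic for a fixed seed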
7,004 | 38,638 | 36 | src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py | 15 | 3 | def final():
head = []
head.append(("layernorm.weight", "norm.weight"))
head.append(("layernorm.bias", "norm.bias"))
head.append(("cl | Add CvT (#17299)
* Adding cvt files
* Adding cvt files
* changes in init file
* Adding cvt files
* changes in init file
* Style fixes
* Address comments from code review
* Apply suggestions from code review
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
* Format l... | final | adc0ff25028d29af30386f2d7d3f85e290fbef57 | transformers | convert_cvt_original_pytorch_checkpoint_to_pytorch.py | 9 | 7 | https://github.com/huggingface/transformers.git | 1 | 51 | 0 | 14 | 98 | Python | {
"docstring": "\n Function helps in renaming final classification layer\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | def final():
head = []
head.append(("layernorm.weight", "norm.weight"))
head.append(("layernorm.bias", "norm.bias"))
head.append(("classifier.weight", "head.weight"))
head.append(("classifier.bias", "head.bias"))
return head
| |
75,191 | 258,142 | 50 | test/document_stores/test_sql.py | 15 | 9 | def test_delete_index(self, ds, documents):
ds.write_documents(documents, index="custom_index")
assert ds.get_document_count(index="custom_index") == len(documents)
ds.delete_index(index="custom_index")
assert ds.get_document_count(index="custom_index") == 0
| feat: add SQLDocumentStore tests (#3517)
* port SQL tests
* cleanup document_store_tests.py from sql tests
* leftover
* Update .github/workflows/tests.yml
Co-authored-by: Sara Zan <sara.zanzottera@deepset.ai>
* review comments
* Update test/document_stores/test_base.py
Co-authored-by: bogdankostic... | test_delete_index | 2bb81331b75aec68de0d45c4cb116170d265f1fe | haystack | test_sql.py | 10 | 5 | https://github.com/deepset-ai/haystack.git | 1 | 53 | 0 | 12 | 92 | Python | {
"docstring": "Contrary to other Document Stores, SQLDocumentStore doesn't raise if the index is empty",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def test_delete_index(self, ds, documents):
ds.write_documents(documents, index="custom_index")
assert ds.get_document_count(index="custom_index") == len(documents)
ds.delete_index(index="custom_index")
assert ds.get_document_count(index="custom_index") == 0
| |
@frappe.whitelist() | 14,118 | 66,161 | 25 | erpnext/hr/doctype/leave_application/leave_application.py | 44 | 20 | def add_holidays(events, start, end, employee, company):
applicable_holiday_list = get_holiday_list_for_employee(employee, company)
if not applicable_holiday_list:
return
for holiday in | style: format code with black | add_holidays | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | leave_application.py | 16 | 19 | https://github.com/frappe/erpnext.git | 3 | 96 | 1 | 41 | 165 | Python | {
"docstring": "select name, holiday_date, description\n\t\tfrom `tabHoliday` where parent=%s and holiday_date between %s and %s",
"language": "en",
"n_whitespaces": 12,
"n_words": 14,
"vocab_size": 12
} | def add_holidays(events, start, end, employee, company):
applicable_holiday_list = get_holiday_list_for_employee(employee, company)
if not applicable_holiday_list:
return
for holiday in frappe.db.sql(
,
(applicable_holiday_list, start, end),
as_dict=True,
):
events.append(
{
"doctype": "Holiday",
... |
42,899 | 179,091 | 317 | xlib/api/win32/dshow/helper.py | 82 | 38 | def get_video_input_devices_names() -> List[str]:
# based on https://docs.microsoft.com/ru-ru/windows/win32/directshow/selecting-a-capture-device
names = []
sys_dev_enum = strmif.ICreateDevEnum()
if ole32.CoCreateInstance(uuids.CLSID_SystemDeviceEnum, None, ole32.CLSCTX.CLSCTX_INPROC_SERVER, strmi... | update xlib.api.win32 | get_video_input_devices_names | 2be32787538f1b0ef83f648ee60d2d4d4868d3fd | DeepFaceLive | helper.py | 21 | 25 | https://github.com/iperov/DeepFaceLive.git | 7 | 230 | 0 | 55 | 363 | Python | {
"docstring": "\n returns a list of available names of VideoInputDevice's\n\n ole32 should be initialized before use\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 14,
"vocab_size": 13
} | def get_video_input_devices_names() -> List[str]:
# based on https://docs.microsoft.com/ru-ru/windows/win32/directshow/selecting-a-capture-device
names = []
sys_dev_enum = strmif.ICreateDevEnum()
if ole32.CoCreateInstance(uuids.CLSID_SystemDeviceEnum, None, ole32.CLSCTX.CLSCTX_INPROC_SERVER, strmi... | |
5,565 | 30,421 | 15 | spotdl/utils/console.py | 6 | 4 | def check_for_updates():
version_message = get_update_status()
print(version_message)
| moved console actions to a new file | check_for_updates | deca40c2e26afed62e1f9ec4be14aff9e125929b | spotify-downloader | console.py | 8 | 3 | https://github.com/spotDL/spotify-downloader.git | 1 | 14 | 0 | 6 | 28 | Python | {
"docstring": "\n Check for updates to the current version.\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | def check_for_updates():
version_message = get_update_status()
print(version_message)
| |
75,544 | 259,052 | 491 | sklearn/preprocessing/_polynomial.py | 101 | 24 | def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None):
if knots == "quantile":
percentiles = 100 * np.linspace(
start=0, stop=1, num=n_knots, dtype=np.float64
)
if sample_weight is None:
knots = np.percentil... | MNT Clean fixes and compat for old versions of our dependencies (#22642)
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> | _get_base_knot_positions | 34f9dbf54164e3c62d68765fe45f27f067a45562 | scikit-learn | _polynomial.py | 16 | 26 | https://github.com/scikit-learn/scikit-learn.git | 5 | 172 | 0 | 72 | 259 | Python | {
"docstring": "Calculate base knot positions.\n\n Base knots such that first knot <= feature <= last knot. For the\n B-spline construction with scipy.interpolate.BSpline, 2*degree knots\n beyond the base interval are added.\n\n Returns\n -------\n knots : ndarray of shape (n... | def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None):
if knots == "quantile":
percentiles = 100 * np.linspace(
start=0, stop=1, num=n_knots, dtype=np.float64
)
if sample_weight is None:
knots = np.percentil... | |
81,090 | 273,161 | 102 | keras/layers/preprocessing/index_lookup.py | 27 | 16 | def _num_tokens(self, data):
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _num_tokens | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | index_lookup.py | 13 | 9 | https://github.com/keras-team/keras.git | 3 | 71 | 0 | 20 | 113 | Python | {
"docstring": "Count the number of tokens in a ragged, sparse or dense tensor.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def _num_tokens(self, data):
if tf_utils.is_sparse(data):
flat_values = data.values
elif tf_utils.is_ragged(data):
flat_values = data.flat_values
else:
flat_values = tf.reshape(data, [-1])
tokens, _, counts = tf.unique_with_counts(flat_values,... | |
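A self-contained sketch of the counting step the method performs once the input is flattened (ragged case shown; `tf.unique_with_counts` returns values, indices and counts):

import tensorflow as tf

data = tf.ragged.constant([[1, 2, 2], [3]])
flat_values = data.flat_values                   # dense 1-D view of the values
tokens, _, counts = tf.unique_with_counts(flat_values)
print(tokens.numpy(), counts.numpy())            # [1 2 3] [1 2 1]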
@pytest.fixture(name="awair_offline", scope="session") | 102,571 | 303,762 | 11 | tests/components/awair/conftest.py | 6 | 8 | def no_devicess_fixture():
return jso | Add Awair Local API support (#75535) | no_devicess_fixture | ebbff7b60e43f17d65ead811d314602b9daddfc4 | core | conftest.py | 10 | 2 | https://github.com/home-assistant/core.git | 1 | 15 | 1 | 6 | 54 | Python | {
"docstring": "Fixture representing when no devices are found in Awair's cloud API.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def no_devicess_fixture():
return json.loads(load_fixture("awair/no_devices.json"))
@pytest.fixture(name="awair_offline", scope="session") |
@pytest.mark.parametrize(
"values, exp_any, exp_all, exp_any_noskip, exp_all_noskip",
[
([True, pd.NA], True, True, True, pd.NA),
([False, pd.NA], False, False, pd.NA, False),
([pd.NA], False, True, pd.NA, pd.NA),
([], False, True, False, True),
# GH-33253: all True / all... | 39,876 | 166,944 | 155 | pandas/tests/arrays/boolean/test_reduction.py | 76 | 10 | def data():
return pd.array(
[True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],
dtyp | DOC: Added docstrings to fixtures defined in array module (#47211) | data | 89be1f053b695c4ce1c0569f737caf3f03c12128 | pandas | test_reduction.py | 13 | 5 | https://github.com/pandas-dev/pandas.git | 1 | 49 | 1 | 44 | 223 | Python | {
"docstring": "Fixture returning boolean array, with valid and missing values.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def data():
return pd.array(
[True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],
dtype="boolean",
)
@pytest.mark.parametrize(
"values, exp_any, exp_all, exp_any_noskip, exp_all_noskip",
[
([True, pd.NA], True, True, True, pd.NA),
([False, ... |
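The parametrize table copied into the cut encodes Kleene logic for the nullable boolean dtype; its first row, worked as a quick check:

import pandas as pd

arr = pd.array([True, pd.NA], dtype="boolean")
print(arr.any(), arr.all())                          # True True   (NA skipped)
print(arr.any(skipna=False), arr.all(skipna=False))  # True <NA>   (Kleene logic)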
20,477 | 101,038 | 92 | scripts/train.py | 19 | 8 | def should_toggle_mask(self) -> bool:
with self._lock:
retval = self._toggle_mask
if retval:
logger.debug("Sending toggle mask")
self._toggle_mask = False
| Live Preview - Replace cv2 with matplotlib viewer | should_toggle_mask | 7b9fc0454d982a2425ec44e90e5b05a87d149953 | faceswap | train.py | 12 | 14 | https://github.com/deepfakes/faceswap.git | 2 | 34 | 0 | 16 | 62 | Python | {
"docstring": " Check whether the mask should be toggled and return the value. If ``True`` is returned\n then resets :attr:`_toggle_mask` back to ``False``\n\n Returns\n -------\n bool\n ``True`` if the mask should be toggled otherwise ``False``. ",
"language": "en",
"n_whi... | def should_toggle_mask(self) -> bool:
with self._lock:
retval = self._toggle_mask
if retval:
logger.debug("Sending toggle mask")
self._toggle_mask = False
return retval
| |
75,964 | 259,877 | 106 | examples/linear_model/plot_tweedie_regression_insurance_claims.py | 57 | 27 | def load_mtpl2(n_samples=100000):
# freMTPL2freq dataset | ENH improve ARFF parser using pandas (#21938)
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
Co-authored-by: Olivier Grisel <olivier.grisel@gmail.com>
Co-authored-by: Adrin Jalali <adrin.jalali@gmail.com> | load_mtpl2 | a47d569e670fd4102af37c3165c9b1ddf6fd3005 | scikit-learn | plot_tweedie_regression_insurance_claims.py | 12 | 11 | https://github.com/scikit-learn/scikit-learn.git | 2 | 145 | 0 | 43 | 242 | Python | {
"docstring": "Fetch the French Motor Third-Party Liability Claims dataset.\n\n Parameters\n ----------\n n_samples: int, default=100000\n number of samples to select (for faster run time). Full dataset has\n 678013 samples.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 27,
"vo... | def load_mtpl2(n_samples=100000):
# freMTPL2freq dataset from https://www.openml.org/d/41214
df_freq = fetch_openml(data_id=41214, as_frame=True, parser="pandas").data
df_freq["IDpol"] = df_freq["IDpol"].astype(int)
df_freq.set_index("IDpol", inplace=True)
# freMTPL2sev dataset from https://ww... | |
20,043 | 100,579 | 100 | lib/gpu_stats/nvidia.py | 32 | 10 | def _get_driver(self) -> str:
try:
driver = pynvml.nvmlSystemGetDriverVersion().decode("utf-8")
except pynvml.NVMLError as err:
self._log("debug", f"Unable to obtain driver. Original error: {str(err)}")
driver = "No Nvidia driver found"
self._log("deb... | Refactor lib.gpu_stats (#1218)
* inital gpu_stats refactor
* Add dummy CPU Backend
* Update Sphinx documentation | _get_driver | bdbbad4d310fb606b6f412aa81e9f57ccd994e97 | faceswap | nvidia.py | 14 | 15 | https://github.com/deepfakes/faceswap.git | 2 | 52 | 0 | 27 | 109 | Python | {
"docstring": " Obtain the Nvidia driver version currently in use.\n\n Returns\n -------\n str\n The current GPU driver version\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 16,
"vocab_size": 14
} | def _get_driver(self) -> str:
try:
driver = pynvml.nvmlSystemGetDriverVersion().decode("utf-8")
except pynvml.NVMLError as err:
self._log("debug", f"Unable to obtain driver. Original error: {str(err)}")
driver = "No Nvidia driver found"
self._log("deb... | |
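A standalone sketch of the query the method wraps — `nvmlInit` must run first, and older pynvml builds return `bytes` (hence the `.decode` above); this only succeeds on a machine with an NVIDIA driver:

import pynvml

pynvml.nvmlInit()
try:
    driver = pynvml.nvmlSystemGetDriverVersion()
    if isinstance(driver, bytes):      # older pynvml returns bytes
        driver = driver.decode("utf-8")
    print(driver)
finally:
    pynvml.nvmlShutdown()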
50,862 | 204,734 | 98 | django/core/serializers/__init__.py | 29 | 9 | def _load_serializers():
global _serializers
serializers = {}
for forma | Refs #33476 -- Reformatted code with Black. | _load_serializers | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | __init__.py | 13 | 11 | https://github.com/django/django.git | 4 | 58 | 0 | 22 | 91 | Python | {
"docstring": "\n Register built-in and settings-defined serializers. This is done lazily so\n that user code has a chance to (e.g.) set up custom settings without\n needing to be careful of import order.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 30,
"vocab_size": 29
} | def _load_serializers():
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_se... | |
53,626 | 213,075 | 66 | samtranslator/utils/py27hash_fix.py | 12 | 6 | def pop(self):
if self.keyorder:
value = self.keys()[0]
self.remove(value)
return value
return N | fix: Py27hash fix (#2182)
* Add third party py27hash code
* Add Py27UniStr and unit tests
* Add py27hash_fix utils and tests
* Add to_py27_compatible_template and tests
* Apply py27hash fix to wherever it is needed
* Apply py27hash fix, all tests pass except api_with_any_method_in_swagger
* apply py2... | pop | a5db070f446b7cfebdaa6ad2e3dcf78f6105a272 | serverless-application-model | py27hash_fix.py | 11 | 6 | https://github.com/aws/serverless-application-model.git | 2 | 31 | 0 | 10 | 53 | Python | {
"docstring": "\n Pops the top element from the sorted keys if it exists. Returns None otherwise.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 13
} | def pop(self):
if self.keyorder:
value = self.keys()[0]
self.remove(value)
return value
return None
| |
40,862 | 173,550 | 349 | magenta/models/onsets_frames_transcription/infer_util.py | 167 | 29 | def probs_to_pianoroll_viterbi(frame_probs, onset_probs, alpha=0.5):
n, d = onset_probs.shape
loss_matrix = np.zeros([n, d, 2], dtype=float)
path_matrix = np.zeros([n, d, 2], dtype=bool)
frame_losses = (1 - alpha) * -np.log(np.stack([1 - frame_probs,
frame_p... | [NumPy] Remove references to deprecated NumPy type aliases.
This change replaces references to a number of deprecated NumPy type aliases (np.bool, np.int, np.float, np.complex, np.object, np.str) with their recommended replacement (bool, int, float, complex, object, str).
NumPy 1.24 drops the deprecated aliases, so w... | probs_to_pianoroll_viterbi | 52828dc160781f422e670d414406ffe91c30066b | magenta | infer_util.py | 14 | 28 | https://github.com/magenta/magenta.git | 3 | 454 | 0 | 75 | 649 | Python | {
"docstring": "Viterbi decoding of frame & onset probabilities to pianoroll.\n\n Args:\n frame_probs: A numpy array (num-frames-by-num-pitches) of frame\n probabilities.\n onset_probs: A numpy array (num-frames-by-num-pitches) of onset\n probabilities.\n alpha: Relative weight of onset and frame ... | def probs_to_pianoroll_viterbi(frame_probs, onset_probs, alpha=0.5):
n, d = onset_probs.shape
loss_matrix = np.zeros([n, d, 2], dtype=float)
path_matrix = np.zeros([n, d, 2], dtype=bool)
frame_losses = (1 - alpha) * -np.log(np.stack([1 - frame_probs,
frame_p... | |
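The cut stops while building the blended negative-log losses. A single-pitch, hedged sketch of the decoding the docstring describes — two states per frame, with the onset loss charged on the assumed off-to-on transition (the real function vectorises this over all pitches):

import numpy as np

def viterbi_on_off(frame_probs, onset_probs, alpha=0.5):
    n = len(frame_probs)
    frame_loss = (1 - alpha) * -np.log(np.stack([1 - frame_probs, frame_probs], axis=1))
    onset_loss = alpha * -np.log(np.stack([1 - onset_probs, onset_probs], axis=1))
    loss = np.zeros((n, 2))
    back = np.zeros((n, 2), dtype=int)
    loss[0] = frame_loss[0] + onset_loss[0]
    for t in range(1, n):
        for s in (0, 1):
            # an onset is expected only when switching from off (0) to on (1)
            cand = [loss[t - 1, p] + onset_loss[t, int(p == 0 and s == 1)]
                    for p in (0, 1)]
            back[t, s] = int(np.argmin(cand))
            loss[t, s] = frame_loss[t, s] + min(cand)
    path = np.empty(n, dtype=int)                    # backtrace the best path
    path[-1] = int(np.argmin(loss[-1]))
    for t in range(n - 2, -1, -1):
        path[t] = back[t + 1, path[t + 1]]
    return path

print(viterbi_on_off(np.array([0.1, 0.9, 0.9]), np.array([0.1, 0.8, 0.1])))  # [0 1 1]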
4,129 | 22,040 | 41 | pipenv/patched/pip/_vendor/requests/_internal_utils.py | 15 | 7 | def to_native_string(string, encoding="ascii"):
if isinstance(string, builtin_str):
out = string
else:
out = string.decode(encodin | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | to_native_string | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | _internal_utils.py | 11 | 6 | https://github.com/pypa/pipenv.git | 2 | 33 | 0 | 12 | 57 | Python | {
"docstring": "Given a string object, regardless of type, returns a representation of\n that string in the native string type, encoding and decoding where\n necessary. This assumes ASCII unless told otherwise.\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 29,
"vocab_size": 24
} | def to_native_string(string, encoding="ascii"):
if isinstance(string, builtin_str):
out = string
else:
out = string.decode(encoding)
return out
| |
48,365 | 197,177 | 86 | sympy/parsing/mathematica.py | 20 | 11 | def mathematica(s, additional_translations=None):
parser = MathematicaParser(additional_translations)
if additional_translations | Adapt to new deprecation policy | mathematica | cddb6451ed54ab1f84cffb5313cbff709bbaf8e5 | sympy | mathematica.py | 11 | 11 | https://github.com/sympy/sympy.git | 2 | 52 | 0 | 19 | 88 | Python | {
"docstring": "\n Translate a string containing a Wolfram Mathematica expression to a SymPy\n expression.\n\n If the translator is unable to find a suitable SymPy expression, the\n ``FullForm`` of the Mathematica expression will be output, using SymPy\n ``Function`` objects as nodes of the syntax tree... | def mathematica(s, additional_translations=None):
parser = MathematicaParser(additional_translations)
if additional_translations is not None:
sympy_deprecation_warning(
,
deprecated_since_version="1.11",
active_deprecations_target="mathematica-parser-additional-... | |
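The deprecation above steers users away from `additional_translations`; the non-deprecated entry point (SymPy >= 1.11) is `parse_mathematica`:

from sympy.parsing.mathematica import parse_mathematica

print(parse_mathematica("Sin[x]^2 + Cos[x]^2"))   # sin(x)**2 + cos(x)**2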
47,072 | 194,779 | 635 | parlai/scripts/generate_model_card.py | 262 | 38 | def evaluation(self):
# adding info about the eval tasks
if self.eval_tasks == self.train_tasks:
msg = "For evalution, we used the same training datasets; check the [Datasets Used](#datasets-used) section for more information"
eval_list = ''
else:
msg... | autoformat (#4378) | evaluation | 81f722d29045a7a5841d0931a082ded1d1f13863 | ParlAI | generate_model_card.py | 14 | 36 | https://github.com/facebookresearch/ParlAI.git | 14 | 318 | 0 | 159 | 605 | Python | {
"docstring": "\n returns a section with dataset info about the eval tasks if they exist,\n information about the validation metric if it exists, and create a table with\n the validation metric.\n ",
"language": "en",
"n_whitespaces": 58,
"n_words": 29,
"vocab_size": 22
} | def evaluation(self):
# adding info about the eval tasks
if self.eval_tasks == self.train_tasks:
msg = "For evalution, we used the same training datasets; check the [Datasets Used](#datasets-used) section for more information"
eval_list = ''
else:
msg... | |
@tf_test_utils.with_eager_op_as_function | 80,977 | 272,207 | 284 | keras/integration_test/gradient_checkpoint_test.py | 110 | 42 | def _train_with_recompute(n_steps):
img_dim, n_channels, batch_size = 256, 1, 4
x, y = _get_dummy_data(img_dim, n_channels, batch_size)
# This model is the same model as _get_big_cnn_model but split into 3 parts.
models = _get_split_cnn_model(
img_dim, n_channels, num_partitions=3, blocks_p... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _train_with_recompute | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | gradient_checkpoint_test.py | 13 | 28 | https://github.com/keras-team/keras.git | 2 | 176 | 1 | 82 | 288 | Python | {
"docstring": "Trains a single large model with gradient checkpointing using tf.recompute_grad.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def _train_with_recompute(n_steps):
img_dim, n_channels, batch_size = 256, 1, 4
x, y = _get_dummy_data(img_dim, n_channels, batch_size)
# This model is the same model as _get_big_cnn_model but split into 3 parts.
models = _get_split_cnn_model(
img_dim, n_channels, num_partitions=3, blocks_p... |
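A minimal, hedged sketch of the mechanism under test — wrapping a callable block with `tf.recompute_grad` so its activations are recomputed during backprop instead of stored (the layer sizes are illustrative, not the test's CNN):

import tensorflow as tf

block = tf.keras.Sequential([tf.keras.layers.Dense(64, activation="relu")])
block.build((None, 64))                      # create variables up front
recompute_block = tf.recompute_grad(block)   # same outputs, checkpointed grads

x = tf.random.normal([4, 64])
with tf.GradientTape() as tape:
    loss = tf.reduce_mean(tf.square(recompute_block(x)))
grads = tape.gradient(loss, block.trainable_variables)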
30,200 | 134,124 | 100 | python/ray/tune/tests/test_syncer_callback.py | 45 | 25 | def test_syncer_callback_dead_node_log_error(caplog, ray_start_2_cpus, temp_data_dirs):
caplog.set_level(logging.ERROR, logger="ray.tune.syncer")
tmp_source, tmp_target = temp_data_dirs
syncer_callback = TestSyncerCallback(
sync_period=0,
local_logdir_override=tmp_target,
)
t... | [Tune] Catch SyncerCallback failure with dead node (#29438)
### Context
This issue was uncovered by this long running test: `long_running_distributed_pytorch_pbt_failure`. This test randomly kills nodes via `FailureInjectorCallback`, and the test failure happens when:
1. A trial result comes in and is processed
2... | test_syncer_callback_dead_node_log_error | fc9f8e458c4dad7a51e0d781917b1a003cb55cd7 | ray | test_syncer_callback.py | 10 | 13 | https://github.com/ray-project/ray.git | 1 | 86 | 0 | 42 | 135 | Python | {
"docstring": "Check that we catch + log errors when trying syncing with a dead remote node.",
"language": "en",
"n_whitespaces": 14,
"n_words": 15,
"vocab_size": 15
} | def test_syncer_callback_dead_node_log_error(caplog, ray_start_2_cpus, temp_data_dirs):
caplog.set_level(logging.ERROR, logger="ray.tune.syncer")
tmp_source, tmp_target = temp_data_dirs
syncer_callback = TestSyncerCallback(
sync_period=0,
local_logdir_override=tmp_target,
)
t... | |
42,354 | 177,335 | 81 | networkx/linalg/modularitymatrix.py | 44 | 18 | def directed_modularity_matrix(G, nodelist=None, weight=None):
import numpy as np
if nodelist is None:
nodelist = list(G)
| Use scipy.sparse array datastructure (#6037)
* Use scipy.sparse array datastructure
* Add reminder to rm wrapper when scipy adds creation fns.
* Rm mention of np matrix from code comment.
* Update networkx/algorithms/bipartite/matrix.py
Co-authored-by: Stefan van der Walt <sjvdwalt@gmail.com>
Co-authore... | directed_modularity_matrix | 8a325d26aa7fdd3a72580c4720fa97f971bbefcb | networkx | modularitymatrix.py | 10 | 10 | https://github.com/networkx/networkx.git | 2 | 92 | 0 | 35 | 147 | Python | {
"docstring": "Returns the directed modularity matrix of G.\n\n The modularity matrix is the matrix B = A - <A>, where A is the adjacency\n matrix and <A> is the expected adjacency matrix, assuming that the graph\n is described by the configuration model.\n\n More specifically, the element B_ij of B is d... | def directed_modularity_matrix(G, nodelist=None, weight=None):
import numpy as np
if nodelist is None:
nodelist = list(G)
A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr")
k_in = A.sum(axis=0)
k_out = A.sum(axis=1)
m = k_in.sum()
# Expected adjacen... | |
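The cut stops just before forming the expected-adjacency term; the dense-matrix arithmetic, following the docstring's formula B_ij = A_ij - k_i^out k_j^in / m (SciPy must be installed for the networkx call):

import numpy as np
import networkx as nx

G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (0, 2)])
A = nx.to_numpy_array(G)
k_out, k_in = A.sum(axis=1), A.sum(axis=0)
m = k_in.sum()
B = A - np.outer(k_out, k_in) / m     # directed modularity matrix
print(np.allclose(B, nx.directed_modularity_matrix(G)))  # True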
36,452 | 155,706 | 141 | dask/dataframe/io/parquet/core.py | 24 | 9 | def project_columns(self, columns):
if columns == se | Use map_partitions (Blockwise) in to_parquet (#8487) | project_columns | d98c1dd63e0d7f6a003e3ff70eca796c19b81d42 | dask | core.py | 8 | 12 | https://github.com/dask/dask.git | 2 | 45 | 0 | 23 | 66 | Python | {
"docstring": "Return a new ParquetFunctionWrapper object\n with a sub-column projection.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 9,
"vocab_size": 8
} | def project_columns(self, columns):
if columns == self.columns:
return self
return ParquetFunctionWrapper(
self.engine,
self.fs,
self.meta,
columns,
self.index,
None, # Already merged into common_kwargs
... | |
48,818 | 198,198 | 193 | sympy/tensor/array/expressions/array_expressions.py | 61 | 28 | def sort_args_by_name(self):
expr = self.expr
if not isinstance(expr, ArrayTensorProduct):
return self
arg | Rename files for array expression conversions in order to avoid naming conflicts in TAB-completion of the corresponding functions | sort_args_by_name | a69c49bec6caf2cb460dc4eedf0fec184db92f0e | sympy | array_expressions.py | 13 | 16 | https://github.com/sympy/sympy.git | 5 | 135 | 0 | 46 | 211 | Python | {
"docstring": "\n Sort arguments in the tensor product so that their order is lexicographical.\n\n Examples\n ========\n\n >>> from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array\n >>> from sympy import MatrixSymbol\n >>> from sympy.abc im... | def sort_args_by_name(self):
expr = self.expr
if not isinstance(expr, ArrayTensorProduct):
return self
args = expr.args
sorted_data = sorted(enumerate(args), key=lambda x: default_sort_key(x[1]))
pos_sorted, args_sorted = zip(*sorted_data)
reordering_... | |
29,944 | 133,158 | 93 | python/ray/util/iter.py | 36 | 6 | def gather_async(self, batch_ms=0, num_async=1) -> "LocalIterator[T]":
if num_async < 1:
raise ValueError("queue depth must be positive")
if batch_ms < 0:
| [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | gather_async | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | iter.py | 10 | 33 | https://github.com/ray-project/ray.git | 3 | 61 | 0 | 30 | 70 | Python | {
"docstring": "Returns a local iterable for asynchronous iteration.\n\n New items will be fetched from the shards asynchronously as soon as\n the previous one is computed. Items arrive in non-deterministic order.\n\n Arguments:\n batch_ms (int): Batches items for batch_ms milliseconds... | def gather_async(self, batch_ms=0, num_async=1) -> "LocalIterator[T]":
if num_async < 1:
raise ValueError("queue depth must be positive")
if batch_ms < 0:
raise ValueError("batch time must be positive")
# Forward reference to the returned iterator.
loca... | |
33,585 | 146,010 | 87 | python/ray/ml/tests/test_checkpoints.py | 24 | 13 | def test_dict_checkpoint_dict(self):
checkpoint = self._prepare_dict_checkpoint()
# Convert into dict checkpoint
data_dict = checkpoint.to_dict()
self.assertIsInstance(data_dict, dict)
# Create from dict
checkpoint = Checkpoint.from_dict(da | [ml] Add Ray ML / AIR checkpoint implementation (#22691)
This PR splits up the changes in #22393 and introduces an implementation of the ML Checkpoint interface used by Ray Tune.
This means, the TuneCheckpoint class implements the to/from_[bytes|dict|directory|object_ref|uri] conversion functions, as well as more h... | test_dict_checkpoint_dict | b267be475863a66e9feedb2be5f0a30a2ed8c493 | ray | test_checkpoints.py | 8 | 7 | https://github.com/ray-project/ray.git | 1 | 50 | 0 | 18 | 87 | Python | {
"docstring": "Test conversion from dict to dict checkpoint and back.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | def test_dict_checkpoint_dict(self):
checkpoint = self._prepare_dict_checkpoint()
# Convert into dict checkpoint
data_dict = checkpoint.to_dict()
self.assertIsInstance(data_dict, dict)
# Create from dict
checkpoint = Checkpoint.from_dict(data_dict)
self... | |
25,918 | 117,189 | 196 | tests/integration_tests/flows/test_company_independent.py | 29 | 18 | def test_5_model(self):
query =
predict_query =
for | Projects structure (#3532)
Projects structure | test_5_model | 7c02e15aa403a4ca1fa34489dd2df9136d6c961c | mindsdb | test_company_independent.py | 13 | 23 | https://github.com/mindsdb/mindsdb.git | 2 | 90 | 0 | 24 | 142 | Python | {
"docstring": "\n CREATE MODEL mindsdb.model_{}\n FROM test_integration_{} (\n select * from test_data.home_rentals limit 50\n ) PREDICT rental_price\n USING join_learn_process=true, time_aim=5\n \n select * from mindsdb.model_{} where sqft... | def test_5_model(self):
query =
predict_query =
for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:
self.sql_via_http(
query.format(char, char),
company_id=cid,
expected_resp_type=RESPONSE_TYPE.OK
)
response = se... | |
69,840 | 242,342 | 182 | src/PIL/Image.py | 59 | 12 | def putpalette(self, data, rawmode="RGB"):
from . import ImagePalette
if self.mode not in ("L", "LA", "P", "PA"):
raise ValueError("illegal image mode")
if isinstance(data, ImagePalette.ImagePalette):
palette = ImagePalette.raw(data.rawmode, data.palette)
... | Attach RGBA palettes from putpalette() when suitable | putpalette | 9cdb0508b6cbd3a3061017760a5eab4d13c3924a | Pillow | Image.py | 13 | 14 | https://github.com/python-pillow/Pillow.git | 5 | 118 | 0 | 43 | 204 | Python | {
"docstring": "\n Attaches a palette to this image. The image must be a \"P\", \"PA\", \"L\"\n or \"LA\" image.\n\n The palette sequence must contain at most 256 colors, made up of one\n integer value for each channel in the raw mode.\n For example, if the raw mode is \"RGB\", the... | def putpalette(self, data, rawmode="RGB"):
from . import ImagePalette
if self.mode not in ("L", "LA", "P", "PA"):
raise ValueError("illegal image mode")
if isinstance(data, ImagePalette.ImagePalette):
palette = ImagePalette.raw(data.rawmode, data.palette)
... | |
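What the referenced commit enables, as a short usage sketch — passing an RGBA palette via `rawmode` (values are illustrative; Pillow >= 9.1):

from PIL import Image

im = Image.new("P", (8, 8))
im.putpalette([0, 0, 0, 255, 0, 0], rawmode="RGB")             # 2 opaque colours
im.putpalette([0, 0, 0, 128, 255, 0, 0, 255], rawmode="RGBA")  # alpha per entry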
79,329 | 268,055 | 41 | test/lib/ansible_test/_internal/timeout.py | 19 | 14 | def get_timeout() -> t.Optional[t.Dict[str, t.Any]]:
if not os.path.exists(TIMEOUT_PATH):
return None
data = read_json_file(TIMEOUT_PATH)
data['deadline'] = datetime.datetime.strptime(data['deadline'] | ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annota... | get_timeout | 3eb0485dd92c88cc92152d3656d94492db44b183 | ansible | timeout.py | 10 | 7 | https://github.com/ansible/ansible.git | 2 | 60 | 0 | 16 | 100 | Python | {
"docstring": "Return details about the currently set timeout, if any, otherwise return None.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def get_timeout() -> t.Optional[t.Dict[str, t.Any]]:
if not os.path.exists(TIMEOUT_PATH):
return None
data = read_json_file(TIMEOUT_PATH)
data['deadline'] = datetime.datetime.strptime(data['deadline'], '%Y-%m-%dT%H:%M:%SZ')
return data
| |
@test_utils.run_v2_only | 83,360 | 280,504 | 15 | keras/saving/experimental/saving_lib_test.py | 11 | 12 | def my_mean_squared_error(y_true, y_pred):
return backend.mean(tf.math.squared_dif | Move new optimizer out of optimizer_experimental/ directory.
PiperOrigin-RevId: 488998585 | my_mean_squared_error | 5a105aadbdc6fde2c2529280c4789864adbb81c7 | keras | saving_lib_test.py | 10 | 2 | https://github.com/keras-team/keras.git | 1 | 29 | 1 | 11 | 58 | Python | {
"docstring": "Identical to built-in `mean_squared_error`, added here as a custom\n\n func.\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 10,
"vocab_size": 10
} | def my_mean_squared_error(y_true, y_pred):
return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1)
module_my_mean_squared_error = my_mean_squared_error
@test_utils.run_v2_only |
19,916 | 100,438 | 278 | lib/model/session.py | 80 | 20 | def _amd_predict_with_optimized_batchsizes(self, feed, batch_size):
if isinstance(feed, np.ndarray):
feed = [feed]
items = feed[0].shape[0]
done_items = 0
results = []
while done_items < items:
if batch_size < 4: # Not much difference in BS < 4
... | Update all Keras Imports to be conditional (#1214)
* Remove custom keras importer
* first round keras imports fix
* launcher.py: Remove KerasFinder references
* 2nd round keras imports update (lib and extract)
* 3rd round keras imports update (train)
* remove KerasFinder from tests
* 4th round keras ... | _amd_predict_with_optimized_batchsizes | aa39234538a8f83e6aa2b60b8275a570e8876ac2 | faceswap | session.py | 14 | 19 | https://github.com/deepfakes/faceswap.git | 8 | 146 | 0 | 56 | 235 | Python | {
"docstring": " Minimizes the amount of kernels to be compiled when using the ``amd`` backend with\n varying batch sizes while trying to keep the batchsize as high as possible.\n\n Parameters\n ----------\n feed: numpy.ndarray or list\n The feed to be provided to the model as i... | def _amd_predict_with_optimized_batchsizes(self, feed, batch_size):
if isinstance(feed, np.ndarray):
feed = [feed]
items = feed[0].shape[0]
done_items = 0
results = []
while done_items < items:
if batch_size < 4: # Not much difference in BS < 4
... | |
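A hedged sketch of the batching strategy the docstring describes — keep the batch size as high as possible, halving when the remainder is smaller, and dropping to single items below 4 (the real method also runs the prediction on each slice):

def optimized_batches(n_items, batch_size):
    done = 0
    while done < n_items:
        if batch_size < 4:                 # little kernel reuse below this
            batch_size = 1
        while batch_size > n_items - done and batch_size > 1:
            batch_size //= 2               # halve instead of compiling a new size
        yield done, done + batch_size
        done += batch_size

print(list(optimized_batches(10, 8)))      # [(0, 8), (8, 10)]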
24,044 | 110,304 | 73 | lib/matplotlib/patches.py | 23 | 6 | def set_positions(self, posA, posB):
if posA is not None:
| Doc: Fix grammar and spelling | set_positions | 03a0b5ea238014ba87f74ef766928287726aa00a | matplotlib | patches.py | 10 | 6 | https://github.com/matplotlib/matplotlib.git | 3 | 43 | 0 | 15 | 67 | Python | {
"docstring": "\n Set the start and end positions of the connecting path.\n\n Parameters\n ----------\n posA, posB : None, tuple\n (x, y) coordinates of arrow tail and arrow head respectively. If\n `None` use current value.\n ",
"language": "en",
"n_whites... | def set_positions(self, posA, posB):
if posA is not None:
self._posA_posB[0] = posA
if posB is not None:
self._posA_posB[1] = posB
self.stale = True
| |
18,130 | 86,579 | 557 | tests/sentry/api/endpoints/test_organization_metric_data.py | 101 | 34 | def test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data(self):
for tag, value, numbers in (
("transaction", "/foo/", [10, 11, 12]),
("transaction", "/bar/", [4, 5, 6]),
):
for subvalue in numbers:
self.store_performanc... | feat(metrics): Standardize tests and fix overall flakiness [TET-437] (#39660) | test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data | c67c560f667e6fc7fee2c6d62ac3987ba54f89d5 | sentry | test_organization_metric_data.py | 14 | 39 | https://github.com/getsentry/sentry.git | 4 | 239 | 0 | 82 | 431 | Python | {
"docstring": "\n Test that ensures when transactions table has null values for some fields (i.e. fields\n with a different entity than the entity of the field in the order by), then the table gets\n populated accordingly\n ",
"language": "en",
"n_whitespaces": 63,
"n_words": 34,
... | def test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data(self):
for tag, value, numbers in (
("transaction", "/foo/", [10, 11, 12]),
("transaction", "/bar/", [4, 5, 6]),
):
for subvalue in numbers:
self.store_performanc... | |
14,369 | 66,873 | 13 | erpnext/payroll/doctype/employee_benefit_application/employee_benefit_application.py | 21 | 10 | def get_earning_components_max_benefits(employee, date, earning_component):
| style: format code with black | get_earning_components_max_benefits | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | employee_benefit_application.py | 9 | 14 | https://github.com/frappe/erpnext.git | 2 | 38 | 0 | 18 | 56 | Python | {
"docstring": "\n\t\t\tselect amount\n\t\t\tfrom `tabSalary Detail`\n\t\t\twhere parent = %s and is_flexible_benefit = 1\n\t\t\tand salary_component = %s\n\t\t\torder by name\n\t\t",
"language": "en",
"n_whitespaces": 15,
"n_words": 20,
"vocab_size": 16
} | def get_earning_components_max_benefits(employee, date, earning_component):
salary_structure = get_assigned_salary_structure(employee, date)
amount = frappe.db.sql(
,
salary_structure,
earning_component,
)
return amount if amount else 0
| |
80,549 | 270,731 | 72 | keras/engine/base_layer.py | 22 | 9 | def get_input_mask_at(self, node_index):
inputs = self.get_input_at(node_index)
if isinstance(inputs, list):
return [getattr(x, "_keras_mask", None) for x in inputs]
else:
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | get_input_mask_at | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | base_layer.py | 11 | 6 | https://github.com/keras-team/keras.git | 3 | 50 | 0 | 19 | 80 | Python | {
"docstring": "Retrieves the input mask tensor(s) of a layer at a given node.\n\n Args:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n ... | def get_input_mask_at(self, node_index):
inputs = self.get_input_at(node_index)
if isinstance(inputs, list):
return [getattr(x, "_keras_mask", None) for x in inputs]
else:
return getattr(inputs, "_keras_mask", None)
| |
1,726 | 9,844 | 52 | jina/peapods/networking.py | 9 | 1 | def get_default_grpc_options():
retu | feat: star routing (#3900)
* feat(proto): adjust proto for star routing (#3844)
* feat(proto): adjust proto for star routing
* feat(proto): generate proto files
* feat(grpc): refactor grpclet interface (#3846)
* feat: refactor connection pool for star routing (#3872)
* feat(k8s): add more labels to k8s ... | get_default_grpc_options | 933415bfa1f9eb89f935037014dfed816eb9815d | jina | networking.py | 8 | 5 | https://github.com/jina-ai/jina.git | 1 | 22 | 0 | 8 | 39 | Python | {
"docstring": "\n Returns a list of default options used for creating grpc channels.\n Documentation is here https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/grpc_types.h\n :returns: list of tuples defining grpc parameters\n ",
"language": "en",
"n_whitespaces": 51,
... | def get_default_grpc_options():
return [
('grpc.max_send_message_length', -1),
('grpc.max_receive_message_length', -1),
]
| |
5,634 | 30,582 | 43 | src/ocrmypdf/builtin_plugins/concurrency.py | 11 | 6 | def _cancel_futures_kwargs(self):
if sys.version_info[:2] < (3, 9):
return {}
return dict(cancel_futures=Tru | Add shim for cancel_futures in older Pythons
Thanks @hfwittmann
Closes #993
Co-authored-by: H. Felix Wittmann <hfwittmann@users.noreply.github.com> | _cancel_futures_kwargs | 6b425aaebe33703bd44b1b15571e4af8533b851a | OCRmyPDF | concurrency.py | 8 | 4 | https://github.com/ocrmypdf/OCRmyPDF.git | 2 | 31 | 0 | 10 | 51 | Python | {
"docstring": "Shim older Pythons that do not have Executor.shutdown(...cancel_futures=).\n\n Remove this code when support for Python 3.8 is dropped.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 18,
"vocab_size": 18
} | def _cancel_futures_kwargs(self):
if sys.version_info[:2] < (3, 9):
return {}
return dict(cancel_futures=True)
| |
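How such a shim is typically consumed — splatted into `Executor.shutdown`, which only grew `cancel_futures` in Python 3.9 (a standalone variant of the method above):

import sys
from concurrent.futures import ThreadPoolExecutor

def cancel_futures_kwargs():
    return {} if sys.version_info[:2] < (3, 9) else dict(cancel_futures=True)

executor = ThreadPoolExecutor(max_workers=2)
executor.shutdown(wait=False, **cancel_futures_kwargs())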
27,734 | 124,984 | 18 | python/ray/data/_internal/util.py | 9 | 7 | def _estimate_available_parallelism() -> int:
cur_pg = ray.util.get_current_placement_ | [data] Add warnings when DatasetPipelines are under-parallelized or using too much memory (#26592)
Currently, it's not very easy to figure out why a DatasetPipeline may be underperforming. Add some warnings to help guide the user. As a next step, we can try to default to a good pipeline setting based on these constrai... | _estimate_available_parallelism | ef091c382eea427783ea75531fe9d5a5f008107c | ray | util.py | 9 | 5 | https://github.com/ray-project/ray.git | 1 | 21 | 0 | 9 | 38 | Python | {
"docstring": "Estimates the available CPU parallelism for this Dataset in the cluster.\n If we are currently in a placement group, take that into account.",
"language": "en",
"n_whitespaces": 25,
"n_words": 23,
"vocab_size": 21
} | def _estimate_available_parallelism() -> int:
cur_pg = ray.util.get_current_placement_group()
return _estimate_avail_cpus(cur_pg)
| |
41,588 | 175,299 | 113 | Lib/enum.py | 57 | 11 | def bin(num, max_bits=None):
ceiling = 2 ** (num).bit_length()
if num >= 0:
s = bltns.bin(num + ceiling).replace('1', '0', 1)
else: | bpo-40066: [Enum] update str() and format() output (GH-30582)
Undo rejected PEP-663 changes:
- restore `repr()` to its 3.10 status
- restore `str()` to its 3.10 status
New changes:
- `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result
... | bin | acf7403f9baea3ae1119fc6b4a3298522188bf96 | cpython | enum.py | 16 | 12 | https://github.com/python/cpython.git | 4 | 118 | 0 | 44 | 192 | Python | {
"docstring": "\n Like built-in bin(), except negative values are represented in\n twos-compliment, and the leading bit always indicates sign\n (0=positive, 1=negative).\n\n >>> bin(10)\n '0b0 1010'\n >>> bin(~10) # ~10 is -11\n '0b1 0101'\n ",
"language": "en",
"n_whitespaces": 58,
"... | def bin(num, max_bits=None):
ceiling = 2 ** (num).bit_length()
if num >= 0:
s = bltns.bin(num + ceiling).replace('1', '0', 1)
else:
s = bltns.bin(~num ^ (ceiling - 1) + ceiling)
sign = s[:3]
digits = s[3:]
if max_bits is not None:
if len(digits) < max_bits:
... | |
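Working the negative branch by hand for num = -11 (note `^` binds looser than `+`, so the expression is `~num ^ ((ceiling - 1) + ceiling)`):

num = -11
ceiling = 2 ** num.bit_length()           # (-11).bit_length() == 4 -> 16
s = bin(~num ^ (ceiling - 1) + ceiling)   # 10 ^ 31 = 21
assert s == '0b10101' and (s[:3], s[3:]) == ('0b1', '0101')  # -> '0b1 0101'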
76,691 | 261,218 | 31 | sklearn/utils/__init__.py | 15 | 8 | def axis0_safe_slice(X, mask, len_mask):
if len_mask | DOC Ensure that sklearn.utils.axis0_safe_slice passes numpydoc (#24561) | axis0_safe_slice | 537c325f2927895449ce418b3a77750135c0ba7b | scikit-learn | __init__.py | 11 | 4 | https://github.com/scikit-learn/scikit-learn.git | 2 | 45 | 0 | 14 | 68 | Python | {
"docstring": "Return a mask which is safer to use on X than safe_mask.\n\n This mask is safer than safe_mask since it returns an\n empty array, when a sparse matrix is sliced with a boolean mask\n with all False, instead of raising an unhelpful error in older\n versions of SciPy.\n\n See: https://git... | def axis0_safe_slice(X, mask, len_mask):
if len_mask != 0:
return X[safe_mask(X, mask), :]
return np.zeros(shape=(0, X.shape[1]))
| |
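The all-False case the docstring warns about, as a quick check (`axis0_safe_slice` is importable from `sklearn.utils`):

import numpy as np
from sklearn.utils import axis0_safe_slice

X = np.arange(12.0).reshape(3, 4)
mask = np.zeros(3, dtype=bool)                        # selects nothing
print(axis0_safe_slice(X, mask, mask.sum()).shape)    # (0, 4), no unhelpful error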
76,401 | 260,662 | 366 | sklearn/impute/_base.py | 121 | 20 | def _most_frequent(array, extra_value, n_repeat):
# Compute the most frequent value in array only
if array.size > 0:
if array.dtype == object:
# scipy.stats.mode is slow with object dtype array.
# Python Counter is more efficient
counter = Counter(array)
... | MAINT fix the way to call stats.mode (#23633)
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
Co-authored-by: Meekail Zain <34613774+Micky774@users.noreply.github.com>
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> | _most_frequent | 02a4b342181e5ff0226081691308414e53c3107b | scikit-learn | _base.py | 15 | 25 | https://github.com/scikit-learn/scikit-learn.git | 10 | 137 | 0 | 67 | 221 | Python | {
"docstring": "Compute the most frequent value in a 1d array extended with\n [extra_value] * n_repeat, where extra_value is assumed to be not part\n of the array.",
"language": "en",
"n_whitespaces": 30,
"n_words": 25,
"vocab_size": 24
} | def _most_frequent(array, extra_value, n_repeat):
# Compute the most frequent value in array only
if array.size > 0:
if array.dtype == object:
# scipy.stats.mode is slow with object dtype array.
# Python Counter is more efficient
counter = Counter(array)
... | |
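A hedged reimplementation of the documented contract using only `Counter` — the tie-break (smallest value wins, scipy.stats.mode style) is an assumption:

from collections import Counter
import numpy as np

def most_frequent_extended(array, extra_value, n_repeat):
    counter = Counter(array.tolist())
    counter[extra_value] += n_repeat       # virtual [extra_value] * n_repeat
    # highest count first; ties broken by smallest value
    return min(counter.items(), key=lambda kv: (-kv[1], kv[0]))[0]

print(most_frequent_extended(np.array([1, 1, 2, 2, 3]), 3, 1))   # 1 (three-way tie)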
75,886 | 259,737 | 503 | sklearn/discriminant_analysis.py | 144 | 24 | def _cov(X, shrinkage=None, covariance_estimator=None):
if covariance_estimator is None:
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, str):
if shrinkage == "auto":
sc = StandardScaler() # standardize features
X... | MNT Combine multiple `isinstance` call (#23204) | _cov | 3f1833d5805a99894f1fc6b858a9ac663e175997 | scikit-learn | discriminant_analysis.py | 16 | 33 | https://github.com/scikit-learn/scikit-learn.git | 12 | 197 | 0 | 87 | 333 | Python | {
"docstring": "Estimate covariance matrix (using optional covariance_estimator).\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n shrinkage : {'empirical', 'auto'} or float, default=None\n Shrinkage parameter, possible values:\n - None o... | def _cov(X, shrinkage=None, covariance_estimator=None):
if covariance_estimator is None:
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, str):
if shrinkage == "auto":
sc = StandardScaler() # standardize features
X... | |
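The branches correspond to public estimators in `sklearn.covariance`; a sketch of each (the real helper applies Ledoit-Wolf to standardised data and rescales the result afterwards):

import numpy as np
from sklearn.covariance import empirical_covariance, ledoit_wolf, shrunk_covariance

X = np.random.default_rng(0).normal(size=(30, 5))
emp = empirical_covariance(X)                  # shrinkage=None / "empirical"
auto, _ = ledoit_wolf(X)                       # shrinkage="auto"
fixed = shrunk_covariance(emp, shrinkage=0.2)  # float shrinkage in [0, 1]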
4,211 | 22,139 | 50 | pipenv/patched/pip/_vendor/requests/utils.py | 24 | 9 | def from_key_val_list(value):
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError("cannot encode objects tha | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | from_key_val_list | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | utils.py | 10 | 6 | https://github.com/pypa/pipenv.git | 3 | 39 | 0 | 22 | 63 | Python | {
"docstring": "Take an object and test to see if it can be represented as a\n dictionary. Unless it can not be represented as such, return an\n OrderedDict, e.g.,\n\n ::\n\n >>> from_key_val_list([('key', 'val')])\n OrderedDict([('key', 'val')])\n >>> from_key_val_list('string')\n ... | def from_key_val_list(value):
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError("cannot encode objects that are not 2-tuples")
return OrderedDict(value)
| |
3,373 | 20,445 | 237 | pipenv/patched/notpip/_vendor/pygments/lexers/__init__.py | 92 | 14 | def load_lexer_from_file(filename, lexername="CustomLexer", **options):
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
with open(filename, 'rb') as f:
exec(f.read(), custom_namespace)
# | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | load_lexer_from_file | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | __init__.py | 13 | 16 | https://github.com/pypa/pipenv.git | 5 | 100 | 0 | 70 | 176 | Python | {
"docstring": "Load a lexer from a file.\n\n This method expects a file located relative to the current working\n directory, which contains a Lexer class. By default, it expects the\n Lexer to be name CustomLexer; you can specify your own class name\n as the second argument to this function.\n\n Users... | def load_lexer_from_file(filename, lexername="CustomLexer", **options):
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
with open(filename, 'rb') as f:
exec(f.read(), custom_namespace)
# Retrieve the class `lexername` from ... | |
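Typical call shape for the loader — the file path and class name here are illustrative; extra keyword options are forwarded to the lexer's constructor:

from pygments.lexers import load_lexer_from_file

lexer = load_lexer_from_file("my_lexer.py", lexername="MyLexer", stripnl=False)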
13,580 | 64,228 | 158 | erpnext/utilities/product.py | 214 | 45 | def get_price(item_code, price_list, customer_group, company, qty=1):
from erpnext.e_commerce.shopping_cart.cart import get_party
template_item_code = frappe.db.get_value("Item", item_code, "variant_of")
if price_list:
price = frappe.get_all("Item Price", fields=["price_list_rate", "currency"],
filters={"pric... | fix: fetch correct selling price. | get_price | 282fbf4b07740e14566f16d749b549239d7253a7 | erpnext | product.py | 24 | 58 | https://github.com/frappe/erpnext.git | 22 | 509 | 0 | 128 | 854 | Python | {
"docstring": "select\tC.conversion_factor\n\t\t\t\t\tfrom `tabUOM Conversion Detail` C\n\t\t\t\t\tinner join `tabItem` I on C.parent = I.name and C.uom = I.sales_uom\n\t\t\t\t\twhere I.name = %s",
"language": "en",
"n_whitespaces": 18,
"n_words": 23,
"vocab_size": 20
} | def get_price(item_code, price_list, customer_group, company, qty=1):
from erpnext.e_commerce.shopping_cart.cart import get_party
template_item_code = frappe.db.get_value("Item", item_code, "variant_of")
if price_list:
price = frappe.get_all("Item Price", fields=["price_list_rate", "currency"],
filters={"pric... |