ast_errors stringlengths 0 3.2k | d_id int64 44 121k | id int64 70 338k | n_whitespaces int64 3 14k | path stringlengths 8 134 | n_words int64 4 4.82k | n_identifiers int64 1 131 | random_cut stringlengths 16 15.8k | commit_message stringlengths 2 15.3k | fun_name stringlengths 1 84 | commit_id stringlengths 40 40 | repo stringlengths 3 28 | file_name stringlengths 5 79 | ast_levels int64 6 31 | nloc int64 1 548 | url stringlengths 31 59 | complexity int64 1 66 | token_counts int64 6 2.13k | n_ast_errors int64 0 28 | vocab_size int64 4 1.11k | n_ast_nodes int64 15 19.2k | language stringclasses 1
value | documentation dict | code stringlengths 101 62.2k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
@pytest.mark.parametrize("criterion", ("poisson", "squared_error")) | 75,307 | 258,587 | 483 | sklearn/ensemble/tests/test_forest.py | 247 | 50 | def test_poisson_vs_mse():
rng = np.random.RandomState(42)
n_train, n_test, n_features = 500, 500, 10
X = datasets.make_low_rank_matrix(
n_samples=n_train + n_test, n_features=n_features, random_state=rng
)
# We create a log-linear Poisson model and downscale coef as it will get
# ... | FIX poisson proxy_impurity_improvement (#22191) | test_poisson_vs_mse | 2b15b908c11b90a15253394b1a03bd535720d6ce | scikit-learn | test_forest.py | 14 | 32 | https://github.com/scikit-learn/scikit-learn.git | 3 | 279 | 1 | 163 | 458 | Python | {
"docstring": "Test that random forest with poisson criterion performs better than\n mse for a poisson target.\n\n There is a similar test for DecisionTreeRegressor.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 22,
"vocab_size": 19
} | def test_poisson_vs_mse():
rng = np.random.RandomState(42)
n_train, n_test, n_features = 500, 500, 10
X = datasets.make_low_rank_matrix(
n_samples=n_train + n_test, n_features=n_features, random_state=rng
)
# We create a log-linear Poisson model and downscale coef as it will get
# ... |
29,265 | 130,419 | 77 | python/ray/autoscaler/_private/cli_logger.py | 28 | 14 | def _external_caller_info():
frame = inspect.currentframe()
caller = frame
levels = 0
while caller.f_code.co_filename == __file__:
caller = caller.f_back
| [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | _external_caller_info | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | cli_logger.py | 11 | 11 | https://github.com/ray-project/ray.git | 2 | 59 | 0 | 22 | 100 | Python | {
"docstring": "Get the info from the caller frame.\n\n Used to override the logging function and line number with the correct\n ones. See the comment on _patched_makeRecord for more info.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 28,
"vocab_size": 24
} | def _external_caller_info():
frame = inspect.currentframe()
caller = frame
levels = 0
while caller.f_code.co_filename == __file__:
caller = caller.f_back
levels += 1
return {
"lineno": caller.f_lineno,
"filename": os.path.basename(caller.f_code.co_filename),
... | |
45,568 | 186,660 | 84 | certbot-apache/certbot_apache/_internal/override_centos.py | 27 | 11 | def _try_restart_fedora(self) -> None:
try:
util.run_script(['systemctl', 'restart', 'httpd'])
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
# Finish with actual config check to see if systemctl restart helped
super(... | Add typing to certbot.apache (#9071)
* Add typing to certbot.apache
Co-authored-by: Adrien Ferrand <ferrand.ad@gmail.com> | _try_restart_fedora | 7d9e9a49005de7961e84d2a7c608db57dbab3046 | certbot | override_centos.py | 12 | 9 | https://github.com/certbot/certbot.git | 2 | 46 | 0 | 27 | 85 | Python | {
"docstring": "\n Tries to restart httpd using systemctl to generate the self signed key pair.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 12
} | def _try_restart_fedora(self) -> None:
try:
util.run_script(['systemctl', 'restart', 'httpd'])
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
# Finish with actual config check to see if systemctl restart helped
super(... | |
56,078 | 220,661 | 78 | python3.10.4/Lib/asyncio/selector_events.py | 25 | 11 | async def sock_accept(self, sock):
base_events._check_ssl_s | add python 3.10.4 for windows | sock_accept | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | selector_events.py | 10 | 7 | https://github.com/XX-net/XX-Net.git | 3 | 50 | 0 | 24 | 86 | Python | {
"docstring": "Accept a connection.\n\n The socket must be bound to an address and listening for connections.\n The return value is a pair (conn, address) where conn is a new socket\n object usable to send and receive data on the connection, and address\n is the address bound to the socke... | async def sock_accept(self, sock):
base_events._check_ssl_socket(sock)
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
self._sock_accept(fut, sock)
return await fut
| |
572 | 3,825 | 133 | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_insight_streams.py | 37 | 12 | def test_state(self, api, state):
stream = AdsInsights(
api=api,
start_ | 🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)
* Facebook Marketing performance improvement
* add comments and little refactoring
* fix integration tests with the new config
* improve job status handling, limit concurrency to 10
* fix campaign jobs, refactor manager
* big refactori... | test_state | a3aae8017a0a40ff2006e2567f71dccb04c997a5 | airbyte | test_base_insight_streams.py | 11 | 12 | https://github.com/airbytehq/airbyte.git | 1 | 96 | 0 | 24 | 152 | Python | {
"docstring": "State setter/getter should work with all combinations",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def test_state(self, api, state):
stream = AdsInsights(
api=api,
start_date=datetime(2010, 1, 1),
end_date=datetime(2011, 1, 1),
)
assert stream.state == {}
stream.state = state
actual_state = stream.state
actual_state["slice... | |
19,288 | 96,187 | 25 | src/sentry/search/events/builder.py | 11 | 3 | def get_snql_query(self) -> None:
raise NotImplementedError("get_snql_ | feat(MEP): Add initial framework for metric queries (#31649)
- This adds a MetricsQueryBuilder, which works very similarily to our
QueryBuilder, but with specific handlers for how metrics construct
queries
- This MetricsQueryBuilder does not yet construct snql queries, and will
not because table queries will... | get_snql_query | cf30c11a194aa5e61d8d7c7fc506764f846fcf82 | sentry | builder.py | 8 | 4 | https://github.com/getsentry/sentry.git | 1 | 13 | 0 | 11 | 26 | Python | {
"docstring": "Because metrics table queries need to make multiple requests per metric type this function cannot be\n inmplemented see run_query",
"language": "en",
"n_whitespaces": 25,
"n_words": 19,
"vocab_size": 19
} | def get_snql_query(self) -> None:
raise NotImplementedError("get_snql_query cannot be implemented for MetricsQueryBuilder")
| |
30,004 | 133,391 | 33 | python/ray/util/sgd/torch/worker_group.py | 12 | 8 | def _validate(self, params):
remote_worker_stats = [w.validate.remote(**params) for w in self.remote_workers]
| [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | _validate | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | worker_group.py | 10 | 3 | https://github.com/ray-project/ray.git | 2 | 29 | 0 | 11 | 47 | Python | {
"docstring": "Runs validation for each worker. Returns results as promises.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _validate(self, params):
remote_worker_stats = [w.validate.remote(**params) for w in self.remote_workers]
return remote_worker_stats
| |
23,404 | 108,967 | 508 | lib/mpl_toolkits/mplot3d/axes3d.py | 94 | 31 | def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):
_api.check_in_list(('auto', 'equal', 'equalxy', 'equalyz', 'equalxz'),
aspect=aspect)
super().set_aspect(
aspect='auto', adjustable=adjustable, anchor=anchor, share=share)
if... | Add equalxy, equalyz, equalxz aspect ratios
Update docstrings | set_aspect | 31d13198ecf6969b1b693c28a02b0805f3f20420 | matplotlib | axes3d.py | 16 | 25 | https://github.com/matplotlib/matplotlib.git | 8 | 255 | 0 | 65 | 399 | Python | {
"docstring": "\n Set the aspect ratios.\n\n Parameters\n ----------\n aspect : {'auto', 'equal', 'equalxy', 'equalxz', 'equalyz'}\n Possible values:\n\n ========= ==================================================\n value description\n ... | def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):
_api.check_in_list(('auto', 'equal', 'equalxy', 'equalyz', 'equalxz'),
aspect=aspect)
super().set_aspect(
aspect='auto', adjustable=adjustable, anchor=anchor, share=share)
if... | |
54,713 | 217,315 | 77 | python3.10.4/Lib/enum.py | 16 | 7 | def __getattr__(cls, name):
if _is_dunder(name):
raise AttributeError(name)
try:
return cl | add python 3.10.4 for windows | __getattr__ | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | enum.py | 10 | 7 | https://github.com/XX-net/XX-Net.git | 3 | 38 | 0 | 14 | 62 | Python | {
"docstring": "\n Return the enum member matching `name`\n\n We use __getattr__ instead of descriptors or inserting into the enum\n class' __dict__ in order to support `name` and `value` being both\n properties for enum members (which live in the class' __dict__) and\n enum members... | def __getattr__(cls, name):
if _is_dunder(name):
raise AttributeError(name)
try:
return cls._member_map_[name]
except KeyError:
raise AttributeError(name) from None
| |
75,904 | 259,759 | 34 | sklearn/cluster/tests/test_bisect_k_means.py | 16 | 16 | def test_n_clusters(n_clusters):
rng = np.random.RandomState(0)
X | FEA Bisecting K-Means (#20031)
Co-authored-by: Gael Varoquaux <gael.varoquaux@normalesup.org>
Co-authored-by: Tom Dupré la Tour <tom.dupre-la-tour@m4x.org>
Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com> | test_n_clusters | 0822851f5cb17827939a7d7b4f8c84f43184ae89 | scikit-learn | test_bisect_k_means.py | 10 | 6 | https://github.com/scikit-learn/scikit-learn.git | 1 | 62 | 0 | 14 | 100 | Python | {
"docstring": "Test if resulting labels are in range [0, n_clusters - 1].",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def test_n_clusters(n_clusters):
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
bisect_means = BisectingKMeans(n_clusters=n_clusters, random_state=0)
bisect_means.fit(X)
assert_array_equal(np.unique(bisect_means.labels_), np.arange(n_clusters))
| |
57,228 | 224,175 | 196 | mkdocs/tests/structure/nav_tests.py | 46 | 24 | def test_nested_ungrouped_nav(self):
nav_cfg = [
{'Home': 'index.md'},
{'Contact': 'about/contact.md'},
{'License Title': 'about/sub/license.md'},
]
expected = dedent(
)
cfg = load_config(nav=nav_cfg, site_url='http://example.com/'... | Some manual changes ahead of formatting code with Black | test_nested_ungrouped_nav | 372384d8102ddb4be6360f44d1bfddb8b45435a4 | mkdocs | nav_tests.py | 14 | 23 | https://github.com/mkdocs/mkdocs.git | 2 | 137 | 0 | 37 | 228 | Python | {
"docstring": "\n Page(title='Home', url='/')\n Page(title='Contact', url='/about/contact/')\n Page(title='License Title', url='/about/sub/license/')\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 7,
"vocab_size": 7
} | def test_nested_ungrouped_nav(self):
nav_cfg = [
{'Home': 'index.md'},
{'Contact': 'about/contact.md'},
{'License Title': 'about/sub/license.md'},
]
expected = dedent(
)
cfg = load_config(nav=nav_cfg, site_url='http://example.com/'... | |
29,378 | 130,806 | 156 | python/ray/node.py | 32 | 14 | def _get_log_file_names(self, name, unique=False):
if unique:
log_stdout = self._make_inc_temp(
suffix=".out", prefix=name, directory_name=self._logs_dir
)
log_stderr = self._make_inc_temp(
suffix=".err", prefix=name, directory_name=s... | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | _get_log_file_names | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | node.py | 13 | 12 | https://github.com/ray-project/ray.git | 2 | 91 | 0 | 21 | 151 | Python | {
"docstring": "Generate partially randomized filenames for log files.\n\n Args:\n name (str): descriptive string for this log file.\n unique (bool): if true, a counter will be attached to `name` to\n ensure the returned filename is not already used.\n\n Returns:\n ... | def _get_log_file_names(self, name, unique=False):
if unique:
log_stdout = self._make_inc_temp(
suffix=".out", prefix=name, directory_name=self._logs_dir
)
log_stderr = self._make_inc_temp(
suffix=".err", prefix=name, directory_name=s... | |
56,302 | 221,263 | 74 | python3.10.4/Lib/calendar.py | 24 | 10 | def yeardayscalendar(self, year, width=3):
months = [
self.monthdayscalendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
| add python 3.10.4 for windows | yeardayscalendar | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | calendar.py | 11 | 6 | https://github.com/XX-net/XX-Net.git | 3 | 60 | 0 | 20 | 88 | Python | {
"docstring": "\n Return the data for the specified year ready for formatting (similar to\n yeardatescalendar()). Entries in the week lists are day numbers.\n Day numbers outside this month are zero.\n ",
"language": "en",
"n_whitespaces": 57,
"n_words": 28,
"vocab_size": 24
} | def yeardayscalendar(self, year, width=3):
months = [
self.monthdayscalendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ]
| |
51,877 | 207,141 | 81 | tests/admin_filters/tests.py | 28 | 15 | def test_simplelistfilter_without_parameter(self):
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
msg = "The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.... | Refs #33476 -- Reformatted code with Black. | test_simplelistfilter_without_parameter | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 9 | 7 | https://github.com/django/django.git | 1 | 53 | 0 | 25 | 92 | Python | {
"docstring": "\n Any SimpleListFilter must define a parameter_name.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 6,
"vocab_size": 6
} | def test_simplelistfilter_without_parameter(self):
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
msg = "The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.... | |
18,375 | 88,327 | 226 | src/sentry/api/invite_helper.py | 54 | 19 | def from_session_or_email(cls, request, organization, email, instance=None, logger=None):
invite_token, invite_member_id = get_invite_details(request)
try:
if invite_token and invite_member_id:
om = OrganizationMember.objects.get(token=invite_token, id=invite_member... | Move invite code functionality from cookie to session (#40905)
Moves the invite functionality from cookies to the session. This is to
harden the security of the platform.
With the cookie approach, a client can manipulate the cookie value for
`pending-invite` resulting in situations where an invite code can be
re... | from_session_or_email | 565f971da955d57c754a47f5802fe9f9f7c66b39 | sentry | invite_helper.py | 14 | 14 | https://github.com/getsentry/sentry.git | 4 | 107 | 0 | 47 | 161 | Python | {
"docstring": "\n Initializes the ApiInviteHelper by locating the pending organization\n member via the currently set pending invite details in the session, or\n via the passed email if no cookie is currently set.\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 30,
"vocab_s... | def from_session_or_email(cls, request, organization, email, instance=None, logger=None):
invite_token, invite_member_id = get_invite_details(request)
try:
if invite_token and invite_member_id:
om = OrganizationMember.objects.get(token=invite_token, id=invite_member... | |
89,278 | 290,159 | 48 | tests/components/bluetooth/test_usage.py | 20 | 10 | async def test_multiple_bleak_scanner_instances(hass):
install_multiple_bleak_catcher()
instance = bleak.BleakScanner()
assert isinstance(instance, HaBleakScannerWrapper)
uninstall_multiple_bleak_catcher()
with patch("bleak.get_platform_scanner_backend_type"):
instance = bleak.Bleak... | Ensure we do not actually create a BleakScanner in the usage test (#81362)
Avoids a failure when bluetooth is turned off when
testing on macos:
bleak.exc.BleakError: Bluetooth device is turned off | test_multiple_bleak_scanner_instances | ab14e55c052433e42224199798b026637614685f | core | test_usage.py | 10 | 8 | https://github.com/home-assistant/core.git | 1 | 47 | 0 | 14 | 86 | Python | {
"docstring": "Test creating multiple BleakScanners without an integration.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | async def test_multiple_bleak_scanner_instances(hass):
install_multiple_bleak_catcher()
instance = bleak.BleakScanner()
assert isinstance(instance, HaBleakScannerWrapper)
uninstall_multiple_bleak_catcher()
with patch("bleak.get_platform_scanner_backend_type"):
instance = bleak.Bleak... | |
78,244 | 265,912 | 118 | netbox/utilities/utils.py | 59 | 17 | def highlight_string(value, highlight, trim_pre=None, trim_post=None, trim_placeholder='...'):
# Split value on highlight string
try:
pre, match, post = re.split(fr'({highlight})', value, maxsplit=1, flags=re.IGNORECASE)
except ValueError:
# Match not found
return escape(value)
... | Closes #10560: New global search (#10676)
* Initial work on new search backend
* Clean up search backends
* Return only the most relevant result per object
* Clear any pre-existing cached entries on cache()
* #6003: Implement global search functionality for custom field values
* Tweak field weights & do... | highlight_string | 9628dead07ccef9608b32906aa8194bc948e5a09 | netbox | utils.py | 12 | 10 | https://github.com/netbox-community/netbox.git | 6 | 97 | 0 | 48 | 185 | Python | {
"docstring": "\n Highlight a string within a string and optionally trim the pre/post portions of the original string.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 16,
"vocab_size": 13
} | def highlight_string(value, highlight, trim_pre=None, trim_post=None, trim_placeholder='...'):
# Split value on highlight string
try:
pre, match, post = re.split(fr'({highlight})', value, maxsplit=1, flags=re.IGNORECASE)
except ValueError:
# Match not found
return escape(value)
... | |
27,841 | 125,350 | 392 | python/ray/_private/state.py | 63 | 33 | def node_table(self):
self._check_connected()
node_table = self.global_state_accessor.get_node_table()
results = []
for node_info_item in node_table:
item = gcs_utils.GcsNodeInfo.FromString(node_info_item)
node_info = {
"NodeID": ray._pr... | [Python]More efficient node_table() in state.py (#26760)
This picks up https://github.com/ray-project/ray/pull/24088
The `get_node_table` already has resources of nodes, so we don't need to invoke `get_node_resource_info` for every node again. This change will reduce lots of rpc calls and make the api more efficient. | node_table | 62288724b2b4add7ad9b12ff5299559caaa5fb55 | ray | state.py | 15 | 27 | https://github.com/ray-project/ray.git | 4 | 172 | 0 | 53 | 288 | Python | {
"docstring": "Fetch and parse the Gcs node info table.\n\n Returns:\n Information about the node in the cluster.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 16,
"vocab_size": 13
} | def node_table(self):
self._check_connected()
node_table = self.global_state_accessor.get_node_table()
results = []
for node_info_item in node_table:
item = gcs_utils.GcsNodeInfo.FromString(node_info_item)
node_info = {
"NodeID": ray._pr... | |
81,097 | 273,174 | 99 | keras/layers/preprocessing/index_lookup.py | 15 | 9 | def vocabulary_size(self):
if tf.executing_eagerly():
return (
int(self.lookup_table.size().numpy())
+ self._token_start_index()
)
else:
return self.looku | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | vocabulary_size | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | index_lookup.py | 16 | 8 | https://github.com/keras-team/keras.git | 2 | 52 | 0 | 12 | 90 | Python | {
"docstring": "Gets the current size of the layer's vocabulary.\n\n Returns:\n The integer size of the vocabulary, including optional mask and oov indices.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 21,
"vocab_size": 17
} | def vocabulary_size(self):
if tf.executing_eagerly():
return (
int(self.lookup_table.size().numpy())
+ self._token_start_index()
)
else:
return self.lookup_table.size() + self._token_start_index()
| |
53,812 | 215,095 | 114 | tests/pytests/unit/modules/test_aixpkg.py | 38 | 17 | def test_version_with_invalid_names():
lslpp_mydog_out =
ver_chk = MagicMock(return_value={"retcode": 1, "stdout": lslpp_mydog_out})
with patch.dict(aixpkg.__grains | Working tests for install | test_version_with_invalid_names | f1c37893caf90738288e789c3233ab934630254f | salt | test_aixpkg.py | 12 | 31 | https://github.com/saltstack/salt.git | 1 | 92 | 0 | 33 | 161 | Python | {
"docstring": "\n test version of packages\n lslpp: Fileset mydog not installed.\n\n\nState codes: \n A -- Applied. \n B -- Broken. \n C -- Committed. \n E -- EFIX Locked. \n O -- Obsolete. (partially migrated to newer version) \n ? -- Inconsistent State...Run lppchk -v. \n\nType codes: \n F -- Installp Files... | def test_version_with_invalid_names():
lslpp_mydog_out =
ver_chk = MagicMock(return_value={"retcode": 1, "stdout": lslpp_mydog_out})
with patch.dict(aixpkg.__grains__, {"osarch": "PowerPC_POWER8"}), patch.dict(
aixpkg.__salt__,
{"cmd.run_all": ver_chk},
):
versions_checke... | |
7,878 | 43,222 | 13 | tests/models/test_dagrun.py | 7 | 4 | def test_mapped_literal_length_increase_adds_additional_ti(dag_maker, session):
with dag_make | Fix mapped task immutability after clear (#23667)
We should be able to detect if the structure of mapped task has changed
and verify the integrity.
This PR ensures this
Co-authored-by: Tzu-ping Chung <uranusjr@gmail.com> | test_mapped_literal_length_increase_adds_additional_ti | b692517ce3aafb276e9d23570e9734c30a5f3d1f | airflow | test_dagrun.py | 11 | 29 | https://github.com/apache/airflow.git | 3 | 233 | 0 | 7 | 34 | Python | {
"docstring": "Test that when the length of mapped literal increases, additional ti is added",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def test_mapped_literal_length_increase_adds_additional_ti(dag_maker, session):
with dag_maker(session=session) as dag:
| |
19,263 | 96,012 | 87 | tests/sentry/integrations/bitbucket/test_installed.py | 31 | 20 | def test_installed_without_username(self):
# Remove username to simulate privacy mode
del self.user_data_from_bitbucket["principal"]["username"]
response = self.client.post(self.path, data=self.user_data_from_bitbucket)
assert response.status_code == 200
integration = ... | fix(bitbucket): Fix domain name (#31536)
* fix(bitbucket): Fix domain name | test_installed_without_username | 2790a30b7f6a6cffa2cd1aa69c678327a41a0664 | sentry | test_installed.py | 10 | 7 | https://github.com/getsentry/sentry.git | 1 | 76 | 0 | 26 | 122 | Python | {
"docstring": "Test a user (not team) installation where the user has hidden their username from public view",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 15
} | def test_installed_without_username(self):
# Remove username to simulate privacy mode
del self.user_data_from_bitbucket["principal"]["username"]
response = self.client.post(self.path, data=self.user_data_from_bitbucket)
assert response.status_code == 200
integration = ... | |
21,273 | 101,891 | 29 | lib/gui/display.py | 8 | 5 | def _command_display(self, command):
| Typing - lib.gui.display_command | _command_display | dab823a3eb7a5257cb1e0818ee10ed234d3de97f | faceswap | display.py | 10 | 3 | https://github.com/deepfakes/faceswap.git | 1 | 20 | 0 | 8 | 39 | Python | {
"docstring": " Build the relevant command specific tabs based on the incoming Faceswap command.\n\n Parameters\n ----------\n command: str\n The Faceswap command that is being executed\n ",
"language": "en",
"n_whitespaces": 63,
"n_words": 23,
"vocab_size": 20
} | def _command_display(self, command):
build_tabs = getattr(self, f"_{command}_tabs")
build_tabs()
| |
13,532 | 63,924 | 49 | .venv/lib/python3.8/site-packages/pip/_vendor/urllib3/_collections.py | 13 | 7 | def itermerged(self):
for key in s | upd; format | itermerged | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | _collections.py | 13 | 4 | https://github.com/jindongwang/transferlearning.git | 2 | 39 | 0 | 13 | 66 | Python | {
"docstring": "Iterate over all headers, merging duplicate ones together.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def itermerged(self):
for key in self:
val = self._container[key.lower()]
yield val[0], ", ".join(val[1:])
| |
57,167 | 224,020 | 19 | mkdocs/structure/files.py | 5 | 7 | def get_file_from_path(self, path):
return self.src_paths.get(os.path.normpath(path))
| Remove spaces at the ends of docstrings, normalize quotes | get_file_from_path | e7f07cc82ab2be920ab426ba07456d8b2592714d | mkdocs | files.py | 10 | 2 | https://github.com/mkdocs/mkdocs.git | 1 | 24 | 0 | 5 | 40 | Python | {
"docstring": "Return a File instance with File.src_path equal to path.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def get_file_from_path(self, path):
return self.src_paths.get(os.path.normpath(path))
| |
@pytest.fixture | 52,203 | 208,104 | 56 | t/unit/conftest.py | 23 | 11 | def sleepdeprived(request):
module = request.node.get_closest_marker(
"sleepdeprived_patched_module").args[0]
old_sleep, module.sleep = module.sleep, noop
try:
yield
finally:
module.sleep = old_sleep
| Canvas Header Stamping (#7384)
* Strip down the header-stamping PR to the basics.
* Serialize groups.
* Add groups to result backend meta data.
* Fix spelling mistake.
* Revert changes to canvas.py
* Revert changes to app/base.py
* Add stamping implementation to canvas.py
* Send task to AMQP with ... | sleepdeprived | 1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc | celery | conftest.py | 11 | 8 | https://github.com/celery/celery.git | 2 | 42 | 1 | 19 | 83 | Python | {
"docstring": "Mock sleep method in patched module to do nothing.\n\n Example:\n >>> import time\n >>> @pytest.mark.sleepdeprived_patched_module(time)\n >>> def test_foo(self, sleepdeprived):\n >>> pass\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 21,
"vocab_si... | def sleepdeprived(request):
module = request.node.get_closest_marker(
"sleepdeprived_patched_module").args[0]
old_sleep, module.sleep = module.sleep, noop
try:
yield
finally:
module.sleep = old_sleep
# Taken from
# http://bitbucket.org/runeh/snippets/src/tip/missing_module... |
20,985 | 101,575 | 112 | lib/training/preview_tk.py | 30 | 11 | def _set_mouse_bindings(self) -> None:
logger.debug("Binding mouse events")
if system() == "Linux":
self._canvas.tag_bind(self._canvas.image_id, "<Button-4>", self._on_bound_zoom)
self._canvas.tag_bind | Training - Use custom preview pop-out | _set_mouse_bindings | 7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5 | faceswap | preview_tk.py | 12 | 15 | https://github.com/deepfakes/faceswap.git | 2 | 119 | 0 | 22 | 198 | Python | {
"docstring": " Set the mouse bindings for interacting with the preview image\n\n Mousewheel: Zoom in and out\n Mouse click: Move image\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 17
} | def _set_mouse_bindings(self) -> None:
logger.debug("Binding mouse events")
if system() == "Linux":
self._canvas.tag_bind(self._canvas.image_id, "<Button-4>", self._on_bound_zoom)
self._canvas.tag_bind(self._canvas.image_id, "<Button-5>", self._on_bound_zoom)
els... | |
96,701 | 297,739 | 40 | tests/helpers/test_area_registry.py | 22 | 9 | async def test_create_area_with_id_already_in_use(registry):
| Add aliases to area registry items (#84294)
* Add aliases to area registry items
* Update test
* Fix WS API | test_create_area_with_id_already_in_use | 1a42bd5c4cb51ffbfcaf8d5389b80a228712ac81 | core | test_area_registry.py | 10 | 6 | https://github.com/home-assistant/core.git | 1 | 50 | 0 | 17 | 90 | Python | {
"docstring": "Make sure that we can't create an area with a name already in use.",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 14
} | async def test_create_area_with_id_already_in_use(registry):
area1 = registry.async_create("mock")
updated_area1 = registry.async_update(area1.id, name="New Name")
assert updated_area1.id == area1.id
area2 = registry.async_create("mock")
assert area2.id == "mock_2"
| |
10,074 | 50,265 | 147 | modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/ernie_modeling.py | 43 | 20 | def forward(self, *args, **kwargs):
labels = kwargs.pop('label | add disco_diffusion_ernievil_base | forward | ffcde21305c61d950a9f93e57e6180c9a9665b87 | PaddleHub | ernie_modeling.py | 12 | 12 | https://github.com/PaddlePaddle/PaddleHub.git | 3 | 99 | 0 | 32 | 160 | Python | {
"docstring": "\n Args:\n labels (optional, `Variable` of shape [batch_size]):\n ground truth label id for each sentence\n Returns:\n loss (`Variable` of shape []):\n Cross entropy loss mean over batch\n if labels not set, returns None\... | def forward(self, *args, **kwargs):
labels = kwargs.pop('labels', None)
pooled, encoded = super(ErnieModelForSequenceClassification, self).forward(*args, **kwargs)
hidden = self.dropout(pooled)
logits = self.classifier(hidden)
if labels is not None:
if len(l... | |
12,832 | 62,023 | 23 | .venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py | 9 | 4 | def _get_project(self, name):
raise NotImplemen | upd; format | _get_project | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | locators.py | 8 | 2 | https://github.com/jindongwang/transferlearning.git | 1 | 13 | 0 | 9 | 25 | Python | {
"docstring": "\n For a given project, get a dictionary mapping available versions to Distribution\n instances.\n\n This should be implemented in subclasses.\n\n If called from a locate() request, self.matcher will be set to a\n matcher for the requirement to satisfy, otherwise it ... | def _get_project(self, name):
raise NotImplementedError('Please implement in the subclass')
| |
38,470 | 160,031 | 51 | numpy/core/tests/test_multiarray.py | 16 | 12 | def test_pickle_empty(self):
arr = np.array([]).reshape(999999, 0)
pk_dmp = pickle.dumps(arr)
pk_load = pickle.loads(pk_dmp)
assert pk_load.size == 0
| BUG: Fix unpickling an empty ndarray with a none-zero dimension (#21067)
Changing num to the number of bytes in the input array, PyArray_NBYTES(self). Solves #21009.
* Fixing nbyte size in methods.c:memcpy
* Adding a test
* Re-adding removed newline
* Shrinking the test array to save memory | test_pickle_empty | 935fe83ddaa3250d176bc848579ffdc4e1017090 | numpy | test_multiarray.py | 11 | 5 | https://github.com/numpy/numpy.git | 1 | 44 | 0 | 14 | 73 | Python | {
"docstring": "Checking if an empty array pickled and un-pickled will not cause a\n segmentation fault",
"language": "en",
"n_whitespaces": 20,
"n_words": 14,
"vocab_size": 14
} | def test_pickle_empty(self):
arr = np.array([]).reshape(999999, 0)
pk_dmp = pickle.dumps(arr)
pk_load = pickle.loads(pk_dmp)
assert pk_load.size == 0
| |
7,461 | 42,022 | 125 | seaborn/_oldcore.py | 46 | 8 | def get_semantics(cls, kwargs, semantics=None):
# TODO this should be get_variables since we have included x and y
if semantics is None:
semantics = | docs: fix typos (#2899)
* Small typo fixes
* Catch an additional typo
Co-authored-by: Michael Waskom <mwaskom@users.noreply.github.com> | get_semantics | 5910d6ef50196c8bd1f4ed40a5da202a39d7f62c | seaborn | _oldcore.py | 11 | 8 | https://github.com/mwaskom/seaborn.git | 5 | 55 | 0 | 34 | 88 | Python | {
"docstring": "Subset a dictionary arguments with known semantic variables.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def get_semantics(cls, kwargs, semantics=None):
# TODO this should be get_variables since we have included x and y
if semantics is None:
semantics = cls.semantics
variables = {}
for key, val in kwargs.items():
if key in semantics and val is not None:
... | |
2,995 | 19,485 | 176 | pipenv/utils/dependencies.py | 72 | 37 | def convert_deps_to_pip(deps, project=None, r=True, include_index=True):
from pipenv.vendor.requirementslib.models.requirements import Requirement
dependencies = []
for dep_name, dep in deps.items():
if project:
project.clear_pipfile_cache()
indexes = getattr(project, "pipf... | Code reorg utils into utils module reduces complexity (#4990)
* Split apart the massive utils.py into a utils module | convert_deps_to_pip | 3387881a6d4fc2d8bdc0f05c484cb2f7222acfb8 | pipenv | dependencies.py | 14 | 19 | https://github.com/pypa/pipenv.git | 7 | 167 | 0 | 55 | 266 | Python | {
"docstring": "\"Converts a Pipfile-formatted dependency to a pip-formatted one.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 7
} | def convert_deps_to_pip(deps, project=None, r=True, include_index=True):
from pipenv.vendor.requirementslib.models.requirements import Requirement
dependencies = []
for dep_name, dep in deps.items():
if project:
project.clear_pipfile_cache()
indexes = getattr(project, "pipf... | |
24,919 | 113,475 | 93 | nni/algorithms/hpo/hyperband_advisor.py | 25 | 9 | def handle_trial_end(self, data):
hyper_params = nni.load(data['hyper_params'])
if self.is_created_in_previous_exp(hyper_params['parameter_id']):
# The end of the recovered trial is ignored
return
self._handle_trial_end(hyper_params['parameter_id'])
if da... | [nas] fix issue introduced by the trial recovery feature (#5109) | handle_trial_end | bcc640c4e5e687a03fe21503692dad96e0b97fa7 | nni | hyperband_advisor.py | 10 | 7 | https://github.com/microsoft/nni.git | 3 | 60 | 0 | 24 | 105 | Python | {
"docstring": "\n Parameters\n ----------\n data: dict()\n it has three keys: trial_job_id, event, hyper_params\n trial_job_id: the id generated by training service\n event: the job's state\n hyper_params: the hyperparameters (a string) generated and r... | def handle_trial_end(self, data):
hyper_params = nni.load(data['hyper_params'])
if self.is_created_in_previous_exp(hyper_params['parameter_id']):
# The end of the recovered trial is ignored
return
self._handle_trial_end(hyper_params['parameter_id'])
if da... | |
56,021 | 220,508 | 115 | python3.10.4/Lib/asyncio/futures.py | 29 | 11 | def _copy_future_state(source, dest):
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(_convert_future_exc(ex... | add python 3.10.4 for windows | _copy_future_state | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | futures.py | 14 | 14 | https://github.com/XX-net/XX-Net.git | 4 | 80 | 0 | 22 | 138 | Python | {
"docstring": "Internal helper to copy state from another Future.\n\n The other Future may be a concurrent.futures.Future.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 15,
"vocab_size": 15
} | def _copy_future_state(source, dest):
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(_convert_future_exc(ex... | |
76,648 | 261,047 | 80 | sklearn/utils/tests/test_validation.py | 41 | 16 | def test_get_feature_names_invalid_dtypes(names, dtypes):
pd = | MAINT Clean deprecation for 1.2: validation (#24493)
* cln deprecations
* cln
* fix tst switch to pytest.raises | test_get_feature_names_invalid_dtypes | 9f9f1684e91fbfffbc446f786a8c64628b752efb | scikit-learn | test_validation.py | 11 | 9 | https://github.com/scikit-learn/scikit-learn.git | 1 | 74 | 0 | 34 | 123 | Python | {
"docstring": "Get feature names errors when the feature names have mixed dtypes",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 9
} | def test_get_feature_names_invalid_dtypes(names, dtypes):
pd = pytest.importorskip("pandas")
X = pd.DataFrame([[1, 2], [4, 5], [5, 6]], columns=names)
msg = re.escape(
"Feature names only support names that are all strings. "
f"Got feature names with dtypes: {dtypes}."
)
with p... | |
73,739 | 251,435 | 417 | mitmproxy/platform/pf.py | 133 | 17 | def lookup(address, port, s):
# We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.
# Those still appear as "127.0.0.1" in the table, so we need to strip the prefix.
address = re.sub(r"^::ffff:(?=\d+.\d+.\d+.\d+$)", "", address)
s = s.decode()
# ALL tcp 192.168.1.13:57474 -> 23... | make it black! | lookup | b3587b52b25077f68116b9852b041d33e7fc6601 | mitmproxy | pf.py | 19 | 23 | https://github.com/mitmproxy/mitmproxy.git | 10 | 200 | 0 | 82 | 358 | Python | {
"docstring": "\n Parse the pfctl state output s, to look up the destination host\n matching the client (address, port).\n\n Returns an (address, port) tuple, or None.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 24,
"vocab_size": 21
} | def lookup(address, port, s):
# We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.
# Those still appear as "127.0.0.1" in the table, so we need to strip the prefix.
address = re.sub(r"^::ffff:(?=\d+.\d+.\d+.\d+$)", "", address)
s = s.decode()
# ALL tcp 192.168.1.13:57474 -> 23... | |
18,185 | 86,903 | 155 | src/sentry/models/projectownership.py | 51 | 19 | def _hydrate_rules(cls, project_id, rules, type=OwnerRuleType.OWNERSHIP_RULE.value):
owners = [owner for rule in r | feat(commit-context): Refactor Issue Owner auto-assignment (#40048)
## Objective:
This PR refactors how we calculate the Issue Owners from Code
Owners/Ownership Rules and who should get the auto-assignment. Auto
Assignment will first go to the Suspect Committer (if it exists and the
setting is on) then to Issue Ow... | _hydrate_rules | 712ba34a4d51be636396e70557aa3f99471814be | sentry | projectownership.py | 14 | 12 | https://github.com/getsentry/sentry.git | 8 | 96 | 0 | 32 | 139 | Python | {
"docstring": "\n Get the last matching rule to take the most precedence.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 9
} | def _hydrate_rules(cls, project_id, rules, type=OwnerRuleType.OWNERSHIP_RULE.value):
owners = [owner for rule in rules for owner in rule.owners]
actors = {
key: val
for key, val in resolve_actors({owner for owner in owners}, project_id).items()
if val
... | |
11,603 | 56,999 | 60 | src/prefect/blocks/kubernetes.py | 10 | 8 | def activate(self) -> str:
load_kube_config_from_dict(
config_dict=s | add test coerage for get_api_client and activate | activate | 8f3ffd09dc47bfd2af6a635cc04c640febffd519 | prefect | kubernetes.py | 9 | 11 | https://github.com/PrefectHQ/prefect.git | 1 | 29 | 0 | 10 | 48 | Python | {
"docstring": "\n Convenience method for activating the k8s config stored in an instance of this block\n\n Returns current_context for sanity check\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 18
} | def activate(self) -> str:
load_kube_config_from_dict(
config_dict=self.config,
context=self.context,
)
return self.current_context()
| |
48,731 | 197,875 | 65 | sympy/core/expr.py | 18 | 8 | def as_coeff_add(self, *deps) -> tuple[Expr, tuple[Expr, ...]]:
| add some type hints to expr.py | as_coeff_add | 675e6d6ca7aa63ce26f8aa0ca2467976b6570113 | sympy | expr.py | 12 | 35 | https://github.com/sympy/sympy.git | 3 | 49 | 0 | 15 | 77 | Python | {
"docstring": "Return the tuple (c, args) where self is written as an Add, ``a``.\n\n c should be a Rational added to any terms of the Add that are\n independent of deps.\n\n args should be a tuple of all other terms of ``a``; args is empty\n if self is a Number or if self is independent ... | def as_coeff_add(self, *deps) -> tuple[Expr, tuple[Expr, ...]]:
if deps:
if not self.has_free(*deps):
return self, tuple()
return S.Zero, (self,)
| |
5,414 | 30,229 | 277 | spotdl/console/web.py | 112 | 19 | def create_github_url(url):
repo_only_url = re.compile(
r"https:\/\/github\.com\/[a-z\d](?:[a-z\d]|-(?=[a-z\d])){0,38}\/[a-zA-Z0-9]+$"
)
re_branch = re.compile("/(tree|blob)/(.+?)/")
# Check if the given url is a url to a GitHub repo. If it is, tell the
# user | update web code
Co-Authored-By: Peyton Creery <44987569+phcreery@users.noreply.github.com> | create_github_url | bbb7a02ef889134af71593102bc6f65035ab14cb | spotify-downloader | web.py | 19 | 23 | https://github.com/spotDL/spotify-downloader.git | 3 | 111 | 0 | 71 | 198 | Python | {
"docstring": "\n From the given url, produce a URL that is compatible with Github's REST API. Can handle blob or tree paths.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 20,
"vocab_size": 20
} | def create_github_url(url):
repo_only_url = re.compile(
r"https:\/\/github\.com\/[a-z\d](?:[a-z\d]|-(?=[a-z\d])){0,38}\/[a-zA-Z0-9]+$"
)
re_branch = re.compile("/(tree|blob)/(.+?)/")
# Check if the given url is a url to a GitHub repo. If it is, tell the
# user to use 'git clone' to dow... | |
75,819 | 259,555 | 12 | sklearn/metrics/cluster/_supervised.py | 6 | 4 | def homogeneity_score(labels_true, labels_pred):
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
| DOC Ensures that homogeneity_score passes numpydoc validation (#23006) | homogeneity_score | 4253eace9893eb6aef36ca631e7978b6a8808fbc | scikit-learn | _supervised.py | 8 | 2 | https://github.com/scikit-learn/scikit-learn.git | 1 | 18 | 0 | 6 | 29 | Python | {
"docstring": "Homogeneity metric of a cluster labeling given a ground truth.\n\n A clustering result satisfies homogeneity if all of its clusters\n contain only data points which are members of a single class.\n\n This metric is independent of the absolute values of the labels:\n a permutation of the cl... | def homogeneity_score(labels_true, labels_pred):
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
| |
49,376 | 199,720 | 62 | sympy/polys/orthopolys.py | 33 | 12 | def dup_chebyshevt(n, K):
if n | Restore domain elements in dup_* functions | dup_chebyshevt | 3d30d00c37371f142e6a0e9dc5058752d8c9d401 | sympy | orthopolys.py | 15 | 7 | https://github.com/sympy/sympy.git | 3 | 83 | 0 | 26 | 123 | Python | {
"docstring": "Low-level implementation of Chebyshev polynomials of the first kind.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | def dup_chebyshevt(n, K):
if n < 1:
return [K.one]
m2, m1 = [K.one], [K.one, K.zero]
for i in range(2, n+1):
m2, m1 = m1, dup_sub(dup_mul_ground(dup_lshift(m1, 1, K), K(2), K), m2, K)
return m1
| |
57,190 | 224,043 | 20 | mkdocs/tests/base.py | 8 | 8 | def get_markdown_toc(markdown_source):
md = markdown.Markdown(extensions=['toc | Remove spaces at the ends of docstrings, normalize quotes | get_markdown_toc | e7f07cc82ab2be920ab426ba07456d8b2592714d | mkdocs | base.py | 11 | 4 | https://github.com/mkdocs/mkdocs.git | 1 | 28 | 0 | 8 | 50 | Python | {
"docstring": "Return TOC generated by Markdown parser from Markdown source text.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def get_markdown_toc(markdown_source):
md = markdown.Markdown(extensions=['toc'])
md.convert(markdown_source)
return md.toc_tokens
| |
16,673 | 77,547 | 53 | wagtail/admin/widgets/chooser.py | 10 | 6 | def get_value_data_from_instance(self, instance):
| Split out common logic from get_value_data | get_value_data_from_instance | 39f7886a6f8ee98db7e73ce33d94c06139f35bd8 | wagtail | chooser.py | 11 | 5 | https://github.com/wagtail/wagtail.git | 1 | 28 | 0 | 10 | 49 | Python | {
"docstring": "\n Given a model instance, return a value that we can pass to both the server-side template\n and the client-side rendering code (via telepath) that contains all the information needed\n for display. Typically this is a dict of id, title etc; it must be JSON-serialisable.\n ... | def get_value_data_from_instance(self, instance):
return {
"id": instance.pk,
"edit_url": AdminURLFinder().get_edit_url(instance),
}
| |
53,473 | 212,865 | 10,839 | PySimpleGUI.py | 4,824 | 131 | def set_options(icon=None, button_color=None, element_size=(None, None), button_element_size=(None, None),
margins=(None, None),
element_padding=(None, None), auto_size_text=None, auto_size_buttons=None, font=None, border_width=None,
slider_border_width=None, slider_relie... | Addition of tooltip_offset parm to set_options call (major hack to get around 8.6.12 problem). Backed out the experiments to try and fix new problem with Ubuntu | set_options | 07bb93d47f01468660a01f42150e87e5cb08d546 | PySimpleGUI | PySimpleGUI.py | 16 | 14 | https://github.com/PySimpleGUI/PySimpleGUI.git | 1 | 255 | 0 | 1,112 | 19,192 | Python | {
"docstring": "\n :param icon: Can be either a filename or Base64 value. For Windows if filename, it MUST be ICO format. For Linux, must NOT be ICO. Most portable is to use a Base64 of a PNG file. This works universally across all OS's\n :type icon: bytes ... | def set_options(icon=None, button_color=None, element_size=(None, None), button_element_size=(None, None),
margins=(None, None),
element_padding=(None, None), auto_size_text=None, auto_size_buttons=None, font=None, border_width=None,
slider_border_width=None, slider_relie... | |
52,179 | 208,027 | 432 | celery/utils/imports.py | 84 | 18 | def find_module(module, path=None, imp=None):
if imp is None:
imp = import_module
with cwd_in_path():
try:
return imp(module)
except I | Minor refactors, found by static analysis (#7587)
* Remove deprecated methods in `celery.local.Proxy`
* Collapse conditionals for readability
* Remove unused parameter `uuid`
* Remove unused import `ClusterOptions`
* Remove dangerous mutable default argument
Continues work from #5478
* Remove always ... | find_module | 59263b0409e3f02dc16ca8a3bd1e42b5a3eba36d | celery | imports.py | 20 | 20 | https://github.com/celery/celery.git | 7 | 105 | 0 | 61 | 185 | Python | {
"docstring": "Version of :func:`imp.find_module` supporting dots.",
"language": "en",
"n_whitespaces": 4,
"n_words": 5,
"vocab_size": 5
} | def find_module(module, path=None, imp=None):
if imp is None:
imp = import_module
with cwd_in_path():
try:
return imp(module)
except ImportError:
# Raise a more specific error if the problem is that one of the
# dot-separated segments of the modul... | |
5,326 | 30,117 | 49 | spotdl/utils/ffmpeg.py | 24 | 7 | def get_ffmpeg_path() -> Optional[Path]:
# Check if ffmpeg is installed
global_ffmpeg = shutil.which("ffmpeg")
if global_ffmpeg:
return Path(global_ffmpeg)
| v4 init | get_ffmpeg_path | fa2ad657482aca9dc628e6d7062b8badf2706bb6 | spotify-downloader | ffmpeg.py | 9 | 9 | https://github.com/spotDL/spotify-downloader.git | 2 | 30 | 0 | 20 | 56 | Python | {
"docstring": "\n Get path to global ffmpeg binary or a local ffmpeg binary.\n Or None if not found.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 16,
"vocab_size": 15
} | def get_ffmpeg_path() -> Optional[Path]:
# Check if ffmpeg is installed
global_ffmpeg = shutil.which("ffmpeg")
if global_ffmpeg:
return Path(global_ffmpeg)
# Get local ffmpeg path
return get_local_ffmpeg()
| |
81,158 | 273,959 | 44 | keras/layers/rnn/legacy_cell_wrappers.py | 12 | 8 | def __call__(self, inputs, state, scope=None):
return self._call_wrapped_cell(
inputs, state, cell_call_fn=self.cell.__call__, scope=scope
)
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | __call__ | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | legacy_cell_wrappers.py | 10 | 4 | https://github.com/keras-team/keras.git | 1 | 35 | 0 | 10 | 51 | Python | {
"docstring": "Runs the RNN cell step computation.\n\n We assume that the wrapped RNNCell is being built within its `__call__`\n method. We directly use the wrapped cell's `__call__` in the overridden\n wrapper `__call__` method.\n\n This allows to use the wrapped cell and the non-wrapped... | def __call__(self, inputs, state, scope=None):
return self._call_wrapped_cell(
inputs, state, cell_call_fn=self.cell.__call__, scope=scope
)
| |
84,781 | 284,531 | 462 | openbb_terminal/portfolio/portfolio_model.py | 38 | 20 | def get_kurtosis(self) -> pd.DataFrame:
vals = list()
for period in portfolio_helper.PERIODS:
vals.append(
[
round(
scipy.stats.kurtosis(
portfolio_helper.filter_df_by_period(self.returns, period... | Portfolio improvements (#1818)
* improve portfolio controller
* improve menu ux with disabling command when port or bench are not loaded
* allow custom reset with benchmark and portfolio loaded
* bench needs portfolio loaded to use start date, reflect that
* fix tests
* allow to see sum of a portfolio h... | get_kurtosis | 0e3b62e143c981d81fb46a7e7bb75f93d9159198 | OpenBBTerminal | portfolio_model.py | 17 | 31 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 2 | 98 | 0 | 30 | 151 | Python | {
"docstring": "Class method that retrieves kurtosis for portfolio and benchmark selected\n\n Returns\n -------\n pd.DataFrame\n DataFrame with kurtosis for portfolio and benchmark for different periods\n ",
"language": "en",
"n_whitespaces": 62,
"n_words": 23,
"vocab_si... | def get_kurtosis(self) -> pd.DataFrame:
vals = list()
for period in portfolio_helper.PERIODS:
vals.append(
[
round(
scipy.stats.kurtosis(
portfolio_helper.filter_df_by_period(self.returns, period... | |
80,341 | 269,933 | 1,932 | keras/callbacks.py | 230 | 39 | def _save_model(self, epoch, batch, logs):
logs = logs or {}
if (
isinstance(self.save_freq, int)
or self.epochs_since_last_save >= self.period
):
# Block only when saving interval is reached.
logs = tf_utils.sync_to_numpy_or_python_type(... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _save_model | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | callbacks.py | 25 | 73 | https://github.com/keras-team/keras.git | 15 | 306 | 0 | 123 | 579 | Python | {
"docstring": "Saves the model.\n\n Args:\n epoch: the epoch this iteration is in.\n batch: the batch this iteration is in. `None` if the `save_freq`\n is set to `epoch`.\n logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.\n ",
"languag... | def _save_model(self, epoch, batch, logs):
logs = logs or {}
if (
isinstance(self.save_freq, int)
or self.epochs_since_last_save >= self.period
):
# Block only when saving interval is reached.
logs = tf_utils.sync_to_numpy_or_python_type(... | |
42,302 | 177,172 | 231 | networkx/algorithms/approximation/steinertree.py | 102 | 24 | def steiner_tree(G, terminal_nodes, weight="weight", method=None):
r
if method is None:
import warnings
msg = (
"steiner_tree will change default method from 'kou' to 'mehlhorn'"
| Add Mehlhorn Steiner approximations (#5629)
* Add Wu et al. and Mehlhorn Steiner approximations
* Change default steiner tree approximation method
* Add missing space in error message
* Changes as suggested
* Fix Kou implementation
* Bugfix and variable name change for Mehlhorn
* Add failing test cas... | steiner_tree | 56032abfdff74aebe7e6adbaa711bf4fd6bd7826 | networkx | steinertree.py | 18 | 86 | https://github.com/networkx/networkx.git | 5 | 141 | 0 | 81 | 226 | Python | {
"docstring": "Return an approximation to the minimum Steiner tree of a graph.\n\n The minimum Steiner tree of `G` w.r.t a set of `terminal_nodes` (also *S*)\n is a tree within `G` that spans those nodes and has minimum size (sum of\n edge weights) among all such trees.\n\n The approximation algorithm is... | def steiner_tree(G, terminal_nodes, weight="weight", method=None):
r
if method is None:
import warnings
msg = (
"steiner_tree will change default method from 'kou' to 'mehlhorn'"
"in version 3.2.\nSet the `method` kwarg to remove this warning."
)
warnings... | |
15,916 | 72,955 | 35 | wagtail/api/v2/views.py | 10 | 7 | def find_object(self, queryset, request):
if "id" in request.GET:
| Reformat with black | find_object | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | views.py | 12 | 3 | https://github.com/wagtail/wagtail.git | 2 | 31 | 0 | 10 | 53 | Python | {
"docstring": "\n Override this to implement more find methods.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | def find_object(self, queryset, request):
if "id" in request.GET:
return queryset.get(id=request.GET["id"])
| |
39,421 | 163,372 | 57 | pandas/core/dtypes/cast.py | 21 | 8 | def _maybe_infer_dtype_type(element):
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = | CLN: assorted, privatize, easy issues (#45305) | maybe_infer_dtype_type | 5ba7d714014ae8feaccc0dd4a98890828cf2832d | pandas | cast.py | 11 | 8 | https://github.com/pandas-dev/pandas.git | 3 | 43 | 0 | 14 | 74 | Python | {
"docstring": "\n Try to infer an object's dtype, for use in arithmetic ops.\n\n Uses `element.dtype` if that's available.\n Objects implementing the iterator protocol are cast to a NumPy array,\n and from there the array's type is used.\n\n Parameters\n ----------\n element : object\n Po... | def _maybe_infer_dtype_type(element):
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
| |
108,472 | 309,776 | 107 | tests/components/alexa/test_smart_home.py | 59 | 11 | def test_create_api_message_special():
request = get_new_request("Alexa.PowerController", "TurnOn")
directive_header = request["directive"]["header"]
directive_header.pop("correlationToken")
directive = messages.AlexaDirective(request)
msg = directive.response("testName", "testNameSpace")._res... | Fix comments in Alexa (#64289) | test_create_api_message_special | c109d59862d1e2e28e54160ee75f9465771e99eb | core | test_smart_home.py | 10 | 16 | https://github.com/home-assistant/core.git | 1 | 133 | 0 | 36 | 252 | Python | {
"docstring": "Create an API message response of a request with non defaults.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def test_create_api_message_special():
request = get_new_request("Alexa.PowerController", "TurnOn")
directive_header = request["directive"]["header"]
directive_header.pop("correlationToken")
directive = messages.AlexaDirective(request)
msg = directive.response("testName", "testNameSpace")._res... | |
6,012 | 32,880 | 43 | tests/mixed_int8/test_mixed_int8.py | 9 | 9 | def tearDown(self):
r
del self.model_fp16
del self.model_8bit
gc.collect()
torch.c | `bitsandbytes` - `Linear8bitLt` integration into `transformers` models (#17901)
* first commit
* correct replace function
* add final changes
- works like charm!
- cannot implement tests yet
- tested
* clean up a bit
* add bitsandbytes dependencies
* working version
- added import function
- ad... | tearDown | 4a51075a96d2049f368b5f3dd6c0e9f08f599b62 | transformers | test_mixed_int8.py | 8 | 9 | https://github.com/huggingface/transformers.git | 1 | 27 | 0 | 8 | 46 | Python | {
"docstring": "\n TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to\n avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27\n ",
"language": "en",
"n_whitespaces": 49,
"n_words... | def tearDown(self):
r
del self.model_fp16
del self.model_8bit
gc.collect()
torch.cuda.empty_cache()
| |
71,142 | 246,307 | 681 | tests/rest/client/test_relations.py | 226 | 32 | def test_pagination_from_sync_and_messages(self):
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A")
self.assertEquals(200, channel.code, channel.json_body)
annotation_id = channel.json_body["event_id"]
# Send an event after the relation events.
s... | Support pagination tokens from /sync and /messages in the relations API. (#11952) | test_pagination_from_sync_and_messages | df36945ff0e4a293a9dac0da07e2c94256835b32 | synapse | test_relations.py | 13 | 39 | https://github.com/matrix-org/synapse.git | 5 | 289 | 0 | 111 | 505 | Python | {
"docstring": "Pagination tokens from /sync and /messages can be used to paginate /relations.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def test_pagination_from_sync_and_messages(self):
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A")
self.assertEquals(200, channel.code, channel.json_body)
annotation_id = channel.json_body["event_id"]
# Send an event after the relation events.
s... | |
22,025 | 104,910 | 31 | src/datasets/utils/streaming_download_manager.py | 10 | 6 | def download(self, url_or_urls):
url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)
| Add API code examples for Builder classes (#4313)
* 📝 add examples for builder classes
* 📝 apply quentin review | download | d1d4f1065fd4ab91b2c8682643dbd12f86d66fcd | datasets | streaming_download_manager.py | 9 | 3 | https://github.com/huggingface/datasets.git | 1 | 24 | 0 | 9 | 38 | Python | {
"docstring": "Download given url(s).\n\n Args:\n url_or_urls: url or `list`/`dict` of urls to download and extract. Each\n url is a `str`.\n\n Returns:\n downloaded_path(s): `str`, The downloaded paths matching the given input\n url_or_urls.\n\n ... | def download(self, url_or_urls):
url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)
return url_or_urls
| |
1,652 | 9,673 | 243 | reconstruction/ostec/utils/generate_heatmap.py | 148 | 20 | def draw_gaussian(image, point, sigma):
# Check if the gaussian is inside
point[0] = round(point[0], 2)
point[1] = round(point[1], 2)
ul = [math.floor(point[0] - 7.5 * sigma), math.floor(point[1] - 7.5 * sigma)]
br = [math.floor(point[0] + 7.5 * sigma), math.floor(point[1] + 7.5 * sigma)]
... | Improved landmark differentiability by heatmaps. | draw_gaussian | 2a8b181d4ddfc542d0784b8ea7341f09500ff299 | insightface | generate_heatmap.py | 15 | 21 | https://github.com/deepinsight/insightface.git | 6 | 469 | 0 | 86 | 667 | Python | {
"docstring": " Draw gaussian circle at a point in an image.\n\n Args:\n image (np.array): An image of shape (H, W)\n point (np.array): The center point of the guassian circle\n sigma (float): Standard deviation of the gaussian kernel\n\n Returns:\n np.array: The image with the draw... | def draw_gaussian(image, point, sigma):
# Check if the gaussian is inside
point[0] = round(point[0], 2)
point[1] = round(point[1], 2)
ul = [math.floor(point[0] - 7.5 * sigma), math.floor(point[1] - 7.5 * sigma)]
br = [math.floor(point[0] + 7.5 * sigma), math.floor(point[1] + 7.5 * sigma)]
... | |
56,422 | 221,530 | 54 | python3.10.4/Lib/collections/__init__.py | 15 | 4 | def setdefault(self, key, default=None):
if key in self:
return self[key]
self[key] = default
return default
| add python 3.10.4 for windows | setdefault | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | __init__.py | 8 | 5 | https://github.com/XX-net/XX-Net.git | 2 | 30 | 0 | 12 | 47 | Python | {
"docstring": "Insert key with a value of default if key is not in the dictionary.\n\n Return the value for key if key is in the dictionary, else default.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 27,
"vocab_size": 18
} | def setdefault(self, key, default=None):
if key in self:
return self[key]
self[key] = default
return default
| |
40,398 | 169,203 | 28 | web/pandas_web.py | 7 | 5 | def current_year(context):
context["current_year"] = datetime. | WEB: Add new footer to web (#48557) | current_year | bbf17ea692e437cec908eae6759ffff8092fb42e | pandas | pandas_web.py | 10 | 3 | https://github.com/pandas-dev/pandas.git | 1 | 22 | 0 | 7 | 40 | Python | {
"docstring": "\n Add the current year to the context, so it can be used for the copyright\n note, or other places where it is needed.\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 23,
"vocab_size": 20
} | def current_year(context):
context["current_year"] = datetime.datetime.now().year
return context
| |
505 | 3,627 | 98 | airbyte-integrations/connectors/source-s3/source_s3/source_files_abstract/stream.py | 37 | 12 | def fileformatparser_map(self) -> Mapping[str, type]:
return {
"csv": CsvParser,
"parquet": ParquetParser,
}
# TODO: make these user configurable in spec.json
ab_additional_col = "_ab_additional_properties"
ab_last_mod_col = "_ab_source_file_last_modified"
... | 🐛 Source S3: Loading of files' metadata (#8252) | fileformatparser_map | 91eff1dffdb04be968b6ee4ef8d8bbfeb2e882d0 | airbyte | stream.py | 8 | 6 | https://github.com/airbytehq/airbyte.git | 1 | 24 | 0 | 33 | 82 | Python | {
"docstring": "Mapping where every key is equal 'filetype' and values are corresponding parser classes.",
"language": "en",
"n_whitespaces": 15,
"n_words": 13,
"vocab_size": 13
} | def fileformatparser_map(self) -> Mapping[str, type]:
return {
"csv": CsvParser,
"parquet": ParquetParser,
}
# TODO: make these user configurable in spec.json
ab_additional_col = "_ab_additional_properties"
ab_last_mod_col = "_ab_source_file_last_modified"
... | |
36,553 | 156,094 | 57 | dask/dataframe/core.py | 18 | 9 | def pivot_table(self, index=None, columns=None, values=None, aggfunc="mean"):
from dask.dataframe.reshape import pivot_table
return pivot_table(
self, index=index, columns=colum | absolufy-imports - No relative - PEP8 (#8796)
Conversation in https://github.com/dask/distributed/issues/5889 | pivot_table | cccb9d8d8e33a891396b1275c2448c352ef40c27 | dask | core.py | 8 | 5 | https://github.com/dask/dask.git | 1 | 51 | 0 | 18 | 74 | Python | {
"docstring": "\n Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.\n\n Parameters\n ----------\n values : scalar\n ... | def pivot_table(self, index=None, columns=None, values=None, aggfunc="mean"):
from dask.dataframe.reshape import pivot_table
return pivot_table(
self, index=index, columns=columns, values=values, aggfunc=aggfunc
)
| |
31,314 | 138,092 | 141 | python/ray/tune/tests/test_actor_reuse.py | 42 | 19 | def test_multi_trial_reuse_with_failing(ray_start_4_cpus_extra):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "2"
register_trainable("foo2", MyResettableClass)
[trial1, trial2, trial3, trial4] = tune.run(
"foo2",
config={
"fail": tune.grid_search([False, True, False, False])... | [air/tune] Internal resource management 2 - Ray Tune to use new Ray AIR resource manager (#30016)
Includes/depends on #30777
TLDR: This PR refactors Ray Tune's resource management to use a central AIR resource management package instead of the tightly coupled PlacementGroupManager.
Ray Tune's resource management... | test_multi_trial_reuse_with_failing | 1510fb2cd631b2776092fb45ee4082e5e65f16f8 | ray | test_actor_reuse.py | 15 | 17 | https://github.com/ray-project/ray.git | 1 | 113 | 0 | 36 | 183 | Python | {
"docstring": "Test that failing trial's actors are not reused.\n\n - 2 trials can run at the same time\n - Trial 1 succeeds, trial 2 fails\n - Trial 3 will be scheduled after trial 2 failed, so won't reuse actor\n - Trial 4 will be scheduled after trial 1 succeeded, so will reuse actor\n ",
"langua... | def test_multi_trial_reuse_with_failing(ray_start_4_cpus_extra):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "2"
register_trainable("foo2", MyResettableClass)
[trial1, trial2, trial3, trial4] = tune.run(
"foo2",
config={
"fail": tune.grid_search([False, True, False, False])... | |
15,938 | 73,067 | 114 | wagtail/contrib/forms/views.py | 32 | 24 | def dispatch(self, request, *args, **kwargs):
page_id = kwargs.get("page_id")
if not get_forms_for_user(self.request.user).filter(id=page_id).exists():
raise PermissionDenied
self.page = get_object_or_404(Page, id=page_id).specific
self.submissions = self.get_quer... | Reformat with black | dispatch | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | views.py | 14 | 10 | https://github.com/wagtail/wagtail.git | 3 | 112 | 0 | 27 | 182 | Python | {
"docstring": "Check permissions, set the page and submissions, handle delete",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def dispatch(self, request, *args, **kwargs):
page_id = kwargs.get("page_id")
if not get_forms_for_user(self.request.user).filter(id=page_id).exists():
raise PermissionDenied
self.page = get_object_or_404(Page, id=page_id).specific
self.submissions = self.get_quer... | |
36,901 | 157,358 | 72 | ldm/models/diffusion/ddpm.py | 30 | 23 | def _prior_bpd(self, x_start):
batch_size = x_start.shape[0]
t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logv... | release more models | _prior_bpd | ca86da3a30c4e080d4db8c25fca73de843663cb4 | stablediffusion | ddpm.py | 12 | 6 | https://github.com/Stability-AI/stablediffusion.git | 1 | 90 | 0 | 27 | 127 | Python | {
"docstring": "\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch... | def _prior_bpd(self, x_start):
batch_size = x_start.shape[0]
t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logv... | |
39,660 | 165,326 | 39 | pandas/tests/window/test_rolling.py | 27 | 9 | def test_rolling_non_monotonic(method, expected):
# Based on an example found in computation.rst
use_expanding = [True, False, True, False, True, True, True, True]
df = DataFrame({"values": np.arange(len(use_expanding)) ** 2})
| ENH: Rolling window with step size (GH-15354) (#45765) | test_rolling_non_monotonic | 6caefb19f4d7c05451fafca182c6eb39fe9901ed | pandas | test_rolling.py | 15 | 9 | https://github.com/pandas-dev/pandas.git | 1 | 100 | 0 | 22 | 72 | Python | {
"docstring": "\n Make sure the (rare) branch of non-monotonic indices is covered by a test.\n\n output from 1.1.3 is assumed to be the expected output. Output of sum/mean has\n manually been verified.\n\n GH 36933.\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 32,
"vocab_size": 29
} | def test_rolling_non_monotonic(method, expected):
# Based on an example found in computation.rst
use_expanding = [True, False, True, False, True, True, True, True]
df = DataFrame({"values": np.arange(len(use_expanding)) ** 2})
| |
36,756 | 156,746 | 33 | dask/array/core.py | 12 | 7 | def clip(self, min=None, max=None):
from dask.array.ufunc import clip
return cl | Don't include docs in ``Array`` methods, just refer to module docs (#9244)
Co-authored-by: James Bourbeau <jrbourbeau@users.noreply.github.com> | clip | 2820bae493a49cb1d0a6e376985c5473b8f04fa8 | dask | core.py | 7 | 3 | https://github.com/dask/dask.git | 1 | 31 | 0 | 11 | 46 | Python | {
"docstring": "Return an array whose values are limited to ``[min, max]``.\n One of max or min must be given.\n\n Refer to :func:`dask.array.clip` for full documentation.\n\n See Also\n --------\n dask.array.clip : equivalent function\n ",
"language": "en",
"n_whitespace... | def clip(self, min=None, max=None):
from dask.array.ufunc import clip
return clip(self, min, max)
| |
77,311 | 262,675 | 105 | TTS/tts/layers/overflow/common_layers.py | 34 | 12 | def _floor_std(self, std):
r
origi | Adding OverFlow (#2183)
* Adding encoder
* currently modifying hmm
* Adding hmm
* Adding overflow
* Adding overflow setting up flat start
* Removing runs
* adding normalization parameters
* Fixing models on same device
* Training overflow and plotting evaluations
* Adding inference
* At t... | _floor_std | 3b8b105b0d6539ac12972de94e0b2a5077fa1ce2 | TTS | common_layers.py | 10 | 16 | https://github.com/coqui-ai/TTS.git | 2 | 50 | 0 | 31 | 83 | Python | {
"docstring": "\n It clamps the standard deviation to not to go below some level\n This removes the problem when the model tries to cheat for higher likelihoods by converting\n one of the gaussians to a point mass.\n\n Args:\n std (float Tensor): tensor containing the standard ... | def _floor_std(self, std):
r
original_tensor = std.clone().detach()
std = torch.clamp(std, min=self.std_floor)
if torch.any(original_tensor != std):
print(
"[*] Standard deviation was floored! The model is preventing overfitting, nothing serious to worry about... | |
13,247 | 63,314 | 1,003 | .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | 175 | 29 | def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
nl = "\n"
out = []
namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if for... | upd; format | asXML | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | pyparsing.py | 18 | 49 | https://github.com/jindongwang/transferlearning.git | 16 | 278 | 0 | 83 | 454 | Python | {
"docstring": "\n (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 19,
"vocab_size": 18
} | def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
nl = "\n"
out = []
namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if for... | |
19,283 | 96,149 | 22 | src/sentry/models/group.py | 8 | 5 | def times_seen_with_pending(self) -> int: | fix(post_process): Fetch buffered `times_seen` values and add them to `Group.times_seen` (#31624)
In `post_process_group` we process issue alert rules and also ignored groups. Both of these can have
conditions that read from the `times_seen` value on the `Group`.
The problem here is that updates to `times_seen` ar... | times_seen_with_pending | 09726d7fc95e53bb516e328fc1811fc9a0704cac | sentry | group.py | 7 | 6 | https://github.com/getsentry/sentry.git | 1 | 16 | 0 | 8 | 28 | Python | {
"docstring": "\n Returns `times_seen` with any additional pending updates from `buffers` added on. This value\n must be set first.\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 17,
"vocab_size": 17
} | def times_seen_with_pending(self) -> int:
return self.times_seen + self.times_seen_pending
| |
117,005 | 319,841 | 169 | src/documents/tests/test_api.py | 22 | 14 | def test_api_create_storage_path(self):
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"name": "A storage path",
"path": "Somewhere/{asn}",
},
),
content_ | Adds invalid storage path format test | test_api_create_storage_path | d7f7d839f8a6b7d0378dda1e0744739748d71b9c | paperless-ngx | test_api.py | 13 | 13 | https://github.com/paperless-ngx/paperless-ngx.git | 1 | 64 | 0 | 22 | 108 | Python | {
"docstring": "\n GIVEN:\n - API request to create a storage paths\n WHEN:\n - API is called\n THEN:\n - Correct HTTP response\n - New storage path is created\n ",
"language": "en",
"n_whitespaces": 98,
"n_words": 25,
"vocab_size": 19
} | def test_api_create_storage_path(self):
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"name": "A storage path",
"path": "Somewhere/{asn}",
},
),
content_type="application/... | |
@pytest.mark.django_db | 17,258 | 81,780 | 374 | awx/main/tests/functional/models/test_workflow.py | 63 | 31 | def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin):
r = post(
url=reverse('api:workflow_job_template_list'),
data=dict(
name='workflow that tests ask_for prompts',
organization=organization.id,
... | adding prompt-to-launch field on Labels field in Workflow Templates; with necessary UI and testing changes
Co-authored-by: Keith Grant <keithjgrant@gmail.com> | test_set_all_ask_for_prompts_true_from_post | 663ef2cc6413c0cdb26392bb046b37fe564fb546 | awx | test_workflow.py | 13 | 28 | https://github.com/ansible/awx.git | 1 | 151 | 1 | 44 | 234 | Python | {
"docstring": "\n Tests behaviour and values of ask_for_* fields on WFJT via POST\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 11
} | def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin):
r = post(
url=reverse('api:workflow_job_template_list'),
data=dict(
name='workflow that tests ask_for prompts',
organization=organization.id,
... |
28,779 | 128,697 | 128 | python/ray/_private/utils.py | 60 | 15 | def get_used_memory():
# Try to accurately figure out the memory usage if we are in a docker
# container.
docker_usage = None
# For cgroups v1:
memory_usage_filename = "/sys/fs/cgroup/memory/memory.stat"
# For cgroups v2:
memory_usage_filename | [core] update cgroup v1 memory usage calculation to ignore inactive (cache) files (#29103)
Signed-off-by: Clarence Ng clarence.wyng@gmail.com
Adjust used memory calculation for cgroup v1, to make it inline with how working set memory is calculated, which is what the cgroup oom killer uses. Before this change we inc... | get_used_memory | 036225dec2d1f0d895043ca5f0aeeff377aa7fc7 | ray | utils.py | 15 | 12 | https://github.com/ray-project/ray.git | 4 | 76 | 0 | 44 | 139 | Python | {
"docstring": "Return the currently used system memory in bytes\n\n Returns:\n The total amount of used memory\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 15,
"vocab_size": 13
} | def get_used_memory():
# Try to accurately figure out the memory usage if we are in a docker
# container.
docker_usage = None
# For cgroups v1:
memory_usage_filename = "/sys/fs/cgroup/memory/memory.stat"
# For cgroups v2:
memory_usage_filename_v2 = "/sys/fs/cgroup/memory.current"
if... | |
80,703 | 271,128 | 161 | keras/engine/data_adapter.py | 71 | 7 | def pack_x_y_sample_weight(x, y=None, sample_weight=None):
if y is None:
# For single x-input, we do no tuple wrapping since in this case
# there is no ambiguity. This also makes NumPy and Dataset
# consistent in that the user does not have to wrap their Dataset
# data in an unn... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | pack_x_y_sample_weight | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | data_adapter.py | 11 | 10 | https://github.com/keras-team/keras.git | 4 | 60 | 0 | 53 | 97 | Python | {
"docstring": "Packs user-provided data into a tuple.\n\n This is a convenience utility for packing data into the tuple formats\n that `Model.fit` uses.\n\n Standalone usage:\n\n >>> x = tf.ones((10, 1))\n >>> data = tf.keras.utils.pack_x_y_sample_weight(x)\n >>> isinstance(data, tf.Tensor)\n Tr... | def pack_x_y_sample_weight(x, y=None, sample_weight=None):
if y is None:
# For single x-input, we do no tuple wrapping since in this case
# there is no ambiguity. This also makes NumPy and Dataset
# consistent in that the user does not have to wrap their Dataset
# data in an unn... | |
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False]) | 76,232 | 260,408 | 209 | sklearn/linear_model/_glm/tests/test_glm.py | 91 | 40 | def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
n_samples, n_features = X.shape
params = dict(
alpha=alpha,
fit | TST tight tests for GLMs (#23619)
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> | test_glm_regression_vstacked_X | 9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f | scikit-learn | test_glm.py | 11 | 25 | https://github.com/scikit-learn/scikit-learn.git | 2 | 188 | 1 | 71 | 320 | Python | {
"docstring": "Test that GLM converges for all solvers to correct solution on vstacked data.\n\n We work with a simple constructed data set with known solution.\n Fit on [X] with alpha is the same as fit on [X], [y]\n [X], [y] with 1 * alpha.\n It is the same a... | def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
n_samples, n_features = X.shape
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
# solver=solver, # only lbfgs availa... |
48,348 | 197,115 | 41 | sympy/tensor/tensor.py | 8 | 5 | def deprecate_data():
sympy_deprecation_warning(
,
| Update the various tensor deprecations | deprecate_data | cba899d4137b0b65f6850120ee42cd4fcd4f9dbf | sympy | tensor.py | 9 | 10 | https://github.com/sympy/sympy.git | 1 | 21 | 0 | 8 | 37 | Python | {
"docstring": "\n The data attribute of TensorIndexType is deprecated. Use The\n replace_with_arrays() method instead.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 12,
"vocab_size": 11
} | def deprecate_data():
sympy_deprecation_warning(
,
deprecated_since_version="1.4",
active_deprecations_target="deprecated-tensorindextype-attrs",
stacklevel=4,
)
| |
76,453 | 260,743 | 69 | sklearn/preprocessing/_function_transformer.py | 23 | 11 | def fit(self, X, y=None):
sel | MAINT Add parameter validation for `FunctionTransformer`. (#24180)
Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com> | fit | b85f799d0a7242aace8bffd5c8fd7cf3585340af | scikit-learn | _function_transformer.py | 11 | 6 | https://github.com/scikit-learn/scikit-learn.git | 4 | 57 | 0 | 22 | 91 | Python | {
"docstring": "Fit transformer by checking X.\n\n If ``validate`` is ``True``, ``X`` will be checked.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input array.\n\n y : Ignored\n Not used, present here for API consistency by con... | def fit(self, X, y=None):
self._validate_params()
X = self._check_input(X, reset=True)
if self.check_inverse and not (self.func is None or self.inverse_func is None):
self._check_inverse_transform(X)
return self
| |
48,967 | 198,505 | 47 | sympy/printing/dot.py | 17 | 9 | def styleof(expr, styles=default_styles):
style = {}
for typ, sty in styles:
if isinstance(expr, typ):
style | Code cleanup | styleof | 9d58006fc0a23afcba38f641c9472917c436428a | sympy | dot.py | 11 | 6 | https://github.com/sympy/sympy.git | 3 | 37 | 0 | 16 | 60 | Python | {
"docstring": " Merge style dictionaries in order\n\n Examples\n ========\n\n >>> from sympy import Symbol, Basic, Expr, S\n >>> from sympy.printing.dot import styleof\n >>> styles = [(Basic, {'color': 'blue', 'shape': 'ellipse'}),\n ... (Expr, {'color': 'black'})]\n\n >>> styleof(Bas... | def styleof(expr, styles=default_styles):
style = {}
for typ, sty in styles:
if isinstance(expr, typ):
style.update(sty)
return style
| |
14,760 | 68,324 | 21 | erpnext/support/report/first_response_time_for_issues/first_response_time_for_issues.py | 36 | 10 | def execute(filters=None):
columns = [
{"fieldname": "creation_date | fix: bulk fix (~330) missing translations | execute | a896895a9e76a68ab055ce7871bb9d181d3fac15 | erpnext | first_response_time_for_issues.py | 12 | 25 | https://github.com/frappe/erpnext.git | 1 | 79 | 0 | 31 | 142 | Python | {
"docstring": "\n\t\tSELECT\n\t\t\tdate(creation) as creation_date,\n\t\t\tavg(first_response_time) as avg_response_time\n\t\tFROM tabIssue\n\t\tWHERE\n\t\t\tdate(creation) between %s and %s\n\t\t\tand first_response_time > 0\n\t\tGROUP BY creation_date\n\t\tORDER BY creation_date desc\n\t",
"language": "en",
"n... | def execute(filters=None):
columns = [
{"fieldname": "creation_date", "label": _("Date"), "fieldtype": "Date", "width": 300},
{
"fieldname": "first_response_time",
"fieldtype": "Duration",
"label": _("First Response Time"),
"width": 300,
},
]
data = frappe.db.sql(
,
(filters.from_date, filters... | |
2,931 | 19,295 | 278 | PathPlanning/RRTStar/rrt_star.py | 74 | 22 | def choose_parent(self, new_node, near_inds):
if not near_inds:
return None
# search nearest cost in near_inds
costs = []
for i in near_inds:
near_node = self.node_list[i]
t_node = self.steer(near_node, new_node)
if t_node and sel... | Add optional robot radius to RRT/RRTStar path planners (#655)
* Add optional robot radius to RRT/RRTStar path planners.
* update __init__ and check_collision to include radius
* during animation, if a robot radius is given then it is drawn
* Add test for robot radius
* Correct import error
* Correct missing... | choose_parent | b53fdf75f66ccb63b5cfaadaa81253d43f01805a | PythonRobotics | rrt_star.py | 15 | 20 | https://github.com/AtsushiSakai/PythonRobotics.git | 6 | 138 | 0 | 53 | 224 | Python | {
"docstring": "\n Computes the cheapest point to new_node contained in the list\n near_inds and set such a node as the parent of new_node.\n Arguments:\n --------\n new_node, Node\n randomly generated node with a path from its neared point\n ... | def choose_parent(self, new_node, near_inds):
if not near_inds:
return None
# search nearest cost in near_inds
costs = []
for i in near_inds:
near_node = self.node_list[i]
t_node = self.steer(near_node, new_node)
if t_node and sel... | |
30,064 | 133,631 | 295 | rllib/agents/a3c/tests/test_a3c.py | 54 | 23 | def test_a3c_compilation(self):
config = a3c.DEFAULT_CONFIG.copy()
config["num_workers"] = 2
config["num_envs_per_worker"] = 2
num_iterations = 1
# Test against all frameworks.
for _ in framework_iterator(config, with_eager_tracing=True):
for env in... | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | test_a3c_compilation | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | test_a3c.py | 15 | 18 | https://github.com/ray-project/ray.git | 4 | 129 | 0 | 42 | 224 | Python | {
"docstring": "Test whether an A3CTrainer can be built with both frameworks.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def test_a3c_compilation(self):
config = a3c.DEFAULT_CONFIG.copy()
config["num_workers"] = 2
config["num_envs_per_worker"] = 2
num_iterations = 1
# Test against all frameworks.
for _ in framework_iterator(config, with_eager_tracing=True):
for env in... | |
3,863 | 21,475 | 372 | pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py | 76 | 24 | def extract(self, member, path="", set_attrs=True):
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_targ... | Vendor in pip 22.1.2 | extract | c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | pipenv | tarfile.py | 19 | 24 | https://github.com/pypa/pipenv.git | 8 | 170 | 0 | 52 | 279 | Python | {
"docstring": "Extract a member from the archive to the current working directory,\n using its full name. Its file information is extracted as accurately\n as possible. `member' may be a filename or a TarInfo object. You can\n specify a different directory using `path'. File attributes ... | def extract(self, member, path="", set_attrs=True):
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_targ... | |
5,604 | 30,465 | 32 | tests/types/test_artist.py | 17 | 8 | def test_artist_from_string():
artist = Artist.from_search_term("artist:gorillaz")
assert artist.name == "Gorillaz"
assert artist.url == "http://open.spotify.com/artist/3AA28KZvwAUcZuOKwyblJQ"
| Search album by string enhancement (#1663) | test_artist_from_string | 57ce5c09ee1ac101f79962e59bd44a0396dfb76c | spotify-downloader | test_artist.py | 9 | 5 | https://github.com/spotDL/spotify-downloader.git | 1 | 34 | 0 | 14 | 63 | Python | {
"docstring": "\n Test if Artist class can be initialized from string.\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 9,
"vocab_size": 9
} | def test_artist_from_string():
artist = Artist.from_search_term("artist:gorillaz")
assert artist.name == "Gorillaz"
assert artist.url == "http://open.spotify.com/artist/3AA28KZvwAUcZuOKwyblJQ"
assert len(artist.urls) > 1
| |
35,134 | 151,776 | 382 | freqtrade/freqai/RL/BaseEnvironment.py | 117 | 37 | def reset(self):
# custom_info is used for episodic reports and tensorboard logging
self.custom_info["Invalid"] = 0
self.custom_info["Hold"] = 0
self.custom_info["Unknown"] = 0
self.custom_info["pnl_factor"] = 0
self.custom_info["duration_factor"] = 0
sel... | reorganize/generalize tensorboard callback | reset | 24766928baddfed919be1138a64d51cdbb0d3764 | freqtrade | BaseEnvironment.py | 14 | 31 | https://github.com/freqtrade/freqtrade.git | 4 | 259 | 0 | 73 | 427 | Python | {
"docstring": "\n Reset is called at the beginning of every episode\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def reset(self):
# custom_info is used for episodic reports and tensorboard logging
self.custom_info["Invalid"] = 0
self.custom_info["Hold"] = 0
self.custom_info["Unknown"] = 0
self.custom_info["pnl_factor"] = 0
self.custom_info["duration_factor"] = 0
sel... | |
13,841 | 65,288 | 22 | erpnext/accounts/report/non_billed_report.py | 44 | 22 | def get_ordered_to_be_billed_data(args):
doctype, party = args.get("doctype"), args.get("party")
child_tab = doctype + " Item"
precision = (
get_field_precision(
frappe.get_meta(child_tab).get_field("billed_amt"), currency=get_default_currency()
)
or 2
)
project_field = get_project_field(doctype, party)
... | style: format code with black | get_ordered_to_be_billed_data | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | non_billed_report.py | 14 | 46 | https://github.com/frappe/erpnext.git | 2 | 125 | 0 | 35 | 208 | Python | {
"docstring": "\n\t\tSelect\n\t\t\t`{parent_tab}`.name, `{parent_tab}`.{date_field},\n\t\t\t`{parent_tab}`.{party}, `{parent_tab}`.{party}_name,\n\t\t\t`{child_tab}`.item_code,\n\t\t\t`{child_tab}`.base_amount,\n\t\t\t(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)),\n\t\t\t(`{child_tab}`.base_... | def get_ordered_to_be_billed_data(args):
doctype, party = args.get("doctype"), args.get("party")
child_tab = doctype + " Item"
precision = (
get_field_precision(
frappe.get_meta(child_tab).get_field("billed_amt"), currency=get_default_currency()
)
or 2
)
project_field = get_project_field(doctype, party)
... | |
32,811 | 142,825 | 73 | python/ray/tune/execution/ray_trial_executor.py | 19 | 6 | def get_staged_trial(self):
# TODO(xwjiang): This method should consider `self._cached_actor_pg`.
for trial in self._staged_trials:
if self._pg_m | [tune/structure] Introduce execution package (#26015)
Execution-specific packages are moved to tune.execution.
Co-authored-by: Xiaowei Jiang <xwjiang2010@gmail.com> | get_staged_trial | 0959f44b6fc217a4f2766ed46a721eb79b067b2c | ray | ray_trial_executor.py | 10 | 5 | https://github.com/ray-project/ray.git | 3 | 27 | 0 | 17 | 46 | Python | {
"docstring": "Get a trial whose placement group was successfully staged.\n\n Can also return None if no trial is available.\n\n Returns:\n Trial object or None.\n\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 23,
"vocab_size": 22
} | def get_staged_trial(self):
# TODO(xwjiang): This method should consider `self._cached_actor_pg`.
for trial in self._staged_trials:
if self._pg_manager.has_ready(trial):
return trial
return None
| |
35,986 | 154,453 | 66 | modin/core/dataframe/algebra/default2pandas/resample.py | 12 | 10 | def register(cls, func, squeeze_self=False, **kwargs):
return super().regi | REFACTOR-#4942: remove call method in favor of register due to duplication (#4943)
Signed-off-by: Myachev <anatoly.myachev@intel.com> | register | a6f47c8e1c27d85fc09926bb35c2f1a65a6d3e79 | modin | resample.py | 9 | 6 | https://github.com/modin-project/modin.git | 1 | 40 | 0 | 12 | 61 | Python | {
"docstring": "\n Build function that do fallback to pandas and aggregate resampled data.\n\n Parameters\n ----------\n func : callable\n Aggregation function to execute under resampled frame.\n squeeze_self : bool, default: False\n Whether or not to squeeze f... | def register(cls, func, squeeze_self=False, **kwargs):
return super().register(
Resampler.build_resample(func, squeeze_self),
fn_name=func.__name__,
**kwargs
)
| |
51,208 | 205,775 | 540 | django/db/models/query.py | 117 | 33 | def aggregate(self, *args, **kwargs):
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions(
(*args, *kwar | Refs #33476 -- Reformatted code with Black. | aggregate | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | query.py | 16 | 30 | https://github.com/django/django.git | 10 | 191 | 0 | 88 | 306 | Python | {
"docstring": "\n Return a dictionary containing the calculations (aggregation)\n over the current queryset.\n\n If args is present the expression is passed as a kwarg using\n the Aggregate object's default alias.\n ",
"language": "en",
"n_whitespaces": 64,
"n_words": 28,
"... | def aggregate(self, *args, **kwargs):
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions(
(*args, *kwargs.values()), method_name="aggregate"
)
for arg in args:
... | |
52,717 | 209,525 | 137 | scapy/contrib/http2.py | 51 | 7 | def _detect_bytelen_from_str(s):
# type: (str) -> int
assert len(s) >= 2
tmp_len = len(s)
i = 1
while orb(s[i]) & 0x80 > 0:
i += 1
assert i < tmp_len, 'EINVAL: s: out-of-bound read: unfinished A | E275 - Missing whitespace after keyword (#3711)
Co-authored-by: Alexander Aring <alex.aring@gmail.com>
Co-authored-by: Anmol Sarma <me@anmolsarma.in>
Co-authored-by: antoine.torre <torreantoine1@gmail.com>
Co-authored-by: Antoine Vacher <devel@tigre-bleu.net>
Co-authored-by: Arnaud Ebalard <arno@natisbad.org>
Co-... | _detect_bytelen_from_str | 08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf | scapy | http2.py | 10 | 10 | https://github.com/secdev/scapy.git | 2 | 55 | 0 | 37 | 93 | Python | {
"docstring": " _detect_bytelen_from_str returns the length of the machine\n representation of an AbstractUVarIntField starting at the beginning\n of s and which is assumed to expand over multiple bytes\n (value > _max_prefix_value).\n\n :param str s: the string to parse. It is assu... | def _detect_bytelen_from_str(s):
# type: (str) -> int
assert len(s) >= 2
tmp_len = len(s)
i = 1
while orb(s[i]) & 0x80 > 0:
i += 1
assert i < tmp_len, 'EINVAL: s: out-of-bound read: unfinished AbstractUVarIntField detected' # noqa: E501
... | |
14,597 | 67,696 | 4 | erpnext/stock/doctype/purchase_receipt/test_purchase_receipt.py | 10 | 7 | def get_gl_entries(voucher_type, voucher_no):
return frappe.db.sql(
,
(voucher_type, voucher_no) | style: format code with black | get_gl_entries | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | test_purchase_receipt.py | 8 | 8 | https://github.com/frappe/erpnext.git | 1 | 27 | 0 | 10 | 40 | Python | {
"docstring": "select account, debit, credit, cost_center, is_cancelled\n\t\tfrom `tabGL Entry` where voucher_type=%s and voucher_no=%s\n\t\torder by account desc",
"language": "en",
"n_whitespaces": 14,
"n_words": 17,
"vocab_size": 17
} | def get_gl_entries(voucher_type, voucher_no):
return frappe.db.sql(
,
(voucher_type, voucher_no),
as_dict=1,
)
| |
53,681 | 213,618 | 17 | ivy/core/random.py | 11 | 7 | def random_normal(mean=0.0, std=1.0, shape=None, dev=None, f=None):
return _cur_framework(f=f).random_normal(mean, | renamed dev_str arg to dev for all methods. | random_normal | d743336b1f3654cd0315f380f43eed4116997c1d | ivy | random.py | 10 | 2 | https://github.com/unifyai/ivy.git | 1 | 46 | 0 | 11 | 61 | Python | {
"docstring": "\n Draws samples from a normal distribution.\n\n :param mean: The mean of the normal distribution to sample from. Default is 0.\n :type mean: float\n :param std: The standard deviation of the normal distribution to sample from. Default is 1.\n :type std: float\n :param shape: Output ... | def random_normal(mean=0.0, std=1.0, shape=None, dev=None, f=None):
return _cur_framework(f=f).random_normal(mean, std, shape, dev)
| |
23,019 | 108,020 | 29 | lib/matplotlib/texmanager.py | 8 | 5 | def get_font_preamble(cls):
font_preamble, command = cls. | Move towards making texmanager stateless.
Previously, TexManager needed to call get_font_config at a specific
place in the middle of processing to update some internal attributes
before proceeding with TeX source generation. Instead, move towards
making TexManager stateless (except for caching), i.e. the user facing
... | get_font_preamble | 13147992b317c29c6e832ca7f6d05bf48aeb0718 | matplotlib | texmanager.py | 8 | 3 | https://github.com/matplotlib/matplotlib.git | 1 | 17 | 0 | 8 | 31 | Python | {
"docstring": "\n Return a string containing font configuration for the tex preamble.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def get_font_preamble(cls):
font_preamble, command = cls._get_font_preamble_and_command()
return font_preamble
| |
30,836 | 136,154 | 755 | rllib/utils/exploration/tests/test_explorations.py | 147 | 30 | def do_test_explorations(config, dummy_obs, prev_a=None, expected_mean_action=None):
# Test all frameworks.
for _ in framework_iterator(config):
print(f"Algorithm={config.algo_class}")
# Test for both the default Agent's exploration AND the `Random`
# exploration class.
fo... | [RLlib] AlgorithmConfig: Replace more occurrences of old config dicts; Make all Algorithms use the non-dict lookup for config properties. (#30096) | do_test_explorations | e715a8b7616f9f24839531fcefc1420f79ab13ec | ray | test_explorations.py | 18 | 36 | https://github.com/ray-project/ray.git | 10 | 231 | 0 | 85 | 357 | Python | {
"docstring": "Calls an Agent's `compute_actions` with different `explore` options.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def do_test_explorations(config, dummy_obs, prev_a=None, expected_mean_action=None):
# Test all frameworks.
for _ in framework_iterator(config):
print(f"Algorithm={config.algo_class}")
# Test for both the default Agent's exploration AND the `Random`
# exploration class.
fo... | |
11,227 | 55,138 | 21 | src/prefect/cli/base.py | 9 | 9 | def exit_with_success(message, **kwargs):
kwargs.setdefault("style", "green")
app.console.prin | Update `set` command; allow CLI `console` object to be patched | exit_with_success | c0cb1fee460c1bded9e3eb741ad7979402844bf8 | prefect | base.py | 8 | 4 | https://github.com/PrefectHQ/prefect.git | 1 | 35 | 0 | 9 | 61 | Python | {
"docstring": "\n Utility to print a stylized success message and exit with a zero code\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 13,
"vocab_size": 12
} | def exit_with_success(message, **kwargs):
kwargs.setdefault("style", "green")
app.console.print(message, **kwargs)
raise typer.Exit(0)
| |
36,153 | 154,845 | 91 | modin/_version.py | 61 | 7 | def get_keywords() -> Dict[str, str]:
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_f... | REFACTOR-#5012: Add mypy checks for singleton files in base modin directory (#5013)
Signed-off-by: Jonathan Shi <jhshi@ponder.io> | get_keywords | 446148dbf9b66debd0a0dbf9ce778253380d5921 | modin | _version.py | 9 | 7 | https://github.com/modin-project/modin.git | 1 | 38 | 0 | 51 | 76 | Python | {
"docstring": "Get the keywords needed to look up the version information.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def get_keywords() -> Dict[str, str]:
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_f... | |
36,416 | 155,520 | 798 | dask/array/slicing.py | 299 | 71 | def take(outname, inname, chunks, index, itemsize, axis=0):
from .core import PerformanceWarning
plan = slicing_plan(chunks[axis], index)
if len(plan) >= len(chunks[axis]) * 10:
factor = math.ceil(len(plan) / len(chunks[axis]))
warnings.warn(
"Slicing with an out-of-order ... | DOC: normalize whitespace in doctests in slicing.py (#8512) | take | fa8dfede71677a2301d4cd602cf4b27af41cbc4f | dask | slicing.py | 15 | 66 | https://github.com/dask/dask.git | 17 | 509 | 0 | 181 | 824 | Python | {
"docstring": "Index array with an iterable of index\n\n Handles a single index by a single list\n\n Mimics ``np.take``\n\n >>> from pprint import pprint\n >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], 8, axis=0)\n >>> chunks\n ((2, 1, 1),)\n >>> pprint(dsk) # doctest: +E... | def take(outname, inname, chunks, index, itemsize, axis=0):
from .core import PerformanceWarning
plan = slicing_plan(chunks[axis], index)
if len(plan) >= len(chunks[axis]) * 10:
factor = math.ceil(len(plan) / len(chunks[axis]))
warnings.warn(
"Slicing with an out-of-order ... | |
50,070 | 202,325 | 61 | tests/contenttypes_tests/test_models.py | 11 | 9 | def test_multidb(self):
ContentType.objects.clear_cache()
with self.assertNumQueries(0, using="default"), self.assertNumQueries(
1, using="other"
):
ContentType.objects.get_for_model(Author)
| Refs #33476 -- Reformatted code with Black. | test_multidb | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | test_models.py | 11 | 6 | https://github.com/django/django.git | 1 | 44 | 0 | 11 | 78 | Python | {
"docstring": "\n When using multiple databases, ContentType.objects.get_for_model() uses\n db_for_read().\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 7,
"vocab_size": 7
def test_multidb(self):
    """
    When using multiple databases, ContentType.objects.get_for_model() uses
    db_for_read().
    """
    # Start from a cold cache so get_for_model() must query a database.
    ContentType.objects.clear_cache()
    # The read must be routed to "other" (one query) and never to
    # "default" (zero queries).
    with self.assertNumQueries(0, using="default"):
        with self.assertNumQueries(1, using="other"):
            ContentType.objects.get_for_model(Author)
| |
84,312 | 282,822 | 151 | gamestonk_terminal/econometrics/econometrics_model.py | 103 | 31 | def get_engle_granger_two_step_cointegration_test(y, x):
warnings.simplefilter(action="ignore", category=FutureWarning)
long_run_ols = sm.OLS(y, sm.add_constant(x))
warnings.simplefilter(action="default", category=FutureWarning)
long_run_ols_fit = long_run_ols.fit()
c, gamma = long_run_ols_fi... | Econometrics notebooks API (#1462)
* Add initial implementations of the API wrappers
* Fix typos in docstrings
* Fix typos an markdown linting errors in docs
* Ditch using insecure eval in favor of secure getattr
* Add GST notebooks API documentation
* Add notebook screenshot to the GST API docs | get_engle_granger_two_step_cointegration_test | 1b914d45e8575827c05a432d56846f5c5f2559c4 | OpenBBTerminal | econometrics_model.py | 13 | 12 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 1 | 147 | 0 | 88 | 230 | Python | {
"docstring": "Estimates long-run and short-run cointegration relationship for series y and x and apply\n the two-step Engle & Granger test for cointegration.\n\n Uses a 2-step process to first estimate coefficients for the long-run relationship\n y_t = c + gamma * x_t + z_t\n\n and then the short-te... | def get_engle_granger_two_step_cointegration_test(y, x):
warnings.simplefilter(action="ignore", category=FutureWarning)
long_run_ols = sm.OLS(y, sm.add_constant(x))
warnings.simplefilter(action="default", category=FutureWarning)
long_run_ols_fit = long_run_ols.fit()
c, gamma = long_run_ols_fi... | |
51,823 | 206,982 | 44 | tests/admin_changelist/tests.py | 12 | 11 | def test_deterministic_order_for_unordered_model(self):
superuser = self._create_superuser("superuser")
for counter in range(1, 51):
| Refs #33476 -- Reformatted code with Black. | test_deterministic_order_for_unordered_model | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 10 | 18 | https://github.com/django/django.git | 2 | 118 | 0 | 12 | 63 | Python | {
"docstring": "\n The primary key is used in the ordering of the changelist's results to\n guarantee a deterministic order, even when the model doesn't have any\n default ordering defined (#17198).\n ",
"language": "en",
"n_whitespaces": 57,
"n_words": 28,
"vocab_size": 25
} | def test_deterministic_order_for_unordered_model(self):
superuser = self._create_superuser("superuser")
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.