<?xml version="1.0" encoding="utf-8" ?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:syn="http://purl.org/rss/1.0/modules/syndication/" xmlns="http://purl.org/rss/1.0/">




    



<channel rdf:about="https://cis-india.org/internet-governance/blog/online-anonymity/search_rss">
  <title>We are anonymous, we are legion</title>
  <link>https://cis-india.org</link>
  
  <description>
    
            These are the search results for the query, showing results 336 to 350.
        
  </description>
  
  
  
  
  <image rdf:resource="https://cis-india.org/logo.png"/>

  <items>
    <rdf:Seq>
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/economic-times-february-20-2019-are-rss-fears-about-tik-tok-true"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/webinar-on-counter-comments-to-the-draft-intermediary-guidelines"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/cis-submission-to-the-un-special-rapporteur-on-freedom-of-speech-and-expression-surveillance-industry-and-human-rights"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/jessica-corbett-common-dreams-february-5-2019-civil-liberties-groups-warn-proposed-eu-terrorist-content-rule-threat-democratic"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/2019-international-asia-conference"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/economic-times-nilanjana-bhowmick-february-13-2019-make-our-digital-backyard-safe"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/zack-whittaker-natasha-lomas-february-15-2019-tech-crunch-even-years-later-twitter-doesnt-delete-your-direct-messages"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/unbox-2019-festival"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/hindustan-times-february-10-2019-smriti-kak-ramachandran-and-vidhi-choudhary-willing-to-participate-in-parliamentary-panel-hearing"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/data-infrastructures-inequities-reproductive-health-surveillance-india"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/livemint-abhijit-ahaskar-february-12-2019-what-the-governments-draft-it-intermediary-guidelines-say"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/business-standard-february-9-2019-sunil-abraham-intermediary-liability-law-needs-updating"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/akriti-bopanna-february-8-2019-comment-on-icann-draft-fy-20-operating-plan-and-budget"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/harsh-bajpai-ambika-tandon-and-amber-sinha-february-8-2019-the-future-of-work-in-automotive-sector-in-india"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/events/internet-speech-perspectives-on-regulation-and-policy"/>
        
    </rdf:Seq>
  </items>

</channel>


    <item rdf:about="https://cis-india.org/internet-governance/news/economic-times-february-20-2019-are-rss-fears-about-tik-tok-true">
    <title>Are RSS's fears about Tik Tok true? Here's what you should know</title>
    <link>https://cis-india.org/internet-governance/news/economic-times-february-20-2019-are-rss-fears-about-tik-tok-true</link>
    <description>
        &lt;b&gt;Swadeshi Jagran Manch has flagged security, business and social risks posed by Chinese apps such as TikTok. The RSS fears may not be totally unfounded.&lt;/b&gt;
        &lt;p&gt;The article was &lt;a class="external-link" href="https://economictimes.indiatimes.com/news/politics-and-nation/are-rsss-fears-about-tik-tok-true-heres-what-you-should-know/articleshow/68066972.cms"&gt;published in Economic Times&lt;/a&gt; on February 20, 2019. Shweta Mohandas was quoted. The story was also published by &lt;a class="external-link" href="https://www.moneycontrol.com/news/india/rss-calls-for-ban-on-chinese-social-media-apps-like-tik-tok-like-3562401.html"&gt;Moneycontrol News&lt;/a&gt;.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;Should India let Chinese social media apps and telecom companies proliferate in India? Swadeshi Jagran Manch (SJM), the economic wing of the Rashtriya Swayamsevak Sangh has written to Prime Minister Narendra Modi for a ban on these Chinese companies.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The statement comes days after the Pulwama attack by terrorists of Jaish-e-Muhammad (JeM). China has repeatedly helped Pakistan by blocking India’s efforts to get Pakistan-based JeM chief Masood Azhar listed by the UN Security Council as a global terrorist.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;SJM has flagged security, business and social risks posed by Chinese apps such as hugely popular TikTok.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The RSS fears may not be totally unfounded.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;TikTok, Kwai and LIKE have been downloaded by millions of smartphone users in small town India who are using them to share personal videos, away from the glare of scrutiny that falls on more mainstream social media platforms.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In November last year, ET reviewed more than 20 Chinese video apps that dominate the mobile entertainment network of tier-2 and tier-3 cities mostly thanks to titillating videos, suggestive notifications, risqué humour and raunchy content.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The Chinese apps pose several potential risks, Shweta Mohandas, policy officer at the Center for Internet and Society, an advocacy group, told ET in November last year. “The draft DP (data protection) Bill in the current stage provides greater responsibility on data fiduciaries to maintain the privacy of the individual and the security of the data,” she said. “There are a lot of questions that these apps pose with respect to the Bill, some of them being the security, the data storage provision, the personal data of children, and most importantly that these apps might have recordings that might be sensitive personal data.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Most of these apps including TikTok explicitly state that though they have appropriate technical and organisational measures in place, “they cannot guarantee the security of your information transmitted through the platform”.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;TikTok, the popular lip-sync app, is filled with 15-second clips of meme-friendly content featuring its youthful users miming to their favourite songs. The videos range from the harmless to the explicit, depending upon the users followed. The app has gone viral, having racked up close to 100 million downloads and with 20 million monthly active users in India.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;While all such apps carry a disclaimer stating that they are not directed at children, their target audience encompasses preteens and adolescents in tier-2 and tier-3 cities, according to experts.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Despite the rapidly growing user base, apps like TikTok don’t have a grievance redressal officer in India. The government is insisting on this for all major social media platforms.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In its letter to the PM, SJM said it was the duty of all Indians to take steps to prevent the economic gains of any nation or individual that directly or tacitly supports terrorists.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Referring to India putting economic pressure on Pakistan, SJM said, “At such a time, we believe it is imperative that the government create similar hurdles for Chinese companies that are using India for their economic gain. As has been said often, data is now considered the new oil. We should not allow Chinese companies to capture Indian user data without any restrictions and monitoring.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Bytedance's response: TikTok and Helo are committed to respecting local laws and regulations as well as maintaining a safe and positive in-app environment for our users in India. There is no basis for the factually incorrect claims raised by certain groups recently. We treat the safety and security of our user data very seriously. Moreover, we have robust measures to protect users against misuse, including easy reporting mechanisms that enable users and law enforcement to report content that violates our terms of use and community guidelines.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/economic-times-february-20-2019-are-rss-fears-about-tik-tok-true'&gt;https://cis-india.org/internet-governance/news/economic-times-february-20-2019-are-rss-fears-about-tik-tok-true&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    

   <dc:date>2019-02-22T02:13:35Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/webinar-on-counter-comments-to-the-draft-intermediary-guidelines">
    <title>Webinar on counter-comments to the draft Intermediary Guidelines</title>
    <link>https://cis-india.org/internet-governance/news/webinar-on-counter-comments-to-the-draft-intermediary-guidelines</link>
    <description>
        &lt;b&gt;CCAOI and the ISOC Delhi Chapter organised a webinar on February 11 to discuss the comments submitted to the Information Technology [Intermediary Guidelines (Amendment) Rules] 2018, and counter-comments that were due by February 14. &lt;/b&gt;
        &lt;p&gt;The agenda of the discussion was:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;A brief introduction to the counter comment process [Shashank Mishra]&lt;/li&gt;
&lt;li&gt;Invited stakeholders  comment on key issues and perspectives on the submissions and the points to be countered.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;The following people participated:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Amba Kak, Mozilla&lt;/li&gt;
&lt;li&gt;Rajesh Chharia, ISPAI&lt;/li&gt;
&lt;li&gt;Gurshabad Grover, CIS&lt;/li&gt;
&lt;li&gt;Priyanka Chaudhari, SFLC&lt;/li&gt;
&lt;li&gt;Divij Joshi, Vidhi Centre for Legal Policy&lt;/li&gt;
&lt;/ul&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/webinar-on-counter-comments-to-the-draft-intermediary-guidelines'&gt;https://cis-india.org/internet-governance/news/webinar-on-counter-comments-to-the-draft-intermediary-guidelines&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Intermediary Liability</dc:subject>
    
    
        <dc:subject>Information Technology</dc:subject>
    

   <dc:date>2019-02-22T01:51:19Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/cis-submission-to-the-un-special-rapporteur-on-freedom-of-speech-and-expression-surveillance-industry-and-human-rights">
    <title>CIS Submission to the UN Special Rapporteur on Freedom of Speech and Expression: Surveillance Industry and Human Rights</title>
    <link>https://cis-india.org/internet-governance/blog/cis-submission-to-the-un-special-rapporteur-on-freedom-of-speech-and-expression-surveillance-industry-and-human-rights</link>
    <description>
        &lt;b&gt;CIS responded to the call for submissions from the UN Special Rapporteur on Freedom of Speech and Expression. The submission was on the Surveillance Industry and Human Rights.&lt;/b&gt;
        
&lt;p&gt;CIS is grateful for the opportunity to respond to the United Nations (UN) Special Rapporteur’s call for submissions on the surveillance industry and human rights. Over the last decade, CIS has worked extensively on research around state and private surveillance around the world. In this response, individuals working at CIS wish to highlight these programs, with a special focus on India.&lt;/p&gt;
&lt;p&gt;The response can be accessed &lt;a href="https://cis-india.org/internet-governance/resources/the-surveillance-industry-and-human-rights.pdf"&gt;here&lt;/a&gt;.&lt;/p&gt;

        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/cis-submission-to-the-un-special-rapporteur-on-freedom-of-speech-and-expression-surveillance-industry-and-human-rights'&gt;https://cis-india.org/internet-governance/blog/cis-submission-to-the-un-special-rapporteur-on-freedom-of-speech-and-expression-surveillance-industry-and-human-rights&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Elonnai Hickok, Arindrajit Basu, Gurshabad Grover, Akriti Bopanna, Shweta Mohandas, Martyna Kalvaityte</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Human Rights</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Surveillance</dc:subject>
    

   <dc:date>2019-02-20T10:48:24Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/jessica-corbett-common-dreams-february-5-2019-civil-liberties-groups-warn-proposed-eu-terrorist-content-rule-threat-democratic">
    <title>Civil Liberties Groups Warn Proposed EU 'Terrorist Content' Rule a Threat to Democratic Values</title>
    <link>https://cis-india.org/internet-governance/news/jessica-corbett-common-dreams-february-5-2019-civil-liberties-groups-warn-proposed-eu-terrorist-content-rule-threat-democratic</link>
    <description>
        &lt;b&gt;Requiring filtering tools would be "a gamble with European Internet users' rights to privacy and data protection, freedom of expression and information, and non-discrimination and equality before the law."&lt;/b&gt;
        &lt;p&gt;The blog post by Jessica Corbett was published by &lt;a class="external-link" href="https://www.commondreams.org/news/2019/02/05/civil-liberties-groups-warn-proposed-eu-terrorist-content-rule-threat-democratic"&gt;Common Dreams&lt;/a&gt; on February 5, 2019. Centre for Internet &amp;amp; Society was a signatory.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;Dozens of human rights groups and academics have signed on to an &lt;a href="https://cdt.org/files/2019/02/Civil-Society-Letter-to-European-Parliament-on-Terrorism-Database.pdf"&gt;open letter&lt;/a&gt; (pdf) raising alarm about the European Union's proposed &lt;a href="https://edri.org/terrorist-content-regulation-document-pool/"&gt;Regulation on Preventing the Dissemination of Terrorist Content Online&lt;/a&gt;,  warning that its call for Internet hosts to employ "proactive measures"  to censor such content "will almost certainly lead platforms to adopt  poorly understood tools" at the expense of democratic values across the  globe.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;One of those tools is the Hash Database developed by Facebook,  YouTube, Microsoft, and Twitter. The 13 companies that use the  database—which supposedly contains 80,000 images and videos—can  automatically filter out material deemed "extreme" terrorist content.  However, as the letter explains, "almost nothing is publicly known about  the specific content that platforms block using the database, or about  companies' internal processes or error rates, and there is insufficient  clarity around the participating companies' definitions of 'terrorist  content.'"&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;"Countering terrorist violence is a shared priority, and our point is  not to question the good intentions of the database operators. But  lawmakers and the public have no meaningful information about how well  the database or any other existing filtering tool serves this goal, and  at what cost to democratic values and individual human rights," notes  the letter, whose signatories include the American Civil Liberties Union  (ACLU), the Brennan Center for Justice, the Electronic Frontier  Foundation (EFF), and the European Digital Rights (EDRi).&lt;/p&gt;
&lt;p&gt;As an EDRi &lt;a href="https://edri.org/open-letter-on-the-terrorism-database/"&gt;statement&lt;/a&gt; outlines, among the groups' main concerns are the following:&lt;/p&gt;
&lt;ul&gt;
&lt;li style="text-align: justify; "&gt;Lack of transparency of how the database works, and its  effectiveness, proportionality, and appropriateness to achieve the goals  the Terrorist Content Regulation aims to achieve;&lt;/li&gt;
&lt;li&gt;How filters are unable to understand the context and therefore they are error-prone; and&lt;/li&gt;
&lt;li style="text-align: justify; "&gt;Regardless of the possibility of filters to be accurate in the  future, the pervasive online monitoring on disadvantaged and  marginalized individuals.&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;Given the uncertainties over the effectiveness and societal  costs of such tools, the letter charges that "requiring all platforms to  use black-box tools like the database would be a gamble with European  Internet users' rights to privacy and data protection, freedom of  expression and information, and non-discrimination and equality before  the law."&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;With those fundamental rights under threat, the groups are calling on  members of the European Parliament "to reject proactive filtering  obligations; provide sound, peer-reviewed research data supporting  policy recommendations and legal mandates around counter-terrorism; and  refrain from enacting laws that will drive Internet platforms to adopt  untested and poorly understood technologies to restrict online  expression."&lt;/p&gt;
&lt;p&gt;Read the full letter:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;&lt;i&gt;Dear Members of the European Parliament,&lt;/i&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;i&gt;The undersigned organizations write to share our concerns about  the EU’s proposed Regulation on Preventing the Dissemination of  Terrorist Content Online, and in particular the Regulation’s call for  Internet hosts to use “proactive measures” to detect terrorist content.  We are concerned that if this Regulation is adopted, it will almost  certainly lead platforms to adopt poorly understood tools, such as the  Hash Database referenced in the Explanatory Memorandum to the Regulation  and currently overseen by the Global Internet Forum to Counter  Terrorism. Countering terrorist violence is a shared priority, and our  point is not to question the good intentions of the Database operators.  But lawmakers and the public have no meaningful information about how  well the Database or any other existing filtering tool serves this goal,  and at what cost to democratic values and individual human rights. We  urge you to reject proactive filtering obligations; provide sound,  peer-reviewed research data supporting policy recommendations and legal  mandates around counter-terrorism; and refrain from enacting laws that  will drive Internet platforms to adopt untested and poorly understood  technologies to restrict online expression.&lt;/i&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;i&gt;The Database was initially developed by Facebook, YouTube,  Microsoft, and Twitter as a voluntary measure, and announced to the  public in 2016. It contains digital hash “fingerprints” of images  and videos that platforms have identified as “extreme” terrorist  material, based not on the law but on their own Community Guidelines or  Terms of Service. The platforms can use automated filtering tools to  identify and remove duplicates of the hashed images or videos. As of  2018, the Database was said to contain hashes representing over 80,000  images or videos. At least thirteen companies now use the Database, and  some seventy companies have reportedly discussed adopting it.&lt;/i&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;i&gt;Almost nothing is publicly known about the specific content that  platforms block using the Database, or about companies’ internal  processes or error rates, and there is insufficient clarity around the  participating companies’ definitions of “terrorist content.”  Furthermore, there are no reports about how many legal processes or  investigations were opened after the content was blocked. This data  would be crucial to understand to what extent the measures are effective  and necessary in a democratic society, which are some of the sine qua  non requisites for restrictions of fundamental rights. We do know,  however, of conspicuous problems that seemingly result from content  filtering gone awry. The Syrian Archive, a civil society organization  preserving evidence of human rights abuses in Syria, for example,  reports that YouTube deleted over 100,000 of its videos. Videos and  other content which may be used in one context to advocate terrorist  violence may be essential elsewhere for news reporting, combating  terrorist recruitment online, or scholarship. Technical filters are  blind to these contextual differences. As three United Nations special  rapporteurs noted in a December 2018 letter, this problem raises serious  concerns about free expression rights under the proposed Regulation. It  is far from clear whether major platforms like YouTube or Facebook  adequately correct for this through employees’ review of filtering  decisions—and it seems highly unlikely that smaller platforms could even  attempt to do so, if required to use the Database or other filtering  tools.&lt;/i&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;i&gt;Failures of this sort seriously threaten Internet users’ rights  to seek and impart information. The pervasive monitoring that platforms  carry out in order to filter users’ communications also threatens  privacy and data protection rights. Moreover, these harms do not appear  to be equally distributed, but instead disproportionately disadvantage  individual Internet users based on their ethnic background, religion,  language, or location—in other words, harms fall on users who might  already be marginalized. More extensive use of the Database and other  automated filtering tools will amplify the risk of harms to users whose  messages and communications about matters of urgent public concern may  be wrongly removed by platforms. The United Nations Special Rapporteur  on the promotion and protection of human rights and fundamental freedoms  while countering terrorism has expressed concern about this lack of  clarity, and said that Facebook’s rules for classifying organizations as  terrorist are “at odds with international humanitarian law”.&lt;/i&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;i&gt;Due to the opacity of the Database’s operations, it is impossible  to assess the consequences of its nearly two years of operation. The  European public is being asked to rely on claims by platforms or vendors  about the efficacy of the Database and similar tools—or else to assume  that any current problems will be solved by hypothetical future  technologies or untested, post-removal appeal mechanisms. Such  optimistic assumptions cannot be justified given the serious problems  researchers have found with the few filtering tools available for  independent review. Requiring all platforms to use black-box tools like  the Database would be a gamble with European Internet users’ rights to  privacy and data protection, freedom of expression and information, and  non-discrimination and equality before the law. That gamble is neither  necessary nor proportionate as an exercise of state power.&lt;/i&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;i&gt;EU institutions’ embrace of the database and other filtering  tools will also have serious consequences for Internet users all over  the world, including in countries where various of the undersigned  organizations work to protect human rights. For one thing, when  platforms filter a video or image in response to a European authority’s  request, it will likely disappear for users everywhere—even if it is  part of critical news reporting or political discourse in other parts of  the world. For another, encoding proactive measures to filter and  remove content in an EU regulation gives authoritarian and  authoritarian-leaning regimes the cover they need to justify their own  vaguely worded and arbitrarily applied anti-terrorism legislation.  Platforms that have already developed content filtering capabilities in  order to comply with EU laws will find it difficult to resist demands to  use them in other regions and under other laws, to the detriment of  vulnerable Internet users around the globe. Your decisions in this area  will have global consequences.&lt;/i&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;i&gt;Signatories:&lt;/i&gt;&lt;br /&gt;&lt;i&gt; Access Now; Africa Freedom of  Information Centre; Agustina Del Campo, in an individual capacity  (Center for Studies on Freedom of Expression CELE); American Civil  Liberties Union (ACLU); ApTI Romania; Article 19; Bits of Freedom;  Brennan Center for Justice; Catalina Botero Marino, in an individual  capacity (Former Special Rapporteur of Freedom of Expression of the  Organization of American States; Center for Democracy &amp;amp; Technology  (CDT); Centre for Internet and Society; Chinmayi Arun, in an individual  capacity; Damian Loreti, in an individual capacity; Daphne Keller, in an  individual capacity (Stanford CIS); Derechos Digitales · América  Latina; Digital Rights Watch; Electronic Frontier Finland; Electronic  Frontier Foundation (EFF); Electronic Frontier Norway; Elena  Sherstoboeva, in an individual capacity (Higher School of Economics);  European Digital Rights (EDRi); Hermes Center; Hiperderecho; Homo  Digitalis; IT-Pol; Joan Barata, in an individual capacity (Stanford  CIS); Krisztina Rozgonyi, in an individual capacity (University of  Vienna); Open Rights Group; Open Technology Institute at New America;  Ossigeno; Pacific Islands News Association (PINA); People Over Politics;  Prostasia Foundation; R3D: Red en Defensa de los Derechos Digitales;  Sarah T. Roberts, Ph.D., in an individual capacity; Southeast Asian  Press Alliance; Social Media Exchange (SMEX), Lebanon; WITNESS; and  Xnet.&lt;/i&gt;&lt;/p&gt;
&lt;/blockquote&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/jessica-corbett-common-dreams-february-5-2019-civil-liberties-groups-warn-proposed-eu-terrorist-content-rule-threat-democratic'&gt;https://cis-india.org/internet-governance/news/jessica-corbett-common-dreams-february-5-2019-civil-liberties-groups-warn-proposed-eu-terrorist-content-rule-threat-democratic&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    

   <dc:date>2019-02-19T00:49:00Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/2019-international-asia-conference">
    <title>2019 International Asia Conference</title>
    <link>https://cis-india.org/internet-governance/news/2019-international-asia-conference</link>
    <description>
        &lt;b&gt;ITECHLAW organized the 2019 edition of International Asia Conference at JW Marriott hotel in Bangalore on January 31, 2019 and February 1, 2019. Sunil Abraham was a panelist in the session "Policy Making for the Emerging Tech in India".&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The rush of emerging technologies of Machine Learning, Internet of Things (IoT) and Virtual Reality (VR) is revolutionising the landscape in which humans exist. Innovators of the generation are ambitious, and their contributions have significantly impacted on various fields like healthcare, media and entertainment, agriculture, and other service models. As these technology advancements are driving new business and service models, there is a need for stakeholders and governments to ensure security and stability of the market without stifling innovations, stigmatising incentives or creating obstacles. Rapid spreading technology applications are resulting in drastic changes in today’s regulatory model, posing the difficult challenges for regulators. In India, the expeditiously developing start-up ecosystem and online consumer base, has stirred the regulators.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Intermediary liability, surveillance, data and privacy, digital taxation, data governance and sovereignty are the dominating debatable topics in India. The debates are not only between regulators and stakeholders, but consumers also joining in it. As the competition between Indian and Foreign Technology intensifies in the turf, the debate on tech-policy is considerably being mentioned in run-up of political parties to the general elections as well. Over the past one year, the country has witnessed some landmark judgments and contentious government proposals related to data and privacy, implications of which have affected over-the-top (“OTT”) services, online media, social media, e-commerce platforms, IoT services etc. The Indian regulatory framework on tech-policy is becoming stricter due to a very disruptive phase last year. The tech-giants like Facebook, Google, Twitter, and Amazon are themselves realising their enormous market influence. After the episodes of lynching, hate speeches etc., they are participating in policy-making efforts related to fake news and digital malfeasance. In this process legal industry is making considerable lobbying efforts for corporations to work with government to curb the menace of digital malpractice and make the internet safer.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;As the legal industry is participating in the process of creating an innovators-friendly regulatory regime, they are also striving to understand the disruptive technologies and adopt them for their own convenience. However, legal firms must understand that the technology cannot do their job for clients but can only upgrade the business model for them. The traditional law firm business model is not in sync with legal buyers. Effective deployment of technology will ameliorate the factor of its approachability to its clients.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;With the growing technology-based start-ups in India, it is going to be a hub for investments by big corporations. In order to keep attracting the investors there is a need for government to remove the potential hindrances that may make investors double-think. The government should prepare a level-playing field in the market by making citizens aware of the standard tech-policies and fostering the innovators-friendly regulatory regime.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;For more info &lt;a class="external-link" href="https://www.itechlaw.org/Bangalore2019"&gt;see the website&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/2019-international-asia-conference'&gt;https://cis-india.org/internet-governance/news/2019-international-asia-conference&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Intermediary Liability</dc:subject>
    

   <dc:date>2019-02-19T00:23:43Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/economic-times-nilanjana-bhowmick-february-13-2019-make-our-digital-backyard-safe">
    <title>Make our digital backyard safe</title>
    <link>https://cis-india.org/internet-governance/news/economic-times-nilanjana-bhowmick-february-13-2019-make-our-digital-backyard-safe</link>
    <description>
        &lt;b&gt;India has been patting itself on the back for being at the forefront of the ‘Fourth Industrial Revolution’ driven by digitisation. Reports have gushed about the speed and scale of digitisation. But this speed and scale have come at a cost to our privacy.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The article by Nilanjana Bhowmick was published in &lt;a class="external-link" href="https://economictimes.indiatimes.com/blogs/et-commentary/make-our-digital-backyard-safe/"&gt;Economic Times&lt;/a&gt; on February 13, 2019.&lt;/p&gt;
&lt;hr style="text-align: justify; " /&gt;
&lt;p style="text-align: justify; "&gt;According to GoI, this digital push has led to 99% of adult Indians having an Aadhaar number in 2017. GoI has also integrated personal information through the Jan Dhan-Aadhaar-Mobile phone trinity (JAM).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;According to GoI, this digital push has led to 99% of adult Indians having an Aadhaar number in 2017. GoI has also integrated personal information through the Jan Dhan-Aadhaar-Mobile phone trinity (JAM).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A July 2018 &lt;a href="https://www-01.ibm.com/common/ssi/cgi-bin/ssialias?htmlfid=55017055USEN&amp;amp;"&gt;IBM report&lt;/a&gt; stated  that the probability of data breach went up by 8.7% in India over the  last four years based on past experiences. The study also stated that  malicious or criminal attacks were the root cause for 42% of data  breaches, followed by system glitch at 30% and human error at 28%. This  28% has the potential to cause incalculable havoc, which includes the  leak of personal information by anyone — from a call centre executive to  a bank manager — who has access to it.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The prime reason for our lackadaisical attitude is that most Indians  don’t value privacy. We are yet to register the value of personal  information — the actual monetary, marketable value. My personal data,  for instance, costs roughly $2. If I take that as an average, then at  least $2 billion worth of data belonging to 1.3 billion Indians are at  stake here. Which is why, when this data is taken without consent, it is  a financial crime.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;What is perhaps more frightening is that when this data is taken  without consent by an untrusted source, it may also land you, victim of a  data breach, in jail.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Last month, I had noticed a suspicious movement of money in my  account. A large sum of money was deposited in my account in two  instalments, withinthe space of 12 hours. And while I am waiting for the  issue to be addressed by the authorities — RBI ombudsman, bank customer  service, enforcement directorate — the person who wired the money to my  account had got hold of my personal information, including my address  and phone number.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;He kept calling me on my phone and ‘requested’ I give the money  ‘back’ to his brother, ‘in cash or cheque’. Then his brother started  calling me, demanding I ‘return’ the money to him.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The movement of funds in my account could well have been a  money-laundering operation, and if I made the payment to the ‘sender’ as  demanded, the money trail would have implicated me. But what’s most  alarming is that if I was dealing with criminals, someone from my bank  had made them privy to my private information. And this is a top bank  with supposedly top-notch security.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Unfortunately, security is woefully lagging behind India’s speedy  digitisation. Neither are we investing enough on fortifying the system,  nor are we spending enough on postbreach responses. India spends a mere  $20,000 in notification costs, compared to the US’ $740,000.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The US also spends $1.76 million in post-data breach response  activities, including help desk activities, special investigations and  remediation. US and Canadian firms spend $258 and $213 per record  respectively to resolve amalicious or criminal attacks. Indian ones, on  an average, spend $76 per record.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Yes, digitisation is the future. But let’s first plug the social, institutional and systemic weaknesses in our systems.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/economic-times-nilanjana-bhowmick-february-13-2019-make-our-digital-backyard-safe'&gt;https://cis-india.org/internet-governance/news/economic-times-nilanjana-bhowmick-february-13-2019-make-our-digital-backyard-safe&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Nilanjana Bhowmick</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    

   <dc:date>2019-02-18T14:37:25Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/zack-whittaker-natasha-lomas-february-15-2019-tech-crunch-even-years-later-twitter-doesnt-delete-your-direct-messages">
    <title>Even years later, Twitter doesn't delete your direct messages</title>
    <link>https://cis-india.org/internet-governance/news/zack-whittaker-natasha-lomas-february-15-2019-tech-crunch-even-years-later-twitter-doesnt-delete-your-direct-messages</link>
    <description>
        &lt;b&gt;When does “delete” really mean delete? Not always, or even at all, if you’re Twitter.&lt;/b&gt;
        &lt;p&gt;The blog post by Zack Whittaker and Natasha Lomas was published in &lt;a class="external-link" href="https://techcrunch.com/2019/02/15/twitter-direct-messages/"&gt;Tech Crunch&lt;/a&gt; on February 15, 2019. Karan Saini was quoted.&lt;/p&gt;
&lt;hr /&gt;
&lt;p&gt;Twitter  retains direct messages for years, including messages you and others  have deleted, but also data sent to and from accounts that have been  deactivated and suspended, according to security researcher Karan Saini.&lt;/p&gt;
&lt;p&gt;Saini  found years-old messages in a file from an archive of his data obtained  through the website from accounts that were no longer on Twitter. He  also reported a similar bug, found a year earlier but not disclosed  until now, that allowed him to use a since-deprecated API to retrieve  direct messages even after a message was deleted from both the sender  and the recipient — though, the bug wasn’t able to retrieve messages  from suspended accounts.&lt;/p&gt;
&lt;p&gt;Saini told TechCrunch that he had “concerns” that the data was retained by Twitter for so long.&lt;/p&gt;
&lt;p&gt;Direct messages &lt;a href="https://www.cnet.com/how-to/how-to-unsend-twitter-direct-messages/"&gt;once let users “unsend” messages&lt;/a&gt; from someone else’s inbox, simply by deleting it from their own.  Twitter changed this years ago, and now only allows a user to delete  messages from their account. “Others in the conversation will still be  able to see direct messages or conversations that you have deleted,”  Twitter says in &lt;a href="https://help.twitter.com/en/using-twitter/direct-messages"&gt;a help page&lt;/a&gt;. Twitter also says in its &lt;a href="https://twitter.com/en/privacy"&gt;privacy policy&lt;/a&gt; that  anyone wanting to leave the service can have their account “deactivated  and then deleted.” After a 30-day grace period, the account disappears,  along with its data.&lt;/p&gt;
&lt;p&gt;But, in our tests, we could recover direct  messages from years ago — including old messages that had since been  lost to suspended or deleted accounts. By downloading &lt;a href="https://twitter.com/settings/your_twitter_data"&gt;your account’s data&lt;/a&gt;, it’s possible to download all of the data Twitter stores on you.&lt;/p&gt;
&lt;p&gt;&lt;img src="https://cis-india.org/home-images/Twitter.png/@@images/40867bd2-2284-4c9c-b42f-fb7a500b1c92.png" alt="Twitter" class="image-inline" title="Twitter" /&gt;&lt;/p&gt;
&lt;p&gt;A conversation, dated March 2016, with a suspended Twitter account was still retrievable today (Image: TechCrunch)&lt;/p&gt;
&lt;p&gt;Saini says this is a “functional bug” rather than a security flaw,  but argued that the bug allows anyone a “clear bypass” of Twitter  mechanisms to prevent access to suspended or deactivated accounts.&lt;/p&gt;
&lt;p&gt;But  it’s also a privacy matter, and a reminder that “delete” doesn’t mean  delete — especially with your direct messages. That can open up users,  particularly high-risk accounts like journalist and activists, to  government data demands that call for data from years earlier.&lt;/p&gt;
&lt;p&gt;That’s despite &lt;a href="https://help.twitter.com/en/rules-and-policies/twitter-law-enforcement-support"&gt;Twitter’s claim&lt;/a&gt; that once an account has been deactivated, there is “a very brief  period in which we may be able to access account information, including  tweets,” to law enforcement.&lt;/p&gt;
&lt;p&gt;A Twitter spokesperson said the  company was “looking into this further to ensure we have considered the  entire scope of the issue.”&lt;/p&gt;
&lt;p&gt;Retaining direct messages for years  may put the company in a legal grey area amid Europe’s new data  protection laws, which allow users to demand that a company deletes  their data.&lt;/p&gt;
&lt;p&gt;Neil Brown, a telecoms, tech and internet lawyer at &lt;a href="https://decoded.legal/"&gt;U.K. law firm Decoded Legal&lt;/a&gt;,  said there’s “no formality at all” to how a user can ask for their data  to be deleted. Any request from a user to delete their data that’s  directly communicated to the company “is a valid exercise” of a user’s  rights, he said.&lt;/p&gt;
&lt;p&gt;Companies can be fined up to four percent of their annual turnover for violating GDPR rules.&lt;/p&gt;
&lt;p&gt;“A  delete button is perhaps a different matter, as it is not obvious that  ‘delete’ means the same as ‘exercise my right of erasure’,” said Brown.  Given that there’s no case law yet under the new General Data Protection  Regulation regime, it will be up to the courts to decide, he said.&lt;/p&gt;
&lt;p&gt;When asked if Twitter thinks that consent to retain direct messages is withdrawn when a message or account is deleted, Twitter’s spokesperson had “nothing further” to add.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/zack-whittaker-natasha-lomas-february-15-2019-tech-crunch-even-years-later-twitter-doesnt-delete-your-direct-messages'&gt;https://cis-india.org/internet-governance/news/zack-whittaker-natasha-lomas-february-15-2019-tech-crunch-even-years-later-twitter-doesnt-delete-your-direct-messages&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Zack Whittaker and Natasha Lomas</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    

   <dc:date>2019-02-18T14:17:54Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/unbox-2019-festival">
    <title>Unbox Festival 2019: CIS organizes two Workshops</title>
    <link>https://cis-india.org/internet-governance/blog/unbox-2019-festival</link>
    <description>
        &lt;b&gt;Centre for Internet &amp;amp; Society organized two workshops at the Unbox Festival 2019, in Bangalore, on 15 and 17 February 2019. &lt;/b&gt;
        &lt;h3 style="text-align: justify; "&gt;'What is your Feminist Infrastructure Wishlist?'&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The first workshop 'What is your Feminist Infrastructure Wishlist?' was on Feminist Infrastructure Wishlists that was conducted by P.P. Sneha and Saumyaa Naidu on  15 February 2019. The objective of the workshop was to explore what it means to have infrastructure that is feminist. How do we build spaces, networks, and systems that are equal, inclusive, diverse, and accessible? We will also reflect on questions of network configurations, expertise, labour and visibility. For reading material &lt;a class="external-link" href="https://feministinternet.org/"&gt;click here&lt;/a&gt;.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;AI for Good&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;With a backdrop of AI for social good, we explore existing applications of artificial intelligence, how we interact and engage with this technology on a daily basis. A discussion led by Saumyaa Naidu and Shweta Mohandas invited participants to examine current narratives around AI and imagine how these may transform with time. Questions around how we can build an AI for the future will become the starting point to trace its implications relating to social impact, policy, gender, design, and privacy. For reading materials see &lt;a class="external-link" href="https://ainowinstitute.org/AI_Now_2018_Report.pdf"&gt;AI Now Report 2018&lt;/a&gt;, &lt;a class="external-link" href="https://www.propublica.org/article/machine-bias-risk-assessments-in-criminal-sentencing"&gt;Machine Bias&lt;/a&gt;, and &lt;a class="external-link" href="https://www.theatlantic.com/technology/archive/2016/03/why-do-so-many-digital-assistants-have-feminine-names/475884/"&gt;Why Do So Many Digital Assistants Have Feminine Names?&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;For info on Unbox Festival, &lt;a class="external-link" href="http://unboxfestival.com/"&gt;click here&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/unbox-2019-festival'&gt;https://cis-india.org/internet-governance/blog/unbox-2019-festival&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>saumyaa</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Gender</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2019-02-26T01:53:39Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/hindustan-times-february-10-2019-smriti-kak-ramachandran-and-vidhi-choudhary-willing-to-participate-in-parliamentary-panel-hearing">
    <title>‘Willing to participate, but need more time’: Twitter on parliamentary panel hearing</title>
    <link>https://cis-india.org/internet-governance/news/hindustan-times-february-10-2019-smriti-kak-ramachandran-and-vidhi-choudhary-willing-to-participate-in-parliamentary-panel-hearing</link>
    <description>
        &lt;b&gt;Executives from social media firm Twitter’s US headquarters will not appear before a parliamentary panel that has summoned them on Monday over perceived bias towards right-wing handles on the micro-blogging platform.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The article by Smriti Kak Ramachandran and Vidhi Choudhary was &lt;a class="external-link" href="https://www.hindustantimes.com/india-news/twitter-says-willing-to-participate-in-parliamentary-panel-hearing-seeks-more-time/story-C7cDq6n7kOJM3DOFOX45dI.html"&gt;published in Hindustan Times&lt;/a&gt; on February 10, 2019. Sunil Abraham was quoted.&lt;/p&gt;
&lt;hr style="text-align: justify; " /&gt;
&lt;p style="text-align: justify; "&gt;Executives from social media firm Twitter’s US headquarters will not appear before a parliamentary panel that has summoned them on Monday over perceived bias towards right-wing handles on the micro-blogging platform although a spokesperson for the firm said in a statement that this is only on account of timing and that Twitter is “willing to participate in” a hearing by the panel.&lt;br /&gt;&lt;br /&gt;“We have indicated that we are willing to participate in such a broad hearing process. Given the short notice of the hearing, we informed the committee that it would not be possible for senior officials from Twitter to travel from the United States to appear on Monday,” the statement said. The panel’s summons were issued on February 5, with a meeting with the parliamentary panel scheduled for Monday, February 11.&lt;br /&gt;&lt;br /&gt;A right-wing group, Youth for Social Media Democracy, recently held protests claiming the microblogging site suspends or shadow-bans accounts that appear sympathetic to the ruling Bharatiya Janata Party (BJP) and the government.&lt;br /&gt;&lt;br /&gt;Anurag Thakur, a BJP MP who heads the parliamentary panel on information and technology, asked IT ministry officials and Twitter representatives to be present at the meeting. He said the committee takes a serious note of Twitter’s response and would take “appropriate action on February 11.”&lt;br /&gt;&lt;br /&gt;According to an official aware of the letter sent to Twitter, the company was told “it may be noted that the Head of the Organisation has to appear before the Committee”.&lt;br /&gt;&lt;br /&gt;Twitter added in its statement that while it will work with the Lok Sabha secretariat to find a mutually agreeable date for a meeting so that a senior Twitter official (from the US) can attend it has “also offered representatives from Twitter India to come and answer questions on Monday”. 
“We await feedback from the government on both matters,” the statement added.&lt;br /&gt;&lt;br /&gt;In a previous statement, Twitter said that its India representatives do not enforce policy and that this is done “with impartiality” by a “specialized global team”.&lt;br /&gt;&lt;br /&gt;Thakur’s intervention wasn’t prompted by protests by Youth for Social Media Democracy alone. According to the people familiar with the matter, the issue has been repeatedly flagged at meetings of the Rashtriya Swayamsevak Sangh (RSS), the ideological parent of the BJP.&lt;br /&gt;&lt;br /&gt;Twitter denied these allegations. In a statement issued on Friday, the company said, “Twitter is a global platform that serves a global, public conversation. Elevating debate and open discourse is fundamental to the platform’s service, and its core values as a company. Twitter is committed to remain unbiased with the public interest in mind.”&lt;br /&gt;&lt;br /&gt;“The public conversation around Twitter’s policies and actions may be distorted by some who have a political agenda and this may be particularly acute during election cycles when highly-charged political rhetoric becomes more common. For our part, we will endeavour to be even more transparent in how we develop and enforce our policies to dispel conspiracy theories and mistrust,” Colin Crowell, global vice president, public policy, Twitter, added in the statement.&lt;br /&gt;&lt;br /&gt;A senior functionary of the RSS said it was soon after the January 1, 2018 clash between Maratha and Dalit groups in Maharashtra’s Bhima Koregaon that escalated into violence that functionaries of the Sangh began to notice posts on social media that were allegedly “anti-national” and had the potential to create “communal friction”.&lt;br /&gt;&lt;br /&gt;The content of some of the posts was construed to be similar to the expressions used by so-called “urban naxals”, this person said on condition of anonymity. 
Urban naxals is a term coined by the right wing for left-wing intellectuals who, they say, are suspected to have links to Maoist organisations.&lt;br /&gt;&lt;br /&gt;“Posts that spoke of destabilising the nation, that attacked the sovereignty of the country were being put up. No action was being taken, despite complaints to Twitter,” the functionary added.&lt;br /&gt;&lt;br /&gt;It was then that the Sangh chose to knock on Thakur’s doors.&lt;br /&gt;&lt;br /&gt;With 34.4 million users, Twitter has emerged as a key platform for political and social conversations. Given the reach of the medium, even the Election Commission has been monitoring the posts to ensure there is no adverse impact on election processes.&lt;br /&gt;&lt;br /&gt;Experts said Twitter and other platforms need to become more transparent. “Unless Twitter and other internet giants implement principles of natural justice, they will always be accused of bias,” said Sunil Abraham, co-founder of the think tank Centre for Internet and Society, adding that the platform does not “provide sufficient transparency regarding its decisions”.&lt;br /&gt;&lt;br /&gt;Lawyer Apar Gupta said that the parliamentary panel on IT needs to function more robustly. “It has not invited experts, academics, and civil society voices for deliberations. Also, the outcomes from hearings such as the ones on Aadhaar, privacy. data breaches, and net neutrality, done a while back, remain outstanding. 
Reports or recommendations have not been made to parliament.”&lt;br /&gt;&lt;br /&gt;In general, parliamentary panels do allow hearings to be deferred at the request of someone who has been summoned, although this is usually at the discretion of the chairman and also if the request is made immediately after the summons is issued.&lt;br /&gt;&lt;br /&gt;Gupta added that usually, a breach of privilege complaint is made by the chairman of the committee to the Lok Sabha speaker “who will then approve it and send it to the Privileges Committee of the Lok Sabha”.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/hindustan-times-february-10-2019-smriti-kak-ramachandran-and-vidhi-choudhary-willing-to-participate-in-parliamentary-panel-hearing'&gt;https://cis-india.org/internet-governance/news/hindustan-times-february-10-2019-smriti-kak-ramachandran-and-vidhi-choudhary-willing-to-participate-in-parliamentary-panel-hearing&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Smriti Kak Ramachandran and Vidhi Choudhary</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    

   <dc:date>2019-02-15T02:29:55Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/data-infrastructures-inequities-reproductive-health-surveillance-india">
    <title>Data Infrastructures and Inequities: Why Does Reproductive Health Surveillance in India Need Our Urgent Attention?</title>
    <link>https://cis-india.org/internet-governance/blog/data-infrastructures-inequities-reproductive-health-surveillance-india</link>
    <description>
        &lt;b&gt;In order to bring out certain conceptual and procedural problems with health monitoring in the Indian context, this article by Aayush Rathi and Ambika Tandon posits health monitoring as surveillance and not merely as a “data problem.” Casting a critical feminist lens, the historicity of surveillance practices unveils the gendered power differentials wedded into taken-for-granted “benign” monitoring processes. The unpacking of the Mother and Child Tracking System and the National Health Stack reveals the neo-liberal aspirations of the Indian state. &lt;/b&gt;
        
&lt;p&gt;&amp;nbsp;&lt;/p&gt;
&lt;p&gt;&lt;em&gt;The article was first published by &lt;a href="https://www.epw.in/engage/article/data-infrastructures-inequities-why-does-reproductive-health-surveillance-india-need-urgent-attention" target="_blank"&gt;EPW Engage, Vol. 54, Issue No. 6&lt;/a&gt;, on 9 February 2019.&lt;/em&gt;&lt;/p&gt;
&lt;hr /&gt;
&lt;h3&gt;&lt;strong&gt;Framing Reproductive Health as a Surveillance Question&lt;/strong&gt;&lt;/h3&gt;
&lt;p&gt;The approach of the postcolonial Indian state to healthcare has been Malthusian, with the prioritisation of family planning and birth control (Hodges 2004). Supported by the notion of socio-economic development arising out of a “modernisation” paradigm, the target-based approach to achieving reduced fertility rates has shaped India’s reproductive and child health (RCH) programme (Simon-Kumar 2006).&lt;/p&gt;
&lt;p&gt;This is also the context in which India’s abortion law, the Medical Termination of Pregnancy (MTP) Act, was framed in 1971, placing the decisional privacy of women seeking abortions in the hands of registered medical practitioners. The framing of the MTP act invisibilises females seeking abortions for non-medical reasons within the legal framework. The exclusionary provisions only exacerbated existing gaps in health provisioning, as access to safe and legal abortions had already been curtailed by severe geographic inequalities in funding, infrastructure, and human resources. The state has concomitantly been unable to meet contraceptive needs of married couples or reduce maternal and infant mortality rates in large parts of the country, mediating access along the lines of class, social status, education, and age (Sanneving et al 2013).&lt;/p&gt;
&lt;p&gt;While the official narrative around the RCH programme transitioned to focus on universal access to healthcare in the 1990s, the target-based approach continues to shape the reality on the ground. The provision of reproductive healthcare has been deeply unequal and, in some cases, has taken place through sterilisation camps rather than in hospitals. These targets have been known to be met through the practice of forced, and often unsafe, sterilisation, in conditions of absence of adequate provisions or trained professionals, pre-sterilisation counselling, or alternative forms of contraception (Sama and PLD 2018). Further, patients have regularly been provided cash incentives, foreclosing the notion of free consent, especially given that the target population of these camps has been women from marginalised economic classes in rural India.&lt;/p&gt;
&lt;p&gt;Placing surveillance studies within a feminist praxis allows us to frame the reproductive health landscape as more than just an ill-conceived, benign monitoring structure. The critical lens becomes useful for highlighting that taken-for-granted structures of monitoring are wedded with power differentials: genetic screening in fertility clinics, identification documents such as birth certificates, and full-body screeners are just some of the manifestations of this (Adrejevic 2015). Emerging conversations around feminist surveillance studies highlight that these data systems are neither benign nor free of gendered implications (Andrejevic 2015). In continual remaking of the social, corporeal body as a data actor in society, such practices render some bodies normative and obfuscate others, based on categorisations put in place by the surveiller.&lt;/p&gt;
&lt;p&gt;In fact, the history of surveillance can be traced back to the colonial state where it took the form of systematic sexual and gendered violence enacted upon indigenous populations in order to render them compliant (Rifkin 2011; Morgensen 2011). Surveillance, then, manifests as a “scientific” rationalisation of complex social hieroglyphs (such as reproductive health) into formats enabling administrative interventions by the modern state. Lyon (2001) has also emphasised how the body emerged as the site of surveillance in order for the disciplining of the “irrational, sensual body”—essential to the functioning of the modern nation-state—to effectively happen.&lt;/p&gt;
&lt;h3&gt;&lt;strong&gt;Questioning the Information and Communications Technology for Development (ICT4D) and Big Data for Development (BD4D) Rhetoric&lt;/strong&gt;&lt;/h3&gt;
&lt;p&gt;Information and Communications Technology (ICT) and data-driven approaches to the development of a robust health information system, and by extension, welfare, have been offered as solutions to these inequities and exclusions in access to maternal and reproductive healthcare in the country.&lt;/p&gt;
&lt;p&gt;The move towards data-driven development in the country commenced with the introduction of the Health Management Information System in Andhra Pradesh in 2008, and the Mother and Child Tracking System (MCTS) nationally in 2011. These are reproductive health information systems (HIS) that collect granular data about each pregnancy from the antenatal to the post-natal period, at the level of each sub-centre as well as primary and community health centre. The introduction of HIS comprised cross-sectoral digitisation measures that were a part of the larger national push towards e-governance; along with health, thirty other distinct areas of governance, from land records to banking to employment, were identified for this move towards the digitalised provisioning of services (MeitY 2015).&lt;/p&gt;
&lt;p&gt;The HIS have been seen as playing a critical role in the ecosystem of health service provision globally. HIS-based interventions in reproductive health programming have been envisioned as a means of: (i) improving access to services in the context of a healthcare system ridden with inequalities; (ii) improving the quality of services provided, and (iii) producing better quality data to facilitate the objectives of India’s RCH programme, including family planning and population control. Accordingly, starting 2018, the MCTS is being replaced by the RCH portal in a phased manner. The RCH portal, in areas where the ANMOL (ANM Online) application has been introduced, captures data real-time through tablets provided to health workers (MoHFW 2015).&lt;/p&gt;
&lt;p&gt;A proposal to mandatorily link the Aadhaar with data on pregnancies and abortions through the MCTS/RCH has been made by the union minister for Women and Child Development as a deterrent to gender-biased sex selection (Tembhekar 2016). The proposal stems from the prohibition of gender-biased sex selection provided under the Pre-Conception and Pre-Natal Diagnostics Techniques (PCPNDT) Act, 1994. The approach taken so far under the PCPNDT Act, 2014 has been to regulate the use of technologies involved in sex determination. However, the steady decline in the national sex ratio since the passage of the PCPNDT Act provides a clear indication that the regulation of such technology has been largely ineffective. A national policy linking Aadhaar with abortions would be aimed at discouraging gender-biased sex selection through state surveillance, in direct violation of a female’s right to decisional privacy with regards to their own body.&lt;/p&gt;
&lt;p&gt;Linking Aadhaar would also be used as a mechanism to enable direct benefit transfer (DBT) to the beneficiaries of the national maternal benefits scheme. Linking reproductive health services to the Aadhaar ecosystem has been critiqued because it is exclusionary towards women with legitimate claims towards abortions and other reproductive services and benefits, and it heightens the risk of data breaches in a cultural fabric that already stigmatises abortions. The bodies on which this stigma is disproportionately placed, unmarried or disabled females, for instance, experience the harms of visibility through centralised surveillance mechanisms more acutely than others by being penalised for their deviance from cultural expectations.&amp;nbsp; This is in accordance with the theory of "data extremes,” wherein marginalised communities are seen as&amp;nbsp; living on the extremes of&amp;nbsp; data capture, leading to a data regime that either refuses to recognise them as legitimate entities or subjects them to overpolicing in order to discipline deviance (Arora 2016). In both developed and developing contexts, the broader purpose of identity management has largely been to demarcate legitimate and illegitimate actors within a population, either within the framework of security or welfare.&lt;/p&gt;
&lt;h3&gt;&lt;strong&gt;Potential Harms of the Data Model of Reproductive Health Provisioning&lt;/strong&gt;&lt;/h3&gt;
&lt;p&gt;Informational privacy and decisional privacy are critically shaped by data flows and security within the MCTS/RCH. No standards for data sharing and storage, or anonymisation and encryption of data have been implemented despite role-based authentication (NHSRC and Taurus Glocal 2011). The risks of this architectural design are further amplified in the context of the RCH/ANMOL where data is captured real-time. In the absence of adequate safeguards against data leaks, real-time data capture risks the publicising of reproductive health choices in an already stigmatised environment. This opens up avenues for further dilution of autonomy in making future reproductive health choices.&lt;/p&gt;
&lt;p&gt;Several core principles of informational privacy, such as limitations regarding data collection and usage, or informed consent, also need to be reworked within this context.&lt;sup&gt;[1]&lt;/sup&gt; For instance, the centrality of the requirement of “free, informed consent” by an individual would need to be replaced by other models, especially in the context of reproductive health of&amp;nbsp; rape survivors who are vulnerable and therefore unable to exercise full agency. The ability to make a free and informed choice, already dismantled in the context of contemporary data regimes, gets further precluded in such contexts. The constraints on privacy in decisions regarding the body are then replicated in the domain of reproductive data collection.&lt;/p&gt;
&lt;p&gt;What is uniform across these digitisation initiatives is their treatment of maternal and reproductive health as solely a medical event, framed as a data scarcity problem. In doing so, they tend to amplify the understanding of reproductive health through measurable indicators that ignore social determinants of health. For instance, several studies conducted in the rural Indian context have shown that the degree of women’s autonomy influences the degree of usage of pregnancy care, and that the uptake of pregnancy care was associated with village-level indicators such as economic development, provisioning of basic infrastructure and social cohesion. These contextual factors get overridden in pervasive surveillance systems that treat reproductive healthcare as comprising only of measurable indicators and behaviours, that are dependent on individual behaviour of practitioners and women themselves, rather than structural gaps within the system.&lt;/p&gt;
&lt;p&gt;While traditionally associated with state governance, the contemporary surveillance regime is experienced as distinct from its earlier forms due to its reliance on a nexus between surveillance by the state and private institutions and actors, with both legal frameworks and material apparatuses for data collection and sharing (Shepherd 2017). As with historical forms of surveillance, the harms of contemporary data regimes accrue disproportionately among already marginalised and dissenting communities and individuals. Data-driven surveillance has been critiqued for its excesses in multiple contexts globally, including in the domains of predictive policing, health management, and targeted advertising (Mason 2015). In the attempts to achieve these objectives, surveillance systems have been criticised for their reliance on replicating past patterns, reifying proximity to a hetero-patriarchal norm (Haggerty and Ericson 2000). Under data-driven surveillance systems, this proximity informs the preexisting boxes of identity for which algorithmic representations of the individual are formed. The boxes are defined contingent on the distinct objectives of the particular surveillance project, collating disparate pieces of data flows and resulting in the recasting of the singular offline self into various 'data doubles' (Haggerty and Ericson 2000). Refractive, rather than reflective, the data doubles have implications for the physical, embodied life of individual with an increasing number of service provisioning relying on the data doubles (Lyon 2001). Consider, for instance, apps on menstruation, fertility, and health, and wearables such as fitness trackers and pacers, that support corporate agendas around what a woman’s healthy body should look, be or behave like (Lupton 2014). Once viewed through the lens of power relations, the fetishised, apolitical notion of the data “revolution” gives way to what we may better understand as “dataveillance.”&lt;/p&gt;
&lt;h3&gt;&lt;strong&gt;Towards a Networked State and a Neo-liberal Citizen&lt;/strong&gt;&lt;/h3&gt;
&lt;p&gt;Following in this tradition of ICT being treated as the solution to problems plaguing India’s public health information system, a larger, all-pervasive healthcare ecosystem is now being proposed by the Indian state (NITI Aayog 2018). Termed the National Health Stack, it seeks to create a centralised electronic repository of health records of Indian citizens with the aim of capturing every instance of healthcare service usage. Among other functions, it also envisions a platform for the provisioning of health and wellness-based services that may be dispensed by public or private actors in an attempt to achieve universal health coverage. By allowing private parties to utilise the data collected through pullable open application program interfaces (APIs), it also fits within the larger framework of the National Health Policy 2017 that envisions the private sector playing a significant role in the provision of healthcare in India. It also then fits within the state–private sector nexus that characterises dataveillance. This, in turn, follows broader trends towards market-driven solutions and private financing of health sector reform measures that have already had profound consequences on the political economy of healthcare worldwide (Joe et al 2018).&lt;/p&gt;
&lt;p&gt;These initiatives are, in many ways, emblematic of the growing adoption of network governance reform by the Indian state (Newman 2001). This is a stark shift from its traditional posturing as the hegemonic sovereign nation state. This shift entails the delayering from large, hierarchical and unitary government systems to horizontally arranged, more flexible, relatively dispersed systems.&lt;sup&gt;[2]&lt;/sup&gt; The former govern through the power of rules and law, while the latter take the shape of self-regulating networks such as public–private contractual arrangements (Snellen 2005). ICTs have been posited as an effective tool in enabling the transition to network governance by enhancing local governance and interactive policymaking enabling the co-production of knowledge (Ferlie et al 2011). The development of these capabilities is also critical to addressing “wicked problems” such as healthcare (Rittel and Webber 1973).&lt;sup&gt;[3]&lt;/sup&gt; The application of the techno-deterministic, data-driven model to reproductive healthcare provision, then, resembles a fetishised approach to technological change. The NHSRC describes this as the collection of data without an objective, leading to a disproportional burden on data collection over use (NHSRC and Taurus Glocal 2011).&lt;/p&gt;
&lt;p&gt;The blurring of the functions of state and private actors is reflective of the neo-liberal ethic, which produces new practices of governmentality. Within the neo-liberal framework of reproductive healthcare, the citizen is constructed as an individual actor, with agency over and responsibility for their own health and well-being (Maturo et al 2016).&lt;/p&gt;
&lt;h3&gt;&lt;strong&gt;“Quantified Self” of the Neo-liberal Citizen&lt;/strong&gt;&lt;/h3&gt;
&lt;p&gt;Nowhere can the manifestation of this neo-liberal citizen be seen as clearly as in the “quantified self” movement. The quantified self movement refers to the emergence of a whole range of apps that enable the user to track bodily functions and record data to achieve wellness and health goals, including menstruation, fertility, pregnancies, and health indicators in the mother and baby. Lupton (2015) labels this as the emergence of the “digitised reproductive citizen,” who is expected to be attentive to her fertility and sexual behaviour to achieve better reproductive health goals. The practice of collecting data around reproductive health is not new to the individual or the state, as has been demonstrated by the discussion above. What is new in this regime of datafication under the self-tracking movement is the monetisation of reproductive health data by private actors, the labour for which is performed by the user. Focusing on embodiment draws attention to different kinds of exploitation engendered by reproductive health apps. Not only is data about the body collected and sold, the unpaid labour for collection is extracted from the user. The reproductive body can then be understood as a cyborg, or a woman-machine hybrid, systematically digitising its bodily functions for profit-making within the capitalist (re)production machine (Fotopoulou 2016). Accordingly, all major reproductive health tracking apps have a business model that relies on selling information about users for direct marketing of products around reproductive health and well-being (Felizi and Varon nd).&lt;/p&gt;
&lt;p&gt;As has been pointed out in the case of big data more broadly, reproductive health applications (apps) facilitate the visibility of the female reproductive body in the public domain. Supplying anonymised data sets to medical researchers and universities fills some of the historical gaps in research around the female body and reproductive health. Reproductive and sexual health tracking apps globally provide their users a platform to engage with biomedical information around sexual and reproductive health. Through group chats on the platform, they are also able to engage with experiential knowledge of sexual and reproductive health. This could also help form transnational networks of solidarity around the body and health&amp;nbsp; (Fotopoulou 2016).&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;This radical potential of network-building around reproductive and sexual health is, however, tempered to a large extent by the reconfiguration of gendered stereotypes through these apps. In a study on reproductive health apps on Google Play Store, Lupton (2014) finds that products targeted towards female users are marketed through the discourse of risk and vulnerability, while those targeted towards male users are framed within that of virility. Apart from reiterating gendered stereotypes around the male and female body, such a discourse assumes that the entire labour of family planning is performed by females. This same is the case with the MCTS/RCH.&lt;/p&gt;
&lt;p&gt;Technological interventions such as reproductive health apps as well as HIS are based on the assumption that females have perfect control over decisions regarding their own bodies and reproductive health, despite this being disproved in India. The Guttmacher Institute (2014) has found that 60% of women in India report not having control over decisions regarding their own healthcare. The failure to account for the husband or the family as stakeholder in decision-making around reproductive health has been a historical failure of the family planning programme in India, and is now being replicated in other modalities. This notion of an autonomous citizen who is able to take responsibility of their own reproductive health and well-being does not hold true in the Indian context. It can even be seen as marginalising females who have already been excluded from the reproductive health system, as they are held responsible for their own inability to access healthcare.&lt;/p&gt;
&lt;h3&gt;&lt;strong&gt;Concluding Remarks&lt;/strong&gt;&lt;/h3&gt;
&lt;p&gt;The interplay that emerges between reproductive health surveillance and data infrastructures is a complex one. It requires the careful positioning of the political nature of data collection and processing as well as its hetero-patriarchal and colonial legacies, within the need for effective utilisation of data for achieving developmental goals. Assessing this discourse through a feminist lens identifies the web of power relations in data regimes. This problematises narratives of technological solutions for welfare provision.&lt;/p&gt;
&lt;p&gt;The reproductive healthcare framework in India then offers up a useful case study to assess these concerns. The growing adoption of ICT-based surveillance tools to equalise access to healthcare needs to be understood in the socio-economic, legal, and cultural context where these tools are being implemented. Increased surveillance has historically been associated with causing the structural gendered violence that it is now being offered as a solution to. This is a function of normative standards being constructed for reproductive behaviour that necessarily leave out broader definitions of reproductive health and welfare when viewed through a feminist lens. Within the larger context of health policymaking in India, moves towards privatisation then demonstrate the peculiarity of dataveillance as it functions through an unaccountable and pervasive overlapping of state and private surveillance practices. It remains to be seen how these trends in ICT-driven health policies affect access to reproductive rights and decisional privacy for millions of females in India and other parts of the global South.&lt;/p&gt;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;

        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/data-infrastructures-inequities-reproductive-health-surveillance-india'&gt;https://cis-india.org/internet-governance/blog/data-infrastructures-inequities-reproductive-health-surveillance-india&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Aayush Rathi and Ambika Tandon</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Big Data</dc:subject>
    
    
        <dc:subject>Data Systems</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    
    
        <dc:subject>Researchers at Work</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Research</dc:subject>
    
    
        <dc:subject>BD4D</dc:subject>
    
    
        <dc:subject>Healthcare</dc:subject>
    
    
        <dc:subject>Surveillance</dc:subject>
    
    
        <dc:subject>Big Data for Development</dc:subject>
    

   <dc:date>2019-12-30T16:44:32Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/livemint-abhijit-ahaskar-february-12-2019-what-the-governments-draft-it-intermediary-guidelines-say">
    <title>What the government's draft IT intermediary guidelines say</title>
    <link>https://cis-india.org/internet-governance/news/livemint-abhijit-ahaskar-february-12-2019-what-the-governments-draft-it-intermediary-guidelines-say</link>
    <description>
        &lt;b&gt;Intermediaries will have to hand over to government agencies any information within 72 hours.
Intermediaries will have to use automated tools to trace the person posting unlawful content.
&lt;/b&gt;
        &lt;p&gt;The article by Abhijit Ahaskar was &lt;a class="external-link" href="https://www.livemint.com/technology/tech-news/what-the-government-s-draft-it-intermediary-guidelines-say-1549959448471.html"&gt;published in Livemint&lt;/a&gt; on February 12, 2019. CIS research was quoted.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;With voices for regulating tech companies getting stronger in the wake of growing incidence of fake news being circulated through social media platforms, the Ministry of Electronics and Information Technology (MEITY) of India has decided to re-examine the Information Technology (IT) Intermediary Guidelines, 2011, under the IT Act, 2000.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Setting the wheel in motion, the ministry proposed a draft called Information Technology Intermediaries Guidelines (Amendment), 2018, and released the recommendations on its website for public comments in December 2018. The first round of comments ended on 31 January, 2019 and was made public last week. The second round of comments and counter-comments will close on 14 February, 2019.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;What the draft proposes&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The term intermediary refers to all tech companies that are hosting user data or are providing users with a platform for communication. This brings all internet, social media, telecom companies in its ambit.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The draft amendment proposes that intermediaries will have to hand over to governmentagencies any information that might be related to cyber security, national security and related with the investigation, prosecution or prevention of an offence, within 72 hours.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;They will have to take down or disable content considered defamatory or against national security under Article 19 (2) of the Constitution within 24 hours on being notified by the appropriate government or its agency in addition to using automated tools to identify, remove and trace the origin of such content. Intermediaries with over 55 lakh users will be required to have a permanent registered office with physical address and a senior official who would be available for coordination with law enforcement agencies.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Concerns over the draft guidelines&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Microsoft notes that the problem MEITY is trying to address is of fake news. “Existing regulations provide enough powers to work with social media platforms. There may be a case to bring out additional guidelines for certain types of intermediaries like social media platforms. There may also be a case to strengthen other laws which make the punishment of fake news and misuse of social media stringent. The focus should be on the perpetrators of the crime rather than the intermediaries," it has said in response to the guidelines. Regarding deployment of tools to proactively identify and remove unlawful content, Microsoft cautions that intermediaries will have to monitor all content passing through their systems for this, which is a violation of their individual privacy and right to freedom of expression. It will also be technically impractical due to the high cost of deploying such tech.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;According to Broadband India Forum, one of the grounds for the Supreme Court striking down Section 66A of the IT Act, 2000, in Shreya Singhal vs Union of India was the vagueness of the terms used in the provision, such as offensive, menacing and dangerous, which invaded the right of free speech. However, words with a similar level of vagueness, such as grossly harmful, harassing and hateful exist in the proposed draft.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The Centre for Internet and Society (CIS) pointed out that existing laws provide enough teeth to the Indian agencies to act. For instance, Section 505 of the IPC has provisions to penalise disinformation while Sections 290 and 153A of the IPC have provisions if the disinformation is being used to create communal strife. CIS has also flagged the scope of the term unlawful as it is not clearly defined, leaving room for broad interpretation. On the traceability clause, CIS draws attention to the lack of clarity on whether it applies on just social media platforms and messaging services or all intermediaries.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;This can be a bit of problem for ISPs which may have no access to contents of an encrypted communication sent and received on its network.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Threat to privacy&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The traceability clause, which requires intermediaries to use automated tools to trace the person posting unlawful content, came in for a lot of criticism. While the Ministry in an official tweet in January 2018 clarified that it only requires intermediaries to trace the origin of messages which lead to unlawful activities without breaking encryption, experts believe it isn’t possible without lowering encryption standards or building a backdoor to access encrypted communications.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Amnesty International slammed the clause, arguing, “While governments can legitimately use electronic surveillance to protect people from crime, forcing companies to weaken encryption will affect all users’ online privacy. Such measures would be inherently disproportionate, and therefore impermissible under international human rights law."&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Wipro in its response rues such a traceability requirement could lead to breaking of encryption on apps such as WhatsApp and Signal, and this will be a major threat to the privacy rights of citizens as enshrined in the Puttaswamy judgment of the Supreme Court.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Undue burden on small companies&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Commenting on the 72 hours timeline for furnishing user data, the Internet Freedom Foundation says that such short deadline for compliance can only be fulfilled by large social media platforms. This might make smaller companies over compliant to government demands for immunity resulting in a total disregard for user privacy.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Regarding taking down of unlawful content, technology policy researchers form National Institute of Public Finance &amp;amp; Policy (NIPFP) caution that overzealous implementation along with over reliance on technological tools for the detection of unlawful content would lead to the curtailment of online speech. They pointed out the instance where Facebook had removed posts documenting the ethnic cleansing of Rohingyas as it had classified Rohingya organisations as dangerous militant groups.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/livemint-abhijit-ahaskar-february-12-2019-what-the-governments-draft-it-intermediary-guidelines-say'&gt;https://cis-india.org/internet-governance/news/livemint-abhijit-ahaskar-february-12-2019-what-the-governments-draft-it-intermediary-guidelines-say&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    

   <dc:date>2019-02-13T00:31:29Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/business-standard-february-9-2019-sunil-abraham-intermediary-liability-law-needs-updating">
    <title>Intermediary liability law needs updating </title>
    <link>https://cis-india.org/internet-governance/blog/business-standard-february-9-2019-sunil-abraham-intermediary-liability-law-needs-updating</link>
    <description>
        &lt;b&gt;The time has come for India to exert its foreign policy muscle. There is a less charitable name for intermediary liability regimes like Sec 79 of the IT Act — private censorship regimes. &lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The article was published in &lt;a class="external-link" href="https://www.business-standard.com/article/opinion/intermediary-liability-law-needs-updating-119020900705_1.html"&gt;Business Standard&lt;/a&gt; on February 9, 2019.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;Intermediaries get immunity from liability emerging from user-generated and third-party content because they have no “actual knowledge” until it is brought to their notice using “take down” requests or orders.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Since some of the harm caused is immediate, irreparable and irreversible, it is the preferred alternative to approaching courts for each case. When intermediary liability regimes were first enacted, most intermediaries were acting as common carriers — ie they did not curate the experience of users in a substantial fashion. While some intermediaries like Wikipedia continue this common carrier tradition, others driven by advertising revenue no longer treat all parties and all pieces of content neutrally. Facebook, Google and Twitter do everything they can to raise advertising revenues. They make you depressed. And if they like you, they get you to go out and vote. There is an urgent need to update intermediary liability law.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In response to being summoned by multiple governments, Facebook has announced the establishment of an independent oversight board. A global free speech court for the world’s biggest online country. The time has come for India to exert its foreign policy muscle. The amendments to our intermediary liability regime can have global repercussions, and shape the structure and functioning of this and other global courts.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;While with one hand Facebook dealt the oversight board, with the other hand it took down APIs that would enable press and civil society to monitor political advertising in real time. How could they do that with no legal consequences? The answer is simple — those APIs were provided on a voluntary basis. There was no law requiring them to do so.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;There are two approaches that could be followed. One, as scholar of regulatory theory Amba Kak puts it, is to “disincentivise the black box”. Most transparency reports produced by intermediaries today are on a voluntary basis; there is no requirement for this under law. Our new law could require a extensive transparency with appropriate privacy safeguards for the government, affected parties and the general public in terms of revenues, content production and consumption, policy development, contracts, service-level agreements, enforcement, adjudication and appeal. User empowerment measures in the user interface and algorithm explainability could be required. The key word in this approach is transparency.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The alternative is to incentivise the black box. Here faith is placed in technological solutions like artificial intelligence. To be fair, technological solutions may be desirable for battling child pornography, where pre-censorship (or deletion before content is published) is required. Fingerprinting technology is used to determine if the content exists in a global database maintained by organisations like the Internet Watch Foundation. A similar technology called Content ID is used pre-censor copyright infringement. Unfortunately, this is done by ignoring the flexibilities that exist in Indian copyright law to promote education, protect access knowledge by the disabled, etc. Even within such narrow application of technologies, there have been false positives. Recently, a video of a blogger testing his microphone was identified as a pre-existing copyrighted work.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The goal of a policy-maker working on this amendment should be to prevent repeats of the Shreya Singhal judgment where sections of the IT Act were read down or struck down. To avoid similar constitution challenges in the future, the rules should not specify any new categories of illegal content, because that would be outside the scope of the parent clause. The fifth ground in the list is sufficient — “violates any law for the time being in force”. Additional grounds, such as “harms minors in anyway”, is vague and cannot apply to all categories of intermediaries — for example, a dating site for sexual minorities. The rights of children need to be protected. But that is best done within the ongoing amendment to the POCSO Act.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;As an engineer, I vote to eliminate redundancy. If there are specific offences that cannot fit in other parts of the law, those offences can be added as separate sections in the IT Act. For example, even though voyeurism is criminalised in the IT Act, the non-consensual distribution of intimate content could be criminalised, as it has been done in the Philippines.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Provisions that have to do with data retention and government access to that data for the purposes of national security, law enforcement and also anonymised datasets for the public interest should be in the upcoming Data Protection law. The rules for intermediary liability is not the correct place to deal with it, because data retention may also be required of those intermediaries that don’t handle any third-party information or user generated content. Finally, there have to be clear procedures in place for reinstatement of content that has been taken down.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;&lt;i&gt;Disclosure: The Centre for Internet and Society receives grants from Facebook, Google and Wikimedia Foundation&lt;/i&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/business-standard-february-9-2019-sunil-abraham-intermediary-liability-law-needs-updating'&gt;https://cis-india.org/internet-governance/blog/business-standard-february-9-2019-sunil-abraham-intermediary-liability-law-needs-updating&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>sunil</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Intermediary Liability</dc:subject>
    

   <dc:date>2019-02-13T00:05:30Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/akriti-bopanna-february-8-2019-comment-on-icann-draft-fy-20-operating-plan-and-budget">
    <title>CIS Comment on ICANN's Draft FY20 Operating Plan and Budget </title>
    <link>https://cis-india.org/internet-governance/blog/akriti-bopanna-february-8-2019-comment-on-icann-draft-fy-20-operating-plan-and-budget</link>
    <description>
        &lt;b&gt;At the Centre for Internet and Society, we are grateful for the opportunity to provide our comments on the proposed draft of ICANN’s FY20 Operating Plan and Budget along with their Five-Year Operating Plan Update. As part of the public comment process, ICANN provided a list of documents which can be found here that included their highlights of the budget, the total draft budget for FY20, an operating plan segregated by portfolios, amongst others.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The following  are our comments on relevant aspects from the different documents:&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;There are several significant undertakings which have not found adequate support in this budget, chief among them being the implementation of the ICANN Workstream 2 recommendations on Accountability. The budget accounts for any expenses that arise from WS2 as emanating from its contingency fund which is a mere 4%. Totalling more than 100 recommendations across 8 sub groups, execution of these would require significant expenditure. Ideally, this should have been budgeted for in the FY20 budget considering the final report was submitted in June, 2018 and conversations about its implementation have been carried out ever since. It is wondered if this is because the second Workstream does not have the effectuation of its recommendations in its mandate and hence it is easier for ICANN to be slow on it.&lt;a href="#_ftn1" name="_ftnref1"&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/a&gt; As a member of the community deeply interested in integrating human rights better in ICANN’s various processes, it is concerning to note the glacial pace of the approval of the aforementioned recommendations especially coupled with the lack of funds allocated to it. Further, there is 1 one person assigned to work on the WS2 implementation work which seems insufficient for the magnitude of work involved.&lt;a href="#_ftn2" name="_ftnref2"&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A topical issue with ICANN currently is its tussle with the implementation of the General Data Protection Regulation (GDPR) and despite the prominence and extent of the legal burden involved, resources for complying with it have not been allocated. Again, it is within the umbrella of the contingency budget.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The Cross Community Working Group on New gTLD Auction Proceeds is also, presently, developing recommendations on how to distribute the proceeds. It is unclear where these will be funded from since their work is funded by the core ICANN budget yet it is assumed that the recommendations will be funded by the auction proceeds. Almost 7 years after the new gTLD round was open, it is alarming that ICANN has not formulated a plan for the proceeds and are still debating the merits of the entity which would resolve this question, as recently as the last ICANN meeting in October, 2018.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Another important policy development process being undertaken right now is the Working Group which is reviewing the current new gTLD policies to improve the process by proposing changes or new policies. There are no resources in the FY20 budget to implement the changes that will arise from this but only those to support the Working Group activities.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Lastly, the budgets lack information on how much each individual RIR contributes.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;&lt;span style="text-decoration: underline;"&gt;Staff costs&lt;/span&gt;&lt;/b&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;ICANN’s internal costs on their personnel have been rising for years and slated to account for more than half their annual budget with an estimated 56% or $76.3 million in the next financial year. The community has been consistent in calling upon them to revise their staff costs with many questioning if the growth in staff is justified.&lt;a href="#_ftn3" name="_ftnref3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; There was criticism from all quarters such as the GNSO Council who stated that it is “&lt;i&gt;not convinced that the proposed budget funds the policy work it needs to do over the coming year”.&lt;a href="#_ftn4" name="_ftnref4"&gt;&lt;sup&gt;&lt;b&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/b&gt;&lt;/sup&gt;&lt;/a&gt; &lt;/i&gt;The excessive use of professional service consultants has come under fire too.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;As pointed out in a mailing list, in comments on the FY19 budget, &lt;i&gt;every single constituency and stakeholder group&lt;/i&gt; remarked that personnel costs presented too high a burden on the budget. One of the suggestions presented by the NCSG was to relocate positions from the LA headquarters to less expensive countries such as those in Asia. This can be seen from the high increase in this budget of $200,000 in operational costs though no clear breakdown of what that entails was given.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The view seems to be that ICANN repeatedly chooses to retain higher salaries while reducing funding for the community. This is even more of an issue since their employment remuneration scheme is opaque. In a DIDP I filed enquiring about the average salary across designations, gender, regions and the frequency of bonuses, the response was either to refer to their earlier documents which do not have concrete information or that the relevant documents were not in their possession.&lt;a href="#_ftn5" name="_ftnref5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;&lt;span style="text-decoration: underline;"&gt;ICANN Fellowship&lt;/span&gt;&lt;/b&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The budget of the fellowship, an important initiative to involve individuals in ICANN who cannot afford the cost of flying to the global ICANN meetings, has been reduced. The focus should not only be on arriving at a suitable figure for the funding but also on ensuring that people who either actively contribute or are likely to are supported as opposed to individuals who are already known in this circle.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Again, our attempts at understanding the Fellowship selection were met with resistance from ICANN. In a DIDP filed regarding it with questions such as if anyone had received it more than the maximum limit of thrice and details on the selection criteria, no clarity was provided.&lt;a href="#_ftn6" name="_ftnref6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;b&gt;&lt;span style="text-decoration: underline;"&gt;Lobbying and Sponsorship&lt;/span&gt;&lt;/b&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;At ICANN 63 in Barcelona, I enquired about ICANN’s sponsorship strategies and how the decision making is done with respect to which all events in each region to sponsor and for a comprehensive list of all sponsorship ICANN undertakes and receives. I was told such a document would be published soon but in the 4 months since then, none can be found. It is difficult to comment on the budget for such a team where there is not much information on the work it specifically carries out and the impact of such sponsoring activities. When questioned to someone on their team, I was told that it depends on the needs of each region and events that are significant in such regions. However without public accountability and transparency about these, sponsorship can be seen as a vague heading which could be better spent on community initiatives.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Talking of Transparency, it has also been pointed out that the Information Transparency Initiative has 3 million dollars set aside for its activities in this budget. It sounds positive yet with no deliverables to show in the past 2 years, it is difficult to ascertain the value of the investment in this initiative.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Lobbying activities do not find any mention in the budget and neither do the nature of sponsorship from other entities in terms of whether it is travel and accommodation of personnel or any other kind of institutional sponsorship.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref1" name="_ftn1"&gt;&lt;sup&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/icann-work-stream-2-recommendations-on-accountability&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref2" name="_ftn2"&gt;&lt;sup&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.icann.org/en/system/files/files/proposed-opplan-fy20-17dec18-en.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref3" name="_ftn3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; http://domainincite.com/22680-community-calls-on-icann-to-cut-staff-spending&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref4" name="_ftn4"&gt;&lt;sup&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Ibid&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref5" name="_ftn5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;https://cis-india.org/internet-governance/blog/didp-request-30-enquiry-about-the-employee-pay-structure-at-icann&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref6" name="_ftn6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/didp-31-on-icanns-fellowship-program&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/akriti-bopanna-february-8-2019-comment-on-icann-draft-fy-20-operating-plan-and-budget'&gt;https://cis-india.org/internet-governance/blog/akriti-bopanna-february-8-2019-comment-on-icann-draft-fy-20-operating-plan-and-budget&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>akriti</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>ICANN</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    

   <dc:date>2019-02-12T23:44:46Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/harsh-bajpai-ambika-tandon-and-amber-sinha-february-8-2019-the-future-of-work-in-automotive-sector-in-india">
    <title>The Future of Work in the Automotive Sector in India</title>
    <link>https://cis-india.org/internet-governance/blog/harsh-bajpai-ambika-tandon-and-amber-sinha-february-8-2019-the-future-of-work-in-automotive-sector-in-india</link>
    <description>
        &lt;b&gt;This report empirically studies the future of work in the automotive sector in India. The report has been authored by Harsh Bajpai, Ambika Tandon and Amber Sinha. Rakhi Sehgal and Aayush Rathi have edited the report.&lt;/b&gt;
        
&lt;h2&gt;Introduction&lt;/h2&gt;
&lt;p style="text-align: justify;"&gt;The adoption of information and communication based technology (ICTs) for industrial use is not a new phenomenon. However, the advent of Industry 4.0 hasbeen described as a paradigm shift in production, involving widespread automation and irreversible shifts in the structure of jobs. Industry 4.0 is widely understood as the technical integration of cyber-physical systems into production and logistics, and the use of Internet of Things (IoTs) in processes and systems. This may pose major challenges for industries, workers, and policymakers as they grapple with shifts in the structure of employment and content of jobs, bring about significant changes in business models, downstream services and the organisation of work.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;The adoption of information and communication based technology (ICTs) for industrial use is not a new phenomenon. However, the advent of Industry 4.0 has been described as a paradigm shift in production, involving widespread automation and irreversible shifts in the structure of jobs. Industry 4.0 is widely understood as the technical integration of cyber-physical systems into production and logistics, and the use of Internet of Things (IoTs) in processes and systems. This may pose major challenges for industries, workers, and policymakers as they grapple with shifts in the structure of employment and content of jobs, bring about significant changes in business models, downstream services and the organisation of work.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;Industry 4.0 is characterised by four elements. First, the use of intelligent machines could have significant impact on production through the introduction of automated processes in ‘smart factories.’ Second, real-time production would begin optimising utilisation capacity, with shorter lead times and avoidance of standstills. Third, the self-organisation of machines can lead to decentralisation of production. Finally, Industry 4.0 is commonly characterised by the individualisation of production, responding to customer requests. The advancement of digital technology and consequent increase in automation has raised concerns about unemployment and changes in the structure of work. Globally, automation in manufacturing and services has been posited as replacing jobs with routine task content, while generating jobs with non-routine cognitive and manual tasks.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;Some scholars have argued that unemployment will increase globally as technology eliminates tens of millions of jobs in the manufacturing sector. It could then result in the lowering of wages and employment opportunities for low skilled&amp;nbsp;workers, and increased investment in capital-intensive technologies for employers.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;However, this theory of technologically driven job loss and increasing inequality has been contested on numerous occasions, with the assertion that technology will be an enabler, will change task content rather than displace workers, and will also create new jobs. It has further been argued that other factors such as increasing globalisation, weakening trade unions and platforms for collective bargaining, and disaggregation of the supply chain through outsourcing have led to declining wages, income inequality, inadequate health and safety conditions, and displacement of workers.&lt;/p&gt;
&lt;p&gt;In India, there is little evidence of unemployment caused by adoption of technology due to Industry 4.0, but there is a strong consensus that technology affects labour by changing the job mix and skill demand. It should be noted that technological adoption under Industry 4.0 in advanced industrial economies has been driven by cost-benefit analysis due to accessible technology, and a highly skilled labour force. However, these key factors are serious impediments in the Indian context, which brings the large scale adoption of cyber-physical systems into question.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;The diffusion of low cost manual labour across a large majority of roles in manufacturing raises concerns about the cost-benefit analysis of investing capital in expensive automotive technology, while also accounting for the resultant displacement of labour. Further, the skill gap across the labour force implies that&amp;nbsp;the adoption of cyber-physical systems would require significant up-skilling or re-skilling to meet the potential shortage in highly skilled professionals.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;This is an in-depth case study on the future of work in the automotive sector in India. We chose to focus on the future of work in the automotive sector in India&amp;nbsp;for two reasons: first, the Indian automotive sector is one of the largest contributors to the GDP at 7.2 percent, and second, it is one of the largest employment generators among non-agricultural industries. The first section details the structure of the automotive industry in India, including the range of stakeholders, and the national policy framework, through an analysis of academic literature, government reports, and legal documents.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;The second section explores different aspects of the future of work in the automotive sector, through a combination of in-depth semi-structured interviews and enterprise-based surveys in the North Indian belt of Gurgaon-Manesar-Dharuhera-Bawal. Challenges posed by shifts in the industrial relations framework, with increasing casualization and emergence of atypical forms of work, will also be explored, with specific reference to crises in collective bargaining and social security. We will then move on to looking at the state of female participation in the workforce in the automotive industry. The report concludes with policy recommendations addressing some of the challenges outlined above.&lt;/p&gt;
&lt;p style="text-align: justify;"&gt;Read the full report &lt;a href="https://cis-india.org/internet-governance/pdf-automotive-case-study" class="internal-link" title="PDF Automotive Case Study"&gt;here&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;&amp;nbsp;&lt;/p&gt;

        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/harsh-bajpai-ambika-tandon-and-amber-sinha-february-8-2019-the-future-of-work-in-automotive-sector-in-india'&gt;https://cis-india.org/internet-governance/blog/harsh-bajpai-ambika-tandon-and-amber-sinha-february-8-2019-the-future-of-work-in-automotive-sector-in-india&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Harsh Bajpai, Ambika Tandon, and Amber Sinha</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Automotive</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Internet of Things</dc:subject>
    

   <dc:date>2020-03-18T09:00:31Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/events/internet-speech-perspectives-on-regulation-and-policy">
    <title>Internet Speech: Perspectives on Regulation and Policy</title>
    <link>https://cis-india.org/internet-governance/events/internet-speech-perspectives-on-regulation-and-policy</link>
    <description>
        &lt;b&gt;The Centre for Internet &amp;amp; Society and the University of Munich (LMU), Germany are jointly organizing an international symposium at India Habitat Centre in New Delhi on April 5, 2019&lt;/b&gt;
        &lt;p&gt;&lt;img src="https://cis-india.org/home-images/FreeSpeechSymposium_Poster_02.jpg/@@images/89fe6323-7608-482a-8072-dc241e9f0fda.jpeg" alt="Free Speech Poster" class="image-inline" title="Free Speech Poster" /&gt;&lt;/p&gt;
&lt;hr /&gt;
&lt;p&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/files/free-speech-symposium-agenda"&gt;&lt;b&gt;Click to download the agenda&lt;/b&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p&gt; &lt;/p&gt;
&lt;p&gt;&lt;a class="external-link" href="https://cis-india.org/internet-governance/files/free-speech-symposium-agenda"&gt; &lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/events/internet-speech-perspectives-on-regulation-and-policy'&gt;https://cis-india.org/internet-governance/events/internet-speech-perspectives-on-regulation-and-policy&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>akriti</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Freedom of Speech and Expression</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Featured</dc:subject>
    
    
        <dc:subject>Internet Freedom</dc:subject>
    
    
        <dc:subject>Event</dc:subject>
    

   <dc:date>2019-04-01T16:38:54Z</dc:date>
   <dc:type>Event</dc:type>
   </item>




</rdf:RDF>
